bool IdealWeightInverseMeanRatio::evaluate( PatchData& pd, size_t handle, double& m, MsqError& err ) { const MsqMeshEntity* e = &pd.element_by_index(handle); EntityTopology topo = e->get_element_type(); const MsqVertex *vertices = pd.get_vertex_array(err); MSQ_ERRZERO(err); const size_t *v_i = e->get_vertex_index_array(); Vector3D n; // Surface normal for 2D objects // Prism and Hex element descriptions static const int locs_pri[6][4] = {{0, 1, 2, 3}, {1, 2, 0, 4}, {2, 0, 1, 5}, {3, 5, 4, 0}, {4, 3, 5, 1}, {5, 4, 3, 2}}; static const int locs_hex[8][4] = {{0, 1, 3, 4}, {1, 2, 0, 5}, {2, 3, 1, 6}, {3, 0, 2, 7}, {4, 7, 5, 0}, {5, 4, 6, 1}, {6, 5, 7, 2}, {7, 6, 4, 3}}; const Vector3D d_con(1.0, 1.0, 1.0); int i; m = 0.0; bool metric_valid = false; switch(topo) { case TRIANGLE: pd.get_domain_normal_at_element(e, n, err); MSQ_ERRZERO(err); n = n / n.length(); // Need unit normal mCoords[0] = vertices[v_i[0]]; mCoords[1] = vertices[v_i[1]]; mCoords[2] = vertices[v_i[2]]; metric_valid = m_fcn_2e(m, mCoords, n, a2Con, b2Con, c2Con); if (!metric_valid) return false; break; case QUADRILATERAL: pd.get_domain_normal_at_element(e, n, err); MSQ_ERRZERO(err); n = n / n.length(); // Need unit normal for (i = 0; i < 4; ++i) { mCoords[0] = vertices[v_i[locs_hex[i][0]]]; mCoords[1] = vertices[v_i[locs_hex[i][1]]]; mCoords[2] = vertices[v_i[locs_hex[i][2]]]; metric_valid = m_fcn_2i(mMetrics[i], mCoords, n, a2Con, b2Con, c2Con, d_con); if (!metric_valid) return false; } m = average_metrics(mMetrics, 4, err); break; case TETRAHEDRON: mCoords[0] = vertices[v_i[0]]; mCoords[1] = vertices[v_i[1]]; mCoords[2] = vertices[v_i[2]]; mCoords[3] = vertices[v_i[3]]; metric_valid = m_fcn_3e(m, mCoords, a3Con, b3Con, c3Con); if (!metric_valid) return false; break; case PYRAMID: for (i = 0; i < 4; ++i) { mCoords[0] = vertices[v_i[ i ]]; mCoords[1] = vertices[v_i[(i+1)%4]]; mCoords[2] = vertices[v_i[(i+3)%4]]; mCoords[3] = vertices[v_i[ 4 ]]; metric_valid = m_fcn_3p(mMetrics[i], mCoords, a3Con, b3Con, c3Con); if (!metric_valid) return false; } m = average_metrics(mMetrics, 4, err); MSQ_ERRZERO(err); break; case PRISM: for (i = 0; i < 6; ++i) { mCoords[0] = vertices[v_i[locs_pri[i][0]]]; mCoords[1] = vertices[v_i[locs_pri[i][1]]]; mCoords[2] = vertices[v_i[locs_pri[i][2]]]; mCoords[3] = vertices[v_i[locs_pri[i][3]]]; metric_valid = m_fcn_3w(mMetrics[i], mCoords, a3Con, b3Con, c3Con); if (!metric_valid) return false; } m = average_metrics(mMetrics, 6, err); MSQ_ERRZERO(err); break; case HEXAHEDRON: for (i = 0; i < 8; ++i) { mCoords[0] = vertices[v_i[locs_hex[i][0]]]; mCoords[1] = vertices[v_i[locs_hex[i][1]]]; mCoords[2] = vertices[v_i[locs_hex[i][2]]]; mCoords[3] = vertices[v_i[locs_hex[i][3]]]; metric_valid = m_fcn_3i(mMetrics[i], mCoords, a3Con, b3Con, c3Con, d_con); if (!metric_valid) return false; } m = average_metrics(mMetrics, 8, err); MSQ_ERRZERO(err); break; default: MSQ_SETERR(err)(MsqError::UNSUPPORTED_ELEMENT, "Element type (%d) not supported in IdealWeightInverseMeanRatio", (int)topo); return false; } // end switch over element type return true; }
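/* Note on the corner decomposition above: each row of locs_hex lists a corner
   vertex followed by its edge-adjacent vertices, so row i gives the corner
   triangle (first three entries, used for QUADRILATERAL) or the corner
   tetrahedron (all four entries, used for HEXAHEDRON) rooted at vertex i;
   locs_pri plays the same role for the six corners of a prism.  The
   m_fcn_2e/2i/3e/3p/3w/3i kernels are assumed to evaluate the (generalized)
   inverse mean ratio of the corner Jacobian A relative to the ideal corner
   shape, roughly

       2D:  m = |A|_F^2 / (2 det A)
       3D:  m = |A|_F^2 / (3 (det A)^(2/3))

   with a2Con/b2Con/c2Con (and the 3D analogues) supplying the coefficients and
   exponents of the generalized form.  The per-corner values are then combined
   by average_metrics() using the metric's averaging method. */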
void QuasiNewton::optimize_vertex_positions( PatchData& pd, MsqError& err ) { TerminationCriterion& term = *get_inner_termination_criterion(); OFEvaluator& func = get_objective_function_evaluator(); const double sigma = 1e-4; const double beta0 = 0.25; const double beta1 = 0.80; const double tol1 = 1e-8; const double epsilon = 1e-10; double norm_r; //, norm_g; double alpha, beta; double obj, objn; size_t i; // Initialize stuff const size_t nn = pd.num_free_vertices(); double a[QNVEC], b[QNVEC], r[QNVEC]; for (i = 0; i < QNVEC; ++i) r[i] = 0; for (i = 0; i <= QNVEC; ++i) { v[i].clear(); v[i].resize( nn, Vector3D(0.0) ); w[i].clear(); w[i].resize( nn, Vector3D(0.0) ); } d.resize( nn ); mHess.resize( nn ); //hMesh(mesh); bool valid = func.update( pd, obj, v[QNVEC], mHess, err ); MSQ_ERRRTN(err); if (!valid) { MSQ_SETERR(err)("Initial objective function is not valid", MsqError::INVALID_MESH); return; } while (!term.terminate()) { pd.recreate_vertices_memento( mMemento, err ); MSQ_ERRRTN(err); pd.get_free_vertex_coordinates( w[QNVEC] ); x = v[QNVEC]; for (i = QNVEC; i--; ) { a[i] = r[i] * inner( &(w[i][0]), arrptr(x), nn ); plus_eq_scaled( arrptr(x), -a[i], &v[i][0], nn ); } solve( arrptr(d), arrptr(x) ); for (i = QNVEC; i--; ) { b[i] = r[i] * inner( &(v[i][0]), arrptr(d), nn ); plus_eq_scaled( arrptr(d), a[i]-b[i], &(w[i][0]), nn ); } alpha = -inner( &(v[QNVEC][0]), arrptr(d), nn ); /* direction is negated */ if (alpha > 0.0) { MSQ_SETERR(err)("No descent.", MsqError::INVALID_MESH); return; } alpha *= sigma; beta = 1.0; pd.move_free_vertices_constrained( arrptr(d), nn, -beta, err ); MSQ_ERRRTN(err); valid = func.evaluate( pd, objn, v[QNVEC], err ); if (err.error_code() == err.BARRIER_VIOLATED) err.clear(); // barrier violated does not represent an actual error here MSQ_ERRRTN(err); if (!valid || (obj - objn < -alpha*beta - epsilon && length( &(v[QNVEC][0]), nn ) >= tol1)) { if (!valid) // function not defined at trial point beta *= beta0; else // unacceptable iterate beta *= beta1; for (;;) { if (beta < tol1) { pd.set_to_vertices_memento( mMemento, err ); MSQ_ERRRTN(err); MSQ_SETERR(err)("Newton step not good", MsqError::INTERNAL_ERROR); return; } pd.set_free_vertices_constrained( mMemento, arrptr(d), nn, -beta, err ); MSQ_ERRRTN(err); valid = func.evaluate( pd, objn, err ); if (err.error_code() == err.BARRIER_VIOLATED) err.clear(); // barrier violated does not represent an actual error here MSQ_ERRRTN(err); if (!valid) // function undefined at trial point beta *= beta0; else if (obj - objn < -alpha*beta - epsilon) // unacceptlable iterate beta *= beta1; else break; } } for (i = 0; i < QNVEC-1; ++i) { r[i] = r[i+1]; w[i].swap( w[i+1] ); v[i].swap( v[i+1] ); } w[QNVEC-1].swap( w[0] ); v[QNVEC-1].swap( v[0] ); func.update( pd, obj, v[QNVEC], mHess, err ); MSQ_ERRRTN(err); norm_r = length_squared( &(v[QNVEC][0]), nn ); //norm_g = sqrt(norm_r); // checks stopping criterion term.accumulate_patch( pd, err ); MSQ_ERRRTN(err); term.accumulate_inner( pd, objn, &v[QNVEC][0], err ); MSQ_ERRRTN(err); } }
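/* Sketch of the scheme above: QuasiNewton keeps QNVEC previous iterates in w[]
   and their gradients in v[] (slot QNVEC holds the current point and gradient).
   The a[i]/b[i] loops apply those stored pairs around the Hessian solve() in
   the style of a limited-memory (two-loop) quasi-Newton update to produce the
   trial direction d, which is used negated.  The backtracking loop then
   enforces an Armijo-type sufficient-decrease test: with g = v[QNVEC] and
   alpha = -sigma*(g.d), a trial step of size beta is rejected and shrunk while

       obj - objn < -alpha*beta - epsilon,

   i.e. until the actual decrease is at least sigma times the decrease predicted
   by the directional derivative.  beta is reduced by beta0 when the trial point
   is infeasible (barrier violated) and by beta1 when the decrease is merely
   insufficient. */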
/*!Performs Conjugate gradient minimization on the PatchData, pd.*/ void ConjugateGradient::optimize_vertex_positions(PatchData &pd, MsqError &err){ // pd.reorder(); MSQ_FUNCTION_TIMER( "ConjugateGradient::optimize_vertex_positions" ); Timer c_timer; size_t num_vert=pd.num_free_vertices(); if(num_vert<1){ MSQ_DBGOUT(1) << "\nEmpty free vertex list in ConjugateGradient\n"; return; } /* //zero out arrays int zero_loop=0; while(zero_loop<arraySize){ fGrad[zero_loop].set(0,0,0); pGrad[zero_loop].set(0,0,0); fNewGrad[zero_loop].set(0,0,0); ++zero_loop; } */ // get OF evaluator OFEvaluator& objFunc = get_objective_function_evaluator(); size_t ind; //Michael cull list: possibly set soft_fixed flags here //MsqFreeVertexIndexIterator free_iter(pd, err); MSQ_ERRRTN(err); double f=0; //Michael, this isn't equivalent to CUBIT because we only want to check //the objective function value of the 'bad' elements //if invalid initial patch set an error. bool temp_bool = objFunc.update(pd, f, fGrad, err); assert(fGrad.size() == num_vert); if(MSQ_CHKERR(err)) return; if( ! temp_bool){ MSQ_SETERR(err)("Conjugate Gradient not able to get valid gradient " "and function values on intial patch.", MsqError::INVALID_MESH); return; } double grad_norm=MSQ_MAX_CAP; if(conjGradDebug>0){ MSQ_PRINT(2)("\nCG's DEGUB LEVEL = %i \n",conjGradDebug); grad_norm=Linf(arrptr(fGrad),fGrad.size()); MSQ_PRINT(2)("\nCG's FIRST VALUE = %f,grad_norm = %f",f,grad_norm); MSQ_PRINT(2)("\n TIME %f",c_timer.since_birth()); grad_norm=MSQ_MAX_CAP; } //Initializing pGrad (search direction). pGrad.resize(fGrad.size()); for (ind = 0; ind < num_vert; ++ind) pGrad[ind]=(-fGrad[ind]); int j=0; // total nb of step size changes ... not used much int i=0; // iteration counter unsigned m=0; // double alp=MSQ_MAX_CAP; // alp: scale factor of search direction //we know inner_criterion is false because it was checked in //loop_over_mesh before being sent here. TerminationCriterion* term_crit=get_inner_termination_criterion(); //while ((i<maxIteration && alp>stepBound && grad_norm>normGradientBound) // && !inner_criterion){ while(!term_crit->terminate()){ ++i; //std::cout<<"\Michael delete i = "<<i; int k=0; alp=get_step(pd,f,k,err); j+=k; if(conjGradDebug>2){ MSQ_PRINT(2)("\n Alp initial, alp = %20.18f",alp); } // if alp == 0, revert to steepest descent search direction if(alp==0){ for (m = 0; m < num_vert; ++m) { pGrad[m]=(-fGrad[m]); } alp=get_step(pd,f,k,err); j+=k; if(conjGradDebug>1){ MSQ_PRINT(2)("\n CG's search direction reset."); if(conjGradDebug>2) MSQ_PRINT(2)("\n Alp was zero, alp = %20.18f",alp); } } if(alp!=0){ pd.move_free_vertices_constrained( arrptr(pGrad), num_vert, alp, err ); MSQ_ERRRTN(err); if (! objFunc.update(pd, f, fNewGrad, err)){ MSQ_SETERR(err)("Error inside Conjugate Gradient, vertices moved " "making function value invalid.", MsqError::INVALID_MESH); return; } assert(fNewGrad.size() == (unsigned)num_vert); if(conjGradDebug>0){ grad_norm=Linf(arrptr(fNewGrad),num_vert); MSQ_PRINT(2)("\nCG's VALUE = %f, iter. 
= %i, grad_norm = %f, alp = %f",f,i,grad_norm,alp); MSQ_PRINT(2)("\n TIME %f",c_timer.since_birth()); } double s11=0; double s12=0; double s22=0; //free_iter.reset(); //while (free_iter.next()) { // m=free_iter.value(); for (m = 0; m < num_vert; ++m) { s11+=fGrad[m]%fGrad[m]; s12+=fGrad[m]%fNewGrad[m]; s22+=fNewGrad[m]%fNewGrad[m]; } // Steepest Descent (takes 2-3 times as long as P-R) //double bet=0; // Fletcher-Reeves (takes twice as long as P-R) //double bet = s22/s11; // Polack-Ribiere double bet; if (!divide( s22-s12, s11, bet )) return; // gradient is zero //free_iter.reset(); //while (free_iter.next()) { // m=free_iter.value(); for (m = 0; m < num_vert; ++m) { pGrad[m]=(-fNewGrad[m]+(bet*pGrad[m])); fGrad[m]=fNewGrad[m]; } if(conjGradDebug>2){ MSQ_PRINT(2)(" \nSEARCH DIRECTION INFINITY NORM = %e", Linf(arrptr(fNewGrad),num_vert)); } }//end if on alp == 0 term_crit->accumulate_patch( pd, err ); MSQ_ERRRTN(err); term_crit->accumulate_inner( pd, f, arrptr(fGrad), err ); MSQ_ERRRTN(err); }//end while if(conjGradDebug>0){ MSQ_PRINT(2)("\nConjugate Gradient complete i=%i ",i); MSQ_PRINT(2)("\n- FINAL value = %f, alp=%4.2e grad_norm=%4.2e",f,alp,grad_norm); MSQ_PRINT(2)("\n FINAL TIME %f",c_timer.since_birth()); } }
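/* The s11/s12/s22 accumulations above implement the Polak-Ribiere update: with
   g_old = fGrad and g_new = fNewGrad,

       s11 = g_old.g_old,   s12 = g_old.g_new,   s22 = g_new.g_new,
       bet = g_new.(g_new - g_old) / (g_old.g_old) = (s22 - s12) / s11,

   and the new search direction is p <- -g_new + bet*p.  The commented-out
   alternatives correspond to steepest descent (bet = 0) and Fletcher-Reeves
   (bet = s22/s11). */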
double TetDihedralWeight::get_weight( PatchData& pd, size_t element, Sample , MsqError& err )
{
  const double eps = 1e-10;

  MsqMeshEntity &elem = pd.element_by_index( element );
  if (elem.get_element_type() != TETRAHEDRON) {
    MSQ_SETERR(err)(MsqError::UNSUPPORTED_ELEMENT);
    return 0.0;
  }

  const size_t *indices = elem.get_vertex_index_array();
  Vector3D v01, v02, v31, v32;
  if (refMesh) {
    const Mesh::VertexHandle* vtx_hdl = pd.get_vertex_handles_array();
    Mesh::VertexHandle handles[] = { vtx_hdl[indices[0]],
                                     vtx_hdl[indices[1]],
                                     vtx_hdl[indices[2]],
                                     vtx_hdl[indices[3]] };
    Vector3D coords[4];
    refMesh->get_reference_vertex_coordinates( handles, 4, coords, err );
    MSQ_ERRZERO(err);
    v01 = coords[1] - coords[0];
    v02 = coords[2] - coords[0];
    v31 = coords[1] - coords[3];
    v32 = coords[2] - coords[3];
  }
  else {
    const MsqVertex* coords = pd.get_vertex_array();
    v01 = coords[indices[1]] - coords[indices[0]];
    v02 = coords[indices[2]] - coords[indices[0]];
    v31 = coords[indices[1]] - coords[indices[3]];
    v32 = coords[indices[2]] - coords[indices[3]];
  }

  Vector3D n012 = v02 * v01;
  Vector3D n013 = v31 * v01;
  Vector3D n023 = v02 * v32;
  Vector3D n123 = v31 * v32;

  // normalize face normals
  double l012 = n012.length();
  double l013 = n013.length();
  double l023 = n023.length();
  double l123 = n123.length();
  n012 *= (l012 < eps) ? 0.0 : 1.0/l012;
  n013 *= (l013 < eps) ? 0.0 : 1.0/l013;
  n023 *= (l023 < eps) ? 0.0 : 1.0/l023;
  n123 *= (l123 < eps) ? 0.0 : 1.0/l123;

  // calculate dihedral angles for each edge
  double ds[] = { da(n012 % n013),
                  da(n012 % n123),
                  da(n012 % n023),
                  da(n013 % n023),
                  da(n013 % n123),
                  da(n023 % n123) };

  // calculate weight from max dihedral angle
  double d = *std::max_element( ds, ds+6 );
  return 1/(1 + exp(-mA*(d - mCutoff)));
}
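/* Sketch of the weight computed above: da() is assumed to convert the dot
   product of two unit face normals into the corresponding dihedral angle; the
   weight is then a logistic function of the largest of the six edge dihedral
   angles,

       w = 1 / (1 + exp( -mA * (d_max - mCutoff) )),

   so elements whose worst dihedral angle is well above mCutoff receive a weight
   near 1, well-shaped elements receive a weight near 0, and mA controls how
   sharp the transition is. */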
void ArrayMesh::release_entity_handles( const EntityHandle*, size_t, MsqError& err ) { MSQ_SETERR(err)(MsqError::NOT_IMPLEMENTED); }
static inline bool do_numerical_hessian( AWMetric* metric,
                                         MsqMatrix<Dim, Dim> A,
                                         const MsqMatrix<Dim, Dim>& W,
                                         double& value,
                                         MsqMatrix<Dim, Dim>& grad,
                                         MsqMatrix<Dim, Dim>* Hess,
                                         MsqError& err )
{
  // zero hessian data
  const int num_block = Dim * (Dim + 1) / 2;
  for (int i = 0; i < num_block; ++i)
    Hess[i].zero();

  // evaluate gradient for input values
  bool valid = metric->evaluate_with_grad( A, W, value, grad, err );
  if (MSQ_CHKERR(err) || !valid)
    return false;

  // do finite difference for each term of A
  const double INITIAL_STEP = std::max( 1e-6, fabs(1e-14*value) );
  double value2;
  MsqMatrix<Dim,Dim> grad2;
  for (unsigned r = 0; r < Dim; ++r) {   // for each row of A
    for (unsigned c = 0; c < Dim; ++c) { // for each column of A
      const double in_val = A(r,c);
      double step;
      for (step = INITIAL_STEP; step > std::numeric_limits<double>::epsilon(); step *= 0.1) {
        A(r,c) = in_val + step;
        valid = metric->evaluate_with_grad( A, W, value2, grad2, err ); MSQ_ERRZERO(err);
        if (valid)
          break;
      }

      // if no valid step size, try step in other direction
      if (!valid) {
        for (step = -INITIAL_STEP; step < -std::numeric_limits<double>::epsilon(); step *= 0.1) {
          A(r,c) = in_val + step;
          valid = metric->evaluate_with_grad( A, W, value2, grad2, err ); MSQ_ERRZERO(err);
          if (valid)
            break;
        }

        // if still no valid step size, give up.
        if (!valid) {
          MSQ_SETERR(err)("No valid step size for finite difference of 2D target metric.",
                          MsqError::INTERNAL_ERROR);
          return false;
        }
      }

      // restore A.
      A(r,c) = in_val;

      // add values into result matrix
      // values of grad2, in row-major order, are a single row of the Hessian
      // with respect to the perturbed term A(r,c)
      grad2 -= grad;
      grad2 /= step;
      for (unsigned b = 0; b < r; ++b) {
        const int idx = Dim*b - b*(b+1)/2 + r;
        Hess[idx].add_column( c, transpose( grad2.row(b) ) );
      }
      for (unsigned b = r; b < Dim; ++b) {
        const int idx = Dim*r - r*(r+1)/2 + b;
        Hess[idx].add_row( c, grad2.row(b) );
      }
    } // for (c)
  } // for (r)

  // Values in non-diagonal blocks were added twice.
  for (unsigned r = 0, h = 1; r < Dim-1; ++r, ++h)
    for (unsigned c = r + 1; c < Dim; ++c, ++h)
      Hess[h] *= 0.5;

  return true;
}
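/* The finite-difference scheme above approximates one row of the metric Hessian
   per perturbed entry of A:

       d(grad m)/dA(r,c) ~= ( grad m(A + h*E_rc) - grad m(A) ) / h,

   where E_rc is the matrix with a 1 in position (r,c) and h starts at
   INITIAL_STEP and is reduced by factors of 10 (switching sign if necessary)
   until the metric evaluation is valid.  Because the result is symmetric and
   each pass writes both a block row and the mirrored block column, the
   off-diagonal blocks accumulate every contribution twice and are halved at
   the end. */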
/*! \brief creates a sparse structure for a Hessian, based on the connectivity information contained in the PatchData. Only the upper triangular part of the Hessian is stored. */ void MsqHessian::initialize(PatchData &pd, MsqError &err) { MSQ_FUNCTION_TIMER( "MsqHession::initialize" ); delete[] mEntries; delete[] mRowStart; delete[] mColIndex; size_t num_vertices = pd.num_free_vertices(); size_t num_elements = pd.num_elements(); size_t const * vtx_list; size_t e, r, rs, re, c, cs, ce, nz, nnz, nve, i, j; MsqMeshEntity* patchElemArray = pd.get_element_array(err); MSQ_CHKERR(err); if (num_vertices == 0) { MSQ_SETERR( err )( "No vertices in PatchData", MsqError::INVALID_ARG); return; } mSize = num_vertices; // Calculate the offsets for a CSC representation of the accumulation // pattern. size_t* col_start = new size_t[num_vertices + 1]; //mAccumElemStart = new size_t[num_elements+1]; //mAccumElemStart[0] = 0; for (i = 0; i < num_vertices; ++i) { col_start[i] = 0; } for (e = 0; e < num_elements; ++e) { nve = patchElemArray[e].node_count(); vtx_list = patchElemArray[e].get_vertex_index_array(); int nfe = 0; for (i = 0; i < nve; ++i) { r = vtx_list[i]; if (r < num_vertices) ++nfe; for (j = i; j < nve; ++j) { c = vtx_list[j]; if (r <= c) { if (c < num_vertices) ++col_start[c]; } else { if (r < num_vertices) ++col_start[r]; } } } //mAccumElemStart[e+1] = mAccumElemStart[e] + (nfe+1)*nfe/2; } nz = 0; for (i = 0; i < num_vertices; ++i) { j = col_start[i]; col_start[i] = nz; nz += j; } col_start[i] = nz; // Finished putting matrix into CSC representation int* row_instr = new int[5*nz]; size_t* row_index = new size_t[nz]; nz = 0; for (e = 0; e < num_elements; ++e) { nve = patchElemArray[e].node_count(); vtx_list = patchElemArray[e].get_vertex_index_array(); for (i = 0; i < nve; ++i) { r = vtx_list[i]; for (j = i; j < nve; ++j) { c = vtx_list[j]; if (r <= c) { if (c < num_vertices) { row_index[col_start[c]] = r; row_instr[col_start[c]] = nz++; ++col_start[c]; } } else { if (r < num_vertices) { row_index[col_start[r]] = c; //can't use -nz, but can negate row_instr[col_start[r]] row_instr[col_start[r]] = nz++; row_instr[col_start[r]] = -row_instr[col_start[r]]; ++col_start[r]; } } } } } for (i = num_vertices-1; i > 0; --i) { col_start[i+1] = col_start[i]; } col_start[1] = col_start[0]; col_start[0] = 0; // cout << "col_start: "; // for (int t=0; t<num_vertices+1; ++t) // cout << col_start[t] << " "; // cout << endl; // cout << "row_index: "; // for (int t=0; t<nz; ++t) // cout << row_index[t] << " "; // cout << endl; // cout << "row_instr: "; // for (int t=0; t<nz; ++t) // cout << row_instr[t] << " "; // cout << endl; // Convert CSC to CSR // First calculate the offsets in the row size_t* row_start = new size_t[num_vertices + 1]; for (i = 0; i < num_vertices; ++i) { row_start[i] = 0; } for (i = 0; i < nz; ++i) { ++row_start[row_index[i]]; } nz = 0; for (i = 0; i < num_vertices; ++i) { j = row_start[i]; row_start[i] = nz; nz += j; } row_start[i] = nz; // Now calculate the pattern size_t* col_index = new size_t[nz]; int* col_instr = new int[nz]; for (i = 0; i < num_vertices; ++i) { cs = col_start[i]; ce = col_start[i+1]; while(cs < ce) { r = row_index[cs]; col_index[row_start[r]] = i; col_instr[row_start[r]] = row_instr[cs]; ++row_start[r]; ++cs; } } for (i = num_vertices-1; i > 0; --i) { row_start[i+1] = row_start[i]; } row_start[1] = row_start[0]; row_start[0] = 0; delete[] row_index; // Now that the matrix is CSR // Column indices for each row are sorted // Compaction -- count the number of nonzeros 
mRowStart = col_start; // don't need to reallocate //mAccumulation = row_instr; // don't need to reallocate delete [] row_instr; for (i = 0; i <= num_vertices; ++i) { mRowStart[i] = 0; } nnz = 0; for (i = 0; i < num_vertices; ++i) { rs = row_start[i]; re = row_start[i+1]; c = num_vertices; while(rs < re) { if (c != col_index[rs]) { // This is an unseen nonzero c = col_index[rs]; ++mRowStart[i]; ++nnz; } //if (col_instr[rs] >= 0) { // mAccumulation[col_instr[rs]] = nnz - 1; //} //else { // mAccumulation[-col_instr[rs]] = 1 - nnz; //} ++rs; } } nnz = 0; for (i = 0; i < num_vertices; ++i) { j = mRowStart[i]; mRowStart[i] = nnz; nnz += j; } mRowStart[i] = nnz; delete [] col_instr; // Fill in the compacted hessian matrix mColIndex = new size_t[nnz]; for (i = 0; i < num_vertices; ++i) { rs = row_start[i]; re = row_start[i+1]; c = num_vertices; while(rs < re) { if (c != col_index[rs]) { // This is an unseen nonzero c = col_index[rs]; mColIndex[mRowStart[i]] = c; mRowStart[i]++; } ++rs; } } for (i = num_vertices-1; i > 0; --i) { mRowStart[i+1] = mRowStart[i]; } mRowStart[1] = mRowStart[0]; mRowStart[0] = 0; delete [] row_start; delete [] col_index; mEntries = new Matrix3D[nnz]; // On Solaris, no initializer allowed for new of an array for (i=0;i<nnz;++i) mEntries[i] = 0.; // so we initialize all entries manually. //origin_pd = &pd; return; }
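/* Summary of the storage built above: every pair of free vertices that share an
   element contributes a 3x3 block, and only the upper triangle (row <= col) is
   kept.  The pattern is first accumulated column-wise (CSC), transposed to
   row-wise (CSR), and then compacted to remove duplicate (row,col) entries
   coming from vertices that share several elements; mRowStart/mColIndex
   describe the final block-CSR pattern and mEntries holds one Matrix3D per
   stored block.  For example, a patch consisting of a single triangle with free
   vertices 0,1,2 would compact to blocks (0,0),(0,1),(0,2),(1,1),(1,2),(2,2),
   i.e. mRowStart = {0,3,5,6} and mColIndex = {0,1,2,1,2,2}. */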
bool TQualityMetric::evaluate_with_Hessian( PatchData& pd, size_t handle, double& value, std::vector<size_t>& indices, std::vector<Vector3D>& grad, std::vector<Matrix3D>& Hessian, MsqError& err ) { const Sample s = ElemSampleQM::sample( handle ); const size_t e = ElemSampleQM:: elem( handle ); MsqMeshEntity& elem = pd.element_by_index( e ); EntityTopology type = elem.get_element_type(); unsigned edim = TopologyInfo::dimension( type ); size_t num_idx = 0; const NodeSet bits = pd.non_slave_node_set( e ); bool rval; if (edim == 3) { // 3x3 or 3x2 targets ? const MappingFunction3D* mf = pd.get_mapping_function_3D( type ); if (!mf) { MSQ_SETERR(err)( "No mapping function for element type", MsqError::UNSUPPORTED_ELEMENT ); return false; } MsqMatrix<3,3> A, W, dmdT, d2mdT2[6]; mf->jacobian( pd, e, bits, s, mIndices, mDerivs3D, num_idx, A, err ); MSQ_ERRZERO(err); targetCalc->get_3D_target( pd, e, s, W, err ); MSQ_ERRZERO(err); const MsqMatrix<3,3> Winv = inverse(W); const MsqMatrix<3,3> T = A*Winv; rval = targetMetric->evaluate_with_hess( T, value, dmdT, d2mdT2, err ); MSQ_ERRZERO(err); gradient<3>( num_idx, mDerivs3D, dmdT*transpose(Winv), grad ); second_deriv_wrt_product_factor( d2mdT2, Winv ); Hessian.resize( num_idx*(num_idx+1)/2 ); if (num_idx) hessian<3>( num_idx, mDerivs3D, d2mdT2, arrptr(Hessian) ); #ifdef PRINT_INFO print_info<3>( e, s, A, W, A * inverse(W) ); #endif } else if (edim == 2) { #ifdef NUMERICAL_2D_HESSIAN // return finite difference approximation for now return QualityMetric::evaluate_with_Hessian( pd, handle, value, indices, grad, Hessian, err ); #else MsqMatrix<2,2> W, A, dmdT, d2mdT2[3]; MsqMatrix<3,2> M; rval = evaluate_surface_common( pd, s, e, bits, mIndices, num_idx, mDerivs2D, W, A, M, err ); if (MSQ_CHKERR(err) || !rval) return false; const MsqMatrix<2,2> Winv = inverse(W); const MsqMatrix<2,2> T = A*Winv; rval = targetMetric->evaluate_with_hess( T, value, dmdT, d2mdT2, err ); MSQ_ERRZERO(err); gradient<2>( num_idx, mDerivs2D, M * dmdT * transpose(Winv), grad ); // calculate 2D hessian second_deriv_wrt_product_factor( d2mdT2, Winv ); const size_t n = num_idx*(num_idx+1)/2; hess2d.resize(n); if (n) hessian<2>( num_idx, mDerivs2D, d2mdT2, arrptr(hess2d) ); // calculate surface hessian as transform of 2D hessian Hessian.resize(n); for (size_t i = 0; i < n; ++i) Hessian[i] = Matrix3D( (M * hess2d[i] * transpose(M)).data() ); #ifdef PRINT_INFO print_info<2>( e, s, J, Wp, A * inverse(W) ); #endif #endif } else { assert(0); return false; } // pass back index list indices.resize( num_idx ); std::copy( mIndices, mIndices+num_idx, indices.begin() ); // apply target weight to value if (!num_idx) weight( pd, s, e, num_idx, value, 0, 0, 0, err ); else weight( pd, s, e, num_idx, value, arrptr(grad), 0, arrptr(Hessian), err ); MSQ_ERRZERO(err); return rval; }
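/* Sketch of the assembly above: with T = A*inverse(W), the chain rule gives
   dm/dA = (dm/dT) * transpose(inverse(W)), which is what gradient<3>/gradient<2>
   are handed, and second_deriv_wrt_product_factor() folds inverse(W) into the
   second derivatives d2mdT2 in the same way.  In the surface (edim == 2) branch
   the Hessian is first assembled in the 2D tangent frame and each 2x2 block is
   then lifted to a 3x3 block via H_3D = M * H_2D * transpose(M), which is the
   loop that fills Hessian[i] from hess2d[i]. */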
bool TQualityMetric::evaluate_with_Hessian_diagonal( PatchData& pd, size_t handle, double& value, std::vector<size_t>& indices, std::vector<Vector3D>& grad, std::vector<SymMatrix3D>& diagonal, MsqError& err ) { const Sample s = ElemSampleQM::sample( handle ); const size_t e = ElemSampleQM:: elem( handle ); MsqMeshEntity& elem = pd.element_by_index( e ); EntityTopology type = elem.get_element_type(); unsigned edim = TopologyInfo::dimension( type ); size_t num_idx = 0; const NodeSet bits = pd.non_slave_node_set( e ); bool rval; if (edim == 3) { // 3x3 or 3x2 targets ? const MappingFunction3D* mf = pd.get_mapping_function_3D( type ); if (!mf) { MSQ_SETERR(err)( "No mapping function for element type", MsqError::UNSUPPORTED_ELEMENT ); return false; } MsqMatrix<3,3> A, W, dmdT, d2mdT2[6]; mf->jacobian( pd, e, bits, s, mIndices, mDerivs3D, num_idx, A, err ); MSQ_ERRZERO(err); targetCalc->get_3D_target( pd, e, s, W, err ); MSQ_ERRZERO(err); const MsqMatrix<3,3> Winv = inverse(W); const MsqMatrix<3,3> T = A*Winv; rval = targetMetric->evaluate_with_hess( T, value, dmdT, d2mdT2, err ); MSQ_ERRZERO(err); gradient<3>( num_idx, mDerivs3D, dmdT * transpose(Winv), grad ); second_deriv_wrt_product_factor( d2mdT2, Winv ); diagonal.resize( num_idx ); hessian_diagonal<3>(num_idx, mDerivs3D, d2mdT2, arrptr(diagonal) ); #ifdef PRINT_INFO print_info<3>( e, s, A, W, A * inverse(W) ); #endif } else if (edim == 2) { #ifdef NUMERICAL_2D_HESSIAN // use finite diference approximation for now return QualityMetric::evaluate_with_Hessian_diagonal( pd, handle, value, indices, grad, diagonal, err ); #else MsqMatrix<2,2> W, A, dmdT, d2mdT2[3]; MsqMatrix<3,2> M; rval = evaluate_surface_common( pd, s, e, bits, mIndices, num_idx, mDerivs2D, W, A, M, err ); if (MSQ_CHKERR(err) || !rval) return false; const MsqMatrix<2,2> Winv = inverse(W); const MsqMatrix<2,2> T = A*Winv; rval = targetMetric->evaluate_with_hess( T, value, dmdT, d2mdT2, err ); MSQ_ERRZERO(err); gradient<2>( num_idx, mDerivs2D, M * dmdT * transpose(Winv), grad ); second_deriv_wrt_product_factor( d2mdT2, Winv ); diagonal.resize( num_idx ); for (size_t i = 0; i < num_idx; ++i) { MsqMatrix<2,2> block2d; block2d(0,0) = transpose(mDerivs2D[i]) * d2mdT2[0] * mDerivs2D[i]; block2d(0,1) = transpose(mDerivs2D[i]) * d2mdT2[1] * mDerivs2D[i]; block2d(1,0) = block2d(0,1); block2d(1,1) = transpose(mDerivs2D[i]) * d2mdT2[2] * mDerivs2D[i]; MsqMatrix<3,2> p = M * block2d; SymMatrix3D& H = diagonal[i]; H[0] = p.row(0) * transpose(M.row(0)); H[1] = p.row(0) * transpose(M.row(1)); H[2] = p.row(0) * transpose(M.row(2)); H[3] = p.row(1) * transpose(M.row(1)); H[4] = p.row(1) * transpose(M.row(2)); H[5] = p.row(2) * transpose(M.row(2)); } #ifdef PRINT_INFO print_info<2>( e, s, J, Wp, A * inverse(W) ); #endif #endif } else { assert(0); return false; } // pass back index list indices.resize( num_idx ); std::copy( mIndices, mIndices+num_idx, indices.begin() ); // apply target weight to value if (!num_idx) weight( pd, s, e, num_idx, value, 0, 0, 0, err ); else weight( pd, s, e, num_idx, value, arrptr(grad), arrptr(diagonal), 0, err ); MSQ_ERRZERO(err); return rval; }
/*! Writes a VTK file directly from the MeshSet.
    This means that any mesh imported successfully into Mesquite
    can be output in VTK format.
    This is not geared for performance, since it has to load a global Patch
    from the mesh to write a mesh file. */
void MeshSet::write_vtk(const char* out_filename, Mesquite::MsqError &err)
{
  // Open the file
  msq_stdio::ofstream file(out_filename);
  if (!file) {
    MSQ_SETERR(err)(MsqError::FILE_ACCESS);
    return;
  }

  // loads a global patch
  PatchData pd;
  PatchDataParameters pd_params;
  pd_params.set_patch_type(PatchData::GLOBAL_PATCH, err); MSQ_ERRRTN(err);
  pd_params.no_culling_method();
  get_next_patch(pd, pd_params, err); MSQ_ERRRTN(err);

  // Write a header
  file << "# vtk DataFile Version 2.0\n";
  file << "Mesquite Mesh " << out_filename << " .\n";
  file << "ASCII\n";
  file << "DATASET UNSTRUCTURED_GRID\n";

  // Write vertex coordinates
  file << "POINTS " << pd.num_nodes() << " float\n";
  size_t i;
  for (i = 0; i < pd.num_nodes(); i++) {
    file << pd.vertexArray[i][0] << ' '
         << pd.vertexArray[i][1] << ' '
         << pd.vertexArray[i][2] << '\n';
  }

  // Write out the connectivity table
  size_t connectivity_size = 0;
  for (i = 0; i < pd.num_elements(); ++i)
    connectivity_size += pd.elementArray[i].node_count()+1;
  file << "CELLS " << pd.num_elements() << ' ' << connectivity_size << '\n';
  for (i = 0; i < pd.num_elements(); i++) {
    std::vector<size_t> vtx_indices;
    pd.elementArray[i].get_node_indices(vtx_indices);
    file << vtx_indices.size();
    for (msq_stdc::size_t j = 0; j < vtx_indices.size(); ++j) {
      file << ' ' << vtx_indices[j];
    }
    file << '\n';
  }

  // Write out the element types
  file << "CELL_TYPES " << pd.num_elements() << '\n';
  for (i = 0; i < pd.num_elements(); i++) {
    unsigned char type_id = 0;
    switch (pd.elementArray[i].get_element_type()) {
      case Mesquite::TRIANGLE:      type_id = 5;  break;
      case Mesquite::QUADRILATERAL: type_id = 9;  break;
      case Mesquite::TETRAHEDRON:   type_id = 10; break;
      case Mesquite::HEXAHEDRON:    type_id = 12; break;
      default:
        MSQ_SETERR(err)("element type not implemented",MsqError::NOT_IMPLEMENTED);
        break;
    }
    file << (int)type_id << '\n';
  }

  // Write out which points are fixed.
  file << "POINT_DATA " << pd.num_nodes()
       << "\nSCALARS fixed float\nLOOKUP_TABLE default\n";
  for (i = 0; i < pd.num_nodes(); ++i) {
    if (pd.vertexArray[i].is_free_vertex())
      file << "0\n";
    else
      file << "1\n";
  }

  // Close the file
  file.close();
}
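/* Rough shape of the file produced above, for a mesh containing a single
   quadrilateral on four (fixed) vertices; node order and coordinates are for
   illustration only:

       # vtk DataFile Version 2.0
       Mesquite Mesh out.vtk .
       ASCII
       DATASET UNSTRUCTURED_GRID
       POINTS 4 float
       0 0 0
       1 0 0
       1 1 0
       0 1 0
       CELLS 1 5
       4 0 1 2 3
       CELL_TYPES 1
       9
       POINT_DATA 4
       SCALARS fixed float
       LOOKUP_TABLE default
       1
       1
       1
       1

   CELLS lists "n v0 ... vn-1" per element, CELL_TYPES uses the VTK ids written
   by the switch (5 = triangle, 9 = quad, 10 = tet, 12 = hex), and the "fixed"
   scalar is 0 for free vertices and 1 for fixed ones. */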
bool TQualityMetric::evaluate_with_gradient( PatchData& pd, size_t handle, double& value, std::vector<size_t>& indices, std::vector<Vector3D>& grad, MsqError& err ) { const Sample s = ElemSampleQM::sample( handle ); const size_t e = ElemSampleQM:: elem( handle ); MsqMeshEntity& elem = pd.element_by_index( e ); EntityTopology type = elem.get_element_type(); unsigned edim = TopologyInfo::dimension( type ); size_t num_idx = 0; const NodeSet bits = pd.non_slave_node_set( e ); bool rval; if (edim == 3) { // 3x3 or 3x2 targets ? const MappingFunction3D* mf = pd.get_mapping_function_3D( type ); if (!mf) { MSQ_SETERR(err)( "No mapping function for element type", MsqError::UNSUPPORTED_ELEMENT ); return false; } MsqMatrix<3,3> A, W, dmdT; mf->jacobian( pd, e, bits, s, mIndices, mDerivs3D, num_idx, A, err ); MSQ_ERRZERO(err); targetCalc->get_3D_target( pd, e, s, W, err ); MSQ_ERRZERO(err); const MsqMatrix<3,3> Winv = inverse(W); const MsqMatrix<3,3> T = A*Winv; rval = targetMetric->evaluate_with_grad( T, value, dmdT, err ); MSQ_ERRZERO(err); gradient<3>( num_idx, mDerivs3D, dmdT * transpose(Winv), grad ); #ifdef PRINT_INFO print_info<3>( e, s, A, W, A * inverse(W) ); #endif } else if (edim == 2) { MsqMatrix<2,2> W, A, dmdT; MsqMatrix<3,2> S_a_transpose_Theta; rval = evaluate_surface_common( pd, s, e, bits, mIndices, num_idx, mDerivs2D, W, A, S_a_transpose_Theta, err ); if (MSQ_CHKERR(err) || !rval) return false; const MsqMatrix<2,2> Winv = inverse(W); const MsqMatrix<2,2> T = A*Winv; rval = targetMetric->evaluate_with_grad( T, value, dmdT, err ); MSQ_ERRZERO(err); gradient<2>( num_idx, mDerivs2D, S_a_transpose_Theta*dmdT*transpose(Winv), grad ); #ifdef PRINT_INFO print_info<2>( e, s, J, Wp, A * inverse(W) ); #endif } else { assert(false); return false; } // pass back index list indices.resize( num_idx ); std::copy( mIndices, mIndices+num_idx, indices.begin() ); // apply target weight to value weight( pd, s, e, num_idx, value, grad.empty() ? 0 : arrptr(grad), 0, 0, err ); MSQ_ERRZERO(err); return rval; }
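/* Gradient chain rule used above: the metric is evaluated on T = A*inverse(W),
   so dm/dA = (dm/dT)*transpose(inverse(W)).  For volume elements this matrix is
   passed directly to gradient<3>; for surface elements the 2x2 result is first
   mapped back into 3D through the 3x2 matrix S_a_transpose_Theta, i.e. the
   per-vertex gradient is assembled from S_a_transpose_Theta*dmdT*transpose(Winv)
   together with the 2D mapping-function derivatives mDerivs2D. */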
// Currently, the only thing supported is updating each vertices // coordinates and flags. Connectivity changes aren't supported yet. void Mesquite::MeshSet::update_mesh(const PatchData &pd, MsqError &err) { MSQ_FUNCTION_TIMER( "MeshSet::update_mesh" ); if (pd.num_nodes() == 0) return; size_t i; switch (pd.type()) { // If the patch type is marked as local, // all handles belong to the currentMesh. case PatchData::ELEMENTS_ON_VERTEX_PATCH: // For each vertex, update the coordinates // and the "mesquite byte". for (i = 0; i < pd.num_nodes(); i++) { if(!pd.vertexArray[i].is_flag_set( MsqVertex::MSQ_HARD_FIXED)) { (*currentMesh)->vertex_set_coordinates(pd.vertexHandlesArray[i], pd.vertexArray[i], err); MSQ_ERRRTN(err); } (*currentMesh)->vertex_set_byte(pd.vertexHandlesArray[i], pd.vertexArray[i].vertexBitFlags, err); MSQ_ERRRTN(err); } break; // If the patch type is marked as global, // the handles may belong to more than // one Mesh. case PatchData::GLOBAL_PATCH: { list<Mesquite::Mesh*>::iterator mesh_itr = meshSet.begin(); assert( mesh_itr != meshSet.end() ); Mesquite::Mesh* cur_mesh = *mesh_itr; Mesquite::VertexIterator *vert_itr = cur_mesh->vertex_iterator(err); MSQ_ERRRTN(err); for (i = 0; i < pd.num_nodes(); i++) { if (vert_itr->is_at_end()) { mesh_itr++; if ( mesh_itr==meshSet.end() ) return; cur_mesh = *mesh_itr; delete vert_itr; vert_itr = cur_mesh->vertex_iterator(err); MSQ_ERRRTN(err); } if(!pd.vertexArray[i].is_flag_set( MsqVertex::MSQ_HARD_FIXED)) { cur_mesh->vertex_set_coordinates(pd.vertexHandlesArray[i], pd.vertexArray[i], err); MSQ_ERRRTN(err); } cur_mesh->vertex_set_byte(pd.vertexHandlesArray[i], pd.vertexArray[i].vertexBitFlags, err); MSQ_ERRRTN(err); } delete vert_itr; } break; default: { MSQ_SETERR(err)("PatchData Type not accepted yet.", MsqError::NOT_IMPLEMENTED); break; } } }
bool MeshSet::get_next_global_patch( PatchData& pd, PatchDataParameters& pd_params, MsqError& err ) { // We only support global patches for a single Mesh if (meshSet.size() != 1) { MSQ_SETERR(err)( "Global patches only supported for single-Mesh MeshSets.", MsqError::NOT_IMPLEMENTED ); return false; } pd.mType = PatchData::GLOBAL_PATCH; pd.domainHint = NO_DOMAIN_HINT; if (mDomain) pd.domainHint = mDomain->hint(); // for a global patch, we always reset to start of the mesh. reset(err); if (MSQ_CHKERR(err)) return false; size_t i; // Get sizes for mesh data size_t num_verts, num_elems, num_uses; (*currentMesh)->get_all_sizes( num_verts, num_elems, num_uses, err ); MSQ_ERRZERO(err); // Get handles and connectivity pd.vertexHandlesArray.resize( num_verts ); pd.elementHandlesArray.resize( num_elems ); pd.elemConnectivityArray.resize( num_uses ); msq_std::vector<size_t> offsets(num_elems+1); (*currentMesh)->get_all_mesh( &pd.vertexHandlesArray[0], num_verts, &pd.elementHandlesArray[0], num_elems, &offsets[0], offsets.size(), &pd.elemConnectivityArray[0], pd.elemConnectivityArray.size(), err ); MSQ_ERRZERO(err); // Get element topologies pd.elementArray.resize( num_elems ); msq_std::vector<EntityTopology> elem_topologies(num_elems); (*currentMesh)->elements_get_topologies( &pd.elementHandlesArray[0], &elem_topologies[0], num_elems, err );MSQ_ERRZERO(err); // Put them into the patch MsqMeshEntity* pd_elem_array = pd.get_element_array(err);MSQ_ERRZERO(err); for (i = 0; i < num_elems; ++i) pd_elem_array[i].set_element_type( elem_topologies[i] ); // Complete connectivity data in patch pd.initialize_data( &offsets[0], err ); MSQ_ERRZERO(err); // Get vertex coordinates pd.vertexArray.resize( num_verts ); MsqVertex* pd_vert_array = pd.get_vertex_array(err);MSQ_ERRZERO(err); (*currentMesh)->vertices_get_coordinates(&pd.vertexHandlesArray[0], pd_vert_array, num_verts, err); MSQ_ERRZERO(err); // Get vertex boundary flag if (vertArraySize < num_verts) { delete [] vertexOnBoundary; vertArraySize = num_verts; vertexOnBoundary = new bool[vertArraySize]; } (*currentMesh)->vertices_are_on_boundary( &pd.vertexHandlesArray[0], vertexOnBoundary, num_verts, err );MSQ_ERRZERO(err); for (i = 0; i < num_verts; i++) { // Get its flags /*(*currentMesh)->vertex_get_byte(vertArray[i], &(pd_vert_array[i].vertexBitFlags), err); MSQ_CHKERR(err);*/ // Set its hard-fixed flag if (/*(*currentMesh)->vertex_is_fixed(vertArray[i], err) ||*/ vertexOnBoundary[i]) { pd_vert_array[i].vertexBitFlags |= MsqVertex::MSQ_HARD_FIXED; } else { pd_vert_array[i].vertexBitFlags &= ~(MsqVertex::MSQ_HARD_FIXED); } } return true; }
bool MeshSet::get_next_elem_on_vert_patch( PatchData& pd, PatchDataParameters &pd_params, MsqError& err ) { size_t i; // Get the patch parameters. long unsigned int culling_method_bits = pd_params.get_culling_method_bits(); //variable to store the center vertex's fixed flag MsqVertex::FlagMask center_fixed_byte; // Make sure we're only getting a patch depth of 1 int num_layers = pd_params.get_nb_layers(err); if (MSQ_CHKERR(err)) return false; if ((unsigned)num_layers > (unsigned)1) { MSQ_SETERR(err)( "no implementation for patch depth > 1.", MsqError::NOT_IMPLEMENTED ); return false; } // Set the patch type pd.mType = PatchData::ELEMENTS_ON_VERTEX_PATCH; pd.domainHint = NO_DOMAIN_HINT; if (mDomain) pd.domainHint = mDomain->hint(); // If this is our first time through the mesh, // initialize everything. if (!vertexIterator) { reset(err); if (MSQ_CHKERR(err)) return false; } // currentVertex is pointing at next potential center vertex. // Move forward in the list of vertices if necessary. bool next_vertex_identified = false; while (!next_vertex_identified) { // Move to next mesh if necessary while (vertexIterator->is_at_end()) { delete vertexIterator; ++currentMesh; if (currentMesh == meshSet.end()) { vertexIterator = NULL; return false; } vertexIterator = (*currentMesh)->vertex_iterator(err); MSQ_CHKERR(err); } bool on_bnd = false; bool is_mid = false; Mesquite::Mesh::VertexHandle vtx = **vertexIterator; (*currentMesh)->vertices_are_on_boundary(&vtx, &on_bnd, 1, err); if (MSQ_CHKERR(err)) return false; //(*currentMesh)->vertices_are_midnodes(&vtx, &is_mid, 1, err); //if (MSQ_CHKERR(err)) return false; //always skip midnodes if (is_mid) { vertexIterator->operator++(); } //if this is a 'boundary' fixed flag, skip it now else if ((culling_method_bits & PatchData::NO_BOUNDARY_VTX) && (on_bnd==true)) { ++(*vertexIterator); } else if ((culling_method_bits & PatchData::NO_INTERIOR_VTX) && (on_bnd==false)) { ++(*vertexIterator); } //otherwise we check to see if this vertex has been culled else{ //get the fixed_bit_flag for the center vertex (*currentMesh)->vertex_get_byte(**vertexIterator,¢er_fixed_byte, err); if (MSQ_CHKERR(err)) return false; //remove the hard fixed flag if it has been set center_fixed_byte &= ~(MsqVertex::MSQ_HARD_FIXED); //if it is culled, skip it if(center_fixed_byte & cullFlag) { ++(*vertexIterator); } else { // We found the right one next_vertex_identified = true; }//end else (vertex was not fixed [boundary] or culled) }//end else (iterator was not at the end and vertex was not boundary) }//end while (!next_vertex_identified) Mesh::VertexHandle vertex = **vertexIterator; vertexIterator->operator++(); if(num_layers == 0 ){ pd.vertexArray.resize( 1 ); MsqVertex* pd_vert_array = pd.get_vertex_array(err); (*currentMesh)->vertices_get_coordinates(&vertex, pd_vert_array, 1, err); MSQ_ERRZERO(err); pd_vert_array[0].vertexBitFlags=center_fixed_byte; pd.vertexHandlesArray.resize(1); pd.vertexHandlesArray[0]=vertex; pd.initialize_data( NULL, err ); MSQ_ERRZERO(err); return true; } // Get the number of elements in this vertex size_t num_elems = (*currentMesh)->vertex_get_attached_element_count(vertex, err); if (MSQ_CHKERR(err)) return false; pd.elementHandlesArray.resize( num_elems ); // Get the elements attached to this vertex if (elemArraySize < num_elems) { delete [] elemTopologies; elemTopologies = new EntityTopology[num_elems]; elemArraySize = num_elems; } (*currentMesh)->vertex_get_attached_elements(vertex, &pd.elementHandlesArray[0], num_elems, err); if (MSQ_CHKERR(err)) return 
false; // Get the topologies of those elements (*currentMesh)->elements_get_topologies(&pd.elementHandlesArray[0], elemTopologies, num_elems, err); if (MSQ_CHKERR(err)) return false; // Figure out how many vertices we need to allocate //size_t num_vert_uses = 1; //size_t i; //for (i = 0; i < num_elems; ++i) // num_vert_uses += vertices_in_topology(elemTopologies[i]); size_t num_vert_uses = (*currentMesh)-> get_vertex_use_count( &pd.elementHandlesArray[0], num_elems, err ); MSQ_ERRZERO(err); // All elems share at least 1 vertex (the center vertex). The // center vertex is used 1 time, but it was counted num_elems times. size_t num_verts = num_vert_uses - num_elems + 1; pd.vertexHandlesArray.resize( num_verts ); pd.elementArray.resize( num_elems ); pd.elemConnectivityArray.resize( num_vert_uses ); // Get the vertices attached to those elements if (csrOffsetsSize < num_elems + 1) { delete [] csrOffsets; csrOffsets = new size_t[num_elems + 1]; csrOffsetsSize = num_elems + 1; } (*currentMesh)->elements_get_attached_vertices(&pd.elementHandlesArray[0], num_elems, &pd.vertexHandlesArray[0], num_verts, &pd.elemConnectivityArray[0], num_vert_uses, csrOffsets, err); if (MSQ_CHKERR(err)) return false; // Update with actual vertex count pd.vertexHandlesArray.resize( num_verts ); // Put the elements into the PatchData MsqMeshEntity* pd_elem_array = pd.get_element_array(err); for (i = 0; i < num_elems; ++i) pd_elem_array[i].set_element_type( elemTopologies[i] ); pd.initialize_data( csrOffsets, err ); MSQ_ERRZERO(err); // Get the coordinates of the vertices and its flags. pd.vertexArray.resize( num_verts ); MsqVertex* pd_vert_array = pd.get_vertex_array(err); //get the coordinates (*currentMesh)->vertices_get_coordinates(&pd.vertexHandlesArray[0], pd_vert_array, num_verts, err); if (MSQ_CHKERR(err)) return false; for (i = 0; i < num_verts; i++) { // If it's not the center vertex, mark it as hard fixed if (pd.vertexHandlesArray[i] != vertex) { // Get its flags (*currentMesh)->vertex_get_byte(pd.vertexHandlesArray[i], &(pd_vert_array[i].vertexBitFlags), err); if (MSQ_CHKERR(err)) return false; pd_vert_array[i].vertexBitFlags |= MsqVertex::MSQ_HARD_FIXED; } //else it is the center vertex. We therefore already have //the fixed flag stored center_fixed_byte. The hard fixed //flag has already been removed (when flag was retreived). else{ pd_vert_array[i].vertexBitFlags = (center_fixed_byte); } } return true; }
/*!Reset function using using a PatchData object. This function is called for the inner-stopping criterion directly from the loop over mesh function in VertexMover. For outer criterion, it is called from the reset function which takes a MeshSet object. This function prepares the object to be used by setting the initial values of some of the data members. As examples, if needed, it resets the cpu timer to zero, the iteration counter to zero, and the initial and previous objective function values to the current objective function value for this patch. The return value for this function is similar to that of terminate(). The function returns false if the checked criteria have not been satisfied, and true if they have been. reset() only checks the GRADIENT_INF_NORM_ABSOLUTE, GRADIENT_L2_NORM_ABSOLUTE, and the QUALITY_IMPROVEMENT_ABSOLUTE criteria. Checking these criteria allows the QualityImprover to skip the entire optimization if the initial mesh satisfies the appropriate conditions. */ void TerminationCriterion::reset_inner(PatchData &pd, OFEvaluator& obj_eval, MsqError &err) { const unsigned long totalFlag = terminationCriterionFlag | cullingMethodFlag; // clear flag for BOUNDED_VERTEX_MOVEMENT vertexMovementExceedsBound = 0; // Use -1 to denote that this isn't initialized yet. // As all valid values must be >= 0.0, a negative // value indicates that it is uninitialized and is // always less than any valid value. maxSquaredMovement = -1; // Clear the iteration count. iterationCounter = 0; //reset the inner timer if needed if(totalFlag & CPU_TIME){ mTimer.reset(); } //GRADIENT currentGradInfNorm = initialGradInfNorm = 0.0; currentGradL2NormSquared = initialGradL2NormSquared = 0.0; if(totalFlag & GRAD_FLAGS) { if (!obj_eval.have_objective_function()) { MSQ_SETERR(err)("Error termination criteria set which uses objective " "functions, but no objective function is available.", MsqError::INVALID_STATE); return; } int num_vertices=pd.num_free_vertices(); mGrad.resize( num_vertices ); //get gradient and make sure it is valid bool b = obj_eval.evaluate(pd, currentOFValue, mGrad, err); MSQ_ERRRTN(err); if (!b) { MSQ_SETERR(err)("Initial patch is invalid for gradient computation.", MsqError::INVALID_STATE); return; } //get the gradient norms if (totalFlag & (GRADIENT_INF_NORM_ABSOLUTE|GRADIENT_INF_NORM_RELATIVE)) { currentGradInfNorm = initialGradInfNorm = Linf(mGrad); MSQ_DBGOUT_P0_ONLY(debugLevel) << par_string() << " o Initial gradient Inf norm: " << " " << RPM(initialGradInfNorm) << std::endl; } if (totalFlag & (GRADIENT_L2_NORM_ABSOLUTE|GRADIENT_L2_NORM_RELATIVE)) { currentGradL2NormSquared = initialGradL2NormSquared = length_squared(mGrad); MSQ_DBGOUT_P0_ONLY(debugLevel) << par_string() << " o Initial gradient L2 norm: " << " " << RPM(std::sqrt(initialGradL2NormSquared)) << std::endl; } //the OFvalue comes for free, so save it previousOFValue=currentOFValue; initialOFValue=currentOFValue; } //find the initial objective function value if needed and not already //computed. If we needed the gradient, we have the OF value for free. // Also, if possible, get initial OF value if writing plot file. Solvers // often supply the OF value for subsequent iterations so by calculating // the initial value we can generate OF value plots. 
else if ((totalFlag & OF_FLAGS) || (plotFile.is_open() && pd.num_free_vertices() && obj_eval.have_objective_function())) { //ensure the obj_ptr is not null if(!obj_eval.have_objective_function()){ MSQ_SETERR(err)("Error termination criteria set which uses objective " "functions, but no objective function is available.", MsqError::INVALID_STATE); return; } bool b = obj_eval.evaluate(pd, currentOFValue, err); MSQ_ERRRTN(err); if (!b){ MSQ_SETERR(err)("Initial patch is invalid for evaluation.",MsqError::INVALID_STATE); return; } //std::cout<<"\nReseting initial of value = "<<initialOFValue; previousOFValue=currentOFValue; initialOFValue=currentOFValue; } if (totalFlag & (GRAD_FLAGS|OF_FLAGS)) MSQ_DBGOUT_P0_ONLY(debugLevel) << par_string() << " o Initial OF value: " << " " << RPM(initialOFValue) << std::endl; // Store current vertex locations now, because we'll // need them later to compare the current movement with. if (totalFlag & VERTEX_MOVEMENT_RELATIVE) { if (initialVerticesMemento) { pd.recreate_vertices_memento( initialVerticesMemento, err ); } else { initialVerticesMemento = pd.create_vertices_memento( err ); } MSQ_ERRRTN(err); maxSquaredInitialMovement = DBL_MAX; } else { maxSquaredInitialMovement = 0; } if (terminationCriterionFlag & UNTANGLED_MESH) { globalInvertedCount = count_inverted( pd, err ); //if (innerOuterType==TYPE_OUTER) MSQ_DBGOUT_P0_ONLY(debugLevel) << par_string() << " o Num Inverted: " << " " << globalInvertedCount << std::endl; patchInvertedCount = 0; MSQ_ERRRTN(err); } if (timeStepFileType) { // If didn't already calculate gradient abive, calculate it now. if (!(totalFlag & GRAD_FLAGS)) { mGrad.resize( pd.num_free_vertices() ); obj_eval.evaluate(pd, currentOFValue, mGrad, err); err.clear(); } write_timestep( pd, mGrad.empty() ? 0 : arrptr(mGrad), err); } if (plotFile.is_open()) { // two newlines so GNU plot knows that we are starting a new data set plotFile << std::endl << std::endl; // write column headings as comment in data file plotFile << "#Iter\tCPU\tObjFunc\tGradL2\tGradInf\tMovement\tInverted" << std::endl; // write initial values plotFile << 0 << '\t' << mTimer.since_birth() << '\t' << initialOFValue << '\t' << std::sqrt( currentGradL2NormSquared ) << '\t' << currentGradInfNorm << '\t' << 0.0 << '\t' << globalInvertedCount << std::endl; } }
/*! average_metrics takes an array of length num_value and averages the contents using averaging method 'method'. */ inline double QualityMetric::average_metrics(const double metric_values[], const int& num_values, MsqError &err) { //MSQ_MAX needs to be made global? //double MSQ_MAX=1e10; double total_value=0.0; double temp_value=0.0; int i=0; int j=0; //if no values, return zero if (num_values<=0){ return 0.0; } switch(avgMethod){ case GEOMETRIC: total_value=1.0; for (i=0;i<num_values;++i){ total_value*=(metric_values[i]); } total_value=pow(total_value, (1/((double) num_values))); break; case HARMONIC: //ensure no divide by zero, return zero for (i=0;i<num_values;++i){ if(metric_values[i]<MSQ_MIN){ if(metric_values[i]>MSQ_MIN){ return 0.0; } } total_value+=(1/metric_values[i]); } //ensure no divide by zero, return MSQ_MAX_CAP if(total_value<MSQ_MIN){ if(total_value>MSQ_MIN){ return MSQ_MAX_CAP; } } total_value=num_values/total_value; break; case LINEAR: for (i=0;i<num_values;++i){ total_value+=metric_values[i]; } total_value/= (double) num_values; break; case MAXIMUM: total_value = metric_values[0]; for (i = 1; i < num_values; ++i){ if (metric_values[i] > total_value){ total_value = metric_values[i]; } } break; case MINIMUM: total_value = metric_values[0]; for (i = 1; i < num_values; ++i){ if (metric_values[i] < total_value) { total_value = metric_values[i]; } } break; case NONE: MSQ_SETERR(err)("Averaging method set to NONE", MsqError::INVALID_ARG); break; case RMS: for (i=0;i<num_values;++i){ total_value+=(metric_values[i]*metric_values[i]); } total_value/= (double) num_values; total_value=sqrt(total_value); break; case HMS: //ensure no divide by zero, return zero for (i=0;i<num_values;++i){ if (metric_values[i]*metric_values[i] < MSQ_MIN) { return 0.0; } total_value += (1.0/(metric_values[i]*metric_values[i])); } //ensure no divide by zero, return MSQ_MAX_CAP if (total_value < MSQ_MIN) { return MSQ_MAX_CAP; } total_value = sqrt(num_values/total_value); break; case STANDARD_DEVIATION: total_value=0; temp_value=0; for (i=0;i<num_values;++i){ temp_value+=metric_values[i]; total_value+=(metric_values[i]*metric_values[i]); } temp_value/= (double) num_values; temp_value*=temp_value; total_value/= (double) num_values; total_value-=temp_value; break; case SUM: for (i=0;i<num_values;++i){ total_value+=metric_values[i]; } break; case SUM_SQUARED: for (i=0;i<num_values;++i){ total_value+= (metric_values[i]*metric_values[i]); } break; case MAX_MINUS_MIN: //total_value used to store the maximum //temp_value used to store the minimum temp_value=MSQ_MAX_CAP; for (i=0;i<num_values;++i){ if(metric_values[i]<temp_value){ temp_value=metric_values[i]; } if(metric_values[i]>total_value){ total_value=metric_values[i]; } } //ensure no divide by zero, return MSQ_MAX_CAP if (temp_value < MSQ_MIN) { return MSQ_MAX_CAP; } total_value-=temp_value; break; case MAX_OVER_MIN: //total_value used to store the maximum //temp_value used to store the minimum temp_value=MSQ_MAX_CAP; for (i=0;i<num_values;++i){ if(metric_values[i]<temp_value){ temp_value=metric_values[i]; } if(metric_values[i]>total_value){ total_value=metric_values[i]; } } //ensure no divide by zero, return MSQ_MAX_CAP if (temp_value < MSQ_MIN) { return MSQ_MAX_CAP; } total_value/=temp_value; break; case SUM_OF_RATIOS_SQUARED: for (j=0;j<num_values;++j){ //ensure no divide by zero, return MSQ_MAX_CAP if (metric_values[j] < MSQ_MIN) { return MSQ_MAX_CAP; } for (i=0;i<num_values;++i){ total_value+=((metric_values[i]/metric_values[j])* 
(metric_values[i]/metric_values[j])); } } total_value/=(double)(num_values*num_values); break; default: //Return error saying Averaging Method mode not implemented MSQ_SETERR(err)("Requested Averaging Method Not Implemented", MsqError::NOT_IMPLEMENTED); return 0; } return total_value; }
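/* Formulas implemented by the switch above (n = num_values, m_i = metric_values[i]):

       LINEAR:                 (1/n) * sum m_i
       RMS:                    sqrt( (1/n) * sum m_i^2 )
       HARMONIC:               n / sum(1/m_i)
       GEOMETRIC:              ( prod m_i )^(1/n)
       HMS:                    sqrt( n / sum(1/m_i^2) )
       SUM / SUM_SQUARED:      sum m_i   /   sum m_i^2
       MAXIMUM / MINIMUM:      max m_i   /   min m_i
       MAX_MINUS_MIN:          max m_i - min m_i
       MAX_OVER_MIN:           max m_i / min m_i
       SUM_OF_RATIOS_SQUARED:  (1/n^2) * sum_i sum_j (m_i/m_j)^2
       STANDARD_DEVIATION:     (1/n) sum m_i^2 - ((1/n) sum m_i)^2

   Note that the STANDARD_DEVIATION case, as written, returns the population
   variance rather than its square root, and the divide-by-zero guards fall back
   to 0.0 or MSQ_MAX_CAP as noted in the comments. */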
/*!This function checks the culling method criterion supplied to the object by the user. If the user does not supply a culling method criterion, the default criterion is NONE, and in that case, no culling is performed. If the culling method criterion is satisfied, the interior vertices of the given patch are flagged as soft_fixed. Otherwise, the soft_fixed flag is removed from each of the vertices in the patch (interior and boundary vertices). Also, if the criterion was satisfied, then the function returns true. Otherwise, the function returns false. */ bool TerminationCriterion::cull_vertices(PatchData &pd, OFEvaluator& of_eval, MsqError &err) { //PRINT_INFO("CULLING_METHOD FLAG = %i",cullingMethodFlag); //cull_bool will be changed to true if the criterion is satisfied bool b, cull_bool=false; double prev_m, init_m; switch(cullingMethodFlag){ //if no culling is requested, always return false case NONE: return cull_bool; //if culling on quality improvement absolute case QUALITY_IMPROVEMENT_ABSOLUTE: //get objective function value b = of_eval.evaluate(pd, currentOFValue, err); if (MSQ_CHKERR(err)) return false; if (!b) { MSQ_SETERR(err)(MsqError::INVALID_MESH); return false; } //if the improvement was enough, cull if(currentOFValue <= cullingEps) { cull_bool=true; } //PRINT_INFO("\ncurrentOFValue = %f, bool = %i\n",currentOFValue,cull_bool); break; //if culing on quality improvement relative case QUALITY_IMPROVEMENT_RELATIVE: //get objective function value b = of_eval.evaluate(pd, currentOFValue, err); if (MSQ_CHKERR(err)) return false; if(!b){ MSQ_SETERR(err)(MsqError::INVALID_MESH); return false; } //if the improvement was enough, cull if((currentOFValue-lowerOFBound)<= (cullingEps*(initialOFValue-lowerOFBound))) { cull_bool=true; } break; //if culling on vertex movement absolute case VERTEX_MOVEMENT_ABSOLUTE: case VERTEX_MOVEMENT_ABS_EDGE_LENGTH: //if movement was enough, cull prev_m = pd.get_max_vertex_movement_squared(previousVerticesMemento,err); MSQ_ERRZERO(err); if(prev_m <= cullingEps*cullingEps){ cull_bool=true; } break; //if culling on vertex movement relative case VERTEX_MOVEMENT_RELATIVE: //if movement was small enough, cull prev_m = pd.get_max_vertex_movement_squared(previousVerticesMemento,err); MSQ_ERRZERO(err); init_m = pd.get_max_vertex_movement_squared(initialVerticesMemento,err); MSQ_ERRZERO(err); if(prev_m <= (cullingEps*cullingEps * init_m)){ cull_bool=true; } break; case UNTANGLED_MESH: if (!patchInvertedCount) cull_bool = true; break; default: MSQ_SETERR(err)("Requested culling method not yet implemented.", MsqError::NOT_IMPLEMENTED); return false; }; //Now actually have patch data cull vertices if(cull_bool) { pd.set_free_vertices_soft_fixed(err); MSQ_ERRZERO(err); } else { pd.set_all_vertices_soft_free(err); MSQ_ERRZERO(err); } return cull_bool; }
/*! \brief Returns an array of all vertices in the PatchData. */ inline const MsqVertex* PatchData::get_vertex_array(MsqError &err) const { if (vertexArray.empty()) MSQ_SETERR(err)( "No vertex array defined", MsqError::INVALID_STATE ); return arrptr(vertexArray); }
bool AWMetric3D::evaluate( const MsqMatrix<2,2>&, const MsqMatrix<2,2>&, double&, MsqError& err ) { MSQ_SETERR(err)("2D target metric cannot be evaluated for volume elements", MsqError::UNSUPPORTED_ELEMENT); return false; }
inline MsqMeshEntity* PatchData::get_element_array(MsqError &err) { if (elementArray.empty()) MSQ_SETERR(err)( "No element array defined", MsqError::INVALID_STATE ); return arrptr(elementArray); }
void TrustRegion::optimize_vertex_positions( PatchData& pd, MsqError& err )
{
  TerminationCriterion& term = *get_inner_termination_criterion();
  OFEvaluator& func = get_objective_function_evaluator();

  const double cg_tol = 1e-2;
  const double eta_1 = 0.01;
  const double eta_2 = 0.90;
  const double tr_incr = 10;
  const double tr_decr_def = 0.25;
  const double tr_decr_undef = 0.25;
  const double tr_num_tol = 1e-6;
  const int max_cg_iter = 10000;

  double radius = 1000; /* delta*delta */

  const int nn = pd.num_free_vertices();
  wVect.resize(nn); Vector3D* w = arrptr(wVect);
  zVect.resize(nn); Vector3D* z = arrptr(zVect);
  dVect.resize(nn); Vector3D* d = arrptr(dVect);
  pVect.resize(nn); Vector3D* p = arrptr(pVect);
  rVect.resize(nn); Vector3D* r = arrptr(rVect);

  double norm_r, norm_g;
  double alpha, beta, kappa;
  double rz, rzm1;
  double dMp, norm_d, norm_dp1, norm_p;
  double obj, objn;

  int cg_iter;
  bool valid;

  mHess.initialize( pd, err );  //hMesh(mesh);
  valid = func.update( pd, obj, mGrad, mHess, err ); MSQ_ERRRTN(err);
  if (!valid) {
    MSQ_SETERR(err)("Initial objective function is not valid", MsqError::INVALID_MESH);
    return;
  }
  compute_preconditioner( err ); MSQ_ERRRTN(err);
  pd.recreate_vertices_memento( mMemento, err ); MSQ_ERRRTN(err);

  while (!term.terminate() && (radius > 1e-20)) {

    norm_r = length_squared(arrptr(mGrad), nn);
    norm_g = sqrt(norm_r);

    memset(d, 0, 3*sizeof(double)*nn);
    memcpy(r, arrptr(mGrad), nn*sizeof(Vector3D)); //memcpy(r, mesh->g, 3*sizeof(double)*nn);
    norm_g *= cg_tol;

    apply_preconditioner( z, r, err); MSQ_ERRRTN(err); //prec->apply(z, r, prec, mesh);
    negate(p, z, nn);
    rz = inner(r, z, nn);

    dMp = 0;
    norm_p = rz;
    norm_d = 0;

    cg_iter = 0;
    while ((sqrt(norm_r) > norm_g) &&
#ifdef DO_STEEP_DESC
           (norm_d > tr_num_tol) &&
#endif
           (cg_iter < max_cg_iter)) {
      ++cg_iter;

      memset(w, 0, 3*sizeof(double)*nn);
      //matmul(w, mHess, p); //matmul(w, mesh, p);
      mHess.product( w, p );

      kappa = inner(p, w, nn);
      if (kappa <= 0.0) {
        alpha = (sqrt(dMp*dMp+norm_p*(radius-norm_d))-dMp)/norm_p;
        plus_eq_scaled( d, alpha, p, nn );
        break;
      }

      alpha = rz / kappa;

      norm_dp1 = norm_d + 2.0*alpha*dMp + alpha*alpha*norm_p;
      if (norm_dp1 >= radius) {
        alpha = (sqrt(dMp*dMp+norm_p*(radius-norm_d))-dMp)/norm_p;
        plus_eq_scaled( d, alpha, p, nn );
        break;
      }

      plus_eq_scaled( d, alpha, p, nn );
      plus_eq_scaled( r, alpha, w, nn );
      norm_r = length_squared(r, nn);

      apply_preconditioner( z, r, err); MSQ_ERRRTN(err); //prec->apply(z, r, prec, mesh);

      rzm1 = rz;
      rz = inner(r, z, nn);
      beta = rz / rzm1;
      times_eq_minus( p, beta, z, nn );

      dMp = beta*(dMp + alpha*norm_p);
      norm_p = rz + beta*beta*norm_p;
      norm_d = norm_dp1;
    }

#ifdef DO_STEEP_DESC
    if (norm_d <= tr_num_tol) {
      norm_g = length(arrptr(mGrad), nn);
      double ll = 1.0;
      if (norm_g < tr_num_tol)
        break;
      if (norm_g > radius)
        ll = radius / norm_g;
      for (int i = 0; i < nn; ++i)
        d[i] = ll * mGrad[i];
    }
#endif

    alpha = inner( arrptr(mGrad), d, nn ); // inner(mesh->g, d, nn);

    memset(p, 0, 3*sizeof(double)*nn);
    //matmul(p, mHess, d); //matmul(p, mesh, d);
    mHess.product( p, d );
    beta = 0.5*inner(p, d, nn);
    kappa = alpha + beta;

    /* Put the new point into the locations */
    pd.move_free_vertices_constrained( d, nn, 1.0, err ); MSQ_ERRRTN(err);

    valid = func.evaluate( pd, objn, err ); MSQ_ERRRTN(err);
    if (!valid) {
      /* Function not defined at trial point */
      radius *= tr_decr_undef;
      pd.set_to_vertices_memento( mMemento, err ); MSQ_ERRRTN(err);
      continue;
    }

    if ((fabs(kappa) <= tr_num_tol) && (fabs(objn - obj) <= tr_num_tol)) {
      kappa = 1;
    }
    else {
      kappa = (objn - obj) / kappa;
    }

    if (kappa < eta_1) {
      /* Iterate is unacceptable */
      radius *= tr_decr_def;
      pd.set_to_vertices_memento( mMemento, err ); MSQ_ERRRTN(err);
      continue;
    }

    /* Iterate is acceptable */
    if (kappa >= eta_2) {
      /* Iterate is a very good step, increase radius */
      radius *= tr_incr;
      if (radius > 1e20) {
        radius = 1e20;
      }
    }

    func.update( pd, obj, mGrad, mHess, err );
    compute_preconditioner( err ); MSQ_ERRRTN(err);
    pd.recreate_vertices_memento( mMemento, err ); MSQ_ERRRTN(err);

    // checks stopping criterion
    term.accumulate_patch( pd, err ); MSQ_ERRRTN(err);
    term.accumulate_inner( pd, objn, arrptr(mGrad), err ); MSQ_ERRRTN(err);
  }
}
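/* Sketch of the trust-region logic above: the inner loop is a preconditioned
   Steihaug-Toint conjugate-gradient solve of the model subproblem

       minimize  g.d + 0.5*d.H.d   subject to  |d|^2 <= radius

   (note that "radius" holds the squared trust-region radius, per the delta*delta
   comment); the two alpha = (sqrt(dMp^2 + norm_p*(radius - norm_d)) - dMp)/norm_p
   branches step exactly to the boundary when negative curvature is detected or
   the boundary would be crossed.  The quality of the step is then measured by

       kappa = ( f(x+d) - f(x) ) / ( g.d + 0.5*d.H.d ),

   the ratio of actual to predicted change; the radius is shrunk by tr_decr_undef
   or tr_decr_def when the trial point is infeasible or kappa < eta_1, and grown
   by tr_incr when kappa >= eta_2. */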
bool QualityMetric::evaluate_with_gradient( PatchData& pd, size_t handle, double& value, std::vector<size_t>& indices, std::vector<Vector3D>& gradient, MsqError& err ) { indices.clear(); bool valid = evaluate_with_indices( pd, handle, value, indices, err); if (MSQ_CHKERR(err) || !valid) return false; if (indices.empty()) return true; // get initial perturbation amount double delta_C = finiteDiffEps; if (!haveFiniteDiffEps) { delta_C = get_delta_C( pd, indices, err ); MSQ_ERRZERO(err); if (keepFiniteDiffEps) { finiteDiffEps = delta_C; haveFiniteDiffEps = true; } } const double delta_inv_C = 1.0/delta_C; const int reduction_limit = 15; gradient.resize( indices.size() ); for (size_t v=0; v<indices.size(); ++v) { const Vector3D pos = pd.vertex_by_index(indices[v]); /* gradient in the x, y, z direction */ for (int j=0;j<3;++j) { double delta = delta_C; double delta_inv = delta_inv_C; double metric_value; Vector3D delta_v( 0, 0, 0 ); //perturb the node and calculate gradient. The retry loop is a //safety net to make sure the epsilon perturbation does not take //the element out of the feasible region. int counter = 0; for (;;) { // perturb the coordinates of the free vertex in the j direction // by delta delta_v[j] = delta; pd.set_vertex_coordinates( pos+delta_v, indices[v], err ); MSQ_ERRZERO(err); //compute the function at the perturbed point location valid = evaluate( pd, handle, metric_value, err); if (!MSQ_CHKERR(err) && valid) break; if (++counter >= reduction_limit) { MSQ_SETERR(err)("Perturbing vertex by delta caused an inverted element.", MsqError::INTERNAL_ERROR); return false; } delta*=0.1; delta_inv*=10.; } // put the coordinates back where they belong pd.set_vertex_coordinates( pos, indices[v], err ); // compute the numerical gradient gradient[v][j] = (metric_value - value) * delta_inv; } // for(j) } // for(indices) return true; }
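For reference, a self-contained sketch of the one-sided finite-difference scheme applied above for each free vertex, including the shrink-and-retry safety net; the callback f and the function name fd_gradient are illustrative, not Mesquite types:

#include <array>
#include <functional>

// Sketch only: grad[j] ~= (f(x + delta*e_j) - f(x)) / delta, retrying with a
// smaller delta whenever the perturbed point is infeasible (f returns false).
bool fd_gradient( const std::function<bool(const std::array<double,3>&, double&)>& f,
                  const std::array<double,3>& x, double f0, double delta,
                  std::array<double,3>& grad )
{
  const int reduction_limit = 15;
  for (int j = 0; j < 3; ++j) {
    double d = delta, fj = 0.0;
    int count = 0;
    for (;;) {
      std::array<double,3> xp = x;
      xp[j] += d;                        // perturb one coordinate
      if (f( xp, fj )) break;            // perturbed point is valid
      if (++count >= reduction_limit) return false;
      d *= 0.1;                          // back off the perturbation and retry
    }
    grad[j] = (fj - f0) / d;             // forward difference
  }
  return true;
}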
bool AffineMapMetric::evaluate( PatchData& pd, size_t handle, double& value, MsqError& err ) { Sample s = ElemSampleQM::sample( handle ); size_t e = ElemSampleQM::elem( handle ); MsqMeshEntity& elem = pd.element_by_index( e ); EntityTopology type = elem.get_element_type(); unsigned edim = TopologyInfo::dimension( type ); const size_t* conn = elem.get_vertex_index_array(); // This metric only supports sampling at corners, except for simplices. // If the element is a simplex, then the Jacobian is constant over a linear // element. In that case it is valid to evaluate at any corner. //unsigned corner = s.number; if (s.dimension != 0) { if (type == TRIANGLE || type == TETRAHEDRON) /*corner = 0*/; else { MSQ_SETERR(err)("Invalid sample point for AffineMapMetric", MsqError::UNSUPPORTED_ELEMENT ); return false; } } bool rval; if (edim == 3) { // 3x3 or 3x2 targets ? Vector3D c[3] = { Vector3D(0,0,0), Vector3D(0,0,0), Vector3D(0,0,0) }; unsigned n; const unsigned* adj = TopologyInfo::adjacent_vertices( type, s.number, n ); c[0] = pd.vertex_by_index( conn[adj[0]] ) - pd.vertex_by_index( conn[s.number] ); c[1] = pd.vertex_by_index( conn[adj[1]] ) - pd.vertex_by_index( conn[s.number] ); c[2] = pd.vertex_by_index( conn[adj[2]] ) - pd.vertex_by_index( conn[s.number] ); MsqMatrix<3,3> A; A.set_column( 0, MsqMatrix<3,1>(c[0].to_array()) ); A.set_column( 1, MsqMatrix<3,1>(c[1].to_array()) ); A.set_column( 2, MsqMatrix<3,1>(c[2].to_array()) ); if (type == TETRAHEDRON) A = A * TET_XFORM; MsqMatrix<3,3> W; targetCalc->get_3D_target( pd, e, s, W, err ); MSQ_ERRZERO(err); rval = targetMetric->evaluate( A * inverse(W), value, err ); MSQ_ERRZERO(err); } else { Vector3D c[2] = { Vector3D(0,0,0), Vector3D(0,0,0) }; unsigned n; const unsigned* adj = TopologyInfo::adjacent_vertices( type, s.number, n ); c[0] = pd.vertex_by_index( conn[adj[0]] ) - pd.vertex_by_index( conn[s.number] ); c[1] = pd.vertex_by_index( conn[adj[1]] ) - pd.vertex_by_index( conn[s.number] ); MsqMatrix<3,2> App; App.set_column( 0, MsqMatrix<3,1>(c[0].to_array()) ); App.set_column( 1, MsqMatrix<3,1>(c[1].to_array()) ); MsqMatrix<3,2> Wp; targetCalc->get_surface_target( pd, e, s, Wp, err ); MSQ_ERRZERO(err); MsqMatrix<2,2> A, W; MsqMatrix<3,2> RZ; surface_to_2d( App, Wp, W, RZ ); A = transpose(RZ) * App; if (type == TRIANGLE) A = A * TRI_XFORM; rval = targetMetric->evaluate( A*inverse(W), value, err ); MSQ_ERRZERO(err); } // apply target weight to value if (weightCalc) { double ck = weightCalc->get_weight( pd, e, s, err ); MSQ_ERRZERO(err); value *= ck; } return rval; }
bool QualityMetric::evaluate_with_Hessian( PatchData& pd, size_t handle, double& value, std::vector<size_t>& indices, std::vector<Vector3D>& gradient, std::vector<Matrix3D>& Hessian, MsqError& err ) { indices.clear(); gradient.clear(); keepFiniteDiffEps = true; bool valid = evaluate_with_gradient( pd, handle, value, indices, gradient, err ); keepFiniteDiffEps = false; if (MSQ_CHKERR(err) || !valid) { haveFiniteDiffEps = false; return false; } if (indices.empty()){ haveFiniteDiffEps = false; return true; } // get initial perturbation amount double delta_C; if (haveFiniteDiffEps) { delta_C = finiteDiffEps; } else { delta_C = get_delta_C( pd, indices, err ); MSQ_ERRZERO(err); } assert(delta_C < 1e30); const double delta_inv_C = 1.0/delta_C; const int reduction_limit = 15; std::vector<Vector3D> temp_gradient( indices.size() ); const int num_hess = indices.size() * (indices.size() + 1) / 2; Hessian.resize( num_hess ); for (unsigned v = 0; v < indices.size(); ++v) { const Vector3D pos = pd.vertex_by_index(indices[v]); for (int j = 0; j < 3; ++j ) { // x, y, and z double delta = delta_C; double delta_inv = delta_inv_C; double metric_value; Vector3D delta_v(0,0,0); // find finite difference for gradient int counter = 0; for (;;) { delta_v[j] = delta; pd.set_vertex_coordinates( pos+delta_v, indices[v], err ); MSQ_ERRZERO(err); valid = evaluate_with_gradient( pd, handle, metric_value, indices, temp_gradient, err ); if (!MSQ_CHKERR(err) && valid) break; if (++counter >= reduction_limit) { MSQ_SETERR(err)("Algorithm did not successfully compute element's " "Hessian.\n",MsqError::INTERNAL_ERROR); haveFiniteDiffEps = false; return false; } delta *= 0.1; delta_inv *= 10.0; } pd.set_vertex_coordinates( pos, indices[v], err ); MSQ_ERRZERO(err); //compute the numerical Hessian for (unsigned w = 0; w <= v; ++w) { //finite difference to get some entries of the Hessian Vector3D fd( temp_gradient[w] ); fd -= gradient[w]; fd *= delta_inv; // For the block at position w,v in a matrix, we need the corresponding index // (mat_index) in a 1D array containing only upper triangular blocks. unsigned sum_w = w*(w+1)/2; // 1+2+3+...+w unsigned mat_index = w*indices.size() + v - sum_w; Hessian[mat_index][0][j] = fd[0]; Hessian[mat_index][1][j] = fd[1]; Hessian[mat_index][2][j] = fd[2]; } } // for(j) } // for(indices) haveFiniteDiffEps = false; return true; }
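The Hessian blocks computed above are stored in a row-major packed upper triangle; a small sketch of that index mapping (the function name is illustrative):

#include <cassert>
#include <cstddef>

// Sketch only: index of block (w,v), w <= v, in a packed upper triangle of an
// n x n block matrix -- the same formula as mat_index = w*n + v - w*(w+1)/2 above.
std::size_t packed_upper_index( std::size_t w, std::size_t v, std::size_t n )
{
  assert( w <= v && v < n );
  return w*n + v - w*(w+1)/2;
}
// For n = 3 the blocks are stored as (0,0) (0,1) (0,2) (1,1) (1,2) (2,2),
// so packed_upper_index(1,2,3) == 4.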
void MeshImplData::copy_higher_order( std::vector<size_t>& mid_nodes, std::vector<size_t>& vertices, std::vector<size_t>& vertex_indices, std::vector<size_t>& index_offsets, MsqError& err ) { mid_nodes.clear(); vertices.clear(); vertex_indices.clear(); index_offsets.clear(); // Create a map from vertex handle to index in "vertices" // Use vertexList.size() to mean uninitialized. size_t v; std::vector<size_t> vert_map( vertexList.size() ); for (v = 0; v < vertexList.size(); ++v) vert_map[v] = vertexList.size(); // Loop over all mid-side vertices for (v = 0; v < vertexList.size(); ++v) { const Vertex& vert = vertexList[v]; // Not a mid-side vertex, skip it if (!vert.valid || !vert.midcount) continue; // Get an element adjacent to this mid-side node assert( vert.adjacencies.size() ); // shouldn't be able to fail if vert.midcount > 0 int elem_indx = vert.adjacencies[0]; Element& elem = elementList[elem_indx]; // Find index of node in elem's connectivity list unsigned index; for (index = 0; index < elem.connectivity.size(); ++index) if (elem.connectivity[index] == v) break; if (index == elem.connectivity.size()) { MSQ_SETERR(err)("Inconsistent data.", MsqError::INTERNAL_ERROR); return; } // Given the index in the element's connectivity list, // get the side of the element containing the mid-node. unsigned side_dim, side_num; TopologyInfo::side_number( elem.topology, elem.connectivity.size(), index, side_dim, side_num, err ); MSQ_ERRRTN(err); if (!side_dim) // Not a mid-side node { MSQ_SETERR(err)(MsqError::INVALID_STATE,"Improperly connected mesh."); return; } // Get the adjacent corner vertices from the element side. unsigned num_corners; const unsigned* corner_indices = TopologyInfo::side_vertices( elem.topology, side_dim, side_num, num_corners, err ); MSQ_ERRRTN(err); // Add the mid-side node to the output list mid_nodes.push_back( v ); // Store offset at which the indices of the corner // vertices adjacent to this mid-side node will be // stored in "vertex_indices". index_offsets.push_back( vertex_indices.size() ); // For each adjacent corner vertex, if the vertex is not // already in "vertices" add it, and add the index to // the adjacency list for this mid-side node. for (unsigned i = 0; i < num_corners; ++i) { size_t vert_idx = elem.connectivity[corner_indices[i]]; assert( is_vertex_valid(vert_idx) ); if (vert_map[vert_idx] == vertexList.size()) { vert_map[vert_idx] = vertices.size(); vertices.push_back( vert_idx ); } vertex_indices.push_back( vert_map[vert_idx] ); } } index_offsets.push_back( vertex_indices.size() ); }
const char* FileTokenizer::get_string( MsqError& err ) { // If the whitespace character marking the end of the // last token was a newline, increment the line count. if (lastChar == '\n') ++lineNumber; // Loop until we either find the start of a token to return or // reach the end of the file. for (;;) { // If the buffer is empty, read more. if (nextToken == bufferEnd) { size_t count = fread( buffer, 1, sizeof(buffer) - 1, filePtr ); if (!count) { if (feof(filePtr)) MSQ_SETERR(err)( "File truncated.\n", MsqError::PARSE_ERROR ); else MSQ_SETERR(err)( MsqError::IO_ERROR ); return NULL; } nextToken = buffer; bufferEnd = buffer + count; } // If the current character is not a space, we've found a token. if (!isspace(*nextToken)) break; // If the current space character is a newline, // increment the line number count. if (*nextToken == '\n') ++lineNumber; ++nextToken; } // Store the start of the token in "result" and // advance "nextToken" to one past the end of the // token. char* result = nextToken; while (nextToken != bufferEnd && !isspace(*nextToken)) ++nextToken; // If we have reached the end of the buffer without finding // a whitespace character terminating the token, we need to // read more from the file. Only try once. If the token is // too large to fit in the buffer, give up. if (nextToken == bufferEnd) { // Shift the (possibly) partial token to the start of the buffer. size_t remaining = bufferEnd - result; memmove( buffer, result, remaining ); result = buffer; nextToken = result + remaining; // Fill the remainder of the buffer after the token. size_t count = fread( nextToken, 1, sizeof(buffer) - remaining - 1, filePtr ); if (!count && !feof(filePtr)) { MSQ_SETERR(err)( "I/O error.\n", MsqError::IO_ERROR ); return NULL; } bufferEnd = nextToken + count; // Continue to advance nextToken until we find the space // terminating the token. while (nextToken != bufferEnd && !isspace(*nextToken)) ++nextToken; if (nextToken == bufferEnd) // EOF { *bufferEnd = '\0'; ++bufferEnd; } } // Save terminating whitespace character (or NULL char if EOF). lastChar = *nextToken; // Put null in buffer to mark end of current token. *nextToken = '\0'; // Advance nextToken to the next character to search next time. ++nextToken; return result; }
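A much-simplified sketch of the same idea (skip whitespace, collect a token, track newlines so line numbers can be reported), assuming a std::istream instead of the buffered FILE* handling above; next_token is illustrative, not the FileTokenizer API:

#include <cctype>
#include <cstdio>
#include <istream>
#include <string>

// Sketch only: read the next whitespace-delimited token, counting newlines as
// get_string() does with lineNumber above.
bool next_token( std::istream& in, std::string& tok, int& line_number )
{
  tok.clear();
  int c;
  while ((c = in.get()) != EOF && std::isspace(c)) {
    if (c == '\n') ++line_number;        // count lines in the skipped whitespace
  }
  if (c == EOF) return false;            // nothing left to read
  do {
    tok.push_back( static_cast<char>(c) );
  } while ((c = in.get()) != EOF && !std::isspace(c));
  if (c == '\n') ++line_number;          // a newline terminated the token
  return true;
}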
void MappingFunction::convert_connectivity_indices_impl( EntityTopology topo, int input_type, int output_type, size_t* index_list, unsigned num_indices, MsqError& err ) { bool in_edges, in_faces, in_region, out_edges, out_faces, out_region; TopologyInfo::higher_order( topo, input_type, in_edges, in_faces, in_region, err ); MSQ_ERRRTN(err); TopologyInfo::higher_order( topo, output_type, out_edges, out_faces, out_region, err ); MSQ_ERRRTN(err); // We could probably use TopologyInfo to do this in a more forward-compatible way, // but for efficiency assume the current ITAPS node ordering, where // all mid-edge nodes occur before mid-face nodes and the mid-region // node is always last. // If both have mid-region nodes and they don't have the same nodes // preceding the mid-region node, then we need to change that index. bool region_diff = in_region && out_region && (in_faces != out_faces || in_edges != out_edges); // If both have mid-face nodes and one has mid-edge nodes and the other // does not, then we need to change the face indices. bool face_diff = in_faces && out_faces && in_edges != out_edges; // if nothing to change, return if (!face_diff && !region_diff) return; const unsigned corners = TopologyInfo::corners(topo); const unsigned edges = TopologyInfo::edges(topo); const unsigned faces = TopologyInfo::faces(topo); const unsigned in_face_offset = in_edges ? corners+edges : corners; const unsigned in_regn_offset = in_faces ? in_face_offset+faces : in_face_offset; const unsigned out_face_offset = out_edges ? corners+edges : corners; const unsigned out_regn_offset = out_faces ? out_face_offset+faces : out_face_offset; // In the code below, assertions are used to validate the input // connectivity data as we assume it is an internal Mesquite coding // error for it to be inconsistent. True error checking is used // if the elements are incompatible (the index list for the input // type contains indices for which there is no corresponding logical // node location in the connectivity list of the output element type) // because that indicates an invalid setup (the combination of element // type and slave nodes does not result in a reduced element that is // compatible with the mapping function.) The latter should probably // have been caught by the mapping function, but to be safe we check // again here. for (size_t i = 0; i < num_indices; ++i) { if (index_list[i] < in_face_offset) { // corner or mid-edge node // nothing to change for these, but check that, if it is a mid-edge // node, the output type also has mid-edge nodes if (index_list[i] >= corners && !out_edges) { MSQ_SETERR(err)("Incompatible nodes present.", MsqError::UNSUPPORTED_ELEMENT ); return; } } else if (index_list[i] < in_regn_offset) { // mid-face node assert( TopologyInfo::dimension(topo) == 3 || index_list[i] == (unsigned)input_type - 1 ); if (!out_faces) { MSQ_SETERR(err)("Incompatible nodes present.", MsqError::UNSUPPORTED_ELEMENT ); return; } // working with unsigned type (size_t), so make sure we express this // such that there are no intermediate negative values. index_list[i] = index_list[i] + out_face_offset - in_face_offset; } else { // region assert( in_region ); assert( TopologyInfo::dimension(topo) == 3 && index_list[i] == (unsigned)input_type - 1 ); if (!out_region) { MSQ_SETERR(err)("Incompatible nodes present.", MsqError::UNSUPPORTED_ELEMENT ); return; } // working with unsigned type (size_t), so make sure we express this // such that there are no intermediate negative values.
index_list[i] = index_list[i] + out_regn_offset - in_regn_offset; } } }
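The offset arithmetic above locates where mid-face and mid-region indices start in the assumed ITAPS ordering (corners, then mid-edge, then mid-face, then mid-region). A small sketch with illustrative names:

// Sketch only: first mid-face index and mid-region index for a given node
// configuration, matching the in_face_offset / in_regn_offset arithmetic above.
struct NodeOffsets { unsigned face; unsigned region; };

NodeOffsets node_offsets( unsigned corners, unsigned edges, unsigned faces,
                          bool have_mid_edge, bool have_mid_face )
{
  NodeOffsets o;
  o.face   = have_mid_edge ? corners + edges : corners;  // first mid-face node
  o.region = have_mid_face ? o.face + faces  : o.face;   // mid-region node
  return o;
}
// Example: a 27-node hexahedron (8 corners, 12 edges, 6 faces) puts its first
// mid-face node at index 20 and its mid-region node at index 26.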
void SteepestDescent::optimize_vertex_positions(PatchData &pd, MsqError &err) { MSQ_FUNCTION_TIMER( "SteepestDescent::optimize_vertex_positions" ); const int SEARCH_MAX = 100; const double c1 = 1e-4; //std::vector<Vector3D> unprojected(pd.num_free_vertices()); std::vector<Vector3D> gradient(pd.num_free_vertices()); bool feasible=true;//bool for OF values double min_edge_len, max_edge_len; double step_size=0, original_value=0, new_value=0; double norm_squared=0; PatchDataVerticesMemento* pd_previous_coords; TerminationCriterion* term_crit=get_inner_termination_criterion(); OFEvaluator& obj_func = get_objective_function_evaluator(); // get vertex memento so we can restore vertex coordinates for bad steps. pd_previous_coords = pd.create_vertices_memento( err ); MSQ_ERRRTN(err); // use auto_ptr to automatically delete memento when we exit this function std::auto_ptr<PatchDataVerticesMemento> memento_deleter( pd_previous_coords ); // Evaluate objective function. // // Always use 'update' version when beginning optimization so that // if doing block coordinate descent the OF code knows the set of // vertices we are modifying during the optimization (the subset // of the mesh contained in the current patch.) This has to be // done up-front because typically an OF will just store the portion // of the OF value (e.g. the numeric contribution to the sum for an // averaging OF) for the initial patch. feasible = obj_func.update( pd, original_value, gradient, err ); MSQ_ERRRTN(err); // calculate gradient dotted with itself norm_squared = length_squared( gradient ); //set an error if initial patch is invalid. if(!feasible){ MSQ_SETERR(err)("SteepestDescent passed invalid initial patch.", MsqError::INVALID_ARG); return; } // use edge length as an initial guess for the step size pd.get_minmax_edge_length( min_edge_len, max_edge_len ); //step_size = max_edge_len / std::sqrt(norm_squared); //if (!finite(step_size)) // zero-length gradient // return; // if (norm_squared < DBL_EPSILON) // return; if (norm_squared >= DBL_EPSILON) step_size = max_edge_len / std::sqrt(norm_squared) * pd.num_free_vertices(); // The steepest descent loop... // We loop until the user-specified termination criteria are met. while (!term_crit->terminate()) { MSQ_DBGOUT(3) << "Iteration " << term_crit->get_iteration_count() << std::endl; MSQ_DBGOUT(3) << " o original_value: " << original_value << std::endl; MSQ_DBGOUT(3) << " o grad norm squared: " << norm_squared << std::endl; // Save current vertex coords so that they can be restored if // the step was bad. pd.recreate_vertices_memento( pd_previous_coords, err ); MSQ_ERRRTN(err); // Reduce step size until it satisfies Armijo condition int counter = 0; for (;;) { if (++counter > SEARCH_MAX || step_size < DBL_EPSILON) { MSQ_DBGOUT(3) << " o No valid step found. Giving Up." << std::endl; return; } // Move vertices to new positions. // Note: step direction is -gradient so we pass +gradient and // -step_size to achieve the same thing. pd.move_free_vertices_constrained( arrptr(gradient), gradient.size(), -step_size, err ); MSQ_ERRRTN(err); // Evaluate objective function for new vertices. We call the // 'evaluate' form here because we aren't sure yet if we want to // keep these vertices. Until we call 'update', we have the option // of reverting a block coordinate descent objective function's state // to that of the initial vertex coordinates. However, for block // coordinate descent to work correctly, we will need to call an // 'update' form if we decide to keep the new vertex coordinates.
feasible = obj_func.evaluate( pd, new_value, err ); if (err.error_code() == err.BARRIER_VIOLATED) err.clear(); // barrier violated does not represent an actual error here MSQ_ERRRTN(err); MSQ_DBGOUT(3) << " o step_size: " << step_size << std::endl; MSQ_DBGOUT(3) << " o new_value: " << new_value << std::endl; if (!feasible) { // OF value is invalid, decrease step_size a lot step_size *= 0.2; } else if (new_value > original_value - c1 * step_size * norm_squared) { // Armijo condition not met. step_size *= 0.5; } else { // Armijo condition met, stop break; } // undo previous step : restore vertex coordinates pd.set_to_vertices_memento( pd_previous_coords, err ); MSQ_ERRRTN(err); } // Re-evaluate objective function to get gradient. // Calling the 'update' form here incorporates the new vertex // positions into the 'accumulated' value if we are doing a // block coordinate descent optimization. obj_func.update(pd, original_value, gradient, err ); MSQ_ERRRTN(err); if (projectGradient) { //if (cosineStep) { // unprojected = gradient; // pd.project_gradient( gradient, err ); MSQ_ERRRTN(err); // double dot = inner_product( arrptr(gradient), arrptr(unprojected), gradient.size() ); // double lensqr1 = length_squared( gradient ); // double lensqr2 = length_squared( unprojected ); // double cossqr = dot * dot / lensqr1 / lensqr2; // step_size *= sqrt(cossqr); //} //else { pd.project_gradient( gradient, err ); MSQ_ERRRTN(err); //} } // Update termination criterion for next iteration. // This is necessary for efficiency. Some values can be adjusted // for each iteration so we don't need to re-calculate the value // over the entire mesh. term_crit->accumulate_patch( pd, err ); MSQ_ERRRTN(err); term_crit->accumulate_inner( pd, original_value, arrptr(gradient), err ); MSQ_ERRRTN(err); // Calculate initial step size for next iteration using step size // from this iteration step_size *= norm_squared; norm_squared = length_squared( gradient ); // if (norm_squared < DBL_EPSILON) // break; if (norm_squared >= DBL_EPSILON) step_size /= norm_squared; } }
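The inner loop above is an Armijo backtracking search along the negative gradient. A compact sketch under illustrative names (f_of_step stands in for evaluating the objective after moving the patch by step t; the constants mirror c1 and the halving above):

#include <functional>

// Sketch only: accept the first t with f(x - t*g) <= f(x) - c1 * t * ||g||^2,
// halving t otherwise, as the loop above does with step_size.
double armijo_step( const std::function<double(double)>& f_of_step,
                    double f0, double grad_norm_sq, double t )
{
  const double c1 = 1e-4;
  const int search_max = 100;
  for (int i = 0; i < search_max && t > 1e-30; ++i) {
    if (f_of_step( t ) <= f0 - c1 * t * grad_norm_sq)
      return t;                          // sufficient decrease achieved
    t *= 0.5;                            // otherwise halve the step and retry
  }
  return 0.0;                            // no acceptable step found
}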
double ConjugateGradient::get_step(PatchData &pd,double f0,int &j, MsqError &err) { // get OF evaluator OFEvaluator& objFunc = get_objective_function_evaluator(); size_t num_vertices=pd.num_free_vertices(); //initial guess for alp double alp=1.0; int jmax=100; double rho=0.5; //feasible=false implies the mesh is not in the feasible region bool feasible=false; int found=0; //f and fnew hold the objective function value double f=0; double fnew=0; //Counter to avoid infinitely scaling alp j=0; //save memento pd.recreate_vertices_memento(pMemento, err); //if we must check feasibility //while step takes mesh into infeasible region and ... while (j<jmax && !feasible && alp>MSQ_MIN) { ++j; pd.set_free_vertices_constrained(pMemento,arrptr(pGrad),num_vertices,alp,err); feasible=objFunc.evaluate(pd,f,err); MSQ_ERRZERO(err); //if not feasible, try a smaller alp (take smaller step) if(!feasible){ alp*=rho; } }//end while ... //if above while ended due to j>=jmax, no valid step was found. if(j>=jmax){ MSQ_PRINT(2)("\nFeasible Point Not Found"); return 0.0; } //Message::print_info("\nOriginal f %f, first new f = %f, alp = %f",f0,f,alp); //if new f is larger than original, our step was too large if(f>=f0){ j=0; while (j<jmax && found == 0){ ++j; alp *= rho; pd.set_free_vertices_constrained(pMemento,arrptr(pGrad),num_vertices,alp,err); //Get new obj value //if patch is now invalid, then the feasible region is not convex or //we have an error. For now, we assume an error. if(! objFunc.evaluate(pd,f,err) ){ MSQ_SETERR(err)("Non-convex feasibility region found.",MsqError::INVALID_MESH); } pd.set_to_vertices_memento(pMemento,err);MSQ_ERRZERO(err); //if our step has now improved the objective function value if(f<f0){ found=1; } }// end while j less than jmax //Message::print_info("\nj = %d found = %d f = %20.18f f0 = %20.18f\n",j,found,f,f0); //if above ended because of j>=jmax, take no step if(found==0){ //Message::print_info("alp = %10.8f, but returning zero\n",alp); alp=0.0; return alp; } j=0; //while shrinking the step improves the objFunc value further, //scale alp down. Return alp when scaling once more would //no longer improve the objFunc value. while(j<jmax){ ++j; alp*=rho; //step alp in search direction from original positions pd.set_free_vertices_constrained(pMemento,arrptr(pGrad),num_vertices,alp,err);MSQ_ERRZERO(err); //get new objective function value if (! objFunc.evaluate(pd,fnew,err)) MSQ_SETERR(err)("Non-convex feasibility region found while " "computing new f.",MsqError::INVALID_MESH); if(fnew<f){ f=fnew; } else{ //Reset the vertices to original position pd.set_to_vertices_memento(pMemento,err);MSQ_ERRZERO(err); alp/=rho; return alp; } } //Reset the vertices to original position and return alp pd.set_to_vertices_memento(pMemento,err);MSQ_ERRZERO(err); return alp; } //else our new f was already smaller than our original else{ j=0; //check to see how large of a step we can take while (j<jmax && found == 0) { ++j; //scale alp up (rho must be less than 1) alp /= rho; //step alp in search direction from original positions pd.set_free_vertices_constrained(pMemento,arrptr(pGrad),num_vertices,alp,err);MSQ_ERRZERO(err); feasible = objFunc.evaluate(pd,fnew, err);MSQ_ERRZERO(err); if ( !
feasible ){ alp *= rho; //Reset the vertices to original position and return alp pd.set_to_vertices_memento(pMemento,err);MSQ_ERRZERO(err); return alp; } if (fnew<f) { f = fnew; } else { found=1; alp *= rho; } } //Reset the vertices to original position and return alp pd.set_to_vertices_memento(pMemento,err);MSQ_ERRZERO(err); return alp; } }
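A condensed sketch of the step-scaling strategy in get_step above: shrink alp by rho while the value does not improve, otherwise grow by 1/rho while it keeps improving. Names are illustrative (f_of stands for evaluating the objective after moving the patch by alp along the search direction; infeasible trial points can be modeled as returning a very large value):

#include <functional>

// Sketch only: returns a step length alp, or 0 if no improving step is found.
double scale_step( const std::function<double(double)>& f_of,
                   double f0, double alp, double rho = 0.5, int jmax = 100 )
{
  double f = f_of( alp );
  if (f >= f0) {                         // overshot: shrink until f improves
    int j = 0;
    while (j++ < jmax && f >= f0) { alp *= rho; f = f_of( alp ); }
    if (f >= f0) return 0.0;             // no improving step found
    while (j++ < jmax) {                 // keep shrinking while it still helps
      double fn = f_of( alp * rho );
      if (fn >= f) break;
      alp *= rho; f = fn;
    }
  } else {                               // already improved: grow until it stops helping
    for (int j = 0; j < jmax; ++j) {
      double fn = f_of( alp / rho );
      if (fn >= f) break;
      alp /= rho; f = fn;
    }
  }
  return alp;
}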
bool IdealWeightInverseMeanRatio::evaluate_with_Hessian( PatchData& pd, size_t handle, double& m, std::vector<size_t>& indices, std::vector<Vector3D>& g, std::vector<Matrix3D>& h, MsqError& err ) { const MsqMeshEntity* e = &pd.element_by_index(handle); EntityTopology topo = e->get_element_type(); if (!analytical_average_hessian() && topo != TRIANGLE && topo != TETRAHEDRON) { static bool print = true; if (print) { MSQ_DBGOUT(1) << "Analytical Hessian not available for selected averaging scheme. " << "Using (possibly much slower) numerical approximation of Hessian" << " of quality metric. " << std::endl; print = false; } return QualityMetric::evaluate_with_Hessian( pd, handle, m, indices, g, h, err ); } const MsqVertex *vertices = pd.get_vertex_array(err); MSQ_ERRZERO(err); const size_t *v_i = e->get_vertex_index_array(); Vector3D n; // Surface normal for 2D objects // Prism and Hex element descriptions static const int locs_pri[6][4] = {{0, 1, 2, 3}, {1, 2, 0, 4}, {2, 0, 1, 5}, {3, 5, 4, 0}, {4, 3, 5, 1}, {5, 4, 3, 2}}; static const int locs_hex[8][4] = {{0, 1, 3, 4}, {1, 2, 0, 5}, {2, 3, 1, 6}, {3, 0, 2, 7}, {4, 7, 5, 0}, {5, 4, 6, 1}, {6, 5, 7, 2}, {7, 6, 4, 3}}; const Vector3D d_con(1.0, 1.0, 1.0); int i; bool metric_valid = false; const uint32_t fm = fixed_vertex_bitmap( pd, e, indices ); m = 0.0; switch(topo) { case TRIANGLE: pd.get_domain_normal_at_element(e, n, err); MSQ_ERRZERO(err); n /= n.length(); // Need unit normal mCoords[0] = vertices[v_i[0]]; mCoords[1] = vertices[v_i[1]]; mCoords[2] = vertices[v_i[2]]; g.resize(3); h.resize(6); if (!h_fcn_2e(m, arrptr(g), arrptr(h), mCoords, n, a2Con, b2Con, c2Con)) return false; break; case QUADRILATERAL: pd.get_domain_normal_at_element(e, n, err); MSQ_ERRZERO(err); n /= n.length(); // Need unit normal for (i = 0; i < 4; ++i) { mCoords[0] = vertices[v_i[locs_hex[i][0]]]; mCoords[1] = vertices[v_i[locs_hex[i][1]]]; mCoords[2] = vertices[v_i[locs_hex[i][2]]]; if (!h_fcn_2i(mMetrics[i], mGradients+3*i, mHessians+6*i, mCoords, n, a2Con, b2Con, c2Con, d_con)) return false; } g.resize(4); h.resize(10); m = average_corner_hessians( QUADRILATERAL, fm, 4, mMetrics, mGradients, mHessians, arrptr(g), arrptr(h), err ); MSQ_ERRZERO( err ); break; case TETRAHEDRON: mCoords[0] = vertices[v_i[0]]; mCoords[1] = vertices[v_i[1]]; mCoords[2] = vertices[v_i[2]]; mCoords[3] = vertices[v_i[3]]; g.resize(4); h.resize(10); metric_valid = h_fcn_3e(m, arrptr(g), arrptr(h), mCoords, a3Con, b3Con, c3Con); if (!metric_valid) return false; break; case PYRAMID: for (i = 0; i < 4; ++i) { mCoords[0] = vertices[v_i[ i ]]; mCoords[1] = vertices[v_i[(i+1)%4]]; mCoords[2] = vertices[v_i[(i+3)%4]]; mCoords[3] = vertices[v_i[ 4 ]]; metric_valid = h_fcn_3p(mMetrics[i], mGradients+4*i, mHessians+10*i, mCoords, a3Con, b3Con, c3Con); if (!metric_valid) return false; } g.resize(5); h.resize(15); m = average_corner_hessians( PYRAMID, fm, 4, mMetrics, mGradients, mHessians, arrptr(g), arrptr(h), err ); MSQ_ERRZERO( err ); break; case PRISM: for (i = 0; i < 6; ++i) { mCoords[0] = vertices[v_i[locs_pri[i][0]]]; mCoords[1] = vertices[v_i[locs_pri[i][1]]]; mCoords[2] = vertices[v_i[locs_pri[i][2]]]; mCoords[3] = vertices[v_i[locs_pri[i][3]]]; if (!h_fcn_3w(mMetrics[i], mGradients+4*i, mHessians+10*i, mCoords, a3Con, b3Con, c3Con)) return false; } g.resize(6); h.resize(21); m = average_corner_hessians( PRISM, fm, 6, mMetrics, mGradients, mHessians, arrptr(g), arrptr(h), err ); MSQ_ERRZERO( err ); break; case HEXAHEDRON: for (i = 0; i < 8; ++i) { mCoords[0] = vertices[v_i[locs_hex[i][0]]];
mCoords[1] = vertices[v_i[locs_hex[i][1]]]; mCoords[2] = vertices[v_i[locs_hex[i][2]]]; mCoords[3] = vertices[v_i[locs_hex[i][3]]]; if (!h_fcn_3i(mMetrics[i], mGradients+4*i, mHessians+10*i, mCoords, a3Con, b3Con, c3Con, d_con)) return false; } g.resize(8); h.resize(36); m = average_corner_hessians( HEXAHEDRON, fm, 8, mMetrics, mGradients, mHessians, arrptr(g), arrptr(h), err ); MSQ_ERRZERO( err ); break; default: MSQ_SETERR(err)(MsqError::UNSUPPORTED_ELEMENT, "Element type (%d) not supported in IdealWeightInverseMeanRatio", (int)topo); return false; } // end switch over element type remove_fixed_gradients( topo, fm, g ); remove_fixed_hessians( topo, fm, h ); return true; }
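The locs_hex table used in both evaluate() and evaluate_with_Hessian() lists, for each hexahedron corner, the corner itself followed by its three edge-adjacent neighbors. A sketch of how a corner Jacobian could be assembled from those four points (whether the m_fcn_3i / h_fcn_3i kernels form the edge vectors exactly this way is not shown here; Vec3 and the function name are illustrative):

// Sketch only: the three edge vectors emanating from corner i of a hexahedron,
// gathered with the same adjacency table used above.
struct Vec3 { double x, y, z; };
static Vec3 sub( const Vec3& a, const Vec3& b ) { return Vec3{ a.x - b.x, a.y - b.y, a.z - b.z }; }

void corner_edge_vectors( const Vec3 verts[8], int i, Vec3 cols[3] )
{
  static const int locs_hex[8][4] = {{0,1,3,4},{1,2,0,5},{2,3,1,6},{3,0,2,7},
                                     {4,7,5,0},{5,4,6,1},{6,5,7,2},{7,6,4,3}};
  const Vec3& c = verts[ locs_hex[i][0] ];       // the corner itself
  cols[0] = sub( verts[ locs_hex[i][1] ], c );   // edge toward first neighbor
  cols[1] = sub( verts[ locs_hex[i][2] ], c );   // edge toward second neighbor
  cols[2] = sub( verts[ locs_hex[i][3] ], c );   // edge toward third neighbor
}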