double Fields::computeADMMass() {
  Basis* thetaBasis = basis.getBasis(SphericalBasis::COORD2);
  const double* xi = thetaBasis->getAbscissas();
  int nR = basis.getRank(SphericalBasis::COORD1);
  const double* r = basis.getCoord(SphericalBasis::COORD1);
  int nTheta = basis.getRank(SphericalBasis::COORD2);
  const double* theta = basis.getCoord(SphericalBasis::COORD2);

  double integrand[nTheta];
  for (int iTheta = 0; iTheta < nTheta; iTheta++) {
    // This formula holds because of the boundary condition.
    double myPsi = psi(r[nR-1], theta[iTheta]);
    integrand[iTheta] = (myPsi - 1.)*r[nR-1]*sin(theta[iTheta]);
    // Change to the spectral coordinate to integrate...
    integrand[iTheta] *= 0.5*M_PI*sqrt(1.0 - xi[iTheta]*xi[iTheta]);
  }
  return thetaBasis->integrate(integrand);
}
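A sketch of the identity the boundary-condition comment relies on, in my own notation (none of this is spelled out in the code): with a Robin-type outer boundary condition the conformal factor behaves like psi ~ 1 + M_ADM/(2r), so (psi - 1) r tends to M_ADM/2 at the outermost shell and the angular integral over theta in [0, pi] recovers the mass:

    M_{\rm ADM} \;\simeq\; \int_0^{\pi} \bigl(\psi(r_{\max},\theta)-1\bigr)\, r_{\max}\, \sin\theta \, d\theta,
    \qquad
    \int_0^{\pi} \tfrac{1}{2} M_{\rm ADM}\, \sin\theta \, d\theta \;=\; M_{\rm ADM}.

The extra factor 0.5*M_PI*sqrt(1 - xi*xi) reads like the Jacobian dtheta/dxi of the map to the spectral coordinate xi combined with a Chebyshev-type weight compensation; the exact form depends on the convention of thetaBasis->integrate(), which is not shown here.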
int DefaultEvaluatorForIntegralOperators<BasisFunctionType, KernelType, ResultType, GeometryFactory>::farFieldQuadOrder(
    const Basis<BasisFunctionType>& basis) const {
  int elementOrder = (basis.order());
  // Order required for exact quadrature on affine elements with kernel
  // approximated by a polynomial of order identical with that of the basis
  int defaultQuadratureOrder = 2 * elementOrder;
  return m_quadratureOptions.quadratureOrder(defaultQuadratureOrder);
}
// constraint between two different rigidbodies
btVehicleJacobianEntry(
    const Basis &world2A,
    const Basis &world2B,
    const Vector3 &rel_pos1,
    const Vector3 &rel_pos2,
    const Vector3 &jointAxis,
    const Vector3 &inertiaInvA,
    const real_t massInvA,
    const Vector3 &inertiaInvB,
    const real_t massInvB) :
    m_linearJointAxis(jointAxis) {
  m_aJ = world2A.xform(rel_pos1.cross(m_linearJointAxis));
  m_bJ = world2B.xform(rel_pos2.cross(-m_linearJointAxis));
  m_0MinvJt = inertiaInvA * m_aJ;
  m_1MinvJt = inertiaInvB * m_bJ;
  m_Adiag = massInvA + m_0MinvJt.dot(m_aJ) + massInvB + m_1MinvJt.dot(m_bJ);
  //btAssert(m_Adiag > real_t(0.0));
}
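For reference, the scalar assembled into m_Adiag is the usual effective-mass denominator of a one-dimensional constraint solved by sequential impulses; in my notation (not taken from the source),

    J_A = r_1 \times \hat a, \qquad J_B = r_2 \times (-\hat a),
    \qquad
    A_{\rm diag} = \frac{1}{m_A} + \frac{1}{m_B}
                 + J_A^{\top} I_A^{-1} J_A + J_B^{\top} I_B^{-1} J_B ,

so that a corrective impulse of magnitude roughly (relative velocity error along a-hat) / A_diag cancels the constraint violation (sign conventions vary). The world2A/world2B transforms move the lever-arm terms into the frames in which the inverse inertias are stored.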
NeutronDrop::NeutronDrop(Basis &_basis, Interaction &_inter, int _nbNeut, double _omega, int _nPoints) :
    System(std::string("NeutronDrop"), _basis, arma::ivec({ _nbNeut }),
           std::vector<std::string>({"neutron"}), _inter, _nPoints),
    omega(_omega),
    TBME(_basis.size, _basis.size) {
  if (basis->type == "SpBasis" || basis->type == "ReducedSpBasis") {
    for (int i = 0; i < basis->size; i++)
      for (int j = 0; j < basis->size; j++) {
        TBME(i, j) = arma::zeros(_basis.size, basis->size);
        for (int k = 0; k < basis->size; k++)
          for (int l = 0; l < basis->size; l++) {
            arma::field<arma::mat> dummyR;
            TBME(i, j)(k, l) = inter->get(dummyR, 0, i, 0, l, 0, j, 0, k);
          }
      }
  } else if (basis->type == "FullSpBasis") {
    for (int i = 0; i < basis->size; i++)
      for (int j = 0; j < basis->size; j++) {
        if (_basis.qNumbers(i, 1) != _basis.qNumbers(j, 1)) continue;
        if (_basis.qNumbers(i, 2) != _basis.qNumbers(j, 2)) continue;
        TBME(i, j) = arma::zeros(_basis.size, basis->size);
        for (int k = 0; k < basis->size; k++)
          for (int l = 0; l < basis->size; l++) {
            if (_basis.qNumbers(k, 1) != _basis.qNumbers(l, 1)) continue;
            if (_basis.qNumbers(k, 2) != _basis.qNumbers(l, 2)) continue;
            arma::field<arma::mat> dummyR;
            TBME(i, j)(k, l) = inter->get(dummyR, 0, i, 0, l, 0, j, 0, k);
          }
      }
  }
}
void GridMapEditor::_update_duplicate_indicator() {
  if (!selection.active || input_action != INPUT_DUPLICATE) {
    Transform xf;
    xf.basis.set_zero();
    VisualServer::get_singleton()->instance_set_transform(duplicate_instance, xf);
    return;
  }

  Transform xf;
  xf.scale(Vector3(1,1,1)*(Vector3(1,1,1)+(selection.end-selection.begin))*node->get_cell_size());
  xf.origin = (selection.begin+(selection.current-selection.click))*node->get_cell_size();
  Basis rot;
  rot.set_orthogonal_index(selection.duplicate_rot);
  xf.basis = rot * xf.basis;

  VisualServer::get_singleton()->instance_set_transform(duplicate_instance, node->get_global_transform() * xf);
}
static void UE3_QuantizeBasis(Basis & basis) {
  float handness = basis.handness();

  UE3_QuantizeVector(basis.normal);
  UE3_QuantizeVector(basis.tangent);

  // Reconstruct bitangent as done in the vertex shader:
  basis.bitangent = cross(basis.normal, basis.tangent) * handness;

  // @@ Does the vertex shader normalize normal, tangent, and bitangent?
}
FnMap minimizeLength(const FnWord &u, const Basis &basis) {
  /* Returns an automorphism phi such that phi(u) has minimal length. */
  int r = basis.getRank();
  FnMap phi(r);

  if (u == Id) return phi;
  if (u.length() == 1) return phi;

  int old_norm, new_norm;
  FnWord u_min(u), tmp;
  FnMap whAuto(r);

  QList<WhiteheadData> whAutos = whiteheadAutos(basis);
  QListIterator<WhiteheadData> move(whAutos);
  bool reduced_norm = true;

  while (reduced_norm) {
    old_norm = u_min.length();
    new_norm = old_norm;
    move.toFront();
    while (old_norm <= new_norm && move.hasNext()) {
      whAuto = whitehead(move.next(), basis);
      tmp = whAuto(u_min);
      new_norm = tmp.length();
    }
    if (new_norm < old_norm) {
      u_min = tmp;
      phi = whAuto*phi;
    } else
      reduced_norm = false;
  } // end while (reduced_norm)

  // now u_min = phi(u)
  return phi;
}
// This is doing a simple ear-clipping algorithm that skips invalid triangles. Ideally, we should
// also sort the ears by angle, start with the ones that have the smallest angle and proceed in order.
HalfEdge::Mesh * nv::triangulate(const HalfEdge::Mesh * inputMesh)
{
    HalfEdge::Mesh * mesh = new HalfEdge::Mesh;

    // Add all vertices.
    const uint vertexCount = inputMesh->vertexCount();
    for (uint v = 0; v < vertexCount; v++) {
        const HalfEdge::Vertex * vertex = inputMesh->vertexAt(v);
        mesh->addVertex(vertex->pos);
    }

    Array<int> polygonVertices;
    Array<float> polygonAngles;
    Array<Vector2> polygonPoints;

    const uint faceCount = inputMesh->faceCount();
    for (uint f = 0; f < faceCount; f++)
    {
        const HalfEdge::Face * face = inputMesh->faceAt(f);
        nvDebugCheck(face != NULL);

        const uint edgeCount = face->edgeCount();
        nvDebugCheck(edgeCount >= 3);

        polygonVertices.clear();
        polygonVertices.reserve(edgeCount);

        if (edgeCount == 3) {
            // Simple case for triangles.
            for (HalfEdge::Face::ConstEdgeIterator it(face->edges()); !it.isDone(); it.advance())
            {
                const HalfEdge::Edge * edge = it.current();
                const HalfEdge::Vertex * vertex = edge->vertex;
                polygonVertices.append(vertex->id);
            }

            int v0 = polygonVertices[0];
            int v1 = polygonVertices[1];
            int v2 = polygonVertices[2];

            mesh->addFace(v0, v1, v2);
        }
        else {
            // Build 2D polygon projecting vertices onto normal plane.
            // Faces are not necessarily planar; this is, for example, the case when the face comes from
            // filling a hole. In such cases it's much better to use the best-fit plane.
            const Vector3 fn = face->normal();

            Basis basis;
            basis.buildFrameForDirection(fn);

            polygonPoints.clear();
            polygonPoints.reserve(edgeCount);
            polygonAngles.clear();
            polygonAngles.reserve(edgeCount);

            for (HalfEdge::Face::ConstEdgeIterator it(face->edges()); !it.isDone(); it.advance())
            {
                const HalfEdge::Edge * edge = it.current();
                const HalfEdge::Vertex * vertex = edge->vertex;
                polygonVertices.append(vertex->id);

                Vector2 p;
                p.x = dot(basis.tangent, vertex->pos);
                p.y = dot(basis.bitangent, vertex->pos);
                polygonPoints.append(p);
            }
            polygonAngles.resize(edgeCount);

            while (polygonVertices.size() > 2) {
                uint size = polygonVertices.size();

                // Update polygon angles. @@ Update only those that have changed.
                float minAngle = 2 * PI;
                uint bestEar = 0; // Use first one if none of them is valid.
                bool bestIsValid = false;
                for (uint i = 0; i < size; i++) {
                    uint i0 = i;
                    uint i1 = (i+1) % size; // Use Sean's polygon iteration trick.
                    uint i2 = (i+2) % size;

                    Vector2 p0 = polygonPoints[i0];
                    Vector2 p1 = polygonPoints[i1];
                    Vector2 p2 = polygonPoints[i2];

                    float d = clamp(dot(p0-p1, p2-p1) / (length(p0-p1) * length(p2-p1)), -1.0f, 1.0f);
                    float angle = acosf(d);

                    float area = triangleArea(p0, p1, p2);
                    if (area < 0.0f) angle = 2.0f * PI - angle;

                    polygonAngles[i1] = angle;

                    if (angle < minAngle || !bestIsValid) {
                        // Make sure this is a valid ear, if not, skip this point.
                        bool valid = true;
                        for (uint j = 0; j < size; j++) {
                            if (j == i0 || j == i1 || j == i2) continue;
                            Vector2 p = polygonPoints[j];

                            if (pointInTriangle(p, p0, p1, p2)) {
                                valid = false;
                                break;
                            }
                        }

                        if (valid || !bestIsValid) {
                            minAngle = angle;
                            bestEar = i1;
                            bestIsValid = valid;
                        }
                    }
                }

                nvDebugCheck(minAngle <= 2 * PI);

                // Clip best ear:
                uint i0 = (bestEar+size-1) % size;
                uint i1 = (bestEar+0) % size;
                uint i2 = (bestEar+1) % size;

                int v0 = polygonVertices[i0];
                int v1 = polygonVertices[i1];
                int v2 = polygonVertices[i2];

                mesh->addFace(v0, v1, v2);

                polygonVertices.removeAt(i1);
                polygonPoints.removeAt(i1);
                polygonAngles.removeAt(i1);
            }
        }

#if 0
        uint i = 0;
        while (polygonVertices.size() > 2 && i < polygonVertices.size()) {
            uint size = polygonVertices.size();
            uint i0 = (i+0) % size;
            uint i1 = (i+1) % size;
            uint i2 = (i+2) % size;

            const HalfEdge::Vertex * v0 = polygonVertices[i0];
            const HalfEdge::Vertex * v1 = polygonVertices[i1];
            const HalfEdge::Vertex * v2 = polygonVertices[i2];

            const Vector3 p0 = v0->pos;
            const Vector3 p1 = v1->pos;
            const Vector3 p2 = v2->pos;

            const Vector3 e0 = p2 - p1;
            const Vector3 e1 = p0 - p1;

            // If this ear forms a valid triangle, setup relations, remove v1 and repeat.
            Vector3 n = cross(e0, e1);
            float len = dot(fn, n); // = sin(angle)

            float angle = asin(len);

            if (len > 0.0f) {
                mesh->addFace(v0->id(), v1->id(), v2->id());
                polygonVertices.removeAt(i1);
                polygonAngles.removeAt(i1);
                if (i2 > i1) i2--;
                // @@ Update angles at i0 and i2
            }
            else {
                i++;
            }
        }

        // @@ Create a few degenerate triangles to avoid introducing holes.
        i = 0;
        const uint size = polygonVertices.size();
        while (i < size - 2) {
            uint i0 = (i+0) % size;
            uint i1 = (i+1) % size;
            uint i2 = (i+2) % size;

            const HalfEdge::Vertex * v0 = polygonVertices[i0];
            const HalfEdge::Vertex * v1 = polygonVertices[i1];
            const HalfEdge::Vertex * v2 = polygonVertices[i2];

            mesh->addFace(v0->id(), v1->id(), v2->id());
            i++;
        }
#endif
    }

    mesh->linkBoundary();

    return mesh;
}
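The ear test above leans on two helpers that are not shown here, triangleArea and pointInTriangle. A minimal, self-contained sketch of what such 2-D predicates typically look like (the names Vec2, signedArea2 and pointInTri are mine, not from this codebase):

struct Vec2 { float x, y; };

// Twice the signed area of triangle (a, b, c); positive when the vertices are CCW.
static float signedArea2(const Vec2 &a, const Vec2 &b, const Vec2 &c) {
    return (b.x - a.x) * (c.y - a.y) - (b.y - a.y) * (c.x - a.x);
}

// Point-in-triangle via consistent half-plane signs (points on an edge count as inside).
static bool pointInTri(const Vec2 &p, const Vec2 &a, const Vec2 &b, const Vec2 &c) {
    const float d0 = signedArea2(a, b, p);
    const float d1 = signedArea2(b, c, p);
    const float d2 = signedArea2(c, a, p);
    const bool hasNeg = (d0 < 0) || (d1 < 0) || (d2 < 0);
    const bool hasPos = (d0 > 0) || (d1 > 0) || (d2 > 0);
    return !(hasNeg && hasPos);
}

A signed area (CCW positive) is also what lets the loop above detect reflex vertices via the area < 0.0f test.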
Basis Basis::transposed() const { Basis tr = *this; tr.transpose(); return tr; }
// Matrix and Residual Fills
bool HMX_PDE::evaluate(
           NOX::Epetra::Interface::Required::FillType flag,
           const Epetra_Vector* soln,
           Epetra_Vector* tmp_rhs)
{
  // Determine what to fill (F or Jacobian)
  bool fillF = false;
  bool fillMatrix = false;
  if (tmp_rhs != 0) {
    fillF = true;
    rhs = tmp_rhs;
  }
  else {
    fillMatrix = true;
  }

  // "flag" can be used to determine how accurate your fill of F should be
  // depending on why we are calling evaluate (Could be using computeF to
  // populate a Jacobian or Preconditioner).
  if (flag == NOX::Epetra::Interface::Required::Residual) {
    // Do nothing for now
  }
  else if (flag == NOX::Epetra::Interface::Required::Jac) {
    // Do nothing for now
  }
  else if (flag == NOX::Epetra::Interface::Required::Prec) {
    // Do nothing for now
  }
  else if (flag == NOX::Epetra::Interface::Required::User) {
    // Do nothing for now
  }

  int numDep = depProblems.size();

  // Create the overlapped solution and position vectors
  Epetra_Vector u(*OverlapMap);
  Epetra_Vector uold(*OverlapMap);
  std::vector<Epetra_Vector*> dep(numDep);
  for( int i = 0; i<numDep; i++)
    dep[i] = new Epetra_Vector(*OverlapMap);
  Epetra_Vector xvec(*OverlapMap);

  // Export Solution to Overlap vector
  // If the vector to be used in the fill is already in the Overlap form,
  // we simply need to map on-processor from column-space indices to
  // OverlapMap indices. Note that the old solution is simply fixed data that
  // needs to be sent to an OverlapMap (ghosted) vector. The conditional
  // treatment for the current solution vector arises from use of
  // FD coloring in parallel.
  uold.Import(*oldSolution, *Importer, Insert);
  for( int i = 0; i<numDep; i++ )
    (*dep[i]).Import(*( (*(depSolutions.find(depProblems[i]))).second ), *Importer, Insert);
  xvec.Import(*xptr, *Importer, Insert);
  if( flag == NOX::Epetra::Interface::Required::FD_Res)
    // Overlap vector for solution received from FD coloring, so simply reorder
    // on processor
    u.Export(*soln, *ColumnToOverlapImporter, Insert);
  else // Communication to Overlap vector is needed
    u.Import(*soln, *Importer, Insert);

  // Declare required variables
  int OverlapNumMyNodes = OverlapMap->NumMyElements();

  int OverlapMinMyNodeGID;
  if (MyPID==0) OverlapMinMyNodeGID = StandardMap->MinMyGID();
  else OverlapMinMyNodeGID = StandardMap->MinMyGID()-1;

  // Setup iterators for looping over each problem source term contribution
  // to this one's PDE
  map<string, double>::iterator srcTermIter;
  map<string, double>::iterator srcTermEnd = SrcTermWeight.end();

  // Bundle up the dependent variables in the way needed for computing
  // the source terms of each reaction
  /*
  Epetra_Vector debugSrcTerm(*OverlapMap);
  map<string, Epetra_Vector*> debugDepVars;
  debugDepVars.insert( pair<string, Epetra_Vector*>(getName(), &u) );
  for( int i = 0; i<numDep; i++ )
    debugDepVars.insert( pair<string, Epetra_Vector*>
      (myManager->getName(depProblems[i]), &dep[i]) );

  for( srcTermIter = SrcTermWeight.begin(); srcTermIter != srcTermEnd; srcTermIter++) {
    HMX_PDE &srcTermProb = dynamic_cast<HMX_PDE&>( myManager->getProblem(srcTermIter->first) );
    std::cout << "Inside problem: \"" << getName() << "\" calling to get source term "
              << "from problem: \"" << srcTermIter->first << "\" :" << std::endl;
    srcTermProb.computeSourceTerm(debugDepVars, debugSrcTerm);
    std::cout << "Resulting source term :" << debugSrcTerm << std::endl;
  }
  */

  int row;
  double alpha = 500.0;
  double xx[2];
  double uu[2];
  double uuold[2];
  std::vector<double*> ddep(numDep);
  for( int i = 0; i<numDep; i++)
    ddep[i] = new double[2];
  double *srcTerm = new double[2];
  Basis basis;

  // Bundle up the dependent variables in the way needed for computing
  // the source terms of each reaction
  map<string, double*> depVars;
  depVars.insert( pair<string, double*>(getName(), uu) );
  for( int i = 0; i<numDep; i++ )
    depVars.insert( pair<string, double*>
      (myManager->getProblemName(depProblems[i]), ddep[i]) );

  // Do a check on this fill
  // map<string, double*>::iterator iter;
  // for( iter = depVars.begin(); iter != depVars.end(); iter++)
  //   std::cout << "Inserted ... " << iter->first << "\t" << iter->second << std::endl;
  // std::cout << "--------------------------------------------------" << std::endl;
  // for( iter = depVars.begin(); iter != depVars.end(); iter++)
  //   std::cout << iter->first << "\t" << (iter->second)[0] << ", "
  //             << (iter->second)[1] << std::endl;
  // std::cout << "--------------------------------------------------" << std::endl;

  // Zero out the objects that will be filled
  if ( fillMatrix ) A->PutScalar(0.0);
  if ( fillF ) rhs->PutScalar(0.0);

  // Loop Over # of Finite Elements on Processor
  for (int ne=0; ne < OverlapNumMyNodes-1; ne++) {

    // Loop Over Gauss Points
    for(int gp=0; gp < 2; gp++) {
      // Get the solution and coordinates at the nodes
      xx[0]=xvec[ne];
      xx[1]=xvec[ne+1];
      uu[0] = u[ne];
      uu[1] = u[ne+1];
      uuold[0] = uold[ne];
      uuold[1] = uold[ne+1];
      for( int i = 0; i<numDep; i++ ) {
        ddep[i][0] = (*dep[i])[ne];
        ddep[i][1] = (*dep[i])[ne+1];
      }
      // Calculate the basis function and variables at the gauss points
      basis.getBasis(gp, xx, uu, uuold, ddep);

      // Loop over Nodes in Element
      for (int i=0; i< 2; i++) {
        row=OverlapMap->GID(ne+i);
        if (StandardMap->MyGID(row)) {
          if ( fillF ) {
            // First do time derivative and diffusion operator
            (*rhs)[StandardMap->LID(OverlapMap->GID(ne+i))]+=
              +basis.wt*basis.dx
              *((basis.uu - basis.uuold)/dt * basis.phi[i]
              +(1.0/(basis.dx*basis.dx))*diffCoef*basis.duu*basis.dphide[i]);
            // Then do source term contributions
            //
            for( srcTermIter = SrcTermWeight.begin(); srcTermIter != srcTermEnd; srcTermIter++) {
              HMX_PDE &srcTermProb =
                dynamic_cast<HMX_PDE&>( myManager->getProblem((*srcTermIter).first) );
              srcTermProb.computeSourceTerm(2, depVars, srcTerm);
              (*rhs)[StandardMap->LID(OverlapMap->GID(ne+i))]+=
                +basis.wt*basis.dx
                *( basis.phi[i] * ( - (*srcTermIter).second * srcTerm[i] ));
            }
            //
          }
        }
        // Loop over Trial Functions
        if ( fillMatrix ) {
          /*
          for(j=0;j < 2; j++) {
            if (StandardMap->MyGID(row)) {
              column=OverlapMap->GID(ne+j);
              jac=basis.wt*basis.dx*(
                    basis.phi[j]/dt*basis.phi[i]
                    +(1.0/(basis.dx*basis.dx))*diffCoef*basis.dphide[j]*basis.dphide[i]
                    + basis.phi[i] * ( (beta+1.0)*basis.phi[j]
                    - 2.0*basis.uu*basis.phi[j]*basis.ddep[id_spec]) );
              ierr=A->SumIntoGlobalValues(row, 1, &jac, &column);
            }
          }
          */
        }
      }
    }
  }

  // Apply Dirichlet BC for Temperature problem only (for now); this implies
  // no-flux across domain boundary for all species.
  if( getName() == tempFieldName ) {
    // Insert Boundary Conditions and modify Jacobian and function (F)
    // U(0)=1
    if (MyPID==0) {
      if ( fillF )
        (*rhs)[0]= (*soln)[0] - alpha;
      if ( fillMatrix ) {
        int column=0;
        double jac=1.0;
        A->ReplaceGlobalValues(0, 1, &jac, &column);
        column=1;
        jac=0.0;
        A->ReplaceGlobalValues(0, 1, &jac, &column);
      }
    }
    // U(1)=1
    if ( StandardMap->LID(StandardMap->MaxAllGID()) >= 0 ) {
      int lastDof = StandardMap->LID(StandardMap->MaxAllGID());
      if ( fillF )
        (*rhs)[lastDof] = (*soln)[lastDof] - alpha;
      if ( fillMatrix ) {
        int row=StandardMap->MaxAllGID();
        int column = row;
        double jac = 1.0;
        A->ReplaceGlobalValues(row, 1, &jac, &column);
        jac=0.0;
        column--;
        A->ReplaceGlobalValues(row, 1, &jac, &column);
      }
    }
  }

  // Sync up processors to be safe
  Comm->Barrier();

  A->FillComplete();

#ifdef DEBUG
  A->Print(cout);

  if( fillF )
    std::cout << "For residual fill :" << std::endl << *rhs << std::endl;

  if( fillMatrix ) {
    std::cout << "For jacobian fill :" << std::endl;
    A->Print(cout);
  }
#endif

  // Cleanup
  for( int i = 0; i < numDep; ++i) {
    delete [] ddep[i];
    delete    dep[i];
  }

  delete [] srcTerm;

  return true;
}
Basis Basis::orthonormalized() const { Basis c = *this; c.orthonormalize(); return c; }
Basis Basis::inverse() const { Basis inv = *this; inv.invert(); return inv; }
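These one-liners (together with scaled() and scaled_local() further down) all use the same copy-mutate-return pattern, so each const helper stays a thin wrapper over its in-place counterpart. A small hypothetical usage sketch, assuming a Godot-style Basis/Vector3 as in the surrounding snippets:

// Hypothetical helper; Basis and Vector3 are assumed to behave like the
// Godot math types used elsewhere in this file.
Basis align_and_scale(const Basis &src, const Vector3 &scale) {
	Basis b = src.orthonormalized(); // strip accumulated scale/shear, keep the rotation
	// For the pure rotation part, inverse() and transposed() coincide:
	//   b.inverse() == b.transposed()
	return b.scaled(scale); // re-apply a known scale
}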
// Matrix and Residual Fills
bool Pitchfork_FiniteElementProblem::evaluate(FillType f,
                          const Epetra_Vector* soln,
                          Epetra_Vector* tmp_rhs,
                          Epetra_RowMatrix* tmp_matrix,
                          double jac_coeff,
                          double mass_coeff)
{
  flag = f;

  // Set the incoming linear objects
  if (flag == F_ONLY) {
    rhs = tmp_rhs;
  }
  else if (flag == MATRIX_ONLY) {
    A = dynamic_cast<Epetra_CrsMatrix*> (tmp_matrix);
    assert(A != NULL);
  }
  else if (flag == ALL) {
    rhs = tmp_rhs;
    A = dynamic_cast<Epetra_CrsMatrix*> (tmp_matrix);
    assert(A != NULL);
  }
  else {
    std::cout << "ERROR: Pitchfork_FiniteElementProblem::fillMatrix() - FillType flag is broken" << std::endl;
    throw;
  }

  // Create the overlapped solution and position vectors
  Epetra_Vector u(*OverlapMap);
  Epetra_Vector x(*OverlapMap);

  // Export Solution to Overlap vector
  u.Import(*soln, *Importer, Insert);

  // Declare required variables
  int i,j,ierr;
  int OverlapNumMyElements = OverlapMap->NumMyElements();

  int OverlapMinMyGID;
  if (MyPID==0) OverlapMinMyGID = StandardMap->MinMyGID();
  else OverlapMinMyGID = StandardMap->MinMyGID()-1;

  int row, column;
  double jac;
  double xx[2];
  double uu[2];
  Basis basis;

  // Create the nodal coordinates
  double Length=2.0;
  double dx=Length/((double) NumGlobalElements-1);
  for (i=0; i < OverlapNumMyElements; i++) {
    x[i]=-1.0 + dx*((double) OverlapMinMyGID+i);
  }

  // Zero out the objects that will be filled
  if ((flag == MATRIX_ONLY) || (flag == ALL)) {
    i = A->PutScalar(0.0);
    assert(i == 0);
  }
  if ((flag == F_ONLY) || (flag == ALL)) {
    i = rhs->PutScalar(0.0);
    assert(i == 0);
  }

  // Loop Over # of Finite Elements on Processor
  for (int ne=0; ne < OverlapNumMyElements-1; ne++) {

    // Loop Over Gauss Points
    for(int gp=0; gp < 2; gp++) {
      // Get the solution and coordinates at the nodes
      xx[0]=x[ne];
      xx[1]=x[ne+1];
      uu[0]=u[ne];
      uu[1]=u[ne+1];
      // Calculate the basis function at the gauss point
      basis.getBasis(gp, xx, uu);

      // Loop over Nodes in Element
      for (i=0; i< 2; i++) {
        row=OverlapMap->GID(ne+i);
        //printf("Proc=%d GlobalRow=%d LocalRow=%d Owned=%d\n",
        //       MyPID, row, ne+i,StandardMap.MyGID(row));
        if (StandardMap->MyGID(row)) {
          if ((flag == F_ONLY) || (flag == ALL)) {
            (*rhs)[StandardMap->LID(OverlapMap->GID(ne+i))]+=
              +basis.wt*basis.dx
              *((-1.0/(basis.dx*basis.dx))*basis.duu*
                basis.dphide[i]-source_term(basis.uu)*basis.phi[i]);
          }
        }
        // Loop over Trial Functions
        if ((flag == MATRIX_ONLY) || (flag == ALL)) {
          for(j=0;j < 2; j++) {
            if (StandardMap->MyGID(row)) {
              column=OverlapMap->GID(ne+j);
              jac=jac_coeff*basis.wt*basis.dx*
                ((-1.0/(basis.dx*basis.dx))*basis.dphide[j]*basis.dphide[i]
                 -source_deriv(basis.uu)*basis.phi[j]*basis.phi[i])
                + mass_coeff*basis.wt*basis.dx*basis.phi[j]*basis.phi[i];
              ierr=A->SumIntoGlobalValues(row, 1, &jac, &column);
              assert(ierr == 0);
            }
          }
        }
      }
    }
  }

  // Insert Boundary Conditions and modify Jacobian and function (F)
  // U(-1)=beta
  if (MyPID==0) {
    if ((flag == F_ONLY) || (flag == ALL))
      (*rhs)[0]= (*soln)[0] - beta;
    if ((flag == MATRIX_ONLY) || (flag == ALL)) {
      column=0;
      jac=1.0*jac_coeff;
      A->ReplaceGlobalValues(0, 1, &jac, &column);
      column=1;
      jac=0.0*jac_coeff;
      A->ReplaceGlobalValues(0, 1, &jac, &column);
    }
  }

  // U(1)=beta
  if ( StandardMap->LID(StandardMap->MaxAllGID()) >= 0 ) {
    int lastDof = StandardMap->LID(StandardMap->MaxAllGID());
    if ((flag == F_ONLY) || (flag == ALL))
      (*rhs)[lastDof] = (*soln)[lastDof] - beta;
    if ((flag == MATRIX_ONLY) || (flag == ALL)) {
      int row = StandardMap->MaxAllGID();
      column = row;
      jac=1.0*jac_coeff;
      A->ReplaceGlobalValues(row, 1, &jac, &column);
      column=row-1;
      jac=0.0*jac_coeff;
      A->ReplaceGlobalValues(row, 1, &jac, &column);
    }
  }

  // Sync up processors to be safe
  Comm->Barrier();

  A->FillComplete();

  return true;
}
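A sketch of the weak form this element loop appears to assemble (my reconstruction from the residual and Jacobian expressions above; phi_i are the linear shape functions, f = source_term, f' = source_deriv, and the 1/dx^2 factors are the reference-to-physical mapping of derivatives):

    R_i = \sum_e \int_e \bigl( -\,u'\,\varphi_i' \;-\; f(u)\,\varphi_i \bigr)\,dx,
    \qquad
    J_{ij} = \mathrm{jac\_coeff}\sum_e \int_e \bigl( -\,\varphi_j'\varphi_i' - f'(u)\,\varphi_j\varphi_i \bigr)\,dx
           \;+\; \mathrm{mass\_coeff}\sum_e \int_e \varphi_j\varphi_i\,dx ,

i.e. a Galerkin discretization of u'' + f(u) = 0 on [-1, 1] with Dirichlet values beta at both ends, plus an optional shifted mass matrix.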
void GDAPI godot_basis_set_axis_angle_scale(godot_basis *p_self, const godot_vector3 *p_axis, godot_real p_phi, const godot_vector3 *p_scale) {
	Basis *self = (Basis *)p_self;
	const Vector3 *axis = (const Vector3 *)p_axis;
	const Vector3 *scale = (const Vector3 *)p_scale;
	self->set_axis_angle_scale(*axis, p_phi, *scale);
}
void MobileVRInterface::set_position_from_sensors() {
	_THREAD_SAFE_METHOD_

	// this is a helper function that attempts to adjust our transform using our 9dof sensors
	// 9dof is a misleading marketing term coming from 3 accelerometer axis + 3 gyro axis + 3 magnetometer axis = 9 axis
	// but in reality this only offers 3 dof (yaw, pitch, roll) orientation

	uint64_t ticks = OS::get_singleton()->get_ticks_usec();
	uint64_t ticks_elapsed = ticks - last_ticks;
	float delta_time = (double)ticks_elapsed / 1000000.0;

	// few things we need
	Input *input = Input::get_singleton();
	Vector3 down(0.0, -1.0, 0.0); // Down is Y negative
	Vector3 north(0.0, 0.0, 1.0); // North is Z positive

	// make copies of our inputs
	bool has_grav = false;
	Vector3 acc = input->get_accelerometer();
	Vector3 gyro = input->get_gyroscope();
	Vector3 grav = input->get_gravity();
	Vector3 magneto = scale_magneto(input->get_magnetometer()); // this may be overkill on iOS because we're already getting a calibrated magnetometer reading

	if (sensor_first) {
		sensor_first = false;
	} else {
		acc = scrub(acc, last_accerometer_data, 2, 0.2);
		magneto = scrub(magneto, last_magnetometer_data, 3, 0.3);
	};

	last_accerometer_data = acc;
	last_magnetometer_data = magneto;

	if (grav.length() < 0.1) {
		// not ideal but use our accelerometer; this will contain shaky user behaviour
		// maybe look into some math but I'm guessing that if this isn't available, it's because we lack the gyro sensor to actually work out
		// what a stable gravity vector is
		grav = acc;
		if (grav.length() > 0.1) {
			has_grav = true;
		};
	} else {
		has_grav = true;
	};

	bool has_magneto = magneto.length() > 0.1;
	if (gyro.length() > 0.1) {
		/* this can return to 0.0 if the user doesn't move the phone, so once on, it's on */
		has_gyro = true;
	};

	if (has_gyro) {
		// start with applying our gyro (do NOT smooth our gyro!)
		Basis rotate;
		rotate.rotate(orientation.get_axis(0), gyro.x * delta_time);
		rotate.rotate(orientation.get_axis(1), gyro.y * delta_time);
		rotate.rotate(orientation.get_axis(2), gyro.z * delta_time);
		orientation = rotate * orientation;

		tracking_state = ARVRInterface::ARVR_NORMAL_TRACKING;
	};

	///@TODO improve this, the magnetometer is very fidgety, sometimes flipping the axis for no apparent reason (probably a bug on my part)
	// if you have a gyro + accelerometer, that combo tends to be better than combining all three, but without a gyro you need the magnetometer..
	if (has_magneto && has_grav && !has_gyro) {
		// convert to quaternions, easier to smooth those out
		Quat transform_quat(orientation);
		Quat acc_mag_quat(combine_acc_mag(grav, magneto));
		transform_quat = transform_quat.slerp(acc_mag_quat, 0.1);
		orientation = Basis(transform_quat);

		tracking_state = ARVRInterface::ARVR_NORMAL_TRACKING;
	} else if (has_grav) {
		// use gravity vector to make sure down is down...
		// transform gravity into our world space
		grav.normalize();
		Vector3 grav_adj = orientation.xform(grav);
		float dot = grav_adj.dot(down);
		if ((dot > -1.0) && (dot < 1.0)) {
			// axis around which we have this rotation
			Vector3 axis = grav_adj.cross(down);
			axis.normalize();

			Basis drift_compensation(axis, acos(dot) * delta_time * 10);
			orientation = drift_compensation * orientation;
		};
	};

	// JIC
	orientation.orthonormalize();

	last_ticks = ticks;
};
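The gravity branch at the end is a small proportional correction that rotates the estimated orientation so the measured gravity direction lines up with world "down"; in my notation (the gain 10*delta_time is straight from the code, the rest is my reading of it):

    \hat g_{\rm adj} = R\,\hat g, \qquad
    \theta = \arccos(\hat g_{\rm adj}\cdot\hat d), \qquad
    \hat n = \frac{\hat g_{\rm adj}\times\hat d}{\lVert \hat g_{\rm adj}\times\hat d\rVert}, \qquad
    R \leftarrow R_{\hat n}\!\left(10\,\theta\,\Delta t\right) R ,

where R is orientation, g-hat the normalized gravity reading, and d-hat = (0, -1, 0).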
void setSchreyerMultipliers(const Basis& basis) {
  MonoVector schreyer(monoid());
  for (size_t gen = 0; gen < basis.size(); ++gen)
    schreyer.push_back(basis.getPoly(gen)->getLeadMonomial());
  setSchreyerMultipliers(std::move(schreyer));
}
void GDAPI godot_basis_set_quat(godot_basis *p_self, const godot_quat *p_quat) {
	Basis *self = (Basis *)p_self;
	const Quat *quat = (const Quat *)p_quat;
	self->set_quat(*quat);
}
// Print out the basis set specification in the format:
// (example is C atom in the 3-21G basis set)
// BASIS: 3-21G
// Total no. of cgbfs: 9
// Total no. of prims: 9
// ===============
//  Specification
// ===============
// Atom    Shell   #CGBFs  #Prims
// ......................................
// C       s       3       6
//         p       6       3
// (if full = true, then continue with this)
// ===============
// Basis Functions
// ===============
// Atom    Shell   BF   Coeff        Exponent
// .................................................
// C       s       1    0.0617669    172.2560
//                      0.358794     25.91090
//                      0.700713     5.533350
// etc...
void Logger::print(Basis& b, bool full) const
{
  // Collect the data needed for printing
  int nbfs = b.getNBFs(); // Store number of cgbfs and prims
  int nprims = 0;
  Vector qs = b.getCharges();

  // Sort the qs and get rid of duplicates
  qs.sort();
  Vector qtemp(qs.size());
  qtemp[0] = qs(0);
  int k = 1;
  for (int i = 1; i < qs.size(); i++){
    if (qs(i) != qtemp[k-1]){
      qtemp[k] = qs(i);
      k++;
    }
  }
  qs = qtemp;
  qs.resizeCopy(k);

  // Now sum over all basis functions to get the number of prims
  Vector c(3);
  c[0] = 0.0; c[1] = 0.0; c[2] = 0.0;
  BF bftemp(c, 0, 0, 0, c, c);
  for (int i = 0; i < nbfs; i++){
    bftemp = b.getBF(i);
    nprims += bftemp.getNPrims();
  }

  // Start printing
  title("Basis Set");
  outfile << "BASIS: " << b.getName() << "\n";
  outfile << "Total no. of cgbfs: " << nbfs << "\n";
  outfile << "Total no. of prims: " << nprims << "\n";
  title("Specification");
  outfile << std::setw(8) << "Atom";
  outfile << std::setw(8) << "Shell";
  outfile << std::setw(8) << "#CGBFs";
  outfile << std::setw(8) << "#Prims\n";
  outfile << std::string(35, '.') << "\n";

  // loop over the atom types
  outfile << std::setprecision(2);
  Vector subshells;
  Vector sublnums;
  for (int i = 0; i < k; i++){
    int nc = 0;
    int np = 0;
    outfile << std::setw(8) << getAtomName(qs(i));
    subshells = b.getShells(qs[i]);
    sublnums = b.getLnums(qs[i]);
    outfile << std::setw(8) << getShellName(sublnums[0]);
    outfile << std::setw(8) << subshells[0];
    for (int j = 0; j < subshells[0]; j++){
      np += b.getBF(qs[i], j).getNPrims();
    }
    nc += subshells[0];
    outfile << std::setw(8) << np << "\n";
    for (int j = 1; j < subshells.size(); j++){
      outfile << std::setw(8) << "";
      outfile << std::setw(8) << getShellName(sublnums[j]);
      outfile << std::setw(8) << subshells[j];
      np = 0;
      for (int l = 0; l < subshells[j]; l++){
        np += b.getBF(qs[i], nc + l).getNPrims();
      }
      nc += subshells[j];
      outfile << std::setw(8) << np << "\n";
    }
  }
  outfile << std::setprecision(8);

  // Now print out basis functions if required
  if (full) {
    title("Basis Functions");
    outfile << std::setw(8) << "Atom";
    outfile << std::setw(8) << "Shell";
    outfile << std::setw(5) << "BF";
    outfile << std::setw(18) << "Coeff";
    outfile << std::setw(18) << "Exponent\n";
    outfile << std::string(58, '.') << "\n";

    // Loop over all the basis functions
    Vector subshell;
    Vector sublnums;
    Vector coeffs;
    Vector exps;
    std::string filler = "";
    for (int i = 0; i < k; i++){
      subshell = b.getShells(qs(i));
      sublnums = b.getLnums(qs(i));

      // Loop over shells
      int sum = 0;
      for (int r = 0; r < subshell.size(); r++){

        // Loop over bfs
        for (int s = 0; s < subshell[r]; s++){
          bftemp = b.getBF(qs(i), s+sum);
          coeffs = bftemp.getCoeffs();
          exps = bftemp.getExps();

          // Loop over coeffs/exps
          for (int t = 0; t < coeffs.size(); t++){
            filler = ((r == 0 && s==0 && t==0) ? getAtomName(qs[i]) : "");
            outfile << std::setw(8) << filler;
            filler = ((s == 0 && t == 0) ? getShellName(sublnums[r]) : "");
            outfile << std::setw(8) << filler;
            filler = (t == 0 ? std::to_string(s+1) : "");
            outfile << std::setw(5) << filler;
            outfile << std::setw(18) << std::setprecision(8) << coeffs(t);
            outfile << std::setw(18) << std::setprecision(8) << exps(t) << "\n";
          }
        }
        sum += subshell[r];
      }
    }
  }
}
// A fill specialized to the single node at the coupling interface
void ConvDiff_PDE::computeHeatFlux( const Epetra_Vector * soln )
{
  int numDep = depProblems.size();

  // Create the overlapped solution and position vectors
  Epetra_Vector u(*OverlapMap);
  Epetra_Vector uold(*OverlapMap);
  std::vector<Epetra_Vector*> dep(numDep);
  for( int i = 0; i < numDep; ++i)
    dep[i] = new Epetra_Vector(*OverlapMap);
  Epetra_Vector xvec(*OverlapMap);

  // Export Solution to Overlap vector
  // If the vector to be used in the fill is already in the Overlap form,
  // we simply need to map on-processor from column-space indices to
  // OverlapMap indices. Note that the old solution is simply fixed data that
  // needs to be sent to an OverlapMap (ghosted) vector. The conditional
  // treatment for the current solution vector arises from use of
  // FD coloring in parallel.
  uold.Import(*oldSolution, *Importer, Insert);
  for( int i = 0; i < numDep; ++i )
    (*dep[i]).Import(*( (*(depSolutions.find(depProblems[i]))).second ), *Importer, Insert);
  xvec.Import(*xptr, *Importer, Insert);

  if( NULL == soln )
    u.Import(*initialSolution, *Importer, Insert);
  else
    u.Import(*soln, *Importer, Insert);

  // Declare required variables
  int row;
  double * xx    = new double[2];
  double * uu    = new double[2];
  double * uuold = new double[2];
  std::vector<double*> ddep(numDep);
  for( int i = 0; i < numDep; ++i)
    ddep[i] = new double[2];
  Basis basis;

  // Bundle up the dependent variables in the way needed for computing
  // the source terms of each reaction
  map<string, double*> depVars;
  depVars.insert( pair< std::string, double*>(getName(), uu) );
  for( int i = 0; i < numDep; ++i )
    depVars.insert( pair<string, double*>(myManager->getProblemName(depProblems[i]), ddep[i]) );

  myFlux = 0.0;

  // Loop Over Gauss Points
  for( int gp = 0; gp < 2; ++gp )
  {
    // Get the solution and coordinates at the nodes
    xx[0] = xvec[interface_elem];
    xx[1] = xvec[interface_elem+1];
    uu[0] = u[interface_elem];
    uu[1] = u[interface_elem+1];
    uuold[0] = uold[interface_elem];
    uuold[1] = uold[interface_elem+1];
    for( int i = 0; i < numDep; ++i )
    {
      ddep[i][0] = (*dep[i])[interface_elem];
      ddep[i][1] = (*dep[i])[interface_elem+1];
    }

    // Calculate the basis function and variables at the gauss points
    basis.getBasis(gp, xx, uu, uuold, ddep);

    row = OverlapMap->GID( interface_elem + local_node );

    if( StandardMap->MyGID(row) )
    {
      myFlux +=
        + basis.wt * basis.dx
        * ( peclet * (basis.duu / basis.dx) * basis.phi[local_node]
        +   kappa * (1.0/(basis.dx*basis.dx)) * basis.duu * basis.dphide[local_node] );
    }
  }

  // Sync up processors to be safe
  Comm->Barrier();

  // Cleanup
  for( int i = 0; i < numDep; ++i)
  {
    delete [] ddep[i];
    delete    dep[i];
  }

  delete [] xx    ;
  delete [] uu    ;
  delete [] uuold ;

  //int lastDof = StandardMap->LID(StandardMap->MaxAllGID());
  //cout << "\t\"" << myName << "\" u[0] = " << u[0]
  //     << "\tu[N] = " << u[lastDof] << std::endl;
  //cout << u << std::endl;
  //cout << "\t\"" << myName << "\" myFlux = " << myFlux << std::endl << std::endl;

  // Scale domain integration according to interface position
  myFlux *= dirScale;

  // Now add radiation contribution to flux
  myFlux += radiation * ( pow(u[interface_node], 4) - pow(u[opposite_node], 4) );

  return;
}
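As far as I can reconstruct it from the quadrature expression, the quantity accumulated in myFlux is the weak-form heat flux at the coupling node of the interface element plus a radiative exchange term (symbols mine; h = basis.dx, phi_l the shape function of local_node, sigma_r the radiation coefficient):

    q \;=\; \mathrm{dirScale}\sum_{gp} w\,h\left( \mathrm{Pe}\,\frac{u'}{h}\,\varphi_\ell
          \;+\; \kappa\,\frac{u'}{h^{2}}\,\frac{d\varphi_\ell}{d\xi} \right)
       \;+\; \sigma_r\bigl(u_{\rm int}^4 - u_{\rm opp}^4\bigr).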
// Matrix and Residual Fills
bool ConvDiff_PDE::evaluate(
           NOX::Epetra::Interface::Required::FillType flag,
           const Epetra_Vector * soln,
           Epetra_Vector * rhs)
{
  if( rhs == 0 )
  {
    std::string msg = "ERROR: ConvDiff_PDE::evaluate : callback appears to be other than a residual fill. Others are not supported for this type.";
    throw msg;
  }

  int numDep = depProblems.size();

  // Create the overlapped solution and position vectors
  Epetra_Vector u(*OverlapMap);
  Epetra_Vector uold(*OverlapMap);
  std::vector<Epetra_Vector*> dep(numDep);
  for( int i = 0; i < numDep; ++i)
    dep[i] = new Epetra_Vector(*OverlapMap);
  Epetra_Vector xvec(*OverlapMap);

  // Export Solution to Overlap vector
  // If the vector to be used in the fill is already in the Overlap form,
  // we simply need to map on-processor from column-space indices to
  // OverlapMap indices. Note that the old solution is simply fixed data that
  // needs to be sent to an OverlapMap (ghosted) vector. The conditional
  // treatment for the current solution vector arises from use of
  // FD coloring in parallel.
  uold.Import(*oldSolution, *Importer, Insert);
  for( int i = 0; i < numDep; ++i )
    (*dep[i]).Import(*( (*(depSolutions.find(depProblems[i]))).second ), *Importer, Insert);
  xvec.Import(*xptr, *Importer, Insert);

  if( flag == NOX::Epetra::Interface::Required::FD_Res)
    // Overlap vector for solution received from FD coloring, so simply reorder
    // on processor
    u.Export(*soln, *ColumnToOverlapImporter, Insert);
  else // Communication to Overlap vector is needed
    u.Import(*soln, *Importer, Insert);

  // Declare required variables
  int OverlapNumMyNodes = OverlapMap->NumMyElements();

  int OverlapMinMyNodeGID;
  if (MyPID==0) OverlapMinMyNodeGID = StandardMap->MinMyGID();
  else OverlapMinMyNodeGID = StandardMap->MinMyGID()-1;

  int row;
  double * xx    = new double[2];
  double * uu    = new double[2];
  double * uuold = new double[2];
  std::vector<double*> ddep(numDep);
  for( int i = 0; i < numDep; ++i)
    ddep[i] = new double[2];
  Basis basis;

  // Bundle up the dependent variables in the way needed for computing
  // the source terms of each reaction
  map<string, double*> depVars;
  depVars.insert( pair< std::string, double*>(getName(), uu) );
  for( int i = 0; i < numDep; ++i )
    depVars.insert( pair<string, double*>(myManager->getProblemName(depProblems[i]), ddep[i]) );

  // Zero out the objects that will be filled
  rhs->PutScalar(0.0);

  // Loop Over # of Finite Elements on Processor
  for( int ne = 0; ne < OverlapNumMyNodes-1; ++ne )
  {
    // Loop Over Gauss Points
    for( int gp = 0; gp < 2; ++gp )
    {
      // Get the solution and coordinates at the nodes
      xx[0] = xvec[ne];
      xx[1] = xvec[ne+1];
      uu[0] = u[ne];
      uu[1] = u[ne+1];
      uuold[0] = uold[ne];
      uuold[1] = uold[ne+1];
      for( int i = 0; i < numDep; ++i )
      {
        ddep[i][0] = (*dep[i])[ne];
        ddep[i][1] = (*dep[i])[ne+1];
      }

      // Calculate the basis function and variables at the gauss points
      basis.getBasis(gp, xx, uu, uuold, ddep);

      // Loop over Nodes in Element
      for( int i = 0; i < 2; ++i )
      {
        row = OverlapMap->GID(ne+i);
        if( StandardMap->MyGID(row) )
        {
          (*rhs)[StandardMap->LID(OverlapMap->GID(ne+i))] +=
            + basis.wt * basis.dx
            * ( peclet * (basis.duu / basis.dx) * basis.phi[i]
            +   kappa * (1.0/(basis.dx*basis.dx)) * basis.duu * basis.dphide[i] );
        }
      }
    }
  }

  //if( NOX::Epetra::Interface::Required::Residual == flag )
  //{
  //  int lastDof = StandardMap->LID(StandardMap->MaxAllGID());
  //  std::cout << "\t\"" << myName << "\" u[0] = " << (*soln)[0]
  //            << "\tu[N] = " << (*soln)[lastDof] << std::endl;
  //  std::cout << "\t\"" << myName << "\" RHS[0] = " << (*rhs)[0]
  //            << "\tRHS[N] = " << (*rhs)[lastDof] << std::endl << std::endl;
  //}

  // Apply BCs
  computeHeatFlux( soln );

  double bcResidual = bcWeight * (myFlux - depProbPtr->getHeatFlux() )
                      - (1.0 - bcWeight) * (u[interface_node] - depProbPtr->getInterfaceTemp() );

  int lastDof = StandardMap->LID(StandardMap->MaxAllGID());

  // "Left" boundary
  if( LEFT == myInterface ) // this may break in parallel
  {
    (*rhs)[0]       = bcResidual;
    (*rhs)[lastDof] = (*soln)[lastDof] - Tright;
  }
  // "Right" boundary
  else
  {
    (*rhs)[0]       = (*soln)[0] - Tleft;
    (*rhs)[lastDof] = bcResidual;
  }

  // Sync up processors to be safe
  Comm->Barrier();

  A->FillComplete();

#ifdef DEBUG
  std::cout << "For residual fill :" << std::endl << *rhs << std::endl;
#endif

  // Cleanup
  for( int i = 0; i < numDep; ++i)
  {
    delete [] ddep[i];
    delete    dep[i];
  }

  delete [] xx    ;
  delete [] uu    ;
  delete [] uuold ;

  return true;
}
void GridMapEditor::_duplicate_paste() {
	if (!selection.active)
		return;

	int idx = options->get_popup()->get_item_index(MENU_OPTION_DUPLICATE_SELECTS);
	bool reselect = options->get_popup()->is_item_checked( idx );

	List< __Item > items;

	Basis rot;
	rot.set_orthogonal_index(selection.duplicate_rot);

	for(int i=selection.begin.x;i<=selection.end.x;i++) {
		for(int j=selection.begin.y;j<=selection.end.y;j++) {
			for(int k=selection.begin.z;k<=selection.end.z;k++) {
				int itm = node->get_cell_item(i,j,k);
				if (itm==GridMap::INVALID_CELL_ITEM)
					continue;
				int orientation = node->get_cell_item_orientation(i,j,k);
				__Item item;
				Vector3 rel=Vector3(i,j,k)-selection.begin;
				rel = rot.xform(rel);

				Basis orm;
				orm.set_orthogonal_index(orientation);
				orm = rot * orm;

				item.pos=selection.begin+rel;
				item.item=itm;
				item.rot=orm.get_orthogonal_index();
				items.push_back(item);
			}
		}
	}

	Vector3 ofs=selection.current-selection.click;
	if (items.size()) {
		undo_redo->create_action("GridMap Duplicate Selection");
		for(List< __Item >::Element *E=items.front();E;E=E->next()) {
			__Item &it=E->get();
			Vector3 pos = it.pos+ofs;

			undo_redo->add_do_method(node,"set_cell_item",pos.x,pos.y,pos.z,it.item,it.rot);
			undo_redo->add_undo_method(node,"set_cell_item",pos.x,pos.y,pos.z,node->get_cell_item(pos.x,pos.y,pos.z),node->get_cell_item_orientation(pos.x,pos.y,pos.z));
		}
		undo_redo->commit_action();
	}

	if (reselect) {
		selection.begin+=ofs;
		selection.end+=ofs;
		selection.click=selection.begin;
		selection.current=selection.end;
		_validate_selection();
	}
}
Basis Basis::scaled_local(const Vector3 &p_scale) const { Basis b; b.set_diagonal(p_scale); return (*this) * b; }
void GridMapEditor::_menu_option(int p_option) {

	switch(p_option) {

		case MENU_OPTION_CONFIGURE: {

		} break;
		case MENU_OPTION_LOCK_VIEW: {
			int index=options->get_popup()->get_item_index(MENU_OPTION_LOCK_VIEW);
			lock_view=!options->get_popup()->is_item_checked(index);

			options->get_popup()->set_item_checked(index,lock_view);
		} break;
		case MENU_OPTION_CLIP_DISABLED:
		case MENU_OPTION_CLIP_ABOVE:
		case MENU_OPTION_CLIP_BELOW: {
			clip_mode=ClipMode(p_option-MENU_OPTION_CLIP_DISABLED);
			for(int i=0;i<3;i++) {
				int index=options->get_popup()->get_item_index(MENU_OPTION_CLIP_DISABLED+i);
				options->get_popup()->set_item_checked(index,i==clip_mode);
			}
			_update_clip();
		} break;
		case MENU_OPTION_X_AXIS:
		case MENU_OPTION_Y_AXIS:
		case MENU_OPTION_Z_AXIS: {
			int new_axis = p_option-MENU_OPTION_X_AXIS;
			for(int i=0;i<3;i++) {
				int idx=options->get_popup()->get_item_index(MENU_OPTION_X_AXIS+i);
				options->get_popup()->set_item_checked(idx,i==new_axis);
			}
			edit_axis=Vector3::Axis(new_axis);
			update_grid();
			_update_clip();
		} break;
		case MENU_OPTION_CURSOR_ROTATE_Y: {
			Basis r;
			if (input_action==INPUT_DUPLICATE) {
				r.set_orthogonal_index(selection.duplicate_rot);
				r.rotate(Vector3(0,1,0),-Math_PI/2.0);
				selection.duplicate_rot=r.get_orthogonal_index();
				_update_duplicate_indicator();
				break;
			}
			r.set_orthogonal_index(cursor_rot);
			r.rotate(Vector3(0,1,0),-Math_PI/2.0);
			cursor_rot=r.get_orthogonal_index();
			_update_cursor_transform();
		} break;
		case MENU_OPTION_CURSOR_ROTATE_X: {
			Basis r;
			if (input_action==INPUT_DUPLICATE) {
				r.set_orthogonal_index(selection.duplicate_rot);
				r.rotate(Vector3(1,0,0),-Math_PI/2.0);
				selection.duplicate_rot=r.get_orthogonal_index();
				_update_duplicate_indicator();
				break;
			}
			r.set_orthogonal_index(cursor_rot);
			r.rotate(Vector3(1,0,0),-Math_PI/2.0);
			cursor_rot=r.get_orthogonal_index();
			_update_cursor_transform();
		} break;
		case MENU_OPTION_CURSOR_ROTATE_Z: {
			Basis r;
			if (input_action==INPUT_DUPLICATE) {
				r.set_orthogonal_index(selection.duplicate_rot);
				r.rotate(Vector3(0,0,1),-Math_PI/2.0);
				selection.duplicate_rot=r.get_orthogonal_index();
				_update_duplicate_indicator();
				break;
			}
			r.set_orthogonal_index(cursor_rot);
			r.rotate(Vector3(0,0,1),-Math_PI/2.0);
			cursor_rot=r.get_orthogonal_index();
			_update_cursor_transform();
		} break;
		case MENU_OPTION_CURSOR_BACK_ROTATE_Y: {
			Basis r;
			r.set_orthogonal_index(cursor_rot);
			r.rotate(Vector3(0,1,0),Math_PI/2.0);
			cursor_rot=r.get_orthogonal_index();
			_update_cursor_transform();
		} break;
		case MENU_OPTION_CURSOR_BACK_ROTATE_X: {
			Basis r;
			r.set_orthogonal_index(cursor_rot);
			r.rotate(Vector3(1,0,0),Math_PI/2.0);
			cursor_rot=r.get_orthogonal_index();
			_update_cursor_transform();
		} break;
		case MENU_OPTION_CURSOR_BACK_ROTATE_Z: {
			Basis r;
			r.set_orthogonal_index(cursor_rot);
			r.rotate(Vector3(0,0,1),Math_PI/2.0);
			cursor_rot=r.get_orthogonal_index();
			_update_cursor_transform();
		} break;
		case MENU_OPTION_CURSOR_CLEAR_ROTATION: {
			if (input_action==INPUT_DUPLICATE) {
				selection.duplicate_rot=0;
				_update_duplicate_indicator();
				break;
			}
			cursor_rot=0;
			_update_cursor_transform();
		} break;
		case MENU_OPTION_DUPLICATE_SELECTS: {
			int idx = options->get_popup()->get_item_index(MENU_OPTION_DUPLICATE_SELECTS);
			options->get_popup()->set_item_checked( idx, !options->get_popup()->is_item_checked( idx ) );
		} break;
		case MENU_OPTION_SELECTION_MAKE_AREA:
		case MENU_OPTION_SELECTION_MAKE_EXTERIOR_CONNECTOR: {
			if (!selection.active)
				break;
			int area = node->get_unused_area_id();
			Error err = node->create_area(area,Rect3(selection.begin,selection.end-selection.begin+Vector3(1,1,1)));
			if (err!=OK) {
			}
			if (p_option==MENU_OPTION_SELECTION_MAKE_EXTERIOR_CONNECTOR) {
				node->area_set_exterior_portal(area,true);
			}
			_update_areas_display();
			update_areas();
		} break;
		case MENU_OPTION_REMOVE_AREA: {
			if (selected_area<1)
				return;
			node->erase_area(selected_area);
			_update_areas_display();
			update_areas();
		} break;
		case MENU_OPTION_SELECTION_DUPLICATE:
			if (!(selection.active && input_action==INPUT_NONE))
				return;
			if (last_mouseover==Vector3(-1,-1,-1)) // not mouse-overing anything
				break;

			input_action=INPUT_DUPLICATE;
			selection.click=last_mouseover;
			selection.current=last_mouseover;
			selection.duplicate_rot=0;
			_update_duplicate_indicator();
			break;
		case MENU_OPTION_SELECTION_CLEAR: {
			if (!selection.active)
				return;

			_delete_selection();

		} break;
		case MENU_OPTION_GRIDMAP_SETTINGS: {
			settings_dialog->popup_centered(settings_vbc->get_combined_minimum_size() + Size2(50, 50));
		} break;
	}
}
void GDAPI godot_basis_set_quat_scale(godot_basis *p_self, const godot_quat *p_quat, const godot_vector3 *p_scale) {
	Basis *self = (Basis *)p_self;
	const Quat *quat = (const Quat *)p_quat;
	const Vector3 *scale = (const Vector3 *)p_scale;
	self->set_quat_scale(*quat, *scale);
}
// ---------------------------
// - Matrix and Residual Fills
// ---------------------------
bool evaluate(NOX::Epetra::Interface::Required::FillType flag,
              const Epetra_Vector* soln,
              Epetra_Vector* tmp_rhs,
              Epetra_RowMatrix* tmp_matrix)
{
  // Determine what to fill (F or Jacobian)
  bool fillF = false;
  bool fillMatrix = false;
  if (tmp_rhs != 0) {
    fillF = true;
    rhs = tmp_rhs;
  }
  else {
    fillMatrix = true;
  }

  // "flag" can be used to determine how accurate your fill of F should be
  // depending on why we are calling evaluate (Could be using computeF to
  // populate a Jacobian or Preconditioner).
  if (flag == NOX::Epetra::Interface::Required::Residual) {
    // Do nothing for now
  }
  else if (flag == NOX::Epetra::Interface::Required::Jac) {
    // Do nothing for now
  }
  else if (flag == NOX::Epetra::Interface::Required::Prec) {
    // Do nothing for now
  }
  else if (flag == NOX::Epetra::Interface::Required::User) {
    // Do nothing for now
  }

  // Create the overlapped solution and position vectors
  Epetra_Vector u(*OverlapMap);
  Epetra_Vector uold(*OverlapMap);
  Epetra_Vector xvec(*OverlapMap);

  // Export Solution to Overlap vector
  u.Import(*soln, *Importer, Insert);
  uold.Import(*oldSolution, *Importer, Insert);
  xvec.Import(*xptr, *Importer, Insert);

  // Declare required variables
  int OverlapNumMyElements = OverlapMap->NumMyElements();
  int row, column;
  double jac;
  double xx[2];
  double uu[2];
  double uuold[2];
  Basis basis;

  // Zero out the objects that will be filled
  if (fillF)
    rhs->PutScalar(0.0);
  if (fillMatrix)
    jacobian->PutScalar(0.0);

  // Loop Over # of Finite Elements on Processor
  for (int ne=0; ne < OverlapNumMyElements-1; ne++) {

    // Loop Over Gauss Points
    for(int gp=0; gp < 2; gp++) {
      // Get the solution and coordinates at the nodes
      xx[0]=xvec[ne];
      xx[1]=xvec[ne+1];
      uu[0]=u[ne];
      uu[1]=u[ne+1];
      uuold[0]=uold[ne];
      uuold[1]=uold[ne+1];
      // Calculate the basis function at the gauss point
      basis.computeBasis(gp, xx, uu, uuold);

      // Loop over Nodes in Element
      for (int i=0; i< 2; i++) {
        row=OverlapMap->GID(ne+i);
        //printf("Proc=%d GlobalRow=%d LocalRow=%d Owned=%d\n",
        //       MyPID, row, ne+i,StandardMap.MyGID(row));
        if (StandardMap->MyGID(row)) {
          if (fillF) {
            (*rhs)[StandardMap->LID(OverlapMap->GID(ne+i))]+=
              +basis.wt*basis.dx*(
                (basis.uu-basis.uuold)/dt*basis.phi[i]
                + (1.0/(basis.dx*basis.dx))*basis.duu*basis.dphide[i]
                - 8.0/factor/factor*basis.uu*basis.uu*(1.0-basis.uu)*basis.phi[i]);
          }
        }
        // Loop over Trial Functions
        if (fillMatrix) {
          for(int j=0;j < 2; j++) {
            if (StandardMap->MyGID(row)) {
              column=OverlapMap->GID(ne+j);
              jac=basis.wt*basis.dx*(
                    basis.phi[j]/dt*basis.phi[i]
                    + (1.0/(basis.dx*basis.dx))*basis.dphide[j]*basis.dphide[i]
                    - 8.0/factor/factor*(2.0*basis.uu-3.0*basis.uu*basis.uu)*basis.phi[j]*basis.phi[i]);
              jacobian->SumIntoGlobalValues(row, 1, &jac, &column);
            }
          }
        }
      }
    }
  }

  // Insert Boundary Conditions and modify Jacobian and function (F)
  // U(0)=1
  if (MyPID==0) {
    if (fillF)
      (*rhs)[0]= (*soln)[0] - 1.0;
    if (fillMatrix) {
      column=0;
      jac=1.0;
      jacobian->ReplaceGlobalValues(0, 1, &jac, &column);
      column=1;
      jac=0.0;
      jacobian->ReplaceGlobalValues(0, 1, &jac, &column);
    }
  }

  // Insert Boundary Conditions and modify Jacobian and function (F)
  // U(xmax)=0
  if (MyPID==NumProc-1) {
    if (fillF)
      (*rhs)[NumMyElements-1]= (*soln)[NumMyElements-1] - 0.0;
    if (fillMatrix) {
      row=NumGlobalElements-1;
      column=row;
      jac=1.0;
      jacobian->ReplaceGlobalValues(row, 1, &jac, &column);
      column--;
      jac=0.0;
      jacobian->ReplaceGlobalValues(row, 1, &jac, &column);
    }
  }

  // Sync up processors to be safe
  Comm->Barrier();

  jacobian->FillComplete();

  return true;
}
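For orientation, the element residual summed above corresponds (as I read it) to an implicit-Euler, Galerkin discretization of a reaction-diffusion equation with a cubic source; in my notation, with gamma = factor,

    R_i = \sum_e \int_e \left[ \frac{u-u^{\rm old}}{\Delta t}\,\varphi_i
        + u'\,\varphi_i'
        - \frac{8}{\gamma^2}\,u^2(1-u)\,\varphi_i \right] dx,
    \qquad
    J_{ij} = \frac{\partial R_i}{\partial u_j},

and the Jacobian entries in the code are the derivative of that bracket with respect to the nodal values, the 1/dx^2 factors again being the reference-to-physical derivative mapping.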
int main(int argc, char * argv[]) {

  srand(time(NULL));

  // Let us define a 3 layer perceptron architecture
  auto input  = gaml::mlp::input<X>(INPUT_DIM, fillInput);
  auto l1     = gaml::mlp::layer(input, HIDDEN_LAYER_SIZE, gaml::mlp::mlp_sigmoid(), gaml::mlp::mlp_dsigmoid());
  auto l2     = gaml::mlp::layer(l1, HIDDEN_LAYER_SIZE, gaml::mlp::mlp_sigmoid(), gaml::mlp::mlp_dsigmoid());
  auto output = gaml::mlp::layer(l2, OUTPUT_DIM, gaml::mlp::mlp_identity(), gaml::mlp::mlp_didentity());
  auto mlp    = gaml::mlp::perceptron(output, output_of);

  // Create a training base
  // Let us try to fit a noisy sinc function
  Basis basis;
  basis.resize(NB_SAMPLES);
  for(auto& d: basis) {
    d.first = {{ -10.0 + 20.0 * gaml::random::uniform(0.0, 1.0) }};
    d.second = noisy_oracle(d.first);
  }

  // Set up the parameters for learning the MLP with a gradient descent
  gaml::mlp::learner::gradient::parameter gradient_params;
  gradient_params.alpha = 1e-2;
  gradient_params.dalpha = 1e-3;
  gradient_params.verbose = true;

  // The stopping criteria
  gradient_params.max_iter = 10000;
  gradient_params.min_dparams = 1e-7;

  // Create the learner
  auto learning_algorithm = gaml::mlp::learner::gradient::algorithm(mlp, gradient_params, gaml::mlp::loss::Quadratic(), fillOutput);

  // Call the learner on the basis and get the learned predictor
  auto predictor = learning_algorithm(basis.begin(), basis.end(), input_of_data, output_of_data);

  // Print out the structure of the perceptron we learned
  std::cout << predictor << std::endl;

  // Dump the results
  std::ofstream outfile("example-005-samples.data");
  for(auto& b: basis)
    outfile << b.first[0] << " " << b.second[0] << " " << std::endl;
  outfile.close();

  outfile.open("example-005-regression.data");
  X x;
  for(x[0] = -10; x[0] < 10; x[0] += 0.1) {
    auto output = predictor(x);
    outfile << x[0] << " " << oracle(x)[0] << " " << output[0] << std::endl;
  }
  outfile.close();

  std::cout << "You can plot the results using gnuplot :" << std::endl;
  std::cout << "gnuplot " << ML_MLP_SHAREDIR << "/plot-example-005.gplot" << std::endl;
  std::cout << "This will produce example-005.ps" << std::endl;

  // Let us compute the empirical risk.
  auto evaluator = gaml::risk::empirical(gaml::mlp::loss::Quadratic());
  double risk = evaluator(predictor, basis.begin(), basis.end(), input_of_data, output_of_data);
  std::cout << "Empirical risk = " << risk << std::endl;

  // We will use a 6-fold cross-validation to estimate the real risk.
  auto kfold_evaluator = gaml::risk::cross_validation(gaml::mlp::loss::Quadratic(), gaml::partition::kfold(6), true);
  double kfold_risk = kfold_evaluator(learning_algorithm, basis.begin(), basis.end(), input_of_data, output_of_data);
  std::cout << "Estimation of the real risk (6-fold): " << kfold_risk << std::endl;
}
void GDAPI godot_basis_set_row(godot_basis *p_self, const godot_int p_row, const godot_vector3 *p_value) {
	Basis *self = (Basis *)p_self;
	const Vector3 *value = (const Vector3 *)p_value;
	self->set_row(p_row, *value);
}
Basis Basis::scaled( const Vector3& p_scale ) const { Basis m = *this; m.scale(p_scale); return m; }
void GDAPI godot_basis_set_euler_scale(godot_basis *p_self, const godot_vector3 *p_euler, const godot_vector3 *p_scale) {
	Basis *self = (Basis *)p_self;
	const Vector3 *euler = (const Vector3 *)p_euler;
	const Vector3 *scale = (const Vector3 *)p_scale;
	self->set_euler_scale(*euler, *scale);
}
void basis_sort(Integer *basis) { Basis *basisObj = Basis_find_object(*basis); basisObj->sort(); }