void VTKXMLExportModule :: exportPrimVarAs(UnknownType valID, IntArray &mapG2L, IntArray &mapL2G, int regionDofMans, int ireg, #ifdef __VTK_MODULE vtkSmartPointer<vtkUnstructuredGrid> &stream, #else FILE *stream, #endif TimeStep *tStep) { Domain *d = emodel->giveDomain(1); InternalStateValueType type = ISVT_UNDEFINED; if ( ( valID == DisplacementVector ) || ( valID == EigenVector ) || ( valID == VelocityVector ) ) { type = ISVT_VECTOR; } else if ( ( valID == FluxVector ) || ( valID == PressureVector ) || ( valID == Temperature ) ) { type = ISVT_SCALAR; } else { OOFEM_ERROR2( "VTKXMLExportModule::exportPrimVarAs: unsupported UnknownType %s", __UnknownTypeToString(valID) ); } #ifdef __VTK_MODULE vtkSmartPointer<vtkDoubleArray> primVarArray = vtkSmartPointer<vtkDoubleArray>::New(); primVarArray->SetName(__UnknownTypeToString(valID)); if ( type == ISVT_SCALAR ) { primVarArray->SetNumberOfComponents(1); } else if ( type == ISVT_VECTOR ) { primVarArray->SetNumberOfComponents(3); } else { fprintf( stderr, "VTKXMLExportModule::exportPrimVarAs: unsupported variable type %s\n", __UnknownTypeToString(valID) ); } primVarArray->SetNumberOfTuples(regionDofMans); #else if ( type == ISVT_SCALAR ) { fprintf( stream, "<DataArray type=\"Float64\" Name=\"%s\" format=\"ascii\"> ", __UnknownTypeToString(valID) ); } else if ( type == ISVT_VECTOR ) { fprintf( stream, "<DataArray type=\"Float64\" Name=\"%s\" NumberOfComponents=\"3\" format=\"ascii\"> ", __UnknownTypeToString(valID) ); } else { fprintf( stderr, "VTKXMLExportModule::exportPrimVarAs: unsupported variable type %s\n", __UnknownTypeToString(valID) ); } #endif DofManager *dman; FloatArray iVal, iValLCS; for ( int inode = 1; inode <= regionDofMans; inode++ ) { dman = d->giveNode( mapL2G.at(inode) ); this->getPrimaryVariable(iVal, dman, tStep, valID, ireg); if ( type == ISVT_SCALAR ) { #ifdef __VTK_MODULE primVarArray->SetTuple1(inode-1, iVal.at(1)); #else fprintf(stream, "%e ", iVal.at(1) ); #endif } else if ( type == ISVT_VECTOR ) { //rotate back from nodal CS to global CS if applies if ( (dman->giveClassID() == NodeClass) && d->giveNode( dman->giveNumber() )->hasLocalCS() ) { iVal.resize(3); iValLCS = iVal; iVal.beTProductOf(* d->giveNode( dman->giveNumber() )->giveLocalCoordinateTriplet(), iValLCS); } #ifdef __VTK_MODULE primVarArray->SetTuple3(inode-1, iVal.at(1), iVal.at(2), iVal.at(3)); #else fprintf( stream, "%e %e %e ", iVal.at(1), iVal.at(2), iVal.at(3) ); #endif } } // end loop over nodes #ifdef __VTK_MODULE if ( type == ISVT_SCALAR ) { stream->GetPointData()->SetActiveScalars(__UnknownTypeToString(valID)); stream->GetPointData()->SetScalars(primVarArray); } else if ( type == ISVT_VECTOR ) { stream->GetPointData()->SetActiveVectors(__UnknownTypeToString(valID)); stream->GetPointData()->SetVectors(primVarArray); } #else fprintf(stream, "</DataArray>\n"); #endif }
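For reference, the local-to-global rotation applied above to vector unknowns can be written out explicitly. With G the matrix returned by giveLocalCoordinateTriplet() (whose rows are assumed here to hold the nodal base vectors in global components, which is what the transpose product implies), the beTProductOf call computes

$$ \mathbf{u}_{\mathrm{glob}} = \mathbf{G}^{\mathsf T}\,\mathbf{u}_{\mathrm{loc}} , $$

so the exported components are always expressed in the global coordinate system.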
void SolutionbasedShapeFunction :: loadProblem()
{
    for ( int i = 0; i < this->domain->giveNumberOfSpatialDimensions(); i++ ) {
        OOFEM_LOG_INFO("************************** Instantiating microproblem from file %s for dimension %u\n", filename.c_str(), i);

        // Set up and solve problem
        OOFEMTXTDataReader drMicro( filename.c_str() );
        EngngModel *myEngngModel = InstanciateProblem(& drMicro, _processor, 0, NULL, false);
        drMicro.finish();
        myEngngModel->checkProblemConsistency();
        myEngngModel->initMetaStepAttributes( myEngngModel->giveMetaStep(1) );
        thisTimestep = myEngngModel->giveNextStep();
        myEngngModel->init();
        this->setLoads(myEngngModel, i + 1);

        // Check for over-constrained elements
        for ( int j = 1; j <= myEngngModel->giveDomain(1)->giveNumberOfElements(); j++ ) {
            Element *e = myEngngModel->giveDomain(1)->giveElement(j);
            FloatArray centerCoord;
            int vlockCount = 0;
            centerCoord.resize(3);
            centerCoord.zero();
            for ( int k = 1; k <= e->giveNumberOfDofManagers(); k++ ) {
                DofManager *dman = e->giveDofManager(k);
                centerCoord.add( * dman->giveCoordinates() );
                for ( Dof *dof: *dman ) {
                    if ( dof->giveBcId() != 0 ) {
                        vlockCount++;
                    }
                }
            }
            if ( vlockCount == 30 ) {
                OOFEM_WARNING("Element over-constrained (%u)! Center coordinate: %f, %f, %f\n",
                              e->giveNumber(), centerCoord.at(1) / 10, centerCoord.at(2) / 10, centerCoord.at(3) / 10);
            }
        }

        myEngngModel->solveYourselfAt(thisTimestep);
        isLoaded = true;

        // Set correct export filename
        std :: string originalFilename;
        originalFilename = myEngngModel->giveOutputBaseFileName();
        if ( i == 0 ) {
            originalFilename = originalFilename + "_X";
        }
        if ( i == 1 ) {
            originalFilename = originalFilename + "_Y";
        }
        if ( i == 2 ) {
            originalFilename = originalFilename + "_Z";
        }
        myEngngModel->letOutputBaseFileNameBe(originalFilename + "_1_Base");
        myEngngModel->doStepOutput(thisTimestep);

        modeStruct *mode = new(modeStruct);
        mode->myEngngModel = myEngngModel;

        // Check elements
        // Set unknowns to the mean value of opposite sides of the domain.
        // Loop through all nodes and compute phi for all degrees of freedom on the boundary. Save phi in a list for later use.
        initializeSurfaceData(mode);

        // Update with factor
        double am = 1.0, ap = 1.0;
        computeCorrectionFactors(* mode, & dofs, & am, & ap);
        OOFEM_LOG_INFO("Correction factors: am=%f, ap=%f\n", am, ap);
        mode->ap = ap;
        mode->am = am;
        updateModelWithFactors(mode);
        // myEngngModel->letOutputBaseFileNameBe(originalFilename + "_2_Updated");

        modes.push_back(mode);

        OOFEM_LOG_INFO("************************** Microproblem at %p instantiated \n", myEngngModel);
    }
}
void NonLinearDynamic :: proceedStep(int di, TimeStep *tStep) { // creates system of governing eq's and solves them at given time step // first assemble problem at current time step int neq = this->giveNumberOfDomainEquations(1, EModelDefaultEquationNumbering()); // Time-stepping constants this->determineConstants(tStep); if ( ( tStep->giveNumber() == giveNumberOfFirstStep() ) && initFlag ) { // Initialization incrementOfDisplacement.resize(neq); incrementOfDisplacement.zero(); totalDisplacement.resize(neq); totalDisplacement.zero(); velocityVector.resize(neq); velocityVector.zero(); accelerationVector.resize(neq); accelerationVector.zero(); internalForces.resize(neq); internalForces.zero(); previousIncrementOfDisplacement.resize(neq); previousIncrementOfDisplacement.zero(); previousTotalDisplacement.resize(neq); previousTotalDisplacement.zero(); previousVelocityVector.resize(neq); previousVelocityVector.zero(); previousAccelerationVector.resize(neq); previousAccelerationVector.zero(); previousInternalForces.resize(neq); previousInternalForces.zero(); TimeStep *stepWhenIcApply = new TimeStep(giveNumberOfTimeStepWhenIcApply(), this, 0, -deltaT, deltaT, 0); int nDofs, j, k, jj; int nman = this->giveDomain(di)->giveNumberOfDofManagers(); DofManager *node; Dof *iDof; // Considering initial conditions. for ( j = 1; j <= nman; j++ ) { node = this->giveDomain(di)->giveDofManager(j); nDofs = node->giveNumberOfDofs(); for ( k = 1; k <= nDofs; k++ ) { // Ask for initial values obtained from // bc (boundary conditions) and ic (initial conditions). iDof = node->giveDof(k); if ( !iDof->isPrimaryDof() ) { continue; } jj = iDof->__giveEquationNumber(); if ( jj ) { totalDisplacement.at(jj) = iDof->giveUnknown(VM_Total, stepWhenIcApply); velocityVector.at(jj) = iDof->giveUnknown(VM_Velocity, stepWhenIcApply); accelerationVector.at(jj) = iDof->giveUnknown(VM_Acceleration, stepWhenIcApply); } } } this->giveInternalForces(internalForces, true, di, tStep); } if ( initFlag ) { // First assemble problem at current time step. // Option to take into account initial conditions. 
if ( !effectiveStiffnessMatrix ) { effectiveStiffnessMatrix = classFactory.createSparseMtrx(sparseMtrxType); massMatrix = classFactory.createSparseMtrx(sparseMtrxType); } if ( effectiveStiffnessMatrix == NULL || massMatrix == NULL ) { _error("proceedStep: sparse matrix creation failed"); } if ( nonlocalStiffnessFlag ) { if ( !effectiveStiffnessMatrix->isAsymmetric() ) { _error("proceedStep: effectiveStiffnessMatrix does not support asymmetric storage"); } } effectiveStiffnessMatrix->buildInternalStructure( this, di, EID_MomentumBalance, EModelDefaultEquationNumbering() ); massMatrix->buildInternalStructure( this, di, EID_MomentumBalance, EModelDefaultEquationNumbering() ); // Assemble mass matrix this->assemble(massMatrix, tStep, EID_MomentumBalance, MassMatrix, EModelDefaultEquationNumbering(), this->giveDomain(di)); // Initialize vectors help.resize(neq); help.zero(); rhs.resize(neq); rhs.zero(); rhs2.resize(neq); rhs2.zero(); previousIncrementOfDisplacement.resize(neq); previousTotalDisplacement.resize(neq); previousVelocityVector.resize(neq); previousAccelerationVector.resize(neq); previousInternalForces.resize(neq); for ( int i = 1; i <= neq; i++ ) { previousIncrementOfDisplacement.at(i) = incrementOfDisplacement.at(i); previousTotalDisplacement.at(i) = totalDisplacement.at(i); previousVelocityVector.at(i) = velocityVector.at(i); previousAccelerationVector.at(i) = accelerationVector.at(i); previousInternalForces.at(i) = internalForces.at(i); } forcesVector.resize(neq); forcesVector.zero(); totIterations = 0; initFlag = 0; } #ifdef VERBOSE OOFEM_LOG_DEBUG("Assembling load\n"); #endif // Assemble the incremental reference load vector. this->assembleIncrementalReferenceLoadVectors(incrementalLoadVector, incrementalLoadVectorOfPrescribed, refLoadInputMode, this->giveDomain(di), EID_MomentumBalance, tStep); // Assembling the effective load vector for ( int i = 1; i <= neq; i++ ) { help.at(i) = a2 * previousVelocityVector.at(i) + a3 * previousAccelerationVector.at(i) + eta * ( a4 * previousVelocityVector.at(i) + a5 * previousAccelerationVector.at(i) + a6 * previousIncrementOfDisplacement.at(i) ); } massMatrix->times(help, rhs); if ( delta != 0 ) { for ( int i = 1; i <= neq; i++ ) { help.at(i) = delta * ( a4 * previousVelocityVector.at(i) + a5 * previousAccelerationVector.at(i) + a6 * previousIncrementOfDisplacement.at(i) ); } this->timesMtrx(help, rhs2, TangentStiffnessMatrix, this->giveDomain(di), tStep); help.zero(); for ( int i = 1; i <= neq; i++ ) { rhs.at(i) += rhs2.at(i); } } for ( int i = 1; i <= neq; i++ ) { rhs.at(i) += incrementalLoadVector.at(i) - previousInternalForces.at(i); } // // Set-up numerical model. // this->giveNumericalMethod( this->giveCurrentMetaStep() ); // // Call numerical model to solve problem. 
//
double loadLevel = 1.0;

if ( totIterations == 0 ) {
    incrementOfDisplacement.zero();
}

if ( initialLoadVector.isNotEmpty() ) {
    numMetStatus = nMethod->solve(effectiveStiffnessMatrix, & rhs, & initialLoadVector,
                                  & totalDisplacement, & incrementOfDisplacement, & forcesVector,
                                  internalForcesEBENorm, loadLevel, refLoadInputMode, currentIterations, tStep);
} else {
    numMetStatus = nMethod->solve(effectiveStiffnessMatrix, & rhs, NULL,
                                  & totalDisplacement, & incrementOfDisplacement, & forcesVector,
                                  internalForcesEBENorm, loadLevel, refLoadInputMode, currentIterations, tStep);
}

for ( int i = 1; i <= neq; i++ ) {
    rhs.at(i)  = previousVelocityVector.at(i);
    rhs2.at(i) = previousAccelerationVector.at(i);
    accelerationVector.at(i) = a0 * incrementOfDisplacement.at(i) - a2 * rhs.at(i) - a3 * rhs2.at(i);
    velocityVector.at(i)     = a1 * incrementOfDisplacement.at(i) - a4 * rhs.at(i) - a5 * rhs2.at(i)
                               - a6 * previousIncrementOfDisplacement.at(i);
}

totIterations += currentIterations;
}
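The final loop above is the time-integration update performed once the displacement increment has converged: with v_n, a_n the previous velocity and acceleration (copied into rhs and rhs2), Δu the converged increment and Δu_{n-1} the previous increment, the code evaluates

$$ \mathbf{a}_{n+1} = a_0\,\Delta\mathbf{u} - a_2\,\mathbf{v}_n - a_3\,\mathbf{a}_n , \qquad \mathbf{v}_{n+1} = a_1\,\Delta\mathbf{u} - a_4\,\mathbf{v}_n - a_5\,\mathbf{a}_n - a_6\,\Delta\mathbf{u}_{n-1} , $$

which is the standard velocity/acceleration update of the Newmark-type schemes whose constants a_0 ... a_6 are set in determineConstants().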
void TrPlaneStress2dXFEM :: giveCompositeExportData(std::vector< VTKPiece > &vtkPieces, IntArray &primaryVarsToExport, IntArray &internalVarsToExport, IntArray cellVarsToExport, TimeStep *tStep) { vtkPieces.resize(1); const int numCells = mSubTri.size(); if(numCells == 0) { // Enriched but uncut element // Visualize as a quad vtkPieces[0].setNumberOfCells(1); int numTotalNodes = 3; vtkPieces[0].setNumberOfNodes(numTotalNodes); // Node coordinates std :: vector< FloatArray >nodeCoords; for(int i = 1; i <= 3; i++) { FloatArray &x = *(giveDofManager(i)->giveCoordinates()); nodeCoords.push_back(x); vtkPieces[0].setNodeCoords(i, x); } // Connectivity IntArray nodes1 = {1, 2, 3}; vtkPieces[0].setConnectivity(1, nodes1); // Offset int offset = 3; vtkPieces[0].setOffset(1, offset); // Cell types vtkPieces[0].setCellType(1, 5); // Linear triangle // Export nodal variables from primary fields vtkPieces[0].setNumberOfPrimaryVarsToExport(primaryVarsToExport.giveSize(), numTotalNodes); for ( int fieldNum = 1; fieldNum <= primaryVarsToExport.giveSize(); fieldNum++ ) { UnknownType type = ( UnknownType ) primaryVarsToExport.at(fieldNum); for ( int nodeInd = 1; nodeInd <= numTotalNodes; nodeInd++ ) { if ( type == DisplacementVector ) { // compute displacement FloatArray u = {0.0, 0.0, 0.0}; // Fetch global coordinates (in undeformed configuration) const FloatArray &x = nodeCoords[nodeInd-1]; // Compute local coordinates FloatArray locCoord; computeLocalCoordinates(locCoord, x); // Compute displacement in point FloatMatrix NMatrix; computeNmatrixAt(locCoord, NMatrix); FloatArray solVec; computeVectorOf(VM_Total, tStep, solVec); FloatArray uTemp; uTemp.beProductOf(NMatrix, solVec); if(uTemp.giveSize() == 3) { u = uTemp; } else { u = {uTemp[0], uTemp[1], 0.0}; } vtkPieces[0].setPrimaryVarInNode(fieldNum, nodeInd, u); } else { printf("fieldNum: %d\n", fieldNum); // TODO: Implement // ZZNodalRecoveryMI_recoverValues(values, layer, ( InternalStateType ) 1, tStep); // does not work well - fix // for ( int j = 1; j <= numCellNodes; j++ ) { // vtkPiece.setPrimaryVarInNode(fieldNum, nodeNum, values [ j - 1 ]); // nodeNum += 1; // } } } } // Export nodal variables from internal fields vtkPieces[0].setNumberOfInternalVarsToExport(0, numTotalNodes); // Export cell variables vtkPieces[0].setNumberOfCellVarsToExport(cellVarsToExport.giveSize(), 1); for ( int i = 1; i <= cellVarsToExport.giveSize(); i++ ) { InternalStateType type = ( InternalStateType ) cellVarsToExport.at(i); FloatArray average; std :: unique_ptr< IntegrationRule > &iRule = integrationRulesArray [ 0 ]; VTKXMLExportModule :: computeIPAverage(average, iRule.get(), this, type, tStep); FloatArray averageV9(9); averageV9.at(1) = average.at(1); averageV9.at(5) = average.at(2); averageV9.at(9) = average.at(3); averageV9.at(6) = averageV9.at(8) = average.at(4); averageV9.at(3) = averageV9.at(7) = average.at(5); averageV9.at(2) = averageV9.at(4) = average.at(6); vtkPieces[0].setCellVar( i, 1, averageV9 ); } // Export of XFEM related quantities if ( domain->hasXfemManager() ) { XfemManager *xMan = domain->giveXfemManager(); int nEnrIt = xMan->giveNumberOfEnrichmentItems(); vtkPieces[0].setNumberOfInternalXFEMVarsToExport(xMan->vtkExportFields.giveSize(), nEnrIt, numTotalNodes); const int nDofMan = giveNumberOfDofManagers(); for ( int field = 1; field <= xMan->vtkExportFields.giveSize(); field++ ) { XFEMStateType xfemstype = ( XFEMStateType ) xMan->vtkExportFields [ field - 1 ]; for ( int enrItIndex = 1; enrItIndex <= nEnrIt; enrItIndex++ ) { EnrichmentItem *ei = 
xMan->giveEnrichmentItem(enrItIndex); for ( int nodeInd = 1; nodeInd <= numTotalNodes; nodeInd++ ) { const FloatArray &x = nodeCoords[nodeInd-1]; FloatArray locCoord; computeLocalCoordinates(locCoord, x); FloatArray N; FEInterpolation *interp = giveInterpolation(); interp->evalN( N, locCoord, FEIElementGeometryWrapper(this) ); if ( xfemstype == XFEMST_LevelSetPhi ) { double levelSet = 0.0, levelSetInNode = 0.0; for(int elNodeInd = 1; elNodeInd <= nDofMan; elNodeInd++) { DofManager *dMan = giveDofManager(elNodeInd); ei->evalLevelSetNormalInNode(levelSetInNode, dMan->giveGlobalNumber(), *(dMan->giveCoordinates()) ); levelSet += N.at(elNodeInd)*levelSetInNode; } FloatArray valueArray = {levelSet}; vtkPieces[0].setInternalXFEMVarInNode(field, enrItIndex, nodeInd, valueArray); } else if ( xfemstype == XFEMST_LevelSetGamma ) { double levelSet = 0.0, levelSetInNode = 0.0; for(int elNodeInd = 1; elNodeInd <= nDofMan; elNodeInd++) { DofManager *dMan = giveDofManager(elNodeInd); ei->evalLevelSetTangInNode(levelSetInNode, dMan->giveGlobalNumber(), *(dMan->giveCoordinates()) ); levelSet += N.at(elNodeInd)*levelSetInNode; } FloatArray valueArray = {levelSet}; vtkPieces[0].setInternalXFEMVarInNode(field, enrItIndex, nodeInd, valueArray); } else if ( xfemstype == XFEMST_NodeEnrMarker ) { double nodeEnrMarker = 0.0, nodeEnrMarkerInNode = 0.0; for(int elNodeInd = 1; elNodeInd <= nDofMan; elNodeInd++) { DofManager *dMan = giveDofManager(elNodeInd); ei->evalNodeEnrMarkerInNode(nodeEnrMarkerInNode, dMan->giveGlobalNumber() ); nodeEnrMarker += N.at(elNodeInd)*nodeEnrMarkerInNode; } FloatArray valueArray = {nodeEnrMarker}; vtkPieces[0].setInternalXFEMVarInNode(field, enrItIndex, nodeInd, valueArray); } } } } } } else { // Enriched and cut element XfemStructuralElementInterface::giveSubtriangulationCompositeExportData(vtkPieces, primaryVarsToExport, internalVarsToExport, cellVarsToExport, tStep); } }
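The XFEM export loops above evaluate the level-set and enrichment-marker fields at the export points by plain nodal interpolation: with N_i the element shape functions evaluated at the local coordinate of the export point and φ_i the nodal values queried from the enrichment item, each branch computes

$$ \varphi(\mathbf{x}) = \sum_{i=1}^{n} N_i(\mathbf{x})\,\varphi_i . $$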
void IncrementalLinearStatic :: solveYourselfAt(TimeStep *tStep) { Domain *d = this->giveDomain(1); // Creates system of governing eq's and solves them at given time step // >>> beginning PH // The following piece of code updates assignment of boundary conditions to dofs // (this allows to have multiple boundary conditions assigned to one dof // which can be arbitrarily turned on and off in time) // Almost the entire section has been copied from domain.C std :: vector< std :: map< int, int > > dof_bc( d->giveNumberOfDofManagers() ); for ( int i = 1; i <= d->giveNumberOfBoundaryConditions(); ++i ) { GeneralBoundaryCondition *gbc = d->giveBc(i); if ( gbc->isImposed(tStep) ) { if ( gbc->giveSetNumber() > 0 ) { ///@todo This will eventually not be optional. // Loop over nodes in set and store the bc number in each dof. Set *set = d->giveSet( gbc->giveSetNumber() ); ActiveBoundaryCondition *active_bc = dynamic_cast< ActiveBoundaryCondition * >(gbc); BoundaryCondition *bc = dynamic_cast< BoundaryCondition * >(gbc); if ( bc || ( active_bc && active_bc->requiresActiveDofs() ) ) { const IntArray &appliedDofs = gbc->giveDofIDs(); const IntArray &nodes = set->giveNodeList(); for ( int inode = 1; inode <= nodes.giveSize(); ++inode ) { for ( int idof = 1; idof <= appliedDofs.giveSize(); ++idof ) { if ( dof_bc [ nodes.at(inode) - 1 ].find( appliedDofs.at(idof) ) == dof_bc [ nodes.at(inode) - 1 ].end() ) { // is empty dof_bc [ nodes.at(inode) - 1 ] [ appliedDofs.at(idof) ] = i; DofManager * dofman = d->giveDofManager( nodes.at(inode) ); Dof * dof = dofman->giveDofWithID( appliedDofs.at(idof) ); dof->setBcId(i); } else { // another bc has been already prescribed at this time step to this dof OOFEM_WARNING("More than one boundary condition assigned at time %f to node %d dof %d. Considering boundary condition %d", tStep->giveTargetTime(), nodes.at(inode), appliedDofs.at(idof), dof_bc [ nodes.at(inode) - 1 ] [appliedDofs.at(idof)] ); } } } } } } } // to get proper number of equations this->forceEquationNumbering(); // <<< end PH // Initiates the total displacement to zero. if ( tStep->isTheFirstStep() ) { for ( auto &dofman : d->giveDofManagers() ) { for ( Dof *dof: *dofman ) { dof->updateUnknownsDictionary(tStep->givePreviousStep(), VM_Total, 0.); dof->updateUnknownsDictionary(tStep, VM_Total, 0.); } } for ( auto &bc : d->giveBcs() ) { ActiveBoundaryCondition *abc; if ( ( abc = dynamic_cast< ActiveBoundaryCondition * >(bc.get()) ) ) { int ndman = abc->giveNumberOfInternalDofManagers(); for ( int i = 1; i <= ndman; i++ ) { DofManager *dofman = abc->giveInternalDofManager(i); for ( Dof *dof: *dofman ) { dof->updateUnknownsDictionary(tStep->givePreviousStep(), VM_Total, 0.); dof->updateUnknownsDictionary(tStep, VM_Total, 0.); } } } } } // Apply dirichlet b.c's on total values for ( auto &dofman : d->giveDofManagers() ) { for ( Dof *dof: *dofman ) { double tot = dof->giveUnknown( VM_Total, tStep->givePreviousStep() ); if ( dof->hasBc(tStep) ) { tot += dof->giveBcValue(VM_Incremental, tStep); } dof->updateUnknownsDictionary(tStep, VM_Total, tot); } } int neq = this->giveNumberOfDomainEquations( 1, EModelDefaultEquationNumbering() ); #ifdef VERBOSE OOFEM_LOG_RELEVANT("Solving [step number %8d, time %15e, equations %d]\n", tStep->giveNumber(), tStep->giveTargetTime(), neq); #endif if ( neq == 0 ) { // Allows for fully prescribed/empty problems. 
    return;
}

incrementOfDisplacementVector.resize(neq);
incrementOfDisplacementVector.zero();

#ifdef VERBOSE
OOFEM_LOG_INFO("Assembling load\n");
#endif
// Assembling the element part of load vector
internalLoadVector.resize(neq);
internalLoadVector.zero();
this->assembleVector( internalLoadVector, tStep, InternalForceAssembler(), VM_Total,
                      EModelDefaultEquationNumbering(), this->giveDomain(1) );

loadVector.resize(neq);
loadVector.zero();
this->assembleVector( loadVector, tStep, ExternalForceAssembler(), VM_Total,
                      EModelDefaultEquationNumbering(), this->giveDomain(1) );

loadVector.subtract(internalLoadVector);
this->updateSharedDofManagers(loadVector, EModelDefaultEquationNumbering(), ReactionExchangeTag);

#ifdef VERBOSE
OOFEM_LOG_INFO("Assembling stiffness matrix\n");
#endif
stiffnessMatrix.reset( classFactory.createSparseMtrx(sparseMtrxType) );
if ( !stiffnessMatrix ) {
    OOFEM_ERROR("sparse matrix creation failed");
}
stiffnessMatrix->buildInternalStructure( this, 1, EModelDefaultEquationNumbering() );
stiffnessMatrix->zero();
this->assemble( *stiffnessMatrix, tStep, TangentAssembler(TangentStiffness),
                EModelDefaultEquationNumbering(), this->giveDomain(1) );

#ifdef VERBOSE
OOFEM_LOG_INFO("Solving ...\n");
#endif
this->giveNumericalMethod( this->giveCurrentMetaStep() );
NM_Status s = nMethod->solve(*stiffnessMatrix, loadVector, incrementOfDisplacementVector);
if ( !( s & NM_Success ) ) {
    OOFEM_ERROR("No success in solving system.");
}
}
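For orientation, the assembly and solve above amount to one step of the incremental scheme

$$ \mathbf{K}\,\Delta\mathbf{u} = \mathbf{f}^{\mathrm{ext}} - \mathbf{f}^{\mathrm{int}} , $$

with loadVector holding the right-hand side after the subtract() call and incrementOfDisplacementVector receiving Δu; the increment is then used to update the total unknowns stored in the dof dictionaries.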
void PLHoopStressCirc :: propagateInterfaces(Domain &iDomain, EnrichmentDomain &ioEnrDom) { // Fetch crack tip data TipInfo tipInfoStart, tipInfoEnd; ioEnrDom.giveTipInfos(tipInfoStart, tipInfoEnd); std :: vector< TipInfo >tipInfo = {tipInfoStart, tipInfoEnd}; SpatialLocalizer *localizer = iDomain.giveSpatialLocalizer(); for ( size_t tipIndex = 0; tipIndex < tipInfo.size(); tipIndex++ ) { // Construct circle points on an arc from -90 to 90 degrees double angle = -90.0 + mAngleInc; std :: vector< double >angles; while ( angle <= ( 90.0 - mAngleInc ) ) { angles.push_back(angle * M_PI / 180.0); angle += mAngleInc; } const FloatArray &xT = tipInfo [ tipIndex ].mGlobalCoord; const FloatArray &t = tipInfo [ tipIndex ].mTangDir; const FloatArray &n = tipInfo [ tipIndex ].mNormalDir; // It is meaningless to propagate a tip that is not inside any element Element *el = localizer->giveElementContainingPoint(tipInfo [ tipIndex ].mGlobalCoord); if ( el != NULL ) { std :: vector< FloatArray >circPoints; for ( size_t i = 0; i < angles.size(); i++ ) { FloatArray tangent(2); tangent.zero(); tangent.add(cos(angles [ i ]), t); tangent.add(sin(angles [ i ]), n); tangent.normalize(); FloatArray x(xT); x.add(mRadius, tangent); circPoints.push_back(x); } std :: vector< double >sigTTArray, sigRTArray; // Loop over circle points for ( size_t pointIndex = 0; pointIndex < circPoints.size(); pointIndex++ ) { FloatArray stressVec; if ( mUseRadialBasisFunc ) { // Interpolate stress with radial basis functions // Choose a cut-off length l: // take the distance between two nodes in the element containing the // crack tip multiplied by a constant factor. // ( This choice implies that we hope that the element has reasonable // aspect ratio.) const FloatArray &x1 = * ( el->giveDofManager(1)->giveCoordinates() ); const FloatArray &x2 = * ( el->giveDofManager(2)->giveCoordinates() ); const double l = 1.0 * x1.distance(x2); // Use the octree to get all elements that have // at least one Gauss point in a certain region around the tip. const double searchRadius = 3.0 * l; std :: set< int >elIndices; localizer->giveAllElementsWithIpWithinBox(elIndices, circPoints [ pointIndex ], searchRadius); // Loop over the elements and Gauss points obtained. // Evaluate the interpolation. 
FloatArray sumQiWiVi; double sumWiVi = 0.0; for ( int elIndex: elIndices ) { Element *gpEl = iDomain.giveElement(elIndex); IntegrationRule *iRule = gpEl->giveDefaultIntegrationRulePtr(); for ( GaussPoint *gp_i: *iRule ) { //////////////////////////////////////// // Compute global gp coordinates FloatArray N; FEInterpolation *interp = gpEl->giveInterpolation(); interp->evalN( N, * ( gp_i->giveCoordinates() ), FEIElementGeometryWrapper(gpEl) ); // Compute global coordinates of Gauss point FloatArray globalCoord(2); globalCoord.zero(); for ( int i = 1; i <= gpEl->giveNumberOfDofManagers(); i++ ) { DofManager *dMan = gpEl->giveDofManager(i); globalCoord.at(1) += N.at(i) * dMan->giveCoordinate(1); globalCoord.at(2) += N.at(i) * dMan->giveCoordinate(2); } //////////////////////////////////////// // Compute weight of kernel function FloatArray tipToGP; tipToGP.beDifferenceOf(globalCoord, xT); bool inFrontOfCrack = true; if ( tipToGP.dotProduct(t) < 0.0 ) { inFrontOfCrack = false; } double r = circPoints [ pointIndex ].distance(globalCoord); if ( r < l && inFrontOfCrack ) { double w = ( ( l - r ) / ( pow(2.0 * M_PI, 1.5) * pow(l, 3) ) ) * exp( -0.5 * pow(r, 2) / pow(l, 2) ); // Compute gp volume double V = gpEl->computeVolumeAround(gp_i); // Get stress StructuralMaterialStatus *ms = dynamic_cast< StructuralMaterialStatus * >( gp_i->giveMaterialStatus() ); if ( ms == NULL ) { OOFEM_ERROR("failed to fetch MaterialStatus."); } FloatArray stressVecGP = ms->giveStressVector(); if ( sumQiWiVi.giveSize() != stressVecGP.giveSize() ) { sumQiWiVi.resize( stressVecGP.giveSize() ); sumQiWiVi.zero(); } // Add to numerator sumQiWiVi.add(w * V, stressVecGP); // Add to denominator sumWiVi += w * V; } } } if ( fabs(sumWiVi) > 1.0e-12 ) { stressVec.beScaled(1.0 / sumWiVi, sumQiWiVi); } else { // Take stress from closest Gauss point int region = 1; bool useCZGP = false; GaussPoint &gp = * ( localizer->giveClosestIP(circPoints [ pointIndex ], region, useCZGP) ); // Compute stresses StructuralMaterialStatus *ms = dynamic_cast< StructuralMaterialStatus * >( gp.giveMaterialStatus() ); if ( ms == NULL ) { OOFEM_ERROR("failed to fetch MaterialStatus."); } stressVec = ms->giveStressVector(); } } else { // Take stress from closest Gauss point int region = 1; bool useCZGP = false; GaussPoint &gp = * ( localizer->giveClosestIP(circPoints [ pointIndex ], region, useCZGP) ); // Compute stresses StructuralMaterialStatus *ms = dynamic_cast< StructuralMaterialStatus * >( gp.giveMaterialStatus() ); if ( ms == NULL ) { OOFEM_ERROR("failed to fetch MaterialStatus."); } stressVec = ms->giveStressVector(); } FloatMatrix stress(2, 2); int shearPos = stressVec.giveSize(); stress.at(1, 1) = stressVec.at(1); stress.at(1, 2) = stressVec.at(shearPos); stress.at(2, 1) = stressVec.at(shearPos); stress.at(2, 2) = stressVec.at(2); // Rotation matrix FloatMatrix rot(2, 2); rot.at(1, 1) = cos(angles [ pointIndex ]); rot.at(1, 2) = -sin(angles [ pointIndex ]); rot.at(2, 1) = sin(angles [ pointIndex ]); rot.at(2, 2) = cos(angles [ pointIndex ]); FloatArray tRot, nRot; tRot.beProductOf(rot, t); nRot.beProductOf(rot, n); FloatMatrix rotTot(2, 2); rotTot.setColumn(tRot, 1); rotTot.setColumn(nRot, 2); FloatMatrix tmp, stressRot; tmp.beTProductOf(rotTot, stress); stressRot.beProductOf(tmp, rotTot); const double sigThetaTheta = stressRot.at(2, 2); sigTTArray.push_back(sigThetaTheta); const double sigRTheta = stressRot.at(1, 2); sigRTArray.push_back(sigRTheta); } ////////////////////////////// // Compute propagation angle // Find angles that fulfill sigRT 
= 0 const double stressTol = 1.0e-9; double maxSigTT = 0.0, maxAngle = 0.0; bool foundZeroLevel = false; for ( size_t segIndex = 0; segIndex < ( circPoints.size() - 1 ); segIndex++ ) { // If the shear stress sigRT changes sign over the segment if ( sigRTArray [ segIndex ] * sigRTArray [ segIndex + 1 ] < stressTol ) { // Compute location of zero level double xi = EnrichmentItem :: calcXiZeroLevel(sigRTArray [ segIndex ], sigRTArray [ segIndex + 1 ]); double theta = 0.5 * ( 1.0 - xi ) * angles [ segIndex ] + 0.5 * ( 1.0 + xi ) * angles [ segIndex + 1 ]; double sigThetaTheta = 0.5 * ( 1.0 - xi ) * sigTTArray [ segIndex ] + 0.5 * ( 1.0 + xi ) * sigTTArray [ segIndex + 1 ]; // printf("Found candidate: theta: %e sigThetaTheta: %e\n", theta, sigThetaTheta); if ( sigThetaTheta > maxSigTT ) { foundZeroLevel = true; maxSigTT = sigThetaTheta; maxAngle = theta; } } } if ( !foundZeroLevel ) { printf("No zero level was found.\n"); } if ( iDomain.giveXfemManager()->giveVtkDebug() ) { XFEMDebugTools :: WriteArrayToMatlab("sigTTvsAngle.m", angles, sigTTArray); XFEMDebugTools :: WriteArrayToMatlab("sigRTvsAngle.m", angles, sigRTArray); XFEMDebugTools :: WriteArrayToGnuplot("sigTTvsAngle.dat", angles, sigTTArray); XFEMDebugTools :: WriteArrayToGnuplot("sigRTvsAngle.dat", angles, sigRTArray); } // Compare with threshold if ( maxSigTT > mHoopStressThreshold && foundZeroLevel ) { // Rotation matrix FloatMatrix rot(2, 2); rot.at(1, 1) = cos(maxAngle); rot.at(1, 2) = -sin(maxAngle); rot.at(2, 1) = sin(maxAngle); rot.at(2, 2) = cos(maxAngle); FloatArray dir; dir.beProductOf(rot, tipInfo [ tipIndex ].mTangDir); // Fill up struct std :: vector< TipPropagation >tipPropagations; TipPropagation tipProp; tipProp.mTipIndex = tipIndex; tipProp.mPropagationDir = dir; tipProp.mPropagationLength = mIncrementLength; tipPropagations.push_back(tipProp); // Propagate ioEnrDom.propagateTips(tipPropagations); } } } }
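Summarizing the criterion evaluated above: for each sampling angle θ on the circle of radius mRadius around the tip, the in-plane stress tensor is rotated into the frame spanned by the rotated tangent and normal, σ' = Rᵀ σ R with R = [ t_θ | n_θ ], so that σ_θθ = σ'_22 and σ_rθ = σ'_12. The tip is propagated along the direction θ* at which the shear component changes sign (interpolated to σ_rθ(θ*) = 0) and the hoop stress is largest, provided

$$ \sigma_{\theta\theta}(\theta^{*}) > \sigma_{\mathrm{threshold}} , $$

where σ_threshold is mHoopStressThreshold and the propagation length is mIncrementLength.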
int LoadBalancer :: unpackMigratingData(Domain *d, ProcessCommunicator &pc)
{
    // create temp space for dofManagers and elements
    // merging should be made by domain ?
    // maps of new dofmanagers and elements indexed by global number

    // we can put local dofManagers and elements into maps (should be done before unpacking)
    // int nproc=this->giveEngngModel()->giveNumberOfProcesses();
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _mode, _globnum, _type;
    bool _newentry;
    classType _etype;
    IntArray _partitions, local_partitions;
    //LoadBalancer::DofManMode dmode;
    DofManager *dofman;
    DomainTransactionManager *dtm = d->giveTransactionManager();

    // **************************************************
    // Unpack migrating data from remote partition
    // **************************************************
    if ( iproc == myrank ) {
        return 1; // skip local partition
    }
    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    pcbuff->unpackInt(_type);
    // unpack dofman data
    while ( _type != LOADBALANCER_END_DATA ) {
        _etype = ( classType ) _type;
        pcbuff->unpackInt(_mode);
        switch ( _mode ) {
        case LoadBalancer :: DM_Remote:
            // receiving new local dofManager
            pcbuff->unpackInt(_globnum);
            /*
             * _newentry = false;
             * if ( ( dofman = dtm->giveDofManager(_globnum) ) == NULL ) {
             *  // data not available -> create a new one
             *  _newentry = true;
             *  dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);
             * }
             */
            _newentry = true;
            dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);
            dofman->setGlobalNumber(_globnum);
            // unpack dofman state (this is the local dofman, not available on remote)
            dofman->restoreContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // unpack list of new partitions
            pcbuff->unpackIntArray(_partitions);
            dofman->setPartitionList(& _partitions);
            dofman->setParallelMode(DofManager_local);
            // add transaction if new entry allocated; otherwise existing one has been modified via returned dofman
            if ( _newentry ) {
                dtm->addTransaction(DomainTransactionManager :: DTT_ADD, DomainTransactionManager :: DCT_DofManager, _globnum, dofman);
            }
            //dmanMap[_globnum] = dofman;
            break;

        case LoadBalancer :: DM_Shared:
            // receiving new shared dofManager, that was local on sending partition
            // should be received only once (from partition where it was local)
            pcbuff->unpackInt(_globnum);
            /*
             * _newentry = false;
             * if ( ( dofman = dtm->giveDofManager(_globnum) ) == NULL ) {
             *  // data not available -> mode should be SharedUpdate
             *  _newentry = true;
             *  dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);
             * }
             */
            _newentry = true;
            dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);
            dofman->setGlobalNumber(_globnum);
            // unpack dofman state (this is the local dofman, not available on remote)
            dofman->restoreContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // unpack list of new partitions
            pcbuff->unpackIntArray(_partitions);
            dofman->setPartitionList(& _partitions);
            dofman->setParallelMode(DofManager_shared);
#ifdef __VERBOSE_PARALLEL
            fprintf(stderr, "[%d] received Shared new dofman [%d]\n", myrank, _globnum);
#endif
            // add transaction if new entry allocated; otherwise existing one has been modified via returned dofman
            if ( _newentry ) {
                dtm->addTransaction(DomainTransactionManager :: DTT_ADD, DomainTransactionManager :: DCT_DofManager, _globnum, dofman);
            }
            //dmanMap[_globnum] = dofman;
            break;

        default:
            OOFEM_ERROR2("LoadBalancer::unpackMigratingData: unexpected dof manager type (%d)", _type);
        }

        // get next type record
        pcbuff->unpackInt(_type);
    } // while (_type != LOADBALANCER_END_DATA);

    // unpack element data
    Element *elem;
    int nrecv = 0;
    do {
        pcbuff->unpackInt(_type);
        if ( _type == LOADBALANCER_END_DATA ) {
            break;
        }
        _etype = ( classType ) _type;
        elem = CreateUsrDefElementOfType(_etype, 0, d);
        elem->restoreContext(& pcDataStream, CM_Definition | CM_State);
        elem->initForNewStep();
        dtm->addTransaction(DomainTransactionManager :: DTT_ADD, DomainTransactionManager :: DCT_Element, elem->giveGlobalNumber(), elem);
        nrecv++;
        //recvElemList.push_back(elem);
    } while ( 1 );

    OOFEM_LOG_RELEVANT("[%d] LoadBalancer:: receiving %d migrating elements from %d\n", myrank, nrecv, iproc);

    return 1;
}
void GnuplotExportModule::outputBoundaryCondition(PrescribedGradientBCWeak &iBC, TimeStep *tStep) { FloatArray stress; iBC.computeField(stress, tStep); printf("Mean stress computed in Gnuplot export module: "); stress.printYourself(); double time = 0.0; TimeStep *ts = emodel->giveCurrentStep(); if ( ts != NULL ) { time = ts->giveTargetTime(); } int bcIndex = iBC.giveNumber(); std :: stringstream strMeanStress; strMeanStress << "PrescribedGradientGnuplotMeanStress" << bcIndex << "Time" << time << ".dat"; std :: string nameMeanStress = strMeanStress.str(); std::vector<double> componentArray, stressArray; for(int i = 1; i <= stress.giveSize(); i++) { componentArray.push_back(i); stressArray.push_back(stress.at(i)); } XFEMDebugTools::WriteArrayToGnuplot(nameMeanStress, componentArray, stressArray); // Homogenized strain FloatArray grad; iBC.giveGradientVoigt(grad); outputGradient(iBC.giveNumber(), *iBC.giveDomain(), grad, tStep); #if 0 FloatArray grad; iBC.giveGradientVoigt(grad); double timeFactor = iBC.giveTimeFunction()->evaluate(ts, VM_Total); printf("timeFactor: %e\n", timeFactor ); grad.times(timeFactor); printf("Mean grad computed in Gnuplot export module: "); grad.printYourself(); std :: stringstream strMeanGrad; strMeanGrad << "PrescribedGradientGnuplotMeanGrad" << bcIndex << "Time" << time << ".dat"; std :: string nameMeanGrad = strMeanGrad.str(); std::vector<double> componentArrayGrad, gradArray; for(int i = 1; i <= grad.giveSize(); i++) { componentArrayGrad.push_back(i); gradArray.push_back(grad.at(i)); } XFEMDebugTools::WriteArrayToGnuplot(nameMeanGrad, componentArrayGrad, gradArray); #endif if(mExportBoundaryConditionsExtra) { // Traction node coordinates std::vector< std::vector<FloatArray> > nodePointArray; size_t numTracEl = iBC.giveNumberOfTractionElements(); for(size_t i = 0; i < numTracEl; i++) { std::vector<FloatArray> points; FloatArray xS, xE; iBC.giveTractionElCoord(i, xS, xE); points.push_back(xS); points.push_back(xE); nodePointArray.push_back(points); } std :: stringstream strTractionNodes; strTractionNodes << "TractionNodesGnuplotTime" << time << ".dat"; std :: string nameTractionNodes = strTractionNodes.str(); WritePointsToGnuplot(nameTractionNodes, nodePointArray); // Traction element normal direction std::vector< std::vector<FloatArray> > nodeNormalArray; for(size_t i = 0; i < numTracEl; i++) { std::vector<FloatArray> points; FloatArray n,t; iBC.giveTractionElNormal(i, n,t); points.push_back(n); points.push_back(n); nodeNormalArray.push_back(points); } std :: stringstream strTractionNodeNormals; strTractionNodeNormals << "TractionNodeNormalsGnuplotTime" << time << ".dat"; std :: string nameTractionNodeNormals = strTractionNodeNormals.str(); WritePointsToGnuplot(nameTractionNodeNormals, nodeNormalArray); // Traction (x,y) std::vector< std::vector<FloatArray> > nodeTractionArray; for(size_t i = 0; i < numTracEl; i++) { std::vector<FloatArray> tractions; FloatArray tS, tE; iBC.giveTraction(i, tS, tE, VM_Total, tStep); tractions.push_back(tS); tractions.push_back(tE); nodeTractionArray.push_back(tractions); } std :: stringstream strTractions; strTractions << "TractionsGnuplotTime" << time << ".dat"; std :: string nameTractions = strTractions.str(); WritePointsToGnuplot(nameTractions, nodeTractionArray); // Arc position along the boundary std::vector< std::vector<FloatArray> > arcPosArray; for(size_t i = 0; i < numTracEl; i++) { std::vector<FloatArray> arcPos; double xiS = 0.0, xiE = 0.0; iBC.giveTractionElArcPos(i, xiS, xiE); arcPos.push_back( FloatArray{xiS} ); 
arcPos.push_back( FloatArray{xiE} ); arcPosArray.push_back(arcPos); } std :: stringstream strArcPos; strArcPos << "ArcPosGnuplotTime" << time << ".dat"; std :: string nameArcPos = strArcPos.str(); WritePointsToGnuplot(nameArcPos, arcPosArray); // Traction (normal, tangent) std::vector< std::vector<FloatArray> > nodeTractionNTArray; for(size_t i = 0; i < numTracEl; i++) { std::vector<FloatArray> tractions; FloatArray tS, tE; iBC.giveTraction(i, tS, tE, VM_Total, tStep); FloatArray n,t; iBC.giveTractionElNormal(i, n, t); double tSn = tS.dotProduct(n,2); double tSt = tS.dotProduct(t,2); tractions.push_back( {tSn ,tSt} ); double tEn = tE.dotProduct(n,2); double tEt = tE.dotProduct(t,2); tractions.push_back( {tEn, tEt} ); nodeTractionNTArray.push_back(tractions); } std :: stringstream strTractionsNT; strTractionsNT << "TractionsNormalTangentGnuplotTime" << time << ".dat"; std :: string nameTractionsNT = strTractionsNT.str(); WritePointsToGnuplot(nameTractionsNT, nodeTractionNTArray); // Boundary points and displacements IntArray boundaries, bNodes; iBC.giveBoundaries(boundaries); std::vector< std::vector<FloatArray> > bndNodes; for ( int pos = 1; pos <= boundaries.giveSize() / 2; ++pos ) { Element *e = iBC.giveDomain()->giveElement( boundaries.at(pos * 2 - 1) ); int boundary = boundaries.at(pos * 2); e->giveInterpolation()->boundaryGiveNodes(bNodes, boundary); std::vector<FloatArray> bndSegNodes; // Add the start and end nodes of the segment DofManager *startNode = e->giveDofManager( bNodes[0] ); FloatArray xS = *(startNode->giveCoordinates()); Dof *dSu = startNode->giveDofWithID(D_u); double dU = dSu->giveUnknown(VM_Total, tStep); xS.push_back(dU); Dof *dSv = startNode->giveDofWithID(D_v); double dV = dSv->giveUnknown(VM_Total, tStep); xS.push_back(dV); bndSegNodes.push_back(xS); DofManager *endNode = e->giveDofManager( bNodes[1] ); FloatArray xE = *(endNode->giveCoordinates()); Dof *dEu = endNode->giveDofWithID(D_u); dU = dEu->giveUnknown(VM_Total, tStep); xE.push_back(dU); Dof *dEv = endNode->giveDofWithID(D_v); dV = dEv->giveUnknown(VM_Total, tStep); xE.push_back(dV); bndSegNodes.push_back(xE); bndNodes.push_back(bndSegNodes); } std :: stringstream strBndNodes; strBndNodes << "BndNodesGnuplotTime" << time << ".dat"; std :: string nameBndNodes = strBndNodes.str(); WritePointsToGnuplot(nameBndNodes, bndNodes); } }
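The normal/tangential traction file written above simply stores the projections of the traction vector onto the boundary normal and tangent,

$$ t_n = \mathbf{t}\cdot\mathbf{n} , \qquad t_t = \mathbf{t}\cdot\boldsymbol{\tau} , $$

evaluated with dotProduct(..., 2) on the two in-plane components at the start and end point of each traction element.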
void LSPrimaryVariableMapper :: mapPrimaryVariables(FloatArray &oU, Domain &iOldDom, Domain &iNewDom, ValueModeType iMode, TimeStep &iTStep) { EngngModel *engngMod = iNewDom.giveEngngModel(); EModelDefaultEquationNumbering num; const int dim = iNewDom.giveNumberOfSpatialDimensions(); int numElNew = iNewDom.giveNumberOfElements(); // Count dofs int numDofsNew = engngMod->giveNumberOfDomainEquations( 1, num ); oU.resize(numDofsNew); oU.zero(); FloatArray du(numDofsNew); du.zero(); FloatArray res(numDofsNew); #ifdef __PETSC_MODULE PetscSparseMtrx *K = dynamic_cast<PetscSparseMtrx*>( classFactory.createSparseMtrx(SMT_PetscMtrx) ); SparseLinearSystemNM *solver = classFactory.createSparseLinSolver(ST_Petsc, & iOldDom, engngMod); #else SparseMtrx *K = classFactory.createSparseMtrx(SMT_Skyline); SparseLinearSystemNM *solver = classFactory.createSparseLinSolver(ST_Direct, & iOldDom, engngMod); #endif K->buildInternalStructure( engngMod, 1, num ); int maxIter = 1; for ( int iter = 0; iter < maxIter; iter++ ) { K->zero(); res.zero(); // Contribution from elements for ( int elIndex = 1; elIndex <= numElNew; elIndex++ ) { StructuralElement *elNew = dynamic_cast< StructuralElement * >( iNewDom.giveElement(elIndex) ); if ( elNew == NULL ) { OOFEM_ERROR("Failed to cast Element new to StructuralElement."); } /////////////////////////////////// // Compute residual // Count element dofs int numElNodes = elNew->giveNumberOfDofManagers(); int numElDofs = 0; for ( int i = 1; i <= numElNodes; i++ ) { numElDofs += elNew->giveDofManager(i)->giveNumberOfDofs(); } FloatArray elRes(numElDofs); elRes.zero(); IntArray elDofsGlob; elNew->giveLocationArray( elDofsGlob, num ); // Loop over Gauss points for ( int intRuleInd = 0; intRuleInd < elNew->giveNumberOfIntegrationRules(); intRuleInd++ ) { IntegrationRule *iRule = elNew->giveIntegrationRule(intRuleInd); for ( GaussPoint *gp: *iRule ) { // New N-matrix FloatMatrix NNew; elNew->computeNmatrixAt(* ( gp->giveNaturalCoordinates() ), NNew); ////////////// // Global coordinates of GP const int nDofMan = elNew->giveNumberOfDofManagers(); FloatArray Nc; FEInterpolation *interp = elNew->giveInterpolation(); const FloatArray &localCoord = * ( gp->giveNaturalCoordinates() ); interp->evalN( Nc, localCoord, FEIElementGeometryWrapper(elNew) ); const IntArray &elNodes = elNew->giveDofManArray(); FloatArray globalCoord(dim); globalCoord.zero(); for ( int i = 1; i <= nDofMan; i++ ) { DofManager *dMan = elNew->giveDofManager(i); for ( int j = 1; j <= dim; j++ ) { globalCoord.at(j) += Nc.at(i) * dMan->giveCoordinate(j); } } ////////////// // Localize element and point in the old domain FloatArray localCoordOld(dim), pointCoordOld(dim); StructuralElement *elOld = dynamic_cast< StructuralElement * >( iOldDom.giveSpatialLocalizer()->giveElementClosestToPoint(localCoordOld, pointCoordOld, globalCoord, 0) ); if ( elOld == NULL ) { OOFEM_ERROR("Failed to cast Element old to StructuralElement."); } // Compute N-Matrix for the old element FloatMatrix NOld; elOld->computeNmatrixAt(localCoordOld, NOld); // Fetch nodal displacements for the new element FloatArray nodeDispNew( elDofsGlob.giveSize() ); int dofsPassed = 1; for ( int i = 1; i <= elNodes.giveSize(); i++ ) { DofManager *dMan = elNew->giveDofManager(i); for ( Dof *dof: *dMan ) { if ( elDofsGlob.at(dofsPassed) != 0 ) { nodeDispNew.at(dofsPassed) = oU.at( elDofsGlob.at(dofsPassed) ); } else { if ( dof->hasBc(& iTStep) ) { nodeDispNew.at(dofsPassed) = dof->giveBcValue(iMode, & iTStep); } } dofsPassed++; } } FloatArray newDisp; 
newDisp.beProductOf(NNew, nodeDispNew); // Fetch nodal displacements for the old element FloatArray nodeDispOld; dofsPassed = 1; IntArray elDofsGlobOld; elOld->giveLocationArray( elDofsGlobOld, num ); // elOld->computeVectorOf(iMode, &(iTStep), nodeDisp); int numElNodesOld = elOld->giveNumberOfDofManagers(); for(int nodeIndOld = 1; nodeIndOld <= numElNodesOld; nodeIndOld++) { DofManager *dManOld = elOld->giveDofManager(nodeIndOld); for ( Dof *dof: *dManOld ) { if ( elDofsGlobOld.at(dofsPassed) != 0 ) { FloatArray dofUnknowns; dof->giveUnknowns(dofUnknowns, iMode, &iTStep); #ifdef DEBUG if(!dofUnknowns.isFinite()) { OOFEM_ERROR("!dofUnknowns.isFinite()") } if(dofUnknowns.giveSize() < 1) { OOFEM_ERROR("dofUnknowns.giveSize() < 1") } #endif nodeDispOld.push_back(dofUnknowns.at(1)); } else { if ( dof->hasBc(& iTStep) ) { // printf("hasBC.\n"); #ifdef DEBUG if(!std::isfinite(dof->giveBcValue(iMode, & iTStep))) { OOFEM_ERROR("!std::isfinite(dof->giveBcValue(iMode, & iTStep))") } #endif nodeDispOld.push_back( dof->giveBcValue(iMode, & iTStep) ); } else { // printf("Unhandled case in LSPrimaryVariableMapper :: mapPrimaryVariables().\n"); nodeDispOld.push_back( 0.0 ); } } dofsPassed++; } } FloatArray oldDisp; oldDisp.beProductOf(NOld, nodeDispOld); FloatArray temp, du; #ifdef DEBUG if(!oldDisp.isFinite()) { OOFEM_ERROR("!oldDisp.isFinite()") } if(!newDisp.isFinite()) { OOFEM_ERROR("!newDisp.isFinite()") } #endif du.beDifferenceOf(oldDisp, newDisp); temp.beTProductOf(NNew, du); double dV = elNew->computeVolumeAround(gp); elRes.add(dV, temp); } }
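What the Gauss-point loop above accumulates is the element contribution to an L2-type projection between the old and the new discretization: with u_old = N_old d_old and u_new = N_new d_new the displacements interpolated at the mapped point, each Gauss point adds

$$ \mathbf{r}_e \;{+}{=}\; \mathbf{N}_{\mathrm{new}}^{\mathsf T}\left(\mathbf{u}_{\mathrm{old}} - \mathbf{u}_{\mathrm{new}}\right)\mathrm{d}V , $$

i.e. the quadrature evaluation of ∫_{Ω_e} Nᵀ (u_old − u_new) dΩ; the assembly of these residuals and the solve for the mapped nodal values follow later in the routine and are not part of this excerpt.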
int LoadBalancer :: packMigratingData(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int idofman, ndofman;
    classType dtype;
    DofManager *dofman;
    LoadBalancer :: DofManMode dmode;

    // **************************************************
    // Pack migrating data to remote partition
    // **************************************************

    // pack dofManagers
    if ( iproc == myrank ) {
        return 1; // skip local partition
    }
    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    // loop over dofManagers
    ndofman = d->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = d->giveDofManager(idofman);
        dmode = this->giveDofManState(idofman);
        dtype = dofman->giveClassID();
        // sync data to remote partition
        // if dofman is already present on remote partition then there is no need to sync
        //if ((this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc))) {
        if ( ( this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc) ) &&
             ( !dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            pcbuff->packInt(dtype);
            pcbuff->packInt(dmode);
            pcbuff->packInt( dofman->giveGlobalNumber() );

            // pack dofman state (this is the local dofman, not available on remote)
            /* this is a potential performance leak: sending a shared dofman to a partition
             * in which it is already shared does not require sending its context (it is already there);
             * here, for simplicity, it is always sent */
            dofman->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // send list of new partitions
            pcbuff->packIntArray( * ( this->giveDofManPartitions(idofman) ) );
        }
    }
    // pack end-of-dofman-section record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    int ielem, nelem = d->giveNumberOfElements(), nsend = 0;
    Element *elem;

    for ( ielem = 1; ielem <= nelem; ielem++ ) { // begin loop over elements
        elem = d->giveElement(ielem);
        if ( ( elem->giveParallelMode() == Element_local ) &&
             ( this->giveElementPartition(ielem) == iproc ) ) {
            // pack local element (node numbers should be global ones!!!)
            // pack type
            pcbuff->packInt( elem->giveClassID() );
            // nodal numbers should be packed as global !!
            elem->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State);
            nsend++;
        }
    } // end loop over elements
    // pack end-of-element-record
    pcbuff->packInt(LOADBALANCER_END_DATA);
    OOFEM_LOG_RELEVANT("[%d] LoadBalancer:: sending %d migrating elements to %d\n", myrank, nsend, iproc);

    return 1;
}
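packMigratingData and unpackMigratingData rely on the same framing convention: a stream of typed records terminated by a LOADBALANCER_END_DATA sentinel, first for dof managers and then for elements. The following is a minimal, self-contained sketch of such sentinel-terminated framing on a plain int buffer; it only illustrates the pattern and is not OOFEM's ProcessCommunicatorBuff API (the buffer type, record payloads and the END_DATA value here are invented for the example).

```cpp
#include <vector>
#include <cstdio>

// Hypothetical sentinel value, standing in for LOADBALANCER_END_DATA.
const int END_DATA = -1;

int main()
{
    // "Pack": write [type, payload] records, then the sentinel.
    std::vector<int> buf;
    for ( int rec = 0; rec < 3; rec++ ) {
        buf.push_back(100 + rec);   // record type (cf. the packed class id)
        buf.push_back(rec * rec);   // record payload (cf. the packed context)
    }
    buf.push_back(END_DATA);        // end-of-section marker

    // "Unpack": read records until the sentinel is met, mirroring the
    // while ( _type != LOADBALANCER_END_DATA ) loop above.
    size_t pos = 0;
    int type = buf[pos++];
    while ( type != END_DATA ) {
        int payload = buf[pos++];
        std::printf("record type %d payload %d\n", type, payload);
        type = buf[pos++];
    }
    return 0;
}
```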
void NonLinearDynamic :: proceedStep(int di, TimeStep *tStep) { // creates system of governing eq's and solves them at given time step // first assemble problem at current time step int neq = this->giveNumberOfEquations(EID_MomentumBalance); // Time-stepping constants double dt2 = deltaT * deltaT; if ( tStep->giveTimeDiscretization() == TD_Newmark ) { OOFEM_LOG_DEBUG("Solving using Newmark-beta method\n"); a0 = 1 / ( beta * dt2 ); a1 = gamma / ( beta * deltaT ); a2 = 1 / ( beta * deltaT ); a3 = 1 / ( 2 * beta ) - 1; a4 = ( gamma / beta ) - 1; a5 = deltaT / 2 * ( gamma / beta - 2 ); a6 = 0; } else if ( ( tStep->giveTimeDiscretization() == TD_TwoPointBackward ) || ( tStep->giveNumber() == giveNumberOfFirstStep() ) ) { if ( tStep->giveTimeDiscretization() != TD_ThreePointBackward ) { OOFEM_LOG_DEBUG("Solving using Backward Euler method\n"); } else { OOFEM_LOG_DEBUG("Solving initial step using Three-point Backward Euler method\n"); } a0 = 1 / dt2; a1 = 1 / deltaT; a2 = 1 / deltaT; a3 = 0; a4 = 0; a5 = 0; a6 = 0; } else if ( tStep->giveTimeDiscretization() == TD_ThreePointBackward ) { OOFEM_LOG_DEBUG("Solving using Three-point Backward Euler method\n"); a0 = 2 / dt2; a1 = 3 / ( 2 * deltaT ); a2 = 2 / deltaT; a3 = 0; a4 = 0; a5 = 0; a6 = 1 / ( 2 * deltaT ); } else { _error("NonLinearDynamic: Time-stepping scheme not found!\n") } if ( tStep->giveNumber() == giveNumberOfFirstStep() ) { // Initialization previousIncrementOfDisplacement.resize(neq); previousIncrementOfDisplacement.zero(); previousTotalDisplacement.resize(neq); previousTotalDisplacement.zero(); totalDisplacement.resize(neq); totalDisplacement.zero(); previousInternalForces.resize(neq); previousInternalForces.zero(); incrementOfDisplacement.resize(neq); incrementOfDisplacement.zero(); velocityVector.resize(neq); velocityVector.zero(); accelerationVector.resize(neq); accelerationVector.zero(); TimeStep *stepWhenIcApply = new TimeStep(giveNumberOfTimeStepWhenIcApply(), this, 0, -deltaT, deltaT, 0); int nDofs, j, k, jj; int nman = this->giveDomain(di)->giveNumberOfDofManagers(); DofManager *node; Dof *iDof; // Considering initial conditions. for ( j = 1; j <= nman; j++ ) { node = this->giveDomain(di)->giveDofManager(j); nDofs = node->giveNumberOfDofs(); for ( k = 1; k <= nDofs; k++ ) { // Ask for initial values obtained from // bc (boundary conditions) and ic (initial conditions). iDof = node->giveDof(k); if ( !iDof->isPrimaryDof() ) { continue; } jj = iDof->__giveEquationNumber(); if ( jj ) { incrementOfDisplacement.at(jj) = iDof->giveUnknown(EID_MomentumBalance, VM_Total, stepWhenIcApply); velocityVector.at(jj) = iDof->giveUnknown(EID_MomentumBalance, VM_Velocity, stepWhenIcApply); accelerationVector.at(jj) = iDof->giveUnknown(EID_MomentumBalance, VM_Acceleration, stepWhenIcApply); } } } } else { incrementOfDisplacement.resize(neq); incrementOfDisplacement.zero(); } if ( initFlag ) { // First assemble problem at current time step. // Option to take into account initial conditions. 
if ( !stiffnessMatrix ) { stiffnessMatrix = CreateUsrDefSparseMtrx(sparseMtrxType); } if ( stiffnessMatrix == NULL ) { _error("proceedStep: sparse matrix creation failed"); } if ( nonlocalStiffnessFlag ) { if ( !stiffnessMatrix->isAsymmetric() ) { _error("proceedStep: stiffnessMatrix does not support asymmetric storage"); } } stiffnessMatrix->buildInternalStructure( this, di, EID_MomentumBalance, EModelDefaultEquationNumbering() ); // Initialize vectors help.resize(neq); rhs.resize(neq); rhs2.resize(neq); internalForces.resize(neq); help.zero(); rhs.zero(); rhs2.zero(); previousTotalDisplacement.resize(neq); for ( int i = 1; i <= neq; i++ ) { previousTotalDisplacement.at(i) = totalDisplacement.at(i); } initFlag = 0; } #ifdef VERBOSE OOFEM_LOG_DEBUG("Assembling load\n"); #endif // Assemble the incremental reference load vector. this->assembleIncrementalReferenceLoadVectors(incrementalLoadVector, incrementalLoadVectorOfPrescribed, refLoadInputMode, this->giveDomain(di), EID_MomentumBalance, tStep); // Assembling the effective load vector for ( int i = 1; i <= neq; i++ ) { help.at(i) = a2 * velocityVector.at(i) + a3 * accelerationVector.at(i) + eta * ( a4 * velocityVector.at(i) + a5 * accelerationVector.at(i) + a6 * previousIncrementOfDisplacement.at(i) ); } this->timesMtrx(help, rhs, MassMatrix, this->giveDomain(di), tStep); if ( delta != 0 ) { for ( int i = 1; i <= neq; i++ ) { help.at(i) = delta * ( a4 * velocityVector.at(i) + a5 * accelerationVector.at(i) + a6 * previousIncrementOfDisplacement.at(i) ); } this->timesMtrx(help, rhs2, StiffnessMatrix, this->giveDomain(di), tStep); help.zero(); for ( int i = 1; i <= neq; i++ ) { rhs.at(i) += rhs2.at(i); } } for ( int i = 1; i <= neq; i++ ) { rhs.at(i) += incrementalLoadVector.at(i) - previousInternalForces.at(i); totalDisplacement.at(i) = previousTotalDisplacement.at(i); } // // Set-up numerical model. // this->giveNumericalMethod( this->giveCurrentMetaStep() ); // // Call numerical model to solve problem. // double loadLevel = 1.0; if ( initialLoadVector.isNotEmpty() ) { numMetStatus = nMethod->solve(stiffnessMatrix, & rhs, & initialLoadVector, & totalDisplacement, & incrementOfDisplacement, & internalForces, internalForcesEBENorm, loadLevel, refLoadInputMode, currentIterations, tStep); } else { numMetStatus = nMethod->solve(stiffnessMatrix, & rhs, NULL, & totalDisplacement, & incrementOfDisplacement, & internalForces, internalForcesEBENorm, loadLevel, refLoadInputMode, currentIterations, tStep); } OOFEM_LOG_INFO("Equilibrium reached in %d iterations\n", currentIterations); }
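For reference, the integration constants set in the Newmark-beta branch above are the standard ones (with Δt = deltaT):

$$ a_0 = \frac{1}{\beta\Delta t^{2}},\quad a_1 = \frac{\gamma}{\beta\Delta t},\quad a_2 = \frac{1}{\beta\Delta t},\quad a_3 = \frac{1}{2\beta}-1,\quad a_4 = \frac{\gamma}{\beta}-1,\quad a_5 = \frac{\Delta t}{2}\left(\frac{\gamma}{\beta}-2\right),\quad a_6 = 0 . $$

The backward Euler branches reuse the same slots: a_0 = 1/Δt², a_1 = a_2 = 1/Δt for the two-point scheme, and a_0 = 2/Δt², a_1 = 3/(2Δt), a_2 = 2/Δt, a_6 = 1/(2Δt) for the three-point scheme.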
void IncrementalLinearStatic :: solveYourselfAt(TimeStep *tStep) { // Creates system of governing eq's and solves them at given time step // Initiates the total displacement to zero. if ( tStep->isTheFirstStep() ) { Domain *d = this->giveDomain(1); for ( int i = 1; i <= d->giveNumberOfDofManagers(); i++ ) { DofManager *dofman = d->giveDofManager(i); for ( int j = 1; j <= dofman->giveNumberOfDofs(); j++ ) { dofman->giveDof(j)->updateUnknownsDictionary(tStep, VM_Total_Old, 0.); dofman->giveDof(j)->updateUnknownsDictionary(tStep, VM_Total, 0.); // This is actually redundant now; //dofman->giveDof(j)->updateUnknownsDictionary(tStep, VM_Incremental, 0.); } } int nbc = d->giveNumberOfBoundaryConditions(); for ( int ibc = 1; ibc <= nbc; ++ibc ) { GeneralBoundaryCondition *bc = d->giveBc(ibc); ActiveBoundaryCondition *abc; if ( ( abc = dynamic_cast< ActiveBoundaryCondition * >( bc ) ) ) { int ndman = abc->giveNumberOfInternalDofManagers(); for ( int i = 1; i <= ndman; i++ ) { DofManager *dofman = abc->giveInternalDofManager(i); for ( int j = 1; j <= dofman->giveNumberOfDofs(); j++ ) { dofman->giveDof(j)->updateUnknownsDictionary(tStep, VM_Total_Old, 0.); dofman->giveDof(j)->updateUnknownsDictionary(tStep, VM_Total, 0.); // This is actually redundant now; //dofman->giveDof(j)->updateUnknownsDictionary(tStep, VM_Incremental, 0.); } } } } } // Apply dirichlet b.c's on total values Domain *d = this->giveDomain(1); for ( int i = 1; i <= d->giveNumberOfDofManagers(); i++ ) { DofManager *dofman = d->giveDofManager(i); for ( int j = 1; j <= dofman->giveNumberOfDofs(); j++ ) { Dof *d = dofman->giveDof(j); double tot = d->giveUnknown(VM_Total_Old, tStep); if ( d->hasBc(tStep) ) { tot += d->giveBcValue(VM_Incremental, tStep); } d->updateUnknownsDictionary(tStep, VM_Total, tot); } } #ifdef VERBOSE OOFEM_LOG_RELEVANT( "Solving [step number %8d, time %15e]\n", tStep->giveNumber(), tStep->giveTargetTime() ); #endif int neq = this->giveNumberOfDomainEquations(1, EModelDefaultEquationNumbering()); if (neq == 0) { // Allows for fully prescribed/empty problems. 
    return;
}

incrementOfDisplacementVector.resize(neq);
incrementOfDisplacementVector.zero();

#ifdef VERBOSE
OOFEM_LOG_INFO("Assembling load\n");
#endif
// Assembling the element part of load vector
internalLoadVector.resize(neq);
internalLoadVector.zero();
this->assembleVector( internalLoadVector, tStep, EID_MomentumBalance, InternalForcesVector, VM_Total,
                      EModelDefaultEquationNumbering(), this->giveDomain(1) );

loadVector.resize(neq);
loadVector.zero();
this->assembleVector( loadVector, tStep, EID_MomentumBalance, ExternalForcesVector, VM_Total,
                      EModelDefaultEquationNumbering(), this->giveDomain(1) );
loadVector.subtract(internalLoadVector);

#ifdef VERBOSE
OOFEM_LOG_INFO("Assembling stiffness matrix\n");
#endif
if ( stiffnessMatrix ) {
    delete stiffnessMatrix;
}
stiffnessMatrix = classFactory.createSparseMtrx(sparseMtrxType);
if ( stiffnessMatrix == NULL ) {
    _error("solveYourselfAt: sparse matrix creation failed");
}
stiffnessMatrix->buildInternalStructure( this, 1, EID_MomentumBalance, EModelDefaultEquationNumbering() );
stiffnessMatrix->zero();
this->assemble( stiffnessMatrix, tStep, EID_MomentumBalance, StiffnessMatrix,
                EModelDefaultEquationNumbering(), this->giveDomain(1) );

#ifdef VERBOSE
OOFEM_LOG_INFO("Solving ...\n");
#endif
this->giveNumericalMethod( this->giveCurrentMetaStep() );
NM_Status s = nMethod->solve(stiffnessMatrix, & loadVector, & incrementOfDisplacementVector);
if ( !(s & NM_Success) ) {
    OOFEM_ERROR("IncrementalLinearStatic :: solveYourselfAt - No success in solving system.");
}
}
bool NRSolver :: checkConvergence(FloatArray &RT, FloatArray &F, FloatArray &rhs, FloatArray &ddX, FloatArray &X, double RRT, const FloatArray &internalForcesEBENorm, int nite, bool &errorOutOfRange, TimeStep *tNow) { double forceErr, dispErr; FloatArray dg_forceErr, dg_dispErr, dg_totalLoadLevel, dg_totalDisp; bool answer; EModelDefaultEquationNumbering dn; #ifdef __PARALLEL_MODE #ifdef __PETSC_MODULE PetscContext *parallel_context = engngModel->givePetscContext(this->domain->giveNumber()); Natural2LocalOrdering *n2l = parallel_context->giveN2Lmap(); #endif #endif /* * The force errors are (if possible) evaluated as relative errors. * If the norm of applied load vector is zero (one may load by temperature, etc) * then the norm of reaction forces is used in relative norm evaluation. * * Note: This is done only when all dofs are included (nccdg = 0). Not implemented if * multiple convergence criteria are used. * */ answer = true; errorOutOfRange = false; if ( internalForcesEBENorm.giveSize() > 1 ) { // Special treatment when just one norm is given; No grouping int nccdg = this->domain->giveMaxDofID(); // Keeps tracks of which dof IDs are actually in use; IntArray idsInUse(nccdg); idsInUse.zero(); // zero error norms per group dg_forceErr.resize(nccdg); dg_forceErr.zero(); dg_dispErr.resize(nccdg); dg_dispErr.zero(); dg_totalLoadLevel.resize(nccdg); dg_totalLoadLevel.zero(); dg_totalDisp.resize(nccdg); dg_totalDisp.zero(); // loop over dof managers int ndofman = domain->giveNumberOfDofManagers(); for ( int idofman = 1; idofman <= ndofman; idofman++ ) { DofManager *dofman = domain->giveDofManager(idofman); #if ( defined ( __PARALLEL_MODE ) && defined ( __PETSC_MODULE ) ) if ( !parallel_context->isLocal(dofman) ) { continue; } #endif // loop over individual dofs int ndof = dofman->giveNumberOfDofs(); for ( int idof = 1; idof <= ndof; idof++ ) { Dof *dof = dofman->giveDof(idof); if ( !dof->isPrimaryDof() ) continue; int eq = dof->giveEquationNumber(dn); int dofid = dof->giveDofID(); if ( !eq ) continue; dg_forceErr.at(dofid) += rhs.at(eq) * rhs.at(eq); dg_dispErr.at(dofid) += ddX.at(eq) * ddX.at(eq); dg_totalLoadLevel.at(dofid) += RT.at(eq) * RT.at(eq); dg_totalDisp.at(dofid) += X.at(eq) * X.at(eq); idsInUse.at(dofid) = 1; } // end loop over DOFs } // end loop over dof managers // loop over elements and their DOFs int nelem = domain->giveNumberOfElements(); for ( int ielem = 1; ielem <= nelem; ielem++ ) { Element *elem = domain->giveElement(ielem); #ifdef __PARALLEL_MODE if ( elem->giveParallelMode() != Element_local ) { continue; } #endif // loop over element internal Dofs for ( int idofman = 1; idofman <= elem->giveNumberOfInternalDofManagers(); idofman++) { DofManager *dofman = elem->giveInternalDofManager(idofman); int ndof = dofman->giveNumberOfDofs(); // loop over individual dofs for ( int idof = 1; idof <= ndof; idof++ ) { Dof *dof = dofman->giveDof(idof); if ( !dof->isPrimaryDof() ) continue; int eq = dof->giveEquationNumber(dn); int dofid = dof->giveDofID(); if ( !eq ) continue; #if ( defined ( __PARALLEL_MODE ) && defined ( __PETSC_MODULE ) ) if ( engngModel->isParallel() && !n2l->giveNewEq(eq) ) continue; #endif dg_forceErr.at(dofid) += rhs.at(eq) * rhs.at(eq); dg_dispErr.at(dofid) += ddX.at(eq) * ddX.at(eq); dg_totalLoadLevel.at(dofid) += RT.at(eq) * RT.at(eq); dg_totalDisp.at(dofid) += X.at(eq) * X.at(eq); idsInUse.at(dofid) = 1; } // end loop over DOFs } // end loop over element internal dofmans } // end loop over elements // loop over boundary conditions and their internal DOFs 
    for ( int ibc = 1; ibc <= domain->giveNumberOfBoundaryConditions(); ibc++ ) {
        GeneralBoundaryCondition *bc = domain->giveBc(ibc);
        // loop over bc internal Dofs
        for ( int idofman = 1; idofman <= bc->giveNumberOfInternalDofManagers(); idofman++) {
            DofManager *dofman = bc->giveInternalDofManager(idofman);
            int ndof = dofman->giveNumberOfDofs();
            // loop over individual dofs
            for ( int idof = 1; idof <= ndof; idof++ ) {
                Dof *dof = dofman->giveDof(idof);
                if ( !dof->isPrimaryDof() ) continue;
                int eq = dof->giveEquationNumber(dn);
                int dofid = dof->giveDofID();
                if ( !eq ) continue;
#if ( defined ( __PARALLEL_MODE ) && defined ( __PETSC_MODULE ) )
                if ( engngModel->isParallel() && !n2l->giveNewEq(eq) ) continue;
#endif
                dg_forceErr.at(dofid) += rhs.at(eq) * rhs.at(eq);
                dg_dispErr.at(dofid) += ddX.at(eq) * ddX.at(eq);
                dg_totalLoadLevel.at(dofid) += RT.at(eq) * RT.at(eq);
                dg_totalDisp.at(dofid) += X.at(eq) * X.at(eq);
                idsInUse.at(dofid) = 1;
            } // end loop over DOFs
        } // end loop over bc internal dofmans
    } // end loop over boundary conditions

#ifdef __PARALLEL_MODE
    // exchange individual partition contributions (simultaneously for all groups)
 #ifdef __PETSC_MODULE
    FloatArray collectiveErr(nccdg);
    parallel_context->accumulate(dg_forceErr, collectiveErr);
    dg_forceErr = collectiveErr;
    parallel_context->accumulate(dg_dispErr, collectiveErr);
    dg_dispErr = collectiveErr;
    parallel_context->accumulate(dg_totalLoadLevel, collectiveErr);
    dg_totalLoadLevel = collectiveErr;
    parallel_context->accumulate(dg_totalDisp, collectiveErr);
    dg_totalDisp = collectiveErr;
 #else
    if ( this->engngModel->isParallel() ) {
        FloatArray collectiveErr(nccdg);
        MPI_Allreduce(dg_forceErr.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm);
        dg_forceErr = collectiveErr;
        MPI_Allreduce(dg_dispErr.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm);
        dg_dispErr = collectiveErr;
        MPI_Allreduce(dg_totalLoadLevel.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm);
        dg_totalLoadLevel = collectiveErr;
        MPI_Allreduce(dg_totalDisp.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm);
        dg_totalDisp = collectiveErr;
    }
 #endif
#endif

    OOFEM_LOG_INFO("NRSolver: %-5d", nite);
    //bool zeroNorm = false;
    // loop over dof groups and check convergence individually
    for ( int dg = 1; dg <= nccdg; dg++ ) {
        bool zeroFNorm = false, zeroDNorm = false;
        // Skips the ones which aren't used in this problem (the residual will be zero for these anyway, but it is annoying to print them all)
        if ( !idsInUse.at(dg) ) {
            continue;
        }

        OOFEM_LOG_INFO( " %s:", __DofIDItemToString((DofIDItem)dg).c_str() );

        if ( rtolf.at(1) > 0.0 ) {
            // compute a relative error norm
            if ( ( dg_totalLoadLevel.at(dg) + internalForcesEBENorm.at(dg) ) > nrsolver_ERROR_NORM_SMALL_NUM ) {
                forceErr = sqrt( dg_forceErr.at(dg) / ( dg_totalLoadLevel.at(dg) + internalForcesEBENorm.at(dg) ) );
            } else {
                // If both external forces and internal ebe norms are zero, then the residual must be zero.
                //zeroNorm = true; // Warning about this afterwards.
                zeroFNorm = true;
                forceErr = sqrt( dg_forceErr.at(dg) );
            }

            if ( forceErr > rtolf.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) {
                errorOutOfRange = true;
            }
            if ( forceErr > rtolf.at(1) ) {
                answer = false;
            }
            OOFEM_LOG_INFO( zeroFNorm ?
" *%.3e" : " %.3e", forceErr ); } if ( rtold.at(1) > 0.0 ) { // compute displacement error if ( dg_totalDisp.at(dg) > nrsolver_ERROR_NORM_SMALL_NUM ) { dispErr = sqrt( dg_dispErr.at(dg) / dg_totalDisp.at(dg) ); } else { ///@todo This is almost always the case for displacement error. nrsolveR_ERROR_NORM_SMALL_NUM is no good. //zeroNorm = true; // Warning about this afterwards. //zeroDNorm = true; dispErr = sqrt( dg_dispErr.at(dg) ); } if ( dispErr > rtold.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) { errorOutOfRange = true; } if ( dispErr > rtold.at(1) ) { answer = false; } OOFEM_LOG_INFO( zeroDNorm ? " *%.3e" : " %.3e", dispErr ); } } OOFEM_LOG_INFO("\n"); //if ( zeroNorm ) OOFEM_WARNING("NRSolver :: checkConvergence - Had to resort to absolute error measure (marked by *)"); } else { // No dof grouping double dXX, dXdX; if ( engngModel->giveProblemScale() == macroScale ) { OOFEM_LOG_INFO("NRSolver: %-15d", nite); } else { OOFEM_LOG_INFO(" NRSolver: %-15d", nite); } #ifdef __PARALLEL_MODE forceErr = parallel_context->norm(rhs); forceErr *= forceErr; dXX = parallel_context->localNorm(X); dXX *= dXX; // Note: Solutions are always total global values (natural distribution makes little sense for the solution) dXdX = parallel_context->localNorm(ddX); dXdX *= dXdX; #else forceErr = rhs.computeSquaredNorm(); dXX = X.computeSquaredNorm(); dXdX = ddX.computeSquaredNorm(); #endif if ( rtolf.at(1) > 0.0 ) { // we compute a relative error norm if ( ( RRT + internalForcesEBENorm.at(1) ) > nrsolver_ERROR_NORM_SMALL_NUM ) { forceErr = sqrt( forceErr / ( RRT + internalForcesEBENorm.at(1) ) ); } else { forceErr = sqrt( forceErr ); // absolute norm as last resort } if ( fabs(forceErr) > rtolf.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) { errorOutOfRange = true; } if ( fabs(forceErr) > rtolf.at(1) ) { answer = false; } OOFEM_LOG_INFO(" %-15e", forceErr); } if ( rtold.at(1) > 0.0 ) { // compute displacement error // err is relative displacement change if ( dXX > nrsolver_ERROR_NORM_SMALL_NUM ) { dispErr = sqrt( dXdX / dXX ); } else { dispErr = sqrt( dXdX ); } if ( fabs(dispErr) > rtold.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) { errorOutOfRange = true; } if ( fabs(dispErr) > rtold.at(1) ) { answer = false; } OOFEM_LOG_INFO(" %-15e", dispErr); } OOFEM_LOG_INFO("\n"); } // end default case (all dofs contributing) return answer; }
void SolutionbasedShapeFunction :: initializeSurfaceData(modeStruct *mode) { EngngModel *m = mode->myEngngModel; double TOL2 = 1e-5; IntArray pNodes, mNodes, zNodes; Set *mySet = this->domain->giveSet( this->giveSetNumber() ); IntArray BoundaryList = mySet->giveBoundaryList(); // First add all nodes to pNodes or nNodes respectively depending on coordinate and normal. for ( int i = 0; i < BoundaryList.giveSize() / 2; i++ ) { int ElementID = BoundaryList(2 * i); int Boundary = BoundaryList(2 * i + 1); Element *e = m->giveDomain(1)->giveElement(ElementID); FEInterpolation *geoInterpolation = e->giveInterpolation(); // Check all sides of element IntArray bnodes; #define usePoints 1 #if usePoints == 1 // Check if all nodes are on the boundary geoInterpolation->boundaryGiveNodes(bnodes, Boundary); for ( int k = 1; k <= bnodes.giveSize(); k++ ) { DofManager *dman = e->giveDofManager( bnodes.at(k) ); for ( int l = 1; l <= dman->giveCoordinates()->giveSize(); l++ ) { if ( fabs( dman->giveCoordinates()->at(l) - maxCoord.at(l) ) < TOL2 ) { pNodes.insertOnce( dman->giveNumber() ); } if ( fabs( dman->giveCoordinates()->at(l) - minCoord.at(l) ) < TOL2 ) { mNodes.insertOnce( dman->giveNumber() ); } } } #else // Check normal FloatArray lcoords; lcoords.resize(2); lcoords.at(1) = 0.33333; lcoords.at(2) = 0.33333; FloatArray normal; geoInterpolation->boundaryEvalNormal( normal, j, lcoords, FEIElementGeometryWrapper(e) ); geoInterpolation->boundaryGiveNodes(bnodes, j); printf( "i=%u\tj=%u\t(%f\t%f\t%f)\n", i, j, normal.at(1), normal.at(2), normal.at(3) ); for ( int k = 1; k <= normal.giveSize(); k++ ) { if ( fabs( ( fabs( normal.at(k) ) - 1 ) ) < 1e-4 ) { // Points in x, y or z direction addTo = NULL; if ( normal.at(k) > 0.5 ) { addTo = & pNodes; } if ( normal.at(k) < -0.5 ) { addTo = & mNodes; } if ( addTo != NULL ) { for ( int l = 1; l <= bnodes.giveSize(); l++ ) { bool isSurface = false; DofManager *dman = e->giveDofManager( bnodes.at(l) ); dman->giveCoordinates()->printYourself(); for ( int m = 1; m <= dman->giveCoordinates()->giveSize(); m++ ) { if ( ( fabs( dman->giveCoordinates()->at(m) - maxCoord.at(m) ) < TOL2 ) || ( fabs( dman->giveCoordinates()->at(m) - minCoord.at(m) ) < TOL2 ) ) { isSurface = true; } } if ( isSurface ) { addTo->insertOnce( e->giveDofManagerNumber( bnodes.at(l) ) ); } } } } } #endif } #if 0 printf("p=["); for ( int i = 1; i < pNodes.giveSize(); i++ ) { printf( "%u, ", pNodes.at(i) ); } printf("];\n"); printf("m=["); for ( int i = 1; i < mNodes.giveSize(); i++ ) { printf( "%u, ", mNodes.at(i) ); } printf("];\n"); #endif //The intersection of pNodes and mNodes constitutes zNodes { int i = 1, j = 1; while ( i <= pNodes.giveSize() ) { j = 1; while ( j <= mNodes.giveSize() && ( i <= pNodes.giveSize() ) ) { //printf("%u == %u?\n", pNodes.at(i), mNodes.at(j)); if ( pNodes.at(i) == mNodes.at(j) ) { zNodes.insertOnce( pNodes.at(i) ); pNodes.erase(i); mNodes.erase(j); } else { j++; } } i++; } } // Compute base function values on nodes for dofids copyDofManagersToSurfaceData(mode, pNodes, true, false, false); copyDofManagersToSurfaceData(mode, mNodes, false, true, false); copyDofManagersToSurfaceData(mode, zNodes, false, false, true); #if 0 printf("p2=["); for ( int i = 1; i <= pNodes.giveSize(); i++ ) { printf( "%u, ", pNodes.at(i) ); } printf("];\n"); printf("m2=["); for ( int i = 1; i <= mNodes.giveSize(); i++ ) { printf( "%u, ", mNodes.at(i) ); } printf("];\n"); printf("z2=["); for ( int i = 1; i <= zNodes.giveSize(); i++ ) { printf( "%u, ", zNodes.at(i) ); } printf("];\n"); 
printf("pCoords=["); for ( int i = 1; i <= pNodes.giveSize(); i++ ) { FloatArray *coords = m->giveDomain(1)->giveDofManager( pNodes.at(i) )->giveCoordinates(); printf( "%f, %f, %f; ", coords->at(1), coords->at(2), coords->at(3) ); } printf("]\n"); printf("mCoords=["); for ( int i = 1; i <= mNodes.giveSize(); i++ ) { FloatArray *coords = m->giveDomain(1)->giveDofManager( mNodes.at(i) )->giveCoordinates(); printf( "%f, %f, %f; ", coords->at(1), coords->at(2), coords->at(3) ); } printf("]\n"); printf("zCoords=["); for ( int i = 1; i <= zNodes.giveSize(); i++ ) { FloatArray *coords = m->giveDomain(1)->giveDofManager( zNodes.at(i) )->giveCoordinates(); printf( "%f, %f, %f; ", coords->at(1), coords->at(2), coords->at(3) ); } printf("];\n"); #endif }
void ProblemCommunicator :: setUpCommunicationMapsForElementCut(EngngModel *pm, bool excludeSelfCommFlag)
{
    Domain *domain = pm->giveDomain(1);
    int nnodes = domain->giveNumberOfDofManagers();
    int i, j, partition;

    if ( this->mode == ProblemCommMode__ELEMENT_CUT ) {
        /*
         * Initially, each partition knows for which nodes a receive
         * is needed (and can therefore compute easily the recv map),
         * but does not know for which nodes it should send data to which
         * partition. Hence, the communication setup is performed by
         * broadcasting "send request" lists of nodes for which
         * a partition expects to receive data (ie. of those nodes
         * which the partition uses, but does not own) to all
         * collaborating processes. The "send request" lists are
         * converted into send maps.
         */

        // receive maps can be built locally,
        // but send maps should be assembled from broadcasted lists (containing
        // expected receive nodes) of remote partitions.

        // first build local receive map
        IntArray domainNodeRecvCount(size);
        const IntArray *partitionList;
        DofManager *dofMan;
        //Element *element;
        int domainRecvListSize = 0, domainRecvListPos = 0;
        //int nelems;
        int result = 1;

        for ( i = 1; i <= nnodes; i++ ) {
            partitionList = domain->giveDofManager(i)->givePartitionList();
            if ( domain->giveDofManager(i)->giveParallelMode() == DofManager_remote ) {
                // size of partitionList should be 1 <== only one master
                for ( j = 1; j <= partitionList->giveSize(); j++ ) {
                    if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
                        domainRecvListSize++;
                        domainNodeRecvCount.at(partitionList->at(j) + 1)++;
                    }
                }
            }
        }

        // build maps simultaneously
        IntArray pos(size);
        IntArray **maps = new IntArray * [ size ];
        for ( i = 0; i < size; i++ ) {
            maps [ i ] = new IntArray( domainNodeRecvCount.at(i + 1) );
        }

        // allocate also domain receive list to be broadcasted
        IntArray domainRecvList(domainRecvListSize);

        if ( domainRecvListSize ) {
            for ( i = 1; i <= nnodes; i++ ) {
                // test if node is remote DofMan
                dofMan = domain->giveDofManager(i);
                if ( dofMan->giveParallelMode() == DofManager_remote ) {
                    domainRecvList.at(++domainRecvListPos) = dofMan->giveGlobalNumber();

                    partitionList = domain->giveDofManager(i)->givePartitionList();
                    // size of partitionList should be 1 <== only one master
                    for ( j = 1; j <= partitionList->giveSize(); j++ ) {
                        if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
                            partition = partitionList->at(j);
                            maps [ partition ]->at( ++pos.at(partition + 1) ) = i;
                        }
                    }
                }
            }
        }

        // set up process recv communicator maps
        for ( i = 0; i < size; i++ ) {
            this->setProcessCommunicatorToRecvArry(this->giveProcessCommunicator(i), * maps [ i ]);
            //this->giveDomainCommunicator(i)->setToRecvArry (this->engngModel, *maps[i]);
        }

        // delete local maps
        for ( i = 0; i < size; i++ ) {
            delete maps [ i ];
        }

        delete [] maps;

        // to assemble send maps, we must analyze broadcasted remote domain send lists
        // and we must also broadcast our send list.
#ifdef __VERBOSE_PARALLEL VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Element-cut broadcasting started", rank); #endif StaticCommunicationBuffer commBuff(MPI_COMM_WORLD); IntArray remoteDomainRecvList; IntArray toSendMap; int localExpectedSize, globalRecvSize; int sendMapPos, sendMapSize, globalDofManNum; // determine the size of receive buffer using AllReduce operation #ifndef IBM_MPI_IMPLEMENTATION localExpectedSize = domainRecvList.givePackSize(commBuff); #else localExpectedSize = domainRecvList.givePackSize(commBuff) + 1; #endif #ifdef __USE_MPI result = MPI_Allreduce(& localExpectedSize, & globalRecvSize, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); if ( result != MPI_SUCCESS ) { _error("setUpCommunicationMaps: MPI_Allreduce failed"); } #else WARNING: NOT SUPPORTED MESSAGE PARSING LIBRARY #endif #ifdef __VERBOSE_PARALLEL VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Finished reducing receiveBufferSize", rank); #endif // resize to fit largest received message commBuff.resize(globalRecvSize); // resize toSend map to max possible size toSendMap.resize(globalRecvSize); for ( i = 0; i < size; i++ ) { // loop over domains commBuff.init(); if ( i == rank ) { //current domain has to send its receive list to all domains // broadcast domainRecvList #ifdef __VERBOSE_PARALLEL VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Broadcasting own send list", rank); #endif commBuff.packIntArray(domainRecvList); result = commBuff.bcast(i); if ( result != MPI_SUCCESS ) { _error("setUpCommunicationMaps: commBuff broadcast failed"); } #ifdef __VERBOSE_PARALLEL VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Broadcasting own send list finished", rank); #endif } else { #ifdef __VERBOSE_PARALLEL OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d\n", rank, "ProblemCommunicator :: unpackAllData", i); #endif // receive broadcasted lists result = commBuff.bcast(i); if ( result != MPI_SUCCESS ) { _error("setUpCommunicationMaps: commBuff broadcast failed"); } #ifdef __VERBOSE_PARALLEL OOFEM_LOG_DEBUG("[process rank %3d]: %-30s: Receiving broadcasted send map from partition %3d finished\n", rank, "ProblemCommunicator :: unpackAllData", i); #endif // unpack remote receive list if ( !commBuff.unpackIntArray(remoteDomainRecvList) ) { _error("ProblemCommunicator::setUpCommunicationMaps: unpack remote receive list failed"); } // find if remote nodes are in local partition // if yes add them into send map for correcponding i-th partition sendMapPos = 0; sendMapSize = 0; // determine sendMap size for ( j = 1; j <= nnodes; j++ ) { // loop over local DofManagers dofMan = domain->giveDofManager(j); globalDofManNum = dofMan->giveGlobalNumber(); // test id globalDofManNum is in remoteDomainRecvList if ( remoteDomainRecvList.findFirstIndexOf(globalDofManNum) ) { sendMapSize++; } } toSendMap.resize(sendMapSize); for ( j = 1; j <= nnodes; j++ ) { // loop over local DofManagers dofMan = domain->giveDofManager(j); globalDofManNum = dofMan->giveGlobalNumber(); // test id globalDofManNum is in remoteDomainRecvList if ( remoteDomainRecvList.findFirstIndexOf(globalDofManNum) ) { // add this local DofManager number to sed map for active partition toSendMap.at(++sendMapPos) = j; } } // end loop over local DofManagers // set send map to i-th process communicator this->setProcessCommunicatorToSendArry(this->giveProcessCommunicator(i), toSendMap); //this->giveDomainCommunicator(i)->setToSendArry (this->engngModel, 
toSendMap); } // end receiving broadcasted lists #ifdef __VERBOSE_PARALLEL VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Receiving broadcasted send maps finished", rank); #endif } // end loop over domains } else { _error("setUpCommunicationMapsForElementCut: unknown mode"); } }
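The handshake implemented above follows the scheme described in the opening comment: each partition in turn broadcasts the list of global node numbers it needs to receive, and every other partition derives its send map toward the broadcasting rank by matching those numbers against the dof managers it owns. The sketch below shows just that pattern with plain MPI and std::vector; the function name and argument layout are made up for illustration and do not use OOFEM's buffers.

#include <mpi.h>
#include <vector>

static std::vector< std::vector<int> > buildSendMaps(const std::vector<int> &myRecvIds,   // global ids this rank needs
                                                     const std::vector<int> &myOwnedIds,  // global ids this rank owns
                                                     MPI_Comm comm)
{
    int rank, size;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
    std::vector< std::vector<int> > sendMap(size);

    for ( int p = 0; p < size; p++ ) {
        // rank p broadcasts its receive list: first the length, then the ids
        int n = ( p == rank ) ? (int) myRecvIds.size() : 0;
        MPI_Bcast(&n, 1, MPI_INT, p, comm);
        std::vector<int> remoteRecv( p == rank ? myRecvIds : std::vector<int>(n) );
        if ( n > 0 ) {
            MPI_Bcast(remoteRecv.data(), n, MPI_INT, p, comm);
        }
        if ( p == rank ) {
            continue;   // nothing is sent to ourselves
        }
        // every id we own that appears in p's receive list goes into our send map toward p
        for ( int id : myOwnedIds ) {
            for ( int r : remoteRecv ) {
                if ( r == id ) {
                    sendMap [ p ].push_back(id);
                    break;
                }
            }
        }
    }
    return sendMap;
}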
///////////////////////////////////////////////// // Help functions void GnuplotExportModule::outputReactionForces(TimeStep *tStep) { // Add sum of reaction forces to arrays // Compute sum of reaction forces for each BC number Domain *domain = emodel->giveDomain(1); StructuralEngngModel *seMod = dynamic_cast<StructuralEngngModel* >(emodel); if(seMod == NULL) { OOFEM_ERROR("failed to cast to StructuralEngngModel."); } IntArray ielemDofMask; FloatArray reactions; IntArray dofManMap, dofidMap, eqnMap; // test if solution step output is active if ( !testTimeStepOutput(tStep) ) { return; } // map contains corresponding dofmanager and dofs numbers corresponding to prescribed equations // sorted according to dofmanger number and as a minor crit. according to dof number // this is necessary for extractor, since the sorted output is expected seMod->buildReactionTable(dofManMap, dofidMap, eqnMap, tStep, 1); // compute reaction forces seMod->computeReaction(reactions, tStep, 1); // Find highest index of prescribed dofs int maxIndPresDof = 0; for ( int i = 1; i <= dofManMap.giveSize(); i++ ) { maxIndPresDof = std::max(maxIndPresDof, dofidMap.at(i)); } int numBC = domain->giveNumberOfBoundaryConditions(); while ( mReactionForceHistory.size() < size_t(numBC) ) { std::vector<FloatArray> emptyArray; mReactionForceHistory.push_back( emptyArray ); } maxIndPresDof = domain->giveNumberOfSpatialDimensions(); while ( mDispHist.size() < size_t(numBC) ) { std::vector<double> emptyArray; mDispHist.push_back( emptyArray ); } for(int bcInd = 0; bcInd < numBC; bcInd++) { FloatArray fR(maxIndPresDof), disp(numBC); fR.zero(); for ( int i = 1; i <= dofManMap.giveSize(); i++ ) { DofManager *dMan = domain->giveDofManager( dofManMap.at(i) ); Dof *dof = dMan->giveDofWithID( dofidMap.at(i) ); if ( dof->giveBcId() == bcInd+1 ) { fR.at( dofidMap.at(i) ) += reactions.at( eqnMap.at(i) ); // Slightly dirty BoundaryCondition *bc = dynamic_cast<BoundaryCondition*> (domain->giveBc(bcInd+1)); if ( bc != NULL ) { disp.at(bcInd+1) = bc->give(dof, VM_Total, tStep->giveTargetTime()); } ///@todo This function should be using the primaryfield instead of asking BCs directly. / Mikael } } mDispHist[bcInd].push_back(disp.at(bcInd+1)); mReactionForceHistory[bcInd].push_back(fR); // X FILE * pFileX; char fileNameX[100]; sprintf(fileNameX, "ReactionForceGnuplotBC%dX.dat", bcInd+1); pFileX = fopen ( fileNameX , "wb" ); fprintf(pFileX, "#u Fx\n"); for ( size_t j = 0; j < mDispHist[bcInd].size(); j++ ) { fprintf(pFileX, "%e %e\n", mDispHist[bcInd][j], mReactionForceHistory[bcInd][j].at(1) ); } fclose(pFileX); // Y FILE * pFileY; char fileNameY[100]; sprintf(fileNameY, "ReactionForceGnuplotBC%dY.dat", bcInd+1); pFileY = fopen ( fileNameY , "wb" ); fprintf(pFileY, "#u Fx\n"); for ( size_t j = 0; j < mDispHist[bcInd].size(); j++ ) { if( mReactionForceHistory[bcInd][j].giveSize() >= 2 ) { fprintf(pFileY, "%e %e\n", mDispHist[bcInd][j], mReactionForceHistory[bcInd][j].at(2) ); } } fclose(pFileY); } }
void LEPlic :: doLagrangianPhase(TimeStep *tStep) { //Maps element nodes along trajectories using basic Runge-Kutta method (midpoint rule) int i, ci, ndofman = domain->giveNumberOfDofManagers(); int nsd = 2; double dt = tStep->giveTimeIncrement(); DofManager *dman; Node *inode; IntArray velocityMask; FloatArray x, x2(nsd), v_t, v_tn1; FloatMatrix t; #if 1 EngngModel *emodel = domain->giveEngngModel(); int err; #endif velocityMask = {V_u, V_v}; updated_XCoords.resize(ndofman); updated_YCoords.resize(ndofman); for ( i = 1; i <= ndofman; i++ ) { dman = domain->giveDofManager(i); inode = dynamic_cast< Node * >(dman); // skip dofmanagers with no position information if ( !inode ) { continue; } // get node coordinates x = * ( inode->giveCoordinates() ); // get velocity field v(tn, x(tn)) for dof manager #if 1 /* Original version */ dman->giveUnknownVector( v_t, velocityMask, VM_Total, tStep->givePreviousStep() ); /* Modified version */ //dman->giveUnknownVector(v_t, velocityMask, VM_Total, tStep); // Original version // compute updated position x(tn)+0.5*dt*v(tn,x(tn)) for ( ci = 1; ci <= nsd; ci++ ) { x2.at(ci) = x.at(ci) + 0.5 *dt *v_t.at(ci); } // compute interpolated velocity field at x2 [ v(tn+1, x(tn)+0.5*dt*v(tn,x(tn))) = v(tn+1, x2) ] FM_FieldPtr vfield; vfield = emodel->giveContext()->giveFieldManager()->giveField(FT_Velocity); if ( vfield == NULL ) { OOFEM_ERROR("Velocity field not available"); } err = vfield->evaluateAt(v_tn1, x2, VM_Total, tStep); if ( err == 1 ) { // point outside domain -> be explicit v_tn1 = v_t; } else if ( err != 0 ) { OOFEM_ERROR("vfield->evaluateAt failed, error code %d", err); } // compute final updated position for ( ci = 1; ci <= nsd; ci++ ) { x2.at(ci) = x.at(ci) + dt *v_tn1.at(ci); } #else // pure explicit version dman->giveUnknownVector(v_t, velocityMask, VM_Total, tStep); for ( ci = 1; ci <= nsd; ci++ ) { x2.at(ci) = x.at(ci) + dt *v_t.at(ci); } #endif // store updated node position updated_XCoords.at(i) = x2.at(1); updated_YCoords.at(i) = x2.at(2); } }
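The nodal update above is the classic midpoint (second-order Runge-Kutta) rule: the node is first advanced half a step with the old velocity, the velocity field of the new step is sampled at that midpoint, and the full step is then taken with the midpoint velocity. A standalone 2D sketch follows; the two velocity callbacks stand in for the field evaluations done through the FieldManager in the routine above and are purely illustrative.

#include <array>
#include <functional>

using Vec2 = std::array<double, 2>;

// x_mid = x + 0.5*dt*v(t_n, x);  x_new = x + dt*v(t_{n+1}, x_mid)
static Vec2 midpointAdvect(const Vec2 &x, double dt,
                           const std::function< Vec2(const Vec2 &) > &velocityOld,   // v(t_n, .)
                           const std::function< Vec2(const Vec2 &) > &velocityNew)   // v(t_{n+1}, .)
{
    Vec2 vOld = velocityOld(x);
    Vec2 xMid { x [ 0 ] + 0.5 * dt * vOld [ 0 ], x [ 1 ] + 0.5 * dt * vOld [ 1 ] };
    Vec2 vMid = velocityNew(xMid);
    return { x [ 0 ] + dt * vMid [ 0 ], x [ 1 ] + dt * vMid [ 1 ] };
}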
void PetscNatural2GlobalOrdering :: init(EngngModel *emodel, EquationID ut, int di, EquationType et) { Domain *d = emodel->giveDomain(di); int i, j, k, p, ndofs, ndofman = d->giveNumberOfDofManagers(); int myrank = emodel->giveRank(); DofManager *dman; // determine number of local eqs + number of those shared DOFs which are numbered by receiver // shared dofman is numbered on partition with lovest rank number EModelDefaultEquationNumbering dn; EModelDefaultPrescribedEquationNumbering dpn; #ifdef __VERBOSE_PARALLEL VERBOSEPARALLEL_PRINT("PetscNatural2GlobalOrdering :: init", "initializing N2G ordering", myrank); #endif l_neqs = 0; for ( i = 1; i <= ndofman; i++ ) { dman = d->giveDofManager(i); /* * if (dman->giveParallelMode() == DofManager_local) { // count all dofman eqs * ndofs = dman->giveNumberOfDofs (); * for (j=1; j<=ndofs; j++) { * if (dman->giveDof(j)->isPrimaryDof()) { * if (dman->giveDof(j)->giveEquationNumber()) l_neqs++; * } * } * } else if (dman->giveParallelMode() == DofManager_shared) { * // determine if problem is the lowest one sharing the dofman; if yes the receiver is responsible to * // deliver number * IntArray *plist = dman->givePartitionList(); * int n = plist->giveSize(); * int minrank = myrank; * for (j=1; j<=n; j++) minrank = min (minrank, plist->at(j)); * if (minrank == myrank) { // count eqs * ndofs = dman->giveNumberOfDofs (); * for (j=1; j<=ndofs; j++) { * if (dman->giveDof(j)->isPrimaryDof()) { * if (dman->giveDof(j)->giveEquationNumber()) l_neqs++; * } * } * } * } // end shared dman */ if ( isLocal(dman) ) { ndofs = dman->giveNumberOfDofs(); for ( j = 1; j <= ndofs; j++ ) { if ( dman->giveDof(j)->isPrimaryDof() ) { if ( et == et_standard ) { if ( dman->giveDof(j)->giveEquationNumber(dn) ) { l_neqs++; } } else { if ( dman->giveDof(j)->giveEquationNumber(dpn) ) { l_neqs++; } } } } } } // exchange with other procs the number of eqs numbered on particular procs int *leqs = new int [ emodel->giveNumberOfProcesses() ]; MPI_Allgather(& l_neqs, 1, MPI_INT, leqs, 1, MPI_INT, MPI_COMM_WORLD); // compute local offset int offset = 0; for ( j = 0; j < myrank; j++ ) { offset += leqs [ j ]; } // count global number of eqs for ( g_neqs = 0, j = 0; j < emodel->giveNumberOfProcesses(); j++ ) { g_neqs += leqs [ j ]; } // send numbered shared ones if ( et == et_standard ) { locGlobMap.resize( emodel->giveNumberOfEquations(ut) ); } else { locGlobMap.resize( emodel->giveNumberOfPrescribedEquations(ut) ); } // determine shared dofs int psize, nproc = emodel->giveNumberOfProcesses(); IntArray sizeToSend(nproc), sizeToRecv(nproc), nrecToReceive(nproc); #ifdef __VERBOSE_PARALLEL IntArray nrecToSend(nproc); #endif const IntArray *plist; for ( i = 1; i <= ndofman; i++ ) { // if (domain->giveDofManager(i)->giveParallelMode() == DofManager_shared) { if ( isShared( d->giveDofManager(i) ) ) { int n = d->giveDofManager(i)->giveNumberOfDofs(); plist = d->giveDofManager(i)->givePartitionList(); psize = plist->giveSize(); int minrank = myrank; for ( j = 1; j <= psize; j++ ) { minrank = min( minrank, plist->at(j) ); } if ( minrank == myrank ) { // count to send for ( j = 1; j <= psize; j++ ) { #ifdef __VERBOSE_PARALLEL nrecToSend( plist->at(j) )++; #endif sizeToSend( plist->at(j) ) += ( 1 + n ); // ndofs+dofman number } } else { nrecToReceive(minrank)++; sizeToRecv(minrank) += ( 1 + n ); // ndofs+dofman number } } } #ifdef __VERBOSE_PARALLEL for ( i = 0; i < nproc; i++ ) { OOFEM_LOG_INFO("[%d] Record Statistics: Sending %d Receiving %d to %d\n", myrank, nrecToSend(i), nrecToReceive(i), i); } 
#endif std :: map< int, int >globloc; // global->local mapping for shared // number local guys int globeq = offset; for ( i = 1; i <= ndofman; i++ ) { dman = d->giveDofManager(i); //if (dman->giveParallelMode() == DofManager_shared) { if ( isShared(dman) ) { globloc [ dman->giveGlobalNumber() ] = i; // build global->local mapping for shared plist = dman->givePartitionList(); psize = plist->giveSize(); int minrank = myrank; for ( j = 1; j <= psize; j++ ) { minrank = min( minrank, plist->at(j) ); } if ( minrank == myrank ) { // local ndofs = dman->giveNumberOfDofs(); for ( j = 1; j <= ndofs; j++ ) { if ( dman->giveDof(j)->isPrimaryDof() ) { int eq; if ( et == et_standard ) { eq = dman->giveDof(j)->giveEquationNumber(dn); } else { eq = dman->giveDof(j)->giveEquationNumber(dpn); } if ( eq ) { locGlobMap.at(eq) = globeq++; } } } } //} else if (dman->giveParallelMode() == DofManager_local) { } else { ndofs = dman->giveNumberOfDofs(); for ( j = 1; j <= ndofs; j++ ) { if ( dman->giveDof(j)->isPrimaryDof() ) { int eq; if ( et == et_standard ) { eq = dman->giveDof(j)->giveEquationNumber(dn); } else { eq = dman->giveDof(j)->giveEquationNumber(dpn); } if ( eq ) { locGlobMap.at(eq) = globeq++; } } } } } /* * fprintf (stderr, "[%d] locGlobMap: ", myrank); * for (i=1; i<=locGlobMap.giveSize(); i++) * fprintf (stderr, "%d ",locGlobMap.at(i)); */ // pack data for remote procs CommunicationBuffer **buffs = new CommunicationBuffer * [ nproc ]; for ( p = 0; p < nproc; p++ ) { buffs [ p ] = new StaticCommunicationBuffer(MPI_COMM_WORLD, 0); buffs [ p ]->resize( buffs [ p ]->givePackSize(MPI_INT, 1) * sizeToSend(p) ); #if 0 OOFEM_LOG_INFO( "[%d]PetscN2G:: init: Send buffer[%d] size %d\n", myrank, p, sizeToSend(p) ); #endif } for ( i = 1; i <= ndofman; i++ ) { if ( isShared( d->giveDofManager(i) ) ) { dman = d->giveDofManager(i); plist = dman->givePartitionList(); psize = plist->giveSize(); int minrank = myrank; for ( j = 1; j <= psize; j++ ) { minrank = min( minrank, plist->at(j) ); } if ( minrank == myrank ) { // do send for ( j = 1; j <= psize; j++ ) { p = plist->at(j); if ( p == myrank ) { continue; } #if 0 OOFEM_LOG_INFO("[%d]PetscN2G:: init: Sending localShared node %d[%d] to proc %d\n", myrank, i, dman->giveGlobalNumber(), p); #endif buffs [ p ]->packInt( dman->giveGlobalNumber() ); ndofs = dman->giveNumberOfDofs(); for ( k = 1; k <= ndofs; k++ ) { if ( dman->giveDof(k)->isPrimaryDof() ) { int eq; if ( et == et_standard ) { eq = dman->giveDof(k)->giveEquationNumber(dn); } else { eq = dman->giveDof(k)->giveEquationNumber(dpn); } if ( eq ) { buffs [ p ]->packInt( locGlobMap.at(eq) ); } } } } } } } //fprintf (stderr, "[%d] Sending glob nums ...", myrank); // send buffers for ( p = 0; p < nproc; p++ ) { if ( p != myrank ) { buffs [ p ]->iSend(p, 999); } } /**** * * for (p=0; p<nproc; p++) { * if (p == myrank) continue; * for (i=1; i<= ndofman; i++) { * //if (domain->giveDofManager(i)->giveParallelMode() == DofManager_shared) { * if (isShared(d->giveDofManager(i))) { * dman = d->giveDofManager(i); * plist = dman->givePartitionList(); * psize = plist->giveSize(); * int minrank = myrank; * for (j=1; j<=psize; j++) minrank = min (minrank, plist->at(j)); * if (minrank == myrank) { // do send * buffs[p]->packInt(dman->giveGlobalNumber()); * ndofs = dman->giveNumberOfDofs (); * for (j=1; j<=ndofs; j++) { * if (dman->giveDof(j)->isPrimaryDof()) { * buffs[p]->packInt(locGlobMap.at(dman->giveDof(j)->giveEquationNumber())); * } * } * } * } * } * // send buffer * buffs[p]->iSend(p, 999); * } ****/ // receive remote eqs 
and complete global numbering CommunicationBuffer **rbuffs = new CommunicationBuffer * [ nproc ]; for ( p = 0; p < nproc; p++ ) { rbuffs [ p ] = new StaticCommunicationBuffer(MPI_COMM_WORLD, 0); rbuffs [ p ]->resize( rbuffs [ p ]->givePackSize(MPI_INT, 1) * sizeToRecv(p) ); #if 0 OOFEM_LOG_INFO( "[%d]PetscN2G:: init: Receive buffer[%d] size %d\n", myrank, p, sizeToRecv(p) ); #endif } //fprintf (stderr, "[%d] Receiving glob nums ...", myrank); for ( p = 0; p < nproc; p++ ) { if ( p != myrank ) { rbuffs [ p ]->iRecv(p, 999); } } IntArray finished(nproc); finished.zero(); int fin = 1; finished.at(emodel->giveRank() + 1) = 1; do { for ( p = 0; p < nproc; p++ ) { if ( finished.at(p + 1) == 0 ) { if ( rbuffs [ p ]->testCompletion() ) { // data are here // unpack them int nite = nrecToReceive(p); int shdm, ldm; for ( i = 1; i <= nite; i++ ) { rbuffs [ p ]->unpackInt(shdm); #if 0 OOFEM_LOG_INFO("[%d]PetscN2G:: init: Received shared node [%d] from proc %d\n", myrank, shdm, p); #endif // // find local guy coorecponding to shdm if ( globloc.find(shdm) != globloc.end() ) { ldm = globloc [ shdm ]; } else { OOFEM_ERROR3("[%d] PetscNatural2GlobalOrdering :: init: invalid shared dofman received, globnum %d\n", myrank, shdm); } dman = d->giveDofManager(ldm); ndofs = dman->giveNumberOfDofs(); for ( j = 1; j <= ndofs; j++ ) { if ( dman->giveDof(j)->isPrimaryDof() ) { int eq; if ( et == et_standard ) { eq = dman->giveDof(j)->giveEquationNumber(dn); } else { eq = dman->giveDof(j)->giveEquationNumber(dpn); } if ( eq ) { int val; rbuffs [ p ]->unpackInt(val); locGlobMap.at(eq) = val; } } } } finished.at(p + 1) = 1; fin++; } } } } while ( fin < nproc ); /* * fprintf (stderr, "[%d] Finished receiving glob nums ...", myrank); * * fprintf (stderr, "[%d] locGlobMap:", myrank); * for (i=1; i<=locGlobMap.giveSize(); i++) * fprintf (stderr, "%d ",locGlobMap.at(i)); */ #ifdef __VERBOSE_PARALLEL if ( et == et_standard ) { int _eq; char *ptr; char *locname = "local", *shname = "shared", *unkname = "unknown"; for ( i = 1; i <= ndofman; i++ ) { dman = d->giveDofManager(i); if ( dman->giveParallelMode() == DofManager_local ) { ptr = locname; } else if ( dman->giveParallelMode() == DofManager_shared ) { ptr = shname; } else { ptr = unkname; } ndofs = dman->giveNumberOfDofs(); for ( j = 1; j <= ndofs; j++ ) { if ( ( _eq = dman->giveDof(j)->giveEquationNumber(dn) ) ) { fprintf( stderr, "[%d] n:%6s %d[%d] (%d), leq = %d, geq = %d\n", emodel->giveRank(), ptr, i, dman->giveGlobalNumber(), j, _eq, locGlobMap.at(_eq) ); } else { fprintf(stderr, "[%d] n:%6s %d[%d] (%d), leq = %d, geq = %d\n", emodel->giveRank(), ptr, i, dman->giveGlobalNumber(), j, _eq, 0); } } } } #endif // build reverse map int lneq; if ( et == et_standard ) { lneq = emodel->giveNumberOfEquations(ut); } else { lneq = emodel->giveNumberOfPrescribedEquations(ut); } globLocMap.clear(); for ( i = 1; i <= lneq; i++ ) { globLocMap [ locGlobMap.at(i) ] = i; } for ( p = 0; p < nproc; p++ ) { delete rbuffs [ p ]; delete buffs [ p ]; } delete[] rbuffs; delete[] buffs; delete[] leqs; MPI_Barrier(MPI_COMM_WORLD); #ifdef __VERBOSE_PARALLEL VERBOSEPARALLEL_PRINT("PetscNatural2GlobalOrdering :: init", "done", myrank); #endif }
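Stripped of the shared-node exchange, the core of the numbering scheme above is: count the equations owned locally, gather all counts on every rank, take the partial sum over lower ranks as the local offset, and number the owned equations consecutively from that offset. The remaining global numbers (for shared equations owned elsewhere) are then filled in by the message exchange shown above. A minimal MPI-only sketch of the offset part, with an illustrative function name and plain std::vector:

#include <mpi.h>
#include <vector>

static std::vector<int> numberOwnedEquations(int numOwnedLocal, MPI_Comm comm)
{
    int rank, size;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    std::vector<int> counts(size);
    MPI_Allgather(&numOwnedLocal, 1, MPI_INT, counts.data(), 1, MPI_INT, comm);

    int offset = 0;
    for ( int p = 0; p < rank; p++ ) {
        offset += counts [ p ];
    }

    std::vector<int> globalEq(numOwnedLocal);
    for ( int i = 0; i < numOwnedLocal; i++ ) {
        globalEq [ i ] = offset + i;   // zero-based global equation number
    }
    return globalEq;
}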
double UserDefDirichletBC :: give(Dof *dof, ValueModeType mode, TimeStep *stepN)
{
    double factor = this->giveLoadTimeFunction()->evaluate(stepN, mode);
    DofManager *dMan = dof->giveDofManager();

    /*
     * The Python function takes three input arguments:
     *  1) An array with node coordinates
     *  2) The dof id
     *  3) The target time of the current time step
     */
    int numArgs = 3;

    // Create array with node coordinates
    int dim = dMan->giveCoordinates()->giveSize();
    PyObject *pArgArray = PyList_New(dim);
    PyObject *pArgs = PyTuple_New(numArgs);

    for (int i = 0; i < dim; i++) {
        PyList_SET_ITEM(pArgArray, i, PyFloat_FromDouble( dMan->giveCoordinate(i+1) ));
    }

    // PyTuple_SetItem takes over responsibility for objects passed
    // to it -> no DECREF
    PyTuple_SetItem(pArgs, 0, pArgArray);

    // Dof number
    PyObject *pValDofNum = PyLong_FromLong(dof->giveDofID());
    PyTuple_SetItem(pArgs, 1, pValDofNum);

    // Time
    PyObject *pTargetTime = PyFloat_FromDouble( stepN->giveTargetTime() );
    PyTuple_SetItem(pArgs, 2, pTargetTime);

    // Value returned from the Python function
    PyObject *pRetVal;
    if ( PyCallable_Check(mpFunc) ) {
        pRetVal = PyObject_CallObject(mpFunc, pArgs);
    } else {
        OOFEM_ERROR("UserDefDirichletBC :: give: Python function is not callable.");
    }

    // Get return value
    double retVal = 0.0;
    if ( pRetVal != NULL ) {
        retVal = PyFloat_AsDouble(pRetVal);
    } else {
        OOFEM_ERROR("UserDefDirichletBC :: give: Failed to fetch Python return value.");
    }

    // Decrement reference count on pointers
    Py_DECREF(pArgs);
    Py_DECREF(pRetVal);

    return retVal*factor;
}
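The same embedding pattern is perhaps easier to follow in a self-contained form. In the sketch below the callable is created inline with PyRun_SimpleString rather than loaded from the user-supplied script, and the function name bcValue and its body are purely illustrative; only the argument packing, the call, and the reference-count handling mirror the routine above.

#include <Python.h>

static double callPythonBc(double x, double y, double z, long dofId, double time)
{
    Py_Initialize();   // Py_Finalize() deliberately omitted; a host keeps the interpreter alive
    PyRun_SimpleString("def bcValue(coords, dofid, t):\n"
                       "    return 0.001 * t * coords[0]\n");
    PyObject *main = PyImport_AddModule("__main__");            // borrowed reference
    PyObject *func = PyObject_GetAttrString(main, "bcValue");   // new reference

    PyObject *coords = PyList_New(3);
    PyList_SET_ITEM(coords, 0, PyFloat_FromDouble(x));
    PyList_SET_ITEM(coords, 1, PyFloat_FromDouble(y));
    PyList_SET_ITEM(coords, 2, PyFloat_FromDouble(z));

    PyObject *args = PyTuple_New(3);
    PyTuple_SetItem(args, 0, coords);                  // the tuple steals these references
    PyTuple_SetItem(args, 1, PyLong_FromLong(dofId));
    PyTuple_SetItem(args, 2, PyFloat_FromDouble(time));

    PyObject *ret = PyObject_CallObject(func, args);
    double val = ( ret != NULL ) ? PyFloat_AsDouble(ret) : 0.0;

    Py_XDECREF(ret);
    Py_DECREF(args);
    Py_DECREF(func);
    return val;
}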
void NlDEIDynamic :: solveYourselfAt(TimeStep *tStep) { // // Creates system of governing eq's and solves them at given time step. // Domain *domain = this->giveDomain(1); int neq = this->giveNumberOfDomainEquations( 1, EModelDefaultEquationNumbering() ); int nman = domain->giveNumberOfDofManagers(); DofManager *node; int i, k, j, jj; double coeff, maxDt, maxOm = 0.; double prevIncrOfDisplacement, incrOfDisplacement; if ( initFlag ) { #ifdef VERBOSE OOFEM_LOG_DEBUG("Assembling mass matrix\n"); #endif // // Assemble mass matrix. // this->computeMassMtrx(massMatrix, maxOm, tStep); if ( drFlag ) { // If dynamic relaxation: Assemble amplitude load vector. loadRefVector.resize(neq); loadRefVector.zero(); this->computeLoadVector(loadRefVector, VM_Total, tStep); #ifdef __PARALLEL_MODE // Compute the processor part of load vector norm pMp this->pMp = 0.0; double my_pMp = 0.0, coeff = 1.0; int eqNum, ndofman = domain->giveNumberOfDofManagers(); dofManagerParallelMode dofmanmode; DofManager *dman; for ( int dm = 1; dm <= ndofman; dm++ ) { dman = domain->giveDofManager(dm); dofmanmode = dman->giveParallelMode(); // Skip all remote and null dofmanagers coeff = 1.0; if ( ( dofmanmode == DofManager_remote ) || ( ( dofmanmode == DofManager_null ) ) ) { continue; } else if ( dofmanmode == DofManager_shared ) { coeff = 1. / dman->givePartitionsConnectivitySize(); } // For shared nodes we add locally an average = 1/givePartitionsConnectivitySize()*contribution, for ( Dof *dof: *dman ) { if ( dof->isPrimaryDof() && ( eqNum = dof->__giveEquationNumber() ) ) { my_pMp += coeff * loadRefVector.at(eqNum) * loadRefVector.at(eqNum) / massMatrix.at(eqNum); } } } // Sum up the contributions from processors. MPI_Allreduce(& my_pMp, & pMp, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); #else this->pMp = 0.0; for ( i = 1; i <= neq; i++ ) { pMp += loadRefVector.at(i) * loadRefVector.at(i) / massMatrix.at(i); } #endif // Solve for rate of loading process (parameter "c") (undamped system assumed), if ( dumpingCoef < 1.e-3 ) { c = 3.0 * this->pyEstimate / pMp / Tau / Tau; } else { c = this->pyEstimate * Tau * dumpingCoef * dumpingCoef * dumpingCoef / pMp / ( -3.0 / 2.0 + dumpingCoef * Tau + 2.0 * exp(-dumpingCoef * Tau) - 0.5 * exp(-2.0 * dumpingCoef * Tau) ); } } initFlag = 0; } if ( tStep->isTheFirstStep() ) { // // Special init step - Compute displacements at tstep 0. // displacementVector.resize(neq); displacementVector.zero(); previousIncrementOfDisplacementVector.resize(neq); previousIncrementOfDisplacementVector.zero(); velocityVector.resize(neq); velocityVector.zero(); accelerationVector.resize(neq); accelerationVector.zero(); for ( j = 1; j <= nman; j++ ) { node = domain->giveDofManager(j); for ( Dof *dof: *node ) { // Ask for initial values obtained from // bc (boundary conditions) and ic (initial conditions) // all dofs are expected to be DisplacementVector type. if ( !dof->isPrimaryDof() ) { continue; } jj = dof->__giveEquationNumber(); if ( jj ) { displacementVector.at(jj) = dof->giveUnknown(VM_Total, tStep); velocityVector.at(jj) = dof->giveUnknown(VM_Velocity, tStep); accelerationVector.at(jj) = dof->giveUnknown(VM_Acceleration, tStep); } } } // // Set-up numerical model. 
// // Try to determine the best deltaT, maxDt = 2.0 / sqrt(maxOm); if ( deltaT > maxDt ) { // Print reduced time step increment and minimum period Tmin OOFEM_LOG_RELEVANT("deltaT reduced to %e, Tmin is %e\n", maxDt, maxDt * M_PI); deltaT = maxDt; tStep->setTimeIncrement(deltaT); } for ( j = 1; j <= neq; j++ ) { previousIncrementOfDisplacementVector.at(j) = velocityVector.at(j) * ( deltaT ); displacementVector.at(j) -= previousIncrementOfDisplacementVector.at(j); } #ifdef VERBOSE OOFEM_LOG_RELEVANT( "\n\nSolving [Step number %8d, Time %15e]\n", tStep->giveNumber(), tStep->giveTargetTime() ); #endif return; } // end of init step #ifdef VERBOSE OOFEM_LOG_DEBUG("Assembling right hand side\n"); #endif displacementVector.add(previousIncrementOfDisplacementVector); // Update solution state counter tStep->incrementStateCounter(); // Compute internal forces. this->giveInternalForces(internalForces, false, 1, tStep); if ( !drFlag ) { // // Assembling the element part of load vector. // this->computeLoadVector(loadVector, VM_Total, tStep); // // Assembling additional parts of right hand side. // loadVector.subtract(internalForces); } else { // Dynamic relaxation // compute load factor pt = 0.0; #ifdef __PARALLEL_MODE double my_pt = 0.0, coeff = 1.0; int eqNum, ndofman = domain->giveNumberOfDofManagers(); dofManagerParallelMode dofmanmode; DofManager *dman; for ( int dm = 1; dm <= ndofman; dm++ ) { dman = domain->giveDofManager(dm); dofmanmode = dman->giveParallelMode(); // skip all remote and null dofmanagers coeff = 1.0; if ( ( dofmanmode == DofManager_remote ) || ( dofmanmode == DofManager_null ) ) { continue; } else if ( dofmanmode == DofManager_shared ) { coeff = 1. / dman->givePartitionsConnectivitySize(); } // For shared nodes we add locally an average= 1/givePartitionsConnectivitySize()*contribution. for ( Dof *dof: *dman ) { if ( dof->isPrimaryDof() && ( eqNum = dof->__giveEquationNumber() ) ) { my_pt += coeff * internalForces.at(eqNum) * loadRefVector.at(eqNum) / massMatrix.at(eqNum); } } } // Sum up the contributions from processors. MPI_Allreduce(& my_pt, & pt, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); #else for ( k = 1; k <= neq; k++ ) { pt += internalForces.at(k) * loadRefVector.at(k) / massMatrix.at(k); } #endif pt = pt / pMp; if ( dumpingCoef < 1.e-3 ) { pt += c * ( Tau - tStep->giveTargetTime() ) / Tau; } else { pt += c * ( 1.0 - exp( dumpingCoef * ( tStep->giveTargetTime() - Tau ) ) ) / dumpingCoef / Tau; } loadVector.resize( this->giveNumberOfDomainEquations( 1, EModelDefaultEquationNumbering() ) ); for ( k = 1; k <= neq; k++ ) { loadVector.at(k) = pt * loadRefVector.at(k) - internalForces.at(k); } // Compute relative error. double err = 0.0; #ifdef __PARALLEL_MODE double my_err = 0.0; for ( int dm = 1; dm <= ndofman; dm++ ) { dman = domain->giveDofManager(dm); dofmanmode = dman->giveParallelMode(); // Skip all remote and null dofmanagers. coeff = 1.0; if ( ( dofmanmode == DofManager_remote ) || ( dofmanmode == DofManager_null ) ) { continue; } else if ( dofmanmode == DofManager_shared ) { coeff = 1. / dman->givePartitionsConnectivitySize(); } // For shared nodes we add locally an average= 1/givePartitionsConnectivitySize()*contribution. for ( Dof *dof: *dman ) { if ( dof->isPrimaryDof() && ( eqNum = dof->__giveEquationNumber() ) ) { my_err += coeff * loadVector.at(eqNum) * loadVector.at(eqNum) / massMatrix.at(eqNum); } } } // Sum up the contributions from processors. 
        MPI_Allreduce(& my_err, & err, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#else
        for ( k = 1; k <= neq; k++ ) {
            err += loadVector.at(k) * loadVector.at(k) / massMatrix.at(k);
        }
#endif
        err = err / ( pMp * pt * pt );
        OOFEM_LOG_RELEVANT("Relative error is %e, loadlevel is %e\n", err, pt);
    }

    for ( j = 1; j <= neq; j++ ) {
        coeff = massMatrix.at(j);
        loadVector.at(j) += coeff * ( ( 1. / ( deltaT * deltaT ) ) - dumpingCoef * 1. / ( 2. * deltaT ) ) *
                            previousIncrementOfDisplacementVector.at(j);
    }

    //
    // Set-up numerical model
    //
    /* it is not necessary to call numerical method
     * approach used here is not good, but effective enough
     * inverse of diagonal mass matrix is done here
     */
    //
    // call numerical model to solve the arising problem - done locally here
    //
#ifdef VERBOSE
    OOFEM_LOG_RELEVANT( "\n\nSolving [Step number %8d, Time %15e]\n", tStep->giveNumber(), tStep->giveTargetTime() );
#endif

    //    NM_Status s = nMethod->solve(*massMatrix, loadVector, displacementVector);
    //    if ( !(s & NM_Success) ) {
    //        OOFEM_ERROR("No success in solving system. Ma=f");
    //    }

    for ( i = 1; i <= neq; i++ ) {
        prevIncrOfDisplacement = previousIncrementOfDisplacementVector.at(i);
        incrOfDisplacement = loadVector.at(i) /
                             ( massMatrix.at(i) * ( 1. / ( deltaT * deltaT ) + dumpingCoef / ( 2. * deltaT ) ) );
        accelerationVector.at(i) = ( incrOfDisplacement - prevIncrOfDisplacement ) / ( deltaT * deltaT );
        velocityVector.at(i) = ( incrOfDisplacement + prevIncrOfDisplacement ) / ( 2. * deltaT );
        previousIncrementOfDisplacementVector.at(i) = incrOfDisplacement;
    }
}
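Per equation i, with the lumped mass M_ii, the mass-proportional damping coefficient ξ (dumpingCoef in the code) and step Δt, the explicit update at the end of the routine effectively evaluates

\[
\Delta u_i^{\,n+1} \;=\; \frac{f_i \;+\; M_{ii}\!\left(\tfrac{1}{\Delta t^2} - \tfrac{\xi}{2\Delta t}\right)\Delta u_i^{\,n}}
{M_{ii}\!\left(\tfrac{1}{\Delta t^2} + \tfrac{\xi}{2\Delta t}\right)},
\qquad
a_i = \frac{\Delta u_i^{\,n+1} - \Delta u_i^{\,n}}{\Delta t^2},
\qquad
v_i = \frac{\Delta u_i^{\,n+1} + \Delta u_i^{\,n}}{2\,\Delta t},
\]

where f_i is the right-hand side assembled just before the damping/inertia correction term is added.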
void SloanGraph :: initialize()
{
    int i, j, k, ielemnodes, ielemintdmans, ndofmans;
    int nnodes = domain->giveNumberOfDofManagers();
    int nelems = domain->giveNumberOfElements();
    int nbcs = domain->giveNumberOfBoundaryConditions();
    Element *ielem;
    GeneralBoundaryCondition *ibc;

    ///@todo Use std::list for this first part instead (suboptimization?)
    this->nodes.growTo(nnodes);

    // Add dof managers.
    for ( i = 1; i <= nnodes; i++ ) {
        SloanGraphNode *node = new SloanGraphNode(this, i);
        nodes.put(i, node);
        dmans.put(i, domain->giveDofManager(i) );
    }
    k = nnodes;
    // Add element internal dof managers
    for ( i = 1; i <= nelems; i++ ) {
        ielem = domain->giveElement(i);
        this->nodes.growTo(k+ielem->giveNumberOfInternalDofManagers());
        for ( j = 1; j <= ielem->giveNumberOfInternalDofManagers(); ++j ) {
            k++;
            SloanGraphNode *node = new SloanGraphNode(this, k);
            nodes.put(k, node);
            dmans.put(k, ielem->giveInternalDofManager(j) );
        }
    }
    // Add boundary condition internal dof managers
    for ( i = 1; i <= nbcs; i++ ) {
        ibc = domain->giveBc(i);
        if (ibc) {
            this->nodes.growTo(k+ibc->giveNumberOfInternalDofManagers());
            for ( j = 1; j <= ibc->giveNumberOfInternalDofManagers(); ++j ) {
                k++;
                SloanGraphNode *node = new SloanGraphNode(this, k);
                nodes.put(k, node);
                dmans.put(k, ibc->giveInternalDofManager(j) );
            }
        }
    }

    IntArray connections;
    for ( i = 1; i <= nelems; i++ ) {
        ielem = domain->giveElement(i);
        ielemnodes = ielem->giveNumberOfDofManagers();
        ielemintdmans = ielem->giveNumberOfInternalDofManagers();
        ndofmans = ielemnodes + ielemintdmans;
        connections.resize(ndofmans);
        for ( j = 1; j <= ielemnodes; j++ ) {
            connections.at(j) = ielem->giveDofManager(j)->giveNumber();
        }
        for ( j = 1; j <= ielemintdmans; j++ ) {
            connections.at(ielemnodes+j) = ielem->giveInternalDofManager(j)->giveNumber();
        }
        for ( j = 1; j <= ndofmans; j++ ) {
            for ( k = j + 1; k <= ndofmans; k++ ) {
                // Connect both ways
                this->giveNode( connections.at(j) )->addNeighbor( connections.at(k) );
                this->giveNode( connections.at(k) )->addNeighbor( connections.at(j) );
            }
        }
    }
    ///@todo Add connections from dof managers to boundary condition internal dof managers.
// loop over dof managers and test if there are some "slave" or rigidArm connection // if yes, such dependency is reflected in the graph by introducing additional // graph edges between slaves and corresponding masters /* * DofManager* iDofMan; * for (i=1; i <= nnodes; i++){ * if (domain->giveDofManager (i)->hasAnySlaveDofs()) { * iDofMan = domain->giveDofManager (i); * if (iDofMan->giveClassID() == RigidArmNodeClass) { * // rigid arm node -> has only one master * int master = ((RigidArmNode*)iDofMan)->giveMasterDofMngr()->giveNumber(); * // add edge * this->giveNode(i)->addNeighbor (master); * this->giveNode(master)->addNeighbor(i); * * } else { * // slave dofs are present in dofManager * // first - ask for masters, these may be different for each dof * int j; * for (j=1; j<=iDofMan->giveNumberOfDofs(); j++) * if (iDofMan->giveDof (j)->giveClassID() == SimpleSlaveDofClass) { * int master = ((SimpleSlaveDof*) iDofMan->giveDof (j))->giveMasterDofManagerNum(); * // add edge * this->giveNode(i)->addNeighbor (master); * this->giveNode(master)->addNeighbor(i); * * } * } * } * } // end dof man loop */ std :: set< int, std :: less< int > >masters; std :: set< int, std :: less< int > > :: iterator it; IntArray dofMasters; DofManager *iDofMan; for ( i = 1; i <= nnodes; i++ ) { if ( domain->giveDofManager(i)->hasAnySlaveDofs() ) { // slave dofs are present in dofManager // first - ask for masters, these may be different for each dof masters.clear(); iDofMan = domain->giveDofManager(i); int j, k; for ( j = 1; j <= iDofMan->giveNumberOfDofs(); j++ ) { if ( !iDofMan->giveDof(j)->isPrimaryDof() ) { iDofMan->giveDof(j)->giveMasterDofManArray(dofMasters); for ( k = 1; k <= dofMasters.giveSize(); k++ ) { masters.insert( dofMasters.at(k) ); } } } for ( it = masters.begin(); it != masters.end(); ++it ) { this->giveNode(i)->addNeighbor( * ( it ) ); this->giveNode( * ( it ) )->addNeighbor(i); } } } // end dof man loop }
void ParmetisLoadBalancer :: labelDofManagers() { int idofman, ndofman = domain->giveNumberOfDofManagers(); ConnectivityTable *ct = domain->giveConnectivityTable(); const IntArray *dofmanconntable; DofManager *dofman; Element *ielem; dofManagerParallelMode dmode; std :: set< int, std :: less< int > >__dmanpartitions; int myrank = domain->giveEngngModel()->giveRank(); int nproc = domain->giveEngngModel()->giveNumberOfProcesses(); int ie, npart; // resize label array dofManState.resize(ndofman); dofManState.zero(); // resize dof man partitions dofManPartitions.clear(); dofManPartitions.resize(ndofman); #ifdef ParmetisLoadBalancer_DEBUG_PRINT int _cols = 0; fprintf(stderr, "[%d] DofManager labels:\n", myrank); #endif // loop over local dof managers for ( idofman = 1; idofman <= ndofman; idofman++ ) { dofman = domain->giveDofManager(idofman); dmode = dofman->giveParallelMode(); if ( ( dmode == DofManager_local ) || ( dmode == DofManager_shared ) ) { dofmanconntable = ct->giveDofManConnectivityArray(idofman); __dmanpartitions.clear(); for ( ie = 1; ie <= dofmanconntable->giveSize(); ie++ ) { ielem = domain->giveElement( dofmanconntable->at(ie) ); // assemble list of partitions sharing idofman dofmanager // set is used to include possibly repeated partition only once if ( ielem->giveParallelMode() == Element_local ) { __dmanpartitions.insert( giveElementPartition( dofmanconntable->at(ie) ) ); } } npart = __dmanpartitions.size(); dofManPartitions [ idofman - 1 ].resize( __dmanpartitions.size() ); int i = 1; for ( auto &dm: __dmanpartitions ) { dofManPartitions [ idofman - 1 ].at(i++) = dm; } } } // handle master slave links between dofmans (master and slave required on same partition) this->handleMasterSlaveDofManLinks(); /* Exchange new partitions for shared nodes */ CommunicatorBuff cb(nproc, CBT_dynamic); Communicator com(domain->giveEngngModel(), &cb, myrank, nproc, CommMode_Dynamic); com.packAllData(this, & ParmetisLoadBalancer :: packSharedDmanPartitions); com.initExchange(SHARED_DOFMAN_PARTITIONS_TAG); com.unpackAllData(this, & ParmetisLoadBalancer :: unpackSharedDmanPartitions); com.finishExchange(); /* label dof managers */ for ( idofman = 1; idofman <= ndofman; idofman++ ) { dofman = domain->giveDofManager(idofman); dmode = dofman->giveParallelMode(); npart = dofManPartitions [ idofman - 1 ].giveSize(); if ( ( dmode == DofManager_local ) || ( dmode == DofManager_shared ) ) { // determine its state after balancing -> label dofManState.at(idofman) = this->determineDofManState(idofman, myrank, npart, & dofManPartitions [ idofman - 1 ]); } else { dofManState.at(idofman) = DM_NULL; } } #ifdef ParmetisLoadBalancer_DEBUG_PRINT for ( idofman = 1; idofman <= ndofman; idofman++ ) { fprintf(stderr, " | %d: ", idofman); if ( dofManState.at(idofman) == DM_NULL ) { fprintf(stderr, "NULL "); } else if ( dofManState.at(idofman) == DM_Local ) { fprintf(stderr, "Local "); } else if ( dofManState.at(idofman) == DM_Shared ) { fprintf(stderr, "Shared"); } else if ( dofManState.at(idofman) == DM_Remote ) { fprintf(stderr, "Remote"); } else { fprintf(stderr, "Unknown"); } //else if (dofManState.at(idofman) == DM_SharedExclude)fprintf (stderr, "ShdExc"); //else if (dofManState.at(idofman) == DM_SharedNew) fprintf (stderr, "ShdNew"); //else if (dofManState.at(idofman) == DM_SharedUpdate) fprintf (stderr, "ShdUpd"); if ( ( ( ++_cols % 4 ) == 0 ) || ( idofman == ndofman ) ) { fprintf(stderr, "\n"); } } #endif }
int DSSMatrix :: buildInternalStructure(EngngModel *eModel, int di, const UnknownNumberingScheme &s) { IntArray loc; Domain *domain = eModel->giveDomain(di); int neq = eModel->giveNumberOfDomainEquations(di, s); unsigned long indx; // allocation map std :: vector< std :: set< int > >columns(neq); unsigned long nz_ = 0; for ( auto &elem : domain->giveElements() ) { elem->giveLocationArray(loc, s); for ( int ii : loc ) { if ( ii > 0 ) { for ( int jj : loc ) { if ( jj > 0 ) { columns [ jj - 1 ].insert(ii - 1); } } } } } // loop over active boundary conditions std::vector<IntArray> r_locs; std::vector<IntArray> c_locs; for ( auto &gbc : domain->giveBcs() ) { ActiveBoundaryCondition *bc = dynamic_cast< ActiveBoundaryCondition * >( gbc.get() ); if ( bc != NULL ) { bc->giveLocationArrays(r_locs, c_locs, UnknownCharType, s, s); for (std::size_t k = 0; k < r_locs.size(); k++) { IntArray &krloc = r_locs[k]; IntArray &kcloc = c_locs[k]; for ( int ii : krloc ) { if ( ii > 0 ) { for ( int jj : kcloc ) { if ( jj > 0 ) { columns [ jj - 1 ].insert(ii - 1); } } } } } } } for ( int i = 0; i < neq; i++ ) { nz_ += columns [ i ].size(); } unsigned long *rowind_ = new unsigned long [ nz_ ]; unsigned long *colptr_ = new unsigned long [ neq + 1 ]; if ( ( rowind_ == NULL ) || ( colptr_ == NULL ) ) { OOFEM_ERROR("free store exhausted, exiting"); } indx = 0; for ( int j = 0; j < neq; j++ ) { // column loop colptr_ [ j ] = indx; for ( auto &val : columns [ j ] ) { // row loop rowind_ [ indx++ ] = val; } } colptr_ [ neq ] = indx; _sm.reset( new SparseMatrixF(neq, NULL, rowind_, colptr_, 0, 0, true) ); if ( !_sm ) { OOFEM_FATAL("free store exhausted, exiting"); } /* * Assemble block to equation mapping information */ bool _succ = true; int _ndofs, _neq, ndofmans = domain->giveNumberOfDofManagers(); int ndofmansbc = 0; ///@todo This still misses element internal dofs. 
// count number of internal dofmans on active bc for ( auto &bc : domain->giveBcs() ) { ndofmansbc += bc->giveNumberOfInternalDofManagers(); } int bsize = 0; if ( ndofmans > 0 ) { bsize = domain->giveDofManager(1)->giveNumberOfDofs(); } long *mcn = new long [ (ndofmans+ndofmansbc) * bsize ]; long _c = 0; if ( mcn == NULL ) { OOFEM_FATAL("free store exhausted, exiting"); } for ( auto &dman : domain->giveDofManagers() ) { _ndofs = dman->giveNumberOfDofs(); if ( _ndofs > bsize ) { _succ = false; break; } for ( Dof *dof: *dman ) { if ( dof->isPrimaryDof() ) { _neq = dof->giveEquationNumber(s); if ( _neq > 0 ) { mcn [ _c++ ] = _neq - 1; } else { mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure } } else { mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure } } for ( int i = _ndofs + 1; i <= bsize; i++ ) { mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure } } // loop over internal dofmans of active bc for ( auto &bc : domain->giveBcs() ) { int ndman = bc->giveNumberOfInternalDofManagers(); for (int idman = 1; idman <= ndman; idman ++) { DofManager *dman = bc->giveInternalDofManager(idman); _ndofs = dman->giveNumberOfDofs(); if ( _ndofs > bsize ) { _succ = false; break; } for ( Dof *dof: *dman ) { if ( dof->isPrimaryDof() ) { _neq = dof->giveEquationNumber(s); if ( _neq > 0 ) { mcn [ _c++ ] = _neq - 1; } else { mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure } } } for ( int i = _ndofs + 1; i <= bsize; i++ ) { mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure } } } if ( _succ ) { _dss->SetMatrixPattern(_sm.get(), bsize); _dss->LoadMCN(ndofmans+ndofmansbc, bsize, mcn); } else { OOFEM_LOG_INFO("DSSMatrix: using assumed block structure"); _dss->SetMatrixPattern(_sm.get(), bsize); } _dss->PreFactorize(); // zero matrix, put unity on diagonal with supported dofs _dss->LoadZeros(); delete[] mcn; OOFEM_LOG_DEBUG("DSSMatrix info: neq is %d, bsize is %d\n", neq, nz_); // increment version this->version++; return true; }
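The sparsity-pattern assembly above can be read independently of DSS itself: a per-column set collects the row indices coupled by each element's location array (duplicates are removed by the set), and the sets are then flattened into compressed-column colptr/rowind storage. A standalone sketch of exactly that step, using std:: containers instead of the raw new[] arrays, with the same zero-based convention:

#include <set>
#include <vector>

static void buildPattern(int neq,
                         const std::vector< std::vector<int> > &elementLocations, // 1-based eq numbers, 0 = prescribed
                         std::vector<std::size_t> &colptr,
                         std::vector<std::size_t> &rowind)
{
    std::vector< std::set<int> > columns(neq);
    for ( const auto &loc : elementLocations ) {
        for ( int ii : loc ) {
            if ( ii > 0 ) {
                for ( int jj : loc ) {
                    if ( jj > 0 ) {
                        columns [ jj - 1 ].insert(ii - 1);
                    }
                }
            }
        }
    }

    colptr.assign(neq + 1, 0);
    rowind.clear();
    for ( int j = 0; j < neq; j++ ) {          // column loop
        colptr [ j ] = rowind.size();
        for ( int row : columns [ j ] ) {      // row loop, already sorted and unique
            rowind.push_back(row);
        }
    }
    colptr [ neq ] = rowind.size();
}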
void DEIDynamic :: solveYourselfAt(TimeStep *tStep) { // // creates system of governing eq's and solves them at given time step // // this is an explicit problem: we assemble governing equating at time t // and solution is obtained for time t+dt // // first assemble problem at current time step to obtain results in following // time step. // and then print results for this step also. // for first time step we need special start code Domain *domain = this->giveDomain(1); int nelem = domain->giveNumberOfElements(); int nman = domain->giveNumberOfDofManagers(); IntArray loc; Element *element; DofManager *node; Dof *iDof; int nDofs, neq; int i, k, n, j, jj, kk, init = 0; double coeff, maxDt, maxOmi, maxOm = 0., maxOmEl, c1, c2, c3; FloatMatrix charMtrx, charMtrx2; FloatArray previousDisplacementVector; neq = this->giveNumberOfEquations(EID_MomentumBalance); if ( tStep->giveNumber() == giveNumberOfFirstStep() ) { init = 1; #ifdef VERBOSE OOFEM_LOG_INFO("Assembling mass matrix\n"); #endif // // first step assemble mass Matrix // massMatrix.resize(neq); massMatrix.zero(); EModelDefaultEquationNumbering dn; for ( i = 1; i <= nelem; i++ ) { element = domain->giveElement(i); element->giveLocationArray(loc, EID_MomentumBalance, dn); element->giveCharacteristicMatrix(charMtrx, LumpedMassMatrix, tStep); // charMtrx.beLumpedOf(fullCharMtrx); element->giveCharacteristicMatrix(charMtrx2, StiffnessMatrix, tStep); // // assemble it manually // #ifdef DEBUG if ( ( n = loc.giveSize() ) != charMtrx.giveNumberOfRows() ) { _error("solveYourselfAt : dimension mismatch"); } #endif n = loc.giveSize(); maxOmEl = 0.; for ( j = 1; j <= n; j++ ) { if ( charMtrx.at(j, j) > ZERO_MASS ) { maxOmi = charMtrx2.at(j, j) / charMtrx.at(j, j); if ( init ) { maxOmEl = ( maxOmEl > maxOmi ) ? ( maxOmEl ) : ( maxOmi ); } } } maxOm = ( maxOm > maxOmEl ) ? ( maxOm ) : ( maxOmEl ); for ( j = 1; j <= n; j++ ) { jj = loc.at(j); if ( ( jj ) && ( charMtrx.at(j, j) <= ZERO_MASS ) ) { charMtrx.at(j, j) = charMtrx2.at(j, j) / maxOmEl; } } for ( j = 1; j <= n; j++ ) { jj = loc.at(j); if ( jj ) { massMatrix.at(jj) += charMtrx.at(j, j); } } } // if init - try to determine the best deltaT if ( init ) { maxDt = 2 / sqrt(maxOm); if ( deltaT > maxDt ) { OOFEM_LOG_RELEVANT("DEIDynamic: deltaT reduced to %e\n", maxDt); deltaT = maxDt; tStep->setTimeIncrement(deltaT); } } // // special init step - compute displacements at tstep 0 // displacementVector.resize(neq); displacementVector.zero(); nextDisplacementVector.resize(neq); nextDisplacementVector.zero(); velocityVector.resize(neq); velocityVector.zero(); accelerationVector.resize(neq); accelerationVector.zero(); for ( j = 1; j <= nman; j++ ) { node = domain->giveDofManager(j); nDofs = node->giveNumberOfDofs(); for ( k = 1; k <= nDofs; k++ ) { // ask for initial values obtained from // bc (boundary conditions) and ic (initial conditions) // now we are setting initial cond. for step -1. iDof = node->giveDof(k); if ( !iDof->isPrimaryDof() ) { continue; } jj = iDof->__giveEquationNumber(); if ( jj ) { nextDisplacementVector.at(jj) = iDof->giveUnknown(EID_MomentumBalance, VM_Total, tStep); // become displacementVector after init velocityVector.at(jj) = iDof->giveUnknown(EID_MomentumBalance, VM_Velocity, tStep); // accelerationVector = iDof->giveUnknown(AccelerartionVector,tStep) ; } } } for ( j = 1; j <= neq; j++ ) { nextDisplacementVector.at(j) -= velocityVector.at(j) * ( deltaT ); } return; } // end of init step #ifdef VERBOSE OOFEM_LOG_INFO("Assembling right hand side\n"); #endif c1 = ( 1. 
/ ( deltaT * deltaT ) ); c2 = ( 1. / ( 2. * deltaT ) ); c3 = ( 2. / ( deltaT * deltaT ) ); previousDisplacementVector = displacementVector; displacementVector = nextDisplacementVector; // // assembling the element part of load vector // loadVector.resize( this->giveNumberOfEquations(EID_MomentumBalance) ); loadVector.zero(); this->assembleVector(loadVector, tStep, EID_MomentumBalance, ExternalForcesVector, VM_Total, EModelDefaultEquationNumbering(), domain); // // assembling additional parts of right hand side // EModelDefaultEquationNumbering dn; for ( i = 1; i <= nelem; i++ ) { element = domain->giveElement(i); element->giveLocationArray(loc, EID_MomentumBalance, dn); element->giveCharacteristicMatrix(charMtrx, StiffnessMatrix, tStep); n = loc.giveSize(); for ( j = 1; j <= n; j++ ) { jj = loc.at(j); if ( jj ) { for ( k = 1; k <= n; k++ ) { kk = loc.at(k); if ( kk ) { loadVector.at(jj) -= charMtrx.at(j, k) * displacementVector.at(kk); } } // // if init step - find minimum period of vibration in order to // determine maximal admissible time step // //maxOmi = charMtrx.at(j,j)/massMatrix.at(jj) ; //if (init) maxOm = (maxOm > maxOmi) ? (maxOm) : (maxOmi) ; } } } for ( j = 1; j <= neq; j++ ) { coeff = massMatrix.at(j); loadVector.at(j) += coeff * c3 * displacementVector.at(j) - coeff * ( c1 - dumpingCoef * c2 ) * previousDisplacementVector.at(j); } // // set-up numerical model // /* it is not necessary to call numerical method * approach used here is not good, but effective enough * inverse of diagonal mass matrix is done here */ // // call numerical model to solve arose problem - done locally here // #ifdef VERBOSE OOFEM_LOG_RELEVANT( "Solving [step number %8d, time %15e]\n", tStep->giveNumber(), tStep->giveTargetTime() ); #endif double prevD; for ( i = 1; i <= neq; i++ ) { prevD = previousDisplacementVector.at(i); nextDisplacementVector.at(i) = loadVector.at(i) / ( massMatrix.at(i) * ( c1 + dumpingCoef * c2 ) ); velocityVector.at(i) = nextDisplacementVector.at(i) - prevD; accelerationVector.at(i) = nextDisplacementVector.at(i) - 2. * displacementVector.at(i) + prevD; } accelerationVector.times(c1); velocityVector.times(c2); }
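With the abbreviations used above, c1 = 1/Δt², c2 = 1/(2Δt), c3 = 2/Δt² and the damping coefficient ξ (dumpingCoef), the final loop effectively evaluates the standard central-difference recurrence per equation i:

\[
u_i^{\,n+1} = \frac{f_i + M_{ii}\,c_3\,u_i^{\,n} - M_{ii}\,(c_1 - \xi c_2)\,u_i^{\,n-1}}{M_{ii}\,(c_1 + \xi c_2)},
\qquad
v_i = c_2\,(u_i^{\,n+1} - u_i^{\,n-1}),
\qquad
a_i = c_1\,(u_i^{\,n+1} - 2u_i^{\,n} + u_i^{\,n-1}),
\]

where f_i is the right-hand side assembled just before the loop.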
int DSSMatrix :: buildInternalStructure(EngngModel *eModel, int di, EquationID ut, const UnknownNumberingScheme &s)
{
    IntArray loc;
    Domain *domain = eModel->giveDomain(di);
    int neq = eModel->giveNumberOfDomainEquations(di, s);
    int nelem = domain->giveNumberOfElements();
    int i, ii, j, jj, n;
    unsigned long indx;
    Element *elem;
    // allocation map
    std :: vector< std :: set< int > > columns(neq);

    unsigned long nz_ = 0;

    for ( n = 1; n <= nelem; n++ ) {
        elem = domain->giveElement(n);
        elem->giveLocationArray(loc, ut, s);

        for ( i = 1; i <= loc.giveSize(); i++ ) {
            if ( ( ii = loc.at(i) ) ) {
                for ( j = 1; j <= loc.giveSize(); j++ ) {
                    if ( ( jj = loc.at(j) ) ) {
                        columns [ jj - 1 ].insert(ii - 1);
                    }
                }
            }
        }
    }

    // loop over active boundary conditions
    int nbc = domain->giveNumberOfBoundaryConditions();
    std :: vector< IntArray > r_locs;
    std :: vector< IntArray > c_locs;

    for ( i = 1; i <= nbc; ++i ) {
        ActiveBoundaryCondition *bc = dynamic_cast< ActiveBoundaryCondition * >( domain->giveBc(i) );
        if ( bc != NULL ) {
            bc->giveLocationArrays(r_locs, c_locs, ut, UnknownCharType, s, s);
            for ( std :: size_t k = 0; k < r_locs.size(); k++ ) {
                IntArray &krloc = r_locs [ k ];
                IntArray &kcloc = c_locs [ k ];
                for ( int ri = 1; ri <= krloc.giveSize(); ri++ ) {
                    if ( ( ii = krloc.at(ri) ) ) {
                        for ( j = 1; j <= kcloc.giveSize(); j++ ) {
                            if ( ( jj = kcloc.at(j) ) ) {
                                columns [ jj - 1 ].insert(ii - 1);
                            }
                        }
                    }
                }
            }
        }
    }

    for ( i = 0; i < neq; i++ ) {
        nz_ += columns [ i ].size();
    }

    unsigned long *rowind_ = new unsigned long [ nz_ ];
    unsigned long *colptr_ = new unsigned long [ neq + 1 ];
    if ( ( rowind_ == NULL ) || ( colptr_ == NULL ) ) {
        OOFEM_ERROR("DSSMatrix::buildInternalStructure: free store exhausted, exiting");
    }

    indx = 0;

    std :: set< int > :: iterator pos;
    for ( j = 0; j < neq; j++ ) { // column loop
        colptr_ [ j ] = indx;
        for ( pos = columns [ j ].begin(); pos != columns [ j ].end(); ++pos ) { // row loop
            rowind_ [ indx++ ] = * pos;
        }
    }

    colptr_ [ neq ] = indx;

    if ( _sm ) {
        delete _sm;
    }

    if ( ( _sm = new SparseMatrixF(neq, NULL, rowind_, colptr_, 0, 0, true) ) == NULL ) {
        OOFEM_ERROR("DSSMatrix::buildInternalStructure: free store exhausted, exiting");
    }

    int bsize = eModel->giveDomain(1)->giveDefaultNodeDofIDArry().giveSize();
    /*
     * Assemble block to equation mapping information
     */
    bool _succ = true;
    int _ndofs, _neq, ndofmans = domain->giveNumberOfDofManagers();
    int ndofmansbc = 0;

    // count number of internal dofmans on active bc
    for ( n = 1; n <= nbc; n++ ) {
        ndofmansbc += domain->giveBc(n)->giveNumberOfInternalDofManagers();
    }

    long *mcn = new long [ ( ndofmans + ndofmansbc ) * bsize ];
    long _c = 0;
    DofManager *dman;

    if ( mcn == NULL ) {
        OOFEM_ERROR("DSSMatrix::buildInternalStructure: free store exhausted, exiting");
    }

    for ( n = 1; n <= ndofmans; n++ ) {
        dman = domain->giveDofManager(n);
        _ndofs = dman->giveNumberOfDofs();
        if ( _ndofs > bsize ) {
            _succ = false;
            break;
        }

        for ( i = 1; i <= _ndofs; i++ ) {
            if ( dman->giveDof(i)->isPrimaryDof() ) {
                _neq = dman->giveDof(i)->giveEquationNumber(s);
                if ( _neq > 0 ) {
                    mcn [ _c++ ] = _neq - 1;
                } else {
                    mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure
                }
            }
        }

        for ( i = _ndofs + 1; i <= bsize; i++ ) {
            mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure
        }
    }

    // loop over internal dofmans of active bc
    for ( int ibc = 1; ibc <= nbc; ibc++ ) {
        int ndman = domain->giveBc(ibc)->giveNumberOfInternalDofManagers();
        for ( int idman = 1; idman <= ndman; idman++ ) {
            dman = domain->giveBc(ibc)->giveInternalDofManager(idman);
            _ndofs = dman->giveNumberOfDofs();
            if ( _ndofs > bsize ) {
                _succ = false;
                break;
            }

            for ( i = 1; i <= _ndofs; i++ ) {
                if ( dman->giveDof(i)->isPrimaryDof() ) {
                    _neq = dman->giveDof(i)->giveEquationNumber(s);
                    if ( _neq > 0 ) {
                        mcn [ _c++ ] = _neq - 1;
                    } else {
                        mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure
                    }
                }
            }

            for ( i = _ndofs + 1; i <= bsize; i++ ) {
                mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure
            }
        }
    }

    if ( _succ ) {
        _dss->SetMatrixPattern(_sm, bsize);
        _dss->LoadMCN(ndofmans + ndofmansbc, bsize, mcn);
    } else {
        OOFEM_LOG_INFO("DSSMatrix: using assumed block structure");
        _dss->SetMatrixPattern(_sm, bsize);
    }

    _dss->PreFactorize();
    // zero matrix, put unity on diagonal with supported dofs
    _dss->LoadZeros();
    delete [] mcn;

    OOFEM_LOG_DEBUG("DSSMatrix info: neq is %d, nz is %lu\n", neq, nz_);

    // increment version
    this->version++;

    return true;
}
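/*
 * Editor's sketch (not part of OOFEM): buildInternalStructure above collects, for every column,
 * the set of row indices that may become nonzero, and then flattens those sets into the
 * compressed-sparse-column pair (rowind_, colptr_) handed to SparseMatrixF. The standalone
 * example below repeats that flattening for a tiny 3x3 pattern; all names are illustrative.
 * As in the function above, colptr has neq + 1 entries so that colptr[neq] equals the total
 * number of stored entries (mirroring colptr_[neq] = indx).
 */
#include <cstdio>
#include <set>
#include <vector>

int main()
{
    // Column-wise nonzero pattern of a 3x3 matrix:
    //   [ x . x ]
    //   [ . x . ]
    //   [ x . x ]
    std::vector< std::set< int > > columns {
        { 0, 2 },   // column 0 couples rows 0 and 2
        { 1 },      // column 1 couples row 1 only
        { 0, 2 }    // column 2 couples rows 0 and 2
    };

    std::size_t neq = columns.size(), nz = 0;
    for ( const auto &col : columns ) {
        nz += col.size();
    }

    // Flatten into CSC arrays: rowind holds the row index of every stored entry,
    // and colptr[j] .. colptr[j+1]-1 is the slice of rowind belonging to column j.
    std::vector< unsigned long > rowind;
    std::vector< unsigned long > colptr(neq + 1);
    rowind.reserve(nz);
    for ( std::size_t j = 0; j < neq; j++ ) {
        colptr [ j ] = rowind.size();
        for ( int row : columns [ j ] ) { // std::set iterates in ascending row order
            rowind.push_back(row);
        }
    }
    colptr [ neq ] = rowind.size();

    for ( std::size_t j = 0; j < neq; j++ ) {
        std::printf("column %zu: rows", j);
        for ( unsigned long p = colptr [ j ]; p < colptr [ j + 1 ]; p++ ) {
            std::printf(" %lu", rowind [ p ]);
        }
        std::printf("\n");
    }
    return 0;
}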