int
EIPrimaryUnknownMapper :: mapAndUpdate(FloatArray &answer, ValueModeType mode,
                                       Domain *oldd, Domain *newd, TimeStep *tStep)
{
    int inode, nd_nnodes = newd->giveNumberOfDofManagers();
    int nsize = newd->giveEngngModel()->giveNumberOfDomainEquations( newd->giveNumber(), EModelDefaultEquationNumbering() );
    FloatArray unknownValues;
    IntArray dofidMask, locationArray;
    IntArray reglist;
#ifdef OOFEM_MAPPING_CHECK_REGIONS
    ConnectivityTable *conTable = newd->giveConnectivityTable();
    const IntArray *nodeConnectivity;
#endif

    answer.resize(nsize);
    answer.zero();

    for ( inode = 1; inode <= nd_nnodes; inode++ ) {
        DofManager *node = newd->giveNode(inode);
        /* HUHU CHEATING */
#ifdef __PARALLEL_MODE
        if ( ( node->giveParallelMode() == DofManager_null ) ||
             ( node->giveParallelMode() == DofManager_remote ) ) {
            continue;
        }
#endif
#ifdef OOFEM_MAPPING_CHECK_REGIONS
        // build up region list for node
        nodeConnectivity = conTable->giveDofManConnectivityArray(inode);
        reglist.resize( nodeConnectivity->giveSize() );
        reglist.clear();
        for ( int indx = 1; indx <= nodeConnectivity->giveSize(); indx++ ) {
            reglist.insertSortedOnce( newd->giveElement( nodeConnectivity->at(indx) )->giveRegionNumber() );
        }
#endif
        ///@todo Shouldn't we pass a primary field or something to this function?
        if ( this->evaluateAt(unknownValues, dofidMask, mode, oldd, * node->giveCoordinates(), reglist, tStep) ) {
            ///@todo This doesn't respect local coordinate systems in nodes. Supporting that would require major reworking.
            for ( int ii = 1; ii <= dofidMask.giveSize(); ii++ ) {
                // exclude slaves; they are determined from masters
                auto it = node->findDofWithDofId( (DofIDItem) dofidMask.at(ii) );
                if ( it != node->end() ) {
                    Dof *dof = *it;
                    if ( dof->isPrimaryDof() ) {
                        int eq = dof->giveEquationNumber( EModelDefaultEquationNumbering() );
                        answer.at(eq) += unknownValues.at(ii);
                    }
                }
            }
        } else {
            OOFEM_ERROR("evaluateAt service failed for node %d", inode);
        }
    }

    return 1;
}
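/*
 * Usage sketch (illustrative only; the driver objects and variable names below are
 * assumptions, not taken from this file). After adaptive remeshing has produced a
 * new domain, the mapper can transfer the primary unknowns of the old
 * discretization onto the new one:
 *
 *     EIPrimaryUnknownMapper mapper;
 *     FloatArray mappedSolution;
 *     mapper.mapAndUpdate(mappedSolution, VM_Total, oldDomain, newDomain, tStep);
 *     // mappedSolution now holds one accumulated value per equation of newDomain,
 *     // indexed by EModelDefaultEquationNumbering.
 */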
bool
NRSolver :: checkConvergence(FloatArray &RT, FloatArray &F, FloatArray &rhs, FloatArray &ddX, FloatArray &X,
                             double RRT, const FloatArray &internalForcesEBENorm,
                             int nite, bool &errorOutOfRange, TimeStep *tNow)
{
    double forceErr, dispErr;
    FloatArray dg_forceErr, dg_dispErr, dg_totalLoadLevel, dg_totalDisp;
    bool answer;
    EModelDefaultEquationNumbering dn;
#ifdef __PARALLEL_MODE
 #ifdef __PETSC_MODULE
    PetscContext *parallel_context = engngModel->givePetscContext(this->domain->giveNumber());
    Natural2LocalOrdering *n2l = parallel_context->giveN2Lmap();
 #endif
#endif

    /*
     * The force errors are (if possible) evaluated as relative errors.
     * If the norm of applied load vector is zero (one may load by temperature, etc)
     * then the norm of reaction forces is used in relative norm evaluation.
     *
     * Note: This is done only when all dofs are included (nccdg = 0). Not implemented if
     * multiple convergence criteria are used.
     *
     */

    answer = true;
    errorOutOfRange = false;

    if ( internalForcesEBENorm.giveSize() > 1 ) { // Per-dof-ID norms given; group the error measures by dof ID
        int nccdg = this->domain->giveMaxDofID();
        // Keeps tracks of which dof IDs are actually in use;
        IntArray idsInUse(nccdg);
        idsInUse.zero();
        // zero error norms per group
        dg_forceErr.resize(nccdg);
        dg_forceErr.zero();
        dg_dispErr.resize(nccdg);
        dg_dispErr.zero();
        dg_totalLoadLevel.resize(nccdg);
        dg_totalLoadLevel.zero();
        dg_totalDisp.resize(nccdg);
        dg_totalDisp.zero();

        // loop over dof managers
        int ndofman = domain->giveNumberOfDofManagers();
        for ( int idofman = 1; idofman <= ndofman; idofman++ ) {
            DofManager *dofman = domain->giveDofManager(idofman);
#if ( defined ( __PARALLEL_MODE ) && defined ( __PETSC_MODULE ) )
            if ( !parallel_context->isLocal(dofman) ) {
                continue;
            }
#endif
            // loop over individual dofs
            int ndof = dofman->giveNumberOfDofs();
            for ( int idof = 1; idof <= ndof; idof++ ) {
                Dof *dof = dofman->giveDof(idof);
                if ( !dof->isPrimaryDof() ) continue;
                int eq = dof->giveEquationNumber(dn);
                int dofid = dof->giveDofID();
                if ( !eq ) continue;

                dg_forceErr.at(dofid) += rhs.at(eq) * rhs.at(eq);
                dg_dispErr.at(dofid) += ddX.at(eq) * ddX.at(eq);
                dg_totalLoadLevel.at(dofid) += RT.at(eq) * RT.at(eq);
                dg_totalDisp.at(dofid) += X.at(eq) * X.at(eq);
                idsInUse.at(dofid) = 1;
            } // end loop over DOFs
        } // end loop over dof managers

        // loop over elements and their DOFs
        int nelem = domain->giveNumberOfElements();
        for ( int ielem = 1; ielem <= nelem; ielem++ ) {
            Element *elem = domain->giveElement(ielem);
#ifdef __PARALLEL_MODE
            if ( elem->giveParallelMode() != Element_local ) {
                continue;
            }
#endif
            // loop over element internal Dofs
            for ( int idofman = 1; idofman <= elem->giveNumberOfInternalDofManagers(); idofman++ ) {
                DofManager *dofman = elem->giveInternalDofManager(idofman);
                int ndof = dofman->giveNumberOfDofs();
                // loop over individual dofs
                for ( int idof = 1; idof <= ndof; idof++ ) {
                    Dof *dof = dofman->giveDof(idof);
                    if ( !dof->isPrimaryDof() ) continue;
                    int eq = dof->giveEquationNumber(dn);
                    int dofid = dof->giveDofID();
                    if ( !eq ) continue;
#if ( defined ( __PARALLEL_MODE ) && defined ( __PETSC_MODULE ) )
                    if ( engngModel->isParallel() && !n2l->giveNewEq(eq) ) continue;
#endif
                    dg_forceErr.at(dofid) += rhs.at(eq) * rhs.at(eq);
                    dg_dispErr.at(dofid) += ddX.at(eq) * ddX.at(eq);
                    dg_totalLoadLevel.at(dofid) += RT.at(eq) * RT.at(eq);
                    dg_totalDisp.at(dofid) += X.at(eq) * X.at(eq);
                    idsInUse.at(dofid) = 1;
                } // end loop over DOFs
            } // end loop over element internal dofmans
        } // end loop over elements
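        /*
         * Explanatory note (added for clarity; not part of the original algorithm
         * description): the dof-manager and element loops above, together with the
         * boundary-condition loop below, accumulate for every dof ID g and every
         * primary equation eq carrying that ID
         *
         *     dg_forceErr(g)       = sum_eq rhs(eq)^2
         *     dg_dispErr(g)        = sum_eq ddX(eq)^2
         *     dg_totalLoadLevel(g) = sum_eq RT(eq)^2
         *     dg_totalDisp(g)      = sum_eq X(eq)^2
         *
         * The per-group errors reported further below are then
         *
         *     forceErr(g) = sqrt( dg_forceErr(g) / ( dg_totalLoadLevel(g) + internalForcesEBENorm(g) ) )
         *     dispErr(g)  = sqrt( dg_dispErr(g) / dg_totalDisp(g) )
         *
         * with a fall-back to the absolute norms when the denominators are (nearly) zero.
         */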
        // loop over boundary conditions and their internal DOFs
        for ( int ibc = 1; ibc <= domain->giveNumberOfBoundaryConditions(); ibc++ ) {
            GeneralBoundaryCondition *bc = domain->giveBc(ibc);
            // loop over bc internal dof managers
            for ( int idofman = 1; idofman <= bc->giveNumberOfInternalDofManagers(); idofman++ ) {
                DofManager *dofman = bc->giveInternalDofManager(idofman);
                int ndof = dofman->giveNumberOfDofs();
                // loop over individual dofs
                for ( int idof = 1; idof <= ndof; idof++ ) {
                    Dof *dof = dofman->giveDof(idof);
                    if ( !dof->isPrimaryDof() ) continue;
                    int eq = dof->giveEquationNumber(dn);
                    int dofid = dof->giveDofID();
                    if ( !eq ) continue;
#if ( defined ( __PARALLEL_MODE ) && defined ( __PETSC_MODULE ) )
                    if ( engngModel->isParallel() && !n2l->giveNewEq(eq) ) continue;
#endif
                    dg_forceErr.at(dofid) += rhs.at(eq) * rhs.at(eq);
                    dg_dispErr.at(dofid) += ddX.at(eq) * ddX.at(eq);
                    dg_totalLoadLevel.at(dofid) += RT.at(eq) * RT.at(eq);
                    dg_totalDisp.at(dofid) += X.at(eq) * X.at(eq);
                    idsInUse.at(dofid) = 1;
                } // end loop over DOFs
            } // end loop over bc internal dofmans
        } // end loop over boundary conditions

#ifdef __PARALLEL_MODE
        // exchange individual partition contributions (simultaneously for all groups)
 #ifdef __PETSC_MODULE
        FloatArray collectiveErr(nccdg);
        parallel_context->accumulate(dg_forceErr, collectiveErr);
        dg_forceErr = collectiveErr;
        parallel_context->accumulate(dg_dispErr, collectiveErr);
        dg_dispErr = collectiveErr;
        parallel_context->accumulate(dg_totalLoadLevel, collectiveErr);
        dg_totalLoadLevel = collectiveErr;
        parallel_context->accumulate(dg_totalDisp, collectiveErr);
        dg_totalDisp = collectiveErr;
 #else
        if ( this->engngModel->isParallel() ) {
            FloatArray collectiveErr(nccdg);
            MPI_Allreduce(dg_forceErr.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm);
            dg_forceErr = collectiveErr;
            MPI_Allreduce(dg_dispErr.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm);
            dg_dispErr = collectiveErr;
            MPI_Allreduce(dg_totalLoadLevel.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm);
            dg_totalLoadLevel = collectiveErr;
            MPI_Allreduce(dg_totalDisp.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm);
            dg_totalDisp = collectiveErr;
        }
 #endif
#endif

        OOFEM_LOG_INFO("NRSolver: %-5d", nite);
        //bool zeroNorm = false;
        // loop over dof groups and check convergence individually
        for ( int dg = 1; dg <= nccdg; dg++ ) {
            bool zeroFNorm = false, zeroDNorm = false;
            // Skips the ones which aren't used in this problem (the residual will be zero for these anyway, but it is annoying to print them all)
            if ( !idsInUse.at(dg) ) {
                continue;
            }

            OOFEM_LOG_INFO( " %s:", __DofIDItemToString( (DofIDItem) dg ).c_str() );

            if ( rtolf.at(1) > 0.0 ) {
                // compute a relative error norm
                if ( ( dg_totalLoadLevel.at(dg) + internalForcesEBENorm.at(dg) ) > nrsolver_ERROR_NORM_SMALL_NUM ) {
                    forceErr = sqrt( dg_forceErr.at(dg) / ( dg_totalLoadLevel.at(dg) + internalForcesEBENorm.at(dg) ) );
                } else {
                    // If both external forces and internal ebe norms are zero, then the residual must be zero.
                    //zeroNorm = true; // Warning about this afterwards.
                    zeroFNorm = true;
                    forceErr = sqrt( dg_forceErr.at(dg) );
                }

                if ( forceErr > rtolf.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) {
                    errorOutOfRange = true;
                }
                if ( forceErr > rtolf.at(1) ) {
                    answer = false;
                }
                OOFEM_LOG_INFO( zeroFNorm ?
                                " *%.3e" : " %.3e", forceErr );
            }

            if ( rtold.at(1) > 0.0 ) {
                // compute displacement error
                if ( dg_totalDisp.at(dg) > nrsolver_ERROR_NORM_SMALL_NUM ) {
                    dispErr = sqrt( dg_dispErr.at(dg) / dg_totalDisp.at(dg) );
                } else {
                    ///@todo This is almost always the case for displacement error. nrsolver_ERROR_NORM_SMALL_NUM is no good.
                    //zeroNorm = true; // Warning about this afterwards.
                    //zeroDNorm = true;
                    dispErr = sqrt( dg_dispErr.at(dg) );
                }

                if ( dispErr > rtold.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) {
                    errorOutOfRange = true;
                }
                if ( dispErr > rtold.at(1) ) {
                    answer = false;
                }
                OOFEM_LOG_INFO( zeroDNorm ? " *%.3e" : " %.3e", dispErr );
            }
        }
        OOFEM_LOG_INFO("\n");
        //if ( zeroNorm ) OOFEM_WARNING("NRSolver :: checkConvergence - Had to resort to absolute error measure (marked by *)");
    } else { // No dof grouping
        double dXX, dXdX;

        if ( engngModel->giveProblemScale() == macroScale ) {
            OOFEM_LOG_INFO("NRSolver: %-15d", nite);
        } else {
            OOFEM_LOG_INFO(" NRSolver: %-15d", nite);
        }
#ifdef __PARALLEL_MODE
        forceErr = parallel_context->norm(rhs);
        forceErr *= forceErr;
        dXX = parallel_context->localNorm(X);
        dXX *= dXX; // Note: Solutions are always total global values (natural distribution makes little sense for the solution)
        dXdX = parallel_context->localNorm(ddX);
        dXdX *= dXdX;
#else
        forceErr = rhs.computeSquaredNorm();
        dXX = X.computeSquaredNorm();
        dXdX = ddX.computeSquaredNorm();
#endif
        if ( rtolf.at(1) > 0.0 ) {
            // we compute a relative error norm
            if ( ( RRT + internalForcesEBENorm.at(1) ) > nrsolver_ERROR_NORM_SMALL_NUM ) {
                forceErr = sqrt( forceErr / ( RRT + internalForcesEBENorm.at(1) ) );
            } else {
                forceErr = sqrt( forceErr ); // absolute norm as last resort
            }
            if ( fabs(forceErr) > rtolf.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) {
                errorOutOfRange = true;
            }
            if ( fabs(forceErr) > rtolf.at(1) ) {
                answer = false;
            }
            OOFEM_LOG_INFO(" %-15e", forceErr);
        }

        if ( rtold.at(1) > 0.0 ) {
            // compute displacement error
            // err is relative displacement change
            if ( dXX > nrsolver_ERROR_NORM_SMALL_NUM ) {
                dispErr = sqrt( dXdX / dXX );
            } else {
                dispErr = sqrt( dXdX );
            }
            if ( fabs(dispErr) > rtold.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) {
                errorOutOfRange = true;
            }
            if ( fabs(dispErr) > rtold.at(1) ) {
                answer = false;
            }
            OOFEM_LOG_INFO(" %-15e", dispErr);
        }

        OOFEM_LOG_INFO("\n");
    } // end default case (all dofs contributing)

    return answer;
}
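/*
 * Caller-side sketch (illustrative only; the control flow and variable names are
 * assumptions, not taken from this file). A Newton-Raphson driver typically repeats
 * the iteration until checkConvergence() returns true, and treats errorOutOfRange
 * as a signal of divergence:
 *
 *     bool errorOutOfRange;
 *     bool converged = this->checkConvergence(RT, F, rhs, ddX, X, RRT,
 *                                             internalForcesEBENorm, nite,
 *                                             errorOutOfRange, tNow);
 *     if ( errorOutOfRange ) {
 *         // diverging: give up or restart with a smaller load increment
 *     } else if ( !converged ) {
 *         // assemble a new residual (and possibly tangent) and iterate again
 *     }
 */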