Example #1
void
SolutionbasedShapeFunction :: setLoads(EngngModel *myEngngModel, int d)
{
    DynamicInputRecord ir;
    FloatArray gradP;

    gradP.resize( this->giveDomain()->giveNumberOfSpatialDimensions() );
    gradP.zero();
    gradP.at(d) = 1.0;

    ir.setRecordKeywordField("deadweight", 1);
    ir.setField(gradP, _IFT_Load_components);
    ir.setField(1, _IFT_GeneralBoundaryCondition_timeFunct);

    int bcID = myEngngModel->giveDomain(1)->giveNumberOfBoundaryConditions() + 1;
    GeneralBoundaryCondition *myBodyLoad;
    myBodyLoad = classFactory.createBoundaryCondition( "deadweight", bcID, myEngngModel->giveDomain(1) );
    myBodyLoad->initializeFrom(& ir);
    myEngngModel->giveDomain(1)->setBoundaryCondition(bcID, myBodyLoad);

    for ( int i = 1; i <= myEngngModel->giveDomain(1)->giveNumberOfElements(); i++ ) {
        IntArray *blArray;
        blArray = myEngngModel->giveDomain(1)->giveElement(i)->giveBodyLoadArray();
        blArray->resizeWithValues(blArray->giveSize() + 1);
        blArray->at( blArray->giveSize() ) = bcID;
    }
}
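A minimal driver sketch (not taken from the sources), assuming setLoads is accessible to the caller and myEngngModel is a fully initialized EngngModel; it simply calls setLoads once per spatial dimension, matching the gradP component set above:

void applyUnitGradientLoads(SolutionbasedShapeFunction *shapeFn, EngngModel *myEngngModel)
{
    // setLoads() puts a 1.0 into component d of gradP, so one call per direction
    // yields one unit body load per spatial dimension.
    int nsd = myEngngModel->giveDomain(1)->giveNumberOfSpatialDimensions();
    for ( int d = 1; d <= nsd; d++ ) {
        shapeFn->setLoads(myEngngModel, d);
    }
}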
Example #2
void Tr1Darcy :: computeLoadVector(FloatArray &answer, TimeStep *atTime)
{
    // TODO: Implement support for body forces

    FloatArray vec;

    answer.resize(3);
    answer.zero();

    // Compute characteristic vector for Neumann boundary conditions.
    int nLoads = boundaryLoadArray.giveSize() / 2;

    for ( int i = 1; i <= nLoads; i++ ) {  // For each Neumann boundary condition ....
        int load_number = boundaryLoadArray.at(2 * i - 1);
        int load_id = boundaryLoadArray.at(2 * i);
        Load *load = domain->giveLoad(load_number);
        bcGeomType ltype = load->giveBCGeoType();

        if ( ltype == EdgeLoadBGT ) {
            this->computeEdgeBCSubVectorAt(vec, load, load_id, atTime);
            answer.add(vec); // add only freshly computed edge contributions
        }
    }

    answer.negated();
}
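For reference, a hedged sketch of how the (load number, edge id) pairs consumed above could be appended to boundaryLoadArray; addEdgeLoadEntry is a hypothetical helper, not part of Tr1Darcy:

void addEdgeLoadEntry(IntArray &boundaryLoadArray, int loadNumber, int edgeId)
{
    // Entry i occupies positions 2*i-1 (global load number) and 2*i (element-local
    // edge id), matching the decoding in Tr1Darcy :: computeLoadVector.
    int n = boundaryLoadArray.giveSize();
    boundaryLoadArray.resizeWithValues(n + 2);
    boundaryLoadArray.at(n + 1) = loadNumber;
    boundaryLoadArray.at(n + 2) = edgeId;
}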
Example #3
BoundaryCondition *MasterDof :: giveBc()
// Returns the boundary condition the receiver is subjected to.
{
    if ( bc ) {
        GeneralBoundaryCondition *bcptr = dofManager->giveDomain()->giveBc(bc);
        if ( bcptr->giveType() == DirichletBT ) {
            return static_cast< BoundaryCondition * >(bcptr);
        }
    }

    OOFEM_ERROR("Incompatible BC (%d) applied as Dirichlet/Primary BC", bc);
    return NULL;
}
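A hedged companion sketch: the same Dirichlet test performed on the domain side, so the OOFEM_ERROR path above is never hit; tryGiveDirichletBc is a hypothetical helper:

BoundaryCondition *tryGiveDirichletBc(Domain *domain, int bcId)
{
    if ( bcId ) {
        GeneralBoundaryCondition *gbc = domain->giveBc(bcId);
        if ( gbc && gbc->giveType() == DirichletBT ) {
            return static_cast< BoundaryCondition * >(gbc);
        }
    }
    return NULL; // no BC assigned, or not a Dirichlet/primary BC
}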
Example #4
void
SolutionbasedShapeFunction :: setBoundaryConditionOnDof(Dof *d, double value)
{
    int bcID = d->giveBcId();

    if ( bcID == 0 ) {
        DynamicInputRecord ir;
        ir.setRecordKeywordField("boundarycondition", 1);
        ir.setField(1, _IFT_GeneralBoundaryCondition_timeFunct);
        ir.setField(value, _IFT_BoundaryCondition_PrescribedValue);

        bcID = d->giveDofManager()->giveDomain()->giveNumberOfBoundaryConditions() + 1;

        GeneralBoundaryCondition *myBC;
        myBC = classFactory.createBoundaryCondition( "boundarycondition", bcID, d->giveDofManager()->giveDomain() );
        myBC->initializeFrom(& ir);
        d->giveDofManager()->giveDomain()->setBoundaryCondition(bcID, myBC);

        d->setBcId(bcID);
    } else {
        BoundaryCondition *bc = static_cast< BoundaryCondition * >( d->giveDofManager()->giveDomain()->giveBc(bcID) );
        bc->setPrescribedValue(value);
    }
}
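An illustrative caller, assuming setBoundaryConditionOnDof is accessible; clampAllDofs is a hypothetical helper that prescribes the same value on every dof of a dof manager:

void clampAllDofs(SolutionbasedShapeFunction *shapeFn, DofManager *dman, double value)
{
    for ( Dof *dof: *dman ) {
        shapeFn->setBoundaryConditionOnDof(dof, value); // creates or updates the Dirichlet BC per dof
    }
}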
Example #5
void
BeamBaseElement :: computeLocalForceLoadVector(FloatArray &answer, TimeStep *tStep, ValueModeType mode)
// Computes the part of the load vector that is imposed by force loads acting
// on the element volume (or surface).
// Why is this kept as a separate function?
// When reaction forces are computed, they are obtained from element::GiveRealStressVector;
// that vector stores the real forces (with the temperature part subtracted),
// so the part corresponding to non-nodal loading must additionally be subtracted.
{
    FloatArray helpLoadVector(1);
    answer.clear();

    // loop over body load array first
    int nBodyLoads = this->giveBodyLoadArray()->giveSize();
    for ( int i = 1; i <= nBodyLoads; i++ ) {
        int id = bodyLoadArray.at(i);
        Load *load = domain->giveLoad(id);
        bcGeomType ltype = load->giveBCGeoType();
        if ( ( ltype == BodyLoadBGT ) && ( load->giveBCValType() == ForceLoadBVT ) ) {
            this->computeBodyLoadVectorAt(helpLoadVector, load, tStep, mode);
            if ( helpLoadVector.giveSize() ) {
                answer.add(helpLoadVector);
            }
        } else {
            if ( load->giveBCValType() != TemperatureBVT && load->giveBCValType() != EigenstrainBVT ) {
                // temperature and eigenstrain is handled separately at computeLoadVectorAt subroutine
                OOFEM_ERROR("body load %d is of unsupported type (%d)", id, ltype);
            }
        }
    }

    // loop over boundary load array
    int nBoundaryLoads = this->giveBoundaryLoadArray()->giveSize() / 2;
    for ( int i = 1; i <= nBoundaryLoads; i++ ) {
        int n = boundaryLoadArray.at(1 + ( i - 1 ) * 2);
        int id = boundaryLoadArray.at(i * 2);
        Load *load = domain->giveLoad(n);
        BoundaryLoad *bLoad;
        if ( ( bLoad = dynamic_cast< BoundaryLoad * >(load) ) ) {
            bcGeomType ltype = load->giveBCGeoType();
            if ( ltype == EdgeLoadBGT ) {
                this->computeBoundaryEdgeLoadVector(helpLoadVector, bLoad, id, ExternalForcesVector, mode, tStep, false);
                if ( helpLoadVector.giveSize() ) {
                    answer.add(helpLoadVector);
                }
            } else if ( ltype == SurfaceLoadBGT ) {
                this->computeBoundarySurfaceLoadVector(helpLoadVector, bLoad, id, ExternalForcesVector, mode, tStep, false);
                if ( helpLoadVector.giveSize() ) {
                    answer.add(helpLoadVector);
                }
            } else if ( ltype == PointLoadBGT ) {
                // id not used
                this->computePointLoadVectorAt(helpLoadVector, load, tStep, mode, false);
                if ( helpLoadVector.giveSize() ) {
                    answer.add(helpLoadVector);
                }
            } else {
                OOFEM_ERROR("boundary load %d is of unsupported type (%d)", id, ltype);
            }
        }
    }


    // add exact end forces due to nonnodal loading applied indirectly (via sets)
    BCTracker *bct = this->domain->giveBCTracker();
    BCTracker::entryListType bcList = bct->getElementRecords(this->number);
    FloatArray help;
    
    for ( BCTracker::entryListType::iterator it = bcList.begin(); it != bcList.end(); ++it ) {
        GeneralBoundaryCondition *bc = this->domain->giveBc( ( *it ).bcNumber );
        BodyLoad *bodyLoad;
        BoundaryLoad *boundaryLoad;
        if ( bc->isImposed(tStep) ) {
            if ( ( bodyLoad = dynamic_cast< BodyLoad * >(bc) ) ) { // body load
                this->computeBodyLoadVectorAt(help, bodyLoad, tStep, VM_Total); // this one is local
                answer.add(help);
            } else if ( ( boundaryLoad = dynamic_cast< BoundaryLoad * >(bc) ) ) {
                // compute Boundary Edge load vector in GLOBAL CS !!!!!!!
                this->computeBoundaryEdgeLoadVector(help, boundaryLoad, ( *it ).boundaryId,
                                                    ExternalForcesVector, VM_Total, tStep, false);
                // get it transformed back to local c.s.
                // this->computeGtoLRotationMatrix(t);
                // help.rotatedWith(t, 'n');
                answer.add(help);
            }
        }
    }
}
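A hedged sketch of the use case described in the header comment, assuming computeLocalForceLoadVector is accessible to the caller: when reactions are assembled from the real internal forces, the non-nodal load part computed above is subtracted again. subtractNonNodalLoads and internalForces are illustrative names:

void subtractNonNodalLoads(BeamBaseElement *elem, FloatArray &internalForces, TimeStep *tStep)
{
    FloatArray loadPart;
    elem->computeLocalForceLoadVector(loadPart, tStep, VM_Total);
    if ( loadPart.giveSize() ) {
        internalForces.subtract(loadPart); // remove the contribution of non-nodal loading
    }
}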
Example #6
void IncrementalLinearStatic :: solveYourselfAt(TimeStep *tStep)
{
    Domain *d = this->giveDomain(1);
    // Creates system of governing eq's and solves them at given time step


    // >>> beginning PH
    // The following piece of code updates the assignment of boundary conditions to dofs
    // (this allows multiple boundary conditions to be assigned to one dof,
    // which can be arbitrarily turned on and off in time).
    // Almost the entire section has been copied from domain.C.
    std :: vector< std :: map< int, int > > dof_bc( d->giveNumberOfDofManagers() );

    for ( int i = 1; i <= d->giveNumberOfBoundaryConditions(); ++i ) {
        GeneralBoundaryCondition *gbc = d->giveBc(i);

        if ( gbc->isImposed(tStep) ) {

            if ( gbc->giveSetNumber() > 0 ) { ///@todo This will eventually not be optional.
                // Loop over nodes in set and store the bc number in each dof.
                Set *set = d->giveSet( gbc->giveSetNumber() );
                ActiveBoundaryCondition *active_bc = dynamic_cast< ActiveBoundaryCondition * >(gbc);
                BoundaryCondition *bc = dynamic_cast< BoundaryCondition * >(gbc);
                if ( bc || ( active_bc && active_bc->requiresActiveDofs() ) ) {
                    const IntArray &appliedDofs = gbc->giveDofIDs();
                    const IntArray &nodes = set->giveNodeList();
                    for ( int inode = 1; inode <= nodes.giveSize(); ++inode ) {
                        for ( int idof = 1; idof <= appliedDofs.giveSize(); ++idof ) {

                            if  ( dof_bc [ nodes.at(inode) - 1 ].find( appliedDofs.at(idof) ) == dof_bc [ nodes.at(inode) - 1 ].end() ) {
                                // is empty
                                dof_bc [ nodes.at(inode) - 1 ] [ appliedDofs.at(idof) ] = i;

                                DofManager * dofman = d->giveDofManager( nodes.at(inode) );
                                Dof * dof = dofman->giveDofWithID( appliedDofs.at(idof) );

                                dof->setBcId(i);

                            } else {
                                // another bc has been already prescribed at this time step to this dof
                                OOFEM_WARNING("More than one boundary condition assigned at time %f to node %d dof %d. Considering boundary condition %d", tStep->giveTargetTime(),  nodes.at(inode), appliedDofs.at(idof), dof_bc [ nodes.at(inode) - 1 ] [appliedDofs.at(idof)] );


                            }
                        }
                    }
                }
            }
        }
    }

    // to get proper number of equations
    this->forceEquationNumbering();
    // <<< end PH



    // Initiates the total displacement to zero.
    if ( tStep->isTheFirstStep() ) {
        for ( auto &dofman : d->giveDofManagers() ) {
            for ( Dof *dof: *dofman ) {
                dof->updateUnknownsDictionary(tStep->givePreviousStep(), VM_Total, 0.);
                dof->updateUnknownsDictionary(tStep, VM_Total, 0.);
            }
        }

        for ( auto &bc : d->giveBcs() ) {
            ActiveBoundaryCondition *abc;

            if ( ( abc = dynamic_cast< ActiveBoundaryCondition * >(bc.get()) ) ) {
                int ndman = abc->giveNumberOfInternalDofManagers();
                for ( int i = 1; i <= ndman; i++ ) {
                    DofManager *dofman = abc->giveInternalDofManager(i);
                    for ( Dof *dof: *dofman ) {
                        dof->updateUnknownsDictionary(tStep->givePreviousStep(), VM_Total, 0.);
                        dof->updateUnknownsDictionary(tStep, VM_Total, 0.);
                    }
                }
            }
        }
    }

    // Apply dirichlet b.c's on total values
    for ( auto &dofman : d->giveDofManagers() ) {
        for ( Dof *dof: *dofman ) {
            double tot = dof->giveUnknown( VM_Total, tStep->givePreviousStep() );
            if ( dof->hasBc(tStep) ) {
                tot += dof->giveBcValue(VM_Incremental, tStep);
            }

            dof->updateUnknownsDictionary(tStep, VM_Total, tot);
        }
    }

    int neq = this->giveNumberOfDomainEquations( 1, EModelDefaultEquationNumbering() );

#ifdef VERBOSE
    OOFEM_LOG_RELEVANT("Solving [step number %8d, time %15e, equations %d]\n", tStep->giveNumber(), tStep->giveTargetTime(), neq);
#endif

    if ( neq == 0 ) { // Allows for fully prescribed/empty problems.
        return;
    }

    incrementOfDisplacementVector.resize(neq);
    incrementOfDisplacementVector.zero();

#ifdef VERBOSE
    OOFEM_LOG_INFO("Assembling load\n");
#endif
    // Assembling the element part of load vector
    internalLoadVector.resize(neq);
    internalLoadVector.zero();
    this->assembleVector( internalLoadVector, tStep, InternalForceAssembler(),
                          VM_Total, EModelDefaultEquationNumbering(), this->giveDomain(1) );

    loadVector.resize(neq);
    loadVector.zero();
    this->assembleVector( loadVector, tStep, ExternalForceAssembler(),
                          VM_Total, EModelDefaultEquationNumbering(), this->giveDomain(1) );

    loadVector.subtract(internalLoadVector);
    this->updateSharedDofManagers(loadVector, EModelDefaultEquationNumbering(), ReactionExchangeTag);


#ifdef VERBOSE
    OOFEM_LOG_INFO("Assembling stiffness matrix\n");
#endif
    stiffnessMatrix.reset( classFactory.createSparseMtrx(sparseMtrxType) );
    if ( !stiffnessMatrix ) {
        OOFEM_ERROR("sparse matrix creation failed");
    }

    stiffnessMatrix->buildInternalStructure( this, 1, EModelDefaultEquationNumbering() );
    stiffnessMatrix->zero();
    this->assemble( *stiffnessMatrix, tStep, TangentAssembler(TangentStiffness),
                    EModelDefaultEquationNumbering(), this->giveDomain(1) );

#ifdef VERBOSE
    OOFEM_LOG_INFO("Solving ...\n");
#endif
    this->giveNumericalMethod( this->giveCurrentMetaStep() );
    NM_Status s = nMethod->solve(*stiffnessMatrix, loadVector, incrementOfDisplacementVector);
    if ( !( s & NM_Success ) ) {
        OOFEM_ERROR("No success in solving system.");
    }
}
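An illustrative post-processing sketch, not part of the solver: read back the total values stored in the unknowns dictionaries once solveYourselfAt has finished; printTotalUnknowns is a hypothetical helper:

void printTotalUnknowns(Domain *d, TimeStep *tStep)
{
    for ( auto &dofman : d->giveDofManagers() ) {
        for ( Dof *dof: *dofman ) {
            OOFEM_LOG_INFO( "dofman %d, dof %d: %e\n", dofman->giveNumber(),
                            dof->giveDofID(), dof->giveUnknown(VM_Total, tStep) );
        }
    }
}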
Example #7
void SloanGraph :: initialize()
{
    int i, j, k, ielemnodes, ielemintdmans, ndofmans;
    int nnodes = domain->giveNumberOfDofManagers();
    int nelems = domain->giveNumberOfElements();
    int nbcs = domain->giveNumberOfBoundaryConditions();
    Element *ielem;
    GeneralBoundaryCondition *ibc;

    ///@todo Use std::list for this first part instead (suboptimization?)
    this->nodes.growTo(nnodes);

    // Add dof managers.
    for ( i = 1; i <= nnodes; i++ ) {
        SloanGraphNode *node = new SloanGraphNode(this, i);
        nodes.put(i, node);
        dmans.put(i, domain->giveDofManager(i) );
    }
    k = nnodes;
    // Add element internal dof managers
    for ( i = 1; i <= nelems; i++ ) {
        ielem = domain->giveElement(i);
        this->nodes.growTo( k + ielem->giveNumberOfInternalDofManagers() );
        for ( j = 1; j <= ielem->giveNumberOfInternalDofManagers(); ++j ) {
            // increment the graph node counter once, so nodes and dmans stay in sync
            ++k;
            SloanGraphNode *node = new SloanGraphNode(this, k);
            nodes.put(k, node);
            dmans.put( k, ielem->giveInternalDofManager(j) );
        }
    }
    // Add boundary condition internal dof managers
    for ( i = 1; i <= nbcs; i++ ) {
        ibc = domain->giveBc(i);
        if ( ibc ) {
            this->nodes.growTo( k + ibc->giveNumberOfInternalDofManagers() );
            for ( j = 1; j <= ibc->giveNumberOfInternalDofManagers(); ++j ) {
                // same pattern as above: one shared index for nodes and dmans
                ++k;
                SloanGraphNode *node = new SloanGraphNode(this, k);
                nodes.put(k, node);
                dmans.put( k, ibc->giveInternalDofManager(j) );
            }
        }
    }

    IntArray connections;
    for ( i = 1; i <= nelems; i++ ) {
        ielem = domain->giveElement(i);
        ielemnodes = ielem->giveNumberOfDofManagers();
        ielemintdmans = ielem->giveNumberOfInternalDofManagers();
        ndofmans = ielemnodes + ielemintdmans;
        connections.resize(ndofmans);
        for ( j = 1; j <= ielemnodes; j++ ) {
            connections.at(j) = ielem->giveDofManager(j)->giveNumber();
        }
        for ( j = 1; j <= ielemintdmans; j++ ) {
            connections.at(ielemnodes+j) = ielem->giveInternalDofManager(j)->giveNumber();
        }
        for ( j = 1; j <= ndofmans; j++ ) {
            for ( k = j + 1; k <= ndofmans; k++ ) {
                // Connect both ways
                this->giveNode( connections.at(j) )->addNeighbor( connections.at(k) );
                this->giveNode( connections.at(k) )->addNeighbor( connections.at(j) );
            }
        }
    }
    ///@todo Add connections from dof managers to boundary condition internal dof managers.

    // loop over dof managers and test if there are some "slave" or rigidArm connection
    // if yes, such dependency is reflected in the graph by introducing additional
    // graph edges between slaves and corresponding masters
    /*
     * DofManager* iDofMan;
     * for (i=1; i <= nnodes; i++){
     * if (domain->giveDofManager (i)->hasAnySlaveDofs()) {
     * iDofMan = domain->giveDofManager (i);
     * if (iDofMan->giveClassID() == RigidArmNodeClass) {
     *   // rigid arm node -> has only one master
     *   int master = ((RigidArmNode*)iDofMan)->giveMasterDofMngr()->giveNumber();
     *   // add edge
     *   this->giveNode(i)->addNeighbor (master);
     *   this->giveNode(master)->addNeighbor(i);
     *
     * } else {
     * // slave dofs are present in dofManager
     * // first - ask for masters, these may be different for each dof
     *   int j;
     *   for (j=1; j<=iDofMan->giveNumberOfDofs(); j++)
     *     if (iDofMan->giveDof (j)->giveClassID() == SimpleSlaveDofClass) {
     *       int master = ((SimpleSlaveDof*) iDofMan->giveDof (j))->giveMasterDofManagerNum();
     *       // add edge
     *       this->giveNode(i)->addNeighbor (master);
     *       this->giveNode(master)->addNeighbor(i);
     *
     *     }
     * }
     * }
     * } // end dof man loop */

    std :: set< int, std :: less< int > > masters;
    std :: set< int, std :: less< int > > :: iterator it;

    IntArray dofMasters;
    DofManager *iDofMan;
    for ( i = 1; i <= nnodes; i++ ) {
        if ( domain->giveDofManager(i)->hasAnySlaveDofs() ) {
            // slave dofs are present in dofManager
            // first - ask for masters, these may be different for each dof
            masters.clear();
            iDofMan = domain->giveDofManager(i);
            int j, k;
            for ( j = 1; j <= iDofMan->giveNumberOfDofs(); j++ ) {
                if ( !iDofMan->giveDof(j)->isPrimaryDof() ) {
                    iDofMan->giveDof(j)->giveMasterDofManArray(dofMasters);
                    for ( k = 1; k <= dofMasters.giveSize(); k++ ) {
                        masters.insert( dofMasters.at(k) );
                    }
                }
            }

            for ( it = masters.begin(); it != masters.end(); ++it ) {
                this->giveNode(i)->addNeighbor( * ( it ) );
                this->giveNode( * ( it ) )->addNeighbor(i);
            }
        }
    } // end dof man loop

}
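A hedged restatement of the coupling rule used above, as a hypothetical free helper: every pair of dof managers that share an element becomes an undirected edge of the Sloan graph:

void addClique(SloanGraph *graph, const IntArray &connections)
{
    for ( int j = 1; j <= connections.giveSize(); j++ ) {
        for ( int k = j + 1; k <= connections.giveSize(); k++ ) {
            // Connect both ways, exactly as in SloanGraph :: initialize
            graph->giveNode( connections.at(j) )->addNeighbor( connections.at(k) );
            graph->giveNode( connections.at(k) )->addNeighbor( connections.at(j) );
        }
    }
}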
Example #8
bool
NRSolver :: checkConvergence(FloatArray &RT, FloatArray &F, FloatArray &rhs,  FloatArray &ddX, FloatArray &X,
                             double RRT, const FloatArray &internalForcesEBENorm,
                             int nite, bool &errorOutOfRange, TimeStep *tNow)
{
    double forceErr, dispErr;
    FloatArray dg_forceErr, dg_dispErr, dg_totalLoadLevel, dg_totalDisp;
    bool answer;
    EModelDefaultEquationNumbering dn;
 #ifdef __PARALLEL_MODE
  #ifdef __PETSC_MODULE
    PetscContext *parallel_context = engngModel->givePetscContext(this->domain->giveNumber());
    Natural2LocalOrdering *n2l = parallel_context->giveN2Lmap();
  #endif
 #endif

    /*
     * The force errors are (if possible) evaluated as relative errors.
     * If the norm of applied load vector is zero (one may load by temperature, etc)
     * then the norm of reaction forces is used in relative norm evaluation.
     *
     * Note: This is done only when all dofs are included (nccdg = 0). Not implemented if
     * multiple convergence criteria are used.
     *
     */

    answer = true;
    errorOutOfRange = false;

    if ( internalForcesEBENorm.giveSize() > 1 ) { // Grouped treatment: convergence is checked per dof ID; the single-norm case (no grouping) is handled in the else branch below
        int nccdg = this->domain->giveMaxDofID();
        // Keeps tracks of which dof IDs are actually in use;
        IntArray idsInUse(nccdg);
        idsInUse.zero();
        // zero error norms per group
        dg_forceErr.resize(nccdg); dg_forceErr.zero();
        dg_dispErr.resize(nccdg); dg_dispErr.zero();
        dg_totalLoadLevel.resize(nccdg); dg_totalLoadLevel.zero();
        dg_totalDisp.resize(nccdg); dg_totalDisp.zero();
        // loop over dof managers
        int ndofman = domain->giveNumberOfDofManagers();
        for ( int idofman = 1; idofman <= ndofman; idofman++ ) {
            DofManager *dofman = domain->giveDofManager(idofman);
 #if ( defined ( __PARALLEL_MODE ) && defined ( __PETSC_MODULE ) )
            if ( !parallel_context->isLocal(dofman) ) {
                continue;
            }

 #endif

            // loop over individual dofs
            int ndof = dofman->giveNumberOfDofs();
            for ( int idof = 1; idof <= ndof; idof++ ) {
                Dof *dof = dofman->giveDof(idof);
                if ( !dof->isPrimaryDof() ) continue;
                int eq = dof->giveEquationNumber(dn);
                int dofid = dof->giveDofID();
                if ( !eq ) continue;
 
                dg_forceErr.at(dofid) += rhs.at(eq) * rhs.at(eq);
                dg_dispErr.at(dofid) += ddX.at(eq) * ddX.at(eq);
                dg_totalLoadLevel.at(dofid) += RT.at(eq) * RT.at(eq);
                dg_totalDisp.at(dofid) += X.at(eq) * X.at(eq);
                idsInUse.at(dofid) = 1;
            } // end loop over DOFs
        } // end loop over dof managers

        // loop over elements and their DOFs
        int nelem = domain->giveNumberOfElements();
        for ( int ielem = 1; ielem <= nelem; ielem++ ) {
            Element *elem = domain->giveElement(ielem);
 #ifdef __PARALLEL_MODE
            if ( elem->giveParallelMode() != Element_local ) {
                continue;
            }

 #endif
            // loop over element internal Dofs
            for ( int idofman = 1; idofman <= elem->giveNumberOfInternalDofManagers(); idofman++) {
                DofManager *dofman = elem->giveInternalDofManager(idofman);
                int ndof = dofman->giveNumberOfDofs();
                // loop over individual dofs
                for ( int idof = 1; idof <= ndof; idof++ ) {
                    Dof *dof = dofman->giveDof(idof);
                    if ( !dof->isPrimaryDof() ) continue;
                    int eq = dof->giveEquationNumber(dn);
                    int dofid = dof->giveDofID();
                    
                    if ( !eq ) continue;
 #if ( defined ( __PARALLEL_MODE ) && defined ( __PETSC_MODULE ) )
                    if ( engngModel->isParallel() && !n2l->giveNewEq(eq) ) continue;
 #endif
                    dg_forceErr.at(dofid) += rhs.at(eq) * rhs.at(eq);
                    dg_dispErr.at(dofid) += ddX.at(eq) * ddX.at(eq);
                    dg_totalLoadLevel.at(dofid) += RT.at(eq) * RT.at(eq);
                    dg_totalDisp.at(dofid) += X.at(eq) * X.at(eq);
                    idsInUse.at(dofid) = 1;
                } // end loop over DOFs
            } // end loop over element internal dofmans
        } // end loop over elements
        
        // loop over boundary conditions and their internal DOFs
        for ( int ibc = 1; ibc <= domain->giveNumberOfBoundaryConditions(); ibc++ ) {
            GeneralBoundaryCondition *bc = domain->giveBc(ibc);

            // loop over element internal Dofs
            for ( int idofman = 1; idofman <= bc->giveNumberOfInternalDofManagers(); idofman++) {
                DofManager *dofman = bc->giveInternalDofManager(idofman);
                int ndof = dofman->giveNumberOfDofs();
                // loop over individual dofs
                for ( int idof = 1; idof <= ndof; idof++ ) {
                    Dof *dof = dofman->giveDof(idof);
                    if ( !dof->isPrimaryDof() ) continue;
                    int eq = dof->giveEquationNumber(dn);
                    int dofid = dof->giveDofID();

                    if ( !eq ) continue;
 #if ( defined ( __PARALLEL_MODE ) && defined ( __PETSC_MODULE ) )
                    if ( engngModel->isParallel() && !n2l->giveNewEq(eq) ) continue;
 #endif
                    dg_forceErr.at(dofid) += rhs.at(eq) * rhs.at(eq);
                    dg_dispErr.at(dofid) += ddX.at(eq) * ddX.at(eq);
                    dg_totalLoadLevel.at(dofid) += RT.at(eq) * RT.at(eq);
                    dg_totalDisp.at(dofid) += X.at(eq) * X.at(eq);
                    idsInUse.at(dofid) = 1;
                } // end loop over DOFs
            } // end loop over bc internal dofmans
        } // end loop over boundary conditions

 #ifdef __PARALLEL_MODE
        // exchange individual partition contributions (simultaneously for all groups)
#ifdef __PETSC_MODULE
        FloatArray collectiveErr(nccdg);
        parallel_context->accumulate(dg_forceErr,       collectiveErr); dg_forceErr       = collectiveErr;
        parallel_context->accumulate(dg_dispErr,        collectiveErr); dg_dispErr        = collectiveErr;
        parallel_context->accumulate(dg_totalLoadLevel, collectiveErr); dg_totalLoadLevel = collectiveErr;
        parallel_context->accumulate(dg_totalDisp,      collectiveErr); dg_totalDisp      = collectiveErr;
#else
        if ( this->engngModel->isParallel() ) {
            FloatArray collectiveErr(nccdg);
            MPI_Allreduce(dg_forceErr.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm);
            dg_forceErr = collectiveErr;
            MPI_Allreduce(dg_dispErr.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm);
            dg_dispErr = collectiveErr;
            MPI_Allreduce(dg_totalLoadLevel.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm);
            dg_totalLoadLevel = collectiveErr;
            MPI_Allreduce(dg_totalDisp.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm);
            dg_totalDisp = collectiveErr;
        }
#endif
 #endif
        OOFEM_LOG_INFO("NRSolver: %-5d", nite);
        //bool zeroNorm = false;
        // loop over dof groups and check convergence individually
        for ( int dg = 1; dg <= nccdg; dg++ ) {
            bool zeroFNorm = false, zeroDNorm = false;
            // Skips the ones which aren't used in this problem (the residual will be zero for these anyway, but it is annoying to print them all)
            if ( !idsInUse.at(dg) ) {
                continue;
            }
            
            OOFEM_LOG_INFO( "  %s:", __DofIDItemToString((DofIDItem)dg).c_str() );

            if ( rtolf.at(1) > 0.0 ) {
                //  compute a relative error norm
                if ( ( dg_totalLoadLevel.at(dg) + internalForcesEBENorm.at(dg) ) > nrsolver_ERROR_NORM_SMALL_NUM ) {
                    forceErr = sqrt( dg_forceErr.at(dg) / ( dg_totalLoadLevel.at(dg) + internalForcesEBENorm.at(dg) ) );
                } else {
                    // If both external forces and internal ebe norms are zero, then the residual must be zero.
                    //zeroNorm = true; // Warning about this afterwards.
                    zeroFNorm = true;
                    forceErr = sqrt( dg_forceErr.at(dg) );
                }

                if ( forceErr > rtolf.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) {
                    errorOutOfRange = true;
                }
                if ( forceErr > rtolf.at(1) ) {
                    answer = false;
                }
                OOFEM_LOG_INFO( zeroFNorm ? " *%.3e" : "  %.3e", forceErr );
            }

            if ( rtold.at(1) > 0.0 ) {
                // compute displacement error
                if ( dg_totalDisp.at(dg) >  nrsolver_ERROR_NORM_SMALL_NUM ) {
                    dispErr = sqrt( dg_dispErr.at(dg) / dg_totalDisp.at(dg) );
                } else {
                    ///@todo This is almost always the case for displacement error. nrsolver_ERROR_NORM_SMALL_NUM is no good.
                    //zeroNorm = true; // Warning about this afterwards.
                    //zeroDNorm = true;
                    dispErr = sqrt( dg_dispErr.at(dg) );
                }
                if ( dispErr  > rtold.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) {
                    errorOutOfRange = true;
                }
                if ( dispErr > rtold.at(1) ) {
                    answer = false;
                }
                OOFEM_LOG_INFO( zeroDNorm ? " *%.3e" : "  %.3e", dispErr );
            }
        }
        OOFEM_LOG_INFO("\n");
        //if ( zeroNorm ) OOFEM_WARNING("NRSolver :: checkConvergence - Had to resort to absolute error measure (marked by *)");
    } else { // No dof grouping
        double dXX, dXdX;
        
        if ( engngModel->giveProblemScale() == macroScale ) {
            OOFEM_LOG_INFO("NRSolver:     %-15d", nite);
        } else {
            OOFEM_LOG_INFO("  NRSolver:     %-15d", nite);
        }

 #ifdef __PARALLEL_MODE
        forceErr = parallel_context->norm(rhs); forceErr *= forceErr;
        dXX = parallel_context->localNorm(X); dXX *= dXX; // Note: Solutions are always total global values (natural distribution makes little sense for the solution)
        dXdX = parallel_context->localNorm(ddX); dXdX *= dXdX;
 #else
        forceErr = rhs.computeSquaredNorm();
        dXX = X.computeSquaredNorm();
        dXdX = ddX.computeSquaredNorm();
 #endif
        if ( rtolf.at(1) > 0.0 ) {
            // we compute a relative error norm
            if ( ( RRT + internalForcesEBENorm.at(1) ) > nrsolver_ERROR_NORM_SMALL_NUM ) {
                forceErr = sqrt( forceErr / ( RRT + internalForcesEBENorm.at(1) ) );
            } else {
                forceErr = sqrt( forceErr ); // absolute norm as last resort
            }
            if ( fabs(forceErr) > rtolf.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) {
                errorOutOfRange = true;
            }
            if ( fabs(forceErr) > rtolf.at(1) ) {
                answer = false;
            }
            OOFEM_LOG_INFO(" %-15e", forceErr);
        }

        if ( rtold.at(1) > 0.0 ) {
            // compute displacement error
            // err is relative displacement change
            if ( dXX > nrsolver_ERROR_NORM_SMALL_NUM ) {
                dispErr = sqrt( dXdX / dXX );
            } else {
                dispErr = sqrt( dXdX );
            }
            if ( fabs(dispErr)  > rtold.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) {
                errorOutOfRange = true;
            }
            if ( fabs(dispErr)  > rtold.at(1) ) {
                answer = false;
            }
            OOFEM_LOG_INFO(" %-15e", dispErr);
        }

        OOFEM_LOG_INFO("\n");
    } // end default case (all dofs contributing)

    return answer;
}
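The relative force error used above, isolated as an illustrative helper (hypothetical name): it falls back to the absolute residual norm whenever the combined external/internal reference norm is numerically zero:

double relativeForceError(double squaredResidual, double squaredExternal, double squaredInternalEBE)
{
    double denom = squaredExternal + squaredInternalEBE;
    if ( denom > nrsolver_ERROR_NORM_SMALL_NUM ) {
        return sqrt(squaredResidual / denom); // relative error norm
    }
    return sqrt(squaredResidual); // absolute norm as last resort
}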