int StructuralEngngModel :: packDofManagers(FloatArray *src, ProcessCommunicator &processComm, bool prescribedEquations)
{
    int result = 1;
    int i, size;
    int j, ndofs, eqNum;
    Domain *domain = this->giveDomain(1);
    IntArray const *toSendMap = processComm.giveToSendMap();
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    DofManager *dman;
    Dof *jdof;

    size = toSendMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        dman = domain->giveDofManager( toSendMap->at(i) );
        ndofs = dman->giveNumberOfDofs();
        for ( j = 1; j <= ndofs; j++ ) {
            jdof = dman->giveDof(j);
            if ( prescribedEquations ) {
                eqNum = jdof->__givePrescribedEquationNumber();
            } else {
                eqNum = jdof->__giveEquationNumber();
            }

            if ( jdof->isPrimaryDof() && eqNum ) {
                result &= pcbuff->packDouble( src->at(eqNum) );
            }
        }
    }

    return result;
}
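/*
 * Note: packDofManagers() and unpackDofManagers() below must visit the DOFs in
 * exactly the same order; only primary DOFs with a nonzero (prescribed) equation
 * number contribute one double each. A minimal sketch of how the number of packed
 * values could be counted up front is given below; the helper is illustrative
 * only (not part of OOFEM) and uses only the calls appearing in this file.
 */
#if 0
static int countPackedValues(Domain *domain, const IntArray *toSendMap)
{
    int nvals = 0;
    for ( int i = 1; i <= toSendMap->giveSize(); i++ ) {
        DofManager *dman = domain->giveDofManager( toSendMap->at(i) );
        for ( int j = 1; j <= dman->giveNumberOfDofs(); j++ ) {
            Dof *jdof = dman->giveDof(j);
            if ( jdof->isPrimaryDof() && jdof->__giveEquationNumber() ) {
                nvals++; // one double will be packed for this DOF
            }
        }
    }
    return nvals;
}
#endif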
void LumpedMassElement :: giveDofManDofIDMask(int inode, EquationID eid, IntArray &answer) const
{
    answer.resize(0, 6);
    DofManager *dman = this->giveDofManager(inode);
    int _i, _ndof = dman->giveNumberOfDofs();
    DofIDItem _dofid;

    // simply collect all "structural" dofs of the element node
    for ( _i = 1; _i <= _ndof; _i++ ) {
        _dofid = dman->giveDof(_i)->giveDofID();
        if ( ( _dofid == D_u ) || ( _dofid == D_v ) || ( _dofid == D_w ) ||
             ( _dofid == R_u ) || ( _dofid == R_v ) || ( _dofid == R_w ) ) {
            answer.followedBy(_dofid);
        }
    }
}
void PetscNatural2LocalOrdering :: init(EngngModel *emodel, EquationID ut, int di, EquationType et)
{
    Domain *d = emodel->giveDomain(di);
    int i, j, n_eq = 0, ndofs, ndofman = d->giveNumberOfDofManagers(), loc_eq = 1;
    bool lFlag;
    DofManager *dman;
    EModelDefaultEquationNumbering dn;
    EModelDefaultPrescribedEquationNumbering dpn;

    // determine the number of local eqs + the number of those shared DOFs which are numbered by the receiver
    // (a shared dofman is numbered on the partition with the lowest rank number)
    if ( et == et_standard ) {
        n2l.resize( emodel->giveNumberOfEquations(ut) );
    } else {
        n2l.resize( emodel->giveNumberOfPrescribedEquations(ut) );
    }

    for ( i = 1; i <= ndofman; i++ ) {
        dman = d->giveDofManager(i);
        lFlag = isLocal(dman);
        ndofs = dman->giveNumberOfDofs();
        for ( j = 1; j <= ndofs; j++ ) {
            if ( dman->giveDof(j)->isPrimaryDof() ) {
                if ( et == et_standard ) {
                    n_eq = dman->giveDof(j)->giveEquationNumber(dn);
                } else {
                    n_eq = dman->giveDof(j)->giveEquationNumber(dpn);
                }

                if ( n_eq == 0 ) {
                    continue;
                }

                if ( lFlag ) {
                    n2l.at(n_eq) = loc_eq++;
                } else {
                    n2l.at(n_eq) = 0;
                }
            }
        }
    }
}
int LumpedMassElement :: computeNumberOfDofs(EquationID ut)
{
    DofManager *dman = this->giveDofManager(1);
    int _i, _ndof = dman->giveNumberOfDofs();
    int answer = 0;
    DofIDItem _dofid;

    // simply count all "structural" dofs of the element node
    for ( _i = 1; _i <= _ndof; _i++ ) {
        _dofid = dman->giveDof(_i)->giveDofID();
        if ( ( _dofid == D_u ) || ( _dofid == D_v ) || ( _dofid == D_w ) ||
             ( _dofid == R_u ) || ( _dofid == R_v ) || ( _dofid == R_w ) ) {
            answer++;
        }
    }

    return answer;
}
void StructuralInterfaceElementPhF :: computeLocationArrayOfDofIDs( const IntArray &dofIdArray, IntArray &answer )
{
    // Routine to compute the local ordering array of an element, given a dofid array.
    answer.resize( 0 );

    int k = 0;
    for ( int i = 1; i <= this->giveNumberOfDofManagers(); i++ ) {
        DofManager *dMan = this->giveDofManager( i );
        for ( int j = 1; j <= dofIdArray.giveSize( ); j++ ) {
            if ( dMan->hasDofID( (DofIDItem) dofIdArray.at( j ) ) ) {
                // hack: assumes the dof with id dofIdArray.at(j) is the j-th dof of the manager
                answer.followedBy( k + j );
            }
        }
        k += dMan->giveNumberOfDofs( );
    }
}
void PhaseFieldElement :: computeLocationArrayOfDofIDs( const IntArray &dofIdArray, IntArray &answer )
{
    // Routine to compute the location array of an element, given a dofid array.
    answer.clear();

    NLStructuralElement *el = this->giveElement();
    int k = 0;
    for ( int i = 1; i <= el->giveNumberOfDofManagers(); i++ ) {
        DofManager *dMan = el->giveDofManager( i );
        for ( int j = 1; j <= dofIdArray.giveSize( ); j++ ) {
            if ( dMan->hasDofID( (DofIDItem) dofIdArray.at( j ) ) ) {
                Dof *d = dMan->giveDofWithID( dofIdArray.at( j ) );
                answer.followedBy( k + d->giveNumber( ) );
            }
        }
        k += dMan->giveNumberOfDofs( );
    }
}
int StructuralEngngModel :: unpackDofManagers(FloatArray *dest, ProcessCommunicator &processComm, bool prescribedEquations)
{
    int result = 1;
    int i, size;
    int j, ndofs, eqNum;
    Domain *domain = this->giveDomain(1);
    dofManagerParallelMode dofmanmode;
    IntArray const *toRecvMap = processComm.giveToRecvMap();
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    DofManager *dman;
    Dof *jdof;
    double value;

    size = toRecvMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        dman = domain->giveDofManager( toRecvMap->at(i) );
        ndofs = dman->giveNumberOfDofs();
        dofmanmode = dman->giveParallelMode();
        for ( j = 1; j <= ndofs; j++ ) {
            jdof = dman->giveDof(j);
            if ( prescribedEquations ) {
                eqNum = jdof->__givePrescribedEquationNumber();
            } else {
                eqNum = jdof->__giveEquationNumber();
            }

            if ( jdof->isPrimaryDof() && eqNum ) {
                result &= pcbuff->unpackDouble(value);
                if ( dofmanmode == DofManager_shared ) {
                    dest->at(eqNum) += value;
                } else if ( dofmanmode == DofManager_remote ) {
                    dest->at(eqNum) = value;
                } else {
                    _error("unpackReactions: unknown dof manager parallel mode");
                }
            }
        }
    }

    return result;
}
void CoupledFieldsElement :: computeLocationArrayOfDofIDs(const IntArray &dofIdArray, IntArray &answer)
{
    // Routine to compute the location array of an element, given a dofid array.
    answer.resize(0);

    int k = 0;
    for ( int i = 1; i <= numberOfDofMans; i++ ) {
        DofManager *dMan = this->giveDofManager(i);
        for ( int j = 1; j <= dofIdArray.giveSize(); j++ ) {
            if ( dMan->hasDofID( (DofIDItem) dofIdArray.at(j) ) ) {
                Dof *d = dMan->giveDofWithID( dofIdArray.at(j) );
                answer.followedBy( k + d->giveNumber() );
                //answer.followedBy( k + j );
            }
        }
        k += dMan->giveNumberOfDofs( );
    }
}
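/*
 * Illustration (not part of OOFEM): the offset k advances by the total number of
 * DOFs of each node, so the returned positions are element-local. Assume, for the
 * sake of the example, two nodes carrying DOFs (D_u, D_v, T_f) each; requesting
 * dofIdArray = {T_f} then yields answer = {3, 6}, because T_f is the 3rd DOF of
 * node 1 and the 3rd DOF of node 2 (offset k = 3). The variant further above that
 * pushes "k + j" instead of "k + d->giveNumber()" is only correct when the
 * requested ids happen to coincide with the local DOF ordering, which is why it
 * is marked as a hack.
 */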
void StructuralEngngModel :: buildReactionTable(IntArray &restrDofMans, IntArray &restrDofs,
                                                IntArray &eqn, TimeStep *tStep, int di)
{
    // determine number of restrained dofs
    Domain *domain = this->giveDomain(di);
    int numRestrDofs = this->giveNumberOfPrescribedDomainEquations(di, EID_MomentumBalance);
    int ndofMan = domain->giveNumberOfDofManagers();
    int i, j, indofs, rindex, count = 0;
    DofManager *inode;
    Dof *jdof;

    // initialize corresponding dofManagers and dofs for each restrained dof
    restrDofMans.resize(numRestrDofs);
    restrDofs.resize(numRestrDofs);
    eqn.resize(numRestrDofs);

    for ( i = 1; i <= ndofMan; i++ ) {
        inode = domain->giveDofManager(i);
        indofs = inode->giveNumberOfDofs();
        for ( j = 1; j <= indofs; j++ ) {
            jdof = inode->giveDof(j);
            if ( ( jdof->giveClassID() != SimpleSlaveDofClass ) && ( jdof->hasBc(tStep) ) ) { // skip slave dofs
                rindex = jdof->__givePrescribedEquationNumber();
                if ( rindex ) {
                    count++;
                    restrDofMans.at(count) = i;
                    restrDofs.at(count) = j;
                    eqn.at(count) = rindex;
                } else {
                    // NullDof has no equation number and no prescribed equation number
                    //_error ("No prescribed equation number assigned to supported DOF");
                }
            }
        }
    }
}
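/*
 * A minimal consumption sketch (illustrative only, not part of OOFEM): the three
 * arrays are parallel, so a hypothetical "reactions" vector indexed by prescribed
 * equation numbers could be reported like this.
 */
#if 0
for ( int i = 1; i <= eqn.giveSize(); i++ ) {
    DofManager *dm = domain->giveDofManager( restrDofMans.at(i) );
    int localDof   = restrDofs.at(i);            // local dof index within dm
    double r       = reactions.at( eqn.at(i) );  // hypothetical reactions array
    // ... report reaction r for dof localDof of node dm ...
}
#endif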
void PetscNatural2GlobalOrdering :: init(EngngModel *emodel, EquationID ut, int di, EquationType et) { Domain *d = emodel->giveDomain(di); int i, j, k, p, ndofs, ndofman = d->giveNumberOfDofManagers(); int myrank = emodel->giveRank(); DofManager *dman; // determine number of local eqs + number of those shared DOFs which are numbered by receiver // shared dofman is numbered on partition with lovest rank number EModelDefaultEquationNumbering dn; EModelDefaultPrescribedEquationNumbering dpn; #ifdef __VERBOSE_PARALLEL VERBOSEPARALLEL_PRINT("PetscNatural2GlobalOrdering :: init", "initializing N2G ordering", myrank); #endif l_neqs = 0; for ( i = 1; i <= ndofman; i++ ) { dman = d->giveDofManager(i); /* * if (dman->giveParallelMode() == DofManager_local) { // count all dofman eqs * ndofs = dman->giveNumberOfDofs (); * for (j=1; j<=ndofs; j++) { * if (dman->giveDof(j)->isPrimaryDof()) { * if (dman->giveDof(j)->giveEquationNumber()) l_neqs++; * } * } * } else if (dman->giveParallelMode() == DofManager_shared) { * // determine if problem is the lowest one sharing the dofman; if yes the receiver is responsible to * // deliver number * IntArray *plist = dman->givePartitionList(); * int n = plist->giveSize(); * int minrank = myrank; * for (j=1; j<=n; j++) minrank = min (minrank, plist->at(j)); * if (minrank == myrank) { // count eqs * ndofs = dman->giveNumberOfDofs (); * for (j=1; j<=ndofs; j++) { * if (dman->giveDof(j)->isPrimaryDof()) { * if (dman->giveDof(j)->giveEquationNumber()) l_neqs++; * } * } * } * } // end shared dman */ if ( isLocal(dman) ) { ndofs = dman->giveNumberOfDofs(); for ( j = 1; j <= ndofs; j++ ) { if ( dman->giveDof(j)->isPrimaryDof() ) { if ( et == et_standard ) { if ( dman->giveDof(j)->giveEquationNumber(dn) ) { l_neqs++; } } else { if ( dman->giveDof(j)->giveEquationNumber(dpn) ) { l_neqs++; } } } } } } // exchange with other procs the number of eqs numbered on particular procs int *leqs = new int [ emodel->giveNumberOfProcesses() ]; MPI_Allgather(& l_neqs, 1, MPI_INT, leqs, 1, MPI_INT, MPI_COMM_WORLD); // compute local offset int offset = 0; for ( j = 0; j < myrank; j++ ) { offset += leqs [ j ]; } // count global number of eqs for ( g_neqs = 0, j = 0; j < emodel->giveNumberOfProcesses(); j++ ) { g_neqs += leqs [ j ]; } // send numbered shared ones if ( et == et_standard ) { locGlobMap.resize( emodel->giveNumberOfEquations(ut) ); } else { locGlobMap.resize( emodel->giveNumberOfPrescribedEquations(ut) ); } // determine shared dofs int psize, nproc = emodel->giveNumberOfProcesses(); IntArray sizeToSend(nproc), sizeToRecv(nproc), nrecToReceive(nproc); #ifdef __VERBOSE_PARALLEL IntArray nrecToSend(nproc); #endif const IntArray *plist; for ( i = 1; i <= ndofman; i++ ) { // if (domain->giveDofManager(i)->giveParallelMode() == DofManager_shared) { if ( isShared( d->giveDofManager(i) ) ) { int n = d->giveDofManager(i)->giveNumberOfDofs(); plist = d->giveDofManager(i)->givePartitionList(); psize = plist->giveSize(); int minrank = myrank; for ( j = 1; j <= psize; j++ ) { minrank = min( minrank, plist->at(j) ); } if ( minrank == myrank ) { // count to send for ( j = 1; j <= psize; j++ ) { #ifdef __VERBOSE_PARALLEL nrecToSend( plist->at(j) )++; #endif sizeToSend( plist->at(j) ) += ( 1 + n ); // ndofs+dofman number } } else { nrecToReceive(minrank)++; sizeToRecv(minrank) += ( 1 + n ); // ndofs+dofman number } } } #ifdef __VERBOSE_PARALLEL for ( i = 0; i < nproc; i++ ) { OOFEM_LOG_INFO("[%d] Record Statistics: Sending %d Receiving %d to %d\n", myrank, nrecToSend(i), nrecToReceive(i), i); } 
#endif std :: map< int, int >globloc; // global->local mapping for shared // number local guys int globeq = offset; for ( i = 1; i <= ndofman; i++ ) { dman = d->giveDofManager(i); //if (dman->giveParallelMode() == DofManager_shared) { if ( isShared(dman) ) { globloc [ dman->giveGlobalNumber() ] = i; // build global->local mapping for shared plist = dman->givePartitionList(); psize = plist->giveSize(); int minrank = myrank; for ( j = 1; j <= psize; j++ ) { minrank = min( minrank, plist->at(j) ); } if ( minrank == myrank ) { // local ndofs = dman->giveNumberOfDofs(); for ( j = 1; j <= ndofs; j++ ) { if ( dman->giveDof(j)->isPrimaryDof() ) { int eq; if ( et == et_standard ) { eq = dman->giveDof(j)->giveEquationNumber(dn); } else { eq = dman->giveDof(j)->giveEquationNumber(dpn); } if ( eq ) { locGlobMap.at(eq) = globeq++; } } } } //} else if (dman->giveParallelMode() == DofManager_local) { } else { ndofs = dman->giveNumberOfDofs(); for ( j = 1; j <= ndofs; j++ ) { if ( dman->giveDof(j)->isPrimaryDof() ) { int eq; if ( et == et_standard ) { eq = dman->giveDof(j)->giveEquationNumber(dn); } else { eq = dman->giveDof(j)->giveEquationNumber(dpn); } if ( eq ) { locGlobMap.at(eq) = globeq++; } } } } } /* * fprintf (stderr, "[%d] locGlobMap: ", myrank); * for (i=1; i<=locGlobMap.giveSize(); i++) * fprintf (stderr, "%d ",locGlobMap.at(i)); */ // pack data for remote procs CommunicationBuffer **buffs = new CommunicationBuffer * [ nproc ]; for ( p = 0; p < nproc; p++ ) { buffs [ p ] = new StaticCommunicationBuffer(MPI_COMM_WORLD, 0); buffs [ p ]->resize( buffs [ p ]->givePackSize(MPI_INT, 1) * sizeToSend(p) ); #if 0 OOFEM_LOG_INFO( "[%d]PetscN2G:: init: Send buffer[%d] size %d\n", myrank, p, sizeToSend(p) ); #endif } for ( i = 1; i <= ndofman; i++ ) { if ( isShared( d->giveDofManager(i) ) ) { dman = d->giveDofManager(i); plist = dman->givePartitionList(); psize = plist->giveSize(); int minrank = myrank; for ( j = 1; j <= psize; j++ ) { minrank = min( minrank, plist->at(j) ); } if ( minrank == myrank ) { // do send for ( j = 1; j <= psize; j++ ) { p = plist->at(j); if ( p == myrank ) { continue; } #if 0 OOFEM_LOG_INFO("[%d]PetscN2G:: init: Sending localShared node %d[%d] to proc %d\n", myrank, i, dman->giveGlobalNumber(), p); #endif buffs [ p ]->packInt( dman->giveGlobalNumber() ); ndofs = dman->giveNumberOfDofs(); for ( k = 1; k <= ndofs; k++ ) { if ( dman->giveDof(k)->isPrimaryDof() ) { int eq; if ( et == et_standard ) { eq = dman->giveDof(k)->giveEquationNumber(dn); } else { eq = dman->giveDof(k)->giveEquationNumber(dpn); } if ( eq ) { buffs [ p ]->packInt( locGlobMap.at(eq) ); } } } } } } } //fprintf (stderr, "[%d] Sending glob nums ...", myrank); // send buffers for ( p = 0; p < nproc; p++ ) { if ( p != myrank ) { buffs [ p ]->iSend(p, 999); } } /**** * * for (p=0; p<nproc; p++) { * if (p == myrank) continue; * for (i=1; i<= ndofman; i++) { * //if (domain->giveDofManager(i)->giveParallelMode() == DofManager_shared) { * if (isShared(d->giveDofManager(i))) { * dman = d->giveDofManager(i); * plist = dman->givePartitionList(); * psize = plist->giveSize(); * int minrank = myrank; * for (j=1; j<=psize; j++) minrank = min (minrank, plist->at(j)); * if (minrank == myrank) { // do send * buffs[p]->packInt(dman->giveGlobalNumber()); * ndofs = dman->giveNumberOfDofs (); * for (j=1; j<=ndofs; j++) { * if (dman->giveDof(j)->isPrimaryDof()) { * buffs[p]->packInt(locGlobMap.at(dman->giveDof(j)->giveEquationNumber())); * } * } * } * } * } * // send buffer * buffs[p]->iSend(p, 999); * } ****/ // receive remote eqs 
and complete global numbering CommunicationBuffer **rbuffs = new CommunicationBuffer * [ nproc ]; for ( p = 0; p < nproc; p++ ) { rbuffs [ p ] = new StaticCommunicationBuffer(MPI_COMM_WORLD, 0); rbuffs [ p ]->resize( rbuffs [ p ]->givePackSize(MPI_INT, 1) * sizeToRecv(p) ); #if 0 OOFEM_LOG_INFO( "[%d]PetscN2G:: init: Receive buffer[%d] size %d\n", myrank, p, sizeToRecv(p) ); #endif } //fprintf (stderr, "[%d] Receiving glob nums ...", myrank); for ( p = 0; p < nproc; p++ ) { if ( p != myrank ) { rbuffs [ p ]->iRecv(p, 999); } } IntArray finished(nproc); finished.zero(); int fin = 1; finished.at(emodel->giveRank() + 1) = 1; do { for ( p = 0; p < nproc; p++ ) { if ( finished.at(p + 1) == 0 ) { if ( rbuffs [ p ]->testCompletion() ) { // data are here // unpack them int nite = nrecToReceive(p); int shdm, ldm; for ( i = 1; i <= nite; i++ ) { rbuffs [ p ]->unpackInt(shdm); #if 0 OOFEM_LOG_INFO("[%d]PetscN2G:: init: Received shared node [%d] from proc %d\n", myrank, shdm, p); #endif // // find local guy coorecponding to shdm if ( globloc.find(shdm) != globloc.end() ) { ldm = globloc [ shdm ]; } else { OOFEM_ERROR3("[%d] PetscNatural2GlobalOrdering :: init: invalid shared dofman received, globnum %d\n", myrank, shdm); } dman = d->giveDofManager(ldm); ndofs = dman->giveNumberOfDofs(); for ( j = 1; j <= ndofs; j++ ) { if ( dman->giveDof(j)->isPrimaryDof() ) { int eq; if ( et == et_standard ) { eq = dman->giveDof(j)->giveEquationNumber(dn); } else { eq = dman->giveDof(j)->giveEquationNumber(dpn); } if ( eq ) { int val; rbuffs [ p ]->unpackInt(val); locGlobMap.at(eq) = val; } } } } finished.at(p + 1) = 1; fin++; } } } } while ( fin < nproc ); /* * fprintf (stderr, "[%d] Finished receiving glob nums ...", myrank); * * fprintf (stderr, "[%d] locGlobMap:", myrank); * for (i=1; i<=locGlobMap.giveSize(); i++) * fprintf (stderr, "%d ",locGlobMap.at(i)); */ #ifdef __VERBOSE_PARALLEL if ( et == et_standard ) { int _eq; char *ptr; char *locname = "local", *shname = "shared", *unkname = "unknown"; for ( i = 1; i <= ndofman; i++ ) { dman = d->giveDofManager(i); if ( dman->giveParallelMode() == DofManager_local ) { ptr = locname; } else if ( dman->giveParallelMode() == DofManager_shared ) { ptr = shname; } else { ptr = unkname; } ndofs = dman->giveNumberOfDofs(); for ( j = 1; j <= ndofs; j++ ) { if ( ( _eq = dman->giveDof(j)->giveEquationNumber(dn) ) ) { fprintf( stderr, "[%d] n:%6s %d[%d] (%d), leq = %d, geq = %d\n", emodel->giveRank(), ptr, i, dman->giveGlobalNumber(), j, _eq, locGlobMap.at(_eq) ); } else { fprintf(stderr, "[%d] n:%6s %d[%d] (%d), leq = %d, geq = %d\n", emodel->giveRank(), ptr, i, dman->giveGlobalNumber(), j, _eq, 0); } } } } #endif // build reverse map int lneq; if ( et == et_standard ) { lneq = emodel->giveNumberOfEquations(ut); } else { lneq = emodel->giveNumberOfPrescribedEquations(ut); } globLocMap.clear(); for ( i = 1; i <= lneq; i++ ) { globLocMap [ locGlobMap.at(i) ] = i; } for ( p = 0; p < nproc; p++ ) { delete rbuffs [ p ]; delete buffs [ p ]; } delete[] rbuffs; delete[] buffs; delete[] leqs; MPI_Barrier(MPI_COMM_WORLD); #ifdef __VERBOSE_PARALLEL VERBOSEPARALLEL_PRINT("PetscNatural2GlobalOrdering :: init", "done", myrank); #endif }
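/*
 * Numbering scheme used above, shown on a small assumed example: with 3 ranks and
 * leqs = { 4, 3, 5 }, rank 0 numbers global equations 0..3 (offset 0), rank 1
 * numbers 4..6 (offset 4) and rank 2 numbers 7..11 (offset 7); g_neqs = 12.
 * Equations of shared dof managers are numbered only by the lowest-ranked sharing
 * partition and then delivered to the remaining partitions through the buffers
 * packed and received above.
 */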
bool NRSolver :: checkConvergence(FloatArray &RT, FloatArray &F, FloatArray &rhs, FloatArray &ddX, FloatArray &X, double RRT, const FloatArray &internalForcesEBENorm, int nite, bool &errorOutOfRange, TimeStep *tNow) { double forceErr, dispErr; FloatArray dg_forceErr, dg_dispErr, dg_totalLoadLevel, dg_totalDisp; bool answer; EModelDefaultEquationNumbering dn; #ifdef __PARALLEL_MODE #ifdef __PETSC_MODULE PetscContext *parallel_context = engngModel->givePetscContext(this->domain->giveNumber()); Natural2LocalOrdering *n2l = parallel_context->giveN2Lmap(); #endif #endif /* * The force errors are (if possible) evaluated as relative errors. * If the norm of applied load vector is zero (one may load by temperature, etc) * then the norm of reaction forces is used in relative norm evaluation. * * Note: This is done only when all dofs are included (nccdg = 0). Not implemented if * multiple convergence criteria are used. * */ answer = true; errorOutOfRange = false; if ( internalForcesEBENorm.giveSize() > 1 ) { // Special treatment when just one norm is given; No grouping int nccdg = this->domain->giveMaxDofID(); // Keeps tracks of which dof IDs are actually in use; IntArray idsInUse(nccdg); idsInUse.zero(); // zero error norms per group dg_forceErr.resize(nccdg); dg_forceErr.zero(); dg_dispErr.resize(nccdg); dg_dispErr.zero(); dg_totalLoadLevel.resize(nccdg); dg_totalLoadLevel.zero(); dg_totalDisp.resize(nccdg); dg_totalDisp.zero(); // loop over dof managers int ndofman = domain->giveNumberOfDofManagers(); for ( int idofman = 1; idofman <= ndofman; idofman++ ) { DofManager *dofman = domain->giveDofManager(idofman); #if ( defined ( __PARALLEL_MODE ) && defined ( __PETSC_MODULE ) ) if ( !parallel_context->isLocal(dofman) ) { continue; } #endif // loop over individual dofs int ndof = dofman->giveNumberOfDofs(); for ( int idof = 1; idof <= ndof; idof++ ) { Dof *dof = dofman->giveDof(idof); if ( !dof->isPrimaryDof() ) continue; int eq = dof->giveEquationNumber(dn); int dofid = dof->giveDofID(); if ( !eq ) continue; dg_forceErr.at(dofid) += rhs.at(eq) * rhs.at(eq); dg_dispErr.at(dofid) += ddX.at(eq) * ddX.at(eq); dg_totalLoadLevel.at(dofid) += RT.at(eq) * RT.at(eq); dg_totalDisp.at(dofid) += X.at(eq) * X.at(eq); idsInUse.at(dofid) = 1; } // end loop over DOFs } // end loop over dof managers // loop over elements and their DOFs int nelem = domain->giveNumberOfElements(); for ( int ielem = 1; ielem <= nelem; ielem++ ) { Element *elem = domain->giveElement(ielem); #ifdef __PARALLEL_MODE if ( elem->giveParallelMode() != Element_local ) { continue; } #endif // loop over element internal Dofs for ( int idofman = 1; idofman <= elem->giveNumberOfInternalDofManagers(); idofman++) { DofManager *dofman = elem->giveInternalDofManager(idofman); int ndof = dofman->giveNumberOfDofs(); // loop over individual dofs for ( int idof = 1; idof <= ndof; idof++ ) { Dof *dof = dofman->giveDof(idof); if ( !dof->isPrimaryDof() ) continue; int eq = dof->giveEquationNumber(dn); int dofid = dof->giveDofID(); if ( !eq ) continue; #if ( defined ( __PARALLEL_MODE ) && defined ( __PETSC_MODULE ) ) if ( engngModel->isParallel() && !n2l->giveNewEq(eq) ) continue; #endif dg_forceErr.at(dofid) += rhs.at(eq) * rhs.at(eq); dg_dispErr.at(dofid) += ddX.at(eq) * ddX.at(eq); dg_totalLoadLevel.at(dofid) += RT.at(eq) * RT.at(eq); dg_totalDisp.at(dofid) += X.at(eq) * X.at(eq); idsInUse.at(dofid) = 1; } // end loop over DOFs } // end loop over element internal dofmans } // end loop over elements // loop over boundary conditions and their internal DOFs 
for ( int ibc = 1; ibc <= domain->giveNumberOfBoundaryConditions(); ibc++ ) { GeneralBoundaryCondition *bc = domain->giveBc(ibc); // loop over element internal Dofs for ( int idofman = 1; idofman <= bc->giveNumberOfInternalDofManagers(); idofman++) { DofManager *dofman = bc->giveInternalDofManager(idofman); int ndof = dofman->giveNumberOfDofs(); // loop over individual dofs for ( int idof = 1; idof <= ndof; idof++ ) { Dof *dof = dofman->giveDof(idof); if ( !dof->isPrimaryDof() ) continue; int eq = dof->giveEquationNumber(dn); int dofid = dof->giveDofID(); if ( !eq ) continue; #if ( defined ( __PARALLEL_MODE ) && defined ( __PETSC_MODULE ) ) if ( engngModel->isParallel() && !n2l->giveNewEq(eq) ) continue; #endif dg_forceErr.at(dofid) += rhs.at(eq) * rhs.at(eq); dg_dispErr.at(dofid) += ddX.at(eq) * ddX.at(eq); dg_totalLoadLevel.at(dofid) += RT.at(eq) * RT.at(eq); dg_totalDisp.at(dofid) += X.at(eq) * X.at(eq); idsInUse.at(dofid) = 1; } // end loop over DOFs } // end loop over element internal dofmans } // end loop over elements #ifdef __PARALLEL_MODE // exchange individual partition contributions (simultaneously for all groups) #ifdef __PETSC_MODULE FloatArray collectiveErr(nccdg); parallel_context->accumulate(dg_forceErr, collectiveErr); dg_forceErr = collectiveErr; parallel_context->accumulate(dg_dispErr, collectiveErr); dg_dispErr = collectiveErr; parallel_context->accumulate(dg_totalLoadLevel, collectiveErr); dg_totalLoadLevel = collectiveErr; parallel_context->accumulate(dg_totalDisp, collectiveErr); dg_totalDisp = collectiveErr; #else if ( this->engngModel->isParallel() ) { FloatArray collectiveErr(nccdg); MPI_Allreduce(dg_forceErr.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm); dg_forceErr = collectiveErr; MPI_Allreduce(dg_dispErr.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm); dg_dispErr = collectiveErr; MPI_Allreduce(dg_totalLoadLevel.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm); dg_totalLoadLevel = collectiveErr; MPI_Allreduce(dg_totalDisp.givePointer(), collectiveErr.givePointer(), nccdg, MPI_DOUBLE, MPI_SUM, comm); dg_totalDisp = collectiveErr; return globalNorm; } #endif #endif OOFEM_LOG_INFO("NRSolver: %-5d", nite); //bool zeroNorm = false; // loop over dof groups and check convergence individually for ( int dg = 1; dg <= nccdg; dg++ ) { bool zeroFNorm = false, zeroDNorm = false; // Skips the ones which aren't used in this problem (the residual will be zero for these anyway, but it is annoying to print them all) if ( !idsInUse.at(dg) ) { continue; } OOFEM_LOG_INFO( " %s:", __DofIDItemToString((DofIDItem)dg).c_str() ); if ( rtolf.at(1) > 0.0 ) { // compute a relative error norm if ( ( dg_totalLoadLevel.at(dg) + internalForcesEBENorm.at(dg) ) > nrsolver_ERROR_NORM_SMALL_NUM ) { forceErr = sqrt( dg_forceErr.at(dg) / ( dg_totalLoadLevel.at(dg) + internalForcesEBENorm.at(dg) ) ); } else { // If both external forces and internal ebe norms are zero, then the residual must be zero. //zeroNorm = true; // Warning about this afterwards. zeroFNorm = true; forceErr = sqrt( dg_forceErr.at(dg) ); } if ( forceErr > rtolf.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) { errorOutOfRange = true; } if ( forceErr > rtolf.at(1) ) { answer = false; } OOFEM_LOG_INFO( zeroFNorm ? 
" *%.3e" : " %.3e", forceErr ); } if ( rtold.at(1) > 0.0 ) { // compute displacement error if ( dg_totalDisp.at(dg) > nrsolver_ERROR_NORM_SMALL_NUM ) { dispErr = sqrt( dg_dispErr.at(dg) / dg_totalDisp.at(dg) ); } else { ///@todo This is almost always the case for displacement error. nrsolveR_ERROR_NORM_SMALL_NUM is no good. //zeroNorm = true; // Warning about this afterwards. //zeroDNorm = true; dispErr = sqrt( dg_dispErr.at(dg) ); } if ( dispErr > rtold.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) { errorOutOfRange = true; } if ( dispErr > rtold.at(1) ) { answer = false; } OOFEM_LOG_INFO( zeroDNorm ? " *%.3e" : " %.3e", dispErr ); } } OOFEM_LOG_INFO("\n"); //if ( zeroNorm ) OOFEM_WARNING("NRSolver :: checkConvergence - Had to resort to absolute error measure (marked by *)"); } else { // No dof grouping double dXX, dXdX; if ( engngModel->giveProblemScale() == macroScale ) { OOFEM_LOG_INFO("NRSolver: %-15d", nite); } else { OOFEM_LOG_INFO(" NRSolver: %-15d", nite); } #ifdef __PARALLEL_MODE forceErr = parallel_context->norm(rhs); forceErr *= forceErr; dXX = parallel_context->localNorm(X); dXX *= dXX; // Note: Solutions are always total global values (natural distribution makes little sense for the solution) dXdX = parallel_context->localNorm(ddX); dXdX *= dXdX; #else forceErr = rhs.computeSquaredNorm(); dXX = X.computeSquaredNorm(); dXdX = ddX.computeSquaredNorm(); #endif if ( rtolf.at(1) > 0.0 ) { // we compute a relative error norm if ( ( RRT + internalForcesEBENorm.at(1) ) > nrsolver_ERROR_NORM_SMALL_NUM ) { forceErr = sqrt( forceErr / ( RRT + internalForcesEBENorm.at(1) ) ); } else { forceErr = sqrt( forceErr ); // absolute norm as last resort } if ( fabs(forceErr) > rtolf.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) { errorOutOfRange = true; } if ( fabs(forceErr) > rtolf.at(1) ) { answer = false; } OOFEM_LOG_INFO(" %-15e", forceErr); } if ( rtold.at(1) > 0.0 ) { // compute displacement error // err is relative displacement change if ( dXX > nrsolver_ERROR_NORM_SMALL_NUM ) { dispErr = sqrt( dXdX / dXX ); } else { dispErr = sqrt( dXdX ); } if ( fabs(dispErr) > rtold.at(1) * NRSOLVER_MAX_REL_ERROR_BOUND ) { errorOutOfRange = true; } if ( fabs(dispErr) > rtold.at(1) ) { answer = false; } OOFEM_LOG_INFO(" %-15e", dispErr); } OOFEM_LOG_INFO("\n"); } // end default case (all dofs contributing) return answer; }
void DEIDynamic :: solveYourselfAt(TimeStep *tStep) { // // creates system of governing eq's and solves them at given time step // // this is an explicit problem: we assemble governing equating at time t // and solution is obtained for time t+dt // // first assemble problem at current time step to obtain results in following // time step. // and then print results for this step also. // for first time step we need special start code Domain *domain = this->giveDomain(1); int nelem = domain->giveNumberOfElements(); int nman = domain->giveNumberOfDofManagers(); IntArray loc; Element *element; DofManager *node; Dof *iDof; int nDofs, neq; int i, k, n, j, jj, kk, init = 0; double coeff, maxDt, maxOmi, maxOm = 0., maxOmEl, c1, c2, c3; FloatMatrix charMtrx, charMtrx2; FloatArray previousDisplacementVector; neq = this->giveNumberOfEquations(EID_MomentumBalance); if ( tStep->giveNumber() == giveNumberOfFirstStep() ) { init = 1; #ifdef VERBOSE OOFEM_LOG_INFO("Assembling mass matrix\n"); #endif // // first step assemble mass Matrix // massMatrix.resize(neq); massMatrix.zero(); EModelDefaultEquationNumbering dn; for ( i = 1; i <= nelem; i++ ) { element = domain->giveElement(i); element->giveLocationArray(loc, EID_MomentumBalance, dn); element->giveCharacteristicMatrix(charMtrx, LumpedMassMatrix, tStep); // charMtrx.beLumpedOf(fullCharMtrx); element->giveCharacteristicMatrix(charMtrx2, StiffnessMatrix, tStep); // // assemble it manually // #ifdef DEBUG if ( ( n = loc.giveSize() ) != charMtrx.giveNumberOfRows() ) { _error("solveYourselfAt : dimension mismatch"); } #endif n = loc.giveSize(); maxOmEl = 0.; for ( j = 1; j <= n; j++ ) { if ( charMtrx.at(j, j) > ZERO_MASS ) { maxOmi = charMtrx2.at(j, j) / charMtrx.at(j, j); if ( init ) { maxOmEl = ( maxOmEl > maxOmi ) ? ( maxOmEl ) : ( maxOmi ); } } } maxOm = ( maxOm > maxOmEl ) ? ( maxOm ) : ( maxOmEl ); for ( j = 1; j <= n; j++ ) { jj = loc.at(j); if ( ( jj ) && ( charMtrx.at(j, j) <= ZERO_MASS ) ) { charMtrx.at(j, j) = charMtrx2.at(j, j) / maxOmEl; } } for ( j = 1; j <= n; j++ ) { jj = loc.at(j); if ( jj ) { massMatrix.at(jj) += charMtrx.at(j, j); } } } // if init - try to determine the best deltaT if ( init ) { maxDt = 2 / sqrt(maxOm); if ( deltaT > maxDt ) { OOFEM_LOG_RELEVANT("DEIDynamic: deltaT reduced to %e\n", maxDt); deltaT = maxDt; tStep->setTimeIncrement(deltaT); } } // // special init step - compute displacements at tstep 0 // displacementVector.resize(neq); displacementVector.zero(); nextDisplacementVector.resize(neq); nextDisplacementVector.zero(); velocityVector.resize(neq); velocityVector.zero(); accelerationVector.resize(neq); accelerationVector.zero(); for ( j = 1; j <= nman; j++ ) { node = domain->giveDofManager(j); nDofs = node->giveNumberOfDofs(); for ( k = 1; k <= nDofs; k++ ) { // ask for initial values obtained from // bc (boundary conditions) and ic (initial conditions) // now we are setting initial cond. for step -1. iDof = node->giveDof(k); if ( !iDof->isPrimaryDof() ) { continue; } jj = iDof->__giveEquationNumber(); if ( jj ) { nextDisplacementVector.at(jj) = iDof->giveUnknown(EID_MomentumBalance, VM_Total, tStep); // become displacementVector after init velocityVector.at(jj) = iDof->giveUnknown(EID_MomentumBalance, VM_Velocity, tStep); // accelerationVector = iDof->giveUnknown(AccelerartionVector,tStep) ; } } } for ( j = 1; j <= neq; j++ ) { nextDisplacementVector.at(j) -= velocityVector.at(j) * ( deltaT ); } return; } // end of init step #ifdef VERBOSE OOFEM_LOG_INFO("Assembling right hand side\n"); #endif c1 = ( 1. 
/ ( deltaT * deltaT ) ); c2 = ( 1. / ( 2. * deltaT ) ); c3 = ( 2. / ( deltaT * deltaT ) ); previousDisplacementVector = displacementVector; displacementVector = nextDisplacementVector; // // assembling the element part of load vector // loadVector.resize( this->giveNumberOfEquations(EID_MomentumBalance) ); loadVector.zero(); this->assembleVector(loadVector, tStep, EID_MomentumBalance, ExternalForcesVector, VM_Total, EModelDefaultEquationNumbering(), domain); // // assembling additional parts of right hand side // EModelDefaultEquationNumbering dn; for ( i = 1; i <= nelem; i++ ) { element = domain->giveElement(i); element->giveLocationArray(loc, EID_MomentumBalance, dn); element->giveCharacteristicMatrix(charMtrx, StiffnessMatrix, tStep); n = loc.giveSize(); for ( j = 1; j <= n; j++ ) { jj = loc.at(j); if ( jj ) { for ( k = 1; k <= n; k++ ) { kk = loc.at(k); if ( kk ) { loadVector.at(jj) -= charMtrx.at(j, k) * displacementVector.at(kk); } } // // if init step - find minimum period of vibration in order to // determine maximal admissible time step // //maxOmi = charMtrx.at(j,j)/massMatrix.at(jj) ; //if (init) maxOm = (maxOm > maxOmi) ? (maxOm) : (maxOmi) ; } } } for ( j = 1; j <= neq; j++ ) { coeff = massMatrix.at(j); loadVector.at(j) += coeff * c3 * displacementVector.at(j) - coeff * ( c1 - dumpingCoef * c2 ) * previousDisplacementVector.at(j); } // // set-up numerical model // /* it is not necessary to call numerical method * approach used here is not good, but effective enough * inverse of diagonal mass matrix is done here */ // // call numerical model to solve arose problem - done locally here // #ifdef VERBOSE OOFEM_LOG_RELEVANT( "Solving [step number %8d, time %15e]\n", tStep->giveNumber(), tStep->giveTargetTime() ); #endif double prevD; for ( i = 1; i <= neq; i++ ) { prevD = previousDisplacementVector.at(i); nextDisplacementVector.at(i) = loadVector.at(i) / ( massMatrix.at(i) * ( c1 + dumpingCoef * c2 ) ); velocityVector.at(i) = nextDisplacementVector.at(i) - prevD; accelerationVector.at(i) = nextDisplacementVector.at(i) - 2. * displacementVector.at(i) + prevD; } accelerationVector.times(c1); velocityVector.times(c2); }
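/*
 * Stability note for the explicit scheme above: maxOm is the largest diagonal
 * stiffness/mass ratio (an upper estimate of the highest eigenfrequency squared),
 * and the critical step is maxDt = 2 / sqrt(maxOm). For example, assuming
 * maxOm = 1.0e6 s^-2, the step would be reduced to maxDt = 2.0e-3 s. The update
 * at the end corresponds to the central difference scheme with the constants
 * c1 = 1/dt^2, c2 = 1/(2 dt), c3 = 2/dt^2 defined above.
 */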
void NlDEIDynamic :: solveYourselfAt(TimeStep *tStep) { // // Creates system of governing eq's and solves them at given time step. // Domain *domain = this->giveDomain(1); int neq = this->giveNumberOfEquations(EID_MomentumBalance); int nman = domain->giveNumberOfDofManagers(); DofManager *node; Dof *iDof; int nDofs; int i, k, j, jj; double coeff, maxDt, maxOm = 0.; double prevIncrOfDisplacement, incrOfDisplacement; if ( initFlag ) { #ifdef VERBOSE OOFEM_LOG_DEBUG("Assembling mass matrix\n"); #endif // // Assemble mass matrix. // this->computeMassMtrx(massMatrix, maxOm, tStep); if ( drFlag ) { // If dynamic relaxation: Assemble amplitude load vector. loadRefVector.resize(neq); loadRefVector.zero(); this->computeLoadVector(loadRefVector, VM_Total, tStep); #ifdef __PARALLEL_MODE // Compute the processor part of load vector norm pMp this->pMp = 0.0; double my_pMp = 0.0, coeff = 1.0; int eqNum, ndofs, ndofman = domain->giveNumberOfDofManagers(); dofManagerParallelMode dofmanmode; DofManager *dman; Dof *jdof; for ( int dm = 1; dm <= ndofman; dm++ ) { dman = domain->giveDofManager(dm); ndofs = dman->giveNumberOfDofs(); dofmanmode = dman->giveParallelMode(); // Skip all remote and null dofmanagers coeff = 1.0; if ( ( dofmanmode == DofManager_remote ) || ( ( dofmanmode == DofManager_null ) ) ) { continue; } else if ( dofmanmode == DofManager_shared ) { coeff = 1. / dman->givePartitionsConnectivitySize(); } // For shared nodes we add locally an average = 1/givePartitionsConnectivitySize()*contribution, for ( j = 1; j <= ndofs; j++ ) { jdof = dman->giveDof(j); if ( jdof->isPrimaryDof() && ( eqNum = jdof->__giveEquationNumber() ) ) { my_pMp += coeff * loadRefVector.at(eqNum) * loadRefVector.at(eqNum) / massMatrix.at(eqNum); } } } // Sum up the contributions from processors. MPI_Allreduce(& my_pMp, & pMp, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); #else this->pMp = 0.0; for ( i = 1; i <= neq; i++ ) { pMp += loadRefVector.at(i) * loadRefVector.at(i) / massMatrix.at(i); } #endif // Solve for rate of loading process (parameter "c") (undamped system assumed), if ( dumpingCoef < 1.e-3 ) { c = 3.0 * this->pyEstimate / pMp / Tau / Tau; } else { c = this->pyEstimate * Tau * dumpingCoef * dumpingCoef * dumpingCoef / pMp / ( -3.0 / 2.0 + dumpingCoef * Tau + 2.0 * exp(-dumpingCoef * Tau) - 0.5 * exp(-2.0 * dumpingCoef * Tau) ); } } initFlag = 0; } if ( tStep->giveNumber() == giveNumberOfFirstStep() ) { // // Special init step - Compute displacements at tstep 0. // displacementVector.resize(neq); displacementVector.zero(); previousIncrementOfDisplacementVector.resize(neq); previousIncrementOfDisplacementVector.zero(); velocityVector.resize(neq); velocityVector.zero(); accelerationVector.resize(neq); accelerationVector.zero(); for ( j = 1; j <= nman; j++ ) { node = domain->giveDofManager(j); nDofs = node->giveNumberOfDofs(); for ( k = 1; k <= nDofs; k++ ) { // Ask for initial values obtained from // bc (boundary conditions) and ic (initial conditions) // all dofs are expected to be DisplacementVector type. iDof = node->giveDof(k); if ( !iDof->isPrimaryDof() ) { continue; } jj = iDof->__giveEquationNumber(); if ( jj ) { displacementVector.at(jj) = iDof->giveUnknown(EID_MomentumBalance, VM_Total, tStep); velocityVector.at(jj) = iDof->giveUnknown(EID_MomentumBalance, VM_Velocity, tStep); accelerationVector.at(jj) = iDof->giveUnknown(EID_MomentumBalance, VM_Acceleration, tStep) ; } } } // // Set-up numerical model. 
// // Try to determine the best deltaT, maxDt = 2.0 / sqrt(maxOm); if ( deltaT > maxDt ) { // Print reduced time step increment and minimum period Tmin OOFEM_LOG_RELEVANT("deltaT reduced to %e, Tmin is %e\n", maxDt, maxDt * M_PI); deltaT = maxDt; tStep->setTimeIncrement(deltaT); } for ( j = 1; j <= neq; j++ ) { previousIncrementOfDisplacementVector.at(j) = velocityVector.at(j) * ( deltaT ); displacementVector.at(j) -= previousIncrementOfDisplacementVector.at(j); } #ifdef VERBOSE OOFEM_LOG_RELEVANT( "\n\nSolving [Step number %8d, Time %15e]\n", tStep->giveNumber(), tStep->giveTargetTime() ); #endif return; } // end of init step #ifdef VERBOSE OOFEM_LOG_DEBUG("Assembling right hand side\n"); #endif for ( i = 1; i <= neq; i++ ) { displacementVector.at(i) += previousIncrementOfDisplacementVector.at(i); } // Update solution state counter tStep->incrementStateCounter(); // Compute internal forces. this->giveInternalForces( internalForces, false, 1, tStep ); if ( !drFlag ) { // // Assembling the element part of load vector. // this->computeLoadVector(loadVector, VM_Total, tStep); // // Assembling additional parts of right hand side. // for ( k = 1; k <= neq; k++ ) { loadVector.at(k) -= internalForces.at(k); } } else { // Dynamic relaxation // compute load factor pt = 0.0; #ifdef __PARALLEL_MODE double my_pt = 0.0, coeff = 1.0; int eqNum, ndofs, ndofman = domain->giveNumberOfDofManagers(); dofManagerParallelMode dofmanmode; DofManager *dman; Dof *jdof; for ( int dm = 1; dm <= ndofman; dm++ ) { dman = domain->giveDofManager(dm); ndofs = dman->giveNumberOfDofs(); dofmanmode = dman->giveParallelMode(); // skip all remote and null dofmanagers coeff = 1.0; if ( ( dofmanmode == DofManager_remote ) || ( dofmanmode == DofManager_null ) ) { continue; } else if ( dofmanmode == DofManager_shared ) { coeff = 1. / dman->givePartitionsConnectivitySize(); } // For shared nodes we add locally an average= 1/givePartitionsConnectivitySize()*contribution. for ( j = 1; j <= ndofs; j++ ) { jdof = dman->giveDof(j); if ( jdof->isPrimaryDof() && ( eqNum = jdof->__giveEquationNumber() ) ) { my_pt += coeff * internalForces.at(eqNum) * loadRefVector.at(eqNum) / massMatrix.at(eqNum); } } } // Sum up the contributions from processors. MPI_Allreduce(& my_pt, & pt, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); #else for ( k = 1; k <= neq; k++ ) { pt += internalForces.at(k) * loadRefVector.at(k) / massMatrix.at(k); } #endif pt = pt / pMp; if ( dumpingCoef < 1.e-3 ) { pt += c * ( Tau - tStep->giveTargetTime() ) / Tau; } else { pt += c * ( 1.0 - exp( dumpingCoef * ( tStep->giveTargetTime() - Tau ) ) ) / dumpingCoef / Tau; } loadVector.resize( this->giveNumberOfEquations(EID_MomentumBalance) ); for ( k = 1; k <= neq; k++ ) { loadVector.at(k) = pt * loadRefVector.at(k) - internalForces.at(k); } // Compute relative error. double err = 0.0; #ifdef __PARALLEL_MODE double my_err = 0.0; for ( int dm = 1; dm <= ndofman; dm++ ) { dman = domain->giveDofManager(dm); ndofs = dman->giveNumberOfDofs(); dofmanmode = dman->giveParallelMode(); // Skip all remote and null dofmanagers. coeff = 1.0; if ( ( dofmanmode == DofManager_remote ) || ( dofmanmode == DofManager_null ) ) { continue; } else if ( dofmanmode == DofManager_shared ) { coeff = 1. / dman->givePartitionsConnectivitySize(); } // For shared nodes we add locally an average= 1/givePartitionsConnectivitySize()*contribution. 
for ( j = 1; j <= ndofs; j++ ) { jdof = dman->giveDof(j); if ( jdof->isPrimaryDof() && ( eqNum = jdof->__giveEquationNumber() ) ) { my_err += coeff * loadVector.at(eqNum) * loadVector.at(eqNum) / massMatrix.at(eqNum); } } } // Sum up the contributions from processors. MPI_Allreduce(& my_err, & err, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); #else for ( k = 1; k <= neq; k++ ) { err = loadVector.at(k) * loadVector.at(k) / massMatrix.at(k); } #endif err = err / ( pMp * pt * pt ); OOFEM_LOG_RELEVANT("Relative error is %e, loadlevel is %e\n", err, pt); } for ( j = 1; j <= neq; j++ ) { coeff = massMatrix.at(j); loadVector.at(j) += coeff * ( ( 1. / ( deltaT * deltaT ) ) - dumpingCoef * 1. / ( 2. * deltaT ) ) * previousIncrementOfDisplacementVector.at(j); } // // Set-up numerical model // /* it is not necesary to call numerical method * approach used here is not good, but effective enough * inverse of diagonal mass matrix is done here */ // // call numerical model to solve arised problem - done localy here // #ifdef VERBOSE OOFEM_LOG_RELEVANT( "\n\nSolving [Step number %8d, Time %15e]\n", tStep->giveNumber(), tStep->giveTargetTime() ); #endif for ( i = 1; i <= neq; i++ ) { prevIncrOfDisplacement = previousIncrementOfDisplacementVector.at(i); incrOfDisplacement = loadVector.at(i) / ( massMatrix.at(i) * ( 1. / ( deltaT * deltaT ) + dumpingCoef / ( 2. * deltaT ) ) ); accelerationVector.at(i) = ( incrOfDisplacement - prevIncrOfDisplacement ) / ( deltaT * deltaT ); velocityVector.at(i) = ( incrOfDisplacement + prevIncrOfDisplacement ) / ( 2. * deltaT ); previousIncrementOfDisplacementVector.at(i) = incrOfDisplacement; } }
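/*
 * The final loop above reconstructs accelerations and velocities from the
 * displacement increments of the central difference scheme:
 *
 *   a_n = ( du_{n+1} - du_n ) / dt^2
 *   v_n = ( du_{n+1} + du_n ) / ( 2 dt )
 *
 * where du_n is previousIncrementOfDisplacementVector and du_{n+1} is the newly
 * solved incrOfDisplacement.
 */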
void NonLinearDynamic :: proceedStep(int di, TimeStep *tStep) { // creates system of governing eq's and solves them at given time step // first assemble problem at current time step int neq = this->giveNumberOfEquations(EID_MomentumBalance); // Time-stepping constants double dt2 = deltaT * deltaT; if ( tStep->giveTimeDiscretization() == TD_Newmark ) { OOFEM_LOG_DEBUG("Solving using Newmark-beta method\n"); a0 = 1 / ( beta * dt2 ); a1 = gamma / ( beta * deltaT ); a2 = 1 / ( beta * deltaT ); a3 = 1 / ( 2 * beta ) - 1; a4 = ( gamma / beta ) - 1; a5 = deltaT / 2 * ( gamma / beta - 2 ); a6 = 0; } else if ( ( tStep->giveTimeDiscretization() == TD_TwoPointBackward ) || ( tStep->giveNumber() == giveNumberOfFirstStep() ) ) { if ( tStep->giveTimeDiscretization() != TD_ThreePointBackward ) { OOFEM_LOG_DEBUG("Solving using Backward Euler method\n"); } else { OOFEM_LOG_DEBUG("Solving initial step using Three-point Backward Euler method\n"); } a0 = 1 / dt2; a1 = 1 / deltaT; a2 = 1 / deltaT; a3 = 0; a4 = 0; a5 = 0; a6 = 0; } else if ( tStep->giveTimeDiscretization() == TD_ThreePointBackward ) { OOFEM_LOG_DEBUG("Solving using Three-point Backward Euler method\n"); a0 = 2 / dt2; a1 = 3 / ( 2 * deltaT ); a2 = 2 / deltaT; a3 = 0; a4 = 0; a5 = 0; a6 = 1 / ( 2 * deltaT ); } else { _error("NonLinearDynamic: Time-stepping scheme not found!\n") } if ( tStep->giveNumber() == giveNumberOfFirstStep() ) { // Initialization previousIncrementOfDisplacement.resize(neq); previousIncrementOfDisplacement.zero(); previousTotalDisplacement.resize(neq); previousTotalDisplacement.zero(); totalDisplacement.resize(neq); totalDisplacement.zero(); previousInternalForces.resize(neq); previousInternalForces.zero(); incrementOfDisplacement.resize(neq); incrementOfDisplacement.zero(); velocityVector.resize(neq); velocityVector.zero(); accelerationVector.resize(neq); accelerationVector.zero(); TimeStep *stepWhenIcApply = new TimeStep(giveNumberOfTimeStepWhenIcApply(), this, 0, -deltaT, deltaT, 0); int nDofs, j, k, jj; int nman = this->giveDomain(di)->giveNumberOfDofManagers(); DofManager *node; Dof *iDof; // Considering initial conditions. for ( j = 1; j <= nman; j++ ) { node = this->giveDomain(di)->giveDofManager(j); nDofs = node->giveNumberOfDofs(); for ( k = 1; k <= nDofs; k++ ) { // Ask for initial values obtained from // bc (boundary conditions) and ic (initial conditions). iDof = node->giveDof(k); if ( !iDof->isPrimaryDof() ) { continue; } jj = iDof->__giveEquationNumber(); if ( jj ) { incrementOfDisplacement.at(jj) = iDof->giveUnknown(EID_MomentumBalance, VM_Total, stepWhenIcApply); velocityVector.at(jj) = iDof->giveUnknown(EID_MomentumBalance, VM_Velocity, stepWhenIcApply); accelerationVector.at(jj) = iDof->giveUnknown(EID_MomentumBalance, VM_Acceleration, stepWhenIcApply); } } } } else { incrementOfDisplacement.resize(neq); incrementOfDisplacement.zero(); } if ( initFlag ) { // First assemble problem at current time step. // Option to take into account initial conditions. 
if ( !stiffnessMatrix ) { stiffnessMatrix = CreateUsrDefSparseMtrx(sparseMtrxType); } if ( stiffnessMatrix == NULL ) { _error("proceedStep: sparse matrix creation failed"); } if ( nonlocalStiffnessFlag ) { if ( !stiffnessMatrix->isAsymmetric() ) { _error("proceedStep: stiffnessMatrix does not support asymmetric storage"); } } stiffnessMatrix->buildInternalStructure( this, di, EID_MomentumBalance, EModelDefaultEquationNumbering() ); // Initialize vectors help.resize(neq); rhs.resize(neq); rhs2.resize(neq); internalForces.resize(neq); help.zero(); rhs.zero(); rhs2.zero(); previousTotalDisplacement.resize(neq); for ( int i = 1; i <= neq; i++ ) { previousTotalDisplacement.at(i) = totalDisplacement.at(i); } initFlag = 0; } #ifdef VERBOSE OOFEM_LOG_DEBUG("Assembling load\n"); #endif // Assemble the incremental reference load vector. this->assembleIncrementalReferenceLoadVectors(incrementalLoadVector, incrementalLoadVectorOfPrescribed, refLoadInputMode, this->giveDomain(di), EID_MomentumBalance, tStep); // Assembling the effective load vector for ( int i = 1; i <= neq; i++ ) { help.at(i) = a2 * velocityVector.at(i) + a3 * accelerationVector.at(i) + eta * ( a4 * velocityVector.at(i) + a5 * accelerationVector.at(i) + a6 * previousIncrementOfDisplacement.at(i) ); } this->timesMtrx(help, rhs, MassMatrix, this->giveDomain(di), tStep); if ( delta != 0 ) { for ( int i = 1; i <= neq; i++ ) { help.at(i) = delta * ( a4 * velocityVector.at(i) + a5 * accelerationVector.at(i) + a6 * previousIncrementOfDisplacement.at(i) ); } this->timesMtrx(help, rhs2, StiffnessMatrix, this->giveDomain(di), tStep); help.zero(); for ( int i = 1; i <= neq; i++ ) { rhs.at(i) += rhs2.at(i); } } for ( int i = 1; i <= neq; i++ ) { rhs.at(i) += incrementalLoadVector.at(i) - previousInternalForces.at(i); totalDisplacement.at(i) = previousTotalDisplacement.at(i); } // // Set-up numerical model. // this->giveNumericalMethod( this->giveCurrentMetaStep() ); // // Call numerical model to solve problem. // double loadLevel = 1.0; if ( initialLoadVector.isNotEmpty() ) { numMetStatus = nMethod->solve(stiffnessMatrix, & rhs, & initialLoadVector, & totalDisplacement, & incrementOfDisplacement, & internalForces, internalForcesEBENorm, loadLevel, refLoadInputMode, currentIterations, tStep); } else { numMetStatus = nMethod->solve(stiffnessMatrix, & rhs, NULL, & totalDisplacement, & incrementOfDisplacement, & internalForces, internalForcesEBENorm, loadLevel, refLoadInputMode, currentIterations, tStep); } OOFEM_LOG_INFO("Equilibrium reached in %d iterations\n", currentIterations); }
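/*
 * Worked example of the Newmark constants set above (assumed values
 * beta = 0.25, gamma = 0.5, deltaT = 0.01 s):
 *
 *   a0 = 1/(beta*dt^2)        = 40000
 *   a1 = gamma/(beta*dt)      = 200
 *   a2 = 1/(beta*dt)          = 400
 *   a3 = 1/(2*beta) - 1       = 1
 *   a4 = gamma/beta - 1       = 1
 *   a5 = dt/2*(gamma/beta - 2)= 0
 *   a6 = 0
 *
 * For TD_TwoPointBackward the constants degenerate to the backward Euler values
 * a0 = 1/dt^2, a1 = a2 = 1/dt, a3 = a4 = a5 = a6 = 0, as set in the branch above.
 */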
void IncrementalLinearStatic :: solveYourselfAt(TimeStep *tStep) { // Creates system of governing eq's and solves them at given time step // Initiates the total displacement to zero. if ( tStep->isTheFirstStep() ) { Domain *d = this->giveDomain(1); for ( int i = 1; i <= d->giveNumberOfDofManagers(); i++ ) { DofManager *dofman = d->giveDofManager(i); for ( int j = 1; j <= dofman->giveNumberOfDofs(); j++ ) { dofman->giveDof(j)->updateUnknownsDictionary(tStep, VM_Total_Old, 0.); dofman->giveDof(j)->updateUnknownsDictionary(tStep, VM_Total, 0.); // This is actually redundant now; //dofman->giveDof(j)->updateUnknownsDictionary(tStep, VM_Incremental, 0.); } } int nbc = d->giveNumberOfBoundaryConditions(); for ( int ibc = 1; ibc <= nbc; ++ibc ) { GeneralBoundaryCondition *bc = d->giveBc(ibc); ActiveBoundaryCondition *abc; if ( ( abc = dynamic_cast< ActiveBoundaryCondition * >( bc ) ) ) { int ndman = abc->giveNumberOfInternalDofManagers(); for ( int i = 1; i <= ndman; i++ ) { DofManager *dofman = abc->giveInternalDofManager(i); for ( int j = 1; j <= dofman->giveNumberOfDofs(); j++ ) { dofman->giveDof(j)->updateUnknownsDictionary(tStep, VM_Total_Old, 0.); dofman->giveDof(j)->updateUnknownsDictionary(tStep, VM_Total, 0.); // This is actually redundant now; //dofman->giveDof(j)->updateUnknownsDictionary(tStep, VM_Incremental, 0.); } } } } } // Apply dirichlet b.c's on total values Domain *d = this->giveDomain(1); for ( int i = 1; i <= d->giveNumberOfDofManagers(); i++ ) { DofManager *dofman = d->giveDofManager(i); for ( int j = 1; j <= dofman->giveNumberOfDofs(); j++ ) { Dof *d = dofman->giveDof(j); double tot = d->giveUnknown(VM_Total_Old, tStep); if ( d->hasBc(tStep) ) { tot += d->giveBcValue(VM_Incremental, tStep); } d->updateUnknownsDictionary(tStep, VM_Total, tot); } } #ifdef VERBOSE OOFEM_LOG_RELEVANT( "Solving [step number %8d, time %15e]\n", tStep->giveNumber(), tStep->giveTargetTime() ); #endif int neq = this->giveNumberOfDomainEquations(1, EModelDefaultEquationNumbering()); if (neq == 0) { // Allows for fully prescribed/empty problems. 
return; } incrementOfDisplacementVector.resize(neq); incrementOfDisplacementVector.zero(); #ifdef VERBOSE OOFEM_LOG_INFO("Assembling load\n"); #endif // Assembling the element part of load vector internalLoadVector.resize(neq); internalLoadVector.zero(); this->assembleVector( internalLoadVector, tStep, EID_MomentumBalance, InternalForcesVector, VM_Total, EModelDefaultEquationNumbering(), this->giveDomain(1) ); loadVector.resize(neq); loadVector.zero(); this->assembleVector( loadVector, tStep, EID_MomentumBalance, ExternalForcesVector, VM_Total, EModelDefaultEquationNumbering(), this->giveDomain(1) ); loadVector.subtract(internalLoadVector); #ifdef VERBOSE OOFEM_LOG_INFO("Assembling stiffness matrix\n"); #endif if ( stiffnessMatrix ) { delete stiffnessMatrix; } stiffnessMatrix = classFactory.createSparseMtrx(sparseMtrxType); if ( stiffnessMatrix == NULL ) { _error("solveYourselfAt: sparse matrix creation failed"); } stiffnessMatrix->buildInternalStructure( this, 1, EID_MomentumBalance, EModelDefaultEquationNumbering() ); stiffnessMatrix->zero(); this->assemble( stiffnessMatrix, tStep, EID_MomentumBalance, StiffnessMatrix, EModelDefaultEquationNumbering(), this->giveDomain(1) ); #ifdef VERBOSE OOFEM_LOG_INFO("Solving ...\n"); #endif this->giveNumericalMethod( this->giveCurrentMetaStep() ); NM_Status s = nMethod->solve(stiffnessMatrix, & loadVector, & incrementOfDisplacementVector); if ( !(s & NM_Success) ) { OOFEM_ERROR("IncrementalLinearStatic :: solverYourselfAt - No success in solving system."); } }
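/*
 * Scheme summary: each step solves K * du = f_ext - f_int for the displacement
 * increment. The Dirichlet increments were already added to the VM_Total entries
 * of the unknowns dictionaries above; the free DOFs are presumably updated from
 * incrementOfDisplacementVector later in the step update, outside this routine.
 */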
void SloanGraph :: initialize() { int i, j, k, ielemnodes, ielemintdmans, ndofmans; int nnodes = domain->giveNumberOfDofManagers(); int nelems = domain->giveNumberOfElements(); int nbcs = domain->giveNumberOfBoundaryConditions(); Element *ielem; GeneralBoundaryCondition *ibc; ///@todo Use std::list for this first part instead (suboptimization?) this->nodes.growTo(nnodes); // Add dof managers. for ( i = 1; i <= nnodes; i++ ) { SloanGraphNode *node = new SloanGraphNode(this, i); nodes.put(i, node); dmans.put(i, domain->giveDofManager(i) ); } k = nnodes; // Add element internal dof managers for ( i = 1; i <= nelems; i++ ) { ielem = domain->giveElement(i); this->nodes.growTo(k+ielem->giveNumberOfInternalDofManagers()); for ( j = 1; j <= ielem->giveNumberOfInternalDofManagers(); ++j ) { SloanGraphNode *node = new SloanGraphNode(this, i); nodes.put(++k, node); dmans.put(++k, ielem->giveInternalDofManager(i) ); } } // Add boundary condition internal dof managers for ( i = 1; i <= nbcs; i++ ) { ibc = domain->giveBc(i); if (ibc) { this->nodes.growTo(k+ibc->giveNumberOfInternalDofManagers()); for ( j = 1; j <= ibc->giveNumberOfInternalDofManagers(); ++j ) { SloanGraphNode *node = new SloanGraphNode(this, i); nodes.put(++k, node); dmans.put(++k, ibc->giveInternalDofManager(i) ); } } } IntArray connections; for ( i = 1; i <= nelems; i++ ) { ielem = domain->giveElement(i); ielemnodes = ielem->giveNumberOfDofManagers(); ielemintdmans = ielem->giveNumberOfInternalDofManagers(); ndofmans = ielemnodes + ielemintdmans; connections.resize(ndofmans); for ( j = 1; j <= ielemnodes; j++ ) { connections.at(j) = ielem->giveDofManager(j)->giveNumber(); } for ( j = 1; j <= ielemintdmans; j++ ) { connections.at(ielemnodes+j) = ielem->giveInternalDofManager(j)->giveNumber(); } for ( j = 1; j <= ndofmans; j++ ) { for ( k = j + 1; k <= ndofmans; k++ ) { // Connect both ways this->giveNode( connections.at(j) )->addNeighbor( connections.at(k) ); this->giveNode( connections.at(k) )->addNeighbor( connections.at(j) ); } } } ///@todo Add connections from dof managers to boundary condition internal dof managers. 
// loop over dof managers and test if there are some "slave" or rigidArm connection // if yes, such dependency is reflected in the graph by introducing additional // graph edges between slaves and corresponding masters /* * DofManager* iDofMan; * for (i=1; i <= nnodes; i++){ * if (domain->giveDofManager (i)->hasAnySlaveDofs()) { * iDofMan = domain->giveDofManager (i); * if (iDofMan->giveClassID() == RigidArmNodeClass) { * // rigid arm node -> has only one master * int master = ((RigidArmNode*)iDofMan)->giveMasterDofMngr()->giveNumber(); * // add edge * this->giveNode(i)->addNeighbor (master); * this->giveNode(master)->addNeighbor(i); * * } else { * // slave dofs are present in dofManager * // first - ask for masters, these may be different for each dof * int j; * for (j=1; j<=iDofMan->giveNumberOfDofs(); j++) * if (iDofMan->giveDof (j)->giveClassID() == SimpleSlaveDofClass) { * int master = ((SimpleSlaveDof*) iDofMan->giveDof (j))->giveMasterDofManagerNum(); * // add edge * this->giveNode(i)->addNeighbor (master); * this->giveNode(master)->addNeighbor(i); * * } * } * } * } // end dof man loop */ std :: set< int, std :: less< int > >masters; std :: set< int, std :: less< int > > :: iterator it; IntArray dofMasters; DofManager *iDofMan; for ( i = 1; i <= nnodes; i++ ) { if ( domain->giveDofManager(i)->hasAnySlaveDofs() ) { // slave dofs are present in dofManager // first - ask for masters, these may be different for each dof masters.clear(); iDofMan = domain->giveDofManager(i); int j, k; for ( j = 1; j <= iDofMan->giveNumberOfDofs(); j++ ) { if ( !iDofMan->giveDof(j)->isPrimaryDof() ) { iDofMan->giveDof(j)->giveMasterDofManArray(dofMasters); for ( k = 1; k <= dofMasters.giveSize(); k++ ) { masters.insert( dofMasters.at(k) ); } } } for ( it = masters.begin(); it != masters.end(); ++it ) { this->giveNode(i)->addNeighbor( * ( it ) ); this->giveNode( * ( it ) )->addNeighbor(i); } } } // end dof man loop }
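/*
 * Graph construction example (illustrative): an element whose dof managers are
 * {3, 7, 9} contributes the edges 3-7, 3-9 and 7-9, each added in both directions
 * by the paired addNeighbor() calls above. Slave/rigid-arm couplings add further
 * edges between each slave node and all of its master dof managers, so that the
 * Sloan renumbering keeps coupled dof managers close together.
 */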
void NonLinearDynamic :: proceedStep(int di, TimeStep *tStep) { // creates system of governing eq's and solves them at given time step // first assemble problem at current time step int neq = this->giveNumberOfDomainEquations(1, EModelDefaultEquationNumbering()); // Time-stepping constants this->determineConstants(tStep); if ( ( tStep->giveNumber() == giveNumberOfFirstStep() ) && initFlag ) { // Initialization incrementOfDisplacement.resize(neq); incrementOfDisplacement.zero(); totalDisplacement.resize(neq); totalDisplacement.zero(); velocityVector.resize(neq); velocityVector.zero(); accelerationVector.resize(neq); accelerationVector.zero(); internalForces.resize(neq); internalForces.zero(); previousIncrementOfDisplacement.resize(neq); previousIncrementOfDisplacement.zero(); previousTotalDisplacement.resize(neq); previousTotalDisplacement.zero(); previousVelocityVector.resize(neq); previousVelocityVector.zero(); previousAccelerationVector.resize(neq); previousAccelerationVector.zero(); previousInternalForces.resize(neq); previousInternalForces.zero(); TimeStep *stepWhenIcApply = new TimeStep(giveNumberOfTimeStepWhenIcApply(), this, 0, -deltaT, deltaT, 0); int nDofs, j, k, jj; int nman = this->giveDomain(di)->giveNumberOfDofManagers(); DofManager *node; Dof *iDof; // Considering initial conditions. for ( j = 1; j <= nman; j++ ) { node = this->giveDomain(di)->giveDofManager(j); nDofs = node->giveNumberOfDofs(); for ( k = 1; k <= nDofs; k++ ) { // Ask for initial values obtained from // bc (boundary conditions) and ic (initial conditions). iDof = node->giveDof(k); if ( !iDof->isPrimaryDof() ) { continue; } jj = iDof->__giveEquationNumber(); if ( jj ) { totalDisplacement.at(jj) = iDof->giveUnknown(VM_Total, stepWhenIcApply); velocityVector.at(jj) = iDof->giveUnknown(VM_Velocity, stepWhenIcApply); accelerationVector.at(jj) = iDof->giveUnknown(VM_Acceleration, stepWhenIcApply); } } } this->giveInternalForces(internalForces, true, di, tStep); } if ( initFlag ) { // First assemble problem at current time step. // Option to take into account initial conditions. 
if ( !effectiveStiffnessMatrix ) { effectiveStiffnessMatrix = classFactory.createSparseMtrx(sparseMtrxType); massMatrix = classFactory.createSparseMtrx(sparseMtrxType); } if ( effectiveStiffnessMatrix == NULL || massMatrix == NULL ) { _error("proceedStep: sparse matrix creation failed"); } if ( nonlocalStiffnessFlag ) { if ( !effectiveStiffnessMatrix->isAsymmetric() ) { _error("proceedStep: effectiveStiffnessMatrix does not support asymmetric storage"); } } effectiveStiffnessMatrix->buildInternalStructure( this, di, EID_MomentumBalance, EModelDefaultEquationNumbering() ); massMatrix->buildInternalStructure( this, di, EID_MomentumBalance, EModelDefaultEquationNumbering() ); // Assemble mass matrix this->assemble(massMatrix, tStep, EID_MomentumBalance, MassMatrix, EModelDefaultEquationNumbering(), this->giveDomain(di)); // Initialize vectors help.resize(neq); help.zero(); rhs.resize(neq); rhs.zero(); rhs2.resize(neq); rhs2.zero(); previousIncrementOfDisplacement.resize(neq); previousTotalDisplacement.resize(neq); previousVelocityVector.resize(neq); previousAccelerationVector.resize(neq); previousInternalForces.resize(neq); for ( int i = 1; i <= neq; i++ ) { previousIncrementOfDisplacement.at(i) = incrementOfDisplacement.at(i); previousTotalDisplacement.at(i) = totalDisplacement.at(i); previousVelocityVector.at(i) = velocityVector.at(i); previousAccelerationVector.at(i) = accelerationVector.at(i); previousInternalForces.at(i) = internalForces.at(i); } forcesVector.resize(neq); forcesVector.zero(); totIterations = 0; initFlag = 0; } #ifdef VERBOSE OOFEM_LOG_DEBUG("Assembling load\n"); #endif // Assemble the incremental reference load vector. this->assembleIncrementalReferenceLoadVectors(incrementalLoadVector, incrementalLoadVectorOfPrescribed, refLoadInputMode, this->giveDomain(di), EID_MomentumBalance, tStep); // Assembling the effective load vector for ( int i = 1; i <= neq; i++ ) { help.at(i) = a2 * previousVelocityVector.at(i) + a3 * previousAccelerationVector.at(i) + eta * ( a4 * previousVelocityVector.at(i) + a5 * previousAccelerationVector.at(i) + a6 * previousIncrementOfDisplacement.at(i) ); } massMatrix->times(help, rhs); if ( delta != 0 ) { for ( int i = 1; i <= neq; i++ ) { help.at(i) = delta * ( a4 * previousVelocityVector.at(i) + a5 * previousAccelerationVector.at(i) + a6 * previousIncrementOfDisplacement.at(i) ); } this->timesMtrx(help, rhs2, TangentStiffnessMatrix, this->giveDomain(di), tStep); help.zero(); for ( int i = 1; i <= neq; i++ ) { rhs.at(i) += rhs2.at(i); } } for ( int i = 1; i <= neq; i++ ) { rhs.at(i) += incrementalLoadVector.at(i) - previousInternalForces.at(i); } // // Set-up numerical model. // this->giveNumericalMethod( this->giveCurrentMetaStep() ); // // Call numerical model to solve problem. 
    //
    double loadLevel = 1.0;

    if ( totIterations == 0 ) {
        incrementOfDisplacement.zero();
    }

    if ( initialLoadVector.isNotEmpty() ) {
        numMetStatus = nMethod->solve(effectiveStiffnessMatrix, & rhs, & initialLoadVector,
                                      & totalDisplacement, & incrementOfDisplacement, & forcesVector,
                                      internalForcesEBENorm, loadLevel, refLoadInputMode, currentIterations, tStep);
    } else {
        numMetStatus = nMethod->solve(effectiveStiffnessMatrix, & rhs, NULL,
                                      & totalDisplacement, & incrementOfDisplacement, & forcesVector,
                                      internalForcesEBENorm, loadLevel, refLoadInputMode, currentIterations, tStep);
    }

    for ( int i = 1; i <= neq; i++ ) {
        rhs.at(i)  = previousVelocityVector.at(i);
        rhs2.at(i) = previousAccelerationVector.at(i);
        accelerationVector.at(i) = a0 * incrementOfDisplacement.at(i) - a2 * rhs.at(i) - a3 * rhs2.at(i);
        velocityVector.at(i)     = a1 * incrementOfDisplacement.at(i) - a4 * rhs.at(i) - a5 * rhs2.at(i)
                                   - a6 * previousIncrementOfDisplacement.at(i);
    }

    totIterations += currentIterations;
}
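For orientation, the update loop above and the effective load vector assembled before the solve read like a Newmark-type scheme. Assuming a0 through a6 are the integration constants computed by determineConstants() and that eta and delta act as mass- and stiffness-proportional damping coefficients (an interpretation of the code, not something it states), the implemented relations are

\[
\ddot{u}_{n+1} = a_0\,\Delta u_{n+1} - a_2\,\dot{u}_n - a_3\,\ddot{u}_n, \qquad
\dot{u}_{n+1}  = a_1\,\Delta u_{n+1} - a_4\,\dot{u}_n - a_5\,\ddot{u}_n - a_6\,\Delta u_n,
\]
\[
r = M\bigl[a_2\,\dot{u}_n + a_3\,\ddot{u}_n + \eta\,(a_4\,\dot{u}_n + a_5\,\ddot{u}_n + a_6\,\Delta u_n)\bigr]
  + \delta\,K_t\,(a_4\,\dot{u}_n + a_5\,\ddot{u}_n + a_6\,\Delta u_n)
  + \Delta f_{\mathrm{ext}} - f^{\mathrm{int}}_{n},
\]

where \(\Delta u_{n+1}\) is the displacement increment of the current step, \(\Delta u_n\) that of the previous step, and \(r\) is the rhs vector handed to nMethod->solve().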
int DSSMatrix :: buildInternalStructure(EngngModel *eModel, int di, const UnknownNumberingScheme &s)
{
    IntArray loc;
    Domain *domain = eModel->giveDomain(di);
    int neq = eModel->giveNumberOfDomainEquations(di, s);
    unsigned long indx;

    // allocation map
    std :: vector< std :: set< int > > columns(neq);
    unsigned long nz_ = 0;

    for ( auto &elem : domain->giveElements() ) {
        elem->giveLocationArray(loc, s);
        for ( int ii : loc ) {
            if ( ii > 0 ) {
                for ( int jj : loc ) {
                    if ( jj > 0 ) {
                        columns [ jj - 1 ].insert(ii - 1);
                    }
                }
            }
        }
    }

    // loop over active boundary conditions
    std::vector<IntArray> r_locs;
    std::vector<IntArray> c_locs;
    for ( auto &gbc : domain->giveBcs() ) {
        ActiveBoundaryCondition *bc = dynamic_cast< ActiveBoundaryCondition * >( gbc.get() );
        if ( bc != NULL ) {
            bc->giveLocationArrays(r_locs, c_locs, UnknownCharType, s, s);
            for ( std::size_t k = 0; k < r_locs.size(); k++ ) {
                IntArray &krloc = r_locs [ k ];
                IntArray &kcloc = c_locs [ k ];
                for ( int ii : krloc ) {
                    if ( ii > 0 ) {
                        for ( int jj : kcloc ) {
                            if ( jj > 0 ) {
                                columns [ jj - 1 ].insert(ii - 1);
                            }
                        }
                    }
                }
            }
        }
    }

    for ( int i = 0; i < neq; i++ ) {
        nz_ += columns [ i ].size();
    }

    unsigned long *rowind_ = new unsigned long [ nz_ ];
    unsigned long *colptr_ = new unsigned long [ neq + 1 ];
    if ( ( rowind_ == NULL ) || ( colptr_ == NULL ) ) {
        OOFEM_ERROR("free store exhausted, exiting");
    }

    indx = 0;
    for ( int j = 0; j < neq; j++ ) { // column loop
        colptr_ [ j ] = indx;
        for ( auto &val : columns [ j ] ) { // row loop
            rowind_ [ indx++ ] = val;
        }
    }

    colptr_ [ neq ] = indx;

    _sm.reset( new SparseMatrixF(neq, NULL, rowind_, colptr_, 0, 0, true) );
    if ( !_sm ) {
        OOFEM_FATAL("free store exhausted, exiting");
    }

    /*
     * Assemble block to equation mapping information
     */
    bool _succ = true;
    int _ndofs, _neq, ndofmans = domain->giveNumberOfDofManagers();
    int ndofmansbc = 0; ///@todo This still misses element internal dofs.
    // count number of internal dofmans on active bc
    for ( auto &bc : domain->giveBcs() ) {
        ndofmansbc += bc->giveNumberOfInternalDofManagers();
    }

    int bsize = 0;
    if ( ndofmans > 0 ) {
        bsize = domain->giveDofManager(1)->giveNumberOfDofs();
    }

    long *mcn = new long [ ( ndofmans + ndofmansbc ) * bsize ];
    long _c = 0;

    if ( mcn == NULL ) {
        OOFEM_FATAL("free store exhausted, exiting");
    }

    for ( auto &dman : domain->giveDofManagers() ) {
        _ndofs = dman->giveNumberOfDofs();
        if ( _ndofs > bsize ) {
            _succ = false;
            break;
        }

        for ( Dof *dof: *dman ) {
            if ( dof->isPrimaryDof() ) {
                _neq = dof->giveEquationNumber(s);
                if ( _neq > 0 ) {
                    mcn [ _c++ ] = _neq - 1;
                } else {
                    mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure
                }
            } else {
                mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure
            }
        }

        for ( int i = _ndofs + 1; i <= bsize; i++ ) {
            mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure
        }
    }

    // loop over internal dofmans of active bc
    for ( auto &bc : domain->giveBcs() ) {
        int ndman = bc->giveNumberOfInternalDofManagers();
        for ( int idman = 1; idman <= ndman; idman++ ) {
            DofManager *dman = bc->giveInternalDofManager(idman);
            _ndofs = dman->giveNumberOfDofs();
            if ( _ndofs > bsize ) {
                _succ = false;
                break;
            }

            for ( Dof *dof: *dman ) {
                if ( dof->isPrimaryDof() ) {
                    _neq = dof->giveEquationNumber(s);
                    if ( _neq > 0 ) {
                        mcn [ _c++ ] = _neq - 1;
                    } else {
                        mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure
                    }
                }
            }

            for ( int i = _ndofs + 1; i <= bsize; i++ ) {
                mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure
            }
        }
    }

    if ( _succ ) {
        _dss->SetMatrixPattern(_sm.get(), bsize);
        _dss->LoadMCN(ndofmans + ndofmansbc, bsize, mcn);
    } else {
        OOFEM_LOG_INFO("DSSMatrix: using assumed block structure");
        _dss->SetMatrixPattern(_sm.get(), bsize);
    }

    _dss->PreFactorize();

    // zero matrix, put unity on diagonal with supported dofs
    _dss->LoadZeros();

    delete[] mcn;

    OOFEM_LOG_DEBUG("DSSMatrix info: neq is %d, nz is %lu\n", neq, nz_);

    // increment version
    this->version++;

    return true;
}
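
To make the allocation-map-to-CSC step above easier to follow, here is a minimal standalone sketch (not OOFEM code; the element/dof coupling is invented for illustration) that builds the same colptr_/rowind_ layout from a column-wise std::set map:

#include <cstdio>
#include <set>
#include <vector>

// Standalone sketch: compress a column-wise set-based sparsity map into
// CSC-style colptr/rowind arrays, the layout the routine above hands to
// SparseMatrixF. Row/column indices are zero-based here.
int main()
{
    // Hypothetical 4x4 pattern: two "elements" coupling dofs {0,1,2} and {1,3}.
    std::vector< std::set<int> > columns(4);
    const std::vector< std::vector<int> > locs = { { 0, 1, 2 }, { 1, 3 } };
    for ( const auto &loc : locs ) {
        for ( int ii : loc ) {
            for ( int jj : loc ) {
                columns[jj].insert(ii); // rows coupled to column jj
            }
        }
    }

    // Count nonzeros, then flatten: colptr[j] points at the first row index of column j.
    std::size_t nz = 0;
    for ( const auto &col : columns ) {
        nz += col.size();
    }

    std::vector<unsigned long> rowind;
    rowind.reserve(nz);
    std::vector<unsigned long> colptr(columns.size() + 1);
    unsigned long indx = 0;
    for ( std::size_t j = 0; j < columns.size(); j++ ) {
        colptr[j] = indx;
        for ( int row : columns[j] ) {
            rowind.push_back(row);
            indx++;
        }
    }
    colptr[columns.size()] = indx;

    for ( std::size_t j = 0; j < columns.size(); j++ ) {
        std::printf("column %zu: rows", j);
        for ( unsigned long p = colptr[j]; p < colptr[j + 1]; p++ ) {
            std::printf(" %lu", rowind[p]);
        }
        std::printf("\n");
    }
    return 0;
}
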
void NonStationaryTransportProblem :: applyIC(TimeStep *stepWhenIcApply)
{
    Domain *domain = this->giveDomain(1);
    int neq = this->giveNumberOfEquations(EID_ConservationEquation);
    FloatArray *solutionVector;
    double val;

#ifdef VERBOSE
    OOFEM_LOG_INFO("Applying initial conditions\n");
#endif

    int nDofs, j, k, jj;
    int nman = domain->giveNumberOfDofManagers();
    DofManager *node;
    Dof *iDof;

    UnknownsField->advanceSolution(stepWhenIcApply);
    solutionVector = UnknownsField->giveSolutionVector(stepWhenIcApply);
    solutionVector->resize(neq);
    solutionVector->zero();

    for ( j = 1; j <= nman; j++ ) {
        node = domain->giveDofManager(j);
        nDofs = node->giveNumberOfDofs();
        for ( k = 1; k <= nDofs; k++ ) {
            // ask for initial values obtained from
            // bc (boundary conditions) and ic (initial conditions)
            iDof = node->giveDof(k);
            if ( !iDof->isPrimaryDof() ) {
                continue;
            }

            jj = iDof->__giveEquationNumber();
            if ( jj ) {
                val = iDof->giveUnknown(EID_ConservationEquation, VM_Total, stepWhenIcApply);
                solutionVector->at(jj) = val;
                // update in dictionary, if the problem is growing/decreasing
                if ( this->changingProblemSize ) {
                    iDof->updateUnknownsDictionary(stepWhenIcApply, EID_ConservationEquation, VM_Total, val);
                }
            }
        }
    }

    int nelem = domain->giveNumberOfElements();

    // project initial temperature to integration points
    // for ( j = 1; j <= nelem; j++ ) {
    //     domain->giveElement(j)->updateInternalState(stepWhenIcApply);
    // }

#ifdef __CEMHYD_MODULE
    // Not relevant in linear case, but needed for CemhydMat for temperature averaging before solving balance equations
    // Update element state according to given ic
    TransportElement *element;
    CemhydMat *cem;

    for ( j = 1; j <= nelem; j++ ) {
        element = ( TransportElement * ) domain->giveElement(j);
        // assign status to each integration point on each element
        if ( element->giveMaterial()->giveClassID() == CemhydMatClass ) {
            element->giveMaterial()->initMaterial(element); // create microstructures and statuses on specific GPs
            element->updateInternalState(stepWhenIcApply); // store temporary unequilibrated temperature
            element->updateYourself(stepWhenIcApply); // store equilibrated temperature
            cem = ( CemhydMat * ) element->giveMaterial();
            cem->clearWeightTemperatureProductVolume(element);
            cem->storeWeightTemperatureProductVolume(element, stepWhenIcApply);
        }
    }

    // perform averaging on each material instance of CemhydMatClass
    int nmat = domain->giveNumberOfMaterialModels();
    for ( j = 1; j <= nmat; j++ ) {
        if ( domain->giveMaterial(j)->giveClassID() == CemhydMatClass ) {
            cem = ( CemhydMat * ) domain->giveMaterial(j);
            cem->averageTemperature();
        }
    }
#endif //__CEMHYD_MODULE
}

int DSSMatrix :: buildInternalStructure(EngngModel *eModel, int di, EquationID ut, const UnknownNumberingScheme &s)
{
    IntArray loc;
    Domain *domain = eModel->giveDomain(di);
    int neq = eModel->giveNumberOfDomainEquations(di, s);
    int nelem = domain->giveNumberOfElements();
    int i, ii, j, jj, n;
    unsigned long indx;
    Element *elem;

    // allocation map
    std :: vector< std :: set< int > > columns(neq);
    unsigned long nz_ = 0;

    for ( n = 1; n <= nelem; n++ ) {
        elem = domain->giveElement(n);
        elem->giveLocationArray(loc, ut, s);
        for ( i = 1; i <= loc.giveSize(); i++ ) {
            if ( ( ii = loc.at(i) ) ) {
                for ( j = 1; j <= loc.giveSize(); j++ ) {
                    if ( ( jj = loc.at(j) ) ) {
                        columns [ jj - 1 ].insert(ii - 1);
                    }
                }
            }
        }
    }

    // loop over active boundary conditions
    int nbc = domain->giveNumberOfBoundaryConditions();
    std::vector<IntArray> r_locs;
    std::vector<IntArray> c_locs;
    for ( i = 1; i <= nbc; ++i ) {
        ActiveBoundaryCondition *bc = dynamic_cast< ActiveBoundaryCondition * >( domain->giveBc(i) );
        if ( bc != NULL ) {
            bc->giveLocationArrays(r_locs, c_locs, ut, UnknownCharType, s, s);
            for ( std::size_t k = 0; k < r_locs.size(); k++ ) {
                IntArray &krloc = r_locs [ k ];
                IntArray &kcloc = c_locs [ k ];
                for ( int ri = 1; ri <= krloc.giveSize(); ri++ ) {
                    if ( ( ii = krloc.at(ri) ) ) {
                        for ( j = 1; j <= kcloc.giveSize(); j++ ) {
                            if ( ( jj = kcloc.at(j) ) ) {
                                columns [ jj - 1 ].insert(ii - 1);
                            }
                        }
                    }
                }
            }
        }
    }

    for ( i = 0; i < neq; i++ ) {
        nz_ += columns [ i ].size();
    }

    unsigned long *rowind_ = new unsigned long [ nz_ ];
    unsigned long *colptr_ = new unsigned long [ neq + 1 ];
    if ( ( rowind_ == NULL ) || ( colptr_ == NULL ) ) {
        OOFEM_ERROR("DSSMatrix::buildInternalStructure: free store exhausted, exiting");
    }

    indx = 0;
    std :: set< int > :: iterator pos;
    for ( j = 0; j < neq; j++ ) { // column loop
        colptr_ [ j ] = indx;
        for ( pos = columns [ j ].begin(); pos != columns [ j ].end(); ++pos ) { // row loop
            rowind_ [ indx++ ] = * pos;
        }
    }

    colptr_ [ neq ] = indx;

    if ( _sm ) {
        delete _sm;
    }

    if ( ( _sm = new SparseMatrixF(neq, NULL, rowind_, colptr_, 0, 0, true) ) == NULL ) {
        OOFEM_ERROR("DSSMatrix::buildInternalStructure: free store exhausted, exiting");
    }

    int bsize = eModel->giveDomain(1)->giveDefaultNodeDofIDArry().giveSize();

    /*
     * Assemble block to equation mapping information
     */
    bool _succ = true;
    int _ndofs, _neq, ndofmans = domain->giveNumberOfDofManagers();
    int ndofmansbc = 0;

    // count number of internal dofmans on active bc
    for ( n = 1; n <= nbc; n++ ) {
        ndofmansbc += domain->giveBc(n)->giveNumberOfInternalDofManagers();
    }

    long *mcn = new long [ ( ndofmans + ndofmansbc ) * bsize ];
    long _c = 0;
    DofManager *dman;

    if ( mcn == NULL ) {
        OOFEM_ERROR("DSSMatrix::buildInternalStructure: free store exhausted, exiting");
    }

    for ( n = 1; n <= ndofmans; n++ ) {
        dman = domain->giveDofManager(n);
        _ndofs = dman->giveNumberOfDofs();
        if ( _ndofs > bsize ) {
            _succ = false;
            break;
        }

        for ( i = 1; i <= _ndofs; i++ ) {
            if ( dman->giveDof(i)->isPrimaryDof() ) {
                _neq = dman->giveDof(i)->giveEquationNumber(s);
                if ( _neq > 0 ) {
                    mcn [ _c++ ] = _neq - 1;
                } else {
                    mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure
                }
            }
        }

        for ( i = _ndofs + 1; i <= bsize; i++ ) {
            mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure
        }
    }

    // loop over internal dofmans of active bc
    for ( int ibc = 1; ibc <= nbc; ibc++ ) {
        int ndman = domain->giveBc(ibc)->giveNumberOfInternalDofManagers();
        for ( int idman = 1; idman <= ndman; idman++ ) {
            dman = domain->giveBc(ibc)->giveInternalDofManager(idman);
            _ndofs = dman->giveNumberOfDofs();
            if ( _ndofs > bsize ) {
                _succ = false;
                break;
            }

            for ( i = 1; i <= _ndofs; i++ ) {
                if ( dman->giveDof(i)->isPrimaryDof() ) {
                    _neq = dman->giveDof(i)->giveEquationNumber(s);
                    if ( _neq > 0 ) {
                        mcn [ _c++ ] = _neq - 1;
                    } else {
                        mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure
                    }
                }
            }

            for ( i = _ndofs + 1; i <= bsize; i++ ) {
                mcn [ _c++ ] = -1; // no corresponding row in sparse mtrx structure
            }
        }
    }

    if ( _succ ) {
        _dss->SetMatrixPattern(_sm, bsize);
        _dss->LoadMCN(ndofmans + ndofmansbc, bsize, mcn);
    } else {
        OOFEM_LOG_INFO("DSSMatrix: using assumed block structure");
        _dss->SetMatrixPattern(_sm, bsize);
    }

    _dss->PreFactorize();

    // zero matrix, put unity on diagonal with supported dofs
    _dss->LoadZeros();

    delete[] mcn;

    OOFEM_LOG_DEBUG("DSSMatrix info: neq is %d, nz is %lu\n", neq, nz_);

    // increment version
    this->version++;

    return true;
}
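
For the block-to-equation mapping, a minimal standalone sketch (invented node and equation numbers, not OOFEM code) shows how the mcn array ends up laid out: one bsize-long slot group per dof manager, with -1 marking constrained or missing dofs, as in both variants of buildInternalStructure above.

#include <cstdio>
#include <vector>

// Standalone sketch: build a block-to-equation map "mcn" for two hypothetical
// nodes with block size 3. Node 1 has all three dofs free (equations 1..3);
// node 2 has its second dof constrained (no equation), so that slot gets -1.
int main()
{
    const int bsize = 3;
    // Per-node equation numbers, 0 meaning "constrained / no equation".
    const std::vector< std::vector<int> > nodeEqs = { { 1, 2, 3 }, { 4, 0, 5 } };

    std::vector<long> mcn;
    for ( const auto &eqs : nodeEqs ) {
        for ( int eq : eqs ) {
            mcn.push_back(eq > 0 ? eq - 1 : -1); // zero-based row, or -1 if unsupported
        }
        for ( std::size_t i = eqs.size(); i < (std::size_t) bsize; i++ ) {
            mcn.push_back(-1); // pad node blocks shorter than bsize
        }
    }

    for ( std::size_t i = 0; i < mcn.size(); i++ ) {
        std::printf("mcn[%zu] = %ld\n", i, mcn[i]);
    }
    return 0;
}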