Code Example #1
void
VTKXMLExportModule :: exportPrimVarAs(UnknownType valID, IntArray &mapG2L, IntArray &mapL2G,
                                      int regionDofMans, int ireg,
#ifdef __VTK_MODULE
                                      vtkSmartPointer<vtkUnstructuredGrid> &stream,
#else
                                      FILE *stream,
#endif
                                      TimeStep *tStep)
{
    Domain *d = emodel->giveDomain(1);
    InternalStateValueType type = ISVT_UNDEFINED;

    if ( ( valID == DisplacementVector ) || ( valID == EigenVector ) || ( valID == VelocityVector ) ) {
        type = ISVT_VECTOR;
    } else if ( ( valID == FluxVector ) || ( valID == PressureVector ) || ( valID == Temperature ) ) {
        type = ISVT_SCALAR;
    } else {
        OOFEM_ERROR2( "VTKXMLExportModule::exportPrimVarAs: unsupported UnknownType %s", __UnknownTypeToString(valID) );
    }

#ifdef __VTK_MODULE
    vtkSmartPointer<vtkDoubleArray> primVarArray = vtkSmartPointer<vtkDoubleArray>::New();
    primVarArray->SetName(__UnknownTypeToString(valID));

    if ( type == ISVT_SCALAR ) {
        primVarArray->SetNumberOfComponents(1);
    } else if ( type == ISVT_VECTOR ) {
        primVarArray->SetNumberOfComponents(3);
    } else {
        fprintf( stderr, "VTKXMLExportModule::exportPrimVarAs: unsupported variable type %s\n", __UnknownTypeToString(valID) );
    }
    primVarArray->SetNumberOfTuples(regionDofMans);
#else
    if ( type == ISVT_SCALAR ) {
        fprintf( stream, "<DataArray type=\"Float64\" Name=\"%s\" format=\"ascii\"> ", __UnknownTypeToString(valID) );
    } else if ( type == ISVT_VECTOR ) {
        fprintf( stream, "<DataArray type=\"Float64\" Name=\"%s\" NumberOfComponents=\"3\" format=\"ascii\"> ", __UnknownTypeToString(valID) );
    } else {
        fprintf( stderr, "VTKXMLExportModule::exportPrimVarAs: unsupported variable type %s\n", __UnknownTypeToString(valID) );
    }
#endif

    DofManager *dman;
    FloatArray iVal, iValLCS;

    for ( int inode = 1; inode <= regionDofMans; inode++ ) {
        dman = d->giveNode( mapL2G.at(inode) );

        this->getPrimaryVariable(iVal, dman, tStep, valID, ireg);

        if ( type == ISVT_SCALAR ) {
#ifdef __VTK_MODULE
            primVarArray->SetTuple1(inode-1, iVal.at(1));
#else
            fprintf(stream, "%e ", iVal.at(1) );
#endif
        } else if ( type == ISVT_VECTOR ) {

            // rotate back from the nodal CS to the global CS if a local CS is defined
            if ( (dman->giveClassID() == NodeClass) && d->giveNode( dman->giveNumber() )->hasLocalCS() ) {
                iVal.resize(3);
                iValLCS = iVal;
                iVal.beTProductOf(* d->giveNode( dman->giveNumber() )->giveLocalCoordinateTriplet(), iValLCS);
            }

#ifdef __VTK_MODULE
            primVarArray->SetTuple3(inode-1, iVal.at(1), iVal.at(2), iVal.at(3));
#else
            fprintf( stream, "%e %e %e ", iVal.at(1), iVal.at(2), iVal.at(3) );
#endif
        }
    } // end loop over nodes
#ifdef __VTK_MODULE
    if ( type == ISVT_SCALAR ) {
        stream->GetPointData()->SetActiveScalars(__UnknownTypeToString(valID));
        stream->GetPointData()->SetScalars(primVarArray);
    } else if ( type == ISVT_VECTOR ) {
        stream->GetPointData()->SetActiveVectors(__UnknownTypeToString(valID));
        stream->GetPointData()->SetVectors(primVarArray);
    }
#else
    fprintf(stream, "</DataArray>\n");
#endif
}
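The function above either fills an in-memory vtkDoubleArray attached to the unstructured grid (when compiled with __VTK_MODULE) or streams an ASCII <DataArray> element into the .vtu file. The following is a minimal, self-contained sketch of the VTK-library branch only, written against plain VTK outside of OOFEM; the array name, the point count and the tuple values are placeholders, not actual OOFEM data.

// Minimal sketch of the __VTK_MODULE branch above: build a 3-component
// point-data array and attach it to an unstructured grid as the active vectors.
#include <vtkSmartPointer.h>
#include <vtkDoubleArray.h>
#include <vtkUnstructuredGrid.h>
#include <vtkPointData.h>

int main()
{
    const int nPoints = 4; // stands in for regionDofMans

    vtkSmartPointer<vtkDoubleArray> primVarArray = vtkSmartPointer<vtkDoubleArray>::New();
    primVarArray->SetName("DisplacementVector"); // placeholder variable name
    primVarArray->SetNumberOfComponents(3);      // ISVT_VECTOR -> 3 components
    primVarArray->SetNumberOfTuples(nPoints);

    for ( int i = 0; i < nPoints; i++ ) {
        primVarArray->SetTuple3(i, 0.1 * i, 0.0, 0.0); // placeholder nodal values
    }

    vtkSmartPointer<vtkUnstructuredGrid> grid = vtkSmartPointer<vtkUnstructuredGrid>::New();
    grid->GetPointData()->SetActiveVectors( primVarArray->GetName() );
    grid->GetPointData()->SetVectors(primVarArray);
    return 0;
}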
Code Example #2
File: leplic.C  Project: JimBrouzoulis/oofem-1
void
LEPlic :: doLagrangianPhase(TimeStep *atTime)
{
    // Maps element nodes along their trajectories using the basic Runge-Kutta method (midpoint rule)
    int i, ci, ndofman = domain->giveNumberOfDofManagers();
    int nsd = 2;
    double dt = atTime->giveTimeIncrement();
    DofManager *dman;
    Node *inode;
    IntArray velocityMask;
    FloatArray x, x2(nsd), v_t, v_tn1;
    FloatMatrix t;
#if 1
    EngngModel *emodel = domain->giveEngngModel();
    int err;
#endif
    velocityMask.setValues(2, V_u, V_v);

    updated_XCoords.resize(ndofman);
    updated_YCoords.resize(ndofman);


    for ( i = 1; i <= ndofman; i++ ) {
        dman = domain->giveDofManager(i);
        // skip dofmanagers with no position information
        if ( ( dman->giveClassID() != NodeClass ) && ( dman->giveClassID() != RigidArmNodeClass ) && ( dman->giveClassID() != HangingNodeClass ) ) {
            continue;
        }

        inode = ( Node * ) dman;
        // get node coordinates
        x = * ( inode->giveCoordinates() );
        // get velocity field v(tn, x(tn)) for dof manager

#if 1
        /* Original version */
        dman->giveUnknownVector( v_t, velocityMask, EID_MomentumBalance, VM_Total, atTime->givePreviousStep() );
        /* Modified version */
        //dman->giveUnknownVector(v_t, velocityMask, EID_MomentumBalance, VM_Total, atTime);

        // Original version
        // compute updated position x(tn)+0.5*dt*v(tn,x(tn))
        for ( ci = 1; ci <= nsd; ci++ ) {
            x2.at(ci) = x.at(ci) + 0.5 * dt * v_t.at(ci);
        }

        // compute interpolated velocity field at x2 [ v(tn+1, x(tn)+0.5*dt*v(tn,x(tn))) = v(tn+1, x2) ]
        Field *vfield;
        vfield = emodel->giveContext()->giveFieldManager()->giveField(FT_Velocity);
        if ( vfield == NULL ) {
            _error("doLagrangianPhase: Velocity field not available");
        }

        err = vfield->evaluateAt(v_tn1, x2, VM_Total, atTime);
        if ( err == 1 ) {
            // point outside the domain -> fall back to the explicit update with v(tn)
            v_tn1 = v_t;
        } else if ( err != 0 ) {
            _error2("doLagrangianPhase: vfield->evaluateAt failed, error code %d", err);
        }

        // compute final updated position
        for ( ci = 1; ci <= nsd; ci++ ) {
            x2.at(ci) = x.at(ci) + dt * v_tn1.at(ci);
        }

#else
        // pure explicit version
        dman->giveUnknownVector(v_t, velocityMask, EID_MomentumBalance, VM_Total, atTime);

        for ( ci = 1; ci <= nsd; ci++ ) {
            x2.at(ci) = x.at(ci) + dt * v_t.at(ci);
        }

#endif
        // store updated node position
        updated_XCoords.at(i) = x2.at(1);
        updated_YCoords.at(i) = x2.at(2);
    }
}
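doLagrangianPhase advances each node position with the midpoint (second-order Runge-Kutta) rule: a half step with the velocity at tn, a velocity evaluation at the predicted midpoint, and a full step with that midpoint velocity, falling back to the explicit update when the midpoint lies outside the domain. A generic sketch of this update, independent of the OOFEM field and dof-manager API (all names are illustrative), could look like this:

// Generic sketch of the per-node midpoint (RK2) update performed above.
// The velocity field is a callable returning false when the query point
// lies outside the domain, mimicking vfield->evaluateAt() failing.
#include <array>
#include <functional>

using Vec2 = std::array<double, 2>;

Vec2 midpointAdvect(const Vec2 &x, double tn, double dt,
                    const std::function<bool(double, const Vec2 &, Vec2 &)> &velocity)
{
    Vec2 v_t{}, v_mid{}, xHalf{}, xNew{};

    velocity(tn, x, v_t);                         // v(tn, x(tn))

    for ( int ci = 0; ci < 2; ci++ ) {            // x(tn) + 0.5*dt*v(tn, x(tn))
        xHalf[ci] = x[ci] + 0.5 * dt * v_t[ci];
    }

    if ( !velocity(tn + dt, xHalf, v_mid) ) {     // v(tn+1, xHalf)
        v_mid = v_t;                              // outside the domain -> stay explicit
    }

    for ( int ci = 0; ci < 2; ci++ ) {            // final position x(tn) + dt*v_mid
        xNew[ci] = x[ci] + dt * v_mid[ci];
    }
    return xNew;
}

int main()
{
    // toy velocity field: uniform drift in x, shear in y; always inside the domain
    auto field = [](double, const Vec2 &p, Vec2 &v) {
        v[0] = 1.0;
        v[1] = -0.5 * p[0];
        return true;
    };
    Vec2 x1 = midpointAdvect({0.0, 0.0}, 0.0, 0.1, field);
    (void) x1;
    return 0;
}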
Code Example #3
File: loadbalancer.C  Project: MartinFagerstrom/oofem
int
LoadBalancer :: packMigratingData(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int idofman, ndofman;
    classType dtype;
    DofManager *dofman;
    LoadBalancer :: DofManMode dmode;

    //  **************************************************
    //  Pack migrating data to remote partition
    //  **************************************************

    // pack dofManagers
    if ( iproc == myrank ) {
        return 1;                // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);
    // loop over dofManagers
    ndofman = d->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = d->giveDofManager(idofman);
        dmode = this->giveDofManState(idofman);
        dtype = dofman->giveClassID();
        // sync data to remote partition
        // if the dofman is already present on the remote partition, there is no need to sync it
        //if ((this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc))) {
        if ( ( this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc) ) &&
            ( !dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            pcbuff->packInt(dtype);
            pcbuff->packInt(dmode);
            pcbuff->packInt( dofman->giveGlobalNumber() );

            // pack dofman state (this is the local dofman, not available on remote)
            /* This is a potential performance leak: sending a shared dofman to a partition
             * on which it is already shared does not require sending its context (it is already there);
             * for simplicity it is always sent here. */
            dofman->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // send list of new partitions
            pcbuff->packIntArray( * ( this->giveDofManPartitions(idofman) ) );
        }
    }

    // pack end-of-dofman-section record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    int ielem, nelem = d->giveNumberOfElements(), nsend = 0;

    Element *elem;

    for ( ielem = 1; ielem <= nelem; ielem++ ) { // begin loop over elements
        elem = d->giveElement(ielem);
        if ( ( elem->giveParallelMode() == Element_local ) &&
            ( this->giveElementPartition(ielem) == iproc ) ) {
            // pack local element (node numbers should be global ones!)
            // pack type
            pcbuff->packInt( elem->giveClassID() );
            // nodal numbers should be packed as global ones!
            elem->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State);
            nsend++;
        }
    } // end loop over elements

    // pack end-of-element-record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    OOFEM_LOG_RELEVANT("[%d] LoadBalancer:: sending %d migrating elements to %d\n", myrank, nsend, iproc);

    return 1;
}
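The packed stream is therefore a sequence of sentinel-terminated sections: one record per migrating dof manager (class type, migration mode, global number, saved context, new partition list), an end marker, then one record per migrating local element (class id, saved context), and a final end marker; the receiver reads each section until it encounters LOADBALANCER_END_DATA. A toy sketch of that sentinel-terminated record pattern, using a plain int buffer in place of ProcessCommunicatorBuff (the sentinel value and record contents are illustrative only):

// Toy sketch of the sentinel-terminated record layout used by packMigratingData.
#include <cstdio>
#include <vector>

const int END_DATA = -1; // plays the role of LOADBALANCER_END_DATA

int main()
{
    std::vector<int> buf;

    // "pack" side: one (type, mode, global number) record per migrating dofman,
    // then the end-of-section sentinel
    int dofmanRecords[2][3] = { { 5, 1, 42 }, { 5, 2, 43 } };
    for ( auto &rec : dofmanRecords ) {
        buf.push_back(rec[0]); // class type
        buf.push_back(rec[1]); // migration mode
        buf.push_back(rec[2]); // global number
    }
    buf.push_back(END_DATA);

    // "unpack" side: read records until the sentinel is reached
    std::size_t pos = 0;
    while ( buf[pos] != END_DATA ) {
        int type = buf[pos++];
        int mode = buf[pos++];
        int gnum = buf[pos++];
        std::printf("received dofman %d (type %d, mode %d)\n", gnum, type, mode);
    }
    return 0;
}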