// Write the EnSight "particle coordinates" file for one lagrangian cloud.
// When dataExists is false only the file header is written (empty cloud);
// otherwise the cloud is read from the mesh and every parcel position is
// dumped in fixed-width columns.
// NOTE(review): this fragment is truncated at the final 'else' branch.
void ensightParticlePositions
(
    const Foam::fvMesh& mesh,
    const Foam::fileName& postProcPath,
    const Foam::word& timeFile,      // time-index prefix for the file name
    const Foam::word& cloudName,
    const bool dataExists
)
{
    if (dataExists)
    {
        Info<< "Converting cloud " << cloudName << " positions" << endl;
    }
    else
    {
        Info<< "Creating empty cloud " << cloudName << " positions" << endl;
    }

    const Time& runTime = mesh.time();

    // File is named "<timeFile>.<cloudName>" inside the post-processing dir
    fileName ensightFileName(timeFile + "." + cloudName);

    OFstream ensightFile
    (
        postProcPath/ensightFileName,
        ios_base::out|ios_base::trunc,
        runTime.writeFormat(),
        runTime.writeVersion(),
        runTime.writeCompression()
    );

    // Output header
    ensightFile
        << cloudName.c_str() << nl
        << "particle coordinates" << nl;

    if (dataExists)
    {
        // Read the cloud as passive particles (positions only, no fields)
        Cloud<passiveParticle> parcels(mesh, cloudName, false);

        // Set Format
        ensightFile.setf(ios_base::scientific, ios_base::floatfield);
        ensightFile.precision(5);

        ensightFile<< setw(8) << parcels.size() << nl;

        // 1-based running particle id written alongside each position
        label nParcels = 0;

        // Output positions
        forAllConstIter(Cloud<passiveParticle>, parcels, elmnt)
        {
            const vector& p = elmnt().position();

            ensightFile
                << setw(8) << ++nParcels
                << setw(12) << p.x()
                << setw(12) << p.y()
                << setw(12) << p.z()
                << nl;
        }
    }
    else
    {
void Foam::functionObjects::histogram::writeGraph ( const coordSet& coords, const word& fieldName, const scalarField& values ) const { const wordList fieldNames(1, fieldName); fileName outputPath = baseTimeDir(); mkDir(outputPath); OFstream graphFile ( outputPath/formatterPtr_().getFileName(coords, fieldNames) ); Info<< "Writing histogram of " << fieldName << " to " << graphFile.name() << endl; List<const scalarField*> yPtrs(1); yPtrs[0] = &values; formatterPtr_().write(coords, fieldNames, yPtrs, graphFile); }
// Call scotch with options from dictionary.
// Decomposes the distributed adjacency graph, given in CSR form
// (xadj[i]..xadj[i+1] index into adjncy for cell i), optionally using
// per-cell weights; the result is returned via finalDecomp.
// NOTE(review): this fragment is truncated after the weight-conversion
// section; the SCOTCH graph build and mapping continue beyond it.
Foam::label Foam::ptscotchDecomp::decompose
(
    const fileName& meshPath,       // used only for the debug graph dump name
    const label adjncySize,
    const label adjncy[],
    const label xadjSize,
    const label xadj[],
    const scalarField& cWeights,    // optional per-cell weights (may be empty)
    List<label>& finalDecomp
) const
{
    if (debug)
    {
        Pout<< "ptscotchDecomp : entering with xadj:" << xadjSize << endl;
    }

    // Dump graph
    if (decompositionDict_.found("scotchCoeffs"))
    {
        const dictionary& scotchCoeffs =
            decompositionDict_.subDict("scotchCoeffs");

        if (scotchCoeffs.lookupOrDefault("writeGraph", false))
        {
            // One .dgr file per processor, tagged with the processor number
            OFstream str
            (
                meshPath + "_" + Foam::name(Pstream::myProcNo()) + ".dgr"
            );

            Pout<< "Dumping Scotch graph file to " << str.name() << endl
                << "Use this in combination with dgpart." << endl;

            globalIndex globalCells(xadjSize-1);

            // Distributed graph file (.grf)
            label version = 2;
            str << version << nl;
            // Number of files (procglbnbr)
            str << Pstream::nProcs();
            // My file number (procloc)
            str << ' ' << Pstream::myProcNo() << nl;
            // Total number of vertices (vertglbnbr)
            str << globalCells.size();
            // Total number of connections (edgeglbnbr)
            str << ' ' << returnReduce(xadj[xadjSize-1], sumOp<label>())
                << nl;
            // Local number of vertices (vertlocnbr)
            str << xadjSize-1;
            // Local number of connections (edgelocnbr)
            str << ' ' << xadj[xadjSize-1] << nl;
            // Numbering starts from 0
            label baseval = 0;
            // 100*hasVertlabels+10*hasEdgeWeights+1*hasVertWeighs
            str << baseval << ' ' << "000" << nl;

            // One line per local cell: degree followed by its neighbours
            for (label celli = 0; celli < xadjSize-1; celli++)
            {
                label start = xadj[celli];
                label end = xadj[celli+1];

                str << end-start;

                for (label i = start; i < end; i++)
                {
                    str << ' ' << adjncy[i];
                }
                str << nl;
            }
        }
    }

    // Strategy
    // ~~~~~~~~

    // Default.
    SCOTCH_Strat stradat;
    check(SCOTCH_stratInit(&stradat), "SCOTCH_stratInit");

    if (decompositionDict_.found("scotchCoeffs"))
    {
        const dictionary& scotchCoeffs =
            decompositionDict_.subDict("scotchCoeffs");

        string strategy;
        if (scotchCoeffs.readIfPresent("strategy", strategy))
        {
            if (debug)
            {
                Info<< "ptscotchDecomp : Using strategy " << strategy << endl;
            }
            SCOTCH_stratDgraphMap(&stradat, strategy.c_str());
            //fprintf(stdout, "S\tStrat=");
            //SCOTCH_stratSave(&stradat, stdout);
            //fprintf(stdout, "\n");
        }
    }

    // Graph
    // ~~~~~

    // Integer vertex weights handed to SCOTCH (empty if no/uniform weights)
    List<label> velotab;

    // Check for externally provided cellweights and if so initialise weights
    scalar minWeights = gMin(cWeights);
    scalar maxWeights = gMax(cWeights);

    // maxWeights > minWeights means the weights are non-uniform and usable
    if (maxWeights > minWeights)
    {
        if (minWeights <= 0)
        {
            WarningInFunction
                << "Illegal minimum weight " << minWeights
                << endl;
        }

        if (cWeights.size() != xadjSize-1)
        {
            FatalErrorInFunction
                << "Number of cell weights " << cWeights.size()
                << " does not equal number of cells " << xadjSize-1
                << exit(FatalError);
        }
    }

    scalar velotabSum = gSum(cWeights)/minWeights;

    scalar rangeScale(1.0);

    if (Pstream::master())
    {
        if (velotabSum > scalar(labelMax - 1))
        {
            // 0.9 factor of safety to avoid floating point round-off in
            // rangeScale tipping the subsequent sum over the integer limit.
            rangeScale = 0.9*scalar(labelMax - 1)/velotabSum;

            WarningInFunction
                << "Sum of weights has overflowed integer: " << velotabSum
                << ", compressing weight scale by a factor of " << rangeScale
                << endl;
        }
    }

    // All processors must use the master's scale factor
    Pstream::scatter(rangeScale);

    if (maxWeights > minWeights)
    {
        if (cWeights.size())
        {
            // Convert to integers.
            velotab.setSize(cWeights.size());

            forAll(velotab, i)
            {
                // Scaled so that the minimum weight maps to 1
                velotab[i] = int((cWeights[i]/minWeights - 1)*rangeScale) + 1;
            }
        }
// Receive the neighbouring processor patch geometry (streamed into pBufs by
// the matching send) and verify that each local face area matches its
// neighbour face within the patch match tolerance, dumping OBJ debug files
// and aborting on mismatch.
// NOTE(review): this fragment is truncated at the final 'else' branch.
void Foam::processorPolyPatch::calcGeometry(PstreamBuffers& pBufs)
{
    if (Pstream::parRun())
    {
        {
            // Scope limits the stream's lifetime to the read
            UIPstream fromNeighbProc(neighbProcNo(), pBufs);

            fromNeighbProc
                >> neighbFaceCentres_
                >> neighbFaceAreas_
                >> neighbFaceCellCentres_;
        }

        // My normals
        vectorField faceNormals(size());

        // Neighbour normals
        vectorField nbrFaceNormals(neighbFaceAreas_.size());

        // Face match tolerances
        scalarField tols = calcFaceTol(*this, points(), faceCentres());

        // Calculate normals from areas and check
        forAll(faceNormals, facei)
        {
            scalar magSf = mag(faceAreas()[facei]);
            scalar nbrMagSf = mag(neighbFaceAreas_[facei]);
            scalar avSf = (magSf + nbrMagSf)/2.0;

            if (magSf < ROOTVSMALL && nbrMagSf < ROOTVSMALL)
            {
                // Undetermined normal. Use dummy normal to force separation
                // check. (note use of sqrt(VSMALL) since that is how mag
                // scales)
                faceNormals[facei] = point(1, 0, 0);
                nbrFaceNormals[facei] = faceNormals[facei];
            }
            else if (mag(magSf - nbrMagSf) > matchTolerance()*sqr(tols[facei]))
            {
                // Area mismatch: dump this patch's faces ...
                fileName nm
                (
                    boundaryMesh().mesh().time().path()
                   /name()+"_faces.obj"
                );

                Pout<< "processorPolyPatch::calcGeometry : Writing my "
                    << size() << " faces to OBJ file " << nm << endl;

                writeOBJ(nm, *this, points());

                // ... and lines connecting each local face centre to the
                // corresponding neighbour face centre, then abort.
                OFstream ccStr
                (
                    boundaryMesh().mesh().time().path()
                   /name() + "_faceCentresConnections.obj"
                );

                Pout<< "processorPolyPatch::calcGeometry :"
                    << " Dumping cell centre lines between"
                    << " corresponding face centres to OBJ file"
                    << ccStr.name()
                    << endl;

                label vertI = 0;

                forAll(faceCentres(), faceI)
                {
                    const point& c0 = neighbFaceCentres_[faceI];
                    const point& c1 = faceCentres()[faceI];
                    writeOBJ(ccStr, c0, c1, vertI);
                }

                FatalErrorIn
                (
                    "processorPolyPatch::calcGeometry()"
                )   << "face " << facei
                    << " area does not match neighbour by "
                    << 100*mag(magSf - nbrMagSf)/avSf
                    << "% -- possible face ordering problem."
                    << endl
                    << "patch:" << name()
                    << " my area:" << magSf
                    << " neighbour area:" << nbrMagSf
                    << " matching tolerance:"
                    << matchTolerance()*sqr(tols[facei])
                    << endl
                    << "Mesh face:" << start()+facei
                    << " vertices:"
                    << UIndirectList<point>(points(), operator[](facei))()
                    << endl
                    << "If you are certain your matching is correct"
                    << " you can increase the 'matchTolerance' setting"
                    << " in the patch dictionary in the boundary file."
                    << endl
                    << "Rerun with processor debug flag set for"
                    << " more information."
                    << exit(FatalError);
            }
            else
            {
// Write per-face coupling data (face area magnitude, wall temperature, heat
// flux and heat transfer coefficient) to the external-coupling stream.
// In parallel all processor contributions are gathered and written by the
// master only.
// NOTE(review): this fragment ends inside the parallel branch; the serial
// write path is not visible here.
void Foam::externalCoupledTemperatureMixedFvPatchScalarField::transferData
(
    OFstream& os
) const
{
    if (log())
    {
        Info<< type() << ": " << this->patch().name()
            << ": writing data to " << os.name()
            << endl;
    }

    const label patchi = patch().index();

    // heat flux [W/m2]
    scalarField qDot(this->patch().size(), 0.0);

    typedef compressible::turbulenceModel cmpTurbModelType;

    static word turbName
    (
        IOobject::groupName
        (
            turbulenceModel::propertiesName,
            internalField().group()
        )
    );

    static word thermoName(basicThermo::dictName);

    // Prefer the turbulence model's effective diffusivity; fall back to the
    // laminar thermo diffusivity; fail if neither model is registered.
    if (db().foundObject<cmpTurbModelType>(turbName))
    {
        const cmpTurbModelType& turbModel =
            db().lookupObject<cmpTurbModelType>(turbName);

        const basicThermo& thermo = turbModel.transport();

        const fvPatchScalarField& hep = thermo.he().boundaryField()[patchi];

        qDot = turbModel.alphaEff(patchi)*hep.snGrad();
    }
    else if (db().foundObject<basicThermo>(thermoName))
    {
        const basicThermo& thermo = db().lookupObject<basicThermo>(thermoName);

        const fvPatchScalarField& hep = thermo.he().boundaryField()[patchi];

        qDot = thermo.alpha().boundaryField()[patchi]*hep.snGrad();
    }
    else
    {
        FatalErrorInFunction
            << "Condition requires either compressible turbulence and/or "
            << "thermo model to be available" << exit(FatalError);
    }

    // patch temperature [K]
    const scalarField Tp(*this);

    // near wall cell temperature [K]
    const scalarField Tc(patchInternalField());

    // heat transfer coefficient [W/m2/K]
    // ROOTVSMALL guards against division by zero when Tp == Tc
    const scalarField htc(qDot/(Tp - Tc + ROOTVSMALL));

    if (Pstream::parRun())
    {
        // Distinct tag to avoid clashing with other concurrent comms
        int tag = Pstream::msgType() + 1;

        // Gather each field onto the master, one slot per processor
        List<Field<scalar>> magSfs(Pstream::nProcs());
        magSfs[Pstream::myProcNo()].setSize(this->patch().size());
        magSfs[Pstream::myProcNo()] = this->patch().magSf();
        Pstream::gatherList(magSfs, tag);

        List<Field<scalar>> values(Pstream::nProcs());
        values[Pstream::myProcNo()].setSize(this->patch().size());
        values[Pstream::myProcNo()] = Tp;
        Pstream::gatherList(values, tag);

        List<Field<scalar>> qDots(Pstream::nProcs());
        qDots[Pstream::myProcNo()].setSize(this->patch().size());
        qDots[Pstream::myProcNo()] = qDot;
        Pstream::gatherList(qDots, tag);

        List<Field<scalar>> htcs(Pstream::nProcs());
        htcs[Pstream::myProcNo()].setSize(this->patch().size());
        htcs[Pstream::myProcNo()] = htc;
        Pstream::gatherList(htcs, tag);

        if (Pstream::master())
        {
            // One line per face: magSf, T, qDot, htc
            forAll(values, proci)
            {
                const Field<scalar>& magSf = magSfs[proci];
                const Field<scalar>& value = values[proci];
                const Field<scalar>& qDot = qDots[proci];
                const Field<scalar>& htc = htcs[proci];

                forAll(magSf, facei)
                {
                    os  << magSf[facei] << token::SPACE
                        << value[facei] << token::SPACE
                        << qDot[facei] << token::SPACE
                        << htc[facei] << token::SPACE
                        << nl;
                }
            }

            os.flush();
        }
    }
// Write one cloud field to an EnSight "measured" data file and, on the first
// time index, register the variable in the EnSight case file.
// When dataExists is false only the file header is written.
// NOTE(review): 'Type' is a template parameter; the template<class Type>
// declaration precedes this fragment. The fragment is also truncated before
// the function's closing brace.
void ensightCloudField
(
    const Foam::IOobject& fieldObject,
    const Foam::fileName& postProcPath,
    const Foam::word& prepend,       // case-file name prefix
    const Foam::label timeIndex,
    const Foam::word& cloudName,
    Foam::Ostream& ensightCaseFile,
    const bool dataExists
)
{
    if (dataExists)
    {
        Info<< "Converting cloud " << cloudName
            << " field " << fieldObject.name() << endl;
    }
    else
    {
        Info<< "Creating empty cloud " << cloudName
            << " field " << fieldObject.name() << endl;
    }

    word timeFile = prepend + itoa(timeIndex);

    const Time& runTime = fieldObject.time();

    // Register the variable in the case file once, on the master only;
    // "***" is the EnSight wildcard replaced by the time-index string
    if (timeIndex == 0 && Pstream::master())
    {
        ensightCaseFile
            << pTraits<Type>::typeName << " per measured node: 1 ";
        ensightCaseFile.width(15);
        ensightCaseFile.setf(ios_base::left);
        ensightCaseFile
            << ("c" + fieldObject.name()).c_str()
            << (' ' + prepend + "***." + cloudName
              + "." + fieldObject.name()).c_str()
            << nl;
    }

    fileName ensightFileName
    (
        timeFile + "." + cloudName +"." + fieldObject.name()
    );

    OFstream ensightFile
    (
        postProcPath/ensightFileName,
        runTime.writeFormat(),
        runTime.writeVersion(),
        runTime.writeCompression()
    );

    ensightFile<< pTraits<Type>::typeName << " values" << nl;

    if (dataExists)
    {
        IOField<Type> vf(fieldObject);

        ensightFile.setf(ios_base::scientific, ios_base::floatfield);
        ensightFile.precision(5);

        // Components written 6 per line, 12 characters wide
        label count = 0;

        forAll(vf, i)
        {
            Type v = vf[i];

            // Clamp denormal-scale values to exact zero for the output
            if (mag(v) < 1.0e-90)
            {
                v = pTraits<Type>::zero;
            }

            for (direction cmpt=0; cmpt<pTraits<Type>::nComponents; cmpt++)
            {
                ensightFile << setw(12) << component(v, cmpt);

                if (++count % 6 == 0)
                {
                    ensightFile << nl;
                }
            }
        }

        // Terminate a partial (or empty) final line
        if ((count % 6 != 0) || (count==0))
        {
            ensightFile << nl;
        }
    }
// Call scotch with options from dictionary.
// Decomposes the distributed adjacency graph, given in CSR form
// (xadj[i]..xadj[i+1] index into adjncy for cell i), into nProcessors_
// domains via SCOTCH_dgraphMap; the per-cell domain is returned in
// finalDecomp. Returns 0 on success (SCOTCH errors abort via check()).
Foam::label Foam::ptscotchDecomp::decompose
(
    const fileName& meshPath,    // used only for the debug graph dump name
    const List<int>& adjncy,
    const List<int>& xadj,
    const scalarField& cWeights, // optional per-cell weights (may be empty)
    List<int>& finalDecomp
) const
{
    if (debug)
    {
        Pout<< "ptscotchDecomp : entering with xadj:" << xadj.size() << endl;
    }

    // Dump graph
    // NOTE(review): the graph dump reads "ptscotchCoeffs" but the strategy
    // below reads "scotchCoeffs" — confirm the intended dictionary name.
    if (decompositionDict_.found("ptscotchCoeffs"))
    {
        const dictionary& scotchCoeffs =
            decompositionDict_.subDict("ptscotchCoeffs");

        if (scotchCoeffs.lookupOrDefault("writeGraph", false))
        {
            // One .dgr file per processor, tagged with the processor number
            OFstream str
            (
                meshPath + "_" + Foam::name(Pstream::myProcNo()) + ".dgr"
            );

            Pout<< "Dumping Scotch graph file to " << str.name() << endl
                << "Use this in combination with dgpart." << endl;

            globalIndex globalCells(xadj.size()-1);

            // Distributed graph file (.grf)
            label version = 2;
            str << version << nl;
            // Number of files (procglbnbr)
            str << Pstream::nProcs();
            // My file number (procloc)
            str << ' ' << Pstream::myProcNo() << nl;
            // Total number of vertices (vertglbnbr)
            str << globalCells.size();
            // Total number of connections (edgeglbnbr)
            str << ' ' << returnReduce(xadj[xadj.size()-1], sumOp<label>())
                << nl;
            // Local number of vertices (vertlocnbr)
            str << xadj.size()-1;
            // Local number of connections (edgelocnbr)
            str << ' ' << xadj[xadj.size()-1] << nl;
            // Numbering starts from 0
            label baseval = 0;
            // 100*hasVertlabels+10*hasEdgeWeights+1*hasVertWeighs
            str << baseval << ' ' << "000" << nl;

            // One line per local cell: degree followed by its neighbours
            for (label cellI = 0; cellI < xadj.size()-1; cellI++)
            {
                label start = xadj[cellI];
                label end = xadj[cellI+1];

                str << end-start;

                for (label i = start; i < end; i++)
                {
                    str << ' ' << adjncy[i];
                }
                str << nl;
            }
        }
    }

    // Strategy
    // ~~~~~~~~

    // Default.
    SCOTCH_Strat stradat;
    check(SCOTCH_stratInit(&stradat), "SCOTCH_stratInit");

    if (decompositionDict_.found("scotchCoeffs"))
    {
        const dictionary& scotchCoeffs =
            decompositionDict_.subDict("scotchCoeffs");

        string strategy;
        if (scotchCoeffs.readIfPresent("strategy", strategy))
        {
            if (debug)
            {
                Info<< "ptscotchDecomp : Using strategy " << strategy << endl;
            }
            SCOTCH_stratDgraphMap(&stradat, strategy.c_str());
            //fprintf(stdout, "S\tStrat=");
            //SCOTCH_stratSave(&stradat, stdout);
            //fprintf(stdout, "\n");
        }
    }

    // Graph
    // ~~~~~

    // Integer vertex weights handed to SCOTCH (empty if no weights given)
    List<int> velotab;

    // Check for externally provided cellweights and if so initialise weights
    scalar minWeights = gMin(cWeights);

    if (cWeights.size() > 0)
    {
        if (minWeights <= 0)
        {
            WarningIn
            (
                "ptscotchDecomp::decompose(..)"
            )   << "Illegal minimum weight " << minWeights
                << endl;
        }

        if (cWeights.size() != xadj.size()-1)
        {
            FatalErrorIn
            (
                "ptscotchDecomp::decompose(..)"
            )   << "Number of cell weights " << cWeights.size()
                << " does not equal number of cells " << xadj.size()-1
                << exit(FatalError);
        }

        // Convert to integers.
        // Scaled so that the minimum weight maps to 1; no overflow guard
        // on the weight sum in this variant.
        velotab.setSize(cWeights.size());

        forAll(velotab, i)
        {
            velotab[i] = int(cWeights[i]/minWeights);
        }
    }

    if (debug)
    {
        Pout<< "SCOTCH_dgraphInit" << endl;
    }
    SCOTCH_Dgraph grafdat;
    check(SCOTCH_dgraphInit(&grafdat, MPI_COMM_WORLD), "SCOTCH_dgraphInit");

    if (debug)
    {
        Pout<< "SCOTCH_dgraphBuild with:" << nl
            << "xadj.size()-1 : " << xadj.size()-1 << nl
            << "xadj : " << long(xadj.begin()) << nl
            << "velotab : " << long(velotab.begin()) << nl
            << "adjncy.size() : " << adjncy.size() << nl
            << "adjncy : " << long(adjncy.begin()) << nl
            << endl;
    }

    // const_casts: SCOTCH takes non-const pointers but does not modify the
    // arrays through them (presumably — TODO confirm against SCOTCH docs)
    check
    (
        SCOTCH_dgraphBuild
        (
            &grafdat,               // grafdat
            0,                      // baseval, c-style numbering
            xadj.size()-1,          // vertlocnbr, nCells
            xadj.size()-1,          // vertlocmax
            const_cast<SCOTCH_Num*>(xadj.begin()),
                                    // vertloctab, start index per cell into
                                    // adjncy
            const_cast<SCOTCH_Num*>(&xadj[1]),// vendloctab, end index ,,
            const_cast<SCOTCH_Num*>(velotab.begin()),// veloloctab, vtx weights
            NULL,                   // vlblloctab
            adjncy.size(),          // edgelocnbr, number of arcs
            adjncy.size(),          // edgelocsiz
            const_cast<SCOTCH_Num*>(adjncy.begin()), // edgeloctab
            NULL,                   // edgegsttab
            NULL                    // edlotab, edge weights
        ),
        "SCOTCH_dgraphBuild"
    );

    if (debug)
    {
        Pout<< "SCOTCH_dgraphCheck" << endl;
    }
    check(SCOTCH_dgraphCheck(&grafdat), "SCOTCH_dgraphCheck");

    // Architecture
    // ~~~~~~~~~~~~
    // (fully connected network topology since using switch)

    if (debug)
    {
        Pout<< "SCOTCH_archInit" << endl;
    }
    SCOTCH_Arch archdat;
    check(SCOTCH_archInit(&archdat), "SCOTCH_archInit");

    // Optional per-processor target weights from the dictionary
    List<label> processorWeights;
    if (decompositionDict_.found("scotchCoeffs"))
    {
        const dictionary& scotchCoeffs =
            decompositionDict_.subDict("scotchCoeffs");

        scotchCoeffs.readIfPresent("processorWeights", processorWeights);
    }

    if (processorWeights.size())
    {
        if (debug)
        {
            Info<< "ptscotchDecomp : Using procesor weights "
                << processorWeights << endl;
        }
        check
        (
            SCOTCH_archCmpltw(&archdat, nProcessors_,
                processorWeights.begin()),
            "SCOTCH_archCmpltw"
        );
    }
    else
    {
        if (debug)
        {
            Pout<< "SCOTCH_archCmplt" << endl;
        }
        check
        (
            SCOTCH_archCmplt(&archdat, nProcessors_),
            "SCOTCH_archCmplt"
        );
    }

    //SCOTCH_Mapping mapdat;
    //SCOTCH_dgraphMapInit(&grafdat, &mapdat, &archdat, NULL);
    //SCOTCH_dgraphMapCompute(&grafdat, &mapdat, &stradat);
    /*Perform mapping*/
    //SCOTCHdgraphMapExit(&grafdat, &mapdat);

    // Hack:switch off fpu error trapping
#   ifdef LINUX_GNUC
    int oldExcepts = fedisableexcept
    (
        FE_DIVBYZERO
      | FE_INVALID
      | FE_OVERFLOW
    );
#   endif

    if (debug)
    {
        Pout<< "SCOTCH_dgraphMap" << endl;
    }

    // Result buffer must be sized before the call; one entry per local cell
    finalDecomp.setSize(xadj.size()-1);
    finalDecomp = 0;

    // NOTE(review): check label says "SCOTCH_graphMap" although the call is
    // SCOTCH_dgraphMap — message-only inconsistency.
    check
    (
        SCOTCH_dgraphMap
        (
            &grafdat,
            &archdat,
            &stradat,           // const SCOTCH_Strat *
            finalDecomp.begin() // parttab
        ),
        "SCOTCH_graphMap"
    );

#   ifdef LINUX_GNUC
    // Restore the previous fpu trapping state
    feenableexcept(oldExcepts);
#   endif

    //finalDecomp.setSize(xadj.size()-1);
    //check
    //(
    //    SCOTCH_dgraphPart
    //    (
    //        &grafdat,
    //        nProcessors_,       // partnbr
    //        &stradat,           // const SCOTCH_Strat *
    //        finalDecomp.begin() // parttab
    //    ),
    //    "SCOTCH_graphPart"
    //);

    if (debug)
    {
        Pout<< "SCOTCH_dgraphExit" << endl;
    }

    // Release storage for graph
    SCOTCH_dgraphExit(&grafdat);
    // Release storage for strategy
    SCOTCH_stratExit(&stradat);
    // Release storage for network topology
    SCOTCH_archExit(&archdat);

    return 0;
}