// In processor patches, there's a mix of internal faces (some
// of them turned) and possible cyclics. Slow loop
forAll (cp, faceI)
{
    // Subtract one to take into account offsets for
    // face direction.
    label curF = cp[faceI] - 1;

    // Is the face on the boundary?
    if (curF >= mesh_.nInternalFaces())
    {
        label curBPatch = mesh_.boundaryMesh().whichPatch(curF);

        if (!patchFields(curBPatch))
        {
            patchFields.set
            (
                curBPatch,
                fvPatchField<Type>::New
                (
                    mesh_.boundary()[curBPatch].type(),
                    mesh_.boundary()[curBPatch],
                    DimensionedField<Type, volMesh>::null()
                )
            );
        }

        // Add the face
        label curPatchFace =
            mesh_.boundaryMesh()[curBPatch].whichFace(curF);

        patchFields[curBPatch][curPatchFace] = curProcPatch[faceI];
    }
}
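// Note: the "cp[faceI] - 1" step above relies on the face-addressing
// convention assumed for faceProcAddressing: each entry stores the global
// face index offset by one, with a negative sign when the processor face is
// flipped relative to the global face. A minimal sketch of that assumption
// (the helper name is hypothetical, not part of the library):
inline Foam::label decodeOwnerFace(const Foam::label addr)
{
    return addr - 1;   // zero-based global face index for non-flipped entries
}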
Foam::tmp<Foam::GeometricField<Type, Foam::fvPatchField, Foam::volMesh> >
Foam::fvFieldReconstructor::reconstructFvVolumeField
(
    const IOobject& fieldIoObject
)
{
    // Read the field for all the processors
    PtrList<GeometricField<Type, fvPatchField, volMesh> > procFields
    (
        procMeshes_.size()
    );

    forAll (procMeshes_, procI)
    {
        procFields.set
        (
            procI,
            new GeometricField<Type, fvPatchField, volMesh>
            (
                IOobject
                (
                    fieldIoObject.name(),
                    procMeshes_[procI].time().timeName(),
                    procMeshes_[procI],
                    IOobject::MUST_READ,
                    IOobject::NO_WRITE
                ),
                procMeshes_[procI]
            )
        );
    }
void subsetPointFields
(
    const fvMeshSubset& subsetter,
    const pointMesh& pMesh,
    const wordList& fieldNames,
    PtrList<GeometricField<Type, pointPatchField, pointMesh> >& subFields
)
{
    const fvMesh& baseMesh = subsetter.baseMesh();

    forAll(fieldNames, i)
    {
        const word& fieldName = fieldNames[i];

        Info<< "Subsetting field " << fieldName << endl;

        GeometricField<Type, pointPatchField, pointMesh> fld
        (
            IOobject
            (
                fieldName,
                baseMesh.time().timeName(),
                baseMesh,
                IOobject::MUST_READ,
                IOobject::NO_WRITE
            ),
            pMesh
        );

        subFields.set(i, subsetter.interpolate(fld));
    }
}
bool addFieldsToList
(
    const fvMesh& mesh,
    PtrList<GeometricField<Type, fvPatchField, volMesh> >& list,
    const wordList& fieldNames
)
{
    typedef GeometricField<Type, fvPatchField, volMesh> fieldType;

    label index = 0;
    forAll(fieldNames, i)
    {
        IOobject obj
        (
            fieldNames[i],
            mesh.time().timeName(),
            mesh,
            IOobject::MUST_READ
        );

        if (obj.headerOk() && obj.headerClassName() == fieldType::typeName)
        {
            list.set(index++, new fieldType(obj, mesh));
        }
        else
        {
            Info<< "Could not find " << fieldNames[i] << endl;

            return false;
        }
    }
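// A hypothetical call site for addFieldsToList (names illustrative only):
// size the list to the number of requested fields and bail out if any of
// them cannot be found.
//
//     wordList fieldNames(3);
//     fieldNames[0] = "p";
//     fieldNames[1] = "k";
//     fieldNames[2] = "epsilon";
//
//     PtrList<volScalarField> flds(fieldNames.size());
//     if (!addFieldsToList(mesh, flds, fieldNames))
//     {
//         FatalErrorIn("main()") << "missing fields" << exit(FatalError);
//     }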
forAll (procMeshes_, procI)
{
    const GeometricField<Type, tetPolyPatchField, tetPointMesh>& procField =
        procFields[procI];

    // Get processor-to-global addressing for use in rmap
    labelList procToGlobalAddr = procAddressing(procI);

    // Set the cell values in the reconstructed field
    internalField.rmap
    (
        procField.internalField(),
        procToGlobalAddr
    );

    // Set the boundary patch values in the reconstructed field
    forAll(boundaryProcAddressing_[procI], patchI)
    {
        // Get patch index of the original patch
        const label curBPatch = boundaryProcAddressing_[procI][patchI];

        // check if the boundary patch is not a processor patch
        if (curBPatch >= 0)
        {
            if (!patchFields(curBPatch))
            {
                patchFields.set
                (
                    curBPatch,
                    tetPolyPatchField<Type>::New
                    (
                        procField.boundaryField()[patchI],
                        mesh_.boundary()[curBPatch],
                        DimensionedField<Type, tetPointMesh>::null(),
                        tetPolyPatchFieldReconstructor
                        (
                            mesh_.boundary()[curBPatch].size(),
                            procField.boundaryField()[patchI].size()
                        )
                    )
                );
            }

            // If the field stores values, do the rmap
            if (patchFields[curBPatch].storesFieldData())
            {
                patchFields[curBPatch].rmap
                (
                    procField.boundaryField()[patchI],
                    procPatchAddressing(procToGlobalAddr, procI, patchI)
                );
            }
        }
    }
}
void readFields
(
    PtrList<List<Type> >& values,
    const List<word>& fieldNames,
    const IOobjectList& cloudObjs
)
{
    IOobjectList objects(cloudObjs.lookupClass(IOField<Type>::typeName));

    forAll(fieldNames, j)
    {
        const IOobject* obj = objects.lookup(fieldNames[j]);

        if (obj != NULL)
        {
            Info<< " reading field " << fieldNames[j] << endl;

            IOField<Type> newField(*obj);
            values.set(j, new List<Type>(newField.xfer()));
        }
        else
        {
            FatalErrorIn
            (
                "template<class Type>"
                "void readFields"
                "("
                    "PtrList<List<Type> >&, "
                    "const List<word>&, "
                    "const IOobjectList&"
                ")"
            )   << "Unable to read field " << fieldNames[j]
                << abort(FatalError);
        }
    }
}
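// Minimal usage sketch for readFields above (variable names illustrative;
// "cloudObjs" is assumed to be the IOobjectList of a cloud's time
// directory):
//
//     wordList fieldNames(2);
//     fieldNames[0] = "d";
//     fieldNames[1] = "rho";
//
//     PtrList<List<scalar> > values(fieldNames.size());
//     readFields(values, fieldNames, cloudObjs);  // Type deduced as scalar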
tmp<GeometricField<Type, tetPolyPatchField, tetPointMesh> >
tetPointFieldReconstructor::reconstructTetPointField
(
    const IOobject& fieldIoObject
)
{
    // Read the field for all the processors
    PtrList<GeometricField<Type, tetPolyPatchField, tetPointMesh> > procFields
    (
        procMeshes_.size()
    );

    forAll (procMeshes_, procI)
    {
        procFields.set
        (
            procI,
            new GeometricField<Type, tetPolyPatchField, tetPointMesh>
            (
                IOobject
                (
                    fieldIoObject.name(),
                    procMeshes_[procI]().time().timeName(),
                    procMeshes_[procI](),
                    IOobject::MUST_READ,
                    IOobject::NO_WRITE
                ),
                procMeshes_[procI]
            )
        );
    }
Foam::tmp<Foam::GeometricField<Type, Foam::pointPatchField, Foam::pointMesh>>
Foam::pointFieldReconstructor::reconstructField(const IOobject& fieldIoObject)
{
    // Read the field for all the processors
    PtrList<GeometricField<Type, pointPatchField, pointMesh>> procFields
    (
        procMeshes_.size()
    );

    forAll(procMeshes_, proci)
    {
        procFields.set
        (
            proci,
            new GeometricField<Type, pointPatchField, pointMesh>
            (
                IOobject
                (
                    fieldIoObject.name(),
                    procMeshes_[proci]().time().timeName(),
                    procMeshes_[proci](),
                    IOobject::MUST_READ,
                    IOobject::NO_WRITE
                ),
                procMeshes_[proci]
            )
        );
    }
void Foam::lagrangianFieldDecomposer::readFields
(
    const label cloudI,
    const IOobjectList& lagrangianObjects,
    PtrList<PtrList<IOField<Type> > >& lagrangianFields
)
{
    // Search list of objects for lagrangian fields
    IOobjectList lagrangianTypeObjects
    (
        lagrangianObjects.lookupClass(IOField<Type>::typeName)
    );

    lagrangianFields.set
    (
        cloudI,
        new PtrList<IOField<Type> >
        (
            lagrangianTypeObjects.size()
        )
    );

    label lagrangianFieldi = 0;

    forAllIter(IOobjectList, lagrangianTypeObjects, iter)
    {
        lagrangianFields[cloudI].set
        (
            lagrangianFieldi++,
            new IOField<Type>(*iter())
        );
    }
forAll(procMeshes_, proci)
{
    const GeometricField<Type, pointPatchField, pointMesh>& procField =
        procFields[proci];

    // Get processor-to-global addressing for use in rmap
    const labelList& procToGlobalAddr = pointProcAddressing_[proci];

    // Set the point values in the reconstructed field
    internalField.rmap
    (
        procField.internalField(),
        procToGlobalAddr
    );

    // Set the boundary patch values in the reconstructed field
    forAll(boundaryProcAddressing_[proci], patchi)
    {
        // Get patch index of the original patch
        const label curBPatch = boundaryProcAddressing_[proci][patchi];

        // check if the boundary patch is not a processor patch
        if (curBPatch >= 0)
        {
            if (!patchFields(curBPatch))
            {
                patchFields.set
                (
                    curBPatch,
                    pointPatchField<Type>::New
                    (
                        procField.boundaryField()[patchi],
                        mesh_.boundary()[curBPatch],
                        DimensionedField<Type, pointMesh>::null(),
                        pointPatchFieldReconstructor
                        (
                            mesh_.boundary()[curBPatch].size()
                        )
                    )
                );
            }

            patchFields[curBPatch].rmap
            (
                procField.boundaryField()[patchi],
                patchPointAddressing_[proci][patchi]
            );
        }
    }
}
void readFields
(
    const vtkMesh& vMesh,
    const typename GeoField::Mesh& mesh,
    const IOobjectList& objects,
    const HashSet<word>& selectedFields,
    PtrList<GeoField>& fields
)
{
    // Search list of objects for fields of the requested GeoField type
    IOobjectList fieldObjects(objects.lookupClass(GeoField::typeName));

    // Construct the fields
    label nFields = fields.size();

    fields.setSize(nFields + fieldObjects.size());

    for
    (
        IOobjectList::iterator iter = fieldObjects.begin();
        iter != fieldObjects.end();
        ++iter
    )
    {
        if (selectedFields.empty() || selectedFields.found(iter()->name()))
        {
            fields.set
            (
                nFields,
                vMesh.interpolate
                (
                    GeoField
                    (
                        *iter(),
                        mesh
                    )
                )
            );
            nFields++;
        }
    }

    fields.setSize(nFields);
}
void Foam::addToFieldList
(
    PtrList<GeometricField<Type, fvPatchField, volMesh> >& fieldList,
    const IOobject& obj,
    const label fieldI,
    const fvMesh& mesh
)
{
    typedef GeometricField<Type, fvPatchField, volMesh> fieldType;

    if (obj.headerClassName() == fieldType::typeName)
    {
        fieldList.set
        (
            fieldI,
            new fieldType(obj, mesh)
        );

        Info<< " " << fieldType::typeName << tab << obj.name() << endl;
    }
}
void Foam::readFields
(
    const Mesh& mesh,
    const IOobjectList& objects,
    PtrList<GeoField>& fields
)
{
    // Search list of objects for fields of the requested GeoField type
    IOobjectList fieldObjects(objects.lookupClass(GeoField::typeName));

    // Remove the cellDist field
    IOobjectList::iterator celDistIter = fieldObjects.find("cellDist");
    if (celDistIter != fieldObjects.end())
    {
        fieldObjects.erase(celDistIter);
    }

    // Construct the fields
    fields.setSize(fieldObjects.size());

    label fieldi = 0;
    for
    (
        IOobjectList::iterator iter = fieldObjects.begin();
        iter != fieldObjects.end();
        ++iter
    )
    {
        fields.set
        (
            fieldi++,
            new GeoField
            (
                *iter(),
                mesh
            )
        );
    }
}
Foam::searchableSurfaceControl::searchableSurfaceControl
(
    const Time& runTime,
    const word& name,
    const dictionary& controlFunctionDict,
    const conformationSurfaces& geometryToConformTo,
    const scalar& defaultCellSize
)
:
    cellSizeAndAlignmentControl
    (
        runTime,
        name,
        controlFunctionDict,
        geometryToConformTo,
        defaultCellSize
    ),
    surfaceName_(controlFunctionDict.lookupOrDefault<word>("surface", name)),
    searchableSurface_(geometryToConformTo.geometry()[surfaceName_]),
    geometryToConformTo_(geometryToConformTo),
    cellSizeFunctions_(1),
    regionToCellSizeFunctions_(searchableSurface_.regions().size(), -1),
    maxPriority_(-1)
{
    Info<< indent << "Master settings:" << endl;
    Info<< incrIndent;

    cellSizeFunctions_.set
    (
        0,
        cellSizeFunction::New
        (
            controlFunctionDict,
            searchableSurface_,
            defaultCellSize_,
            labelList()
        )
    );

    Info<< decrIndent;

    PtrList<cellSizeFunction> regionCellSizeFunctions;
    DynamicList<label> defaultCellSizeRegions;

    label nRegionCellSizeFunctions = 0;

    // Loop over regions - if any entry is not specified they should
    // inherit values from the parent surface.
    if (controlFunctionDict.found("regions"))
    {
        const dictionary& regionsDict = controlFunctionDict.subDict("regions");
        const wordList& regionNames = searchableSurface_.regions();

        label nRegions = regionsDict.size();

        regionCellSizeFunctions.setSize(nRegions);
        defaultCellSizeRegions.setCapacity(nRegions);

        forAll(regionNames, regionI)
        {
            const word& regionName = regionNames[regionI];

            label regionID = geometryToConformTo_.geometry().findSurfaceRegionID
            (
                this->name(),
                regionName
            );

            if (regionsDict.found(regionName))
            {
                // Get the dictionary for region
                const dictionary& regionDict = regionsDict.subDict(regionName);

                Info<< indent << "Region " << regionName
                    << " (ID = " << regionID << ")" << " settings:" << endl;
                Info<< incrIndent;

                regionCellSizeFunctions.set
                (
                    nRegionCellSizeFunctions,
                    cellSizeFunction::New
                    (
                        regionDict,
                        searchableSurface_,
                        defaultCellSize_,
                        labelList(1, regionID)
                    )
                );

                Info<< decrIndent;

                regionToCellSizeFunctions_[regionID] = nRegionCellSizeFunctions;

                nRegionCellSizeFunctions++;
            }
            else
            {
                // Add to default list
                defaultCellSizeRegions.append(regionID);
            }
        }
    }

    if (defaultCellSizeRegions.empty() && !regionCellSizeFunctions.empty())
    {
        cellSizeFunctions_.transfer(regionCellSizeFunctions);
    }
    else if (nRegionCellSizeFunctions > 0)
    {
        regionCellSizeFunctions.setSize(nRegionCellSizeFunctions + 1);

        regionCellSizeFunctions.set
        (
            nRegionCellSizeFunctions,
            cellSizeFunction::New
            (
                controlFunctionDict,
                searchableSurface_,
                defaultCellSize_,
                labelList()
            )
        );

        const wordList& regionNames = searchableSurface_.regions();

        forAll(regionNames, regionI)
        {
            if (regionToCellSizeFunctions_[regionI] == -1)
            {
                regionToCellSizeFunctions_[regionI] = nRegionCellSizeFunctions;
            }
        }

        cellSizeFunctions_.transfer(regionCellSizeFunctions);
    }
void Foam::equationReader::removePowExponents
(
    const label index,
    tokenList& tl,
    PtrList<equationOperation>& map,
    labelList& opLvl,
    labelList& pl
) const
{
    // Remove pow(a,b) exponent part 'b' from an equation and create a sub-
    // equation.
    label tokenI(0);

    while (tokenI < map.size())
    {
        if (map[tokenI].operation() == equationOperation::otpow)
        {
            // Found a 'pow('. Look for ','; fail on ')', or end of list.
            // pl checks ensure the ',' or ')' relate to the 'pow(', and not
            // another function / parenthesis
            const label powFoundAt(tokenI);
            const label pLvl(pl[tokenI]);

            while ((opLvl[tokenI] != 5) || (pl[tokenI] != pLvl))
            {
                if
                (
                    ((opLvl[tokenI] == -4) && (pl[tokenI] == pLvl))
                 || (tokenI == (map.size() - 1))
                )
                {
                    OStringStream description;
                    description << "pow() function takes two arguments.";
                    fatalParseError
                    (
                        index, tl, powFoundAt, tokenI,
                        "equationReader::removePowExponents",
                        description
                    );
                }
                tokenI++;
            }

            // Found 'pow( ... ,' look for ')', fail on list end
            const label commaFoundAt(tokenI);
            while ((opLvl[tokenI] != -4) || (pl[tokenI] != pLvl))
            {
                if (tokenI == (map.size() - 1))
                {
                    OStringStream description;
                    description << "Can't find closing parenthesis for "
                        << "pow() function.";
                    fatalParseError
                    (
                        index, tl, powFoundAt, tokenI,
                        "equationReader::removePowExponents",
                        description
                    );
                }
                tokenI++;
            }

            const label closeFoundAt(tokenI);

            // Ignore if the exponent is only 1 token
            if ((closeFoundAt - commaFoundAt) > 2)
            {
                // Now create sub-equation
                OStringStream subEqnStream;
                for
                (
                    label subTokenI(commaFoundAt + 1);
                    subTokenI < closeFoundAt;
                    subTokenI++
                )
                {
                    if
                    (
                        tl[subTokenI].isPunctuation()
                     && (tl[subTokenI].pToken() == token::COLON)
                    )
                    {
                        subEqnStream << "^";
                    }
                    else
                    {
                        subEqnStream << tl[subTokenI];
                    }
                }
                string subEqnRawText(subEqnStream.str());

                const equation& eqn(operator[](index));
                equation subEqn
                (
                    eqn.name() + "_powExponent_" + name(powFoundAt),
                    subEqnRawText,
                    eqn.overrideDimensions(),
                    eqn.changeDimensions()
                );

                bool eqnCreated(false);
                for (label eqnI(0); eqnI < size(); eqnI++)
                {
                    const equation& eqnTest(operator[](eqnI));
                    if (eqnTest.name() == subEqn.name())
                    {
                        clearEquation(eqnI);
                        eqnTest.setRawText(subEqn.rawText());
                        eqnTest.setOverrideDimensions
                        (
                            subEqn.overrideDimensions()
                        );
                        eqnTest.setChangeDimensions
                        (
                            subEqn.changeDimensions()
                        );
                        eqnCreated = true;
                    }
                }

                if (!eqnCreated)
                {
                    createEquation(subEqn);
                }

                // Change commaFoundAt + 1 entry to reflect new subEquation
                // reference
                tl[commaFoundAt + 1] = token(subEqn.name());
                map.set
                (
                    commaFoundAt + 1,
                    new equationOperation(findSource(subEqn.name()))
                );
                opLvl[commaFoundAt + 1] = 0;
                pl[commaFoundAt + 1] = pl[commaFoundAt];

                // Remove the subEquation from tl, map, opLvl and pl
                label tokensRemoved(closeFoundAt - (commaFoundAt + 2));
                label newSize(map.size() - tokensRemoved);
                for
                (
                    label subTokenI(commaFoundAt + 2);
                    subTokenI < newSize;
                    subTokenI++
                )
                {
                    tl[subTokenI] = tl[subTokenI + tokensRemoved];
                    map[subTokenI] = map[subTokenI + tokensRemoved];
                    opLvl[subTokenI] = opLvl[subTokenI + tokensRemoved];
                    pl[subTokenI] = pl[subTokenI + tokensRemoved];
                }
                tl.setSize(newSize);
                map.setSize(newSize);
                opLvl.setSize(newSize);
                pl.setSize(newSize);
            }
        }
        tokenI++;
    }
}
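// To make the effect of this pass concrete, here is a hypothetical
// before/after view (equation and variable names invented for illustration):
// a multi-token exponent inside pow() is replaced by a reference to a
// freshly created sub-equation.
//
//   raw text before:  pow( x , 2*y + 1 )
//   raw text after :  pow( x , nu_eqn_powExponent_0 )
//
// where "nu_eqn_powExponent_0" is registered as a separate equation whose
// raw text is "2*y + 1"; the numeric suffix is the token index of the 'pow'
// entry, so it is 0 only when 'pow' is the first token.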
forAll (procMeshes_, procI)
{
    const GeometricField<Type, fvPatchField, volMesh>& procField =
        procFields[procI];

    // Set the cell values in the reconstructed field
    internalField.rmap
    (
        procField.internalField(),
        cellProcAddressing_[procI]
    );

    // Set the boundary patch values in the reconstructed field
    forAll (boundaryProcAddressing_[procI], patchI)
    {
        // Get patch index of the original patch
        const label curBPatch = boundaryProcAddressing_[procI][patchI];

        // Get addressing slice for this patch
        const labelList::subList cp =
            procMeshes_[procI].boundary()[patchI].patchSlice
            (
                faceProcAddressing_[procI]
            );

        // check if the boundary patch is not a processor patch
        if (curBPatch >= 0)
        {
            // Regular patch. Fast looping
            if (!patchFields(curBPatch))
            {
                patchFields.set
                (
                    curBPatch,
                    fvPatchField<Type>::New
                    (
                        procField.boundaryField()[patchI],
                        mesh_.boundary()[curBPatch],
                        DimensionedField<Type, volMesh>::null(),
                        fvPatchFieldReconstructor
                        (
                            mesh_.boundary()[curBPatch].size(),
                            procField.boundaryField()[patchI].size()
                        )
                    )
                );
            }

            const label curPatchStart =
                mesh_.boundaryMesh()[curBPatch].start();

            labelList reverseAddressing(cp.size());

            forAll (cp, faceI)
            {
                // Subtract one to take into account offsets for
                // face direction.
                reverseAddressing[faceI] = cp[faceI] - 1 - curPatchStart;
            }

            patchFields[curBPatch].rmap
            (
                procField.boundaryField()[patchI],
                reverseAddressing
            );
        }
        else
        {
void Foam::GAMGSolver::Vcycle
(
    const PtrList<lduMatrix::smoother>& smoothers,
    scalargpuField& psi,
    const scalargpuField& source,
    scalargpuField& Apsi,
    scalargpuField& finestCorrection,
    scalargpuField& finestResidual,
    scalargpuField& scratch1,
    scalargpuField& scratch2,
    PtrList<scalargpuField>& coarseCorrFields,
    PtrList<scalargpuField>& coarseSources,
    const direction cmpt
) const
{
    //debug = 2;

    const label coarsestLevel = matrixLevels_.size() - 1;

    // Restrict finest grid residual for the next level up.
    agglomeration_.restrictField(coarseSources[0], finestResidual, 0);

    if (debug >= 2 && nPreSweeps_)
    {
        Pout<< "Pre-smoothing scaling factors: ";
    }

    // Residual restriction (going to coarser levels)
    for (label leveli = 0; leveli < coarsestLevel; leveli++)
    {
        if (coarseSources.set(leveli + 1))
        {
            // If the optional pre-smoothing sweeps are selected,
            // smooth the coarse-grid field for the restricted source
            if (nPreSweeps_)
            {
                coarseCorrFields[leveli] = 0.0;

                smoothers[leveli + 1].smooth
                (
                    coarseCorrFields[leveli],
                    coarseSources[leveli],
                    cmpt,
                    min
                    (
                        nPreSweeps_ + preSweepsLevelMultiplier_*leveli,
                        maxPreSweeps_
                    )
                );

                scalargpuField ACf
                (
                    const_cast<const scalargpuField&>(scratch1),
                    coarseCorrFields[leveli].size()
                );

                // Scale coarse-grid correction field
                // but not on the coarsest level because it evaluates to 1
                if (scaleCorrection_ && leveli < coarsestLevel - 1)
                {
                    scale
                    (
                        coarseCorrFields[leveli],
                        const_cast<scalargpuField&>(ACf),
                        matrixLevels_[leveli],
                        interfaceLevelsBouCoeffs_[leveli],
                        interfaceLevels_[leveli],
                        coarseSources[leveli],
                        cmpt
                    );
                }

                // Correct the residual with the new solution
                matrixLevels_[leveli].Amul
                (
                    ACf,
                    coarseCorrFields[leveli],
                    interfaceLevelsBouCoeffs_[leveli],
                    interfaceLevels_[leveli],
                    cmpt
                );

                coarseSources[leveli] -= ACf;
            }

            // Residual is equal to source
            agglomeration_.restrictField
            (
                coarseSources[leveli + 1],
                coarseSources[leveli],
                leveli + 1
            );
        }
    }

    if (debug >= 2 && nPreSweeps_)
    {
        Pout<< endl;
    }

    // Solve the coarsest level with either an iterative or direct solver
    if (coarseCorrFields.set(coarsestLevel))
    {
        solveCoarsestLevel
        (
            coarseCorrFields[coarsestLevel],
            coarseSources[coarsestLevel]
        );
    }

    if (debug >= 2)
    {
        Pout<< "Post-smoothing scaling factors: ";
    }

    // Smoothing and prolongation of the coarse correction fields
    // (going to finer levels)
    scalargpuField dummyField(0);

    for (label leveli = coarsestLevel - 1; leveli >= 0; leveli--)
    {
        if (coarseCorrFields.set(leveli))
        {
            // Create a field for the pre-smoothed correction field
            // as a sub-field of the finestCorrection which is not
            // currently being used
            scalargpuField preSmoothedCoarseCorrField
            (
                const_cast<const scalargpuField&>(scratch2),
                coarseCorrFields[leveli].size()
            );

            // Only store the preSmoothedCoarseCorrField if pre-smoothing is
            // used
            if (nPreSweeps_)
            {
                preSmoothedCoarseCorrField = coarseCorrFields[leveli];
            }

            agglomeration_.prolongField
            (
                coarseCorrFields[leveli],
                (
                    coarseCorrFields.set(leveli + 1)
                  ? coarseCorrFields[leveli + 1]
                  : dummyField  // dummy value
                ),
                leveli + 1
            );

            // Create A.psi for this coarse level as a sub-field of Apsi
            scalargpuField ACf
            (
                const_cast<const scalargpuField&>(scratch1),
                coarseCorrFields[leveli].size()
            );
            scalargpuField& ACfRef = ACf;

            if (interpolateCorrection_) //&& leveli < coarsestLevel - 2)
            {
                if (coarseCorrFields.set(leveli + 1))
                {
                    interpolate
                    (
                        coarseCorrFields[leveli],
                        ACfRef,
                        matrixLevels_[leveli],
                        interfaceLevelsBouCoeffs_[leveli],
                        interfaceLevels_[leveli],
                        agglomeration_.restrictSortAddressing(leveli + 1),
                        agglomeration_.restrictTargetAddressing(leveli + 1),
                        agglomeration_.restrictTargetStartAddressing(leveli + 1),
                        coarseCorrFields[leveli + 1],
                        cmpt
                    );
                }
                else
                {
                    interpolate
                    (
                        coarseCorrFields[leveli],
                        ACfRef,
                        matrixLevels_[leveli],
                        interfaceLevelsBouCoeffs_[leveli],
                        interfaceLevels_[leveli],
                        cmpt
                    );
                }
            }

            // Scale coarse-grid correction field
            // but not on the coarsest level because it evaluates to 1
            if
            (
                scaleCorrection_
             && (interpolateCorrection_ || leveli < coarsestLevel - 1)
            )
            {
                scale
                (
                    coarseCorrFields[leveli],
                    ACfRef,
                    matrixLevels_[leveli],
                    interfaceLevelsBouCoeffs_[leveli],
                    interfaceLevels_[leveli],
                    coarseSources[leveli],
                    cmpt
                );
            }

            // Only add the preSmoothedCoarseCorrField if pre-smoothing is
            // used
            if (nPreSweeps_)
            {
                coarseCorrFields[leveli] += preSmoothedCoarseCorrField;
            }

            smoothers[leveli + 1].smooth
            (
                coarseCorrFields[leveli],
                coarseSources[leveli],
                cmpt,
                min
                (
                    nPostSweeps_ + postSweepsLevelMultiplier_*leveli,
                    maxPostSweeps_
                )
            );
        }
    }

    // Prolong the finest level correction
    agglomeration_.prolongField(finestCorrection, coarseCorrFields[0], 0);

    if (interpolateCorrection_)
    {
        interpolate
        (
            finestCorrection,
            Apsi,
            matrix_,
            interfaceBouCoeffs_,
            interfaces_,
            agglomeration_.restrictSortAddressing(0),
            agglomeration_.restrictTargetAddressing(0),
            agglomeration_.restrictTargetStartAddressing(0),
            coarseCorrFields[0],
            cmpt
        );
    }

    if (scaleCorrection_)
    {
        // Scale the finest level correction
        scale
        (
            finestCorrection,
            Apsi,
            matrix_,
            interfaceBouCoeffs_,
            interfaces_,
            finestResidual,
            cmpt
        );
    }

    // psi += finestCorrection (element-wise, on the device)
    thrust::transform
    (
        psi.begin(),
        psi.end(),
        finestCorrection.begin(),
        psi.begin(),
        thrust::plus<scalar>()
    );

    smoothers[0].smooth(psi, source, cmpt, nFinestSweeps_);
}
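// Schematic of the V-cycle implemented above (comments only, derived from
// the control flow of the function itself):
//
//   restrict finest residual            -> coarseSources[0]
//   for leveli = 0 .. coarsestLevel-1:     // down-leg
//       optional pre-smooth, update residual, restrict to leveli+1
//   solveCoarsestLevel(...)                // bottom of the V
//   for leveli = coarsestLevel-1 .. 0:     // up-leg
//       prolong, optional interpolate/scale, post-smooth
//   prolong to finest level, correct psi, final smooth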
tmp<GeometricField<Type, pointPatchField, pointMesh> >
pointFieldDecomposer::decomposeField
(
    const GeometricField<Type, pointPatchField, pointMesh>& field
) const
{
    // Create and map the internal field values
    Field<Type> internalField(field.internalField(), pointAddressing_);

    // Create a list of pointers for the patchFields including one extra
    // for the global patch
    PtrList<pointPatchField<Type> > patchFields
    (
        boundaryAddressing_.size() + 1
    );

    // Create and map the patch field values
    forAll (boundaryAddressing_, patchi)
    {
        if (patchFieldDecomposerPtrs_[patchi])
        {
            patchFields.set
            (
                patchi,
                pointPatchField<Type>::New
                (
                    field.boundaryField()[boundaryAddressing_[patchi]],
                    procMesh_.boundary()[patchi],
                    DimensionedField<Type, pointMesh>::null(),
                    *patchFieldDecomposerPtrs_[patchi]
                )
            );
        }
        else
        {
            patchFields.set
            (
                patchi,
                new ProcessorPointPatchField
                <
                    pointPatchField, pointMesh, pointPatch,
                    processorPointPatch, DummyMatrix, Type
                >
                (
                    procMesh_.boundary()[patchi],
                    DimensionedField<Type, pointMesh>::null()
                )
            );
        }
    }

    // Add the global patch
    patchFields.set
    (
        boundaryAddressing_.size(),
        new GlobalPointPatchField
        <
            pointPatchField, pointMesh, pointPatch,
            globalPointPatch, DummyMatrix, Type
        >
        (
            procMesh_.boundary().globalPatch(),
            DimensionedField<Type, pointMesh>::null()
        )
    );

    // Create the field for the processor
    return tmp<GeometricField<Type, pointPatchField, pointMesh> >
    (
        new GeometricField<Type, pointPatchField, pointMesh>
        (
            IOobject
            (
                field.name(),
                procMesh_().time().timeName(),
                procMesh_(),
                IOobject::NO_READ,
                IOobject::NO_WRITE
            ),
            procMesh_,
            field.dimensions(),
            internalField,
            patchFields
        )
    );
}
void Foam::GAMGSolver::initVcycle
(
    PtrList<scalargpuField>& coarseCorrFields,
    PtrList<scalargpuField>& coarseSources,
    PtrList<lduMatrix::smoother>& smoothers,
    scalargpuField& scratch1,
    scalargpuField& scratch2
) const
{
    label maxSize = matrix_.diag().size();

    coarseCorrFields.setSize(matrixLevels_.size());
    coarseSources.setSize(matrixLevels_.size());
    smoothers.setSize(matrixLevels_.size() + 1);

    // Create the smoother for the finest level
    smoothers.set
    (
        0,
        lduMatrix::smoother::New
        (
            fieldName_,
            matrix_,
            interfaceBouCoeffs_,
            interfaceIntCoeffs_,
            interfaces_,
            controlDict_
        )
    );

    forAll(matrixLevels_, leveli)
    {
        if (agglomeration_.nCells(leveli) >= 0)
        {
            label nCoarseCells = agglomeration_.nCells(leveli);

            coarseSources.set
            (
                leveli,
                GAMGSolverCache::source(leveli, nCoarseCells)
            );
            //coarseSources.set(leveli, new scalargpuField(nCoarseCells));
        }

        if (matrixLevels_.set(leveli))
        {
            const lduMatrix& mat = matrixLevels_[leveli];

            label nCoarseCells = mat.diag().size();

            maxSize = max(maxSize, nCoarseCells);

            //coarseCorrFields.set(leveli, new scalargpuField(nCoarseCells));
            coarseCorrFields.set
            (
                leveli,
                GAMGSolverCache::corr(leveli, nCoarseCells)
            );

            smoothers.set
            (
                leveli + 1,
                lduMatrix::smoother::New
                (
                    fieldName_,
                    matrixLevels_[leveli],
                    interfaceLevelsBouCoeffs_[leveli],
                    interfaceLevelsIntCoeffs_[leveli],
                    interfaceLevels_[leveli],
                    controlDict_
                )
            );
        }
    }

    if (maxSize > matrix_.diag().size())
    {
        // Allocate some scratch storage
        scratch1.setSize(maxSize);
        scratch2.setSize(maxSize);
    }
}
void topoMapper::storeGradients
(
    GradientTable& gradTable,
    PtrList<gradType>& gradList
) const
{
    // Define a few typedefs for convenience
    typedef GeometricField<Type, fvPatchField, volMesh> volType;
    typedef const GeometricField<Type, fvPatchField, volMesh> constVolType;
    typedef HashTable<constVolType*> volTypeTable;

    // Fetch all fields from registry
    volTypeTable fields(mesh_.objectRegistry::lookupClass<volType>());

    // Track field count
    label nFields = 0;

    // Store old-times before gradient computation
    for
    (
        typename volTypeTable::iterator fIter = fields.begin();
        fIter != fields.end();
        ++fIter
    )
    {
        fIter()->storeOldTimes();
        nFields++;
    }

    // Size up the list
    gradList.setSize(nFields);

    label fieldIndex = 0;

    for
    (
        typename volTypeTable::const_iterator fIter = fields.begin();
        fIter != fields.end();
        ++fIter
    )
    {
        const volType& field = *fIter();

        // Compute the gradient.
        // If the schemes dictionary contains a grad entry for this field,
        // use that; otherwise, default to leastSquares.
        word gradName("grad(" + field.name() + ')');

        // Register field under a name that's unique
        word registerName("remapGradient(" + field.name() + ')');

        // Make a new entry
        if (mesh_.schemesDict().subDict("gradSchemes").found(gradName))
        {
            gradList.set
            (
                fieldIndex,
                new gradType
                (
                    IOobject
                    (
                        registerName,
                        mesh_.time().timeName(),
                        mesh_,
                        IOobject::NO_READ,
                        IOobject::NO_WRITE,
                        true
                    ),
                    fvc::grad(field, gradName)()
                )
            );
        }
        else
        {
            gradList.set
            (
                fieldIndex,
                new gradType
                (
                    IOobject
                    (
                        registerName,
                        mesh_.time().timeName(),
                        mesh_,
                        IOobject::NO_READ,
                        IOobject::NO_WRITE,
                        true
                    ),
                    fv::leastSquaresGrad<Type>(mesh_).grad(field)()
                )
            );
        }

        // Add a map entry
        gradTable.insert
        (
            field.name(),
            GradientMap(registerName, fieldIndex++)
        );
    }
}
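// The scheme lookup above keys on an entry named "grad(<fieldName>)" inside
// the gradSchemes sub-dictionary returned by schemesDict() (normally the
// fvSchemes dictionary). A hypothetical fragment that would trigger the
// fvc::grad branch for a field "T" might look like:
//
//     gradSchemes
//     {
//         default         Gauss linear;
//         grad(T)         cellLimited Gauss linear 1;
//     }
//
// Fields without such an entry fall back to the leastSquaresGrad branch.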
void Foam::nearWallFields::createFields
(
    PtrList<GeometricField<Type, fvPatchField, volMesh> >& sflds
) const
{
    typedef GeometricField<Type, fvPatchField, volMesh> vfType;

    HashTable<const vfType*> flds(obr_.lookupClass<vfType>());

    forAllConstIter(typename HashTable<const vfType*>, flds, iter)
    {
        const vfType& fld = *iter();

        if (fieldMap_.found(fld.name()))
        {
            const word& sampleFldName = fieldMap_[fld.name()];

            if (obr_.found(sampleFldName))
            {
                Info<< " a field " << sampleFldName
                    << " already exists on the mesh." << endl;
            }
            else
            {
                label sz = sflds.size();
                sflds.setSize(sz + 1);

                IOobject io(fld);
                io.readOpt() = IOobject::NO_READ;
                io.rename(sampleFldName);

                sflds.set(sz, new vfType(io, fld));
                vfType& sampleFld = sflds[sz];

                // Reset the bcs to be directMapped
                forAllConstIter(labelHashSet, patchSet_, iter)
                {
                    label patchI = iter.key();

                    sampleFld.boundaryField().set
                    (
                        patchI,
                        new selfContainedDirectMappedFixedValueFvPatchField
                        <Type>
                        (
                            sampleFld.mesh().boundary()[patchI],
                            sampleFld.dimensionedInternalField(),
                            sampleFld.mesh().name(),
                            directMappedPatchBase::NEARESTCELL,
                            word::null,             // samplePatch
                            -distance_,
                            sampleFld.name(),       // fieldName
                            false,                  // setAverage
                            pTraits<Type>::zero,    // average
                            interpolationCellPoint<Type>::typeName
                        )
                    );
                }

                Info<< " created " << sampleFld.name()
                    << " to sample " << fld.name() << endl;
            }
        }
    }
}
void ReadAndMapFields
(
    const fvMesh& mesh,
    const IOobjectList& objects,
    const fvMesh& tetDualMesh,
    const labelList& map,
    const typename MappedGeoField::value_type& nullValue,
    PtrList<MappedGeoField>& tetFields
)
{
    typedef typename MappedGeoField::value_type Type;

    // Search list of objects for wanted type
    IOobjectList fieldObjects(objects.lookupClass(ReadGeoField::typeName));

    tetFields.setSize(fieldObjects.size());

    label i = 0;
    forAllConstIter(IOobjectList, fieldObjects, iter)
    {
        Info<< "Converting " << ReadGeoField::typeName << ' ' << iter.key()
            << endl;

        ReadGeoField readField(*iter(), mesh);

        tetFields.set
        (
            i,
            new MappedGeoField
            (
                IOobject
                (
                    readField.name(),
                    readField.instance(),
                    readField.local(),
                    tetDualMesh,
                    IOobject::NO_READ,
                    IOobject::AUTO_WRITE,
                    readField.registerObject()
                ),
                pointMesh::New(tetDualMesh),
                dimensioned<Type>
                (
                    "zero",
                    readField.dimensions(),
                    pTraits<Type>::zero
                )
            )
        );

        Field<Type>& fld = tetFields[i].internalField();

        // Map from read field. Set unmapped entries to nullValue.
        fld.setSize(map.size(), nullValue);
        forAll(map, pointI)
        {
            label index = map[pointI];

            if (index > 0)
            {
                label cellI = index - 1;
                fld[pointI] = readField[cellI];
            }
            else if (index < 0)
            {
                label faceI = -index - 1;
                label bFaceI = faceI - mesh.nInternalFaces();
                if (bFaceI >= 0)
                {
                    label patchI = mesh.boundaryMesh().patchID()[bFaceI];
                    label localFaceI = mesh.boundaryMesh()[patchI].whichFace
                    (
                        faceI
                    );
                    fld[pointI] = readField.boundaryField()[patchI][localFaceI];
                }
                //else
                //{
                //    FatalErrorIn("ReadAndMapFields(..)")
                //        << "Face " << faceI << " from index " << index
                //        << " is not a boundary face." << abort(FatalError);
                //}
            }
            //else
            //{
            //    WarningIn("ReadAndMapFields(..)")
            //        << "Point " << pointI << " at "
            //        << tetDualMesh.points()[pointI]
            //        << " has no dual correspondence." << endl;
            //}
        }
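// The mapping loop above assumes the following encoding in "map":
//   map[pointI] = cellI + 1     -> dual point coincides with cell centre
//   map[pointI] = -(faceI + 1)  -> dual point coincides with face centre
//   map[pointI] = 0             -> no correspondence (entry keeps nullValue)
// A small hypothetical decoder, for illustration only:
inline void decodeDualPointMap
(
    const Foam::label index,
    Foam::label& cellI,
    Foam::label& faceI
)
{
    cellI = (index > 0 ? index - 1 : -1);
    faceI = (index < 0 ? -index - 1 : -1);
}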
void Foam::readFields::loadField
(
    const word& fieldName,
    PtrList<GeometricField<Type, fvPatchField, volMesh> >& vflds,
    PtrList<GeometricField<Type, fvsPatchField, surfaceMesh> >& sflds
) const
{
    typedef GeometricField<Type, fvPatchField, volMesh> vfType;
    typedef GeometricField<Type, fvsPatchField, surfaceMesh> sfType;

    if (obr_.foundObject<vfType>(fieldName))
    {
        if (debug)
        {
            Info<< "readFields : Field " << fieldName
                << " already in database" << endl;
        }
    }
    else if (obr_.foundObject<sfType>(fieldName))
    {
        if (debug)
        {
            Info<< "readFields : Field " << fieldName
                << " already in database" << endl;
        }
    }
    else
    {
        const fvMesh& mesh = refCast<const fvMesh>(obr_);

        IOobject fieldHeader
        (
            fieldName,
            mesh.time().timeName(),
            mesh,
            IOobject::MUST_READ,
            IOobject::NO_WRITE
        );

        if
        (
            fieldHeader.headerOk()
         && fieldHeader.headerClassName() == vfType::typeName
        )
        {
            // store field locally
            Info<< " Reading " << fieldName << endl;

            label sz = vflds.size();
            vflds.setSize(sz + 1);
            vflds.set(sz, new vfType(fieldHeader, mesh));
        }
        else if
        (
            fieldHeader.headerOk()
         && fieldHeader.headerClassName() == sfType::typeName
        )
        {
            // store field locally
            Info<< " Reading " << fieldName << endl;

            label sz = sflds.size();
            sflds.setSize(sz + 1);
            sflds.set(sz, new sfType(fieldHeader, mesh));
        }
    }
}
tmp<GeometricField<Type, tetPolyPatchField, tetPointMesh> >
tetPointFieldDecomposer::decomposeField
(
    const GeometricField<Type, tetPolyPatchField, tetPointMesh>& field
) const
{
    // Create and map the internal field values
    Field<Type> internalField(field.internalField(), directAddressing());

    // Create and map the patch field values
    PtrList<tetPolyPatchField<Type> > patchFields
    (
        boundaryAddressing_.size() + 1
    );

    forAll (boundaryAddressing_, patchI)
    {
        if (boundaryAddressing_[patchI] >= 0)
        {
            patchFields.set
            (
                patchI,
                tetPolyPatchField<Type>::New
                (
                    field.boundaryField()[boundaryAddressing_[patchI]],
                    processorMesh_.boundary()[patchI],
                    DimensionedField<Type, tetPointMesh>::null(),
                    *patchFieldDecompPtrs_[patchI]
                )
            );
        }
        else
        {
            patchFields.set
            (
                patchI,
                new ProcessorPointPatchField
                <
                    tetPolyPatchField, tetPointMesh, tetPolyPatch,
                    processorTetPolyPatch, tetFemMatrix, Type
                >
                (
                    processorMesh_.boundary()[patchI],
                    DimensionedField<Type, tetPointMesh>::null()
                )
            );
        }
    }

    // Add the global patch by hand. This needs to be present on
    // all processors
    patchFields.set
    (
        patchFields.size() - 1,
        new GlobalPointPatchField
        <
            tetPolyPatchField, tetPointMesh, tetPolyPatch,
            globalTetPolyPatch, tetFemMatrix, Type
        >
        (
            processorMesh_.boundary().globalPatch(),
            DimensionedField<Type, tetPointMesh>::null()
        )
    );

    // Create the field for the processor
    return tmp<GeometricField<Type, tetPolyPatchField, tetPointMesh> >
    (
        new GeometricField<Type, tetPolyPatchField, tetPointMesh>
        (
            IOobject
            (
                field.name(),
                processorMesh_().time().timeName(),
                processorMesh_(),
                IOobject::NO_READ,
                IOobject::NO_WRITE
            ),
            processorMesh_,
            field.dimensions(),
            internalField,
            patchFields
        )
    );
}
void coupledInfo<MeshType>::setField
(
    const wordList& fieldNames,
    const dictionary& fieldDicts,
    const label internalSize,
    PtrList<GeomField>& fields
) const
{
    typedef typename GeomField::InternalField InternalField;
    typedef typename GeomField::PatchFieldType PatchFieldType;
    typedef typename GeomField::GeometricBoundaryField GeomBdyFieldType;
    typedef typename GeomField::DimensionedInternalField DimInternalField;

    // Size up the pointer list
    fields.setSize(fieldNames.size());

    // Define patch type names, assumed to be
    // common for volume and surface fields
    word emptyType(emptyPolyPatch::typeName);
    word processorType(processorPolyPatch::typeName);

    forAll(fieldNames, i)
    {
        // Create and map the patch field values
        label nPatches = subMesh().boundary().size();

        // Create field parts
        PtrList<PatchFieldType> patchFields(nPatches);

        // Read dimensions
        dimensionSet dimSet
        (
            fieldDicts.subDict(fieldNames[i]).lookup("dimensions")
        );

        // Read the internal field
        InternalField internalField
        (
            "internalField",
            fieldDicts.subDict(fieldNames[i]),
            internalSize
        );

        // Create dummy types for initial field creation
        forAll(patchFields, patchI)
        {
            if (patchI == (nPatches - 1))
            {
                // Artificially set last patch
                patchFields.set
                (
                    patchI,
                    PatchFieldType::New
                    (
                        emptyType,
                        subMesh().boundary()[patchI],
                        DimInternalField::null()
                    )
                );
            }
            else
            {
                patchFields.set
                (
                    patchI,
                    PatchFieldType::New
                    (
                        PatchFieldType::calculatedType(),
                        subMesh().boundary()[patchI],
                        DimInternalField::null()
                    )
                );
            }
        }

        // Create field with dummy patches
        fields.set
        (
            i,
            new GeomField
            (
                IOobject
                (
                    fieldNames[i],
                    subMesh().time().timeName(),
                    subMesh(),
                    IOobject::NO_READ,
                    IOobject::NO_WRITE,
                    false
                ),
                subMesh(),
                dimSet,
                internalField,
                patchFields
            )
        );

        // Set correct references for patch internal fields,
        // and fetch values from the supplied geometric field dictionaries
        GeomBdyFieldType& bf = fields[i].boundaryField();

        forAll(bf, patchI)
        {
            if (patchI == (nPatches - 1))
            {
                // Artificially set last patch
                bf.set
                (
                    patchI,
                    PatchFieldType::New
                    (
                        emptyType,
                        subMesh().boundary()[patchI],
                        fields[i].dimensionedInternalField()
                    )
                );
            }
            else
            if (isA<processorPolyPatch>(subMesh().boundary()[patchI].patch()))
            {
                bf.set
                (
                    patchI,
                    PatchFieldType::New
                    (
                        processorType,
                        subMesh().boundary()[patchI],
                        fields[i].dimensionedInternalField()
                    )
                );
            }
            else
            {
                bf.set
                (
                    patchI,
                    PatchFieldType::New
                    (
                        subMesh().boundary()[patchI],
                        fields[i].dimensionedInternalField(),
                        fieldDicts.subDict
                        (
                            fieldNames[i]
                        ).subDict("boundaryField").subDict
                        (
                            subMesh().boundary()[patchI].name()
                        )
                    )
                );
            }
        }
    }
void Foam::equationReader::createMap
(
    const label index,
    const tokenList& tl,
    PtrList<equationOperation>& map,
    labelList& opLvl,
    labelList& pl
) const
{
    // equation * eqn(&this->operator[](index));

    // Current parenthesis level - note, a negative parenthesis value
    // indicates that this is the root level of a function, and therefore
    // ',' is allowed
    label p(0);

    forAll(tl, i)
    {
        if (tl[i].isNumber())
        {
            // Internal constant. Save to internalScalars and record source
            opLvl[i] = 0;
            pl[i] = p;
            map.set
            (
                i,
                new equationOperation
                (
                    equationOperation::stinternalScalar,
                    addInternalScalar(tl[i].number()) + 1,
                    0, 0,
                    equationOperation::otnone
                )
            );
        }
        else if (tl[i].isWord())
        {
            // Could be a variable name, function or mathematical constant
            // - check for function first - a function is
            //   [word][punctuation '(']
            if
            (
                (i < (tl.size() - 1))
             && (tl[i + 1].isPunctuation())
             && (tl[i + 1].pToken() == token::BEGIN_LIST)
            )
            {
                // Function detected; function brackets are negative
                opLvl[i] = 4;
                p = -mag(p) - 1;
                pl[i] = p;
                map.set
                (
                    i,
                    new equationOperation
                    (
                        equationOperation::stnone,
                        0, 0, 0,
                        equationOperation::findOp(tl[i].wordToken())
                    )
                );

                if (map[i].operation() == equationOperation::otnone)
                {
                    OStringStream description;
                    description << tl[i].wordToken()
                        << " is not a recognized function.";
                    fatalParseError
                    (
                        index, tl, i, i,
                        "equationReader::parse",
                        description
                    );
                }

                // Set next token as well (function opening parenthesis)
                i++;
                opLvl[i] = 4;
                pl[i] = p;
                map.set
                (
                    i,
                    new equationOperation
                    (
                        equationOperation::stnone,
                        0, 0, 0,
                        equationOperation::otnone
                    )
                );
            }
            else if
            (
                (tl[i].wordToken() == "e_")
             || (tl[i].wordToken() == "pi_")
             || (tl[i].wordToken() == "twoPi_")
             || (tl[i].wordToken() == "piByTwo_")
             || (tl[i].wordToken() == "GREAT_")
             || (tl[i].wordToken() == "VGREAT_")
             || (tl[i].wordToken() == "ROOTVGREAT_")
             || (tl[i].wordToken() == "SMALL_")
             || (tl[i].wordToken() == "VSMALL_")
             || (tl[i].wordToken() == "ROOTVSMALL_")
            )
            {
                // Mathematical constant
                if
                (
                    findSource(tl[i].wordToken()).sourceType()
                 != equationOperation::stnone
                )
                {
                    // Found a possible conflicting variable name - warn
                    WarningIn("equationReader::createMap")
                        << "Equation for " << operator[](index).name()
                        << ", given by:" << token::NL << token::TAB
                        << operator[](index).rawText() << token::NL
                        << "refers to '" << tl[i].wordToken()
                        << "'. Although variable " << tl[i].wordToken()
                        << " was found in the data sources, "
                        << tl[i].wordToken() << " is a mathematical "
                        << "constant. The mathematical constant will be "
                        << "used." << endl;
                }

                opLvl[i] = 0;
                pl[i] = p;

                label internalIndex(0);

                if (tl[i].wordToken() == "e_")
                {
                    // MathConstantScope is a hack that allows equationReader
                    // to work in multiple versions of OpenFOAM. See
                    // include/versionSpecific.H
                    internalIndex =
                        addInternalScalar(MathConstantScope::e) + 1;
                }
                else if (tl[i].wordToken() == "pi_")
                {
                    internalIndex =
                        addInternalScalar(MathConstantScope::pi) + 1;
                }
                else if (tl[i].wordToken() == "twoPi_")
                {
                    internalIndex =
                        addInternalScalar(MathConstantScope::twoPi) + 1;
                }
                else if (tl[i].wordToken() == "piByTwo_")
                {
                    internalIndex =
                        addInternalScalar(MathConstantScope::piByTwo) + 1;
                }
                else if (tl[i].wordToken() == "GREAT_")
                {
                    internalIndex = addInternalScalar(GREAT) + 1;
                }
                else if (tl[i].wordToken() == "VGREAT_")
                {
                    internalIndex = addInternalScalar(VGREAT) + 1;
                }
                else if (tl[i].wordToken() == "ROOTVGREAT_")
                {
                    internalIndex = addInternalScalar(ROOTVGREAT) + 1;
                }
                else if (tl[i].wordToken() == "SMALL_")
                {
                    internalIndex = addInternalScalar(SMALL) + 1;
                }
                else if (tl[i].wordToken() == "VSMALL_")
                {
                    internalIndex = addInternalScalar(VSMALL) + 1;
                }
                else // tl[i].wordToken() == "ROOTVSMALL_"
                {
                    internalIndex = addInternalScalar(ROOTVSMALL) + 1;
                }

                map.set
                (
                    i,
                    new equationOperation
                    (
                        equationOperation::stinternalScalar,
                        internalIndex,
                        0, 0,
                        equationOperation::otnone
                    )
                );
            }
            else
            {
                // Variable name
                opLvl[i] = 0;
                pl[i] = p;
                map.set
                (
                    i,
                    new equationOperation(findSource(tl[i].wordToken()))
                );

                if (map[i].sourceIndex() == 0)
                {
                    OStringStream description;
                    description << "Variable name " << tl[i].wordToken()
                        << " not found in any available sources.";
                    fatalParseError
                    (
                        index, tl, i, i,
                        "equationReader::parse",
                        description
                    );
                }
                if (map[i].componentIndex() < 0)
                {
                    OStringStream description;
                    description << "Variable name " << tl[i].wordToken()
                        << " is interpreted as variablePart.componentPart, "
                        << "and the componentPart is not valid, or is "
                        << "required, but is missing.";
                    fatalParseError
                    (
                        index, tl, i, i,
                        "equationReader::parse",
                        description
                    );
                }
            }
        }
        else if (tl[i].isPunctuation())
        {
            switch (tl[i].pToken())
            {
                case token::BEGIN_LIST: // (
                    opLvl[i] = 4;
                    p = mag(p) + 1;
                    pl[i] = p;
                    map.set
                    (
                        i,
                        new equationOperation
                        (
                            equationOperation::stnone,
                            0, 0, 0,
                            equationOperation::otnone
                        )
                    );
                    break;
                case token::END_LIST: // )
                {
                    opLvl[i] = -4;
                    pl[i] = p;
                    p = mag(p) - 1;
                    if (p < 0)
                    {
                        OStringStream description;
                        description << "Too many ')'.";
                        fatalParseError
                        (
                            index, tl, i, i,
                            "equationReader::parse",
                            description
                        );
                    }

                    // Look for preceding parenthesis change - was it negative?
                    for (label j(i - 1); j >= 0; j--)
                    {
                        if (mag(pl[j]) == p)
                        {
                            if (pl[j] < 0)
                            {
                                p = -p;
                            }
                            break;
                        }
                    }
                    map.set
                    (
                        i,
                        new equationOperation
                        (
                            equationOperation::stnone,
                            0, 0, 0,
                            equationOperation::otnone
                        )
                    );
                    break;
                }
                case token::COMMA: // ,
                    // ',' is only accepted in a function level parenthesis
                    if (p < 0)
                    {
                        opLvl[i] = 5;
                        pl[i] = p;
                        map.set
                        (
                            i,
                            new equationOperation
                            (
                                equationOperation::stnone,
                                0, 0, 0,
                                equationOperation::otnone
                            )
                        );
                    }
                    else
                    {
                        OStringStream description;
                        description << "The comma, ',' does not make sense "
                            << "here. Only permitted in the root parenthesis "
                            << "level of a function.";
                        fatalParseError
                        (
                            index, tl, i, i,
                            "equationReader::parse",
                            description
                        );
                    }
                    break;
                case token::ADD: // +
                    opLvl[i] = 1;
                    pl[i] = p;
                    map.set
                    (
                        i,
                        new equationOperation
                        (
                            equationOperation::stnone,
                            0, 0, 0,
                            equationOperation::otplus
                        )
                    );
                    break;
                case token::SUBTRACT: // -
                    opLvl[i] = 1;
                    pl[i] = p;
                    map.set
                    (
                        i,
                        new equationOperation
                        (
                            equationOperation::stnone,
                            0, 0, 0,
                            equationOperation::otminus
                        )
                    );
                    break;
                case token::MULTIPLY: // *
                    opLvl[i] = 2;
                    pl[i] = p;
                    map.set
                    (
                        i,
                        new equationOperation
                        (
                            equationOperation::stnone,
                            0, 0, 0,
                            equationOperation::ottimes
                        )
                    );
                    break;
                case token::DIVIDE: // /
                    opLvl[i] = 2;
                    pl[i] = p;
                    map.set
                    (
                        i,
                        new equationOperation
                        (
                            equationOperation::stnone,
                            0, 0, 0,
                            equationOperation::otdivide
                        )
                    );
                    break;
                case token::COLON: // ':', meaning '^'
                {
                    OStringStream description;
                    description << "The '^' operator is not currently "
                        << "supported. Use pow(a,b) instead.";
                    fatalParseError
                    (
                        index, tl, i, i,
                        "equationReader::parse",
                        description
                    );
                    break;
                }
                /*
                    opLvl[i] = 3;
                    pl[i] = p;
                    map.set
                    (
                        i,
                        new equationOperation
                        (
                            equationOperation::stnone,
                            0, 0, 0,
                            equationOperation::otpow
                        )
                    );
                    break;
                */
                default:
                {
                    OStringStream description;
                    description << "Punctuation character '" << tl[i].pToken()
                        << "' is prohibited.";
                    fatalParseError
                    (
                        index, tl, i, i,
                        "equationReader::parse",
                        description
                    );
                    break;
                }
            } // end punctuation switch
        } // end if punctuation
        else
        {
            OStringStream description;
            description << "Unrecognized token: [" << tl[i] << "].";
            fatalParseError
            (
                index, tl, i, i,
                "equationReader::parse",
                description
            );
        }
    } // mapping loop

    if (p)
    {
        OStringStream description;
        description << "Parentheses do not match. Expecting " << mag(p)
            << " additional ')'s.";
        fatalParseError
        (
            index, tl, 0, tl.size() - 1,
            "equationReader::parse",
            description
        );
    }

    // Assign negatives (distinguish these from subtraction)
    // The difference is characterized by the preceding character:
    //  - preceded by an operator       = negative   '+' '-' '*' '/' '^'
    //  - preceded by an open bracket   = negative   '('
    //  - preceded by a comma           = negative   ','
    //  - preceded by a variable        = subtract   'word' or 'number'
    //  - preceded by a close bracket   = subtract   ')'
    // Negatives are identified by a negative dictLookupIndex
    if (map[0].operation() == equationOperation::otminus)
    {
        opLvl[0] = 2;
        map[0].dictLookupIndex() = -1;
    }

    for (label i(1); i < map.size(); i++)
    {
        if (map[i].operation() == equationOperation::otminus)
        {
            if (opLvl[i - 1] > 0)
            {
                opLvl[i] = 2;
                map[i].dictLookupIndex() = -1;
            }
        }
    }
}
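// For reference, the operator-level codes (opLvl) assigned by createMap are
// summarised here (derived from the assignments in the function itself):
//    0  operand (number, variable, mathematical constant)
//    1  '+' or '-'
//    2  '*' or '/' (also a '-' reinterpreted as a unary negative)
//    4  '(' or a function's opening parenthesis
//   -4  ')'
//    5  ','  (only inside the root parenthesis level of a function)
// ('^' would have been level 3, but it is currently rejected in favour of
// pow(a,b).)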