void Foam::regionModels::singleLayerRegion::initialise() { if (debug) { Pout<< "singleLayerRegion::initialise()" << endl; } label nBoundaryFaces = 0; const polyBoundaryMesh& rbm = regionMesh().boundaryMesh(); volVectorField& nHat = nHatPtr_(); volScalarField& magSf = magSfPtr_(); forAll(intCoupledPatchIDs_, i) { const label patchI = intCoupledPatchIDs_[i]; const polyPatch& pp = rbm[patchI]; const labelList& fCells = pp.faceCells(); nBoundaryFaces += fCells.size(); UIndirectList<vector>(nHat, fCells) = pp.faceNormals(); UIndirectList<scalar>(magSf, fCells) = mag(pp.faceAreas()); } nHat.correctBoundaryConditions(); magSf.correctBoundaryConditions(); if (nBoundaryFaces != regionMesh().nCells()) { FatalErrorIn("singleLayerRegion::initialise()") << "Number of primary region coupled boundary faces not equal to " << "the number of cells in the local region" << nl << nl << "Number of cells = " << regionMesh().nCells() << nl << "Boundary faces = " << nBoundaryFaces << nl << abort(FatalError); } scalarField passiveMagSf(magSf.size(), 0.0); passivePatchIDs_.setSize(intCoupledPatchIDs_.size(), -1); forAll(intCoupledPatchIDs_, i) { const label patchI = intCoupledPatchIDs_[i]; const polyPatch& ppIntCoupled = rbm[patchI]; if (ppIntCoupled.size() > 0) { label cellId = rbm[patchI].faceCells()[0]; const cell& cFaces = regionMesh().cells()[cellId]; label faceI = ppIntCoupled.start(); label faceO = cFaces.opposingFaceLabel(faceI, regionMesh().faces()); label passivePatchI = rbm.whichPatch(faceO); passivePatchIDs_[i] = passivePatchI; const polyPatch& ppPassive = rbm[passivePatchI]; UIndirectList<scalar>(passiveMagSf, ppPassive.faceCells()) = mag(ppPassive.faceAreas()); } } Pstream::listCombineGather(passivePatchIDs_, maxEqOp<label>()); Pstream::listCombineScatter(passivePatchIDs_); magSf.field() = 0.5*(magSf + passiveMagSf); magSf.correctBoundaryConditions(); }
bool Foam::movingConeTopoFvMesh::update() { // Do mesh changes (use inflation - put new points in topoChangeMap) autoPtr<mapPolyMesh> topoChangeMap = topoChanger_.changeMesh(true); // Calculate the new point positions depending on whether the // topological change has happened or not pointField newPoints; vector curMotionVel_ = motionVelAmplitude_* Foam::sin(time().value()*M_PI/motionVelPeriod_); Pout<< "time:" << time().value() << " curMotionVel_:" << curMotionVel_ << " curLeft:" << curLeft_ << " curRight:" << curRight_ << endl; if (topoChangeMap.valid()) { Info<< "Topology change. Calculating motion points" << endl; if (topoChangeMap().hasMotionPoints()) { Info<< "Topology change. Has premotion points" << endl; //Info<< "preMotionPoints:" << topoChangeMap().preMotionPoints() // << endl; //mkDir(time().timePath()); //{ // OFstream str(time().timePath()/"meshPoints.obj"); // Pout<< "Writing mesh with meshPoints to " << str.name() // << endl; // // const pointField& currentPoints = points(); // label vertI = 0; // forAll(currentPoints, pointI) // { // meshTools::writeOBJ(str, currentPoints[pointI]); // vertI++; // } // forAll(edges(), edgeI) // { // const edge& e = edges()[edgeI]; // str << "l " << e[0]+1 << ' ' << e[1]+1 << nl; // } //} //{ // OFstream str(time().timePath()/"preMotionPoints.obj"); // Pout<< "Writing mesh with preMotionPoints to " << str.name() // << endl; // // const pointField& newPoints = // topoChangeMap().preMotionPoints(); // label vertI = 0; // forAll(newPoints, pointI) // { // meshTools::writeOBJ(str, newPoints[pointI]); // vertI++; // } // forAll(edges(), edgeI) // { // const edge& e = edges()[edgeI]; // str << "l " << e[0]+1 << ' ' << e[1]+1 << nl; // } //} motionMask_ = vertexMarkup ( topoChangeMap().preMotionPoints(), curLeft_, curRight_ ); // Move points inside the motionMask newPoints = topoChangeMap().preMotionPoints() + ( pos(0.5 - mag(motionMask_)) // cells above the body )*curMotionVel_*time().deltaT().value(); } else { Info<< "Topology change. Already set mesh points" << endl; motionMask_ = vertexMarkup ( points(), curLeft_, curRight_ ); // Move points inside the motionMask newPoints = points() + ( pos(0.5 - mag(motionMask_)) // cells above the body )*curMotionVel_*time().deltaT().value(); } } else { Info<< "No topology change" << endl; // Set the mesh motion newPoints = points() + ( pos(0.5 - mag(motionMask_)) // cells above the body )*curMotionVel_*time().deltaT().value(); } // The mesh now contains the cells with zero volume Info << "Executing mesh motion" << endl; movePoints(newPoints); // The mesh now has got non-zero volume cells curLeft_ = average ( faceZones() [ faceZones().findZoneID("leftExtrusionFaces") ]().localPoints() ).x() - SMALL; curRight_ = average ( faceZones() [ faceZones().findZoneID("rightExtrusionFaces") ]().localPoints() ).x() + SMALL; return true; }
/**
 * @brief Gets the angle between two vectors.
 * @param vec The second vector.
 * @return Angle in radians.
 */
double Vector3D::getADelt(Vector3D vec) const
{
    // Note: the argument of acos can drift marginally outside [-1, 1]
    // through floating-point round-off when the vectors are (nearly)
    // parallel, in which case acos returns NaN.
    return acos((*this * vec) / (mag() * vec.mag()));
}
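A minimal standalone sketch of the same computation, shown only to illustrate clamping the cosine into [-1, 1] so that round-off near parallel vectors cannot push acos out of its domain; the Vec3 type and angleBetween name are illustrative assumptions, not part of Vector3D:

#include <algorithm>
#include <cmath>

// Illustrative stand-in for Vector3D (assumption, not the original class).
struct Vec3
{
    double x, y, z;
    double dot(const Vec3& o) const { return x*o.x + y*o.y + z*o.z; }
    double mag() const { return std::sqrt(dot(*this)); }
};

// Angle between two vectors, with the cosine clamped so that round-off
// near +/-1 cannot take acos outside its domain (requires C++17).
double angleBetween(const Vec3& a, const Vec3& b)
{
    const double c = a.dot(b)/(a.mag()*b.mag());
    return std::acos(std::clamp(c, -1.0, 1.0));
}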
void Foam::reitzKHRT::breakupParcel ( parcel& p, const scalar deltaT, const vector& vel, const liquidMixtureProperties& fuels ) const { label cellI = p.cell(); scalar T = p.T(); scalar r = 0.5*p.d(); scalar pc = spray_.p()[cellI]; scalar sigma = fuels.sigma(pc, T, p.X()); scalar rhoLiquid = fuels.rho(pc, T, p.X()); scalar muLiquid = fuels.mu(pc, T, p.X()); scalar rhoGas = spray_.rho()[cellI]; scalar Np = p.N(rhoLiquid); scalar semiMass = Np*pow3(p.d()); scalar weGas = p.We(vel, rhoGas, sigma); scalar weLiquid = p.We(vel, rhoLiquid, sigma); // correct the Reynolds number. Reitz is using radius instead of diameter scalar reLiquid = 0.5*p.Re(rhoLiquid, vel, muLiquid); scalar ohnesorge = sqrt(weLiquid)/(reLiquid + VSMALL); scalar taylor = ohnesorge*sqrt(weGas); vector acceleration = p.Urel(vel)/p.tMom(); vector trajectory = p.U()/mag(p.U()); scalar gt = (g_ + acceleration) & trajectory; // frequency of the fastest growing KH-wave scalar omegaKH = (0.34 + 0.38*pow(weGas, 1.5)) /((1 + ohnesorge)*(1 + 1.4*pow(taylor, 0.6))) *sqrt(sigma/(rhoLiquid*pow3(r))); // corresponding KH wave-length. scalar lambdaKH = 9.02 *r *(1.0 + 0.45*sqrt(ohnesorge)) *(1.0 + 0.4*pow(taylor, 0.7)) /pow(1.0 + 0.865*pow(weGas, 1.67), 0.6); // characteristic Kelvin-Helmholtz breakup time scalar tauKH = 3.726*b1_*r/(omegaKH*lambdaKH); // stable KH diameter scalar dc = 2.0*b0_*lambdaKH; // the frequency of the fastest growing RT wavelength. scalar helpVariable = mag(gt*(rhoLiquid - rhoGas)); scalar omegaRT = sqrt ( 2.0*pow(helpVariable, 1.5) /(3.0*sqrt(3.0*sigma)*(rhoGas + rhoLiquid)) ); // RT wave number scalar KRT = sqrt(helpVariable/(3.0*sigma + VSMALL)); // wavelength of the fastest growing RT frequency scalar lambdaRT = constant::mathematical::twoPi*cRT_/(KRT + VSMALL); // if lambdaRT < diameter, then RT waves are growing on the surface // and we start to keep track of how long they have been growing if ((p.ct() > 0) || (lambdaRT < p.d())) { p.ct() += deltaT; } // characteristic RT breakup time scalar tauRT = cTau_/(omegaRT + VSMALL); // check if we have RT breakup if ((p.ct() > tauRT) && (lambdaRT < p.d())) { // the RT breakup creates diameter/lambdaRT new droplets p.ct() = -GREAT; scalar multiplier = p.d()/lambdaRT; scalar nDrops = multiplier*Np; p.d() = cbrt(semiMass/nDrops); } // otherwise check for KH breakup else if (dc < p.d()) { // no breakup below Weber = 12 if (weGas > weberLimit_) { label injector = label(p.injector()); scalar fraction = deltaT/tauKH; // reduce the diameter according to the rate-equation p.d() = (fraction*dc + p.d())/(1.0 + fraction); scalar ms = rhoLiquid*Np*pow3(dc)*constant::mathematical::pi/6.0; p.ms() += ms; // Total number of parcels for the whole injection event label nParcels = spray_.injectors()[injector].properties()->nParcelsToInject ( spray_.injectors()[injector].properties()->tsoi(), spray_.injectors()[injector].properties()->teoi() ); scalar averageParcelMass = spray_.injectors()[injector].properties()->mass()/nParcels; if (p.ms()/averageParcelMass > msLimit_) { // set the initial ms value to -GREAT. This prevents // new droplets from being formed from the child droplet // from the KH instability // mass of stripped child parcel scalar mc = p.ms(); // Prevent child parcel from taking too much mass mc = min(mc, 0.5*p.m()); spray_.addParticle ( new parcel ( p.mesh(), p.position(), p.cell(), p.tetFace(), p.tetPt(), p.n(), dc, p.T(), mc, 0.0, 0.0, 0.0, -GREAT, p.tTurb(), 0.0, p.injector(), p.U(), p.Uturb(), p.X(), p.fuelNames() ) ); p.m() -= mc; p.ms() = 0.0; } } } }
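For reference, the Kelvin-Helmholtz quantities evaluated above are the usual Reitz correlations, written out with Oh the Ohnesorge number, Ta the Taylor number, We_g the gas Weber number, r the parcel radius, and B0/B1 the b0_/b1_ coefficients:

\[
\Omega_{KH} = \frac{0.34 + 0.38\,We_g^{1.5}}{(1 + Oh)\,(1 + 1.4\,Ta^{0.6})}\,
\sqrt{\frac{\sigma}{\rho_l\,r^3}},
\qquad
\Lambda_{KH} = 9.02\,r\,\frac{(1 + 0.45\sqrt{Oh})\,(1 + 0.4\,Ta^{0.7})}{(1 + 0.865\,We_g^{1.67})^{0.6}},
\]
\[
\tau_{KH} = \frac{3.726\,B_1\,r}{\Omega_{KH}\,\Lambda_{KH}},
\qquad
d_c = 2\,B_0\,\Lambda_{KH}.
\]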
bool Foam::StandardWallInteraction<CloudType>::correct ( const polyPatch& pp, const label faceId, bool& keepParticle, vector& U ) const { if (isA<wallPolyPatch>(pp)) { switch (interactionType_) { case PatchInteractionModel<CloudType>::itEscape: { keepParticle = false; U = vector::zero; break; } case PatchInteractionModel<CloudType>::itStick: { keepParticle = true; U = vector::zero; break; } case PatchInteractionModel<CloudType>::itRebound: { keepParticle = true; vector nw = pp.faceAreas()[pp.whichFace(faceId)]; nw /= mag(nw); scalar Un = U & nw; vector Ut = U - Un*nw; if (Un > 0) { U -= (1.0 + e_)*Un*nw; } U -= mu_*Ut; break; } default: { FatalErrorIn ( "bool StandardWallInteraction<CloudType>::correct" "(" "const polyPatch&, " "const label, " "bool&, " "vector&" ") const" ) << "Unknown interaction type " << this->interactionTypeToWord(interactionType_) << "(" << interactionType_ << ")" << endl << abort(FatalError); } } return true; } return false; }
void LRR<BasicTurbulenceModel>::correct() { if (!this->turbulence_) { return; } // Local references const alphaField& alpha = this->alpha_; const rhoField& rho = this->rho_; const surfaceScalarField& alphaRhoPhi = this->alphaRhoPhi_; const volVectorField& U = this->U_; volSymmTensorField& R = this->R_; fv::options& fvOptions(fv::options::New(this->mesh_)); ReynoldsStress<RASModel<BasicTurbulenceModel>>::correct(); tmp<volTensorField> tgradU(fvc::grad(U)); const volTensorField& gradU = tgradU(); volSymmTensorField P(-twoSymm(R & gradU)); volScalarField G(this->GName(), 0.5*mag(tr(P))); // Update epsilon and G at the wall epsilon_.boundaryFieldRef().updateCoeffs(); // Dissipation equation tmp<fvScalarMatrix> epsEqn ( fvm::ddt(alpha, rho, epsilon_) + fvm::div(alphaRhoPhi, epsilon_) - fvm::laplacian(alpha*rho*DepsilonEff(), epsilon_) == Ceps1_*alpha*rho*G*epsilon_/k_ - fvm::Sp(Ceps2_*alpha*rho*epsilon_/k_, epsilon_) + fvOptions(alpha, rho, epsilon_) ); epsEqn.ref().relax(); fvOptions.constrain(epsEqn.ref()); epsEqn.ref().boundaryManipulate(epsilon_.boundaryFieldRef()); solve(epsEqn); fvOptions.correct(epsilon_); bound(epsilon_, this->epsilonMin_); // Correct the trace of the tensorial production to be consistent // with the near-wall generation from the wall-functions const fvPatchList& patches = this->mesh_.boundary(); forAll(patches, patchi) { const fvPatch& curPatch = patches[patchi]; if (isA<wallFvPatch>(curPatch)) { forAll(curPatch, facei) { label faceCelli = curPatch.faceCells()[facei]; P[faceCelli] *= min ( G[faceCelli]/(0.5*mag(tr(P[faceCelli])) + SMALL), 1.0 ); } } }
void Foam::calc(const argList& args, const Time& runTime, const fvMesh& mesh) { bool writeResults = !args.optionFound("noWrite"); IOobject Uheader ( "U", runTime.timeName(), mesh, IOobject::MUST_READ ); IOobject Theader ( "T", runTime.timeName(), mesh, IOobject::MUST_READ ); // Check U and T exists if (Uheader.headerOk() && Theader.headerOk()) { autoPtr<volScalarField> MachPtr; volVectorField U(Uheader, mesh); if (isFile(runTime.constantPath()/"thermophysicalProperties")) { // thermophysical Mach autoPtr<basicPsiThermo> thermo ( basicPsiThermo::New(mesh) ); volScalarField Cp = thermo->Cp(); volScalarField Cv = thermo->Cv(); MachPtr.set ( new volScalarField ( IOobject ( "Ma", runTime.timeName(), mesh ), mag(U)/(sqrt((Cp/Cv)*(Cp - Cv)*thermo->T())) ) ); } else { // thermodynamic Mach IOdictionary thermoProps ( IOobject ( "thermodynamicProperties", runTime.constant(), mesh, IOobject::MUST_READ, IOobject::NO_WRITE ) ); dimensionedScalar R(thermoProps.lookup("R")); dimensionedScalar Cv(thermoProps.lookup("Cv")); volScalarField T(Theader, mesh); MachPtr.set ( new volScalarField ( IOobject ( "Ma", runTime.timeName(), mesh ), mag(U)/(sqrt(((Cv + R)/Cv)*R*T)) ) ); } Info<< "Mach max : " << max(MachPtr()).value() << endl; if (writeResults) { MachPtr().write(); } } else { Info<< " Missing U or T" << endl; } }
Foam::pointIndexHit Foam::searchablePlate::findLine ( const point& start, const point& end ) const { pointIndexHit info ( true, vector::zero, 0 ); const vector dir(end-start); if (mag(dir[normalDir_]) < VSMALL) { info.setMiss(); info.setIndex(-1); } else { scalar t = (origin_[normalDir_]-start[normalDir_]) / dir[normalDir_]; if (t < 0 || t > 1) { info.setMiss(); info.setIndex(-1); } else { info.rawPoint() = start+t*dir; info.rawPoint()[normalDir_] = origin_[normalDir_]; // Clip to edges for (direction dir = 0; dir < vector::nComponents; dir++) { if (dir != normalDir_) { if (info.rawPoint()[dir] < origin_[dir]) { info.setMiss(); info.setIndex(-1); break; } else if (info.rawPoint()[dir] > origin_[dir]+span_[dir]) { info.setMiss(); info.setIndex(-1); break; } } } } } // Debug if (info.hit()) { treeBoundBox bb(origin_, origin_+span_); bb.min()[normalDir_] -= 1E-6; bb.max()[normalDir_] += 1E-6; if (!bb.contains(info.hitPoint())) { FatalErrorIn("searchablePlate::findLine(..)") << "bb:" << bb << endl << "origin_:" << origin_ << endl << "span_:" << span_ << endl << "normalDir_:" << normalDir_ << endl << "hitPoint:" << info.hitPoint() << abort(FatalError); } } return info; }
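In equation form, the test above is a 1-D ray/plane calculation along normalDir_ followed by in-plane clipping; with s = start, d = end - start, o = origin_ and n = normalDir_:

\[
t = \frac{o_n - s_n}{d_n}, \qquad \mathbf{p} = \mathbf{s} + t\,\mathbf{d}, \qquad 0 \le t \le 1,
\]

and the hit is kept only if \( o_i \le p_i \le o_i + \mathrm{span}_i \) for both in-plane directions i.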
Vector Vector::norm()
{
    // Returns a unit vector in the direction of *this.
    // Note: there is no guard against a zero-magnitude vector, and the
    // method could likely be declared const since it does not modify *this.
    return *this / mag();
}
void Foam::directAMI<SourcePatch, TargetPatch>::appendToDirectSeeds ( labelList& mapFlag, labelList& srcTgtSeed, DynamicList<label>& srcSeeds, DynamicList<label>& nonOverlapFaces, label& srcFacei, label& tgtFacei ) const { const labelList& srcNbr = this->srcPatch_.faceFaces()[srcFacei]; const labelList& tgtNbr = this->tgtPatch_.faceFaces()[tgtFacei]; const pointField& srcPoints = this->srcPatch_.points(); const pointField& tgtPoints = this->tgtPatch_.points(); const vectorField& srcCf = this->srcPatch_.faceCentres(); forAll(srcNbr, i) { label srcI = srcNbr[i]; if ((mapFlag[srcI] == 0) && (srcTgtSeed[srcI] == -1)) { // first attempt: match by comparing face centres const face& srcF = this->srcPatch_[srcI]; const point& srcC = srcCf[srcI]; scalar tol = GREAT; forAll(srcF, fpI) { const point& p = srcPoints[srcF[fpI]]; scalar d2 = magSqr(p - srcC); if (d2 < tol) { tol = d2; } } tol = max(SMALL, 0.0001*sqrt(tol)); bool found = false; forAll(tgtNbr, j) { label tgtI = tgtNbr[j]; const face& tgtF = this->tgtPatch_[tgtI]; const point tgtC = tgtF.centre(tgtPoints); if (mag(srcC - tgtC) < tol) { // new match - append to lists found = true; srcTgtSeed[srcI] = tgtI; srcSeeds.append(srcI); break; } } // second attempt: match by shooting a ray into the tgt face if (!found) { const vector srcN = srcF.normal(srcPoints); forAll(tgtNbr, j) { label tgtI = tgtNbr[j]; const face& tgtF = this->tgtPatch_[tgtI]; pointHit ray = tgtF.ray(srcCf[srcI], srcN, tgtPoints); if (ray.hit()) { // new match - append to lists found = true; srcTgtSeed[srcI] = tgtI; srcSeeds.append(srcI); break; } } }
    volTensorField Omega(skew(gradU));

    // Reynolds stress equation
    tmp<fvSymmTensorMatrix> REqn
    (
        fvm::ddt(alpha, rho, R)
      + fvm::div(alphaRhoPhi, R)
      - fvm::laplacian(alpha*rho*DREff(), R)
      + fvm::Sp(((C1_/2)*epsilon_ + (C1s_/2)*G)*alpha*rho/k_, R)
      ==
        alpha*rho*P
      - ((1.0/3.0)*I)*(((2.0 - C1_)*epsilon_ - C1s_*G)*alpha*rho)
      + (C2_*(alpha*rho*epsilon_))*dev(innerSqr(b))
      + alpha*rho*k_
       *(
            (C3_ - C3s_*mag(b))*dev(S)
          + C4_*dev(twoSymm(b&S))
          + C5_*twoSymm(b&Omega)
        )
      + fvOptions(alpha, rho, R)
    );

    REqn.ref().relax();
    fvOptions.constrain(REqn.ref());
    solve(REqn);
    fvOptions.correct(R);

    this->boundNormalStress(R);

    k_ = 0.5*tr(R);
int main(int argc, char *argv[]) { timeSelector::addOptions(); # include "addRegionOption.H" argList::addBoolOption ( "noWrite", "suppress writing results" ); #include "addDictOption.H" #include "setRootCase.H" #include "createTime.H" instantList timeDirs = timeSelector::select0(runTime, args); #include "createNamedMesh.H" #include "createFields.H" // Create particle cloud cfdemCloud particleCloud(mesh); // Post-processing dictionary #include "postProcessingDict.H" // Create conditional averaging class conditionalAve condAve(postProcessingDict,conditionalAveragingDict, mesh,nVariable,nAveragingVariable,nTotalCase,conditionalAveraging); // Create multiple variable conditional averaging class multipleVarsConditionalAve multipleVarsCondAve(postProcessingDict,multConditionalAveragingDict, mesh,multNVariable,multNAveragingVariable,multNTotalCase,multConditionalAveraging); forAll(timeDirs, timeI) { runTime.setTime(timeDirs[timeI], timeI); Pout << " " << endl; Pout << "\nTime = " << runTime.timeName() << endl; mesh.readUpdate(); // Read gas volume fraction IOobject voidfractionheader ( "voidfraction", runTime.timeName(), mesh, IOobject::MUST_READ ); Info<< " Reading voidfraction" << endl; volScalarField voidfraction(voidfractionheader,mesh); // Read Eulerian particle velocity IOobject Usheader ( "Us", runTime.timeName(), mesh, IOobject::MUST_READ ); Info<< " Reading Us" << endl; volVectorField Us(Usheader,mesh); // Read particle kinetic stresses IOobject sigmaKinHeader ( "sigmaKin", runTime.timeName(), mesh, IOobject::MUST_READ ); Info<< " Reading sigmaKin" << endl; volSymmTensorField sigmaKin(sigmaKinHeader,mesh); // Read particle collisional stresses IOobject sigmaCollHeader ( "sigmaColl", runTime.timeName(), mesh, IOobject::MUST_READ ); Info<< " Reading sigmaColl" << endl; volSymmTensorField sigmaColl(sigmaCollHeader,mesh); // Particle pressure volScalarField Pp ( IOobject ( "Pp", runTime.timeName(), mesh, IOobject::NO_READ, IOobject::AUTO_WRITE ), mesh, dimensionedScalar( "zero", dimensionSet(1,-1,-2,0,0), scalar(0) ) ); Pp = 1./3. * tr( sigmaKin + sigmaColl ) ; // Write into the results folder Pp.write(); // Calculate the particulate pressure gradient volVectorField gradPp(fvc::grad(Pp)); // Particle shear stress volTensorField sigmap ( IOobject ( "sigmap", runTime.timeName(), mesh, IOobject::NO_READ, IOobject::AUTO_WRITE ), mesh, dimensionedTensor( "zero", dimensionSet(1,-1,-2,0,0), tensor(0,0,0,0,0,0,0,0,0) ) ); sigmap = ( sigmaKin + sigmaColl ) - tensor(I) * Pp; // Write into the results folder sigmap.write(); // Particle viscosity volScalarField mup ( IOobject ( "mup", runTime.timeName(), mesh, IOobject::NO_READ, IOobject::AUTO_WRITE ), mesh, dimensionedScalar( "zero", dimensionSet(1,-1,-1,0,0), scalar(0) ) ); // Particle shear stresses volTensorField S("S",fvc::grad(Us) + fvc::grad(Us)().T()); dimensionedScalar SSsmall("zero", dimensionSet(0,0,-2,0,0,0,0), SMALL); mup = ( sigmap && S ) / ( max ( S && S, SSsmall ) ); // Limit by zero mup.max(0.); // Write into the results folder mup.write(); // Particle viscosity/sqrt(p) dimensionedScalar PpSmall("zero", dimensionSet(1,-1,-2,0,0,0,0), 1.e-06); volScalarField mupSqrtPp ( IOobject ( "mupSqrtPp", runTime.timeName(), mesh, IOobject::NO_READ, IOobject::AUTO_WRITE ), mup/sqrt((mag(Pp)+PpSmall)*rhop)/dp ); //- Dummy word word varName(""); //- Conditional averaging condAve.calc(); condAve.write(varName); //- Multi-variable conditional averaging multipleVarsCondAve.calc(); multipleVarsCondAve.write(varName); }
void Foam::dirichletNeumannFriction::correct ( const vectorField& slavePressure, const PrimitivePatch<face, List, pointField>& masterFaceZonePatch, const PrimitivePatch<face, List, pointField>& slaveFaceZonePatch, const intersection::algorithm alg, const intersection::direction dir, const word interpolationMethod, const word fieldName, const Switch orthotropic, const word nonLinear, const vectorField& slaveFaceNormals ) { const fvMesh& mesh = mesh_; const label slavePatchIndex = slavePatchID(); const label masterPatchIndex = masterPatchID(); contactIterNum_++; // we have local masterDU and we want to interpolate it to the slave // to get local masterDUInterpToSlave (i.e. masterDU interpolated // to the slave) // so the method is: // create global masterDU field // interpolate global masterDU from master global face zone to // slave global zone // then find local masterDUInterpToSlave from the global interpolated field vectorField masterDUInterpToSlave (mesh.boundaryMesh()[slavePatchIndex].size(), vector::zero); // global master DU vectorField globalMasterDU(masterFaceZonePatch.size(), vector::zero); // lookup current displacement field const volVectorField& dispField = mesh.objectRegistry::lookupObject<volVectorField>(fieldName); // local master and slave DU increment vectorField masterDU = dispField.boundaryField()[masterPatchIndex]; vectorField slaveDU = dispField.boundaryField()[slavePatchIndex]; if (fieldName == "U") { // lookup old U const volVectorField& dispOldField = mesh.objectRegistry::lookupObject<volVectorField>(fieldName+"_0"); // subtract old U masterDU -= dispOldField.boundaryField()[masterPatchIndex]; slaveDU -= dispOldField.boundaryField()[slavePatchIndex]; } else if (fieldName != "DU") { FatalError << "dirichletNeumannFriction::correct()\n" " The displacement field must be called U or DU" << exit(FatalError); } // put local masterDU into globalMasterDU const label masterPatchStart = mesh.boundaryMesh()[masterPatchIndex].start(); forAll(masterDU, i) { globalMasterDU[ mesh.faceZones()[masterFaceZoneID() ].whichFace(masterPatchStart + i)] = masterDU[i]; } //- exchange parallel data // sum because each face is only on one proc reduce(globalMasterDU, sumOp<vectorField>()); // globalMasterDU is interpolated to the slave vectorField globalMasterDUInterpToSlave (slaveFaceZonePatch.size(), vector::zero); // interpolate DU from master to slave using inverse distance or ggi if (interpolationMethod == "patchToPatch") { PatchToPatchInterpolation< PrimitivePatch< face, List, pointField >, PrimitivePatch<face, List, pointField> > masterToSlavePatchToPatchInterpolator ( masterFaceZonePatch, // from zone slaveFaceZonePatch, // to zone alg, dir ); globalMasterDUInterpToSlave = masterToSlavePatchToPatchInterpolator.faceInterpolate<vector> ( globalMasterDU ); } else if (interpolationMethod == "ggi") { GGIInterpolation< PrimitivePatch< face, List, pointField >, PrimitivePatch< face, List, pointField > > masterToSlaveGgiInterpolator ( masterFaceZonePatch, // master zone slaveFaceZonePatch, // slave zone tensorField(0), tensorField(0), vectorField(0), 0.0, 0.0, true, ggiInterpolation::AABB ); globalMasterDUInterpToSlave = masterToSlaveGgiInterpolator.masterToSlave ( globalMasterDU ); } else { FatalError << "dirichletNeumannFriction::correct()\n" "interpolationMethod " << interpolationMethod << " not known\n" "interpolationMethod must be patchToPatch or ggi" << exit(FatalError); } // now put global back into local const label slavePatchStart = mesh.boundaryMesh()[slavePatchIndex].start(); 
forAll(masterDUInterpToSlave, i) { masterDUInterpToSlave[i] = globalMasterDUInterpToSlave [ mesh.faceZones()[slaveFaceZoneID()].whichFace(slavePatchStart + i) ]; } // Now masterDUInterpToSlave should have masterDU interpolated to the slave // Calculate current slave shear traction from the normal gradient field const fvPatch& slavePatch = mesh.boundary()[slavePatchIndex]; const fvPatchField<tensor>& gradField = slavePatch.lookupPatchField<volTensorField, tensor> ("grad(" + fieldName + ")"); bool incremental(fieldName == "DU"); vectorField slaveShearTraction ( (I - sqr(slaveFaceNormals)) & tractionBoundaryGradient::traction ( gradField, fieldName, "U", slavePatch, orthotropic, nonLinearGeometry::nonLinearNames_[nonLinear], incremental ) ); // algorithm // if the face pressure is negative/zero then the friction is zero // so set valueFrac to zero and traction to zero // if the face pressure is positive and the shear traction is less // than fricCoeff*pressure then this is a sticking face so // set the valueFrac to (I-n^2) and set the disp to remove any // slip // if the face pressure is positive and the shear traction is greater // than fricCoeff*pressure then this is a slipping face so // set the valueFrac to zero and set the shear traction to // fricCoeff*pressure in the opposite direction to slip // if the shear traction on a slipping face is acting in the same // direction as the slip then this face should not be slipping // so we make it a sticking face // const volVectorField& prevSlaveDispField = // mesh.objectRegistry::lookupObject<volVectorField>(fieldName); const vectorField prevSlaveShearDisp = (I - sqr(slaveFaceNormals)) & dispField.boundaryField()[slavePatchIndex]; label numSlipFaces = 0; label numStickFaces = 0; scalarField& stickSlip = stickSlipFaces(); const scalarField magSlavePressure = mag(slavePressure); scalar maxMagSlavePressure = 0.0; if (slavePressure.size() > 0) maxMagSlavePressure = max(magSlavePressure); reduce(maxMagSlavePressure, maxOp<scalar>()); // slip is the difference between the master tangential DU // and slave tangential DU vectorField slip = (I - sqr(slaveFaceNormals)) & ( slaveDU - masterDUInterpToSlave); // under-relax the slip slip = relaxationFactor_*slip + (1.0 - relaxationFactor_)*oldSlip_; oldSlip_ = slip; vectorField slipDir = slip/(mag(slip)+SMALL); const scalar pressureTol = 1e-3*maxMagSlavePressure; // first we calculate slaveDisp assuming every face is sticking // and we calculate slaveTraction assuming every face is sliding // then we use the slaveValueFrac to set the face to slip/stick // depending on the slip function slaveDisp_ = -slip + prevSlaveShearDisp; //slaveDisp_ = -slip + slaveDisp_; // see if convergence is better scalarField slipTrac = frictionLawPtr_->slipTraction(magSlavePressure); // new forAll(slaveDisp_, facei) { if (mag(slavePressure[facei]) < pressureTol) { // not in contact slaveDisp_[facei] = prevSlaveShearDisp[facei]; slaveTraction_[facei] = vector::zero; slaveValueFrac_[facei] = symmTensor::zero; stickSlip[facei] = -1; } else if ( (mag(slaveShearTraction[facei]) > 0.999*slipTrac[facei]) && ((slip[facei] & slaveShearTraction[facei]) < 0.0) // opposite directions ) { // slip slaveDisp_[facei] = -slip[facei] + prevSlaveShearDisp[facei]; // better convergence slaveTraction_[facei] = -slipDir[facei] * slipTrac[facei]; slaveValueFrac_[facei] = symmTensor::zero; numSlipFaces++; stickSlip[facei] = 0; } else { // stick slaveDisp_[facei] = -slip[facei] + prevSlaveShearDisp[facei]; slaveTraction_[facei] = -slipDir[facei] * 
slipTrac[facei]; //slaveTraction_[facei] = slaveShearTraction[facei]; //slaveTraction_[facei] = vector::zero; slaveValueFrac_[facei] = (I - sqr(slaveFaceNormals[facei])); numStickFaces++; stickSlip[facei] = 2; } } // slaveTraction_ = -slipDir * slipTrac; // // slipFunc is 1.0 for slip and 0.0 for stick // // scalarField slipFunc = mag(slaveShearTraction) - 0.999*slipTrac; // scalarField slipFunc = mag(slaveShearTraction) - slipTrac; // //slipFunc /= mag(slipFunc+SMALL); // bugfix add SMALL // //slipFunc = max(slipFunc, 0.0); // //const scalar slipStressTol = 1e-4*gMax(mag(slaveShearTraction)); // const scalar pressureTol = 1e-3*maxMagSlavePressure; // forAll(slipFunc, facei) // { // if (slipFunc[facei] > pressureTol) // { // slipFunc[facei] = 1.0; // } // else // { // slipFunc[facei] = 0.0; // } // } // // if the slip and trac are in the same direction then we will change // // the face from slip to stick // // { // // scalarField changeFace = slip & slaveTraction_; // // forAll(changeFace, facei) // // { // // if (changeFace[facei] > SMALL) // // { // // slipFunc[facei] = 0.0; // // } // // } // // } // // stickSlip // // -1 for faces not in contact // // 0 for slipping faces // // 1 for stick faces // stickSlip = (1.0 - slipFunc); // // const scalar pressureTol = 1e-3*maxMagSlavePressure; // forAll(slip, facei) // { // if (magSlavePressure[facei] < pressureTol) // { // stickSlip[facei] = -1; // } // } // //slaveValueFrac_ = (1.0 - slipFunc) * (I - sqr(slaveFaceNormals)); // slaveValueFrac_ = max(stickSlip, 0.0) * (I - sqr(slaveFaceNormals)); // // set valueFace to zero for faces not in contact // // and also reset traction on sticking faces and displacement // // on slipping faces // forAll(slip, facei) // { // if (magSlavePressure[facei] < pressureTol) // { // slaveDisp_[facei] = prevSlaveShearDisp[facei]; // slaveTraction_[facei] = vector::zero; // slaveValueFrac_[facei] = symmTensor::zero; // if ( mag(stickSlip[facei] + 1) > SMALL) // Info << "face " << facei << "changed to tracFree" << endl; // stickSlip[facei] = -1; // } // else if (slipFunc[facei] > SMALL) // { // slaveDisp_[facei] = prevSlaveShearDisp[facei]; // numSlipFaces++; // if ( mag(stickSlip[facei]) > SMALL) // Info << "face " << facei << "changed to slip" << endl; // stickSlip[facei] = 0; // } // else // { // //slaveTraction_[facei] = slaveShearTraction[facei]; // numStickFaces++; // if ( mag(stickSlip[facei] - 1) > SMALL) // Info << "face " << facei << "changed to stick" << endl; // stickSlip[facei] = 1; // } // } // correct oscillations if (oscillationCorr_) { //correctOscillations(slaveFaceZonePatch); // interpolate face values to points then interpolate back // this essentially smooths the field primitivePatchInterpolation localSlaveInterpolator (mesh.boundaryMesh()[slavePatchIndex]); vectorField slaveDispPoints (mesh.boundaryMesh()[slavePatchIndex].nPoints(), vector::zero); for (int i=0; i<smoothingSteps_; i++) { slaveDispPoints = localSlaveInterpolator.faceToPointInterpolate<vector>(slaveDisp_); slaveDisp_ = localSlaveInterpolator.pointToFaceInterpolate<vector> (slaveDispPoints); // make sure no normal component slaveDisp_ = (I - sqr(slaveFaceNormals)) & slaveDisp_; } } // under-relax traction slaveDisp_ = relaxationFactor_*slaveDisp_ + (1.0 - relaxationFactor_)*prevSlaveShearDisp; //oldSlaveDisp_; oldSlaveDisp_ = slaveDisp_; slaveValueFrac_ = relaxationFactor_*slaveValueFrac_ + (1.0 - relaxationFactor_)*oldSlaveValueFrac_; oldSlaveValueFrac_ = slaveValueFrac_; slaveTraction_ = relaxationFactor_*slaveTraction_ + 
        (1.0 - relaxationFactor_)*oldSlaveTraction_;
    oldSlaveTraction_ = slaveTraction_;
    stickSlip =
        relaxationFactor_*stickSlip + (1.0 - relaxationFactor_)*oldStickSlip_;
    oldStickSlip_ = stickSlip;

    // get global values
    // in parallel, the log is polluted with warnings that
    // I am getting max of a list of size zero so
    // I will get the max of procs which have some
    // of the slave faces
    //scalar maxMagMasterTraction = gMax(mag(slaveTraction_))
    scalar maxMagMasterTraction = 0.0;
    if (slaveTraction_.size() > 0)
    {
        maxMagMasterTraction = max(mag(slaveTraction_));
    }
    reduce(maxMagMasterTraction, maxOp<scalar>());
    reduce(numSlipFaces, sumOp<int>());
    reduce(numStickFaces, sumOp<int>());

    // master writes to contact info file
    if (Pstream::master() && (contactIterNum_ % infoFreq_ == 0))
    {
        OFstream& contactFile = *contactFilePtr_;
        int width = 20;
        contactFile << mesh.time().value();
        contactFile.width(width);
        contactFile << contactIterNum_;
        contactFile.width(width);
        contactFile << relaxationFactor_;
        contactFile.width(width);
        contactFile << numSlipFaces;
        contactFile.width(width);
        contactFile << numStickFaces;
        contactFile.width(width);
        contactFile << maxMagMasterTraction << endl;
    }
}
void Foam::timeVaryingMappedFixedValuePointPatchField<Type>::updateCoeffs() { if (this->updated()) { return; } checkTable(); // Interpolate between the sampled data Type wantedAverage; if (endSampleTime_ == -1) { // only start value if (debug) { Pout<< "updateCoeffs : Sampled, non-interpolated values" << " from start time:" << sampleTimes_[startSampleTime_].name() << nl; } this->operator==(startSampledValues_); wantedAverage = startAverage_; } else { scalar start = sampleTimes_[startSampleTime_].value(); scalar end = sampleTimes_[endSampleTime_].value(); scalar s = (this->db().time().value()-start)/(end-start); if (debug) { Pout<< "updateCoeffs : Sampled, interpolated values" << " between start time:" << sampleTimes_[startSampleTime_].name() << " and end time:" << sampleTimes_[endSampleTime_].name() << " with weight:" << s << endl; } this->operator==((1-s)*startSampledValues_ + s*endSampledValues_); wantedAverage = (1-s)*startAverage_ + s*endAverage_; } // Enforce average. Either by scaling (if scaling factor > 0.5) or by // offsetting. if (setAverage_) { const Field<Type>& fld = *this; Type averagePsi = gAverage(fld); if (debug) { Pout<< "updateCoeffs :" << " actual average:" << averagePsi << " wanted average:" << wantedAverage << endl; } if (mag(averagePsi) < VSMALL) { // Field too small to scale. Offset instead. const Type offset = wantedAverage - averagePsi; if (debug) { Pout<< "updateCoeffs :" << " offsetting with:" << offset << endl; } this->operator==(fld+offset); } else { const scalar scale = mag(wantedAverage)/mag(averagePsi); if (debug) { Pout<< "updateCoeffs :" << " scaling with:" << scale << endl; } this->operator==(scale*fld); } } if (debug) { Pout<< "updateCoeffs : set fixedValue to min:" << gMin(*this) << " max:" << gMax(*this) << endl; } fixedValuePointPatchField<Type>::updateCoeffs(); }
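For reference, the blending and optional average enforcement above reduce to:

\[
s = \frac{t - t_0}{t_1 - t_0}, \qquad \phi = (1 - s)\,\phi_0 + s\,\phi_1,
\]

then, when setAverage is active, either an offset \(\phi \mathrel{+}= \langle\phi\rangle_{\mathrm{wanted}} - \langle\phi\rangle\) if \(|\langle\phi\rangle|\) is vanishingly small, or a rescaling \(\phi \mathrel{*}= |\langle\phi\rangle_{\mathrm{wanted}}| / |\langle\phi\rangle|\) otherwise.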
// Cutting line of two planes
Foam::plane::ray Foam::plane::planeIntersect(const plane& plane2) const
{
    // Mathworld plane-plane intersection. Assume there is a point on the
    // intersection line with z=0 and solve the two plane equations
    // for that (now 2x2 equation in x and y)
    // Better: use either z=0 or x=0 or y=0.

    const vector& n1 = normal();
    const vector& n2 = plane2.normal();

    const point& p1 = refPoint();
    const point& p2 = plane2.refPoint();

    scalar n1p1 = n1&p1;
    scalar n2p2 = n2&p2;

    vector dir = n1 ^ n2;

    // Determine zeroed out direction (can be x,y or z) by looking at which
    // has the largest component in dir.
    scalar magX = mag(dir.x());
    scalar magY = mag(dir.y());
    scalar magZ = mag(dir.z());

    direction iZero, i1, i2;

    if (magX > magY)
    {
        if (magX > magZ)
        {
            iZero = 0;
            i1 = 1;
            i2 = 2;
        }
        else
        {
            iZero = 2;
            i1 = 0;
            i2 = 1;
        }
    }
    else
    {
        if (magY > magZ)
        {
            iZero = 1;
            i1 = 2;
            i2 = 0;
        }
        else
        {
            iZero = 2;
            i1 = 0;
            i2 = 1;
        }
    }

    vector pt;

    pt[iZero] = 0;
    pt[i1] = (n2[i2]*n1p1 - n1[i2]*n2p2)/(n1[i1]*n2[i2] - n2[i1]*n1[i2]);
    pt[i2] = (n2[i1]*n1p1 - n1[i1]*n2p2)/(n1[i2]*n2[i1] - n1[i1]*n2[i2]);

    return ray(pt, dir);
}
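With the dropped coordinate set to zero, the remaining two components of the point solve the 2x2 system formed by the two plane equations, and the line direction is d = n1 x n2:

\[
\begin{pmatrix} n_{1,i_1} & n_{1,i_2} \\ n_{2,i_1} & n_{2,i_2} \end{pmatrix}
\begin{pmatrix} p_{i_1} \\ p_{i_2} \end{pmatrix}
=
\begin{pmatrix} \mathbf{n}_1\cdot\mathbf{p}_1 \\ \mathbf{n}_2\cdot\mathbf{p}_2 \end{pmatrix},
\]
\[
p_{i_1} = \frac{n_{2,i_2}(\mathbf{n}_1\cdot\mathbf{p}_1) - n_{1,i_2}(\mathbf{n}_2\cdot\mathbf{p}_2)}
               {n_{1,i_1} n_{2,i_2} - n_{2,i_1} n_{1,i_2}},
\qquad
p_{i_2} = \frac{n_{2,i_1}(\mathbf{n}_1\cdot\mathbf{p}_1) - n_{1,i_1}(\mathbf{n}_2\cdot\mathbf{p}_2)}
               {n_{1,i_2} n_{2,i_1} - n_{1,i_1} n_{2,i_2}}.
\]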
void Foam::processorPolyPatch::calcGeometry(PstreamBuffers& pBufs) { if (Pstream::parRun()) { { UIPstream fromNeighbProc(neighbProcNo(), pBufs); fromNeighbProc >> neighbFaceCentres_ >> neighbFaceAreas_ >> neighbFaceCellCentres_; } // My normals vectorField faceNormals(size()); // Neighbour normals vectorField nbrFaceNormals(neighbFaceAreas_.size()); // Face match tolerances scalarField tols = calcFaceTol(*this, points(), faceCentres()); // Calculate normals from areas and check forAll(faceNormals, facei) { scalar magSf = mag(faceAreas()[facei]); scalar nbrMagSf = mag(neighbFaceAreas_[facei]); scalar avSf = (magSf + nbrMagSf)/2.0; // For small face area calculation the results of the area // calculation have been found to only be accurate to ~1e-20 if (magSf < SMALL || nbrMagSf < SMALL) { // Undetermined normal. Use dummy normal to force separation // check. faceNormals[facei] = point(1, 0, 0); nbrFaceNormals[facei] = -faceNormals[facei]; tols[facei] = GREAT; } else if (mag(magSf - nbrMagSf) > matchTolerance()*sqr(tols[facei])) { fileName nm ( boundaryMesh().mesh().time().path() /name()+"_faces.obj" ); Pout<< "processorPolyPatch::calcGeometry : Writing my " << size() << " faces to OBJ file " << nm << endl; writeOBJ(nm, *this, points()); OFstream ccStr ( boundaryMesh().mesh().time().path() /name() + "_faceCentresConnections.obj" ); Pout<< "processorPolyPatch::calcGeometry :" << " Dumping cell centre lines between" << " corresponding face centres to OBJ file" << ccStr.name() << endl; label vertI = 0; forAll(faceCentres(), facej) { const point& c0 = neighbFaceCentres_[facej]; const point& c1 = faceCentres()[facej]; writeOBJ(ccStr, c0, c1, vertI); } FatalErrorInFunction << "face " << facei << " area does not match neighbour by " << 100*mag(magSf - nbrMagSf)/avSf << "% -- possible face ordering problem." << endl << "patch:" << name() << " my area:" << magSf << " neighbour area:" << nbrMagSf << " matching tolerance:" << matchTolerance()*sqr(tols[facei]) << endl << "Mesh face:" << start()+facei << " vertices:" << UIndirectList<point>(points(), operator[](facei))() << endl << "If you are certain your matching is correct" << " you can increase the 'matchTolerance' setting" << " in the patch dictionary in the boundary file." << endl << "Rerun with processor debug flag set for" << " more information." << exit(FatalError); } else {
void KochHillDrag::setForce() const { if (scaleDia_ > 1) Info << "KochHill using scale = " << scaleDia_ << endl; else if (particleCloud_.cg() > 1){ scaleDia_=particleCloud_.cg(); Info << "KochHill using scale from liggghts cg = " << scaleDia_ << endl; } // get viscosity field #ifdef comp const volScalarField nufField = particleCloud_.turbulence().mu()/rho_; #else const volScalarField& nufField = particleCloud_.turbulence().nu(); #endif vector position(0,0,0); scalar voidfraction(1); vector Ufluid(0,0,0); vector drag(0,0,0); label cellI=0; vector Us(0,0,0); vector Ur(0,0,0); scalar ds(0); scalar nuf(0); scalar rho(0); scalar magUr(0); scalar Rep(0); scalar Vs(0); scalar volumefraction(0); scalar betaP(0); interpolationCellPoint<scalar> voidfractionInterpolator_(voidfraction_); interpolationCellPoint<vector> UInterpolator_(U_); #include "setupProbeModel.H" for(int index = 0;index < particleCloud_.numberOfParticles(); index++) { //if(mask[index][0]) //{ cellI = particleCloud_.cellIDs()[index][0]; drag = vector(0,0,0); betaP = 0; Vs = 0; Ufluid =vector(0,0,0); voidfraction=0; if (cellI > -1) // particle Found { if(interpolation_) { position = particleCloud_.position(index); voidfraction = voidfractionInterpolator_.interpolate(position,cellI); Ufluid = UInterpolator_.interpolate(position,cellI); //Ensure interpolated void fraction to be meaningful // Info << " --> voidfraction: " << voidfraction << endl; if(voidfraction>1.00) voidfraction = 1.00; if(voidfraction<0.40) voidfraction = 0.40; }else { voidfraction = voidfraction_[cellI]; Ufluid = U_[cellI]; } Us = particleCloud_.velocity(index); Ur = Ufluid-Us; ds = particleCloud_.d(index); nuf = nufField[cellI]; rho = rho_[cellI]; magUr = mag(Ur); Rep = 0; Vs = ds*ds*ds*M_PI/6; volumefraction = 1-voidfraction+SMALL; if (magUr > 0) { // calc particle Re Nr Rep = ds/scaleDia_*voidfraction*magUr/(nuf+SMALL); // calc model coefficient F0 scalar F0=0.; if(volumefraction < 0.4) { F0 = (1+3*sqrt((volumefraction)/2)+135/64*volumefraction*log(volumefraction) +16.14*volumefraction )/ (1+0.681*volumefraction-8.48*sqr(volumefraction) +8.16*volumefraction*volumefraction*volumefraction ); } else { F0 = 10*volumefraction/(voidfraction*voidfraction*voidfraction); } // calc model coefficient F3 scalar F3 = 0.0673+0.212*volumefraction+0.0232/pow(voidfraction,5); //Calculate F scalar F = voidfraction * (F0 + 0.5*F3*Rep); // calc drag model coefficient betaP betaP = 18.*nuf*rho/(ds/scaleDia_*ds/scaleDia_)*voidfraction*F; // calc particle's drag drag = Vs*betaP*Ur*scaleDrag_; if (modelType_=="B") drag /= voidfraction; } if(verbose_ && index >=0 && index <2) { Pout << "cellI = " << cellI << endl; Pout << "index = " << index << endl; Pout << "Us = " << Us << endl; Pout << "Ur = " << Ur << endl; Pout << "ds = " << ds << endl; Pout << "ds/scale = " << ds/scaleDia_ << endl; Pout << "rho = " << rho << endl; Pout << "nuf = " << nuf << endl; Pout << "voidfraction = " << voidfraction << endl; Pout << "Rep = " << Rep << endl; Pout << "betaP = " << betaP << endl; Pout << "drag = " << drag << endl; } //Set value fields and write the probe if(probeIt_) { #include "setupProbeModelfields.H" vValues.append(drag); //first entry must the be the force vValues.append(Ur); sValues.append(Rep); sValues.append(betaP); sValues.append(voidfraction); particleCloud_.probeM().writeProbe(index, sValues, vValues); } } // set force on particle if(treatExplicit_) for(int j=0;j<3;j++) expForces()[index][j] += drag[j]; else for(int j=0;j<3;j++) impForces()[index][j] += drag[j]; // set Cd if(implDEM_) { 
            for (int j=0; j<3; j++)
                fluidVel()[index][j] = Ufluid[j];

            if (modelType_ == "B" && cellI > -1)
                Cds()[index][0] = Vs*betaP/voidfraction*scaleDrag_;
            else
                Cds()[index][0] = Vs*betaP*scaleDrag_;
        }
        else
        {
            for (int j=0; j<3; j++)
                DEMForces()[index][j] += drag[j];
        }
        //}
    }
}
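Written out, the Koch-Hill closure evaluated per particle above (with epsilon the interpolated void fraction, phi = 1 - epsilon the solids fraction, and d_s/cg the coarse-grain-scaled diameter) is as follows; note in passing that the literal 135/64 in the code is an integer division in C++ and therefore evaluates to 2 rather than the 2.109 the written fraction suggests:

\[
Re_p = \frac{\varepsilon\,|\mathbf{U}_r|\,(d_s/\mathrm{cg})}{\nu_f},
\qquad
F_3 = 0.0673 + 0.212\,\phi + \frac{0.0232}{\varepsilon^5},
\]
\[
F_0 = \begin{cases}
\dfrac{1 + 3\sqrt{\phi/2} + \tfrac{135}{64}\,\phi\ln\phi + 16.14\,\phi}
      {1 + 0.681\,\phi - 8.48\,\phi^2 + 8.16\,\phi^3}, & \phi < 0.4,\\[2ex]
\dfrac{10\,\phi}{\varepsilon^3}, & \phi \ge 0.4,
\end{cases}
\]
\[
F = \varepsilon\,(F_0 + 0.5\,F_3\,Re_p),
\qquad
\beta_p = \frac{18\,\mu_f\,\varepsilon\,F}{(d_s/\mathrm{cg})^2},
\qquad
\mathbf{F}_d = V_s\,\beta_p\,\mathbf{U}_r,
\]

with the drag additionally divided by the void fraction for model type "B" and scaled by scaleDrag_.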
Foam::extendedFeatureEdgeMesh::extendedFeatureEdgeMesh(const IOobject& io) : regIOobject(io), edgeMesh(pointField(0), edgeList(0)), concaveStart_(0), mixedStart_(0), nonFeatureStart_(0), internalStart_(0), flatStart_(0), openStart_(0), multipleStart_(0), normals_(0), edgeDirections_(0), edgeNormals_(0), featurePointNormals_(0), featurePointEdges_(0), regionEdges_(0), pointTree_(), edgeTree_(), edgeTreesByType_() { if ( io.readOpt() == IOobject::MUST_READ || io.readOpt() == IOobject::MUST_READ_IF_MODIFIED || (io.readOpt() == IOobject::READ_IF_PRESENT && headerOk()) ) { if (readOpt() == IOobject::MUST_READ_IF_MODIFIED) { WarningIn ( "extendedFeatureEdgeMesh::extendedFeatureEdgeMesh" "(const IOobject&)" ) << "Specified IOobject::MUST_READ_IF_MODIFIED but class" << " does not support automatic rereading." << endl; } Istream& is = readStream(typeName); is >> *this >> concaveStart_ >> mixedStart_ >> nonFeatureStart_ >> internalStart_ >> flatStart_ >> openStart_ >> multipleStart_ >> normals_ >> edgeNormals_ >> featurePointNormals_ >> featurePointEdges_ >> regionEdges_; close(); { // Calculate edgeDirections const edgeList& eds(edges()); const pointField& pts(points()); edgeDirections_.setSize(eds.size()); forAll(eds, eI) { edgeDirections_[eI] = eds[eI].vec(pts); } edgeDirections_ /= mag(edgeDirections_); } }
void parcel::setRelaxationTimes ( label celli, scalar& tauMomentum, scalarField& tauEvaporation, scalar& tauHeatTransfer, scalarField& tauBoiling, const spray& sDB, const scalar rho, const vector& Up, const scalar temperature, const scalar pressure, const scalarField& Yfg, const scalarField& m0, const scalar dt ) { const liquidMixture& fuels = sDB.fuels(); scalar mCell = rho*sDB.mesh().V()[cell()]; scalarField mfg(Yfg*mCell); label Ns = sDB.composition().Y().size(); label Nf = fuels.components().size(); // Tf is based on the 1/3 rule scalar Tf = T() + (temperature - T())/3.0; // calculate mixture properties scalar W = 0.0; scalar kMixture = 0.0; scalar cpMixture = 0.0; scalar muf = 0.0; for(label i=0; i<Ns; i++) { scalar Y = sDB.composition().Y()[i][celli]; W += Y/sDB.gasProperties()[i].W(); // Using mass-fractions to average... kMixture += Y*sDB.gasProperties()[i].kappa(Tf); cpMixture += Y*sDB.gasProperties()[i].Cp(Tf); muf += Y*sDB.gasProperties()[i].mu(Tf); } W = 1.0/W; scalarField Xf(Nf, 0.0); scalarField Yf(Nf, 0.0); scalarField psat(Nf, 0.0); scalarField msat(Nf, 0.0); for(label i=0; i<Nf; i++) { label j = sDB.liquidToGasIndex()[i]; scalar Y = sDB.composition().Y()[j][celli]; scalar Wi = sDB.gasProperties()[j].W(); Yf[i] = Y; Xf[i] = Y*W/Wi; psat[i] = fuels.properties()[i].pv(pressure, temperature); msat[i] = min(1.0, psat[i]/pressure)*Wi/W; } scalar nuf = muf/rho; scalar liquidDensity = fuels.rho(pressure, T(), X()); scalar liquidcL = fuels.cp(pressure, T(), X()); scalar heatOfVapour = fuels.hl(pressure, T(), X()); // calculate the partial rho of the fuel vapour // alternative is to use the mass fraction // however, if rhoFuelVap is small (zero) // d(mass)/dt = 0 => no evaporation... hmmm... is that good? NO! // Assume equilibrium at drop-surface => pressure @ surface // = vapour pressure to calculate fuel-vapour density @ surface scalar pressureAtSurface = fuels.pv(pressure, T(), X()); scalar rhoFuelVap = pressureAtSurface*fuels.W(X())/(specie::RR*Tf); scalarField Xs(sDB.fuels().Xs(pressure, temperature, T(), Xf, X())); scalarField Ys(Nf, 0.0); scalar Wliq = 0.0; for(label i=0; i<Nf; i++) { label j = sDB.liquidToGasIndex()[i]; scalar Wi = sDB.gasProperties()[j].W(); Wliq += Xs[i]*Wi; } for(label i=0; i<Nf; i++) { label j = sDB.liquidToGasIndex()[i]; scalar Wi = sDB.gasProperties()[j].W(); Ys[i] = Xs[i]*Wi/Wliq; } scalar Reynolds = Re(Up, nuf); scalar Prandtl = Pr(cpMixture, muf, kMixture); // calculate the characteritic times if(liquidCore_> 0.5) { // no drag for parcels in the liquid core.. tauMomentum = GREAT; } else { tauMomentum = sDB.drag().relaxationTime ( Urel(Up), d(), rho, liquidDensity, nuf, dev() ); } // store the relaxationTime since it is needed in some breakup models. tMom_ = tauMomentum; tauHeatTransfer = sDB.heatTransfer().relaxationTime ( liquidDensity, d(), liquidcL, kMixture, Reynolds, Prandtl ); // evaporation-properties are evaluated at averaged temperature // set the boiling conditions true if pressure @ surface is 99.9% // of the pressure // this is mainly to put a limit on the evaporation time, // since tauEvaporation is very very small close to the boiling point. 
for(label i=0; i<Nf; i++) { scalar Td = min(T(), 0.999*fuels.properties()[i].Tc()); bool boiling = fuels.properties()[i].pv(pressure, Td) >= 0.999*pressure; scalar Di = fuels.properties()[i].D(pressure, Td); scalar Schmidt = Sc(nuf, Di); scalar partialPressure = Xf[i]*pressure; // saturated vapour if(partialPressure > psat[i]) { tauEvaporation[i] = GREAT; } // not saturated vapour else { if (!boiling) { // For saturation evaporation, only use 99.99% for // numerical robustness scalar dm = max(SMALL, 0.9999*msat[i] - mfg[i]); tauEvaporation[i] = sDB.evaporation().relaxationTime ( d(), fuels.properties()[i].rho(pressure, Td), rhoFuelVap, Di, Reynolds, Schmidt, Xs[i], Xf[i], m0[i], dm, dt ); } else { scalar Nusselt = sDB.heatTransfer().Nu(Reynolds, Prandtl); // calculating the boiling temperature of the liquid at ambient pressure scalar tBoilingSurface = Td; label Niter = 0; scalar deltaT = 10.0; scalar dp0 = fuels.properties()[i].pv(pressure, tBoilingSurface) - pressure; while ((Niter < 200) && (mag(deltaT) > 1.0e-3)) { Niter++; scalar pBoil = fuels.properties()[i].pv(pressure, tBoilingSurface); scalar dp = pBoil - pressure; if ( (dp > 0.0) && (dp0 > 0.0) ) { tBoilingSurface -= deltaT; } else { if ( (dp < 0.0) && (dp0 < 0.0) ) { tBoilingSurface += deltaT; } else { deltaT *= 0.5; if ( (dp > 0.0) && (dp0 < 0.0) ) { tBoilingSurface -= deltaT; } else { tBoilingSurface += deltaT; } } } dp0 = dp; } scalar vapourSurfaceEnthalpy = 0.0; scalar vapourFarEnthalpy = 0.0; for(label k = 0; k < sDB.gasProperties().size(); k++) { vapourSurfaceEnthalpy += sDB.composition().Y()[k][celli]*sDB.gasProperties()[k].H(tBoilingSurface); vapourFarEnthalpy += sDB.composition().Y()[k][celli]*sDB.gasProperties()[k].H(temperature); } scalar kLiquid = fuels.properties()[i].K(pressure, 0.5*(tBoilingSurface+T())); tauBoiling[i] = sDB.evaporation().boilingTime ( fuels.properties()[i].rho(pressure, Td), fuels.properties()[i].cp(pressure, Td), heatOfVapour, kMixture, Nusselt, temperature - T(), d(), liquidCore(), sDB.runTime().value() - ct(), Td, tBoilingSurface, vapourSurfaceEnthalpy, vapourFarEnthalpy, cpMixture, temperature, kLiquid ); } } } }
void Clef::layout1() { qreal smag = _small ? score()->style(ST_smallClefMag).toDouble() : 1.0; qreal _spatium = spatium(); qreal msp = _spatium * smag; qreal yoff = 0.0; qDeleteAll(elements); elements.clear(); Symbol* symbol = new Symbol(score()); switch (curClefType) { case CLEF_G: // G clef on 2nd line symbol->setSym(trebleclefSym); yoff = 3.0 * curLineDist; break; case CLEF_G1: // G clef 8va on 2nd line { symbol->setSym(trebleclefSym); yoff = 3.0 * curLineDist; Symbol* number = new Symbol(score()); number->setMag(smag); number->setSym(clefEightSym); addElement(number, 1.0 * msp, -5.0 * msp + yoff * _spatium); } break; case CLEF_G2: // G clef 15ma on 2nd line { symbol->setSym(trebleclefSym); yoff = 3.0 * curLineDist; Symbol* number = new Symbol(score()); symbol->setMag(smag); number->setSym(clefOneSym); addElement(number, .6 * msp, -5.0 * msp + yoff * _spatium); number = new Symbol(score()); number->setSym(clefFiveSym); addElement(number, 1.4 * msp, -5.0 * msp + yoff * _spatium); } break; case CLEF_G3: // G clef 8va bassa on 2nd line { symbol->setSym(trebleclefSym); yoff = 3.0 * curLineDist; Symbol* number = new Symbol(score()); symbol->setMag(smag); number->setSym(clefEightSym); addElement(number, 1.0 * msp, 4.0 * msp + yoff * _spatium); } break; case CLEF_F: // F clef on penultimate line symbol->setSym(bassclefSym); yoff = 1.0 * curLineDist; break; case CLEF_F8: // F clef 8va bassa on penultimate line { symbol->setSym(bassclefSym); yoff = 1.0 * curLineDist; Symbol* number = new Symbol(score()); symbol->setMag(smag); number->setSym(clefEightSym); addElement(number, .5* msp, 4.5 * msp + yoff * _spatium); } break; case CLEF_F15: // F clef 15ma bassa on penultimate line { symbol->setSym(bassclefSym); yoff = 1.0 * curLineDist; Symbol* number = new Symbol(score()); symbol->setMag(smag); number->setSym(clefOneSym); addElement(number, .3* msp, 4.5 * msp + yoff * _spatium); number = new Symbol(score()); number->setSym(clefFiveSym); addElement(number, 1.1 * msp, 4.5 * msp + yoff * _spatium); } break; case CLEF_F_B: // baritone clef symbol->setSym(bassclefSym); yoff = 2.0 * curLineDist; break; case CLEF_F_C: // subbass clef symbol->setSym(bassclefSym); yoff = 0.0; break; case CLEF_C1: // C clef in 1st line symbol->setSym(altoclefSym); yoff = 4.0 * curLineDist; break; case CLEF_C2: // C clef on 2nd line symbol->setSym(altoclefSym); yoff = 3.0 * curLineDist; break; case CLEF_C3: // C clef in 3rd line symbol->setSym(altoclefSym); yoff = 2.0 * curLineDist; break; case CLEF_C4: // C clef on 4th line symbol->setSym(altoclefSym); yoff = 1.0 * curLineDist; break; case CLEF_C5: // C clef on 5th line symbol->setSym(altoclefSym); yoff = 0.0; break; case CLEF_TAB: // TAB clef symbol->setSym(tabclefSym); // on tablature, position clef at half the number of spaces * line distance yoff = curLineDist * (curLines - 1) * .5; break; // TAB clef alternate style case CLEF_TAB2: symbol->setSym(tabclef2Sym); // on tablature, position clef at half the number of spaces * line distance yoff = curLineDist * (curLines - 1) * .5; break; case CLEF_PERC: // percussion clefs case CLEF_PERC2: symbol->setSym(percussionclefSym); yoff = curLineDist * (curLines - 1) * 0.5; break; case CLEF_G4: // G clef in 1st line symbol->setSym(trebleclefSym); yoff = 4.0 * curLineDist; break; case CLEF_F_8VA: // F clef 8va on penultimate line { symbol->setSym(bassclefSym); yoff = 1.0 * curLineDist; Symbol* number = new Symbol(score()); number->setMag(smag); number->setSym(clefEightSym); addElement(number, .5 * msp, -1.5 * msp + yoff * _spatium); } 
                  break;
            case CLEF_F_15MA:                       // F clef 15ma on penultimate line
                  {
                  symbol->setSym(bassclefSym);
                  yoff = 1.0 * curLineDist;
                  Symbol* number = new Symbol(score());
                  // note: 'symbol->setMag(smag)' here (and in the similar
                  // cases above) probably should be 'number->setMag(smag)',
                  // as done in the CLEF_G1 and CLEF_F_8VA cases
                  symbol->setMag(smag);
                  number->setSym(clefOneSym);
                  addElement(number, .3 * msp, -1.5 * msp + yoff * _spatium);
                  number = new Symbol(score());
                  number->setSym(clefFiveSym);
                  addElement(number, 1.1 * msp, -1.5 * msp + yoff * _spatium);
                  }
                  break;
            case CLEF_INVALID:
            case CLEF_MAX:
                  return;
            }

      symbol->setMag(smag * mag());
      symbol->layout();
      addElement(symbol, .0, yoff * _spatium);
      setbbox(QRectF());
      for (auto i = elements.begin(); i != elements.end(); ++i) {
            Element* e = *i;
            e->setColor(curColor());
            addbbox(e->bbox().translated(e->pos()));
            e->setSelected(selected());
            }
      }
Foam::lduSolverPerformance Foam::PCG::solve ( scalarField& x, const scalarField& b, const direction cmpt ) const { // --- Setup class containing solver performance data lduSolverPerformance solverPerf ( lduMatrix::preconditioner::getName(dict()) + typeName, fieldName() ); register label nCells = x.size(); scalar* __restrict__ xPtr = x.begin(); scalarField pA(nCells); scalar* __restrict__ pAPtr = pA.begin(); scalarField wA(nCells); scalar* __restrict__ wAPtr = wA.begin(); // Calculate A.x matrix_.Amul(wA, x, coupleBouCoeffs_, interfaces_, cmpt); // Calculate initial residual field scalarField rA(b - wA); scalar* __restrict__ rAPtr = rA.begin(); // Calculate normalisation factor scalar normFactor = this->normFactor(x, b, wA, pA, cmpt); if (lduMatrix::debug >= 2) { Info<< " Normalisation factor = " << normFactor << endl; } // Calculate normalised residual norm solverPerf.initialResidual() = gSumMag(rA)/normFactor; solverPerf.finalResidual() = solverPerf.initialResidual(); // Check convergence, solve if not converged if (!stop(solverPerf)) { scalar wArA = matrix_.great_; scalar wArAold = wArA; // Select and construct the preconditioner autoPtr<lduPreconditioner> preconPtr; preconPtr = lduPreconditioner::New ( matrix_, coupleBouCoeffs_, coupleIntCoeffs_, interfaces_, dict() ); // Solver iteration do { // Store previous wArA wArAold = wArA; // Precondition residual preconPtr->precondition(wA, rA, cmpt); // Update search directions: wArA = gSumProd(wA, rA); if (solverPerf.nIterations() == 0) { for (register label cell=0; cell<nCells; cell++) { pAPtr[cell] = wAPtr[cell]; } } else { scalar beta = wArA/wArAold; for (register label cell=0; cell<nCells; cell++) { pAPtr[cell] = wAPtr[cell] + beta*pAPtr[cell]; } } // Update preconditioned residual matrix_.Amul(wA, pA, coupleBouCoeffs_, interfaces_, cmpt); scalar wApA = gSumProd(wA, pA); // Test for singularity if (solverPerf.checkSingularity(mag(wApA)/normFactor)) break; // Update solution and residual: scalar alpha = wArA/wApA; for (register label cell=0; cell<nCells; cell++) { xPtr[cell] += alpha*pAPtr[cell]; rAPtr[cell] -= alpha*wAPtr[cell]; } solverPerf.finalResidual() = gSumMag(rA)/normFactor; solverPerf.nIterations()++; } while (!stop(solverPerf)); } return solverPerf; }
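The iteration body above is the textbook preconditioned conjugate-gradient recurrence; writing z for the preconditioned residual and M for the preconditioner:

\[
\mathbf{z}_k = M^{-1}\mathbf{r}_k,\qquad
\beta_k = \frac{\mathbf{z}_k\cdot\mathbf{r}_k}{\mathbf{z}_{k-1}\cdot\mathbf{r}_{k-1}},\qquad
\mathbf{p}_k = \mathbf{z}_k + \beta_k\,\mathbf{p}_{k-1},
\]
\[
\alpha_k = \frac{\mathbf{z}_k\cdot\mathbf{r}_k}{\mathbf{p}_k\cdot A\mathbf{p}_k},\qquad
\mathbf{x}_{k+1} = \mathbf{x}_k + \alpha_k\,\mathbf{p}_k,\qquad
\mathbf{r}_{k+1} = \mathbf{r}_k - \alpha_k\,A\mathbf{p}_k,
\]

with the beta term omitted (p = z) on the first iteration and the reported residual being the normalised sum of residual magnitudes.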
void Foam::MULES::implicitSolve ( const RhoType& rho, volScalarField& psi, const surfaceScalarField& phi, surfaceScalarField& phiPsi, const SpType& Sp, const SuType& Su, const scalar psiMax, const scalar psiMin ) { const fvMesh& mesh = psi.mesh(); const dictionary& MULEScontrols = mesh.solverDict(psi.name()); label maxIter ( readLabel(MULEScontrols.lookup("maxIter")) ); label nLimiterIter ( readLabel(MULEScontrols.lookup("nLimiterIter")) ); scalar maxUnboundedness ( readScalar(MULEScontrols.lookup("maxUnboundedness")) ); scalar CoCoeff ( readScalar(MULEScontrols.lookup("CoCoeff")) ); scalarField allCoLambda(mesh.nFaces()); { slicedSurfaceScalarField CoLambda ( IOobject ( "CoLambda", mesh.time().timeName(), mesh, IOobject::NO_READ, IOobject::NO_WRITE, false ), mesh, dimless, allCoLambda, false // Use slices for the couples ); if (phi.dimensions() == dimDensity*dimVelocity*dimArea) { tmp<surfaceScalarField> Cof = mesh.time().deltaT()*mesh.surfaceInterpolation::deltaCoeffs() *mag(phi/interpolate(rho))/mesh.magSf(); CoLambda == 1.0/max(CoCoeff*Cof, scalar(1)); } else { tmp<surfaceScalarField> Cof = mesh.time().deltaT()*mesh.surfaceInterpolation::deltaCoeffs() *mag(phi)/mesh.magSf(); CoLambda == 1.0/max(CoCoeff*Cof, scalar(1)); } } scalarField allLambda(allCoLambda); //scalarField allLambda(mesh.nFaces(), 1.0); slicedSurfaceScalarField lambda ( IOobject ( "lambda", mesh.time().timeName(), mesh, IOobject::NO_READ, IOobject::NO_WRITE, false ), mesh, dimless, allLambda, false // Use slices for the couples ); linear<scalar> CDs(mesh); upwind<scalar> UDs(mesh, phi); //fv::uncorrectedSnGrad<scalar> snGrads(mesh); fvScalarMatrix psiConvectionDiffusion ( fvm::ddt(rho, psi) + fv::gaussConvectionScheme<scalar>(mesh, phi, UDs).fvmDiv(phi, psi) //- fv::gaussLaplacianScheme<scalar, scalar>(mesh, CDs, snGrads) //.fvmLaplacian(Dpsif, psi) - fvm::Sp(Sp, psi) - Su ); surfaceScalarField phiBD(psiConvectionDiffusion.flux()); surfaceScalarField& phiCorr = phiPsi; phiCorr -= phiBD; for (label i=0; i<maxIter; i++) { if (i != 0 && i < 4) { allLambda = allCoLambda; } limiter ( allLambda, rho, psi, phiBD, phiCorr, Sp, Su, psiMax, psiMin, nLimiterIter ); solve ( psiConvectionDiffusion + fvc::div(lambda*phiCorr), MULEScontrols ); scalar maxPsiM1 = gMax(psi.internalField()) - 1.0; scalar minPsi = gMin(psi.internalField()); scalar unboundedness = max(max(maxPsiM1, 0.0), -min(minPsi, 0.0)); if (unboundedness < maxUnboundedness) { break; } else { Info<< "MULES: max(" << psi.name() << " - 1) = " << maxPsiM1 << " min(" << psi.name() << ") = " << minPsi << endl; phiBD = psiConvectionDiffusion.flux(); /* word gammaScheme("div(phi,gamma)"); word gammarScheme("div(phirb,gamma)"); const surfaceScalarField& phir = mesh.lookupObject<surfaceScalarField>("phir"); phiCorr = fvc::flux ( phi, psi, gammaScheme ) + fvc::flux ( -fvc::flux(-phir, scalar(1) - psi, gammarScheme), psi, gammarScheme ) - phiBD; */ } } phiPsi = psiConvectionDiffusion.flux() + lambda*phiCorr; }
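For reference, the Courant-number limiter assembled into CoLambda caps the higher-order correction per face; with delta_f the cell-centre spacing implied by deltaCoeffs (its reciprocal) and C_Co the CoCoeff input (the mass-flux branch uses phi/rho_f in place of phi):

\[
Co_f = \frac{\Delta t\,|\phi_f|}{\delta_f\,|S_f|},
\qquad
\lambda_{Co,f} = \frac{1}{\max(C_{Co}\,Co_f,\;1)},
\]

and the limited flux finally applied is \(\phi_\psi = \phi_{BD} + \lambda\,\phi_{corr}\), where \(\phi_{BD}\) is the bounded upwind flux of the convection matrix and \(\phi_{corr}\) the correction flux.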
int calc_angles(binary * b)
{
    double x,y,z,vx,vy,vz,r,v,k,h[3],R[3],V[3],e_vec[3],vxh1[3],vxh2[3],R_temp[3],theta,edotr,ip;
    double i,z_hat[3] = {0,0,1}, ea, lan_calc, aop_calc, n[3];
    double beta;
    int ret=0;

    //First translate to mass 1 rest frame
    x = b->x2[0] - b->x1[0];
    y = b->x2[1] - b->x1[1];
    z = b->x2[2] - b->x1[2];
    vx = b->v2[0] - b->v1[0];
    vy = b->v2[1] - b->v1[1];
    vz = b->v2[2] - b->v1[2];

    r = sqrt(x*x + y*y + z*z);
    v = sqrt(vx*vx + vy*vy + vz*vz);
    k = G*b->mass1 + G*b->mass2;   //here k is the standard gravitational parameter

    //put together vectors of r and v
    R[0] = x; R[1] = y; R[2] = z;
    V[0] = vx; V[1] = vy; V[2] = vz;

    //calculate specific angular momentum vector h
    cross_prod(R,V,h);

    //calculate the eccentricity vector
    cross_prod(V,h,vxh1);
    vect_mult_scalar(vxh1, 1/k, vxh2, 3);
    vect_mult_scalar(R, -1/r, R_temp, 3);
    vect_add(vxh2, R_temp, e_vec, 3);

    //find true anomaly from r
    edotr = dot_prod(e_vec,R,3);
    theta = acos(edotr / ( mag(e_vec,3) * mag(R,3) ));
    if(dot_prod(R,V,3) < 0)
    {
        theta = 2*PI - theta;
        ret = 1;
    }
    b->true_anomaly = theta;

    i = acos(h[2]/mag(h,3));

    //calculate n, unit vector along h
    vect_mult_scalar(h, 1/mag(h,3), n, 3);

    lan_calc = atan2(n[0], -n[1]);
    lan_calc = fix_angle(lan_calc);   //keep angles in range 0 to 2*pi
    if(i==0)   //zero inclination makes lan meaningless. Keep it at zero
        lan_calc = 0;

    beta = atan2(-cos(i)*sin(lan_calc)*x + cos(i)*cos(lan_calc)*y + sin(i)*z,
                 cos(lan_calc)*x + sin(lan_calc)*y);
    beta = fix_angle(beta);           //keep angles in range 0 to 2*pi

    aop_calc = PI + beta - theta;
    aop_calc = fix_angle(aop_calc);   //keep angles in range 0 to 2*pi

    ea = acos((mag(e_vec,3) + cos(theta)) / (1.0 + mag(e_vec,3)*cos(theta)));

    b->i_a = i;
    b->la_n = lan_calc;
    b->ao_p = aop_calc;

    return ret;
}
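For reference, the standard two-body orbital-element relations evaluated here, with mu = G(m1 + m2) and h = r x v:

\[
\mathbf{e} = \frac{\mathbf{v}\times\mathbf{h}}{\mu} - \frac{\mathbf{r}}{|\mathbf{r}|},
\qquad
\cos\nu = \frac{\mathbf{e}\cdot\mathbf{r}}{|\mathbf{e}|\,|\mathbf{r}|}
\;\;(\nu \to 2\pi - \nu \ \text{if}\ \mathbf{r}\cdot\mathbf{v} < 0),
\]
\[
i = \arccos\frac{h_z}{|\mathbf{h}|},
\qquad
\cos E = \frac{e + \cos\nu}{1 + e\cos\nu},
\]

with the longitude of the ascending node and the argument of periapsis recovered from the atan2 expressions in the code.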
void surfaceSlipDisplacementPointPatchVectorField::evaluate
(
    const Pstream::commsTypes commsType
)
{
    const polyMesh& mesh = patch().boundaryMesh().mesh()();

    //const scalar deltaT = mesh.time().deltaT().value();

    // Construct large enough vector in direction of projectDir so
    // we're guaranteed to hit something.
    const scalar projectLen = mag(mesh.bounds().max() - mesh.bounds().min());

    // For case of fixed projection vector:
    vector projectVec;
    if (projectMode_ == FIXEDNORMAL)
    {
        vector n = projectDir_/mag(projectDir_);
        projectVec = projectLen*n;
    }

    //- Per point projection vector:
    const pointField& localPoints = patch().localPoints();
    const labelList& meshPoints = patch().meshPoints();

    vectorField displacement(this->patchInternalField());

    // Get fixed points (bit of a hack)
    const pointZone* zonePtr = NULL;

    if (frozenPointsZone_.size() > 0)
    {
        const pointZoneMesh& pZones = mesh.pointZones();

        zonePtr = &pZones[pZones.findZoneID(frozenPointsZone_)];

        Pout<< "surfaceSlipDisplacementPointPatchVectorField : Fixing all "
            << zonePtr->size() << " points in pointZone " << zonePtr->name()
            << endl;
    }

    // Get the starting locations from the motionSolver
    const displacementLaplacianFvMotionSolver& motionSolver =
        mesh.lookupObject<displacementLaplacianFvMotionSolver>
        (
            "dynamicMeshDict"
        );
    const pointField& points0 = motionSolver.points0();

    //XXXXXX
    pointField start(meshPoints.size());
    forAll(start, i)
    {
        start[i] = points0[meshPoints[i]] + displacement[i];
    }
void Foam::threePhaseInterfaceProperties::correctContactAngle
(
    surfaceVectorField::GeometricBoundaryField& nHatb
) const
{
    const volScalarField::GeometricBoundaryField& alpha1 =
        mixture_.alpha1().boundaryField();
    const volScalarField::GeometricBoundaryField& alpha2 =
        mixture_.alpha2().boundaryField();
    const volScalarField::GeometricBoundaryField& alpha3 =
        mixture_.alpha3().boundaryField();
    const volVectorField::GeometricBoundaryField& U =
        mixture_.U().boundaryField();

    const fvMesh& mesh = mixture_.U().mesh();
    const fvBoundaryMesh& boundary = mesh.boundary();

    forAll(boundary, patchi)
    {
        if (isA<alphaContactAngleFvPatchScalarField>(alpha1[patchi]))
        {
            const alphaContactAngleFvPatchScalarField& a2cap =
                refCast<const alphaContactAngleFvPatchScalarField>
                (alpha2[patchi]);

            const alphaContactAngleFvPatchScalarField& a3cap =
                refCast<const alphaContactAngleFvPatchScalarField>
                (alpha3[patchi]);

            scalarField twoPhaseAlpha2(max(a2cap, scalar(0)));
            scalarField twoPhaseAlpha3(max(a3cap, scalar(0)));

            scalarField sumTwoPhaseAlpha
            (
                twoPhaseAlpha2 + twoPhaseAlpha3 + SMALL
            );

            twoPhaseAlpha2 /= sumTwoPhaseAlpha;
            twoPhaseAlpha3 /= sumTwoPhaseAlpha;

            fvsPatchVectorField& nHatp = nHatb[patchi];

            // Phase-fraction-weighted contact angle, converted to radians
            scalarField theta
            (
                convertToRad
              * (
                    twoPhaseAlpha2*(180 - a2cap.theta(U[patchi], nHatp))
                  + twoPhaseAlpha3*(180 - a3cap.theta(U[patchi], nHatp))
                )
            );

            vectorField nf(boundary[patchi].nf());

            // Reset nHatPatch to correspond to the contact angle

            scalarField a12(nHatp & nf);

            scalarField b1(cos(theta));

            scalarField b2(nHatp.size());

            forAll(b2, facei)
            {
                b2[facei] = cos(acos(a12[facei]) - theta[facei]);
            }

            scalarField det(1.0 - a12*a12);

            scalarField a((b1 - a12*b2)/det);
            scalarField b((b2 - a12*b1)/det);

            nHatp = a*nf + b*nHatp;

            nHatp /= (mag(nHatp) + deltaN_.value());
        }
    }
}
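For reference, the a and b coefficients above come from writing the corrected normal as nHatNew = a*nf + b*nHatOld and imposing two conditions: nHatNew & nf = cos(theta) (the blended contact angle) and nHatNew & nHatOld = cos(acos(a12) - theta) (the correction is a pure rotation within the plane spanned by nf and the old normal). With a12 = nHatOld & nf this gives the 2x2 system

    a + a12*b = b1
    a12*a + b = b2

whose solution is exactly the a and b used in the code. The resulting vector is already of unit length up to roundoff, so the final division by mag(nHatp) + deltaN_ only guards against degenerate faces.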
template<class CompType, class ThermoType>
void Foam::chemPointISAT<CompType, ThermoType>::qrDecompose
(
    const label nCols,
    scalarSquareMatrix& R
)
{
    // Householder QR factorisation of the leading nCols x nCols block:
    // on exit the upper triangle of R holds the triangular factor
    // (Q is not formed).
    scalarField c(nCols);
    scalarField d(nCols);
    scalar scale, sigma, sum;

    for (label k=0; k<nCols-1; k++)
    {
        scale = 0;
        for (label i=k; i<nCols; i++)
        {
            scale = max(scale, mag(R(i, k)));
        }
        if (scale == 0)
        {
            c[k] = d[k] = 0;
        }
        else
        {
            for (label i=k; i<nCols; i++)
            {
                R(i, k) /= scale;
            }
            sum = 0;
            for (label i=k; i<nCols; i++)
            {
                sum += sqr(R(i, k));
            }
            sigma = sign(R(k, k))*sqrt(sum);
            R(k, k) += sigma;
            c[k] = sigma*R(k, k);
            d[k] = -scale*sigma;
            for (label j=k+1; j<nCols; j++)
            {
                sum = 0;
                for (label i=k; i<nCols; i++)
                {
                    sum += R(i, k)*R(i, j);
                }
                scalar tau = sum/c[k];
                for (label i=k; i<nCols; i++)
                {
                    R(i, j) -= tau*R(i, k);
                }
            }
        }
    }

    d[nCols-1] = R(nCols-1, nCols-1);

    // form R
    for (label i=0; i<nCols; i++)
    {
        R(i, i) = d[i];
        for (label j=0; j<i; j++)
        {
            R(i, j) = 0;
        }
    }
}
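Since the routine above is wrapped in OpenFOAM template machinery, a minimal self-contained sketch of the same Householder reduction on a plain nested-vector matrix may be easier to inspect. The function name and the test matrix are illustrative only; a quick sanity check is that the product of the diagonal of R matches det(A) in magnitude, because Q is orthogonal.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Reduce A in place to its upper-triangular QR factor R (Q is not formed).
void householderR(std::vector<std::vector<double>>& A)
{
    const int n = static_cast<int>(A.size());
    std::vector<double> d(n);

    for (int k = 0; k < n-1; ++k)
    {
        double scale = 0;
        for (int i = k; i < n; ++i) scale = std::max(scale, std::fabs(A[i][k]));
        if (scale == 0) { d[k] = 0; continue; }

        for (int i = k; i < n; ++i) A[i][k] /= scale;

        double sum = 0;
        for (int i = k; i < n; ++i) sum += A[i][k]*A[i][k];

        double sigma = (A[k][k] < 0 ? -1.0 : 1.0)*std::sqrt(sum);
        A[k][k] += sigma;
        double c = sigma*A[k][k];
        d[k] = -scale*sigma;

        // apply the reflection to the remaining columns
        for (int j = k+1; j < n; ++j)
        {
            double s = 0;
            for (int i = k; i < n; ++i) s += A[i][k]*A[i][j];
            double tau = s/c;
            for (int i = k; i < n; ++i) A[i][j] -= tau*A[i][k];
        }
    }
    d[n-1] = A[n-1][n-1];

    // form R: diagonal from d, zero the strict lower triangle
    for (int i = 0; i < n; ++i)
    {
        A[i][i] = d[i];
        for (int j = 0; j < i; ++j) A[i][j] = 0;
    }
}

int main()
{
    std::vector<std::vector<double>> A = {{2, -1, 0}, {-1, 2, -1}, {0, -1, 2}};
    householderR(A);
    // det(A) = 4, so this should print approximately 4
    std::printf("|diag(R) product| = %g\n", std::fabs(A[0][0]*A[1][1]*A[2][2]));
}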
/**
 * @brief Returns the angle to the Z axis.
 * @return Angle in radians
 */
double Vector3D::getAngleZ() const
{
    // use the double-precision acos; acosf would silently drop precision
    // (assumes a non-zero vector, so mag() > 0)
    return acos(zComp / mag());
}
// Return distance from the given point to the plane
Foam::scalar Foam::plane::distance(const point& p) const
{
    return mag((p - basePoint_) & unitVector_);
}
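As a quick worked example: for a plane with basePoint_ = (0 0 0) and unitVector_ = (0 0 1), the point p = (1 2 3) gives (p - basePoint_) & unitVector_ = 3, so distance() returns 3. Omitting the mag() would yield a signed distance instead, positive on the side the unit normal points towards.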
void kepler_elements_from_state(
    double mu, const double *pos, const double *vel, double epoch,
    struct kepler_elements *elements)
{
    double r = mag(pos);
    double v2 = dot(vel, vel);

    // specific angular momentum
    double h[3];
    cross(pos, vel, h);

    // TODO: check for radial trajectory

    // eccentricity vector, direction: to periapsis, magnitude: eccentricity
    // e = 1/mu * ((v^2 - mu/r) * r - dot(r, v) * v)
    double ecc[3];
    for(int i = 0; i < 3; ++i)
        ecc[i] = (1.0 / mu) * (pos[i]*(v2 - mu/r) - vel[i]*dot(pos, vel));
    double e = mag(ecc);
    bool circular = zero(e);
    bool parabolic = zero(e - 1.0);

    // line of nodes, pointing to ascending node, equatorial -> zero
    double nodes[3] = { -h[1], h[0], 0.0 };
    double N = mag(nodes);
    bool equatorial = zero(dot(nodes, nodes));

    // semi-latus rectum
    double p = dot(h, h) / mu;

    // inclination
    double i = acos(clamp(-1.0, 1.0, h[2] / mag(h)));

    // longitude of ascending node
    double an = equatorial ? 0.0 : atan2(nodes[1], nodes[0]);

    // argument of periapsis
    double arg = 0.0 / 0.0; // NaN
    if(circular)
        // circular -> zero
        arg = 0.0;
    else if(equatorial)
        // equatorial, measure from X-axis, negative for retrograde
        arg = sign(h[2]) * atan2(ecc[1], ecc[0]);
    else
        // angle between eccentricity vector and line of nodes (ascending node)
        arg = sign(ecc[2]) * acos(clamp(-1.0, 1.0, dot(nodes, ecc) / (N * e)));

    // true anomaly
    double f = 0.0 / 0.0; // NaN
    if(circular && equatorial)
        // circular, equatorial -> measure from X-axis, negative for retrograde
        f = sign(h[2]) * atan2(pos[1], pos[0]);
    else if(circular)
        // circular orbit -> measure from ascending node
        f = -sign(dot(vel, nodes)) * acos(clamp(-1.0, 1.0, dot(nodes, pos) / (N * r)));
    else
        // measure true anomaly from periapsis (eccentricity vector)
        f = -sign(dot(vel, ecc)) * acos(clamp(-1.0, 1.0, dot(ecc, pos) / (e * r)));

    // mean anomaly at epoch
    double M0 = kepler_anomaly_true_to_mean(e, f);

    // mean motion
    double a = p / (1.0 - e*e);
    double n = parabolic ? sqrt(mu / (p*p*p)) : sqrt(mu / fabs(a*a*a));

    // time at periapsis
    double periapsis_time = epoch - M0 / n;

    elements->semi_latus_rectum = p;
    elements->eccentricity = e;
    elements->mean_motion = n;
    elements->inclination = i;
    elements->longitude_of_ascending_node = an;
    elements->argument_of_periapsis = arg;
    elements->periapsis_time = periapsis_time;
}
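A usage sketch, again hypothetical: it assumes the header declaring struct kepler_elements and kepler_elements_from_state is included along with <math.h> and <stdio.h>. The state vector sits at the periapsis of a slightly inclined ellipse with mu = 1, so the elements should come out as approximately e = 0.44, p = 1.44, i = 0.1 rad, and a periapsis time equal to the epoch.

void kepler_elements_demo(void)
{
    double mu = 1.0;
    double pos[3] = { 1.0, 0.0, 0.0 };
    double vel[3] = { 0.0, 1.2*cos(0.1), 1.2*sin(0.1) };   /* tangential, speed 1.2 */
    struct kepler_elements el;

    kepler_elements_from_state(mu, pos, vel, 0.0, &el);

    printf("e = %g  p = %g  i = %g  an = %g  arg = %g  t_p = %g\n",
           el.eccentricity, el.semi_latus_rectum, el.inclination,
           el.longitude_of_ascending_node, el.argument_of_periapsis,
           el.periapsis_time);
}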
bool Foam::solidParticle::move
(
    trackingData& td,
    const scalar trackTime
)
{
    td.switchProcessor = false;
    td.keepParticle = true;

    const polyBoundaryMesh& pbMesh = mesh_.boundaryMesh();

    scalar tEnd = (1.0 - stepFraction())*trackTime;
    scalar dtMax = tEnd;

    while (td.keepParticle && !td.switchProcessor && tEnd > SMALL)
    {
        if (debug)
        {
            Info<< "Time = " << mesh_.time().timeName()
                << " trackTime = " << trackTime
                << " tEnd = " << tEnd
                << " stepFraction() = " << stepFraction() << endl;
        }

        // set the lagrangian time-step
        scalar dt = min(dtMax, tEnd);

        // remember which cell the parcel is in
        // since this will change if a face is hit
        label cellI = cell();

        dt *= trackToFace(position() + dt*U_, td);

        tEnd -= dt;
        stepFraction() = 1.0 - tEnd/trackTime;

        cellPointWeight cpw(mesh_, position(), cellI, face());
        scalar rhoc = td.rhoInterp().interpolate(cpw);
        vector Uc = td.UInterp().interpolate(cpw);
        scalar nuc = td.nuInterp().interpolate(cpw);

        scalar rhop = td.cloud().rhop();
        scalar magUr = mag(Uc - U_);

        // Schiller-Naumann-type drag correction, 1 + 0.15 Re^0.687
        scalar ReFunc = 1.0;
        scalar Re = magUr*d_/nuc;
        if (Re > 0.01)
        {
            ReFunc += 0.15*pow(Re, 0.687);
        }

        // Drag relaxation coefficient
        scalar Dc = (24.0*nuc/d_)*ReFunc*(3.0/4.0)*(rhoc/(d_*rhop));

        // Semi-implicit update: drag plus buoyancy-corrected gravity
        U_ = (U_ + dt*(Dc*Uc + (1.0 - rhoc/rhop)*td.g()))/(1.0 + dt*Dc);

        if (onBoundary() && td.keepParticle)
        {
            if (isA<processorPolyPatch>(pbMesh[patch(face())]))
            {
                td.switchProcessor = true;
            }
        }
    }

    return td.keepParticle;
}
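The velocity update above is what results from treating the drag implicitly over the Lagrangian step. Starting from dU/dt = Dc*(Uc - U) + (1 - rhoc/rhop)*g and evaluating the drag with the end-of-step velocity,

    (Unew - U)/dt = Dc*(Uc - Unew) + (1 - rhoc/rhop)*g

which rearranges to Unew = (U + dt*(Dc*Uc + (1 - rhoc/rhop)*g))/(1 + dt*Dc), the expression assigned to U_. The implicit treatment keeps the update stable even when dt*Dc is large, e.g. for small particles where Dc grows rapidly with decreasing diameter.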