void Foam::cyclicACMIFvPatchField<Type>::updateInterfaceMatrix
(
    gpuField<Type>& result,
    const gpuField<Type>& psiInternal,
    const scalargpuField& coeffs,
    const Pstream::commsTypes
) const
{
    // Only the coupled (overlap) contribution is applied here; the blanked
    // fraction of the ACMI patch is handled by the non-overlap patch field.
    const labelgpuList& nbrCoupledCells =
        cyclicACMIPatch_.cyclicACMIPatch().neighbPatch().getFaceCells();

    // Gather internal-field values behind the neighbour patch faces
    gpuField<Type> nbrField(psiInternal, nbrCoupledCells);

    // Apply the coupled-patch transformation tensors to the gathered values
    transformCoupleField(nbrField);

    // AMI-interpolate from the neighbour face distribution onto this patch
    nbrField = cyclicACMIPatch_.interpolate(nbrField);

    // Combine the interpolated values with the boundary coefficients and
    // accumulate into the matrix-vector product (device-side kernel)
    matrixPatchOperation
    (
        this->patch().index(),
        result,
        this->patch().boundaryMesh().mesh().lduAddr(),
        matrixInterfaceFunctor<Type>
        (
            coeffs.data(),
            nbrField.data()
        )
    );
}
void Foam::jumpCyclicAMIFvPatchField<Type>::updateInterfaceMatrix
(
    gpuField<Type>& result,
    const gpuField<Type>& psiInternal,
    const scalargpuField& coeffs,
    const Pstream::commsTypes
) const
{
    // Internal-field values behind the neighbouring AMI patch faces
    const labelgpuList& nbrCells =
        this->cyclicAMIPatch().cyclicAMIPatch().neighbPatch().getFaceCells();

    gpuField<Type> nbrField(psiInternal, nbrCells);

    // AMI interpolation onto this side; use this patch's internal values to
    // stabilise faces whose interpolation weights are below the threshold
    if (this->cyclicAMIPatch().applyLowWeightCorrection())
    {
        nbrField = this->cyclicAMIPatch().interpolate
        (
            nbrField,
            this->patchInternalField()()
        );
    }
    else
    {
        nbrField = this->cyclicAMIPatch().interpolate(nbrField);
    }

    // The jump is applied only when operating on the original solution field
    // (not on intermediate fields such as solver residual corrections)
    if (&psiInternal == &this->internalField())
    {
        gpuField<Type> jumpField(this->jump());

        // The neighbour side sees the jump with opposite sign
        if (!this->cyclicAMIPatch().owner())
        {
            jumpField *= -1.0;
        }

        nbrField -= jumpField;
    }

    // Apply the coupled-patch transformation tensors
    this->transformCoupleField(nbrField);

    // Combine with the boundary coefficients and accumulate into the result
    matrixPatchOperation
    (
        this->patch().index(),
        result,
        this->patch().boundaryMesh().mesh().lduAddr(),
        matrixInterfaceFunctor<Type>
        (
            coeffs.data(),
            nbrField.data()
        )
    );
}
void epsilonLowReWallFunctionFvPatchScalarField::calculate ( const turbulenceModel& turbulence, const gpuList<scalar>& cornerWeights, const fvPatch& patch, scalargpuField& G, scalargpuField& epsilon ) { const label patchi = patch.index(); const scalargpuField& y = turbulence.y()[patchi]; const scalar Cmu25 = pow025(Cmu_); const scalar Cmu75 = pow(Cmu_, 0.75); const tmp<volScalarField> tk = turbulence.k(); const volScalarField& k = tk(); const tmp<scalargpuField> tnuw = turbulence.nu(patchi); const scalargpuField& nuw = tnuw(); const tmp<scalargpuField> tnutw = turbulence.nut(patchi); const scalargpuField& nutw = tnutw(); const fvPatchVectorField& Uw = turbulence.U().boundaryField()[patchi]; const scalargpuField magGradUw(mag(Uw.snGrad())); matrixPatchOperation ( patchi, epsilon, patch.boundaryMesh().mesh().lduAddr(), EpsilonLowReCalculateEpsilonFunctor ( yPlusLam_, Cmu25, Cmu75, kappa_, cornerWeights.data(), y.data(), k.getField().data(), nuw.data() ) ); matrixPatchOperation ( patchi, G, patch.boundaryMesh().mesh().lduAddr(), EpsilonLowReCalculateGFunctor ( Cmu25, kappa_, cornerWeights.data(), y.data(), k.getField().data(), nuw.data(), nutw.data(), magGradUw.data() ) ); }
void Foam::processorFvPatchField<Type>::updateInterfaceMatrix
(
    gpuField<Type>& result,
    const gpuField<Type>&,
    const scalargpuField& coeffs,
    const Pstream::commsTypes commsType
) const
{
    // Contribution already applied this solver sweep — nothing to do.
    if (this->updatedMatrix())
    {
        return;
    }

    if (commsType == Pstream::nonBlocking && !Pstream::floatTransfer)
    {
        // Fast path: consume the non-blocking receive started earlier
        // (presumably by initInterfaceMatrixUpdate — not visible here).
        if
        (
            outstandingRecvRequest_ >= 0
         && outstandingRecvRequest_ < Pstream::nRequests()
        )
        {
            // Block until the receive into receiveBuf_ has completed.
            UPstream::waitRequest(outstandingRecvRequest_);
        }

        // Recv finished so assume sending finished as well.
        outstandingSendRequest_ = -1;
        outstandingRecvRequest_ = -1;

        // Consume straight from receiveBuf_: copy the host-side receive
        // buffer to the device, then apply the coupled transformation.
        gpuReceiveBuf_ = receiveBuf_;
        transformCoupleField(gpuReceiveBuf_);

        // Multiply the field by coefficients and add into the result
        matrixPatchOperation
        (
            this->patch().index(),
            result,
            this->patch().boundaryMesh().mesh().lduAddr(),
            processorFvPatchFunctor<Type>
            (
                coeffs.data(),
                gpuReceiveBuf_.data()
            )
        );
    }
    else
    {
        // Blocking/scheduled (or float-transfer) path: receive the
        // neighbour values synchronously via the processor patch.
        gpuField<Type> pnf
        (
            procPatch_.compressedReceive<Type>(commsType, this->size())()
        );

        // Transform according to the transformation tensor
        transformCoupleField(pnf);

        // Multiply the field by coefficients and add into the result
        matrixPatchOperation
        (
            this->patch().index(),
            result,
            this->patch().boundaryMesh().mesh().lduAddr(),
            processorFvPatchFunctor<Type>
            (
                coeffs.data(),
                pnf.data()
            )
        );
    }

    // Mark the contribution as applied; const_cast needed because the
    // interface method is const while the flag is mutable per-sweep state.
    const_cast<processorFvPatchField<Type>&>(*this).updatedMatrix() = true;
}
void processorFvPatchField<scalar>::updateInterfaceMatrix
(
    scalargpuField& result,
    const scalargpuField&,
    const scalargpuField& coeffs,
    const direction,
    const Pstream::commsTypes commsType
) const
{
    // Scalar specialization: no transformCoupleField call is needed
    // (scalars are unaffected by the coupled transformation tensors).
    if (this->updatedMatrix())
    {
        return;
    }

    if (commsType == Pstream::nonBlocking && !Pstream::floatTransfer)
    {
        // Fast path: consume the non-blocking receive started earlier.
        if
        (
            outstandingRecvRequest_ >= 0
         && outstandingRecvRequest_ < Pstream::nRequests()
        )
        {
            // Block until the receive into scalarReceiveBuf_ has completed.
            UPstream::waitRequest(outstandingRecvRequest_);
        }

        // Recv finished so assume sending finished as well.
        outstandingSendRequest_ = -1;
        outstandingRecvRequest_ = -1;

        // Copy the host-side receive buffer to the device.
        scalargpuReceiveBuf_ = scalarReceiveBuf_;

        // Consume straight from scalarReceiveBuf_.
        // CPU reference implementation replaced by the device kernel below:
        /*
        forAll(faceCells, elemI)
        {
            result[faceCells[elemI]] -= coeffs[elemI]*scalarReceiveBuf_[elemI];
        }
        */
        // Multiply the field by coefficients and add into the result
        matrixPatchOperation(this->patch().index(),
            result,
            this->patch().boundaryMesh().mesh().lduAddr(),
            processorFvPatchScalarFunctor(coeffs.data(),scalargpuReceiveBuf_.data()));
    }
    else
    {
        // Blocking/scheduled (or float-transfer) path: synchronous receive.
        scalargpuField pnf
        (
            procPatch_.compressedReceive<scalar>(commsType, this->size())()
        );

        // CPU reference implementation replaced by the device kernel below:
        /*
        forAll(faceCells, elemI)
        {
            result[faceCells[elemI]] -= coeffs[elemI]*pnf[elemI];
        }
        */
        // Multiply the field by coefficients and add into the result
        matrixPatchOperation(this->patch().index(),
            result,
            this->patch().boundaryMesh().mesh().lduAddr(),
            processorFvPatchScalarFunctor(coeffs.data(),pnf.data()));
    }

    // Mark the contribution as applied; const_cast needed because the
    // interface method is const while the flag is mutable per-sweep state.
    const_cast<processorFvPatchField<scalar>&>(*this).updatedMatrix() = true;
}