// Adds the scaled constraint residual for the master/slave node pair
// directly into the global residual vector.
void
NodalConstraint::computeResidual(NumericVector<Number> & residual)
{
  const Real scaling = _var.scalingFactor();
  _qp = 0; // single evaluation point at the node

  // Contribution at the master node's dof
  dof_id_type & master_dof = _var.nodalDofIndex();
  residual.add(master_dof, scaling * computeQpResidual(Moose::Master));

  // Contribution at the slave node's dof
  dof_id_type & slave_dof = _var.nodalDofIndexNeighbor();
  residual.add(slave_dof, scaling * computeQpResidual(Moose::Slave));
}
// Assembles the constraint residual for a node-on-face constraint: the
// master side accumulates one entry per master test function, while the
// slave side's single nodal entry is replaced outright.
void NodeFaceConstraint::computeResidual()
{
  // Residual block for the constrained (slave) variable
  DenseVector<Number> & re = _assembly.residualBlock(_var.number());
  // Residual block for the master variable on the neighboring side
  DenseVector<Number> & neighbor_re = _assembly.residualBlockNeighbor(_master_var.number());

  _qp=0;

  // Accumulate the master-side contribution for every master test function
  for (_i=0; _i<_test_master.size(); _i++)
    neighbor_re(_i) += computeQpResidual(Moose::Master);

  _i=0;
  // Note the '=' (not '+='): the slave node's residual entry is overwritten,
  // replacing whatever was previously assembled there
  re(0) = computeQpResidual(Moose::Slave);
}
// Computes the on-diagonal Jacobian block of an AD integrated BC by reading
// the dual-number derivatives of the weighted quadrature-point residual.
void ADIntegratedBCTempl<T, compute_stage>::computeJacobian()
{
  DenseMatrix<Number> & ke = _assembly.jacobianBlock(_var.number(), _var.number());
  _local_ke.resize(ke.m(), ke.n());
  _local_ke.zero();

  // Offset into the dual-number derivative array where this variable's
  // per-element dof derivatives begin
  size_t ad_offset = _var.number() * _sys.getMaxVarNDofsPerElem();

  for (_qp = 0; _qp < _qrule->n_points(); _qp++)
    for (_i = 0; _i < _test.size(); _i++)
    {
      DualReal residual = computeQpResidual();
      // Entry (i,j) is the derivative of the JxW/coord-weighted residual
      // with respect to the variable's j-th dof
      for (_j = 0; _j < _var.phiSize(); ++_j)
        _local_ke(_i, _j) += (_ad_JxW[_qp] * _coord[_qp] * residual).derivatives()[ad_offset + _j];
    }

  ke += _local_ke;

  if (_has_diag_save_in)
  {
    unsigned int rows = ke.m();
    DenseVector<Number> diag(rows);
    for (unsigned int i = 0; i < rows; i++)
      diag(i) = _local_ke(i, i);
    // Serialize writes to the shared auxiliary solution vectors
    Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
    for (unsigned int i = 0; i < _diag_save_in.size(); i++)
      _diag_save_in[i]->sys().solution().add_vector(diag, _diag_save_in[i]->dofIndices());
  }
}
void ODETimeKernel::computeResidual() { DenseVector<Number> & re = _assembly.residualBlock(_var.number(), Moose::KT_TIME); for (_i = 0; _i < _var.order(); _i++) re(_i) += computeQpResidual(); }
void AlphaCED::computeResidual() { DenseVector<Number> & re = _assembly.residualBlock(_var.number()); for (_i = 0; _i < re.size(); _i++) re(_i) += computeQpResidual(); }
// Computes the off-diagonal nodal-BC Jacobian with respect to coupled
// variable 'jvar' by reading dual-number derivatives, and caches the
// contributions for later addition to the tagged matrices.
void ADNodalBCTempl<T, compute_stage>::computeOffDiagJacobian(unsigned int jvar)
{
  if (jvar == _var.number())
    computeJacobian();
  else
  {
    // Offset into the dual-number derivative array where jvar's nodal dof
    // derivatives begin
    auto ad_offset = jvar * _sys.getMaxVarNDofsPerNode();
    auto residual = computeQpResidual();
    const std::vector<dof_id_type> & cached_rows = _var.dofIndices();

    // Note: this only works for Lagrange variables...
    dof_id_type cached_col = _current_node->dof_number(_sys.number(), jvar, 0);

    // Cache the user's computeQpJacobian() value for later use.
    // NOTE(review): the derivative index uses the row index i with jvar's
    // offset — presumably component i of the residual pairs with jvar's i-th
    // nodal dof; confirm for non-scalar couplings
    for (auto tag : _matrix_tags)
      if (_sys.hasMatrix(tag))
        for (size_t i = 0; i < cached_rows.size(); ++i)
          _fe_problem.assembly(0).cacheJacobianContribution(
              cached_rows[i],
              cached_col,
              conversionHelper(residual, i).derivatives()[ad_offset + i],
              tag);
  }
}
// Computes the Jacobian block coupling this BC's variable to 'jvar' by
// reading dual-number derivatives; delegates to computeJacobian() for the
// on-diagonal case.
void ADIntegratedBCTempl<T, compute_stage>::computeJacobianBlock(MooseVariableFEBase & jvar)
{
  auto jvar_num = jvar.number();
  if (jvar_num == _var.number())
    computeJacobian();
  else
  {
    DenseMatrix<Number> & ke = _assembly.jacobianBlock(_var.number(), jvar_num);
    // Silently skip when the coupled variable's face shape-function count
    // does not match the block's column count
    if (jvar.phiFaceSize() != ke.n())
      return;

    // Offset into the dual-number derivative array where jvar's per-element
    // dof derivatives begin
    size_t ad_offset = jvar_num * _sys.getMaxVarNDofsPerElem();
    for (_i = 0; _i < _test.size(); _i++)
      for (_qp = 0; _qp < _qrule->n_points(); _qp++)
      {
        DualReal residual = _ad_JxW[_qp] * _coord[_qp] * computeQpResidual();
        for (_j = 0; _j < jvar.phiFaceSize(); _j++)
          ke(_i, _j) += residual.derivatives()[ad_offset + _j];
      }
  }
}
void FaceFaceConstraint::computeResidual() { DenseVector<Number> & re = _assembly.residualBlock(_var.number()); for (_qp = 0; _qp < _qrule->n_points(); _qp++) for (_i = 0; _i < _test.size(); _i++) re(_i) += _JxW_lm[_qp] * _coord[_qp] * computeQpResidual(); }
// Assembles the scalar-kernel residual, one entry per component of the ODE
// variable, through the tagged-vector assembly helpers.
void
ODEKernel::computeResidual()
{
  // Zero/resize _local_re and bind the active residual tags
  prepareVectorTag(_assembly, _var.number());

  // One contribution per scalar component
  for (_i = 0; _i < _var.order(); ++_i)
    _local_re(_i) += computeQpResidual();

  // Push _local_re into every active tagged residual vector
  accumulateTaggedLocalResidual();
}
// Evaluates the nodal BC residual and writes it into every active tagged
// residual vector.
void ADNodalBCTempl<T, compute_stage>::computeResidual()
{
  // Dof indices for the (possibly multi-component) variable at this node
  const std::vector<dof_id_type> & dof_indices = _var.dofIndices();
  auto residual = computeQpResidual();
  // Nodal BCs *set* (overwrite) the residual entries rather than accumulate
  for (auto tag_id : _vector_tags)
    if (_sys.hasVector(tag_id))
      for (size_t i = 0; i < dof_indices.size(); ++i)
        _sys.getVector(tag_id).set(dof_indices[i], conversionHelper(residual, i));
}
// Assembles the constraint residual coupling one or more slave nodes to the
// master node(s), using either a penalty or a kinematic formulation.
//
// @param residual The current global residual vector; read in the kinematic
//                 branch to transfer each slave node's residual to the masters.
void
NodalConstraint::computeResidual(NumericVector<Number> & residual)
{
  // Default to a single unit weight when exactly one master node was given
  if ((_weights.size() == 0) && (_master_node_vector.size() == 1))
    _weights.push_back(1.0);

  // Bind by const reference: the original copied both dof-index vectors on
  // every call
  const std::vector<dof_id_type> & masterdof = _var.dofIndices();
  const std::vector<dof_id_type> & slavedof = _var.dofIndicesNeighbor();

  DenseVector<Number> re(masterdof.size());
  DenseVector<Number> neighbor_re(slavedof.size());
  re.zero();
  neighbor_re.zero();

  for (_i = 0; _i < slavedof.size(); ++_i)
  {
    for (_j = 0; _j < masterdof.size(); ++_j)
    {
      switch (_formulation)
      {
        case Moose::Penalty:
          re(_j) += computeQpResidual(Moose::Master) * _var.scalingFactor();
          neighbor_re(_i) += computeQpResidual(Moose::Slave) * _var.scalingFactor();
          break;
        case Moose::Kinematic:
        {
          // Braced so the local's initialization cannot be jumped over.
          // Transfer the current residual of the slave node to the master nodes
          const Real res = residual(slavedof[_i]);
          re(_j) += res * _weights[_j];
          neighbor_re(_i) += -res / _master_node_vector.size() + computeQpResidual(Moose::Slave);
          break;
        }
      }
    }
  }

  _assembly.cacheResidualNodes(re, masterdof);
  _assembly.cacheResidualNodes(neighbor_re, slavedof);
}
// Accumulates the Dirac point-source residual over the physical points that
// landed in the current element.
void DiracKernel::computeResidual()
{
  DenseVector<Number> & re = _assembly.residualBlock(_var.number());
  for (_qp = 0; _qp < _qrule->n_points(); _qp++)
  {
    _current_point=_physical_point[_qp];
    // Only points this particular kernel owns contribute
    if (isActiveAtPoint(_current_elem, _current_point))
    {
      for (_i = 0; _i < _test.size(); _i++)
        re(_i) += computeQpResidual();
    }
  }
}
// Evaluates the elemental residual with one dof of variable 'varnum'
// perturbed, for finite-difference Jacobian approximation.
//
// @param varnum             Number of the variable to perturb
// @param perturbationj      Which elemental dof of that variable to perturb
// @param perturbation_scale Relative size of the perturbation
// @param perturbation       Out: actual perturbation applied (set by
//                           computePerturbedElemValues)
// @return the perturbed local residual vector
DenseVector<Number> FDKernel::perturbedResidual(unsigned int varnum, unsigned int perturbationj, Real perturbation_scale, Real& perturbation)
{
  DenseVector<Number> re;
  re.resize(_var.dofIndices().size());
  re.zero();

  MooseVariable& var = _sys.getVariable(_tid,varnum);
  // Perturb the j-th elemental dof; the applied perturbation size is
  // reported back through 'perturbation'
  var.computePerturbedElemValues(perturbationj,perturbation_scale,perturbation);

  precalculateResidual();
  for (_i = 0; _i < _test.size(); _i++)
    for (_qp = 0; _qp < _qrule->n_points(); _qp++)
      re(_i) += _JxW[_qp] * _coord[_qp] * computeQpResidual();

  // Restore the unperturbed values so later evaluations are unaffected
  var.restoreUnperturbedElemValues();
  return re;
}
// Computes either the element-side or the neighbor-side residual
// contribution of the constraint (selected by 'type') and accumulates it
// into the matching residual block using the constraint quadrature.
void
ElemElemConstraint::computeElemNeighResidual(Moose::DGResidualType type)
{
  // Initialize at declaration instead of the original's uninitialized bool
  // assigned through an if/else
  const bool is_elem = (type == Moose::Element);

  const VariableTestValue & test_space = is_elem ? _test : _test_neighbor;
  DenseVector<Number> & re = is_elem ? _assembly.residualBlock(_var.number())
                                     : _assembly.residualBlockNeighbor(_var.number());

  for (_qp = 0; _qp < _constraint_q_point.size(); _qp++)
    for (_i = 0; _i < test_space.size(); _i++)
      re(_i) += _constraint_weight[_qp] * computeQpResidual(type);
}
// Computes the on-diagonal nodal-BC Jacobian by reading dual-number
// derivatives and caches the contributions for the tagged matrices.
void ADNodalBCTempl<T, compute_stage>::computeJacobian()
{
  // Offset into the dual-number derivative array where this variable's
  // nodal dof derivatives begin
  auto ad_offset = _var.number() * _sys.getMaxVarNDofsPerNode();
  auto residual = computeQpResidual();
  const std::vector<dof_id_type> & cached_rows = _var.dofIndices();

  // Cache the user's computeQpJacobian() value for later use.
  for (auto tag : _matrix_tags)
    if (_sys.hasMatrix(tag))
      for (size_t i = 0; i < cached_rows.size(); ++i)
        _fe_problem.assembly(0).cacheJacobianContribution(
            cached_rows[i],
            cached_rows[i],
            conversionHelper(residual, i).derivatives()[ad_offset + i],
            tag);
}
// Sets the nodal BC residual into the supplied global residual vector and,
// when requested, mirrors the value into save-in auxiliary variables.
void
NodalBC::computeResidual(NumericVector<Number> & residual)
{
  // Nothing to do when the variable has no dof at this node
  if (!_var.isNodalDefined())
    return;

  dof_id_type & dof_idx = _var.nodalDofIndex();
  _qp = 0;
  const Real res = computeQpResidual();
  residual.set(dof_idx, res);

  if (_has_save_in)
  {
    Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
    for (const auto & var : _save_in)
      var->sys().solution().set(var->nodalDofIndex(), res);
  }
}
void NodalKernel::computeResidual() { if (_var.isNodalDefined()) { dof_id_type & dof_idx = _var.nodalDofIndex(); _qp = 0; Real res = computeQpResidual(); _assembly.cacheResidualContribution(dof_idx, res, Moose::KT_NONTIME); if (_has_save_in) { Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx); for (unsigned int i=0; i<_save_in.size(); i++) _save_in[i]->sys().solution().add(_save_in[i]->nodalDofIndex(), res); } } }
// Computes the nodal residual, caches it under all active vector tags, and
// optionally mirrors it into save-in auxiliary variables.
void NodalKernel::computeResidual()
{
  if (_var.isNodalDefined())
  {
    dof_id_type & dof_idx = _var.nodalDofIndex();
    _qp = 0;
    Real res = computeQpResidual();
    // Cache the contribution under every active vector tag
    _assembly.cacheResidualContribution(dof_idx, res, _vector_tags);
    if (_has_save_in)
    {
      // Serialize writes to the shared auxiliary solution vectors
      Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
      for (const auto & var : _save_in)
        var->sys().solution().add(var->nodalDofIndex(), res);
    }
  }
}
void IntegratedBC::computeResidual() { prepareVectorTag(_assembly, _var.number()); for (_qp = 0; _qp < _qrule->n_points(); _qp++) for (_i = 0; _i < _test.size(); _i++) _local_re(_i) += _JxW[_qp] * _coord[_qp] * computeQpResidual(); accumulateTaggedLocalResidual(); if (_has_save_in) { Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx); for (unsigned int i = 0; i < _save_in.size(); i++) _save_in[i]->sys().solution().add_vector(_local_re, _save_in[i]->dofIndices()); } }
// Integrates the boundary-condition residual over the face quadrature and
// adds it to the variable's residual block; optionally mirrors the local
// contribution into save-in auxiliary variables.
void IntegratedBC::computeResidual()
{
  DenseVector<Number> & re = _assembly.residualBlock(_var.number());
  _local_re.resize(re.size());
  _local_re.zero();

  for (_qp = 0; _qp < _qrule->n_points(); _qp++)
    for (_i = 0; _i < _test.size(); _i++)
      _local_re(_i) += _JxW[_qp]*_coord[_qp]*computeQpResidual();

  re += _local_re;

  if (_has_save_in)
  {
    // Serialize writes to the shared auxiliary solution vectors
    Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
    for (unsigned int i=0; i<_save_in.size(); i++)
      _save_in[i]->sys().solution().add_vector(_local_re, _save_in[i]->dofIndices());
  }
}
// Assembles the axisymmetric (RZ) stress-divergence residual for the
// displacement component '_component', optionally applying a volume-averaged
// (B-bar style) shape-gradient correction to mitigate volumetric locking.
void StressDivergenceRZ::computeResidual()
{
  DenseVector<Number> & re = _assembly.residualBlock(_var.number());
  _local_re.resize(re.size());
  _local_re.zero();

  if (_volumetric_locking_correction)
  {
    // calculate volume averaged value of shape function derivative
    _avg_grad_test.resize(_test.size());
    for (_i = 0; _i < _test.size(); _i++)
    {
      _avg_grad_test[_i].resize(2);
      _avg_grad_test[_i][_component] = 0.0;
      for (_qp = 0; _qp < _qrule->n_points(); _qp++)
      {
        // In RZ, the radial component (0) picks up the extra test/r hoop
        // term; the axial component is the plain gradient
        if (_component == 0)
          _avg_grad_test[_i][_component] += (_grad_test[_i][_qp](_component) + _test[_i][_qp] / _q_point[_qp](0)) * _JxW[_qp] * _coord[_qp];
        else
          _avg_grad_test[_i][_component] += _grad_test[_i][_qp](_component) * _JxW[_qp] * _coord[_qp];
      }
      // Divide the element integral by the element volume to get the average
      _avg_grad_test[_i][_component] /= _current_elem_volume;
    }
  }

  precalculateResidual();
  for (_i = 0; _i < _test.size(); _i++)
    for (_qp = 0; _qp < _qrule->n_points(); _qp++)
      _local_re(_i) += _JxW[_qp] * _coord[_qp] * computeQpResidual();

  re += _local_re;

  if (_has_save_in)
  {
    // Serialize writes to the shared auxiliary solution vectors
    Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
    for (const auto & var : _save_in)
      var->sys().solution().add_vector(_local_re, var->dofIndices());
  }
}
void Kernel::computeResidual() { DenseVector<Number> & re = _assembly.residualBlock(_var.number()); _local_re.resize(re.size()); _local_re.zero(); precalculateResidual(); for (_i = 0; _i < _test.size(); _i++) for (_qp = 0; _qp < _qrule->n_points(); _qp++) _local_re(_i) += _JxW[_qp] * _coord[_qp] * computeQpResidual(); re += _local_re; if (_has_save_in) { Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx); for (const auto & var : _save_in) var->sys().solution().add_vector(_local_re, var->dofIndices()); } }
void NodalBC::computeResidual() { if (_var.isNodalDefined()) { dof_id_type & dof_idx = _var.nodalDofIndex(); _qp = 0; Real res = 0; res = computeQpResidual(); for (auto tag_id : _vector_tags) if (_sys.hasVector(tag_id)) _sys.getVector(tag_id).set(dof_idx, res); if (_has_save_in) { Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx); for (unsigned int i = 0; i < _save_in.size(); i++) _save_in[i]->sys().solution().set(_save_in[i]->nodalDofIndex(), res); } } }
// Integrates the eigen-kernel residual, scaled by the reciprocal of the
// current eigenvalue, and adds it to the variable's residual block.
void EigenKernel::computeResidual()
{
  DenseVector<Number> & re = _assembly.residualBlock(_var.number());
  _local_re.resize(re.size());
  _local_re.zero();

  mooseAssert(*_eigenvalue != 0.0, "Can't divide by zero eigenvalue in EigenKernel!");
  // Eigen formulations scale this kernel's contribution by 1/eigenvalue
  Real one_over_eigen = 1.0 / *_eigenvalue;
  for (_i = 0; _i < _test.size(); _i++)
    for (_qp = 0; _qp < _qrule->n_points(); _qp++)
      _local_re(_i) += _JxW[_qp] * _coord[_qp] * one_over_eigen * computeQpResidual();

  re += _local_re;

  if (_has_save_in)
  {
    // Serialize writes to the shared auxiliary solution vectors
    Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
    for (const auto & var : _save_in)
      var->sys().solution().add_vector(_local_re, var->dofIndices());
  }
}
void DiracKernel::computeResidual() { DenseVector<Number> & re = _assembly.residualBlock(_var.number()); const std::vector<unsigned int> * multiplicities = _drop_duplicate_points ? NULL : &_local_dirac_kernel_info.getPoints()[_current_elem].second; unsigned int local_qp = 0; Real multiplicity = 1.0; for (_qp = 0; _qp < _qrule->n_points(); _qp++) { _current_point = _physical_point[_qp]; if (isActiveAtPoint(_current_elem, _current_point)) { if (!_drop_duplicate_points) multiplicity = (*multiplicities)[local_qp++]; for (_i = 0; _i < _test.size(); _i++) re(_i) += multiplicity * computeQpResidual(); } } }
void StressDivergenceTensors::computeResidual() { DenseVector<Number> & re = _assembly.residualBlock(_var.number()); _local_re.resize(re.size()); _local_re.zero(); if (_volumetric_locking_correction) computeAverageGradientTest(); precalculateResidual(); for (_i = 0; _i < _test.size(); ++_i) for (_qp = 0; _qp < _qrule->n_points(); ++_qp) _local_re(_i) += _JxW[_qp] * _coord[_qp] * computeQpResidual(); re += _local_re; if (_has_save_in) { Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx); for (const auto & var : _save_in) var->sys().solution().add_vector(_local_re, var->dofIndices()); } }
// Fully-upwinded assembly of the Q2P porepressure Darcy flux: computes the
// mobility-free residual, multiplies upstream (outflow) nodes by their nodal
// mobility, then redistributes the outflow mass to the downstream nodes so
// fluid mass is conserved.  Residual and/or Jacobian contributions are added
// depending on compute_res / compute_jac.
void Q2PPorepressureFlux::upwind(bool compute_res, bool compute_jac, unsigned int jvar)
{
  // Only the porepressure and saturation variables couple into this Jacobian
  if (compute_jac && !(jvar == _var.number() || jvar == _sat_var))
    return;

  // calculate the mobility values and their derivatives
  prepareNodalValues();

  // compute the residual without the mobility terms
  // Even if we are computing the jacobian we still need this
  // in order to see which nodes are upwind and which are downwind
  DenseVector<Number> & re = _assembly.residualBlock(_var.number());
  _local_re.resize(re.size());
  _local_re.zero();
  for (_i = 0; _i < _test.size(); _i++)
    for (_qp = 0; _qp < _qrule->n_points(); _qp++)
      _local_re(_i) += _JxW[_qp] * _coord[_qp] * computeQpResidual();

  // NOTE(review): the Jacobian block is fetched unconditionally; only the
  // resize/fill below is guarded by compute_jac
  DenseMatrix<Number> & ke = _assembly.jacobianBlock(_var.number(), jvar);
  if (compute_jac)
  {
    _local_ke.resize(ke.m(), ke.n());
    _local_ke.zero();
    for (_i = 0; _i < _test.size(); _i++)
      for (_j = 0; _j < _phi.size(); _j++)
        for (_qp = 0; _qp < _qrule->n_points(); _qp++)
          _local_ke(_i, _j) += _JxW[_qp] * _coord[_qp] * computeQpJac(jvar);
  }

  // Now perform the upwinding by multiplying the residuals at the
  // upstream nodes by their mobilities
  //
  // The residual for the kernel is the darcy flux.
  // This is
  // R_i = int{mobility*flux_no_mob} = int{mobility*grad(pot)*permeability*grad(test_i)}
  // for node i. where int is the integral over the element.
  // However, in fully-upwind, the first step is to take the mobility outside the
  // integral, which was done in the _local_re calculation above.
  //
  // NOTE: Physically _local_re(_i) is a measure of fluid flowing out of node i
  // If we had left in mobility, it would be exactly the mass flux flowing out of node i.
  //
  // This leads to the definition of upwinding:
  // ***
  // If _local_re(i) is positive then we use mobility_i. That is
  // we use the upwind value of mobility.
  // ***
  //
  // The final subtle thing is we must also conserve fluid mass: the total mass
  // flowing out of node i must be the sum of the masses flowing
  // into the other nodes.

  // FIRST:
  // this is a dirty way of getting around precision loss problems
  // and problems at steadystate where upwinding oscillates from
  // node-to-node causing nonconvergence.
  // I'm not sure if i actually need to do this in moose. Certainly
  // in cosflow it is necessary.
  // I will code a better algorithm if necessary
  bool reached_steady = true;
  for (unsigned int nodenum = 0; nodenum < _num_nodes; ++nodenum)
  {
    if (_local_re(nodenum) >= 1E-20)
    {
      reached_steady = false;
      break;
    }
  }
  // NOTE(review): this unconditionally overrides the scan above, so
  // reached_steady is always false from here on and the steady-state
  // detection is dead code — confirm this is intentional
  reached_steady = false;

  // DEFINE VARIABLES USED TO ENSURE MASS CONSERVATION
  // total mass out - used for mass conservation
  Real total_mass_out = 0;
  // total flux in
  Real total_in = 0;

  // the following holds derivatives of these
  std::vector<Real> dtotal_mass_out;
  std::vector<Real> dtotal_in;
  if (compute_jac)
  {
    dtotal_mass_out.assign(_num_nodes, 0);
    dtotal_in.assign(_num_nodes, 0);
  }

  // PERFORM THE UPWINDING!
  for (unsigned int nodenum = 0; nodenum < _num_nodes; ++nodenum)
  {
    if (_local_re(nodenum) >= 0 || reached_steady) // upstream node
    {
      if (compute_jac)
      {
        for (_j = 0; _j < _phi.size(); _j++)
          _local_ke(nodenum, _j) *= _mobility[nodenum];
        if (jvar == _var.number()) // deriv wrt P
          _local_ke(nodenum, nodenum) += _dmobility_dp[nodenum] * _local_re(nodenum);
        else // deriv wrt S
          _local_ke(nodenum, nodenum) += _dmobility_ds[nodenum] * _local_re(nodenum);
        for (_j = 0; _j < _phi.size(); _j++)
          dtotal_mass_out[_j] += _local_ke(nodenum, _j);
      }
      _local_re(nodenum) *= _mobility[nodenum];
      total_mass_out += _local_re(nodenum);
    }
    else
    {
      total_in -= _local_re(nodenum); // note the -= means the result is positive
      if (compute_jac)
        for (_j = 0; _j < _phi.size(); _j++)
          dtotal_in[_j] -= _local_ke(nodenum, _j);
    }
  }

  // CONSERVE MASS
  // proportion the total_mass_out mass to the inflow nodes, weighting by their _local_re values
  if (!reached_steady)
    for (unsigned int nodenum = 0; nodenum < _num_nodes; ++nodenum)
      if (_local_re(nodenum) < 0)
      {
        if (compute_jac)
          for (_j = 0; _j < _phi.size(); _j++)
          {
            _local_ke(nodenum, _j) *= total_mass_out / total_in;
            _local_ke(nodenum, _j) += _local_re(nodenum) * (dtotal_mass_out[_j] / total_in - dtotal_in[_j] * total_mass_out / total_in / total_in);
          }
        _local_re(nodenum) *= total_mass_out / total_in;
      }

  // ADD RESULTS TO RESIDUAL OR JACOBIAN
  if (compute_res)
  {
    re += _local_re;
    if (_has_save_in)
    {
      Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
      for (unsigned int i = 0; i < _save_in.size(); i++)
        _save_in[i]->sys().solution().add_vector(_local_re, _save_in[i]->dofIndices());
    }
  }

  if (compute_jac)
  {
    ke += _local_ke;
    if (_has_diag_save_in && jvar == _var.number())
    {
      const unsigned int rows = ke.m();
      DenseVector<Number> diag(rows);
      for (unsigned int i = 0; i < rows; i++)
        diag(i) = _local_ke(i, i);

      Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
      for (unsigned int i = 0; i < _diag_save_in.size(); i++)
        _diag_save_in[i]->sys().solution().add_vector(diag, _diag_save_in[i]->dofIndices());
    }
  }
}