/// Prepares the element loop: checks unsupported options, updates local FE
/// geometry data and hands the local quadrature points to all data imports.
void ConvectionDiffusionFE<TDomain>::
prep_elem_loop(const ReferenceObjectID roid, const int si)
{
//	the FE discretization offers no explicit (IMEX-type) terms
	if( m_imSourceExpl.data_given() ||
		m_imReactionExpl.data_given() ||
		m_imReactionRateExpl.data_given())
		UG_THROW("ConvectionDiffusionFE: Explicit terms not implemented.");

//	fetch the FE geometry for the requested space/order and update its local part
	TFEGeom& geo = GeomProvider<TFEGeom>::get(m_lfeID, m_quadOrder);
	try{
		geo.update_local(roid, m_lfeID, m_quadOrder);
	}UG_CATCH_THROW("ConvectionDiffusion::prep_elem_loop:"
					" Cannot update Finite Element Geometry.");

//	all imports are evaluated at the quadrature points; these local positions
//	stay fixed for the whole element loop
	static const int refDim = TElem::dim;
	const size_t nIP = geo.num_ip();

	m_imDiffusion.template    set_local_ips<refDim>(geo.local_ips(), nIP, false);
	m_imVelocity.template     set_local_ips<refDim>(geo.local_ips(), nIP, false);
	m_imFlux.template         set_local_ips<refDim>(geo.local_ips(), nIP, false);
	m_imSource.template       set_local_ips<refDim>(geo.local_ips(), nIP, false);
	m_imVectorSource.template set_local_ips<refDim>(geo.local_ips(), nIP, false);
	m_imReactionRate.template set_local_ips<refDim>(geo.local_ips(), nIP, false);
	m_imReaction.template     set_local_ips<refDim>(geo.local_ips(), nIP, false);
	m_imMassScale.template    set_local_ips<refDim>(geo.local_ips(), nIP, false);
	m_imMass.template         set_local_ips<refDim>(geo.local_ips(), nIP, false);
}
/// Prepares error-estimator evaluation for one element: with hanging nodes the
/// side integration points are element-dependent, so the P1 shape values are
/// (re-)cached here; otherwise they were already cached in the loop preparation.
void FV1InnerBoundaryElemDisc<TDomain>::
prep_err_est_elem(const LocalVector& u, GridObject* elem, const MathVector<dim> vCornerCoords[])
{
//	error estimator data object (its type was verified in prep_err_est_elem_loop)
	err_est_type* pErrEstData = dynamic_cast<err_est_type*>(this->m_spErrEstData.get());

//	without hanging nodes nothing is element-dependent here
	if (!TFVGeom::usesHangingNodes) return;

	static const int refDim = TElem::dim;
	ReferenceObjectID roid = elem->reference_object_id();

//	local side integration points of the (first) underlying estimator
	size_t nSideIPs;
	const MathVector<refDim>* pSideIPs;
	try
	{
		nSideIPs = pErrEstData->get(0)->num_side_ips(roid);
		pSideIPs = pErrEstData->get(0)->template side_local_ips<refDim>(roid);
	}
	UG_CATCH_THROW("Integration points for error estimator cannot be set.");

//	cache P1 shape function values at the side IPs
	typedef LagrangeP1<typename reference_element_traits<TElem>::reference_element_type> p1_space_type;
	p1_space_type trialSpace = Provider<p1_space_type>::get();
	m_shapeValues.resize(nSideIPs, trialSpace.num_sh());

	for (size_t ip = 0; ip < nSideIPs; ip++)
		trialSpace.shapes(m_shapeValues.shapesAtSideIP(ip), pSideIPs[ip]);
}
/// Initializes the Schur preconditioner for the given matrix operator:
/// checks requirements, derives the inner/skeleton slicing, builds the local
/// Schur complement and configures the skeleton solver.
/// @return true on success, false if a sub-step reports failure
bool SchurPrecond<TAlgebra>::
preprocess(SmartPtr<MatrixOperator<matrix_type, vector_type> > A)
{
	try
	{
		UG_DLOG(SchurDebug, 2, "\n% Initializing SCHUR precond: \n");

		m_pA = A;
		if(!check_requirements())
			return false;

	//	determine the slicing used by the SchurComplementOperator
		std::vector<slice_desc_type> skeletonMark;
		get_skeleton_slicing(A, skeletonMark);

	//	create and initialize the local Schur complement object
		if(!create_and_init_local_schur_complement(A, skeletonMark))
			return false;

	//	configure the solver of the skeleton system
		init_skeleton_solver();

		UG_DLOG(SchurDebug, 1, "\n% 'SchurPrecond::init()' done!\n");
		return true;
	}
	UG_CATCH_THROW("SchurPrecond::" << __FUNCTION__ << " failed");

	// not reachable: UG_CATCH_THROW rethrows; kept to satisfy the compiler
	return false;
} /* end 'SchurPrecond::preprocess()' */
/// Prepares the error-estimator element loop: validates the ErrEstData object
/// once (so later calls can skip the check), distributes the estimator's local
/// integration points to the imports (side IPs for flux-type data, element IPs
/// for volume-type data) and caches the P1 shape values at all IPs.
void ConvectionDiffusionFE<TDomain>::
prep_err_est_elem_loop(const ReferenceObjectID roid, const int si)
{
//	validate the error estimator data object here, so prep_err_est_elem()
//	and compute_err_est_A_elem() can rely on it without re-checking
	if (this->m_spErrEstData.get() == NULL)
	{
		UG_THROW("No ErrEstData object has been given to this ElemDisc!");
	}

	err_est_type* pErrEstData = dynamic_cast<err_est_type*>(this->m_spErrEstData.get());
	if (!pErrEstData)
	{
		UG_THROW("Dynamic cast to SideAndElemErrEstData failed." << std::endl
				<< "Make sure you handed the correct type of ErrEstData to this discretization.");
	}

//	local integration points (on the sides and in the element interior)
	static const int refDim = TElem::dim;
	size_t nSideIPs, nElemIPs;
	const MathVector<refDim>* pSideIPs;
	const MathVector<refDim>* pElemIPs;
	try
	{
		nSideIPs = pErrEstData->num_all_side_ips(roid);
		nElemIPs = pErrEstData->num_elem_ips(roid);
		pSideIPs = pErrEstData->template side_local_ips<refDim>(roid);
		pElemIPs = pErrEstData->template elem_local_ips<refDim>(roid);

	//	both pointers are NULL if TElem is not of the same dim as TDomain
		if (!pSideIPs || !pElemIPs) return;
	}
	UG_CATCH_THROW("Integration points for error estimator cannot be set.");

//	flux-type imports are needed on the sides, volume-type ones in the interior
	m_imDiffusion.template    set_local_ips<refDim>(pSideIPs, nSideIPs, false);
	m_imVelocity.template     set_local_ips<refDim>(pSideIPs, nSideIPs, false);
	m_imFlux.template         set_local_ips<refDim>(pSideIPs, nSideIPs, false);
	m_imSource.template       set_local_ips<refDim>(pElemIPs, nElemIPs, false);
	m_imVectorSource.template set_local_ips<refDim>(pSideIPs, nSideIPs, false);
	m_imReactionRate.template set_local_ips<refDim>(pElemIPs, nElemIPs, false);
	m_imReaction.template     set_local_ips<refDim>(pElemIPs, nElemIPs, false);
	m_imMassScale.template    set_local_ips<refDim>(pElemIPs, nElemIPs, false);
	m_imMass.template         set_local_ips<refDim>(pElemIPs, nElemIPs, false);

//	cache P1 shape function values at all local IPs
	typedef LagrangeP1<typename reference_element_traits<TElem>::reference_element_type> p1_space_type;
	p1_space_type trialSpace = Provider<p1_space_type>::get();
	m_shapeValues.resize(nElemIPs, nSideIPs, trialSpace.num_sh());

	for (size_t ip = 0; ip < nElemIPs; ip++)
		trialSpace.shapes(m_shapeValues.shapesAtElemIP(ip), pElemIPs[ip]);
	for (size_t ip = 0; ip < nSideIPs; ip++)
		trialSpace.shapes(m_shapeValues.shapesAtSideIP(ip), pSideIPs[ip]);
}
/// Evaluates the exported vector field (and optionally its derivatives w.r.t.
/// the unknowns) at the given local integration points.
/// Each component d of the vector is interpolated from the d-th function of
/// the local vector u using that function's trial space.
/// @param vValue    [out] interpolated vector values at the IPs
/// @param vLocIP    local integration points (refDim coordinates)
/// @param nip       number of integration points
/// @param u         local solution vector (accessed via this export's map)
/// @param bDeriv    if true, derivatives w.r.t. the unknowns are stored in vvvDeriv
void VectorDataExport<dim>::eval_and_deriv(MathVector<dim> vValue[],
		const MathVector<dim> vGlobIP[],
		number time, int si,
		GridObject* elem,
		const MathVector<dim> vCornerCoords[],
		const MathVector<refDim> vLocIP[],
		const size_t nip,
		LocalVector* u,
		bool bDeriv,
		int s,
		std::vector<std::vector<MathVector<dim> > > vvvDeriv[],
		const MathMatrix<refDim, dim>* vJT) const
{
//	reference object id
	const ReferenceObjectID roid = elem->reference_object_id();

//	access local vector by map
	u->access_by_map(this->map());

	if(bDeriv)
		this->set_zero(vvvDeriv, nip);

//	interpolate each vector component separately
	for(size_t d = 0; d < dim; ++d)
	{
	//	local finite element id of the d-th function
		const LFEID& lfeID = this->function_group().local_finite_element_id(d);

	//	request for trial space
		try{
			const LocalShapeFunctionSet<refDim>& rTrialSpace
				= LocalFiniteElementProvider::get<refDim>(roid, lfeID);

		//	memory for shapes
			std::vector<number> vShape;

		//	loop ips
			for(size_t ip = 0; ip < nip; ++ip)
			{
			//	evaluate shapes at ip
				rTrialSpace.shapes(vShape, vLocIP[ip]);

			//	compute value at ip
			//	BUGFIX: reset only component d; assigning 0.0 to the whole
			//	MathVector here would wipe out the components computed in
			//	earlier iterations of the d-loop, leaving only the last one
				vValue[ip][d] = 0.0;
				for(size_t sh = 0; sh < vShape.size(); ++sh)
					vValue[ip][d] += (*u)(d, sh) * vShape[sh];

			//	store derivative (d-th component depends only on function d)
				if(bDeriv)
					for(size_t sh = 0; sh < vShape.size(); ++sh)
						vvvDeriv[ip][d][sh][d] = vShape[sh];
			}
		}
		UG_CATCH_THROW("VectorDataExport: Trial space missing, Reference Object: "
						<<roid<<", Trial Space: "<<lfeID<<", refDim="<<refDim);
	}
}
/// Prepares the element loop: updates the local FE geometry, registers all
/// requested boundary subsets at the geometry (forcing creation of boundary
/// faces on geo.update) and re-registers the number-data imports with their
/// linearized-defect callbacks.
void NeumannBoundaryFE<TDomain>::
prep_elem_loop(const ReferenceObjectID roid, const int si)
{
	update_subset_groups();
	m_si = si;

//	update local FE geometry data for this element type and order
	TFEGeom& geo = GeomProvider<TFEGeom>::get(m_lfeID, m_quadOrder);
	try{
		geo.update_local(roid, m_lfeID, m_quadOrder);
	}
	UG_CATCH_THROW("NeumannBoundaryFE::prep_elem_loop:"
					" Cannot update Finite Element Geometry.");

//	register all boundary subsets of data active on this inner subset; this
//	forces creation of the corresponding boundary faces in geo.update
	for(size_t i = 0; i < m_vNumberData.size(); ++i)
	{
		if(!m_vNumberData[i].InnerSSGrp.contains(m_si)) continue;
		for(size_t s = 0; s < m_vNumberData[i].BndSSGrp.size(); ++s)
			geo.add_boundary_subset(m_vNumberData[i].BndSSGrp[s]);
	}
	for(size_t i = 0; i < m_vBNDNumberData.size(); ++i)
	{
		if(!m_vBNDNumberData[i].InnerSSGrp.contains(m_si)) continue;
		for(size_t s = 0; s < m_vBNDNumberData[i].BndSSGrp.size(); ++s)
			geo.add_boundary_subset(m_vBNDNumberData[i].BndSSGrp[s]);
	}
	for(size_t i = 0; i < m_vVectorData.size(); ++i)
	{
		if(!m_vVectorData[i].InnerSSGrp.contains(m_si)) continue;
		for(size_t s = 0; s < m_vVectorData[i].BndSSGrp.size(); ++s)
			geo.add_boundary_subset(m_vVectorData[i].BndSSGrp[s]);
	}

//	imports are re-registered below
	this->clear_imports();

	ReferenceObjectID id = geometry_traits<TElem>::REFERENCE_OBJECT_ID;

//	set linearized-defect callbacks for the number-data imports (rhs part)
	for(size_t i = 0; i < m_vNumberData.size(); ++i)
	{
		if(!m_vNumberData[i].InnerSSGrp.contains(m_si)) continue;
		m_vNumberData[i].import.set_fct(id,
										&m_vNumberData[i],
										&NumberData::template lin_def<TElem, TFEGeom>);
		this->register_import(m_vNumberData[i].import);
		m_vNumberData[i].import.set_rhs_part();
	}
}
/// Prepares the error-estimator element loop: validates the ErrEstData object
/// once (so the per-element routines can skip those checks) and, in the
/// non-hanging-node case, caches the P1 shape values at the side IPs.
void FV1InnerBoundaryElemDisc<TDomain>::
prep_err_est_elem_loop(const ReferenceObjectID roid, const int si)
{
//	validate the error estimator data object here, so prep_err_est_elem()
//	and compute_err_est_A_elem() can rely on it without re-checking
	if (this->m_spErrEstData.get() == NULL)
	{
		UG_THROW("No ErrEstData object has been given to this ElemDisc!");
	}

	err_est_type* pErrEstData = dynamic_cast<err_est_type*>(this->m_spErrEstData.get());
	if (!pErrEstData)
	{
		UG_THROW("Dynamic cast to MultipleSideAndElemErrEstData failed." << std::endl
				<< "Make sure you handed the correct type of ErrEstData to this discretization.");
	}

	if (!pErrEstData->equal_side_order())
	{
		UG_THROW("The underlying error estimator data objects of this discretization's "
				"error estimator do not all have the same integration orders. This case "
				"is not supported by the implementation. If you need it, implement!");
	}

	if (pErrEstData->num() < 1)
	{
		UG_THROW("No underlying error estimator data objects present. No IPs can be determined.");
	}

//	with hanging nodes the side IPs are element-dependent; shape values are
//	then cached per element in prep_err_est_elem() instead
	if (TFVGeom::usesHangingNodes) return;

	static const int refDim = TElem::dim;

//	local side integration points of the (first) underlying estimator
	size_t nSideIPs;
	const MathVector<refDim>* pSideIPs;
	try
	{
		nSideIPs = pErrEstData->get(0)->num_side_ips(roid);
		pSideIPs = pErrEstData->get(0)->template side_local_ips<refDim>(roid);
	}
	UG_CATCH_THROW("Integration points for error estimator cannot be set.");

//	cache P1 shape function values at the side IPs
	typedef LagrangeP1<typename reference_element_traits<TElem>::reference_element_type> p1_space_type;
	p1_space_type trialSpace = Provider<p1_space_type>::get();
	m_shapeValues.resize(nSideIPs, trialSpace.num_sh());

	for (size_t ip = 0; ip < nSideIPs; ip++)
		trialSpace.shapes(m_shapeValues.shapesAtSideIP(ip), pSideIPs[ip]);
}
/// Per-element preparation for error estimation: updates the FE geometry and
/// sets the estimator's global integration points on all imports (side IPs for
/// flux-type data, element IPs for volume-type data).
void ConvectionDiffusionFE<TDomain>::
prep_err_est_elem(const LocalVector& u, GridObject* elem, const MathVector<dim> vCornerCoords[])
{
//	error estimator data (type was verified in prep_err_est_elem_loop)
	err_est_type* pErrEstData = dynamic_cast<err_est_type*>(this->m_spErrEstData.get());

//	update FE geometry for the current element
	TFEGeom& geo = GeomProvider<TFEGeom>::get(m_lfeID, m_quadOrder);
	try{
		geo.update(elem, vCornerCoords);
	}
	UG_CATCH_THROW("ConvectionDiffusion::prep_elem:"
					" Cannot update Finite Element Geometry.");

	ReferenceObjectID roid = elem->reference_object_id();

//	query the estimator's global integration points
	size_t nSideIPs, nElemIPs;
	const MathVector<dim>* pSideIPs;
	const MathVector<dim>* pElemIPs;
	try
	{
		nSideIPs = pErrEstData->num_all_side_ips(roid);
		nElemIPs = pErrEstData->num_elem_ips(roid);
		pSideIPs = pErrEstData->all_side_global_ips(elem, vCornerCoords);
		pElemIPs = pErrEstData->elem_global_ips(elem, vCornerCoords);
	}
	UG_CATCH_THROW("Global integration points for error estimator cannot be set.");

	m_imDiffusion.set_global_ips(&pSideIPs[0], nSideIPs);
	m_imVelocity.set_global_ips(&pSideIPs[0], nSideIPs);
	m_imFlux.set_global_ips(&pSideIPs[0], nSideIPs);
	m_imSource.set_global_ips(&pElemIPs[0], nElemIPs);
	m_imVectorSource.set_global_ips(&pSideIPs[0], nSideIPs);
	m_imReactionRate.set_global_ips(&pElemIPs[0], nElemIPs);
	m_imReaction.set_global_ips(&pElemIPs[0], nElemIPs);
	m_imMassScale.set_global_ips(&pElemIPs[0], nElemIPs);
	m_imMass.set_global_ips(&pElemIPs[0], nElemIPs);
}
/// Per-element preparation for the Crouzeix-Raviart FV discretization:
/// updates the FV geometry and passes the SCVF/SCV integration points to the
/// data imports (local IPs only when hanging nodes make them element-dependent,
/// global IPs always).
void ConvectionDiffusionFVCR<TDomain>::
prep_elem(const LocalVector& u, GridObject* elem, const ReferenceObjectID roid, const MathVector<dim> vCornerCoords[])
{
//	Update Geometry for this element
	static TFVGeom& geo = GeomProvider<TFVGeom>::get();
	try{
		geo.update(elem, vCornerCoords, &(this->subset_handler()));
	}
	UG_CATCH_THROW("ConvectionDiffusion::prep_elem:"
					" Cannot update Finite Volume Geometry.");

//	set local positions: only needed with hanging nodes, since then the
//	sub-control volume (face) IPs differ from element to element
	if(TFVGeom::usesHangingNodes)
	{
		static const int refDim = TElem::dim;
		// flux-type data lives on the SCV faces ...
		m_imDiffusion.template set_local_ips<refDim>(geo.scvf_local_ips(), geo.num_scvf_ips());
		m_imVelocity.template set_local_ips<refDim>(geo.scvf_local_ips(), geo.num_scvf_ips());
		// ... volume-type data on the SCVs
		m_imSource.template set_local_ips<refDim>(geo.scv_local_ips(), geo.num_scv_ips());
		m_imReactionRate.template set_local_ips<refDim>(geo.scv_local_ips(), geo.num_scv_ips());
		m_imReaction.template set_local_ips<refDim>(geo.scv_local_ips(), geo.num_scv_ips());
		m_imMassScale.template set_local_ips<refDim>(geo.scv_local_ips(), geo.num_scv_ips());
		m_imMass.template set_local_ips<refDim>(geo.scv_local_ips(), geo.num_scv_ips());
		/* if(m_spConvShape.valid())
			if(!m_spConvShape->template set_geometry_type<TFVGeom>(geo))
				UG_THROW("ConvectionDiffusion::prep_elem_loop:"
						" Cannot init upwind for element type.");*/
	}

//	set global positions
	const MathVector<dim>* vSCVFip = geo.scvf_global_ips();
	const size_t numSCVFip = geo.num_scvf_ips();
	const MathVector<dim>* vSCVip = geo.scv_global_ips();
	const size_t numSCVip = geo.num_scv_ips();
	m_imDiffusion. set_global_ips(vSCVFip, numSCVFip);
	m_imVelocity. set_global_ips(vSCVFip, numSCVFip);
	// NOTE(review): the following imports are deliberately left disabled here;
	// confirm against the FV1 variant whether they should be active for FVCR
	// m_imFlux. set_global_ips(vSCVFip, numSCVFip);
	m_imSource. set_global_ips(vSCVip, numSCVip);
	// m_imVectorSource. set_global_ips(vSCVFip, numSCVFip);
	m_imReactionRate. set_global_ips(vSCVip, numSCVip);
	// m_imReactionRateExpl. set_global_ips(vSCVip, numSCVip);
	// m_imReactionExpl. set_global_ips(vSCVip, numSCVip);
	// m_imSourceExpl. set_global_ips(vSCVip, numSCVip);
	m_imReaction. set_global_ips(vSCVip, numSCVip);
	// m_imMassScale. set_global_ips(vSCVip, numSCVip);
	m_imMass. set_global_ips(vSCVip, numSCVip);
}
/// Per-element preparation: refreshes the finite volume geometry for the
/// current element; nothing else is element-dependent in this discretization.
void FV1InnerBoundaryElemDisc<TDomain>::
prep_elem(const LocalVector& u, GridObject* elem, const ReferenceObjectID roid, const MathVector<dim> vCornerCoords[])
{
	static TFVGeom& geo = GeomProvider<TFVGeom>::get();
	try
	{
		geo.update(elem, vCornerCoords, &(this->subset_handler()));
	}
	UG_CATCH_THROW("FV1InnerBoundaryElemDisc::prep_elem: "
					"Cannot update Finite Volume Geometry.");
}
/// Adds the mass-part contributions (mass scale * u + mass) of this element to
/// the volume term of the error estimator, scaled by 'scale'.
/// note: mass parts only enter volume term
void ConvectionDiffusionFE<TDomain>::
compute_err_est_M_elem(const LocalVector& u, GridObject* elem, const MathVector<dim> vCornerCoords[], const number& scale)
{
	// the loop preparation guarantees a valid cast here
	err_est_type* err_est_data = dynamic_cast<err_est_type*>(this->m_spErrEstData.get());

	if (err_est_data->surface_view().get() == NULL) {UG_THROW("Error estimator has NULL surface view.");}
	MultiGrid* pErrEstGrid = (MultiGrid*) (err_est_data->surface_view()->subset_handler()->multi_grid());

	// find the estimator's storage object associated with this element;
	// exactly one match is expected
	typename MultiGrid::traits<typename SideAndElemErrEstData<TDomain>::elem_type>::secure_container elem_list;
	pErrEstGrid->associated_elements_sorted(elem_list, (TElem*) elem);
	if (elem_list.size() != 1)
		UG_THROW ("Mismatch of numbers of sides in 'ConvectionDiffusionFE::compute_err_est_elem'");

	// request geometry
	// NOTE(review): uses the default GeomProvider::get() while other methods of
	// this class use get(m_lfeID, m_quadOrder); geo is only needed for num_sh()
	// here, but confirm this is intentional
	static const TFEGeom& geo = GeomProvider<TFEGeom>::get();

	// loop integration points
	try
	{
		for (size_t ip = 0; ip < err_est_data->num_elem_ips(elem->reference_object_id()); ip++)
		{
			number total = 0.0;

			// mass scale //
			// contributes massScale(ip) * u(ip), u interpolated via cached shapes
			if (m_imMassScale.data_given())
			{
				number val = 0.0;
				for (size_t sh = 0; sh < geo.num_sh(); sh++)
					val += u(_C_,sh) * m_shapeValues.shapeAtElemIP(sh,ip);
				total += m_imMassScale[ip] * val;
			}

			// mass //
			// solution-independent mass term
			if (m_imMass.data_given())
			{
				total += m_imMass[ip];
			}

			(*err_est_data)(elem_list[0],ip) += scale * total;
		}
	}
	UG_CATCH_THROW("Values for the error estimator could not be assembled at every IP." << std::endl
			<< "Maybe wrong type of ErrEstData object? This implementation needs: SideAndElemErrEstData.");
}
/// Per-element preparation: updates the FE geometry including the boundary
/// faces of the registered boundary subsets, then extracts the boundary
/// integration points for every number data active on the current subset.
void NeumannBoundaryFE<TDomain>::
prep_elem(const LocalVector& u, GridObject* elem, const ReferenceObjectID roid, const MathVector<dim> vCornerCoords[])
{
	TFEGeom& geo = GeomProvider<TFEGeom>::get(m_lfeID, m_quadOrder);
	try{
		geo.update_boundary_faces(elem, vCornerCoords, m_quadOrder, &(this->subset_handler()));
	}
	UG_CATCH_THROW("NeumannBoundaryFE::prep_elem: "
					"Cannot update Finite Element Geometry.");

	for(size_t i = 0; i < m_vNumberData.size(); ++i)
	{
		if(!m_vNumberData[i].InnerSSGrp.contains(m_si)) continue;
		m_vNumberData[i].template extract_bip<TElem, TFEGeom>(geo);
	}
}
/// Per-element preparation: updates the FE geometry for the current element
/// and sets the global quadrature points on all data imports.
void ConvectionDiffusionFE<TDomain>::
prep_elem(const LocalVector& u, GridObject* elem, const ReferenceObjectID roid, const MathVector<dim> vCornerCoords[])
{
//	update FE geometry for the current element
	TFEGeom& geo = GeomProvider<TFEGeom>::get(m_lfeID, m_quadOrder);
	try{
		geo.update(elem, vCornerCoords);
	}
	UG_CATCH_THROW("ConvectionDiffusion::prep_elem:"
					" Cannot update Finite Element Geometry.");

//	every import is evaluated at the same quadrature points
	const size_t nIP = geo.num_ip();
	m_imDiffusion.set_global_ips(geo.global_ips(), nIP);
	m_imVelocity.set_global_ips(geo.global_ips(), nIP);
	m_imFlux.set_global_ips(geo.global_ips(), nIP);
	m_imSource.set_global_ips(geo.global_ips(), nIP);
	m_imVectorSource.set_global_ips(geo.global_ips(), nIP);
	m_imReactionRate.set_global_ips(geo.global_ips(), nIP);
	m_imReaction.set_global_ips(geo.global_ips(), nIP);
	m_imMassScale.set_global_ips(geo.global_ips(), nIP);
	m_imMass.set_global_ips(geo.global_ips(), nIP);
}
/// Adds this inner-boundary side's flux contributions to the error estimator:
/// the solution is interpolated at each side IP with the cached shape values,
/// the user flux density is evaluated there, and the result is subtracted
/// from / added to the estimator entries of the 'from'/'to' functions.
void FV1InnerBoundaryElemDisc<TDomain>::
compute_err_est_A_elem(const LocalVector& u, GridObject* elem, const MathVector<dim> vCornerCoords[], const number& scale)
{
	// get error estimator (type was verified in prep_err_est_elem_loop)
	err_est_type* err_est_data = dynamic_cast<err_est_type*>(this->m_spErrEstData.get());

	// cast this elem to side_type of error estimator
	typename SideAndElemErrEstData<TDomain>::side_type* side =
		dynamic_cast<typename SideAndElemErrEstData<TDomain>::side_type*>(elem);
	if (!side)
	{
		UG_THROW("Error in DependentNeumannBoundaryFV1<TDomain>::compute_err_est_A_elem():\n"
				"Element that error assembling routine is called for has the wrong type.");
	}

	// global IPs of this side
	ReferenceObjectID roid = side->reference_object_id();
	size_t numSideIPs = err_est_data->get(0)->num_side_ips(roid);
	MathVector<dim>* globIPs = err_est_data->get(0)->side_global_ips(side, vCornerCoords);

	// loop IPs
	try
	{
		for (size_t sip = 0; sip < numSideIPs; sip++)
		{
			// get values of u at ip (interpolate via cached P1 shape values)
			size_t nFct = u.num_fct();
			std::vector<LocalVector::value_type> uAtIP = std::vector<LocalVector::value_type>(nFct);
			for (size_t fct = 0; fct < nFct; fct++)
			{
				uAtIP[fct] = 0.0;
				for (size_t sh = 0; sh < m_shapeValues.num_sh(); sh++)
					uAtIP[fct] += u(fct,sh) * m_shapeValues.shapeAtSideIP(sh,sip);
			}

			// ip coordinates
			const MathVector<dim>& ipCoords = globIPs[sip];

			// elem subset
			int si = this->subset_handler().get_subset_index(side);

			// evaluate the user-supplied flux density at this IP
			FluxCond fc;
			if (!fluxDensityFct(uAtIP, elem, ipCoords, si, fc))
			{
				UG_THROW("FV1InnerBoundaryElemDisc::compute_err_est_A_elem:"
						" Call to fluxDensityFct did not succeed.");
			}

			// subtract from estimator values
			// sign must be opposite of that in add_def_A_elem
			// as the difference between this and the actual flux of the unknown is calculated
			for (size_t j=0; j<fc.flux.size(); j++)
			{
				if (fc.from[j] != InnerBoundaryConstants::_IGNORE_)
					(*err_est_data->get(this->m_fctGrp[fc.from[j]])) (side,sip) -= scale * fc.flux[j];
				if (fc.to[j] != InnerBoundaryConstants::_IGNORE_)
					(*err_est_data->get(this->m_fctGrp[fc.to[j]])) (side,sip) += scale * fc.flux[j];
			}
		}
	}
	UG_CATCH_THROW("Values for the error estimator could not be assembled at every IP." << std::endl
			<< "Maybe wrong type of ErrEstData object? This implementation needs: MultipleSideAndElemErrEstData.");
}
/// Evaluates the gradient of the exported (scalar) function at the given local
/// IPs: local reference-space gradients are assembled from the trial space and
/// transformed to global space via the inverse transposed Jacobian. If vJT is
/// not supplied by the caller, the Jacobians are computed here from the corner
/// coordinates.
void GradientDataExport<dim>::eval_and_deriv(MathVector<dim> vValue[],
		const MathVector<dim> vGlobIP[],
		number time, int si,
		GridObject* elem,
		const MathVector<dim> vCornerCoords[],
		const MathVector<refDim> vLocIP[],
		const size_t nip,
		LocalVector* u,
		bool bDeriv,
		int s,
		std::vector<std::vector<MathVector<dim> > > vvvDeriv[],
		const MathMatrix<refDim, dim>* vJT) const
{
	// abbreviation for component (this export handles a single function)
	static const int _C_ = 0;

	// reference object id
	const ReferenceObjectID roid = elem->reference_object_id();

	// local finite element id
	const LFEID& lfeID = this->function_group().local_finite_element_id(_C_);

	// access local vector by map
	u->access_by_map(this->map());

	// request for trial space
	try{
		const LocalShapeFunctionSet<refDim>& rTrialSpace = LocalFiniteElementProvider::get<refDim>(roid, lfeID);

		// Reference Mapping: if the caller did not pass transposed Jacobians,
		// compute them for all IPs and reseat vJT onto the local buffer
		// (vJTtmp must outlive the loop below)
		MathMatrix<dim, refDim> JTInv;
		std::vector<MathMatrix<refDim, dim> > vJTtmp;
		if(!vJT){
			DimReferenceMapping<refDim, dim>& map = ReferenceMappingProvider::get<refDim, dim>(roid, vCornerCoords);
			vJTtmp.resize(nip);
			map.jacobian_transposed(&vJTtmp[0], vLocIP, nip);
			vJT = &vJTtmp[0];
		}

		// storage for shape function gradients at ip
		std::vector<MathVector<refDim> > vLocGrad;
		MathVector<refDim> locGrad;

		// loop ips
		for(size_t ip = 0; ip < nip; ++ip)
		{
			// evaluate the local shape gradients at ip
			rTrialSpace.grads(vLocGrad, vLocIP[ip]);

			// compute grad at ip: sum_sh u_sh * grad(N_sh) in reference space
			VecSet(locGrad, 0.0);
			for(size_t sh = 0; sh < vLocGrad.size(); ++sh)
				VecScaleAppend(locGrad, (*u)(_C_, sh), vLocGrad[sh]);

			// transform to global space: grad = JT^{-1} * locGrad
			Inverse(JTInv, vJT[ip]);
			MatVecMult(vValue[ip], JTInv, locGrad);

			// store derivative w.r.t. the unknowns (linear in u)
			if(bDeriv)
				for(size_t sh = 0; sh < vLocGrad.size(); ++sh)
					MatVecMult(vvvDeriv[ip][_C_][sh], JTInv, vLocGrad[sh]);
		}
	}
	UG_CATCH_THROW("GradientDataExport: Trial space missing, Reference Object: "
			<<roid<<", Trial Space: "<<lfeID<<", refDim="<<refDim);
}
/// Writes the whole grid of the given domain (all subsets, si = -1) into a
/// single *.vtu file in VTK unstructured-grid XML format.
/// NOTE(review): this excerpt appears truncated — the second try-block below
/// has no matching catch/UG_CATCH_THROW and the function's closing brace is
/// missing here; confirm against the full source file.
void VTKOutput<TDim>::
print(const char* filename, Domain<TDim>& domain)
{
	// get the grid associated to the solution
	MultiGrid& grid = *domain.grid();
	MGSubsetHandler& sh = *domain.subset_handler();

	// attach help indices (per-vertex index used while writing the piece)
	typedef ug::Attachment<int> AVrtIndex;
	AVrtIndex aVrtIndex;
	Grid::VertexAttachmentAccessor<AVrtIndex> aaVrtIndex;
	grid.attach_to_vertices(aVrtIndex);
	aaVrtIndex.access(grid, aVrtIndex);

	// get rank of process
	int rank = 0;
#ifdef UG_PARALLEL
	rank = pcl::ProcRank();
#endif

	// si = -1 means: write all subsets into one piece
	const int si = -1;

	// get name for *.vtu file
	std::string name;
	try{
		vtu_filename(name, filename, rank, si, sh.num_subsets()-1, -1);
	}
	UG_CATCH_THROW("VTK::print_subset: Can not write vtu - file.");

	// open the file
	try
	{
		VTKFileWriter File(name.c_str());

		// header
		File << VTKFileWriter::normal;
		File << "<?xml version=\"1.0\"?>\n";
		File << "<VTKFile type=\"UnstructuredGrid\" version=\"0.1\" byte_order=\"";
		if(IsLittleEndian()) File << "LittleEndian";
		else File << "BigEndian";
		File << "\">\n";

		// opening the grid
		File << " <UnstructuredGrid>\n";

		// get dimension of grid-piece
		int dim = DimensionOfSubsets(sh);

		// write piece of grid
		if(dim >= 0)
		{
			try{
				write_grid_piece<MGSubsetHandler>
					(File, aaVrtIndex, domain.position_accessor(), grid, sh, si, dim);
			}
			UG_CATCH_THROW("VTK::print: Can not write Subset: "<<si);
		}
		else
		{
			// if dim < 0, some is wrong with grid, except no element is in the grid
			if( ((si < 0) && grid.num<Vertex>() != 0) ||
				((si >=0) && sh.num<Vertex>(si) != 0))
			{
				UG_THROW("VTK::print: Dimension of grid/subset not"
						" detected correctly although grid objects present.");
			}
			write_empty_grid_piece(File);
		}

		// write closing xml tags
		File << " </UnstructuredGrid>\n";
		File << "</VTKFile>\n";

		// detach help indices
		grid.detach_from_vertices(aVrtIndex);
	}
/// Exports the concentration value (and optionally its derivatives w.r.t. the
/// unknowns) at the given local IPs. A fast path reuses the shape values
/// already cached in the FE geometry when the requested IPs are exactly the
/// geometry's quadrature points; otherwise shapes are evaluated on the fly.
void ConvectionDiffusionFE<TDomain>::
ex_value(number vValue[], const MathVector<dim> vGlobIP[], number time, int si, const LocalVector& u, GridObject* elem, const MathVector<dim> vCornerCoords[], const MathVector<TFEGeom::dim> vLocIP[], const size_t nip, bool bDeriv, std::vector<std::vector<number> > vvvDeriv[])
{
//	request geometry
	const TFEGeom& geo = GeomProvider<TFEGeom>::get(m_lfeID, m_quadOrder);

//	reference element
	typedef typename reference_element_traits<TElem>::reference_element_type ref_elem_type;

//	reference dimension
	static const int refDim = reference_element_traits<TElem>::dim;

//	reference object id
	static const ReferenceObjectID roid = ref_elem_type::REFERENCE_OBJECT_ID;

//	FE ip: pointer identity means the caller asks for the geometry's own
//	quadrature points, so the cached shapes can be used directly
	if(vLocIP == geo.local_ips())
	{
	//	Loop ips
		for(size_t ip = 0; ip < geo.num_ip(); ++ip)
		{
		//	compute concentration at ip
			vValue[ip] = 0.0;
			for(size_t sh = 0; sh < geo.num_sh(); ++sh)
				vValue[ip] += u(_C_, sh) * geo.shape(ip, sh);

		//	compute derivative w.r.t. to unknowns iff needed
		//	(value is linear in u, so the derivative is just the shape value)
			if(bDeriv)
				for(size_t sh = 0; sh < geo.num_sh(); ++sh)
					vvvDeriv[ip][_C_][sh] = geo.shape(ip, sh);
		}
	}
//	general case: arbitrary local IPs, evaluate the trial space directly
	else
	{
	//	request for trial space
		try{
			const LocalShapeFunctionSet<refDim>& rTrialSpace = LocalFiniteElementProvider::get<refDim>(roid, m_lfeID);

		//	number of shape functions
			const size_t numSH = rTrialSpace.num_sh();

		//	storage for shape function at ip
			std::vector<number> vShape(numSH);

		//	loop ips
			for(size_t ip = 0; ip < nip; ++ip)
			{
			//	evaluate at shapes at ip
				rTrialSpace.shapes(vShape, vLocIP[ip]);

			//	compute concentration at ip
				vValue[ip] = 0.0;
				for(size_t sh = 0; sh < numSH; ++sh)
					vValue[ip] += u(_C_, sh) * vShape[sh];

			//	compute derivative w.r.t. to unknowns iff needed
			//	\todo: maybe store shapes directly in vvvDeriv
				if(bDeriv)
					for(size_t sh = 0; sh < numSH; ++sh)
						vvvDeriv[ip][_C_][sh] = vShape[sh];
			}
		}
		UG_CATCH_THROW("ConvectionDiffusion::ex_value: trial space missing.");
	}
}
/// Builds the boundary faces (BF) of this Crouzeix-Raviart FV geometry for all
/// requested boundary subsets: determines the subset index of each element
/// side, and for each side lying in a requested subset fills a BF with the
/// data of the associated SCV plus CR shape values, gradients and mapping data.
void DimCRFVGeometry<TDim, TWorldDim>::
update_boundary_faces(GridObject* pElem, const MathVector<worldDim>* vCornerCoords, const ISubsetHandler* ish)
{
//	get grid
	Grid& grid = *(ish->grid());

//	vector of subset indices of side
	std::vector<int> vSubsetIndex;

//	get subset indices for sides (i.e. edge in 2d, faces in 3d)
	if(dim == 1) {
		std::vector<Vertex*> vVertex;
		CollectVertices(vVertex, grid, pElem);
		vSubsetIndex.resize(vVertex.size());
		for(size_t i = 0; i < vVertex.size(); ++i)
			vSubsetIndex[i] = ish->get_subset_index(vVertex[i]);
	}
	if(dim == 2) {
		std::vector<Edge*> vEdges;
		CollectEdgesSorted(vEdges, grid, pElem);
		vSubsetIndex.resize(vEdges.size());
		for(size_t i = 0; i < vEdges.size(); ++i)
			vSubsetIndex[i] = ish->get_subset_index(vEdges[i]);
	}
	if(dim == 3) {
		std::vector<Face*> vFaces;
		CollectFacesSorted(vFaces, grid, pElem);
		vSubsetIndex.resize(vFaces.size());
		for(size_t i = 0; i < vFaces.size(); ++i)
			vSubsetIndex[i] = ish->get_subset_index(vFaces[i]);
	}

	try{
//		const DimReferenceElement<dim>& rRefElem
//			= ReferenceElementProvider::get<dim>(m_roid);

		DimReferenceMapping<dim, worldDim>& rMapping = ReferenceMappingProvider::get<dim, worldDim>(m_roid);
		rMapping.update(vCornerCoords);

		const LocalShapeFunctionSet<dim>& TrialSpace =
			LocalFiniteElementProvider::get<dim>(m_roid, LFEID(LFEID::CROUZEIX_RAVIART, dim, 1));

	//	loop requested subset
		typename std::map<int, std::vector<BF> >::iterator it;
		for (it=m_mapVectorBF.begin() ; it != m_mapVectorBF.end(); ++it)
		{
		//	get subset index
			const int bndIndex = (*it).first;

		//	get vector of BF for element
			std::vector<BF>& vBF = (*it).second;

		//	clear vector (rebuilt from scratch for the current element)
			vBF.clear();

		//	current number of bf
			size_t curr_bf = 0;

		//	loop sides of element
			for(size_t side = 0; side < vSubsetIndex.size(); ++side)
			{
			//	skip non boundary sides
				if(vSubsetIndex[side] != bndIndex) continue;

			//	number of corners of side
			//	const int coOfSide = rRefElem.num(dim-1, side, 0); todo use somewhere?

			//	resize vector
				vBF.resize(curr_bf + 1);

			//	fill BF with data from associated SCV (CR: one SCV per side)
				BF& bf = vBF[curr_bf];
				bf.nodeID = m_vSCV[side].nodeID;
				bf.localIP = m_vSCV[side].vLocIP;
				bf.globalIP = m_vSCV[side].vGlobIP;
				bf.Normal = m_vSCV[side].Normal;

			//	compute volume (area of the face equals the normal's length)
				bf.Vol = VecTwoNorm(bf.Normal);

				bf.numCo = m_vSCV[side].numCorners-1;

			//	compute shapes and grads
				bf.numSH = TrialSpace.num_sh();
				TrialSpace.shapes(&(bf.vShape[0]), bf.localIP);
				TrialSpace.grads(&(bf.vLocalGrad[0]), bf.localIP);

			//	get reference mapping
				rMapping.jacobian_transposed_inverse(bf.JtInv, bf.localIP);
				bf.detj = rMapping.sqrt_gram_det(bf.localIP);

			//	compute global gradients
				for(size_t sh = 0 ; sh < bf.num_sh(); ++sh)
					MatVecMult(bf.vGlobalGrad[sh], bf.JtInv, bf.vLocalGrad[sh]);

			//	increase curr_bf
				++curr_bf;
			}
		}
	}
	UG_CATCH_THROW("DimCRFVGeometry: update failed.");
}
/// Updates the global geometric data (IPs, normals, volumes) of the
/// Crouzeix-Raviart FV geometry for the given element. Local data are
/// refreshed first if the reference object type changed.
void DimCRFVGeometry<TDim, TWorldDim>::
update_geometric_data(GridObject* pElem, const MathVector<worldDim>* vCornerCoords, const ISubsetHandler* ish)
{
//	If already update for this element, do nothing
	if(m_pElem == pElem) return;
	else m_pElem = pElem;

//	refresh local data, if different roid given
	if(m_roid != pElem->reference_object_id())
	{
	//	remember new roid
		m_roid = (ReferenceObjectID) pElem->reference_object_id();

	//	update local data
		update_local_data();
	}

//	get reference element
	try{
		const DimReferenceElement<dim>& rRefElem = ReferenceElementProvider::get<dim>(m_roid);

	//	compute barycenter coordinates (arithmetic mean of the corners)
		globalBary = vCornerCoords[0];
		for (size_t j=1;j<rRefElem.num(0);j++){
			globalBary+=vCornerCoords[j];
		}
		globalBary*=1./(number)rRefElem.num(0);

	//	compute global informations for scvf:
	//	each scvf spans the corners of its associated sub-entity plus the barycenter
		for(size_t i = 0; i < num_scvf(); ++i)
		{
			for (size_t j=0;j<m_vSCVF[i].numCo-1;j++){
				m_vSCVF[i].vGloPos[j]=vCornerCoords[rRefElem.id(dim-2,i,0,j)];
			}
			m_vSCVF[i].vGloPos[m_vSCVF[i].numCo-1]=globalBary;
			AveragePositions(m_vSCVF[i].globalIP, m_vSCVF[i].vGloPos, m_vSCVF[i].numCo);
			ElementNormal<face_type0,worldDim>(m_vSCVF[i].Normal,m_vSCVF[i].vGloPos);// face_type0 identical to scvf type
		}

	//	compute size of scv
		for(size_t i = 0; i < num_scv(); ++i)
		{
		//	side nodes in reverse order to fulfill standard element order
			for (int j=0;j<m_vSCV[i].numCorners-1;j++){
				m_vSCV[i].vGloPos[m_vSCV[i].numCorners-2-j]=vCornerCoords[rRefElem.id(dim-1,i,0,j)];
			}
			AveragePositions(m_vGlobUnkCoords[i], m_vSCV[i].vGloPos, m_vSCV[i].numCorners-1);
		//	the CR unknown sits at the side midpoint
			m_vSCV[i].vGlobIP = m_vGlobUnkCoords[i];

			m_vSCV[i].vGloPos[m_vSCV[i].numCorners-1]=globalBary;
		//	compute volume of scv and normal to associated element face
		//CRSCVSizeAndNormal<dim>(m_vSCV[i].Vol,m_vSCV[i].Normal,m_vSCV[i].vGloPos,m_vSCV[i].numCorners);
			if (m_vSCV[i].numCorners-1==dim){
				m_vSCV[i].Vol = ElementSize<scv_type0,worldDim>(m_vSCV[i].vGloPos);
				ElementNormal<face_type0, worldDim>(m_vSCV[i].Normal, m_vSCV[i].vGloPos);
			} else { // m_vSCV[i].numCorners-2==dim , only possible in 3d (pyramid)
				m_vSCV[i].Vol = ElementSize<scv_type1,worldDim>(m_vSCV[i].vGloPos);
				ElementNormal<face_type1,worldDim>(m_vSCV[i].Normal, m_vSCV[i].vGloPos);
			};
		//	nodes are in reverse order therefore reverse sign to get outward normal
			m_vSCV[i].Normal*=-1;
		}
	}
	UG_CATCH_THROW("DimCRFVGeometry: geometric update failed.");
}
/// Recomputes all reference-space (local) data for the currently cached
/// reference object id m_roid: numbers of SCVs/SCVFs, local corner positions
/// and integration points of the dual decomposition, and the Crouzeix-Raviart
/// shape-function values/gradients at those integration points.
///
/// Must be called whenever m_roid changes; world-coordinate data is computed
/// separately in update() / update_geometric_data().
void DimCRFVGeometry<TDim, TWorldDim>::
update_local_data()
{
	// get reference element
	try{
	m_rRefElem = ReferenceElementProvider::get<dim>(m_roid);

	// set number of scvf / scv of this roid
	// (CR unknowns sit on element sides: one scv per side, one scvf per edge/vertex)
	m_numSCV = m_rRefElem.num(dim-1); // number of faces
	m_numSCVF = m_rRefElem.num(1); // number of edges

	// compute barycenter coordinates (mean of the reference corners)
	localBary = m_rRefElem.corner(0);
	for (size_t j=1;j<m_rRefElem.num(0);j++){
		localBary+=m_rRefElem.corner(j);
	}
	localBary*=1./(number)m_rRefElem.num(0);

	// set up local informations for SubControlVolumeFaces (scvf)
	// each scvf is associated to one vertex (2d) / edge (3d) of the element
	for(size_t i = 0; i < m_numSCVF; ++i)
	{
		// this scvf separates the given edges/faces
		m_vSCVF[i].From = m_rRefElem.id(dim-2, i, dim-1, 0);// todo handle dim==1
		m_vSCVF[i].To = m_rRefElem.id(dim-2, i, dim-1, 1);

		// corners of the scvf: the (dim-2)-subobject corners plus the barycenter
		for (size_t j=0;j<m_vSCVF[i].numCo-1;j++){
			m_vSCVF[i].vLocPos[j]=m_rRefElem.corner(m_rRefElem.id(dim-2,i,0,j));
		}

		m_vSCVF[i].vLocPos[m_vSCVF[i].numCo-1]=localBary;

		AveragePositions(m_vSCVF[i].localIP, m_vSCVF[i].vLocPos, m_vSCVF[i].numCo);
	}

	// set up local informations for SubControlVolumes (scv)
	// each scv is associated to one edge(2d) / face(3d) of the element
	for(size_t i = 0; i < m_numSCV; ++i)
	{
		// store associated node
		m_vSCV[i].nodeID = i;

		// corners: the side corners (in reverse order, to match the standard
		// element orientation) plus the barycenter appended last
		m_vSCV[i].numCorners = m_rRefElem.num(dim-1,i,0)+1;
		for (int j=0;j<m_vSCV[i].numCorners-1;j++){
			m_vSCV[i].vLocPos[m_vSCV[i].numCorners-2-j]=m_rRefElem.corner(m_rRefElem.id(dim-1,i,0,j));
		}
		// the unknown's local position is the side midpoint (average of side corners)
		AveragePositions(m_vLocUnkCoords[i], m_vSCV[i].vLocPos, m_vSCV[i].numCorners-1);
		m_vSCV[i].vLocIP = m_vLocUnkCoords[i];

		m_vSCV[i].vLocPos[m_vSCV[i].numCorners-1]=localBary;
	}

	/////////////////////////
	// Shapes and Derivatives
	/////////////////////////

	// Crouzeix-Raviart (order 1) trial space on this reference element
	const LocalShapeFunctionSet<dim>& rTrialSpace =
		LocalFiniteElementProvider::get<dim>(m_roid, LFEID(LFEID::CROUZEIX_RAVIART, dim, 1));

	m_nsh = rTrialSpace.num_sh();

	// cache shape values and local gradients at every scvf integration point
	for(size_t i = 0; i < m_numSCVF; ++i)
	{
		m_vSCVF[i].numSH = rTrialSpace.num_sh();
		rTrialSpace.shapes(&(m_vSCVF[i].vShape[0]), m_vSCVF[i].local_ip());
		rTrialSpace.grads(&(m_vSCVF[i].vLocalGrad[0]), m_vSCVF[i].local_ip());
	}

	// ... and at every scv integration point
	for(size_t i = 0; i < m_numSCV; ++i)
	{
		m_vSCV[i].numSH = rTrialSpace.num_sh();
		rTrialSpace.shapes(&(m_vSCV[i].vShape[0]), m_vSCV[i].local_ip());
		rTrialSpace.grads(&(m_vSCV[i].vLocalGrad[0]), m_vSCV[i].local_ip());
	}

	}
	UG_CATCH_THROW("DimCRFVGeometry: update failed.");

	// copy ip positions in a list for Sub Control Volumes Faces (SCVF)
	for(size_t i = 0; i < m_numSCVF; ++i)
		m_vLocSCVF_IP[i] = scvf(i).local_ip();

	// no constrained (hanging-node) data for a regular element
	m_numConstrainedDofs = 0;
	m_numConstrainedSCVF = 0;
}
/// Full geometry update for the given element: recomputes the global SCV/SCVF
/// data (positions, IPs, normals, volumes), the reference-mapping data
/// (transposed inverse Jacobians and sqrt-Gram determinants at the IPs), the
/// global shape-function gradients, and finally the boundary faces (if
/// boundary subsets are registered).
///
/// @param pElem         the grid element; if identical to the cached element,
///                      the call is a no-op
/// @param vCornerCoords world coordinates of the element corners
/// @param ish           subset handler used to update boundary faces; may be
///                      NULL, in which case boundary faces are skipped
///
/// Fix: the local reference-element reference was named 'm_rRefElem' and
/// thereby shadowed the class member of the same name; renamed to 'rRefElem'.
/// Also removed a stray ';' after the pyramid else-branch.
void DimCRFVGeometry<TDim, TWorldDim>::
update(GridObject* pElem, const MathVector<worldDim>* vCornerCoords, const ISubsetHandler* ish)
{
	// If already update for this element, do nothing
	if(m_pElem == pElem) return; else m_pElem = pElem;

	// refresh local data, if different roid given
	if(m_roid != pElem->reference_object_id())
	{
		// remember new roid
		m_roid = (ReferenceObjectID) pElem->reference_object_id();

		// update local data
		update_local_data();
	}

	// get reference element (local name must not shadow the member m_rRefElem)
	try{
	const DimReferenceElement<dim>& rRefElem
		= ReferenceElementProvider::get<dim>(m_roid);

	// compute barycenter coordinates
	globalBary = vCornerCoords[0];
	for (size_t j=1;j<rRefElem.num(0);j++){
		globalBary+=vCornerCoords[j];
	}
	globalBary*=1./(number)rRefElem.num(0);

	// compute global informations for scvf
	for(size_t i = 0; i < num_scvf(); ++i)
	{
		for (size_t j=0;j<m_vSCVF[i].numCo-1;j++){
			m_vSCVF[i].vGloPos[j]=vCornerCoords[rRefElem.id(dim-2,i,0,j)];
		}
		m_vSCVF[i].vGloPos[m_vSCVF[i].numCo-1]=globalBary;
		AveragePositions(m_vSCVF[i].globalIP, m_vSCVF[i].vGloPos, m_vSCVF[i].numCo);
		ElementNormal<face_type0,worldDim>(m_vSCVF[i].Normal,m_vSCVF[i].vGloPos);// face_type0 identical to scvf type
	}

	// compute size of scv
	for(size_t i = 0; i < num_scv(); ++i)
	{
		// side nodes in reverse order to fulfill standard element order
		for (int j=0;j<m_vSCV[i].numCorners-1;j++){
			m_vSCV[i].vGloPos[m_vSCV[i].numCorners-2-j]=vCornerCoords[rRefElem.id(dim-1,i,0,j)];
		}
		AveragePositions(m_vGlobUnkCoords[i], m_vSCV[i].vGloPos, m_vSCV[i].numCorners-1);
		m_vSCV[i].vGlobIP = m_vGlobUnkCoords[i];

		m_vSCV[i].vGloPos[m_vSCV[i].numCorners-1]=globalBary;

		// compute volume of scv and normal to associated element face
		//CRSCVSizeAndNormal<dim>(m_vSCV[i].Vol,m_vSCV[i].Normal,m_vSCV[i].vGloPos,m_vSCV[i].numCorners);
		if (m_vSCV[i].numCorners-1==dim){
			m_vSCV[i].Vol = ElementSize<scv_type0,worldDim>(m_vSCV[i].vGloPos);
			ElementNormal<face_type0, worldDim>(m_vSCV[i].Normal, m_vSCV[i].vGloPos);
		} else { // m_vSCV[i].numCorners-2==dim , only possible in 3d (pyramid)
			m_vSCV[i].Vol = ElementSize<scv_type1,worldDim>(m_vSCV[i].vGloPos);
			ElementNormal<face_type1,worldDim>(m_vSCV[i].Normal, m_vSCV[i].vGloPos);
		}
		// nodes are in reverse order therefore reverse sign to get outward normal
		m_vSCV[i].Normal*=-1;
	}

	// get reference mapping
	DimReferenceMapping<dim, worldDim>& rMapping = ReferenceMappingProvider::get<dim, worldDim>(m_roid);
	rMapping.update(vCornerCoords);

	//\todo compute with on virt. call
	// compute jacobian for linear mapping: constant over the element, so
	// evaluate once and copy to every scvf/scv
	if(rMapping.is_linear())
	{
		MathMatrix<worldDim,dim> JtInv;
		rMapping.jacobian_transposed_inverse(JtInv, m_vSCVF[0].local_ip());
		const number detJ = rMapping.sqrt_gram_det(m_vSCVF[0].local_ip());

		for(size_t i = 0; i < num_scvf(); ++i)
		{
			m_vSCVF[i].JtInv = JtInv;
			m_vSCVF[i].detj = detJ;
		}

		for(size_t i = 0; i < num_scv(); ++i)
		{
			m_vSCV[i].JtInv = JtInv;
			m_vSCV[i].detj = detJ;
		}
	}
	// else compute jacobian for each integration point
	else
	{
		for(size_t i = 0; i < num_scvf(); ++i)
		{
			rMapping.jacobian_transposed_inverse(m_vSCVF[i].JtInv, m_vSCVF[i].local_ip());
			m_vSCVF[i].detj = rMapping.sqrt_gram_det(m_vSCVF[i].local_ip());
		}
		for(size_t i = 0; i < num_scv(); ++i)
		{
			rMapping.jacobian_transposed_inverse(m_vSCV[i].JtInv, m_vSCV[i].local_ip());
			m_vSCV[i].detj = rMapping.sqrt_gram_det(m_vSCV[i].local_ip());
		}
	}

	// compute global gradients: grad_glob = JtInv * grad_loc
	for(size_t i = 0; i < num_scvf(); ++i)
		for(size_t sh = 0; sh < scvf(i).num_sh(); ++sh)
			MatVecMult(m_vSCVF[i].vGlobalGrad[sh], m_vSCVF[i].JtInv, m_vSCVF[i].vLocalGrad[sh]);

	for(size_t i = 0; i < num_scv(); ++i)
		for(size_t sh = 0; sh < scv(i).num_sh(); ++sh)
			MatVecMult(m_vSCV[i].vGlobalGrad[sh], m_vSCV[i].JtInv, m_vSCV[i].vLocalGrad[sh]);

	// copy ip points in list (SCVF)
	for(size_t i = 0; i < num_scvf(); ++i)
		m_vGlobSCVF_IP[i] = scvf(i).global_ip();

	}
	UG_CATCH_THROW("DimCRFVGeometry: update failed.");

	// if no boundary subsets required, return
	if(num_boundary_subsets() == 0 || ish == NULL) return;
	else update_boundary_faces(pElem, vCornerCoords, ish);
}
/// Export of the concentration gradient at arbitrary integration points.
///
/// Fast path: if the requested local IPs coincide with those cached in the FE
/// geometry, the precomputed global gradients are used. Otherwise the trial
/// space is evaluated at the given local IPs and the local gradient is mapped
/// to global coordinates via the reference mapping.
///
/// @param vValue        [out] gradient of u at each IP
/// @param vGlobIP       global IP positions (unused; interface requirement)
/// @param time, si      evaluation time and subset index (unused here)
/// @param u             local solution vector
/// @param elem          the grid element
/// @param vCornerCoords element corner coordinates (for the reference mapping)
/// @param vLocIP        local IP positions
/// @param nip           number of IPs
/// @param bDeriv        if true, also fill derivatives w.r.t. the unknowns
/// @param vvvDeriv      [out] vvvDeriv[ip][fct][sh] = d(grad u)/d(u_sh)
///
/// Fix: removed stray ';' after the function's closing brace.
void ConvectionDiffusionFE<TDomain>::
ex_grad(MathVector<dim> vValue[],
        const MathVector<dim> vGlobIP[],
        number time, int si,
        const LocalVector& u,
        GridObject* elem,
        const MathVector<dim> vCornerCoords[],
        const MathVector<TFEGeom::dim> vLocIP[],
        const size_t nip,
        bool bDeriv,
        std::vector<std::vector<MathVector<dim> > > vvvDeriv[])
{
	// request geometry
	const TFEGeom& geo = GeomProvider<TFEGeom>::get(m_lfeID, m_quadOrder);

	// reference element
	typedef typename reference_element_traits<TElem>::reference_element_type ref_elem_type;

	// reference dimension
	static const int refDim = reference_element_traits<TElem>::dim;

	// reference object id
	static const ReferenceObjectID roid = ref_elem_type::REFERENCE_OBJECT_ID;

	// FE: the requested IPs are exactly the cached geometry IPs, so the
	// precomputed global gradients can be used directly
	if(vLocIP == geo.local_ips())
	{
		// Loop ip
		for(size_t ip = 0; ip < geo.num_ip(); ++ip)
		{
			// grad u(ip) = sum_sh u_sh * grad_glob(phi_sh)(ip)
			VecSet(vValue[ip], 0.0);
			for(size_t sh = 0; sh < geo.num_sh(); ++sh)
				VecScaleAppend(vValue[ip], u(_C_, sh), geo.global_grad(ip, sh));

			// derivative w.r.t. u_sh is simply the global shape gradient
			if(bDeriv)
				for(size_t sh = 0; sh < geo.num_sh(); ++sh)
					vvvDeriv[ip][_C_][sh] = geo.global_grad(ip, sh);
		}
	}
	// general case: evaluate trial space and reference mapping at the given IPs
	else
	{
		// request for trial space
		try{
			const LocalShapeFunctionSet<refDim>& rTrialSpace
				= LocalFiniteElementProvider::get<refDim>(roid, m_lfeID);

			// number of shape functions
			const size_t numSH = rTrialSpace.num_sh();

			// storage for shape function at ip
			std::vector<MathVector<refDim> > vLocGrad(numSH);
			MathVector<refDim> locGrad;

			// Reference Mapping
			MathMatrix<dim, refDim> JTInv;
			ReferenceMapping<ref_elem_type, dim> mapping(vCornerCoords);

			// loop ips
			for(size_t ip = 0; ip < nip; ++ip)
			{
				// evaluate at shapes at ip
				rTrialSpace.grads(vLocGrad, vLocIP[ip]);

				// compute grad at ip (in reference coordinates)
				VecSet(locGrad, 0.0);
				for(size_t sh = 0; sh < numSH; ++sh)
					VecScaleAppend(locGrad, u(_C_, sh), vLocGrad[sh]);

				// compute global grad: grad_glob = JTInv * grad_loc
				mapping.jacobian_transposed_inverse(JTInv, vLocIP[ip]);
				MatVecMult(vValue[ip], JTInv, locGrad);

				// compute derivative w.r.t. to unknowns iff needed
				if(bDeriv)
					for(size_t sh = 0; sh < numSH; ++sh)
						MatVecMult(vvvDeriv[ip][_C_][sh], JTInv, vLocGrad[sh]);
			}
		}
		UG_CATCH_THROW("ConvectionDiffusion::ex_grad: trial space missing.");
	}
}
/// Assembles the right-hand-side contributions to the error estimator:
/// the normal component of the vector source on the element sides and the
/// scalar source in the element volume.
///
/// @param elem          the grid element
/// @param vCornerCoords element corner coordinates (for side normals)
/// @param scale         scaling factor applied to every contribution
///
/// Fix: the result of the dynamic_cast to err_est_type was dereferenced
/// without a null check; a wrong ErrEstData type caused a null-pointer
/// dereference instead of a diagnosable error. A NULL check is added.
void ConvectionDiffusionFE<TDomain>::
compute_err_est_rhs_elem(GridObject* elem, const MathVector<dim> vCornerCoords[], const number& scale)
{
	typedef typename reference_element_traits<TElem>::reference_element_type ref_elem_type;

	// get error estimator data object; the cast fails (returns NULL) if an
	// estimator of a different type was supplied
	err_est_type* err_est_data = dynamic_cast<err_est_type*>(this->m_spErrEstData.get());
	if (err_est_data == NULL)
		{UG_THROW("Error estimator data object is not of type SideAndElemErrEstData.");}
	if (err_est_data->surface_view().get() == NULL) {UG_THROW("Error estimator has NULL surface view.");}
	MultiGrid* pErrEstGrid = (MultiGrid*) (err_est_data->surface_view()->subset_handler()->multi_grid());

	// SIDE TERMS //

	// get the sides of the element
	typename MultiGrid::traits<typename SideAndElemErrEstData<TDomain>::side_type>::secure_container side_list;
	pErrEstGrid->associated_elements_sorted(side_list, (TElem*) elem);
	if (side_list.size() != (size_t) ref_elem_type::numSides)
		UG_THROW ("Mismatch of numbers of sides in 'ConvectionDiffusionFE::compute_err_est_elem'");

	// loop sides
	size_t passedIPs = 0;
	for (size_t side = 0; side < (size_t) ref_elem_type::numSides; side++)
	{
		// unit outer normal on side
		MathVector<dim> normal;
		SideNormal<ref_elem_type,dim>(normal, side, vCornerCoords);
		VecNormalize(normal, normal);

		try
		{
			for (size_t sip = 0; sip < err_est_data->num_side_ips(side_list[side]); sip++)
			{
				// global IP index = IPs of previous sides + local side IP index
				size_t ip = passedIPs + sip;

				// vector source //
				if (m_imVectorSource.data_given())
					(*err_est_data)(side_list[side],sip) += scale * VecDot(m_imVectorSource[ip], normal);
			}

			passedIPs += err_est_data->num_side_ips(side_list[side]);
		}
		UG_CATCH_THROW("Values for the error estimator could not be assembled at every IP." << std::endl
				<< "Maybe wrong type of ErrEstData object? This implementation needs: SideAndElemErrEstData.");
	}

	// VOLUME TERMS //

	// only the scalar source contributes to the volume part
	if (!m_imSource.data_given()) return;

	typename MultiGrid::traits<typename SideAndElemErrEstData<TDomain>::elem_type>::secure_container elem_list;
	pErrEstGrid->associated_elements_sorted(elem_list, (TElem*) elem);
	if (elem_list.size() != 1)
		UG_THROW ("Mismatch of numbers of sides in 'ConvectionDiffusionFE::compute_err_est_elem'");

	// source //
	try
	{
		for (size_t ip = 0; ip < err_est_data->num_elem_ips(elem->reference_object_id()); ip++)
			(*err_est_data)(elem_list[0],ip) += scale * m_imSource[ip];
	}
	UG_CATCH_THROW("Values for the error estimator could not be assembled at every IP." << std::endl
			<< "Maybe wrong type of ErrEstData object? This implementation needs: SideAndElemErrEstData.");
}
/// Assembles the stiffness (A) contributions to the error estimator:
/// the normal flux density (diffusion + convection + general flux) on the
/// element sides, and the strong-form volume residual terms (convection,
/// reaction) in the element interior.
///
/// @param u             local solution vector
/// @param elem          the grid element
/// @param vCornerCoords element corner coordinates (for side normals)
/// @param scale         scaling factor applied to every contribution
///
/// Fix: the result of the dynamic_cast to err_est_type was dereferenced
/// without a null check; a wrong ErrEstData type caused a null-pointer
/// dereference instead of a diagnosable error. A NULL check is added.
void ConvectionDiffusionFE<TDomain>::
compute_err_est_A_elem(const LocalVector& u, GridObject* elem, const MathVector<dim> vCornerCoords[], const number& scale)
{
	typedef typename reference_element_traits<TElem>::reference_element_type ref_elem_type;

	// get error estimator data object; the cast fails (returns NULL) if an
	// estimator of a different type was supplied
	err_est_type* err_est_data = dynamic_cast<err_est_type*>(this->m_spErrEstData.get());
	if (err_est_data == NULL)
		{UG_THROW("Error estimator data object is not of type SideAndElemErrEstData.");}
	if (err_est_data->surface_view().get() == NULL) {UG_THROW("Error estimator has NULL surface view.");}
	MultiGrid* pErrEstGrid = (MultiGrid*) (err_est_data->surface_view()->subset_handler()->multi_grid());

	// request geometry
	static const TFEGeom& geo = GeomProvider<TFEGeom>::get();

	// SIDE TERMS //

	// get the sides of the element
	// We have to cast elem to a pointer of type SideAndElemErrEstData::elem_type
	// for the SideAndElemErrEstData::operator() to work properly.
	// This cannot generally be achieved by casting to TElem*, since this method is also registered for
	// lower-dimensional types TElem, and must therefore be compilable, even if it is never EVER to be executed.
	// The way we achieve this here, is by calling associated_elements_sorted() which has an implementation for
	// all possible types. Whatever comes out of it is of course complete nonsense if (and only if)
	// SideAndElemErrEstData::elem_type != TElem. To be on the safe side, we throw an error if the number of
	// entries in the list is not as it should be.
	typename MultiGrid::traits<typename SideAndElemErrEstData<TDomain>::side_type>::secure_container side_list;
	pErrEstGrid->associated_elements_sorted(side_list, (TElem*) elem);
	if (side_list.size() != (size_t) ref_elem_type::numSides)
		UG_THROW ("Mismatch of numbers of sides in 'ConvectionDiffusionFE::compute_err_est_elem'");

	// some help variables
	MathVector<dim> fluxDensity, gradC, normal;

	// FIXME: The computation of the gradient has to be reworked.
	// In the case of P1 shape functions, it is valid. For Q1 shape functions, however,
	// the gradient is not constant (but bilinear) on the element - and along the sides.
	// We cannot use the FVGeom here. Instead, we need to calculate the gradient in each IP!

	// calculate grad u as average (over scvf)
	VecSet(gradC, 0.0);
	for(size_t ii = 0; ii < geo.num_ip(); ++ii)
	{
		for (size_t j=0; j<m_shapeValues.num_sh(); j++)
			VecScaleAppend(gradC, u(_C_,j), geo.global_grad(ii, j));
	}
	VecScale(gradC, gradC, (1.0/geo.num_ip()));

	// calculate flux through the sides
	size_t passedIPs = 0;
	for (size_t side=0; side < (size_t) ref_elem_type::numSides; side++)
	{
		// unit outer normal on side
		SideNormal<ref_elem_type,dim>(normal, side, vCornerCoords);
		VecNormalize(normal, normal);

		try
		{
			for (size_t sip = 0; sip < err_est_data->num_side_ips(side_list[side]); sip++)
			{
				// global IP index = IPs of previous sides + local side IP index
				size_t ip = passedIPs + sip;

				VecSet(fluxDensity, 0.0);

				// diffusion //	contributes -D * grad(c)
				if (m_imDiffusion.data_given())
					MatVecScaleMultAppend(fluxDensity, -1.0, m_imDiffusion[ip], gradC);

				// convection // contributes c(ip) * v
				if (m_imVelocity.data_given())
				{
					number val = 0.0;
					for (size_t sh = 0; sh < m_shapeValues.num_sh(); sh++)
						val += u(_C_,sh) * m_shapeValues.shapeAtSideIP(sh,sip);

					VecScaleAppend(fluxDensity, val, m_imVelocity[ip]);
				}

				// general flux //
				if (m_imFlux.data_given())
					VecAppend(fluxDensity, m_imFlux[ip]);

				(*err_est_data)(side_list[side],sip) += scale * VecDot(fluxDensity, normal);
			}

			passedIPs += err_est_data->num_side_ips(side_list[side]);
		}
		UG_CATCH_THROW("Values for the error estimator could not be assembled at every IP." << std::endl
				<< "Maybe wrong type of ErrEstData object? This implementation needs: SideAndElemErrEstData.");
	}

	// VOLUME TERMS //

	typename MultiGrid::traits<typename SideAndElemErrEstData<TDomain>::elem_type>::secure_container elem_list;
	pErrEstGrid->associated_elements_sorted(elem_list, (TElem*) elem);
	if (elem_list.size() != 1)
		UG_THROW ("Mismatch of numbers of sides in 'ConvectionDiffusionFE::compute_err_est_elem'");

	try
	{
		for (size_t ip = 0; ip < err_est_data->num_elem_ips(elem->reference_object_id()); ip++)
		{
			number total = 0.0;

			// diffusion //	TODO ONLY FOR (PIECEWISE) CONSTANT DIFFUSION TENSOR SO FAR!
			// div(D*grad(c))
			// nothing to do, as c is piecewise linear and div(D*grad(c)) disappears
			// if D is diagonal and c bilinear, this should also vanish (confirm this!)

			// convection // TODO ONLY FOR (PIECEWISE) CONSTANT OR DIVERGENCE-FREE
			//               VELOCITY FIELDS SO FAR!
			// div(v*c) = div(v)*c + v*grad(c) -- gradC has been calculated above
			if (m_imVelocity.data_given())
				total += VecDot(m_imVelocity[ip], gradC);

			// general flux // TODO ONLY FOR DIVERGENCE-FREE FLUX FIELD SO FAR!
			// nothing to do

			// reaction // r(ip) * c(ip), with c interpolated from the unknowns
			if (m_imReactionRate.data_given())
			{
				number val = 0.0;
				for (size_t sh = 0; sh < geo.num_sh(); sh++)
					val += u(_C_,sh) * m_shapeValues.shapeAtElemIP(sh,ip);

				total += m_imReactionRate[ip] * val;
			}

			if (m_imReaction.data_given())
			{
				total += m_imReaction[ip];
			}

			(*err_est_data)(elem_list[0],ip) += scale * total;
		}
	}
	UG_CATCH_THROW("Values for the error estimator could not be assembled at every IP." << std::endl
			<< "Maybe wrong type of ErrEstData object? This implementation needs: SideAndElemErrEstData.");
}