//	volume callbacks
void ISelector::volume_created(Grid* grid, Volume* vol,
								GridObject* pParent,
								bool replacesParent)
{
	assert((m_pGrid == grid) && "grids do not match.");
	
//TODO: this if could be removed if the selector was only registered for
//		the elements that it supports. Note that a dynamic register/unregister
//		would be required...
	if(elements_are_supported(SE_VOLUME)){
	//	init the element
		mark_deselected(vol);
		if(autoselection_enabled())
			select(vol);
		else if((pParent != NULL) && selection_inheritance_enabled()){
			if(m_bStrictInheritanceEnabled){
				if(pParent->base_object_id() == VOLUME){
					select(vol, get_selection_status(static_cast<Volume*>(pParent)));
				}
			}
			else
				select(vol, get_selection_status(pParent));
		}
		else if(replacesParent){
			UG_ASSERT(pParent, "A parent has to exist if it shall be replaced");
			UG_ASSERT(dynamic_cast<Volume*>(pParent), "Only parents of the same type may be replaced.");
			select(vol, get_selection_status(static_cast<Volume*>(pParent)));
		}
	}
}
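
For context, a minimal sketch of how this callback typically comes into play: with selection inheritance enabled on a Selector, newly created volumes pick up the selection state of their parents. The enable_selection_inheritance setter and the global Refine overload used here are assumptions about the surrounding ug4 API; 'grid' and 'someVolume' are placeholders.

//	sketch (assumed API): children of selected volumes become selected during refinement
Selector sel(grid);
sel.enable_selection_inheritance(true);
sel.select(someVolume);					// placeholder for an existing volume
Refine(grid, sel);						// volume_created() above runs for every new volume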
// permutes the indices stored in an IndexLayout according to the given index permutation
void PermuteIndicesInIndexLayout(	IndexLayout& layout,
									const std::vector<size_t>& vIndNew)
{
	typedef IndexLayout::Interface 	Interface;
	typedef IndexLayout::iterator 	InterfaceIter;
	typedef Interface::iterator		IndexIter;
	typedef Interface::Element		Index;

//	iterate over all interfaces
	for(InterfaceIter iiter = layout.begin();
		iiter != layout.end(); ++iiter)
	{
	//	get interface
		Interface& interface = layout.interface(iiter);

	//	iterate over all elements
		for(IndexIter indIter = interface.begin();
			indIter != interface.end(); ++indIter)
		{
		//	get old index
			Index oldIndex = interface.get_element(indIter);

		//	check index
			UG_ASSERT(oldIndex < vIndNew.size(), "Invalid index.");

		//	replace by new index
			interface.get_element(indIter) = vIndNew[oldIndex];
		}
	}
}
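
A minimal usage sketch: vIndNew must map each old index to its new index, i.e. entry i holds the new position of former index i. Here 'layout' and 'n' are placeholders for an existing IndexLayout and the number of indices.

//	sketch: reverse the index numbering and update the layout accordingly
std::vector<size_t> vIndNew(n);
for(size_t i = 0; i < n; ++i)
	vIndNew[i] = n - 1 - i;			// old index i becomes index n-1-i
PermuteIndicesInIndexLayout(layout, vIndNew);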
Example #3
void MultiGrid::element_created(TElem* elem, TParent* pParent,
								TElem* pReplaceMe)
{
	UG_ASSERT(pReplaceMe, "Only call this method with a valid element which shall be replaced.");
	int level = get_level(pReplaceMe);

//	register parent and child
	set_parent(elem, pParent);

	if(pParent)
	{
	//	add the element to the parents children list
	//	pParent should have an info object at this time!
		typename mginfo_traits<TParent>::info_type& parentInfo = get_info(pParent);
		parentInfo.replace_child(elem, pReplaceMe);
	}

//	put the element into the hierarchy
	level_required(level);
	m_hierarchy.assign_subset(elem, level);

//	explicitly copy the parent-type from pReplaceMe to the new element.
//	This has to be done explicitly since a parent may not exist locally in
//	a parallel environment.
	set_parent_type(elem, parent_type(pReplaceMe));
}
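
The parent links maintained above can later be queried through the MultiGrid interface; a small sketch, assuming the usual ug4 accessors get_parent and get_level ('elem' is a placeholder).

//	sketch: walk from an element up to its base-level ancestor
GridObject* obj = elem;
while(mg.get_parent(obj) != NULL)
	obj = mg.get_parent(obj);
UG_LOG("ancestor found on level " << mg.get_level(obj) << "\n");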
void
FracturedMediaRefiner<TGrid, TAPosition>::
set_position_attachment(TAPosition& aPos)
{
	UG_ASSERT(BaseClass::get_associated_grid(),
			  "The refiner has to be registered at a grid");
	m_aaPos.access(*BaseClass::get_associated_grid(), aPos);
}
Example #5
bool SolveDeficit(DenseMatrix< VariableArray2<double> > &A,
		DenseVector<VariableArray1<double> > &x, DenseVector<VariableArray1<double> > &rhs, double deficitTolerance)
{
	DenseMatrix< VariableArray2<double> > A2=A;
	DenseVector<VariableArray1<double> > rhs2=rhs;

	UG_ASSERT(A.num_rows() == rhs.size(), "Number of matrix rows has to match the size of rhs.");
	UG_ASSERT(A.num_cols() == x.size(), "Number of matrix columns has to match the size of x.");

	size_t iNonNullRows;
	x.resize(A.num_cols());
	for(size_t i=0; i<x.size(); i++)
		x[i] = 0.0;
	std::vector<size_t> interchange;
	if(Decomp(A, rhs, iNonNullRows, interchange, deficitTolerance) == false) return false;

//	A.maple_print("Adecomp");
//	rhs.maple_print("rhs decomp");

	for(int i=iNonNullRows-1; i>=0; i--)
	{
		double d=A(i,i);
		double s=0;
		for(size_t k=i+1; k<A.num_cols(); k++)
			s += A(i,k)*x[interchange[k]];
		x[interchange[i]] = (rhs[i] - s)/d;
	}
	DenseVector<VariableArray1<double> > f;
	f = A2*x - rhs2;
	if(VecNormSquared(f) > 1e-2)
	{
		UG_LOGN("iNonNullRows = " << iNonNullRows);
		UG_LOG("solving was wrong:");
		UG_LOGN(CPPString(A2, "Aold"));
		rhs2.maple_print("rhs");
		UG_LOGN(CPPString(A, "Adecomp"));
		rhs.maple_print("rhsDecomp");
		x.maple_print("x");
		f.maple_print("f");

	}

	return true;
}
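
A small usage sketch for SolveDeficit on a 2x2 system; the tolerance value is just an illustration. Note that A and rhs are overwritten by the decomposition.

//	sketch: set up a small dense system and solve it
DenseMatrix< VariableArray2<double> > A;
DenseVector< VariableArray1<double> > x, rhs;
A.resize(2, 2);
A(0,0) = 2.0;	A(0,1) = 1.0;
A(1,0) = 1.0;	A(1,1) = 3.0;
rhs.resize(2);
rhs[0] = 3.0;	rhs[1] = 4.0;
if(SolveDeficit(A, x, rhs, 1e-12))
	UG_LOG("x = " << x[0] << ", " << x[1] << "\n");	// expected solution: x = 1, 1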
Example #6
static void UpdateScriptAfterRegistryChange(ug::bridge::Registry* pReg)
{
	PROFILE_FUNC();
	UG_ASSERT(pReg == g_pRegistry, "static g_pRegistry does not match parameter pReg, someone messed up the registries!");
	
//	this can be called since CreateBindings automatically avoids
//	double registration
	ug::bridge::lua::CreateBindings_LUA(GetDefaultLuaState(),
										*pReg);
}
void
AdaptionSurfaceGridFunction<TDomain>::ValueAccessor::
access_inner(GridObject* elem)
{
	UG_ASSERT(m_fct < m_rASGF.m_aaValue[elem].size(), "Only storage for "
	          <<m_rASGF.m_aaValue[elem].size()<<" fcts, but fct-cmp "
	          <<m_fct<<" requested on "<< ElementDebugInfo(*m_rASGF.m_spDomain->grid(), elem))
	std::vector<number>& vVal = m_rASGF.m_aaValue[elem][m_fct];
	m_Val.resize(vVal.size());
	for(size_t i = 0; i < vVal.size(); ++i)
		m_Val[i] = &vVal[i];
}
void
AdaptionSurfaceGridFunction<TDomain>::ValueAccessor::
access_closure(TBaseElem* elem)
{

	typename Grid::traits<TSubBaseElem>::secure_container vSubElem;
	m_rASGF.m_spGrid->associated_elements_sorted(vSubElem, elem);

	std::vector<size_t> vOrientOffset;

	for(size_t i = 0; i < vSubElem.size(); ++i)
	{
	//	get subelement
		TSubBaseElem* subElem = vSubElem[i];
		UG_ASSERT(m_fct < m_rASGF.m_aaValue[subElem].size(), "Only storage for "
		          <<m_rASGF.m_aaValue[subElem].size()<<" fcts, but fct-cmp "
		          <<m_fct<<" requested on "<<ElementDebugInfo(*m_rASGF.m_spDomain->grid(), subElem))
		std::vector<number>& vVal = m_rASGF.m_aaValue[subElem][m_fct];

	//	get the orientation for this subelement
		ComputeOrientationOffset(vOrientOffset, elem, subElem, i,
								 m_rASGF.m_spDDInfo->lfeid(m_fct));

		UG_ASSERT(vOrientOffset.size() == vVal.size() ||
				  vOrientOffset.empty(), "Orientation wrong");

	//	cache access
		if(vOrientOffset.empty()){
			for(size_t j = 0; j < vVal.size(); ++j)
				m_Val.push_back(&vVal[ j ]);
		}else{
			for(size_t j = 0; j < vVal.size(); ++j)
				m_Val.push_back(&vVal[ vOrientOffset[j] ]);
		}
	}
}
number CalculateVolume(const Volume& vol,
		Grid::VertexAttachmentAccessor<APosition>& aaPos) {
	switch (vol.reference_object_id()) {
	case ROID_TETRAHEDRON:
		return CalculateVolume(static_cast<const Tetrahedron&>(vol), aaPos);
	case ROID_PRISM:
		return CalculateVolume(static_cast<const Prism&>(vol), aaPos);
	case ROID_PYRAMID:
		return CalculateVolume(static_cast<const Pyramid&>(vol), aaPos);
	case ROID_HEXAHEDRON:
		return CalculateVolume(static_cast<const Hexahedron&>(vol), aaPos);
	default:
		UG_ASSERT(false, "don't know how to calculate the volume of the given element type.");
	}

	return NAN;
}
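
A usage sketch that sums up the volume of all volume elements of a grid; attaching aPosition to the grid beforehand follows the usual ug4 pattern and is assumed here ('grid' is a placeholder for an existing Grid).

//	sketch: accumulate the total volume of a grid
Grid::VertexAttachmentAccessor<APosition> aaPos(grid, aPosition);
number totalVol = 0;
for(VolumeIterator iter = grid.begin<Volume>(); iter != grid.end<Volume>(); ++iter)
	totalVol += CalculateVolume(**iter, aaPos);
UG_LOG("total volume: " << totalVol << "\n");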
bool
FracturedMediaRefiner<TGrid, TAPosition>::
mark(Face* f, RefinementMark refMark)
{
//	make sure that the position accessor is valid
	UG_ASSERT(m_aaPos.valid(),
			  "Set a position attachment before refining!");

	bool wasMarked = BaseClass::is_marked(f);
	if(!BaseClass::mark(f, refMark))
		return false;

	if(!wasMarked){
		if(aspect_ratio(f) < m_aspectRatioThreshold)
			m_queDegeneratedFaces.push(f);
	}
	return true;
}
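
A rough setup sketch combining the two FracturedMediaRefiner methods shown here; the constructor signature and the commented-out threshold setter are assumptions and may differ in the actual class.

//	sketch (assumed interface): refine while tracking degenerated fracture faces
FracturedMediaRefiner<MultiGrid, APosition> refiner(mg);
refiner.set_position_attachment(aPosition);		// must be called before any marking/refining
//refiner.set_aspect_ratio_threshold(0.01);		// hypothetical setter for m_aspectRatioThreshold
refiner.mark(someFace, RM_REFINE);				// 'someFace' is a placeholder
refiner.refine();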
Example #11
void MessageHub::
unregister_callback_impl(MessageHub::CallbackId* cbId)
{
	if(cbId->m_hub == NULL){
		throw(Error("MessageHub::unregister_callback: Invalid callback-id. "
						"The callback was probably already unregistered.",
						MSG_HUB_BAD_CALLBACK_ID));
	}

	UG_ASSERT(cbId->m_hub == this, "Wrong MessageHub");

	CallbackEntryList& callbacks = m_callbackMap[cbId->m_msgTypeId];

//	clear the entry
	callbacks.erase(cbId->m_callbackEntryIter);

//	set the associated hub to NULL, since it was just unregistered
	cbId->m_hub = NULL;
}
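
A sketch of the register/release cycle that eventually ends up in this method; the registration call and the message type are assumptions in the spirit of the MessageHub interface and may not match the exact API.

//	sketch (assumed interface): the CallbackId returned on registration identifies the callback
void OnAdaption(const GridMessage_Adaption& msg)	{/* react to the message */}

void RegisterAndRelease(SPMessageHub msgHub)
{
	MessageHub::SPCallbackId cbId = msgHub->register_function_callback(&OnAdaption);
	//	resetting the smart pointer releases the id; the hub then unregisters the callback
	cbId = SPNULL;
}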
void ConvectionDiffusionFVCR<TDomain>::
lin_def_mass(const LocalVector& u,
                       std::vector<std::vector<number> > vvvLinDef[],
                       const size_t nip)
{
//  get finite volume geometry
	static const TFVGeom& geo = GeomProvider<TFVGeom>::get();

// 	loop Sub Control Volumes (SCV)
	for(size_t co = 0; co < geo.num_scv(); ++co)
	{
	// 	get current SCV
		const typename TFVGeom::SCV& scv = geo.scv(co);

	// 	Check associated node
		UG_ASSERT(co == scv.node_id(), "Only one shape per SCV");

	// 	set lin defect
		vvvLinDef[co][_C_][co] = scv.volume();
	}
}
Example #13
bool SchurPrecond<TAlgebra>::
create_and_init_local_schur_complement(SmartPtr<MatrixOperator<matrix_type, vector_type> > A,
		std::vector<slice_desc_type> &skeletonMark)
{
	try{
	SCHUR_PROFILE_BEGIN(SchurPrecondInit_CreateInitLocalSchurComplement);

	m_spSchurComplementOp = make_sp(new SchurComplementOperator<TAlgebra>(A, skeletonMark));
	UG_ASSERT(m_spSchurComplementOp.valid(), "Failed creating the local Schur complement operator!");

//	set dirichlet solver for local Schur complement
	m_spSchurComplementOp->set_dirichlet_solver(m_spDirichletSolver);

	if(debug_writer().valid())
		m_spSchurComplementOp->set_debug(debug_writer());

//	init
	UG_DLOG(SchurDebug, 1, "\n%   - Init local Schur complement ... ");

	m_spSchurComplementOp->init();
	UG_DLOG(SchurDebug, 1, "done.\n");


//	1.4 check all procs
	/*bool bSuccess = true;
	if(!pcl::AllProcsTrue(bSuccess))
	{
		UG_LOG("ERROR in SchurPrecond::init: Some processes could not init"
				" local Schur complement.\n");
		return false;
	}*/
	return true;

	}UG_CATCH_THROW("SchurPrecond::" << __FUNCTION__ << " failed")
	return false;
}
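
A configuration sketch for the preconditioner this method belongs to; the skeleton-solver setter name and the overall wiring are assumptions based on the members used above.

//	sketch (assumed setters): configure the Schur preconditioner before passing it to a solver
SmartPtr<SchurPrecond<TAlgebra> > schur = make_sp(new SchurPrecond<TAlgebra>());
schur->set_dirichlet_solver(dirichletSolver);	// solver for the inner (non-skeleton) block
schur->set_skeleton_solver(skeletonSolver);		// hypothetical name for the skeleton-block solver setter
linSolver->set_preconditioner(schur);			// 'linSolver' is a placeholder, e.g. a CG solver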
void AdaptiveRegularRefiner_MultiGrid::
get_parents_of_marked_closure_elements(std::vector<GridObject*>& parents,
									   Selector::status_t mark)
{
	UG_ASSERT(multi_grid(), "A multi grid has to be assigned to the refiner.");
	MultiGrid& mg = *multi_grid();

	typedef typename BaseClass::selector_t::template traits<TElem>::iterator	TIter;
	for(TIter iter = m_selMarkedElements.begin<TElem>();
		iter != m_selMarkedElements.end<TElem>(); ++iter)
	{
		TElem* e = *iter;
		if(!m_closureElems.is_selected(e))
			continue;

		if(get_mark(e) & mark){
			GridObject* parent = mg.get_parent(e);
			if(parent && !m_closureElems.is_selected(parent))
				parents.push_back(parent);
		}
	}
}
Example #15
void InverseTetTransform(int* indsOut, const int* transformedInds){
	UG_ASSERT(indsOut != transformedInds, "The arrays have to differ!");
	for(int i = 0; i < NUM_VERTICES; ++i)
		indsOut[transformedInds[i]] = i;
}
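
A worked example: if a transform sends vertex 0 to slot 2, vertex 1 to slot 0, vertex 2 to slot 3 and vertex 3 to slot 1, the inverse tells for each slot which original vertex it holds (NUM_VERTICES is assumed to be 4 here).

//	sketch: invert a tetrahedron vertex permutation
const int transformed[4] = {2, 0, 3, 1};	// original vertex i ends up at slot transformed[i]
int inverse[4];
InverseTetTransform(inverse, transformed);
//	inverse is now {1, 3, 0, 2}, e.g. slot 0 of the transformed tet holds original vertex 1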
Example #16
void CRFVGeometry<TElem, TWorldDim>::
update(GridObject* elem, const MathVector<worldDim>* vCornerCoords, const ISubsetHandler* ish)
{
	UG_ASSERT(dynamic_cast<TElem*>(elem) != NULL, "Wrong element type.");
	TElem* pElem = static_cast<TElem*>(elem);

// 	if already updated for this element, do nothing
	if(m_pElem == pElem) return;
	m_pElem = pElem;

	//  compute barycenter coordinates
	globalBary = vCornerCoords[0];
	m_vCo[0] = vCornerCoords[0];
	for (size_t j=1;j<m_rRefElem.num(0);j++){
	   globalBary+=vCornerCoords[j];
	   m_vCo[j] = vCornerCoords[j];
	}
	globalBary*=1./(number)m_rRefElem.num(0);

// 	compute global information for scvf
	for(size_t i = 0; i < num_scvf(); ++i)
	{
		for (size_t j=0;j<m_vSCVF[i].numCo-1;j++){
			m_vSCVF[i].vGloPos[j]=vCornerCoords[m_rRefElem.id(dim-2,i,0,j)];
		}
		m_vSCVF[i].vGloPos[m_vSCVF[i].numCo-1]=globalBary;
		AveragePositions(m_vSCVF[i].globalIP, m_vSCVF[i].vGloPos, m_vSCVF[i].numCo);
		ElementNormal<face_type0,worldDim>(m_vSCVF[i].Normal,m_vSCVF[i].vGloPos);// face_type0 identical to scvf type
	}

// 	compute size of scv
	for(size_t i = 0; i < num_scv(); ++i)
	{
		// side nodes in reverse order to fulfill standard element order
		for (int j=0;j<m_vSCV[i].numCorners-1;j++){
			m_vSCV[i].vGloPos[m_vSCV[i].numCorners-2-j]=vCornerCoords[m_rRefElem.id(dim-1,i,0,j)];
		}
		AveragePositions(m_vGlobUnkCoords[i], m_vSCV[i].vGloPos, m_vSCV[i].numCorners-1);
		m_vSCV[i].vGlobIP = m_vGlobUnkCoords[i];

		m_vSCV[i].vGloPos[m_vSCV[i].numCorners-1]=globalBary;
		// 	compute volume of scv and normal to associated element face
		//CRSCVSizeAndNormal<dim>(m_vSCV[i].Vol,m_vSCV[i].Normal,m_vSCV[i].vGloPos,m_vSCV[i].numCorners);
		if (m_vSCV[i].numCorners-1==dim){
		     m_vSCV[i].Vol = ElementSize<scv_type0,worldDim>(m_vSCV[i].vGloPos);
		     ElementNormal<face_type0, worldDim>(m_vSCV[i].Normal, m_vSCV[i].vGloPos);
		} else { // m_vSCV[i].numCorners-2==dim , only possible in 3d (pyramid)
		     m_vSCV[i].Vol = ElementSize<scv_type1,worldDim>(m_vSCV[i].vGloPos);
		     ElementNormal<face_type1, worldDim>(m_vSCV[i].Normal, m_vSCV[i].vGloPos);
		}
		// nodes are in reverse order therefore reverse sign to get outward normal
		m_vSCV[i].Normal*=-1;
	}

// 	Shapes and Derivatives
	m_mapping.update(vCornerCoords);

//	compute jacobian for linear mapping
	if(ReferenceMapping<ref_elem_type, worldDim>::isLinear)
	{
		MathMatrix<worldDim,dim> JtInv;
		m_mapping.jacobian_transposed_inverse(JtInv, m_vSCVF[0].local_ip());
		const number detJ = m_mapping.sqrt_gram_det(m_vSCVF[0].local_ip());

		for(size_t i = 0; i < num_scvf(); ++i)
		{
			m_vSCVF[i].JtInv = JtInv;
			m_vSCVF[i].detj = detJ;
		}

		for(size_t i = 0; i < num_scv(); ++i)
		{
			m_vSCV[i].JtInv = JtInv;
			m_vSCV[i].detj = detJ;
		}
	}
//	else compute jacobian for each integration point
	else
	{
		for(size_t i = 0; i < num_scvf(); ++i)
		{
			m_mapping.jacobian_transposed_inverse(m_vSCVF[i].JtInv, m_vSCVF[i].local_ip());
			m_vSCVF[i].detj = m_mapping.sqrt_gram_det(m_vSCVF[i].local_ip());
		}
		for(size_t i = 0; i < num_scv(); ++i)
		{
			m_mapping.jacobian_transposed_inverse(m_vSCV[i].JtInv, m_vSCV[i].local_ip());
			m_vSCV[i].detj = m_mapping.sqrt_gram_det(m_vSCV[i].local_ip());
		}
	}

//	compute global gradients
	for(size_t i = 0; i < num_scvf(); ++i)
		for(size_t sh = 0; sh < scvf(i).num_sh(); ++sh)
			MatVecMult(m_vSCVF[i].vGlobalGrad[sh], m_vSCVF[i].JtInv, m_vSCVF[i].vLocalGrad[sh]);

	for(size_t i = 0; i < num_scv(); ++i)
		for(size_t sh = 0; sh < scv(i).num_sh(); ++sh)
			MatVecMult(m_vSCV[i].vGlobalGrad[sh], m_vSCV[i].JtInv, m_vSCV[i].vLocalGrad[sh]);

// 	copy ip points in list (SCVF)
	for(size_t i = 0; i < num_scvf(); ++i)
		m_vGlobSCVF_IP[i] = scvf(i).global_ip();

//	if no boundary subsets required, return
	if(num_boundary_subsets() == 0 || ish == NULL) return;
	else update_boundary_faces(pElem, vCornerCoords, ish);
}
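
A sketch of the typical call pattern for this geometry, following the GeomProvider access already used in lin_def_mass above; the concrete element type and the placeholder variables are assumptions.

//	sketch: update the CR geometry for one element and sum up its SCV volumes
typedef CRFVGeometry<Triangle, 2> TFVGeom;
TFVGeom& geo = GeomProvider<TFVGeom>::get();
geo.update(elem, vCornerCoords, NULL);			// 'elem' and 'vCornerCoords' are placeholders
number elemVol = 0;
for(size_t i = 0; i < geo.num_scv(); ++i)
	elemVol += geo.scv(i).volume();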
bool AddEntriesToLevelIndexLayout(IndexLayout& indexLayoutOut,
                                  DoFDistribution& dofDistr, TLayout& elemLayout,
					  const std::map<int, std::vector<bool> >* pIgnoreMap = NULL)
{
//	iterator for grid element interfaces
	typedef typename TLayout::iterator InterfaceIterator;

//	type of grid element interfaces
	typedef typename TLayout::Interface ElemInterface;

//	iterator for grid elements
	typedef typename ElemInterface::iterator ElemIterator;

//	type of index interfaces
	typedef IndexLayout::Interface IndexInterface;

//	iterate over all grid element interfaces
	for(InterfaceIterator iIter = elemLayout.begin();
		iIter != elemLayout.end(); ++iIter)
	{
	//	get a grid element interface
		ElemInterface& elemInterface = elemLayout.interface(iIter);

	//	get a corresponding index interface
		IndexInterface& indexInterface = indexLayoutOut.interface(
											elemLayout.proc_id(iIter));

	//	if some elements shall be ignored, then we'll perform a special loop
		if(pIgnoreMap){
			std::map<int, std::vector<bool> >::const_iterator
				findIter = pIgnoreMap->find(elemInterface.get_target_proc());

			UG_ASSERT(findIter != pIgnoreMap->end(), "The vector has to exist");
			const std::vector<bool>& vec = findIter->second;

			UG_ASSERT(vec.size() == elemInterface.size(), "Sizes have to match!");

		//	iterate over entries in the grid element interface
			int counter = 0;
			for(ElemIterator eIter = elemInterface.begin();
				eIter != elemInterface.end(); ++eIter, ++counter)
			{
			//	if the corresponding vec-entry is true, then we'll ignore the elem.
				if(vec[counter])
					continue;

			//	get the grid element
				typename ElemInterface::Element elem = elemInterface.get_element(eIter);

			//	get the algebraic indices on the grid element
				std::vector<size_t> indices;
				dofDistr.inner_algebra_indices(elem, indices);

			//	add the indices to the interface
				for(size_t i = 0; i < indices.size(); ++i)
					indexInterface.push_back(indices[i]);
			}
		}
		else{
		//	iterate over entries in the grid element interface
			for(ElemIterator eIter = elemInterface.begin();
				eIter != elemInterface.end(); ++eIter)
			{
			//	get the grid element
				typename ElemInterface::Element elem = elemInterface.get_element(eIter);

			//	get the algebraic indices on the grid element
				std::vector<size_t> indices;
				dofDistr.inner_algebra_indices(elem, indices);

			//	add the indices to the interface
				for(size_t i = 0; i < indices.size(); ++i)
					indexInterface.push_back(indices[i]);
			}
		}
	}

//	accessing an interface in a layout creates it if it does not yet exist. We thus
//	remove the empty interfaces again to avoid unnecessary storage and communication.
	pcl::RemoveEmptyInterfaces(elemLayout);

//	we're done
	return true;
}
void ParallelHNodeAdjuster::
ref_marks_changed(IRefiner& ref,
			   	  const std::vector<Vertex*>& vrts,
			   	  const std::vector<Edge*>& edges,
			   	  const std::vector<Face*>& faces,
			   	  const std::vector<Volume*>& vols)
{
	UG_DLOG(LIB_GRID, 1, "refMarkAdjuster-start: ParallelHNodeAdjuster::ref_marks_changed\n");
	UG_ASSERT(ref.grid(), "A refiner has to operate on a grid, before marks can be adjusted!");
	if(!ref.grid()){
		UG_DLOG(LIB_GRID, 1, "refMarkAdjuster-stop: ParallelHNodeAdjuster::ref_marks_changed\n");
		return;
	}
	
	Grid& grid = *ref.grid();
	if(!grid.is_parallel()){
		UG_DLOG(LIB_GRID, 1, "refMarkAdjuster-stop: ParallelHNodeAdjuster::ref_marks_changed\n");
		return;
	}

	DistributedGridManager& distGridMgr = *grid.distributed_grid_manager();
	GridLayoutMap& layoutMap = distGridMgr.grid_layout_map();

//	check whether new interface elements have been selected
	bool newInterfaceVrtsMarked = ContainsInterfaceElem(vrts, distGridMgr);
	bool newInterfaceEdgeMarked = ContainsInterfaceElem(edges, distGridMgr);
	bool newInterfaceFacesMarked = ContainsInterfaceElem(faces, distGridMgr);
	bool newInterfaceVolsMarked = ContainsInterfaceElem(vols, distGridMgr);

	bool newlyMarkedElems = newInterfaceVrtsMarked ||
							newInterfaceEdgeMarked ||
							newInterfaceFacesMarked ||
							newInterfaceVolsMarked;

	bool exchangeFlag = pcl::OneProcTrue(newlyMarkedElems);

	if(exchangeFlag){
		const byte consideredMarks = RM_REFINE | RM_ANISOTROPIC;
		ComPol_BroadcastRefineMarks<VertexLayout> compolRefVRT(ref, consideredMarks);
		ComPol_BroadcastRefineMarks<EdgeLayout> compolRefEDGE(ref, consideredMarks);
		ComPol_BroadcastRefineMarks<FaceLayout> compolRefFACE(ref, consideredMarks);

	//	send data SLAVE -> MASTER
		m_intfComVRT.exchange_data(layoutMap, INT_H_SLAVE, INT_H_MASTER,
									compolRefVRT);

		m_intfComEDGE.exchange_data(layoutMap, INT_H_SLAVE, INT_H_MASTER,
									compolRefEDGE);

		m_intfComFACE.exchange_data(layoutMap, INT_H_SLAVE, INT_H_MASTER,
									compolRefFACE);

		m_intfComVRT.communicate();
		m_intfComEDGE.communicate();
		m_intfComFACE.communicate();

	//	and now MASTER -> SLAVE (the selection has been adjusted on the fly)
		m_intfComVRT.exchange_data(layoutMap, INT_H_MASTER, INT_H_SLAVE,
									compolRefVRT);

		m_intfComEDGE.exchange_data(layoutMap, INT_H_MASTER, INT_H_SLAVE,
									compolRefEDGE);

		m_intfComFACE.exchange_data(layoutMap, INT_H_MASTER, INT_H_SLAVE,
									compolRefFACE);

		m_intfComVRT.communicate();
		m_intfComEDGE.communicate();
		m_intfComFACE.communicate();

		UG_DLOG(LIB_GRID, 1, "refMarkAdjuster-stop (force continue): ParallelHNodeAdjuster::ref_marks_changed\n");
	}

	UG_DLOG(LIB_GRID, 1, "refMarkAdjuster-stop: ParallelHNodeAdjuster::ref_marks_changed\n");
}
Example #19
inline Vertex* GetVertex(Face* face, size_t i)
{
	UG_ASSERT(i < face->num_vertices(), "Vertex index out of range.");
	return face->vertex(i);
}
Example #20
inline Vertex* GetVertex(Volume* vol, size_t i)
{
	UG_ASSERT(i < vol->num_vertices(), "Vertex index out of range.");
	return vol->vertex(i);
}
Example #21
inline Vertex* GetVertex(Edge* edge, size_t i)
{
	UG_ASSERT(i < edge->num_vertices(), "Vertex index out of range.");
	return edge->vertex(i);
}
Example #22
////////////////////////////////////////////////////////////////////////
//	GetVertex
inline Vertex* GetVertex(Vertex* vrt, size_t i)
{
	UG_ASSERT(i < 1, "A Vertex has only one vertex");
	return vrt;
}
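
These overloads give every element type a uniform vertex access, which is mainly useful in templated helpers; a small sketch, assuming NumVertices is the companion helper to GetVertex:

//	sketch: print the positions of all vertices of an arbitrary element
template <class TElem>
void PrintVertexPositions(TElem* elem, Grid::VertexAttachmentAccessor<APosition>& aaPos)
{
	for(size_t i = 0; i < NumVertices(elem); ++i)
		UG_LOG(aaPos[GetVertex(elem, i)] << "\n");
}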