Example #1

Real MultiGrid::get_phi_at(Real x, Real y, Real z) {
    Real p, tmp;
    MultiGrid* g;
    p = 0.0;
    // Scan the local nodes; only the rank that owns the grid containing
    // (x, y, z) picks up a value, all other ranks keep p == 0.
    for (int l = 0; l < get_local_node_cnt(); l++) {
        g = dynamic_cast<MultiGrid*>(get_local_node(l));
        if (MPI_rank() == g->proc()) {
            for (int k = 1; k < PNX - 1; k++) {
                if (z >= g->MultiGrid::zf(k) && z < g->MultiGrid::zf(k + 1)) {
                    for (int j = 1; j < PNX - 1; j++) {
                        if (y >= g->MultiGrid::yf(j) && y < g->MultiGrid::yf(j + 1)) {
                            for (int i = 1; i < PNX - 1; i++) {
                                if (x >= g->MultiGrid::xf(i) && x < g->MultiGrid::xf(i + 1)) {
                                    // Only unrefined (leaf) zones carry the final value.
                                    if (!g->poisson_zone_is_refined(i, j, k)) {
                                        p = g->phi(i, j, k);
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    // Exactly one rank holds a nonzero value; the sum distributes it to all.
    tmp = p;
    MPI_Allreduce(&tmp, &p, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); // assumes Real == double
    return p;
}
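Note that the MPI_Allreduce makes this a collective call: every rank must invoke get_phi_at with the same point. A minimal usage sketch (the root-grid accessor and sample point are hypothetical, not part of this listing):

// Hypothetical usage: called on all ranks with identical coordinates.
MultiGrid* root = get_root_grid();                 // assumed accessor
Real phi_center = root->get_phi_at(0.0, 0.0, 0.0); // same result on every rank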
Example #2
template <class TAPos>
void CopyGridLevel(MultiGrid& srcMG, Grid& destGrid,
				   ISubsetHandler& srcSH, ISubsetHandler& destSH,
				   int lvl, TAPos aPos)
{
	Grid::VertexAttachmentAccessor<TAPos> aaPos(destGrid, aPos);
	Grid::VertexAttachmentAccessor<TAPos> aaSrcPos(srcMG, aPos);
	GridObjectCollection goc = srcMG.get_grid_objects();

	AVertex aNewVrt;
	srcMG.attach_to_vertices(aNewVrt);
	Grid::VertexAttachmentAccessor<AVertex> aaNewVrt(srcMG, aNewVrt);

	for(int si = destSH.num_subsets(); si < srcSH.num_subsets(); ++si)
	{
		destSH.subset_info(si) = srcSH.subset_info(si);
	}

	for(VertexIterator vrtIter = goc.begin<Vertex>(lvl); vrtIter != goc.end<Vertex>(lvl); ++vrtIter)
	{
		Vertex* srcVrt  = *vrtIter;
		Vertex* destVrt = *destGrid.create_by_cloning(srcVrt);

		aaNewVrt[srcVrt] = destVrt;
		aaPos[destVrt] = aaSrcPos[srcVrt];
		destSH.assign_subset(destVrt, srcSH.get_subset_index(srcVrt));
	}

	CopyGridLevelElements<Edge>(srcMG, destGrid, srcSH, destSH, lvl, aNewVrt);
	CopyGridLevelElements<Face>(srcMG, destGrid, srcSH, destSH, lvl, aNewVrt);
	CopyGridLevelElements<Volume>(srcMG, destGrid, srcSH, destSH, lvl, aNewVrt);

	srcMG.detach_from_vertices(aNewVrt);
}
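A hedged usage sketch (source-grid construction elided, names hypothetical): copy level 2 of a multigrid into a flat grid, assuming aPosition is attached to the source.

// Sketch: clone one hierarchy level into a standalone grid.
Grid destGrid;
SubsetHandler destSH(destGrid);
destGrid.attach_to_vertices(aPosition);   // destination needs its own position attachment
CopyGridLevel(srcMG, destGrid, srcSH, destSH, 2, aPosition);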
Example #3
bool SaveGridHierarchyTransformed(MultiGrid& mg, ISubsetHandler& sh,
								  const char* filename, number offset)
{
	PROFILE_FUNC_GROUP("grid");
	APosition aPos;
//	uses auto-attach
	Grid::AttachmentAccessor<Vertex, APosition> aaPos(mg, aPos, true);

//	copy the existing position to aPos. We take care of dimension differences.
//	Note:	if the method was implemented for domains, this could be implemented
//			in a nicer way.
	if(mg.has_vertex_attachment(aPosition))
		ConvertMathVectorAttachmentValues<Vertex>(mg, aPosition, aPos);
	else if(mg.has_vertex_attachment(aPosition2))
		ConvertMathVectorAttachmentValues<Vertex>(mg, aPosition2, aPos);
	else if(mg.has_vertex_attachment(aPosition1))
		ConvertMathVectorAttachmentValues<Vertex>(mg, aPosition1, aPos);

//	iterate through all vertices and apply an offset depending on their level.
	for(size_t lvl = 0; lvl < mg.num_levels(); ++lvl){
		for(VertexIterator iter = mg.begin<Vertex>(lvl);
			iter != mg.end<Vertex>(lvl); ++iter)
		{
			aaPos[*iter].z() += (number)lvl * offset;
		}
	}

//	finally save the grid
	bool writeSuccess = SaveGridToFile(mg, sh, filename, aPos);

//	clean up
	mg.detach_from_vertices(aPos);

	return writeSuccess;
}
Example #4
bool TestGridLayoutMap(MultiGrid& mg, GridLayoutMap& glm)
{
	if(mg.has_vertex_attachment(aPosition))
		return TestGridLayoutMap(mg, glm, aPosition);
	else if(mg.has_vertex_attachment(aPosition2))
		return TestGridLayoutMap(mg, glm, aPosition2);
	else if(mg.has_vertex_attachment(aPosition1))
		return TestGridLayoutMap(mg, glm, aPosition1);
	else
		UG_LOG("ERROR in TestGridLayoutMap: A standard position attachment"
				" is required.\n");
	return false;
}
Example #5
bool SaveGridLevelToFile(MultiGrid& srcMG, ISubsetHandler& srcSH, int lvl, const char* filename)
{
//	check whether one of the standard attachments is attached and call
//	SaveGridLevel with that attachment
	if(srcMG.has_vertex_attachment(aPosition))
		return SaveGridLevel(srcMG, srcSH, lvl, filename, aPosition);
	if(srcMG.has_vertex_attachment(aPosition2))
		return SaveGridLevel(srcMG, srcSH, lvl, filename, aPosition2);
	if(srcMG.has_vertex_attachment(aPosition1))
		return SaveGridLevel(srcMG, srcSH, lvl, filename, aPosition1);

	return false;
}
Example #6
bool SaveSurfaceViewTransformed(MultiGrid& mg, const SurfaceView& sv,
								const char* filename, number offset)
{
	PROFILE_FUNC_GROUP("grid");

	APosition aPos;
//	uses auto-attach
	Grid::AttachmentAccessor<Vertex, APosition> aaPos(mg, aPos, true);

//	copy the existing position to aPos. We take care of dimension differences.
//	Note:	if the method was implemented for domains, this could be implemented
//			in a nicer way.
	if(mg.has_vertex_attachment(aPosition))
		ConvertMathVectorAttachmentValues<Vertex>(mg, aPosition, aPos);
	else if(mg.has_vertex_attachment(aPosition2))
		ConvertMathVectorAttachmentValues<Vertex>(mg, aPosition2, aPos);
	else if(mg.has_vertex_attachment(aPosition1))
		ConvertMathVectorAttachmentValues<Vertex>(mg, aPosition1, aPos);

//	iterate through all vertices and apply an offset depending on their level.
	for(size_t lvl = 0; lvl < mg.num_levels(); ++lvl){
		for(VertexIterator iter = mg.begin<Vertex>(lvl);
			iter != mg.end<Vertex>(lvl); ++iter)
		{
			aaPos[*iter].z() += (number)lvl * offset;
		}
	}

//	create a subset handler which holds different subsets for the different interface types
	SubsetHandler sh(mg);

	AssignSubsetsBySurfaceViewState<Vertex>(sh, sv, mg);
	AssignSubsetsBySurfaceViewState<Edge>(sh, sv, mg);
	AssignSubsetsBySurfaceViewState<Face>(sh, sv, mg);
	AssignSubsetsBySurfaceViewState<Volume>(sh, sv, mg);

	AssignSubsetColors(sh);
	EraseEmptySubsets(sh);

//	finally save the grid
	bool writeSuccess = SaveGridToFile(mg, sh, filename, aPos);

//	clean up
	mg.detach_from_vertices(aPos);

	return writeSuccess;
}
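A usage sketch (file name and offset hypothetical), assuming a SurfaceView sv already exists for mg:

// Write the surface view with each level shifted by 5.0 in z for inspection.
SaveSurfaceViewTransformed(mg, sv, "surface_view.ugx", 5.0);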
Example #7

template <typename TDomain>
template <typename TElem, typename TFEGeom>
void ConvectionDiffusionFE<TDomain>::
compute_err_est_M_elem(const LocalVector& u, GridObject* elem, const MathVector<dim> vCornerCoords[], const number& scale)
{
// note: mass parts only enter volume term

	err_est_type* err_est_data = dynamic_cast<err_est_type*>(this->m_spErrEstData.get());

	if (err_est_data->surface_view().get() == NULL) {UG_THROW("Error estimator has NULL surface view.");}
	MultiGrid* pErrEstGrid = (MultiGrid*) (err_est_data->surface_view()->subset_handler()->multi_grid());

	typename MultiGrid::traits<typename SideAndElemErrEstData<TDomain>::elem_type>::secure_container elem_list;
	pErrEstGrid->associated_elements_sorted(elem_list, (TElem*) elem);
	if (elem_list.size() != 1)
		UG_THROW ("Mismatch of numbers of sides in 'ConvectionDiffusionFE::compute_err_est_elem'");

//	request geometry
	static const TFEGeom& geo = GeomProvider<TFEGeom>::get();

// 	loop integration points
	try
	{
		for (size_t ip = 0; ip < err_est_data->num_elem_ips(elem->reference_object_id()); ip++)
		{
			number total = 0.0;

		// mass scale //
			if (m_imMassScale.data_given())
			{
				number val = 0.0;
				for (size_t sh = 0; sh < geo.num_sh(); sh++)
					val += u(_C_,sh) * m_shapeValues.shapeAtElemIP(sh,ip);

				total += m_imMassScale[ip] * val;
			}

		// mass //
			if (m_imMass.data_given())
			{
				total += m_imMass[ip];
			}

			(*err_est_data)(elem_list[0],ip) += scale * total;
		}
	}
	UG_CATCH_THROW("Values for the error estimator could not be assembled at every IP." << std::endl
			<< "Maybe wrong type of ErrEstData object? This implementation needs: SideAndElemErrEstData.");
}
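Written out, the volume term accumulated per element integration point is (informal notation, reconstructed from the loop above):

// eta(elem, ip) += scale * ( massScale(ip) * u_h(ip) + mass(ip) ),
// where u_h(ip) = sum_sh u(_C_, sh) * N_sh(ip) is the FE solution value at the IP.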
Example #8
bool CreateSmoothHierarchy(MultiGrid& mg, size_t numRefs)
{
	PROFILE_FUNC_GROUP("grid");
	IRefinementCallback* refCallback = NULL;
//	we're only checking for the main attachments here.
//todo: improve this - add a domain-based hierarchy creator.
	if(mg.has_vertex_attachment(aPosition1))
		refCallback = new SubdivisionLoopProjector<APosition1>(mg, aPosition1, aPosition1);
	else if(mg.has_vertex_attachment(aPosition2))
		refCallback = new SubdivisionLoopProjector<APosition2>(mg, aPosition2, aPosition2);
	else if(mg.has_vertex_attachment(aPosition))
		refCallback = new SubdivisionLoopProjector<APosition>(mg, aPosition, aPosition);
		
	if(!refCallback){
		UG_LOG("No standard position attachment found. Aborting.\n");
		return false;
	}
	
	GlobalMultiGridRefiner ref(mg, refCallback);

	for(size_t lvl = 0; lvl < numRefs; ++lvl){
		ref.refine();
	}

	if(mg.has_vertex_attachment(aPosition1))
		ProjectToLimitPLoop(mg, aPosition1, aPosition1);
	else if(mg.has_vertex_attachment(aPosition2))
		ProjectToLimitPLoop(mg, aPosition2, aPosition2);
	else if(mg.has_vertex_attachment(aPosition))
		ProjectToLimitPLoop(mg, aPosition, aPosition);

	delete refCallback;
	return true;
}
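A usage sketch combining routines from this listing (file names hypothetical): load a coarse grid, build three subdivision levels, and save the hierarchy.

MultiGrid mg;
SubsetHandler sh(mg);
if(LoadGridFromFile(mg, sh, "coarse.ugx")){
	if(CreateSmoothHierarchy(mg, 3))
		SaveGridHierarchy(mg, "smooth_hierarchy.ugx");
}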
Example #9
void TestSubdivision(const char* fileIn, const char* fileOut, int numRefs)
{
	PROFILE_FUNC_GROUP("grid");
//todo: Callbacks have to make sure that their attachment is accessible in the grid.
//		even if they were initialized before the attachment was attached to the grid.
	MultiGrid mg;
	SubsetHandler sh(mg);
	SubdivisionLoopProjector<APosition> refCallback(mg, aPosition, aPosition);
	GlobalMultiGridRefiner ref(mg, &refCallback);
	
	if(LoadGridFromFile(mg, sh, fileIn)){
		for(int lvl = 0; lvl < numRefs; ++lvl){
			ref.refine();
		}

		ProjectToLimitPLoop(mg, aPosition, aPosition);
		SaveGridToFile(mg, mg.get_hierarchy_handler(), fileOut);

	}
	else{
		UG_LOG("Load failed. aborting...\n");
	}
}
Example #10
template <class TElem>
static void AssignSubsetsByInterfaceType(SubsetHandler& sh, MultiGrid& mg)
{
	const int siNormal = 0;
	const int siHMaster = 1;
	const int siHSlave = 1 << 1;
	const int siVMaster = 1 << 2;
	const int siVSlave = 1 << 3;

	const char* subsetNames[] = {"normal", "hmaster", "hslave", "hslave+hmaster",
						  "vmaster", "vmaster+hmaster", "vmaster+hslave",
						  "vmaster+hslave+hmaster", "vslave", "vslave+hmaster",
						  "vslave+hslave", "vslave+hslave+hmaster",
						  "vslave+vmaster", "vslave+vmaster+hmaster",
						  "vslave+vmaster+hslave", "vslave+vmaster+hmaster+hslave"};

	for(int i = 0; i < 16; ++i)
		sh.subset_info(i).name = subsetNames[i];

	typedef typename Grid::traits<TElem>::iterator TIter;
	for(TIter iter = mg.begin<TElem>(); iter != mg.end<TElem>(); ++iter){
		int status = ES_NONE;

		#ifdef UG_PARALLEL
			DistributedGridManager* distGridMgr = mg.distributed_grid_manager();
			if(distGridMgr)
				status = distGridMgr->get_status(*iter);
		#endif

		int index = siNormal;
		if(status & ES_H_MASTER)
			index |= siHMaster;
		if(status & ES_H_SLAVE)
			index |= siHSlave;
		if(status & ES_V_MASTER)
			index |= siVMaster;
		if(status & ES_V_SLAVE)
			index |= siVSlave;

		sh.assign_subset(*iter, index);
	}
}
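A usage sketch: instantiate the function once per element type and then color the subsets, mirroring what Example #6 does with AssignSubsetsBySurfaceViewState.

// Hypothetical driver: classify every element type by its interface state.
SubsetHandler sh(mg);
AssignSubsetsByInterfaceType<Vertex>(sh, mg);
AssignSubsetsByInterfaceType<Edge>(sh, mg);
AssignSubsetsByInterfaceType<Face>(sh, mg);
AssignSubsetsByInterfaceType<Volume>(sh, mg);
AssignSubsetColors(sh);
EraseEmptySubsets(sh);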
Example #11
template <class TElem>
void CopyGridLevelElements(MultiGrid& srcMG, Grid& destGrid,
				           ISubsetHandler& srcSH, ISubsetHandler& destSH,
						   int lvl, AVertex& aNewVrt)
{
	Grid::VertexAttachmentAccessor<AVertex> aaNewVrt(srcMG, aNewVrt);
	GridObjectCollection goc = srcMG.get_grid_objects();
	CustomVertexGroup vrts;

	typedef typename Grid::traits<TElem>::iterator iter_t;

	for(iter_t eIter = goc.begin<TElem>(lvl); eIter != goc.end<TElem>(lvl); ++eIter)
	{
		TElem* e = *eIter;
		vrts.resize(e->num_vertices());

		for(size_t iv = 0; iv < e->num_vertices(); ++iv)
		{
			vrts.set_vertex(iv, aaNewVrt[e->vertex(iv)]);
		}

		TElem* ne = *destGrid.create_by_cloning(e, vrts);
		destSH.assign_subset(ne, srcSH.get_subset_index(e));
	}
}
Example #12
int main(int argc, char* argv[])
{

#ifdef CH_MPI
  MPI_Init (&argc, &argv);
#endif

  // test parameters
  const int nGrids = 3;
  const int nCells0 = 32;
  // xLo has to be zero in order for DiriBc to work.
  const RealVect xLo = RealVect::Zero;
  const Real xHi = 1.0;
  const Box box0(IntVect::Zero, (nCells0-1)*IntVect::Unit);
  const int nGhosts = 1;
  const int resNT = 2; // norm Type
  const int errNT = 0;
  // A test is considered as a failure
  // if its convergence rate is smaller than below.
  const Real targetConvergeRate = 1.75;

  // solver parameters
  // To converge within 10 V-Cycles in 1D,
  //  nRelax=3 is the minimum number of relaxations.
  const int nRelax = 3; // m_pre=m_post
  // cycle Type, 1 : V-Cycle; -1 : FMG-Cycle
  const int cycleType[2] =
  {
    1, -1
  };
  const std::string cycleStr[2] =
  {
    "   V" , " FMG"
  };

  // test results holder
  const int nCycles[2] =
  {
    9, 5
  };
  const int maxCycles = 10; // > max(nCycles)
  //  Real resNorm[nGrids][nCycles+1], errNorm[nGrids][nCycles+1];
  Real resNorm[nGrids][maxCycles], errNorm[nGrids][maxCycles];
  Real convergeRate[nGrids-1][2];
  const Real log2r = 1.0/log(2.0);

  // status records the number of errors detected.
  int status = 0;
  for (int j=0; j<2; j++)
  {
    pout() << "\n**************************************************\n"
           << "\nTesting MultiGrid::oneCycle(correction, residual)\n"
           << " cycle type = " << cycleStr[j]
           << "; m_pre = m_post = " << nRelax << "\n";

    for (int iGrid=0; iGrid<nGrids; iGrid++)
    {
      int ref = 1;
      for (int i=0; i<iGrid; i++)
        ref*=2;
      const Real dx = xHi/nCells0/ref;
      const Box domain = refine(box0,ref);
      const Box ghostBox = grow(domain,nGhosts);

      pout() << "\n----------------------------------------------------\n";
      pout() << "nCells = " << nCells0*ref << " ; dx = " << dx << " \n";

      FArrayBox phi(ghostBox, 1);
      FArrayBox correction(ghostBox, 1);
      FArrayBox rhs(domain, 1);
      FArrayBox error(domain, 1);
      FArrayBox phiExact(domain, 1);
      FArrayBox residual(domain, 1);

      // set initial guess
      phi.setVal(0.0);
      // set RHS and the exact solution
      for (BoxIterator bit(domain); bit.ok(); ++bit)
        {
          const RealVect offset = bit()-domain.smallEnd();
          const RealVect x = xLo + dx*(0.5+offset);
          rhs(bit()) = rhsFunc( x );
          phiExact(bit()) = exactSolution( x );
        }

      // Initialize big objects
      NewPoissonOpFactory opFactory;
      opFactory.define(dx*RealVect(IntVect::Unit), constDiriBC);
      MultiGrid<FArrayBox> solver;
      BiCGStabSolver<FArrayBox> bottomSolver;
      bottomSolver.m_verbosity = 0;
      MGLevelOp<FArrayBox>* op = opFactory.MGnewOp(domain,0);
      solver.m_numMG = 1;
      solver.m_bottom = 1;
      solver.m_pre = nRelax;
      solver.m_post = nRelax;
      solver.m_cycle = cycleType[j];
      solver.define(opFactory, &bottomSolver, domain);

      // put the data into residual-correction form
      op->residual(residual, phi, rhs);
      resNorm[iGrid][0] = residual.norm(resNT);
      op->axby(error, phi, phiExact, 1, -1);
      errNorm[iGrid][0] = error.norm(errNT);
      solver.init(correction, residual);

      // Solve the problem using MultiGrid::oneCycle
      for (int i=0; i<nCycles[j]; i++)
        {
          correction.setVal(0.0);
          solver.oneCycle(correction, residual);
          op->incr(phi, correction, 1);
          op->residual(residual, phi, rhs);
          resNorm[iGrid][i+1] = residual.norm(resNT);
          op->axby(error, phi, phiExact, 1, -1);
          errNorm[iGrid][i+1] = error.norm(errNT);
        }
      delete op;

      // output a table of results
      pout() << cycleStr[j] << "-Cycle No. |  residual " << resNT
            << "-norm  |  Error " << errNT << "-norm  \n";
      for (int i=0; i<nCycles[j]+1; i++)
        {
          pout() << "         " << i << "      |    " << resNorm[iGrid][i]
                 << "        |    " << errNorm[iGrid][i]  << "\n";
        }
    } // end grid loop

    pout() << "\nConvergence Rate based on the error in the last cycle:\n";
    for (int i=0; i<nGrids-1; i++)
    {
      Real ratio = errNorm[i][nCycles[j]]/errNorm[i+1][nCycles[j]];
      convergeRate[i][j] = log(ratio)*log2r;
      if (convergeRate[i][j] < targetConvergeRate)
      {
        status += 1;
      }
      pout() << "    " << convergeRate[i][j] << "\n";
    }
  }// end cycle type

  if (status==0)
  {
    pout() <<  "All tests passed!\n";
  }
  else
  {
    pout() <<  status << " tests failed!\n";
  }

#ifdef CH_MPI
  MPI_Finalize ();
#endif

  return status;
}
Example #13

template <typename TDomain>
template <typename TElem, typename TFEGeom>
void ConvectionDiffusionFE<TDomain>::
compute_err_est_rhs_elem(GridObject* elem, const MathVector<dim> vCornerCoords[], const number& scale)
{
	typedef typename reference_element_traits<TElem>::reference_element_type ref_elem_type;

	err_est_type* err_est_data = dynamic_cast<err_est_type*>(this->m_spErrEstData.get());

	if (err_est_data->surface_view().get() == NULL) {UG_THROW("Error estimator has NULL surface view.");}
	MultiGrid* pErrEstGrid = (MultiGrid*) (err_est_data->surface_view()->subset_handler()->multi_grid());

// SIDE TERMS //

//	get the sides of the element
	typename MultiGrid::traits<typename SideAndElemErrEstData<TDomain>::side_type>::secure_container side_list;
	pErrEstGrid->associated_elements_sorted(side_list, (TElem*) elem);
	if (side_list.size() != (size_t) ref_elem_type::numSides)
		UG_THROW ("Mismatch of numbers of sides in 'ConvectionDiffusionFE::compute_err_est_elem'");

// loop sides
	size_t passedIPs = 0;
	for (size_t side = 0; side < (size_t) ref_elem_type::numSides; side++)
	{
		// normal on side
		MathVector<dim> normal;
		SideNormal<ref_elem_type,dim>(normal, side, vCornerCoords);
		VecNormalize(normal, normal);

		try
		{
			for (size_t sip = 0; sip < err_est_data->num_side_ips(side_list[side]); sip++)
			{
				size_t ip = passedIPs + sip;

			// vector source //
				if (m_imVectorSource.data_given())
					(*err_est_data)(side_list[side],sip) += scale * VecDot(m_imVectorSource[ip], normal);
			}

			passedIPs += err_est_data->num_side_ips(side_list[side]);
		}
		UG_CATCH_THROW("Values for the error estimator could not be assembled at every IP." << std::endl
				<< "Maybe wrong type of ErrEstData object? This implementation needs: SideAndElemErrEstData.");
	}

// VOLUME TERMS //

	if (!m_imSource.data_given()) return;

	typename MultiGrid::traits<typename SideAndElemErrEstData<TDomain>::elem_type>::secure_container elem_list;
	pErrEstGrid->associated_elements_sorted(elem_list, (TElem*) elem);
	if (elem_list.size() != 1)
		UG_THROW ("Mismatch of numbers of sides in 'ConvectionDiffusionFE::compute_err_est_elem'");

// source //
	try
	{
		for (size_t ip = 0; ip < err_est_data->num_elem_ips(elem->reference_object_id()); ip++)
			(*err_est_data)(elem_list[0],ip) += scale * m_imSource[ip];
	}
	UG_CATCH_THROW("Values for the error estimator could not be assembled at every IP." << std::endl
			<< "Maybe wrong type of ErrEstData object? This implementation needs: SideAndElemErrEstData.");
}
Example #14

template <typename TDomain>
template <typename TElem, typename TFEGeom>
void ConvectionDiffusionFE<TDomain>::
compute_err_est_A_elem(const LocalVector& u, GridObject* elem, const MathVector<dim> vCornerCoords[], const number& scale)
{
	typedef typename reference_element_traits<TElem>::reference_element_type ref_elem_type;

	err_est_type* err_est_data = dynamic_cast<err_est_type*>(this->m_spErrEstData.get());

	if (err_est_data->surface_view().get() == NULL) {UG_THROW("Error estimator has NULL surface view.");}
	MultiGrid* pErrEstGrid = (MultiGrid*) (err_est_data->surface_view()->subset_handler()->multi_grid());

//	request geometry
	static const TFEGeom& geo = GeomProvider<TFEGeom>::get();


// SIDE TERMS //

//	get the sides of the element
	//	We have to cast elem to a pointer of type SideAndElemErrEstData::elem_type
	//	for the SideAndElemErrEstData::operator() to work properly.
	//	This cannot generally be achieved by casting to TElem*, since this method is also registered for
	//	lower-dimensional types TElem, and must therefore be compilable, even if it is never EVER to be executed.
	//	The way we achieve this here, is by calling associated_elements_sorted() which has an implementation for
	//	all possible types. Whatever comes out of it is of course complete nonsense if (and only if)
	//	SideAndElemErrEstData::elem_type != TElem. To be on the safe side, we throw an error if the number of
	//	entries in the list is not as it should be.

	typename MultiGrid::traits<typename SideAndElemErrEstData<TDomain>::side_type>::secure_container side_list;
	pErrEstGrid->associated_elements_sorted(side_list, (TElem*) elem);
	if (side_list.size() != (size_t) ref_elem_type::numSides)
		UG_THROW ("Mismatch of numbers of sides in 'ConvectionDiffusionFE::compute_err_est_elem'");

// 	some help variables
	MathVector<dim> fluxDensity, gradC, normal;

	// FIXME: The computation of the gradient has to be reworked.
	// In the case of P1 shape functions, it is valid. For Q1 shape functions, however,
	// the gradient is not constant (but bilinear) on the element - and along the sides.
	// We cannot use the FVGeom here. Instead, we need to calculate the gradient in each IP!

	// calculate grad u as average (over scvf)
	VecSet(gradC, 0.0);
	for(size_t ii = 0; ii < geo.num_ip(); ++ii)
	{
		for (size_t j=0; j<m_shapeValues.num_sh(); j++)
				VecScaleAppend(gradC, u(_C_,j), geo.global_grad(ii, j));
	}
	VecScale(gradC, gradC, (1.0/geo.num_ip()));

// calculate flux through the sides
	size_t passedIPs = 0;
	for (size_t side=0; side < (size_t) ref_elem_type::numSides; side++)
	{
		// normal on side
		SideNormal<ref_elem_type,dim>(normal, side, vCornerCoords);
		VecNormalize(normal, normal);

		try
		{
			for (size_t sip = 0; sip < err_est_data->num_side_ips(side_list[side]); sip++)
			{
				size_t ip = passedIPs + sip;

				VecSet(fluxDensity, 0.0);

			// diffusion //
				if (m_imDiffusion.data_given())
					MatVecScaleMultAppend(fluxDensity, -1.0, m_imDiffusion[ip], gradC);

			// convection //
				if (m_imVelocity.data_given())
				{
					number val = 0.0;
					for (size_t sh = 0; sh < m_shapeValues.num_sh(); sh++)
						val += u(_C_,sh) * m_shapeValues.shapeAtSideIP(sh,sip);

					VecScaleAppend(fluxDensity, val, m_imVelocity[ip]);
				}

			// general flux //
				if (m_imFlux.data_given())
					VecAppend(fluxDensity, m_imFlux[ip]);

				(*err_est_data)(side_list[side],sip) += scale * VecDot(fluxDensity, normal);
			}

			passedIPs += err_est_data->num_side_ips(side_list[side]);
		}
		UG_CATCH_THROW("Values for the error estimator could not be assembled at every IP." << std::endl
				<< "Maybe wrong type of ErrEstData object? This implementation needs: SideAndElemErrEstData.");
	}

// VOLUME TERMS //

	typename MultiGrid::traits<typename SideAndElemErrEstData<TDomain>::elem_type>::secure_container elem_list;
	pErrEstGrid->associated_elements_sorted(elem_list, (TElem*) elem);
	if (elem_list.size() != 1)
		UG_THROW ("Mismatch of numbers of sides in 'ConvectionDiffusionFE::compute_err_est_elem'");

	try
	{
		for (size_t ip = 0; ip < err_est_data->num_elem_ips(elem->reference_object_id()); ip++)
		{
			number total = 0.0;

		// diffusion //	TODO ONLY FOR (PIECEWISE) CONSTANT DIFFUSION TENSOR SO FAR!
		// div(D*grad(c))
		// nothing to do, as c is piecewise linear and div(D*grad(c)) disappears
		// if D is diagonal and c bilinear, this should also vanish (confirm this!)

		// convection // TODO ONLY FOR (PIECEWISE) CONSTANT OR DIVERGENCE-FREE
					  //      VELOCITY FIELDS SO FAR!
		// div(v*c) = div(v)*c + v*grad(c) -- gradC has been calculated above
			if (m_imVelocity.data_given())
				total += VecDot(m_imVelocity[ip], gradC);

		// general flux // TODO ONLY FOR DIVERGENCE-FREE FLUX FIELD SO FAR!
		// nothing to do

		// reaction //
			if (m_imReactionRate.data_given())
			{
				number val = 0.0;
				for (size_t sh = 0; sh < geo.num_sh(); sh++)
					val += u(_C_,sh) * m_shapeValues.shapeAtElemIP(sh,ip);

				total += m_imReactionRate[ip] * val;
			}

			if (m_imReaction.data_given())
			{
				total += m_imReaction[ip];
			}

			(*err_est_data)(elem_list[0],ip) += scale * total;
		}
	}
	UG_CATCH_THROW("Values for the error estimator could not be assembled at every IP." << std::endl
			<< "Maybe wrong type of ErrEstData object? This implementation needs: SideAndElemErrEstData.");
}
Example #15
bool SaveGridHierarchy(MultiGrid& mg, const char* filename)
{
    PROFILE_FUNC_GROUP("grid");
    return SaveGridToFile(mg, mg.get_hierarchy_handler(), filename);
}
Example #16
template <class TLayout>
bool AddEntriesToSurfaceIndexLayout(IndexLayout& indexLayoutOut,
                                    DoFDistribution& dofDistr,
                                    TLayout& elemLayout,
                                    MultiGrid& mg,
                                    DistributedGridManager& dGrMgr)
{
//	iterator for grid element interfaces
	typedef typename TLayout::iterator InterfaceIterator;

//	type of grid element interfaces
	typedef typename TLayout::Interface ElemInterface;

//	iterator for grid elements
	typedef typename ElemInterface::iterator ElemIterator;

//	type of index interfaces
	typedef IndexLayout::Interface IndexInterface;

//	iterate over all grid element interfaces
	for(InterfaceIterator iIter = elemLayout.begin();
		iIter != elemLayout.end(); ++iIter)
	{
	//	get a grid element interface
		ElemInterface& elemInterface = elemLayout.interface(iIter);

	//	get a corresponding index interface
		IndexInterface& indexInterface = indexLayoutOut.interface(
											elemLayout.proc_id(iIter));

	//	iterate over entries in the grid element interface
		for(ElemIterator eIter = elemInterface.begin();
			eIter != elemInterface.end(); ++eIter)
		{
		//	get the grid element
			typename ElemInterface::Element elem = elemInterface.get_element(eIter);

		//	check if element is on surface (i.e. has no children). Shadows are
		//	not taken into account here, since their indices are already added
		//	to the interface by the shadowing objects
			if(mg.has_children(elem)) {continue;}

		//	check if element is a ghost element, i.e. it is a surface element
		//	but only due to a hierarchical cut of the grid in order to
		//	refine it further on another process. These cuts lead to so called
		//	vertical interfaces.
			if(dGrMgr.is_ghost(elem)) {continue;}

		//	get the algebraic indices on the grid element
			std::vector<size_t> indices;
			dofDistr.inner_algebra_indices(elem, indices);

		//	add the indices to the interface
			for(size_t i = 0; i < indices.size(); ++i)
			{
				indexInterface.push_back(indices[i]);
			}
		}
	}

//	touching an interface means creation. Thus we remove the empty interfaces
//	to avoid storage, communication (should not happen any longer) etc...
	pcl::RemoveEmptyInterfaces(elemLayout);

//	we're done
	return true;
}
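A heavily hedged usage sketch: in ug4 this helper is typically driven by a routine that walks a GridLayoutMap level by level; the accessors below follow that pattern but are assumptions, not part of this listing.

// Hypothetical: add one level's vertex master interfaces to a surface index layout.
IndexLayout masterIndexLayout;
AddEntriesToSurfaceIndexLayout(masterIndexLayout, dofDistr,
                               glm.get_layout<Vertex>(INT_H_MASTER).layout_on_level(lvl),
                               mg, dGrMgr);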