void FluidSimulator::project(float *u, float *v, float *p, float *div)
{
    float h = 1.0f / N;
    // Compute the divergence of the velocity field and clear the pressure guess.
    for (int i = 1; i < N + 1; i++) {
        for (int j = 1; j < M + 1; j++) {
            div[AT(i,j)] = -0.5f * h * (u[AT(i+1,j)] - u[AT(i-1,j)]
                                      + v[AT(i,j+1)] - v[AT(i,j-1)]);
            p[AT(i,j)] = 0;
        }
    }
    set_boundary(0, div);
    set_boundary(0, p);
    // Gauss-Seidel iterations for the pressure Poisson equation.
    for (int k = 0; k < 20; k++) {
        for (int i = 1; i < N + 1; i++) {
            for (int j = 1; j < M + 1; j++) {
                p[AT(i,j)] = (div[AT(i,j)] + p[AT(i-1,j)] + p[AT(i+1,j)]
                                           + p[AT(i,j-1)] + p[AT(i,j+1)]) / 4.0f;
            }
        }
        set_boundary(0, p);
    }
    // Subtract the pressure gradient to leave a divergence-free field.
    for (int i = 1; i < N + 1; i++) {
        for (int j = 1; j < M + 1; j++) {
            u[AT(i,j)] -= 0.5f * (p[AT(i+1,j)] - p[AT(i-1,j)]) / h;
            v[AT(i,j)] -= 0.5f * (p[AT(i,j+1)] - p[AT(i,j-1)]) / h;
        }
    }
    set_boundary(1, u);
    set_boundary(2, v);
}
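/* A minimal sketch of how project() typically slots into a Stam-style
 * velocity step. The member fields u, v, u_prev, v_prev and the method
 * velocity_step() are assumptions for illustration only; the snippets here
 * define just project(), diffusion(), and advection(). */
void FluidSimulator::velocity_step(float viscosity)
{
    diffusion(1, u_prev, u, viscosity);       // diffuse u into the scratch field
    diffusion(2, v_prev, v, viscosity);       // diffuse v into the scratch field
    project(u_prev, v_prev, u, v);            // reuse u, v as p/div scratch
    advection(1, u, u_prev, u_prev, v_prev);  // self-advect the velocity field
    advection(2, v, v_prev, u_prev, v_prev);
    project(u, v, u_prev, v_prev);            // re-project after advection
}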
// amortized log(container size)
void insert_line(T a, T b) {
    auto it = this->insert({ a, b, false });
    while (set_boundary(it, next(it))) this->erase(next(it));
    if (covered(it)) set_boundary(prev(it), it = this->erase(it));
    while (it != this->begin() && covered(prev(it))) {
        this->erase(prev(it));
        set_boundary(prev(it), it);
    }
}
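/* Hedged usage sketch: insert_line() above reads like the add() of a
 * kactl-style LineContainer (a std::multiset keeping the upper envelope of
 * lines y = a*x + b), with set_boundary() in the role of the usual isect()
 * helper and covered() defined further below. The container name and the
 * query() method here are assumptions, not shown in the snippet. */
LineContainer hull;
hull.insert_line(2, 1);           // y = 2x + 1
hull.insert_line(-1, 4);          // y = -x + 4
long long best = hull.query(3);   // hypothetical query: max over lines at x = 3 (here 7)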
void FluidSimulator::advection(int boundary, float *d, float *d0, float *u, float *v)
{
    int i0 = 0, j0 = 0, i1 = 0, j1 = 0;
    float dt = FluidSimulator::dt;
    float x, y;
    float s0 = 0.0f, s1 = 0.0f, t0 = 0.0f, t1 = 0.0f;
    float dt0 = dt * N;  // time step in grid units (was left uninitialized)
    for (int i = 1; i < N + 1; i++) {
        for (int j = 1; j < M + 1; j++) {
            // Trace the cell center backwards through the velocity field.
            x = i - dt0 * u[AT(i,j)];
            y = j - dt0 * v[AT(i,j)];
            // Clamp to the interior of the grid.
            if (x < 0.5f) x = 0.5f;
            if (x > N + 0.5f) x = N + 0.5f;
            if (y < 0.5f) y = 0.5f;
            if (y > M + 0.5f) y = M + 0.5f;  // clamp j against M, not N
            i0 = (int)x; j0 = (int)y;
            i1 = i0 + 1; j1 = j0 + 1;
            // Bilinear interpolation weights.
            s1 = x - i0; s0 = 1 - s1;
            t1 = y - j0; t0 = 1 - t1;
            d[AT(i,j)] = s0 * (t0 * d0[AT(i0,j0)] + t1 * d0[AT(i0,j1)])
                       + s1 * (t0 * d0[AT(i1,j0)] + t1 * d0[AT(i1,j1)]);
        }
    }
    set_boundary(boundary, d);
}
void solve_with_Cpp(MultiFab& soln, MultiFab& gphi, Real a, Real b, MultiFab& alpha,
                    PArray<MultiFab>& beta, MultiFab& rhs, const BoxArray& bs,
                    const Geometry& geom)
{
  BL_PROFILE("solve_with_Cpp()");

  BndryData bd(bs, 1, geom);
  set_boundary(bd, rhs, 0);

  ABecLaplacian abec_operator(bd, dx);
  abec_operator.setScalars(a, b);
  abec_operator.setCoefficients(alpha, beta);

  MultiGrid mg(abec_operator);
  mg.setVerbose(verbose);
  mg.solve(soln, rhs, tolerance_rel, tolerance_abs);

  PArray<MultiFab> grad_phi(BL_SPACEDIM, PArrayManage);
  for (int n = 0; n < BL_SPACEDIM; ++n)
    grad_phi.set(n, new MultiFab(BoxArray(soln.boxArray()).surroundingNodes(n), 1, 0));

#if (BL_SPACEDIM == 2)
  abec_operator.compFlux(grad_phi[0], grad_phi[1], soln);
#elif (BL_SPACEDIM == 3)
  abec_operator.compFlux(grad_phi[0], grad_phi[1], grad_phi[2], soln);
#endif

  // Average edge-centered gradients to cell centers.
  BoxLib::average_face_to_cellcenter(gphi, grad_phi, geom);
}
void solve(MultiFab& soln, const MultiFab& anaSoln, Real a, Real b, MultiFab& alpha,
           MultiFab beta[], MultiFab& rhs, const BoxArray& bs, const Geometry& geom,
           solver_t solver)
{
  BL_PROFILE("solve");
  soln.setVal(0.0);

  const Real run_strt = ParallelDescriptor::second();

  BndryData bd(bs, 1, geom);
  set_boundary(bd, rhs);

  ABecLaplacian abec_operator(bd, dx);
  abec_operator.setScalars(a, b);
  abec_operator.setCoefficients(alpha, beta);

  MultiGrid mg(abec_operator);
  mg.setMaxIter(maxiter);
  mg.setVerbose(verbose);
  mg.setFixedIter(1);
  mg.solve(soln, rhs, tolerance_rel, tolerance_abs);

  Real run_time = ParallelDescriptor::second() - run_strt;
  ParallelDescriptor::ReduceRealMax(run_time, ParallelDescriptor::IOProcessorNumber());

  if (ParallelDescriptor::IOProcessor()) {
    std::cout << "Run time : " << run_time << std::endl;
  }
}
/* Main integration function: updates phi(x, t) to phi(x, t + dt). */
static void update_phi(float *x, float *phi, size_t size, float t, float mu,
                       float eps2, float u)
{
    /* Our matrices here are of size `size - 2`, because the first and
     * last values of phi are fixed by the boundary conditions. */
    float mu_eps2 = mu * eps2;
    size_t mat_size = size - 2;

    float **A = create_contiguous_array_2D(mat_size);
    init_A(A, mat_size, mu, eps2);

    float *b = create_init_array(mat_size, 0);

    /* Right-hand side from the previous time step... */
    for (size_t i = 0; i < mat_size; ++i) {
        b[i] = phi[i+1] + h(phi[i+1], u);
    }

    /* ...plus the contributions at the boundary (added after the loop so
     * they are not overwritten, as they were in the original ordering). */
    float phi_0, phi_n;
    set_boundary(&phi_0, &phi_n);
    b[0] += mu_eps2 * phi_0;
    b[mat_size - 1] += mu_eps2 * phi_n;

    solve(A, &phi[1], b, mat_size);
    phi[0] = phi_0;
    phi[size - 1] = phi_n;
}
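/* Reading init_A as assembling the tridiagonal matrix A = I - mu*eps2*D2,
 * with D2 the standard second-difference stencil -- an assumption, since only
 * its call is shown -- the function performs one semi-implicit step. In LaTeX:
 *
 *   (I - \mu\,\varepsilon^2 D_2)\,\phi^{n+1}_i = \phi^n_i + h(\phi^n_i, u),
 *   \qquad i = 1, \dots, \mathtt{size}-2,
 *
 * with the fixed boundary values \phi_0, \phi_n entering the right-hand side
 * through the \mu\,\varepsilon^2 terms added to b. */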
/***************************************************************
 * Ensures the velocity field is divergence-free, i.e. incompressible:
 * solves a Poisson equation to compute a gradient field and then
 * subtracts this from the current velocity field to obtain an
 * incompressible field. When solving for pressure it also sets the
 * cells' pressure field.
 ***************************************************************/
void FGS_Fluid_Solver2DS::project(FLUID_DATA ux, FLUID_DATA uy,
                                  FLUID_DATA pressure, FLUID_DATA divergence)
{
    double h = 1.0 / N * -0.5;
    double half_width  = width * 0.5;
    double half_height = height * 0.5;
    int cell;
    double north, south, east, west;

    // Compute the divergence of the velocity field; clear the pressure guess.
    for (int i = 1; i <= width; i++)
        for (int j = 1; j <= height; j++) {
            cell  = GET_INDEX(i, j);
            north = grid[GET_INDEX(i, j-1)].data[uy];
            south = grid[GET_INDEX(i, j+1)].data[uy];
            east  = grid[GET_INDEX(i+1, j)].data[ux];
            west  = grid[GET_INDEX(i-1, j)].data[ux];
            grid[cell].data[divergence] = h * (east - west + south - north);
            grid[cell].data[pressure]   = 0;
        }

    set_boundary(SET_FOR_NON_VELOCITY_COMPONENT, divergence);
    set_boundary(SET_FOR_NON_VELOCITY_COMPONENT, pressure);

    computing_pressure = true;
    linear_solve(SET_FOR_NON_VELOCITY_COMPONENT, pressure, divergence, 1, 0.25);
    computing_pressure = false;

    // Subtract the pressure gradient from the velocity field.
    for (int i = 1; i <= width; i++)
        for (int j = 1; j <= height; j++) {
            cell  = GET_INDEX(i, j);
            north = grid[GET_INDEX(i, j-1)].data[pressure];
            south = grid[GET_INDEX(i, j+1)].data[pressure];
            east  = grid[GET_INDEX(i+1, j)].data[pressure];
            west  = grid[GET_INDEX(i-1, j)].data[pressure];
            double u_in_cell = grid[cell].data[ux],
                   v_in_cell = grid[cell].data[uy];
            grid[cell].data[ux] = u_in_cell - (half_width  * (east  - west));
            grid[cell].data[uy] = v_in_cell - (half_height * (south - north));
        }

    set_boundary(SET_FOR_HORIZONTAL_COMPONENT, ux);
    set_boundary(SET_FOR_VERTICAL_COMPONENT, uy);
}
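/* In symbols, the two passes above implement the Helmholtz-Hodge projection
 * described in the header comment:
 *
 *   \nabla^2 p = \nabla\cdot\mathbf{u}, \qquad
 *   \mathbf{u} \leftarrow \mathbf{u} - \nabla p,
 *
 * after which \nabla\cdot\mathbf{u} = 0 up to the tolerance of the
 * Gauss-Seidel solve. */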
void solve_with_hypre(MultiFab& soln, Real a, Real b, MultiFab& alpha,
                      PArray<MultiFab>& beta, MultiFab& rhs, const BoxArray& bs,
                      const Geometry& geom)
{
  BL_PROFILE("solve_with_hypre()");

  BndryData bd(bs, 1, geom);
  set_boundary(bd, rhs, 0);

  HypreABecLap hypreSolver(bs, geom);
  hypreSolver.setScalars(a, b);
  hypreSolver.setACoeffs(alpha);
  hypreSolver.setBCoeffs(beta);
  hypreSolver.setVerbose(verbose);
  hypreSolver.solve(soln, rhs, tolerance_rel, tolerance_abs, maxiter, bd);
}
void StamFluidSystem::project(Array3fRef &u, Array3fRef &v, Array3fRef &w,
                              Array3fRef &div, Array3fRef &p)
{
    int i, j, k;
    // Compute the divergence of the velocity field and clear the pressure.
    for (i = 1; i <= gridDim.x; i++) {
        for (j = 1; j <= gridDim.y; j++) {
            for (k = 1; k <= gridDim.z; k++) {
                (*div)(i,j,k) = -0.5f * (((*u)(i+1,j,k) - (*u)(i-1,j,k)) / gridDim.x
                                       + ((*v)(i,j+1,k) - (*v)(i,j-1,k)) / gridDim.y
                                       + ((*w)(i,j,k+1) - (*w)(i,j,k-1)) / gridDim.z);
                (*p)(i,j,k) = 0;
            }
        }
    }
    set_boundary(0, div);
    set_boundary(0, p);
    // Solve the pressure Poisson equation (7-point stencil, hence the /6).
    linear_solve(0, p, div, 1.0f, 6.0f);
    // Subtract the pressure gradient.
    for (i = 1; i <= gridDim.x; i++) {
        for (j = 1; j <= gridDim.y; j++) {
            for (k = 1; k <= gridDim.z; k++) {
                (*u)(i,j,k) -= 0.5f * ((*p)(i+1,j,k) - (*p)(i-1,j,k)) * gridDim.x;
                (*v)(i,j,k) -= 0.5f * ((*p)(i,j+1,k) - (*p)(i,j-1,k)) * gridDim.y;
                (*w)(i,j,k) -= 0.5f * ((*p)(i,j,k+1) - (*p)(i,j,k-1)) * gridDim.z;
            }
        }
    }
    set_boundary(1, u);
    set_boundary(2, v);
    set_boundary(3, w);
}
void FluidSimulator::diffusion(int boundary, float *x, float *x0, float diffusion_rate)
{
    float dt = FluidSimulator::dt;
    float a = dt * diffusion_rate * N * M;
    // Use Gauss-Seidel relaxation on the implicit (backward-Euler) system so
    // the solve stays stable even for large diffusion rates and time steps.
    for (int k = 0; k < 20; k++) {
        for (int i = 1; i < N + 1; i++) {
            for (int j = 1; j < M + 1; j++) {
                x[AT(i,j)] = (x0[AT(i,j)] + a * (x[AT(i-1,j)] + x[AT(i+1,j)]
                                               + x[AT(i,j-1)] + x[AT(i,j+1)])) / (1 + 4*a);
            }
        }
        set_boundary(boundary, x);
    }
}
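/* The update above is the Gauss-Seidel form of the backward-Euler system
 * (I - a\nabla^2) x = x^0 with the 5-point Laplacian; solving each equation
 * for its diagonal unknown gives, in LaTeX:
 *
 *   x_{i,j} = \frac{x^0_{i,j} + a\,(x_{i-1,j} + x_{i+1,j} + x_{i,j-1} + x_{i,j+1})}{1 + 4a},
 *   \qquad a = \Delta t\,\kappa\,N M. */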
/***************************************************************
 * Computes a Gauss-Seidel relaxation to solve AX=B for the
 * unknowns X, sweeping forward through the grid (the higher the
 * number of iterations, the closer the resulting vector X gets
 * to convergence).
 ***************************************************************/
void FGS_Fluid_Solver2DS::linear_solve(BOUNDARY_CONDITION b, FLUID_DATA to,
                                       FLUID_DATA from, double a, double coef)
{
    for (int k = 0; k < iterations; k++) {
        for (int i = 1; i <= width; i++) {
            for (int j = 1; j <= height; j++) {
                int cell_ij_index = GET_INDEX(i, j);
                double w  = grid[GET_INDEX(i-1, j)].data[to],
                       e  = grid[GET_INDEX(i+1, j)].data[to],
                       n  = grid[GET_INDEX(i, j-1)].data[to],
                       s  = grid[GET_INDEX(i, j+1)].data[to],
                       c0 = grid[cell_ij_index].data[from];
                grid[cell_ij_index].data[to] = coef * (c0 + a * (w + e + s + n));
                if (computing_pressure)
                    grid[cell_ij_index].data[PRESSURE] = grid[cell_ij_index].data[to];
            }
        }
        set_boundary(b, to);
    }
}
/***************************************************************
 * Computes for each cell a backwards particle trace: linearly
 * interpolates a virtual particle from the center of each cell
 * backwards in time (dt) using the velocity in the cell (Uxy),
 * and copies the value (density or velocity) from this
 * 'previous' position into the cell. Also computes the average
 * density when called from the density step.
 ***************************************************************/
void FGS_Fluid_Solver2DS::advect(BOUNDARY_CONDITION b, FLUID_DATA to,
                                 FLUID_DATA from, FLUID_DATA ux, FLUID_DATA uy)
{
    int i0, j0, i1, j1;
    double x, y, s0, t0, s1, t1, deltaT0, nw, ne, se, sw;
    deltaT0 = deltaT * N;

    for (int i = 1; i <= width; i++) {
        for (int j = 1; j <= height; j++) {
            int cell_ij_index = GET_INDEX(i, j);
            // Trace back from the cell center and clamp to the interior.
            x = i - deltaT0 * grid[cell_ij_index].data[ux];
            y = j - deltaT0 * grid[cell_ij_index].data[uy];
            if (x < 0.5) x = 0.5;
            else if (x > width + 0.5) x = width + 0.5;
            if (y < 0.5) y = 0.5;
            else if (y > height + 0.5) y = height + 0.5;
            i0 = (int) x; j0 = (int) y;
            i1 = i0 + 1;  j1 = j0 + 1;
            // Bilinear interpolation between the four surrounding cells.
            s1 = x - i0; s0 = 1 - s1;
            t1 = y - j0; t0 = 1 - t1;
            nw = grid[GET_INDEX(i0, j0)].data[from];
            ne = grid[GET_INDEX(i1, j0)].data[from];
            se = grid[GET_INDEX(i1, j1)].data[from];
            sw = grid[GET_INDEX(i0, j1)].data[from];
            grid[cell_ij_index].data[to] = s0 * (t0 * nw + t1 * sw)
                                         + s1 * (t0 * ne + t1 * se);
            if (computing_density)
                d_av += grid[cell_ij_index].data[to];
        }
    }
    if (computing_density)
        d_av *= inv_dims;
    set_boundary(b, to);
}
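/* In symbols, the backtrace and bilinear blend above are:
 *
 *   (x, y) = (i, j) - \Delta t\,N\,(u_{ij}, v_{ij}),
 *   d_{ij} = s_0 (t_0\, d_{i_0 j_0} + t_1\, d_{i_0 j_1})
 *          + s_1 (t_0\, d_{i_1 j_0} + t_1\, d_{i_1 j_1}),
 *
 * with i_0 = \lfloor x \rfloor, j_0 = \lfloor y \rfloor,
 * s_1 = x - i_0, t_1 = y - j_0, s_0 = 1 - s_1, t_0 = 1 - t_1. */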
void StamFluidSystem::linear_solve(int bnd, Array3fRef &x, const Array3fRef &x0,
                                   float a, float div)
{
    int i, j, k, m;
    for (m = 0; m < iterations; m++) {
        for (i = 1; i <= gridDim.x; i++) {
            for (j = 1; j <= gridDim.y; j++) {
                for (k = 1; k <= gridDim.z; k++) {
                    (*x)(i,j,k) = ((*x0)(i,j,k)
                                   + a * ((*x)(i-1,j,k) + (*x)(i+1,j,k)
                                        + (*x)(i,j-1,k) + (*x)(i,j+1,k)
                                        + (*x)(i,j,k-1) + (*x)(i,j,k+1))) / div;
                }
            }
        }
        set_boundary(bnd, x);
    }
}
double advance_system(int Nx, int Ny, Vars *U, double dx, double dy, int my_id,
                      int zones_to_do, int num_procs, MPI_Datatype mpi_Vars)
{
    int N = Nx;
    Vars *U_n, *Un, *U_temp;
    int i, j, k, ierr, inst[2], bound;
    U_n    = malloc(N*N*sizeof(Vars));
    Un     = malloc(N*N*sizeof(Vars));
    U_temp = malloc(zones_to_do*(N-4)*sizeof(Vars));
    bound = 0;
    MPI_Status status;
    Flux FL, FR, GL, GR;
    double maxalphax, maxalphay, dt;

    if (my_id == 0) {
        /* Root: hand out column ranges, compute dt, and assemble each stage. */
        int start, end;
        start = 2;
        for (k = num_procs-1; k > 0; --k) { /* loop over all worker processes */
            end = start + zones_to_do - 1;
            inst[0] = start;
            inst[1] = end;
            /* send start, end indices */
            ierr = MPI_Send(&inst[0], 2, MPI_INT, k, send_data_tag, MPI_COMM_WORLD);
            start = end + 1;
        }
        end = start + zones_to_do - 1;

        /* Calculate dt from the largest signal speed in my zones. */
        double my_maxalpha = 0.;
        for (i = 2; i < Nx-2; ++i) {
            for (j = 2; j < Nx-2; ++j) {
                maxalphax = Maxalpha(U[(i-2)*Ny+j], U[(i-1)*Ny+j], U[i*Ny+j], U[(i+1)*Ny+j],  1);
                maxalphay = Maxalpha(U[(i-2)*Ny+j], U[(i-1)*Ny+j], U[i*Ny+j], U[(i+1)*Ny+j], -1);
                my_maxalpha = MAX(my_maxalpha, maxalphax, maxalphay);
            }
        }
        /* (disabled in the original) receive each worker's maxalpha and fold it
         * into the global CFL estimate; currently dt uses the root's zones only. */
        dt = .5*dx/my_maxalpha;

        /* Broadcast dt. */
        ierr = MPI_Bcast(&dt, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
        Set_B2A(Nx, Ny, U, U_n);

        /* Stage 1: calculate U1. */
        for (i = 2; i < Nx-2; ++i) {
            for (j = start; j < end+1; ++j) {
                FL = Fhll(U[(i-2)*Ny+j], U[(i-1)*Ny+j], U[i*Ny+j],     U[(i+1)*Ny+j]);
                FR = Fhll(U[(i-1)*Ny+j], U[i*Ny+j],     U[(i+1)*Ny+j], U[(i+2)*Ny+j]);
                GL = Ghll(U[i*Ny+j-2],   U[i*Ny+j-1],   U[i*Ny+j],     U[i*Ny+j+1]);
                GR = Ghll(U[i*Ny+j-1],   U[i*Ny+j],     U[i*Ny+j+1],   U[i*Ny+j+2]);
                U_n[i*Ny+j] = make_U1(dt, U[i*Ny+j], L(FL, FR, GL, GR, dy, dx));
            }
        }
        /* Receive U_temps from the other processes to complete U1. */
        for (k = 1; k < num_procs; ++k) {
            /* receive start, end for what the process did */
            ierr = MPI_Recv(inst, 2, MPI_INT, k, send_data_tag, MPI_COMM_WORLD, &status);
            /* receive U_temp */
            ierr = MPI_Recv(U_temp, zones_to_do*(N-4), mpi_Vars, k, send_data_tag, MPI_COMM_WORLD, &status);
            /* add values to U_n */
            for (i = 2; i < Nx-2; ++i)
                for (j = inst[0]; j < inst[1]+1; ++j)
                    U_n[i*Ny+j] = U_temp[(i-2)*zones_to_do + j - inst[0]];
        }
        /* set_boundary(U_n, Nx, Ny, bound); */
        Set_B2A(Nx, Ny, U_n, Un);
        /* Broadcast U_n. */
        ierr = MPI_Bcast(U_n, N*N, mpi_Vars, 0, MPI_COMM_WORLD);
        /* something wrong between here... */

        /* Stage 2: calculate U2. */
        for (i = 2; i < Nx-2; ++i) {
            for (j = start; j < end+1; ++j) {
                FL = Fhll(U_n[(i-2)*Ny+j], U_n[(i-1)*Ny+j], U_n[i*Ny+j],     U_n[(i+1)*Ny+j]);
                FR = Fhll(U_n[(i-1)*Ny+j], U_n[i*Ny+j],     U_n[(i+1)*Ny+j], U_n[(i+2)*Ny+j]);
                GL = Ghll(U_n[i*Ny+j-2],   U_n[i*Ny+j-1],   U_n[i*Ny+j],     U_n[i*Ny+j+1]);
                GR = Ghll(U_n[i*Ny+j-1],   U_n[i*Ny+j],     U_n[i*Ny+j+1],   U_n[i*Ny+j+2]);
                Un[i*Ny+j] = make_U2(dt, U[i*Ny+j], U_n[i*Ny+j], L(FL, FR, GL, GR, dy, dx));
            }
        }
        /* Receive U_temps from the other processes to complete U2. */
        for (k = 1; k < num_procs; ++k) {
            ierr = MPI_Recv(inst, 2, MPI_INT, k, send_data_tag, MPI_COMM_WORLD, &status);
            ierr = MPI_Recv(U_temp, zones_to_do*(N-4), mpi_Vars, k, send_data_tag, MPI_COMM_WORLD, &status);
            for (i = 2; i < Nx-2; ++i)
                for (j = inst[0]; j < inst[1]+1; ++j)
                    Un[i*Ny+j] = U_temp[(i-2)*zones_to_do + j - inst[0]];
        }
        /* set_boundary(Un, Nx, Ny, bound); */
        Set_B2A(Nx, Ny, Un, U_n);
        /* Broadcast Un. */
        ierr = MPI_Bcast(Un, N*N, mpi_Vars, 0, MPI_COMM_WORLD);

        /* Stage 3: calculate U at the next time step into U_n. */
        for (i = 2; i < Nx-2; ++i) {
            for (j = start; j < end+1; ++j) {
                FL = Fhll(Un[(i-2)*Ny+j], Un[(i-1)*Ny+j], Un[i*Ny+j],     Un[(i+1)*Ny+j]);
                FR = Fhll(Un[(i-1)*Ny+j], Un[i*Ny+j],     Un[(i+1)*Ny+j], Un[(i+2)*Ny+j]);
                GL = Ghll(Un[i*Ny+j-2],   Un[i*Ny+j-1],   Un[i*Ny+j],     Un[i*Ny+j+1]);
                GR = Ghll(Un[i*Ny+j-1],   Un[i*Ny+j],     Un[i*Ny+j+1],   Un[i*Ny+j+2]);
                U_n[i*Ny+j] = make_UN(dt, U[i*Ny+j], Un[i*Ny+j], L(FL, FR, GL, GR, dy, dx));
            }
        }
        /* Receive U_temps from the other processes to complete UN. */
        for (k = 1; k < num_procs; ++k) {
            ierr = MPI_Recv(inst, 2, MPI_INT, k, send_data_tag, MPI_COMM_WORLD, &status);
            ierr = MPI_Recv(U_temp, zones_to_do*(N-4), mpi_Vars, k, send_data_tag, MPI_COMM_WORLD, &status);
            for (i = 2; i < Nx-2; ++i)
                for (j = inst[0]; j < inst[1]+1; ++j)
                    U_n[i*Ny+j] = U_temp[(i-2)*zones_to_do + j - inst[0]];
        }
        /* ...and here */
        Set_B2A(Nx, Ny, U_n, U);
        set_boundary(U, Nx, Ny, bound);
    }
    else {
        /* I am a worker: receive my start and end indices. */
        ierr = MPI_Recv(inst, 2, MPI_INT, 0, send_data_tag, MPI_COMM_WORLD, &status);
        /* (disabled in the original) compute maxalpha over my zones and send it
         * to the root for the global CFL reduction. */
        /* Receive broadcast of dt from root. */
        ierr = MPI_Bcast(&dt, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);

        /* Stage 1: calculate my part of U1, put it in U_temp. */
        for (i = 2; i < Nx-2; ++i) {
            for (j = inst[0]; j < inst[1]+1; ++j) {
                FL = Fhll(U[(i-2)*Ny+j], U[(i-1)*Ny+j], U[i*Ny+j],     U[(i+1)*Ny+j]);
                FR = Fhll(U[(i-1)*Ny+j], U[i*Ny+j],     U[(i+1)*Ny+j], U[(i+2)*Ny+j]);
                GL = Ghll(U[i*Ny+j-2],   U[i*Ny+j-1],   U[i*Ny+j],     U[i*Ny+j+1]);
                GR = Ghll(U[i*Ny+j-1],   U[i*Ny+j],     U[i*Ny+j+1],   U[i*Ny+j+2]);
                U_temp[(i-2)*zones_to_do + j - inst[0]] =
                    make_U1(dt, U[i*Ny+j], L(FL, FR, GL, GR, dy, dx));
            }
        }
        /* Send my part of U1 back to root. */
        ierr = MPI_Send(&inst[0], 2, MPI_INT, 0, send_data_tag, MPI_COMM_WORLD);
        ierr = MPI_Send(&U_temp[0], (N-4)*zones_to_do, mpi_Vars, 0, send_data_tag, MPI_COMM_WORLD);
        /* Receive broadcast of U_n from root. */
        ierr = MPI_Bcast(U_n, N*N, mpi_Vars, 0, MPI_COMM_WORLD);

        /* Stage 2: calculate my part of U2, put it in U_temp. */
        for (i = 2; i < Nx-2; ++i) {
            for (j = inst[0]; j < inst[1]+1; ++j) {
                FL = Fhll(U_n[(i-2)*Ny+j], U_n[(i-1)*Ny+j], U_n[i*Ny+j],     U_n[(i+1)*Ny+j]);
                FR = Fhll(U_n[(i-1)*Ny+j], U_n[i*Ny+j],     U_n[(i+1)*Ny+j], U_n[(i+2)*Ny+j]);
                GL = Ghll(U_n[i*Ny+j-2],   U_n[i*Ny+j-1],   U_n[i*Ny+j],     U_n[i*Ny+j+1]);
                GR = Ghll(U_n[i*Ny+j-1],   U_n[i*Ny+j],     U_n[i*Ny+j+1],   U_n[i*Ny+j+2]);
                U_temp[(i-2)*zones_to_do + j - inst[0]] =
                    make_U2(dt, U[i*Ny+j], U_n[i*Ny+j], L(FL, FR, GL, GR, dy, dx));
            }
        }
        /* Send my part of U2 back to root. */
        ierr = MPI_Send(&inst[0], 2, MPI_INT, 0, send_data_tag, MPI_COMM_WORLD);
        ierr = MPI_Send(&U_temp[0], zones_to_do*(N-4), mpi_Vars, 0, send_data_tag, MPI_COMM_WORLD);
        /* Receive broadcast of Un from root. */
        ierr = MPI_Bcast(Un, N*N, mpi_Vars, 0, MPI_COMM_WORLD);

        /* Stage 3: calculate my part of U at the next time step, put it in U_temp. */
        for (i = 2; i < Nx-2; ++i) {
            for (j = inst[0]; j < inst[1]+1; ++j) {
                FL = Fhll(Un[(i-2)*Ny+j], Un[(i-1)*Ny+j], Un[i*Ny+j],     Un[(i+1)*Ny+j]);
                FR = Fhll(Un[(i-1)*Ny+j], Un[i*Ny+j],     Un[(i+1)*Ny+j], Un[(i+2)*Ny+j]);
                GL = Ghll(Un[i*Ny+j-2],   Un[i*Ny+j-1],   Un[i*Ny+j],     Un[i*Ny+j+1]);
                GR = Ghll(Un[i*Ny+j-1],   Un[i*Ny+j],     Un[i*Ny+j+1],   Un[i*Ny+j+2]);
                U_temp[(i-2)*zones_to_do + j - inst[0]] =
                    make_UN(dt, U[i*Ny+j], Un[i*Ny+j], L(FL, FR, GL, GR, dy, dx));
            }
        }
        /* Send my part of UN back to root. */
        ierr = MPI_Send(&inst[0], 2, MPI_INT, 0, send_data_tag, MPI_COMM_WORLD);
        ierr = MPI_Send(&U_temp[0], zones_to_do*(N-4), mpi_Vars, 0, send_data_tag, MPI_COMM_WORLD);
    }
    free(U_n);
    free(Un);
    free(U_temp);
    return dt;
}
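/* The stage functions make_U1, make_U2, make_UN are called in the pattern of
 * the standard third-order SSP Runge-Kutta scheme of Shu and Osher; reading
 * them that way (an inference -- their bodies are not shown), each step is:
 *
 *   U^{(1)} = U^n + \Delta t\, L(U^n)
 *   U^{(2)} = \tfrac{3}{4} U^n + \tfrac{1}{4}\bigl(U^{(1)} + \Delta t\, L(U^{(1)})\bigr)
 *   U^{n+1} = \tfrac{1}{3} U^n + \tfrac{2}{3}\bigl(U^{(2)} + \Delta t\, L(U^{(2)})\bigr) */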
void StamFluidSystem::transport(int bnd, Array3fRef &d, Array3fRef &d0,
                                Array3fRef &u, Array3fRef &v, Array3fRef &w, float dt)
{
    float i0, j0, k0;
    float dtx = dt * gridDim.x;
    float dty = dt * gridDim.y;
    float dtz = dt * gridDim.z;
    float s0, s1, t0, t1, u0, u1;
    float tmp1, tmp2, tmp3, x, y, z;
    float ifloat, jfloat, kfloat;
    int i, j, k;

    for (k = 1, kfloat = 1; k <= gridDim.z; k++, kfloat++) {
        for (j = 1, jfloat = 1; j <= gridDim.y; j++, jfloat++) {
            for (i = 1, ifloat = 1; i <= gridDim.x; i++, ifloat++) {
                // Trace the cell center backwards through the velocity field.
                tmp1 = dtx * (*u)(i, j, k);
                tmp2 = dty * (*v)(i, j, k);
                tmp3 = dtz * (*w)(i, j, k);
                x = ifloat - tmp1;
                y = jfloat - tmp2;
                z = kfloat - tmp3;
                // Clamp each coordinate against its own grid extent
                // (the original clamped all three against gridDim.x).
                if (x < 0.5f) x = 0.5f;
                if (x > gridDim.x + 0.5f) x = gridDim.x + 0.5f;
                i0 = floorf(x);
                if (y < 0.5f) y = 0.5f;
                if (y > gridDim.y + 0.5f) y = gridDim.y + 0.5f;
                j0 = floorf(y);
                if (z < 0.5f) z = 0.5f;
                if (z > gridDim.z + 0.5f) z = gridDim.z + 0.5f;
                k0 = floorf(z);
                // Trilinear interpolation weights.
                s1 = x - i0; s0 = 1.0f - s1;
                t1 = y - j0; t0 = 1.0f - t1;
                u1 = z - k0; u0 = 1.0f - u1;
                int i0i = i0;
                int j0i = j0;
                int k0i = k0;
                // trilerp() replaces the hand-expanded eight-corner blend that
                // was spelled out (commented) in the original, and the original's
                // duplicated assignment (*d)(i,j,k) = (*d)(i,j,k) = ... is collapsed.
                (*d)(i, j, k) = d0->trilerp(i0i, j0i, k0i, s0, t0, u0);
            }
        }
    }
    set_boundary(bnd, d);
    // An alternative implementation (commented out in the original) traced a
    // particle from each cell center with traceParticle() and trilerp'ed at
    // the landing point; it is equivalent in spirit to the loop above.
}
void solve_with_HPGMG(MultiFab& soln, MultiFab& gphi, Real a, Real b, MultiFab& alpha,
                      PArray<MultiFab>& beta, MultiFab& beta_cc, MultiFab& rhs,
                      const BoxArray& bs, const Geometry& geom, int n_cell)
{
  BndryData bd(bs, 1, geom);
  set_boundary(bd, rhs, 0);

  ABecLaplacian abec_operator(bd, dx);
  abec_operator.setScalars(a, b);
  abec_operator.setCoefficients(alpha, beta);

  int minCoarseDim;
  if (domain_boundary_condition == BC_PERIODIC)
  {
    minCoarseDim = 2; // avoid problems with black box calculation of D^{-1} for poisson with periodic BC's on a 1^3 grid
  }
  else
  {
    minCoarseDim = 1; // assumes you can drop order on the boundaries
  }

  level_type level_h;
  mg_type MG_h;
  int numVectors = 12;

  int my_rank = 0, num_ranks = 1;
#ifdef BL_USE_MPI
  MPI_Comm_size(MPI_COMM_WORLD, &num_ranks);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
#endif /* BL_USE_MPI */

  const double h0 = dx[0];
  // Create the geometric structure of the HPGMG grid using the RHS MultiFab as
  // a template. This doesn't copy any actual data.
  CreateHPGMGLevel(&level_h, rhs, n_cell, max_grid_size, my_rank, num_ranks,
                   domain_boundary_condition, numVectors, h0);

  // Set up the coefficients for the linear operator L.
  SetupHPGMGCoefficients(a, b, alpha, beta_cc, &level_h);

  // Now that the HPGMG grid is built, populate it with RHS data.
  ConvertToHPGMGLevel(rhs, n_cell, max_grid_size, &level_h, VECTOR_F);

#ifdef USE_HELMHOLTZ
  if (ParallelDescriptor::IOProcessor()) {
    std::cout << "Creating Helmholtz (a=" << a << ", b=" << b << ") test problem" << std::endl;
  }
#else
  if (ParallelDescriptor::IOProcessor()) {
    std::cout << "Creating Poisson (a=" << a << ", b=" << b << ") test problem" << std::endl;
  }
#endif /* USE_HELMHOLTZ */

  if (level_h.boundary_condition.type == BC_PERIODIC)
  {
    double average_value_of_f = mean(&level_h, VECTOR_F);
    if (average_value_of_f != 0.0)
    {
      if (ParallelDescriptor::IOProcessor()) {
        std::cerr << "WARNING: Periodic boundary conditions, but f does not sum to zero... mean(f)="
                  << average_value_of_f << std::endl;
      }
      //shift_vector(&level_h,VECTOR_F,VECTOR_F,-average_value_of_f);
    }
  }

  rebuild_operator(&level_h, NULL, a, b); // i.e. calculate Dinv and lambda_max
  MGBuild(&MG_h, &level_h, a, b, minCoarseDim, ParallelDescriptor::Communicator()); // build the multigrid hierarchy

  if (ParallelDescriptor::IOProcessor())
    std::cout << std::endl << std::endl << "===== STARTING SOLVE =====" << std::endl << std::flush;

  MGResetTimers(&MG_h);
  zero_vector(MG_h.levels[0], VECTOR_U);
#ifdef USE_FCYCLES
  FMGSolve(&MG_h, 0, VECTOR_U, VECTOR_F, a, b, tolerance_abs, tolerance_rel);
#else
  MGSolve(&MG_h, 0, VECTOR_U, VECTOR_F, a, b, tolerance_abs, tolerance_rel);
#endif /* USE_FCYCLES */

  MGPrintTiming(&MG_h, 0); // don't include the error check in the timing results

  if (ParallelDescriptor::IOProcessor())
    std::cout << std::endl << std::endl
              << "===== Performing Richardson error analysis ==========================" << std::endl;
  // Solve A^h u^h = f^h, A^{2h} u^{2h} = f^{2h}, and A^{4h} u^{4h} = f^{4h},
  // then compare the solutions across levels to estimate the error.
  MGResetTimers(&MG_h);
  const double dtol = tolerance_abs;
  const double rtol = tolerance_rel;
  for (int l = 0; l < 3; l++)
  {
    if (l > 0)
      restriction(MG_h.levels[l], VECTOR_F, MG_h.levels[l-1], VECTOR_F, RESTRICT_CELL);
    zero_vector(MG_h.levels[l], VECTOR_U);
#ifdef USE_FCYCLES
    FMGSolve(&MG_h, l, VECTOR_U, VECTOR_F, a, b, dtol, rtol);
#else
    MGSolve(&MG_h, l, VECTOR_U, VECTOR_F, a, b, dtol, rtol);
#endif
  }
  richardson_error(&MG_h, 0, VECTOR_U);

  // Now convert the solution from HPGMG back into the soln MultiFab.
  ConvertFromHPGMGLevel(soln, &level_h, VECTOR_U);

  const double norm_from_HPGMG = norm(&level_h, VECTOR_U);
  const double mean_from_HPGMG = mean(&level_h, VECTOR_U);
  const Real norm0 = soln.norm0();
  const Real norm2 = soln.norm2();
  if (ParallelDescriptor::IOProcessor()) {
    std::cout << "mean from HPGMG: " << mean_from_HPGMG << std::endl;
    std::cout << "norm from HPGMG: " << norm_from_HPGMG << std::endl;
    std::cout << "norm0 of RHS copied to MF: " << norm0 << std::endl;
    std::cout << "norm2 of RHS copied to MF: " << norm2 << std::endl;
  }

  // Write the MF to disk for comparison with the in-house solver.
  if (plot_soln)
  {
    writePlotFile("SOLN-HPGMG", soln, geom);
  }

  MGDestroy(&MG_h);
  destroy_level(&level_h);

  PArray<MultiFab> grad_phi(BL_SPACEDIM, PArrayManage);
  for (int n = 0; n < BL_SPACEDIM; ++n)
    grad_phi.set(n, new MultiFab(BoxArray(soln.boxArray()).surroundingNodes(n), 1, 0));

#if (BL_SPACEDIM == 2)
  abec_operator.compFlux(grad_phi[0], grad_phi[1], soln);
#elif (BL_SPACEDIM == 3)
  abec_operator.compFlux(grad_phi[0], grad_phi[1], grad_phi[2], soln);
#endif

  // Average edge-centered gradients to cell centers.
  BoxLib::average_face_to_cellcenter(gphi, grad_phi, geom);
}
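// The Richardson loop above solves the same problem on grids of spacing h,
// 2h, and 4h. A routine like richardson_error typically reports the observed
// convergence order p via the standard estimate (an assumption -- its
// internals are not shown here); with R the restriction operator:
//
//   p \approx \log_2 \frac{\lVert u^{4h} - R\,u^{2h} \rVert}{\lVert u^{2h} - R\,u^{h} \rVert}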
void solve4(MultiFab& soln, const MultiFab& anaSoln, Real a, Real b, MultiFab& alpha,
            MultiFab& beta, MultiFab& rhs, const BoxArray& bs, const Geometry& geom)
{
  std::string ss = "CPP";
  soln.setVal(0.0);

  const Real run_strt = ParallelDescriptor::second();

  BndryData bd(bs, 1, geom);
  set_boundary(bd, rhs, 0);

  ABec4 abec_operator(bd, dx);
  abec_operator.setScalars(a, b);

  MultiFab betaca(bs, 1, 2);
  ABec4::cc2ca(beta, betaca, 0, 0, 1);

  MultiFab alphaca(bs, 1, 2);
  ABec4::cc2ca(alpha, alphaca, 0, 0, 1);
  abec_operator.setCoefficients(alphaca, betaca);

  MultiFab rhsca(bs, 1, 0);
  ABec4::cc2ca(rhs, rhsca, 0, 0, 1);

  MultiFab out(bs, 1, 0);

  compute_analyticSolution(soln, Array<Real>(BL_SPACEDIM, 0.5));

  MultiFab solnca(bs, 1, 2);
  solnca.setVal(0);

  MultiGrid mg(abec_operator);
  mg.setVerbose(verbose);
  mg.solve(solnca, rhsca, tolerance_rel, tolerance_abs);

  Real run_time = ParallelDescriptor::second() - run_strt;
  ParallelDescriptor::ReduceRealMax(run_time, ParallelDescriptor::IOProcessorNumber());

  if (ParallelDescriptor::IOProcessor()) {
    std::cout << "Run time : " << run_time << std::endl;
  }

  if (plot_soln) {
    writePlotFile("SOLN-" + ss, solnca, geom);
  }

  if (plot_err || comp_norm) {
    soln.minus(anaSoln, 0, Ncomp, 0); // soln contains errors now
    MultiFab& err = soln;

    if (plot_err) {
      writePlotFile("ERR-" + ss, soln, geom);
    }

    if (comp_norm) {
      Real twoNorm = err.norm2();
      Real maxNorm = err.norm0();

      err.setVal(1.0);
      Real vol = err.norm2();
      twoNorm /= vol;

      if (ParallelDescriptor::IOProcessor()) {
        std::cout << "2 norm error  : " << twoNorm << std::endl;
        std::cout << "max norm error: " << maxNorm << std::endl;
      }
    }
  }
}
// A line y is "covered" (redundant) when set_boundary(prev(y), y) reports
// that its predecessor already dominates it on the envelope.
bool covered(auto y) { return y != this->begin() && set_boundary(prev(y), y); }
/*ARGSUSED*/
EXPORT INTERFACE *i_zoom_interface(
    INTERFACE *given_intfc,
    RECT_GRID *gr,
    double    *L,
    double    *U,
    double    **Q)
{
    INTERFACE *cur_intfc;
    INTERFACE *zoom_intfc;
    RECT_GRID *t_gr;
    int       dim = given_intfc->dim;
    int       i, j;
    double    **Qi = NULL;
    static double **pc = NULL;

    debug_print("zoom","Entered zoom_interface()\n");

    cur_intfc = current_interface();
    if ((zoom_intfc = copy_interface(given_intfc)) == NULL)
    {
        Error(ERROR,"Unable to copy interface.");
        clean_up(ERROR);
    }
    if (debugging("zoom"))
    {
        (void) output();
        (void) printf("INTERFACE before zoom:\n\n");
        print_interface(zoom_intfc);
    }
    if (Q != NULL)
    {
        static double **M = NULL;

        if (M == NULL)
            bi_array(&M,MAXD,MAXD,FLOAT);
        Qi = M;
        for (i = 0; i < dim; i++)
            for (j = 0; j < dim; j++)
                Qi[i][j] = Q[j][i];
    }
    if (pc == NULL)
        bi_array(&pc,MAXNCORNERS,MAXD,FLOAT);
    calculate_box(L,U,pc,Q,Qi,dim);

    /* Shrink topological grid to cutting boundary */
    t_gr = &topological_grid(zoom_intfc);
    rotate_and_zoom_rect_grid(t_gr,L,U,Q);
    switch (dim)
    {
    case 1: /* TODO */
        return NULL;
    case 2:
        insert_cuts_and_bdry2d(zoom_intfc,pc);
        clip_interface2d(zoom_intfc);
        break;
    case 3: /* TODO */
        return NULL;
    }
    rotate_interface(zoom_intfc,pc[0],Qi);

    /* Note: the original closed the parenthesis after grid_tolerance(gr), so
     * the != FUNCTION_SUCCEEDED comparison was passed as the tolerance
     * argument instead of testing set_boundary's return value. */
    if (set_boundary(zoom_intfc,t_gr,component(pc[0],given_intfc),
                     grid_tolerance(gr)) != FUNCTION_SUCCEEDED)
    {
        screen("ERROR in i_zoom_interface(), set_boundary failed\n");
        clean_up(ERROR);
    }
    set_current_interface(cur_intfc);

    if (debugging("zoom"))
    {
        (void) printf("INTERFACE after zoom:\n\n");
        print_interface(zoom_intfc);
    }
    debug_print("zoom","Leaving zoom_interface()\n");
    return zoom_intfc;
}        /*end i_zoom_interface*/