static Real hst_dPhi(const GridS *pG, const int i, const int j, const int k) { Real nJ,Q,cs,dPhi,Phi1; Real kx,kxt,ky,k2,k20; Real x1,x2,x3,zmax; int nwx,nwy; int ks = pG->ks, ke = pG->ke; cc_pos(pG,i,j,k,&x1,&x2,&x3); nJ = par_getd("problem","nJ"); Q = par_getd("problem","Q"); nwx = par_geti_def("problem","nwx",-6); nwy = par_geti_def("problem","nwy",1); zmax = par_getd("domain1","x3max"); kx = nwx*2*PI; ky = nwy*2*PI; kxt = kx+qshear*ky*pG->time; k2 =kxt*kxt+ky*ky; k20 =kx*kx+ky*ky; cs = sqrt(4.0-2.0*qshear)/(PI*nJ*Q); Phi1 = -4.0*PI*nJ*cs*cs/k20*(pG->U[k][j][i].d-1.0); #ifdef SELF_GRAVITY_USING_FFT_DISK Phi1 *= 1-0.5*(exp(-sqrt(k20)*(zmax-fabs(x3)))+exp(-sqrt(k20)*(zmax+fabs(x3)))); #endif dPhi = (pG->Phi[k][j][i]-Phi1)/(pG->U[k][j][i].d-1)*k2; dPhi /= 1-0.5*(exp(-sqrt(k2)*(zmax-fabs(x3)))+exp(-sqrt(k2)*(zmax+fabs(x3)))); return dPhi; }
static Real hst_m2(const GridS *pG, const int i, const int j, const int k) { Real kx,kxt,ky; Real x1,x2,x3; int nwx,nwy; Real nJ,Q,beta,cs,B0; nJ = par_getd("problem","nJ"); Q = par_getd("problem","Q"); beta = par_getd("problem","beta"); cs = sqrt(4.0-2.0*qshear)/PI/nJ/Q; B0 = cs/sqrt(beta); cc_pos(pG,i,j,k,&x1,&x2,&x3); nwx = par_geti_def("problem","nwx",-6); nwy = par_geti_def("problem","nwy",1); kx = nwx*2*PI; ky = nwy*2*PI; kxt = kx+qshear*ky*pG->time; return -(pG->U[k][j][i].B2c-B0)/kxt/B0/cos(kxt*x1+ky*x2); }
/* Re-read problem parameters and re-enroll potentials, history variables,
 * and boundary-condition functions after a restart (planet-in-shearing-box
 * problem). The restart file pointer fp is unused here; all values come
 * from the parameter database. */
void problem_read_restart(MeshS *pM, FILE *fp)
{
  int nl, nd;
  int bc_inner, bc_outer;
  DomainS *pD;

  /* shearing-box parameters (Omega defaults to 1e-3) */
#ifdef SHEARING_BOX
  Omega_0 = par_getd_def("problem","omega",1.0e-3);
  qshear  = par_getd_def("problem","qshear",1.5);
#endif

  /* planet mass, position, softening, and insertion ramp */
  Mp          = par_getd_def("problem","Mplanet",0.0);
  Xplanet     = par_getd_def("problem","Xplanet",0.0);
  Yplanet     = par_getd_def("problem","Yplanet",0.0);
  Zplanet     = par_getd_def("problem","Zplanet",0.0);
  Rsoft       = par_getd_def("problem","Rsoft",0.1);
  ramp_time   = 0.0;
  insert_time = par_getd_def("problem","insert_time",0.0);

#ifdef VISCOSITY
  nu_iso   = par_getd_def("problem","nu_iso",0.0);
  nu_aniso = par_getd_def("problem","nu_aniso",0.0);
#endif

  /* gravitational potential of planet plus shearing-box potential */
  StaticGravPot  = PlanetPot;
  ShearingBoxPot = UnstratifiedDisk;

  /* history variables */
  dump_history_enroll(hst_rho_Vx_dVy, "<rho Vx dVy>");
  dump_history_enroll(hst_rho_dVy2, "<rho dVy^2>");
#ifdef ADIABATIC
  dump_history_enroll(hst_E_total, "<E + rho Phi>");
#endif

  /* enroll constant-state BCs on x1 faces unless periodic (flag 4) */
  bc_inner = par_geti_def("domain1","bc_ix1",0);
  bc_outer = par_geti_def("domain1","bc_ox1",0);

  for (nl=0; nl<(pM->NLevels); nl++){
    for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
      pD = &(pM->Domain[nl][nd]);
      if (pD->Disp[0] == 0 && bc_inner != 4)
        bvals_mhd_fun(pD, left_x1, constant_iib);
      if (pD->MaxX[0] == pD->RootMaxX[0] && bc_outer != 4)
        bvals_mhd_fun(pD, right_x1, constant_oib);
    }
  }

  return;
}
/* History output: azimuthal velocity perturbation projected onto the
 * shearing-wave mode sin(kxt*x1 + ky*x2).
 *
 * NOTE(review): subtracts 1 from M2/d — assumes the background azimuthal
 * velocity is unity in code units; confirm against the problem setup. */
static Real hst_uy(const GridS *pG, const int i, const int j, const int k)
{
  Real x1, x2, x3;
  Real wkx, wky, kx_sheared;
  int mode_x, mode_y;

  /* cell-centered position */
  cc_pos(pG, i, j, k, &x1, &x2, &x3);

  /* mode numbers of the seeded shearing wave */
  mode_x = par_geti_def("problem","nwx",-6);
  mode_y = par_geti_def("problem","nwy",1);

  wkx = mode_x*2*PI;
  wky = mode_y*2*PI;
  kx_sheared = wkx + qshear*wky*pG->time;  /* time-sheared radial wavenumber */

  return (pG->U[k][j][i].M2/pG->U[k][j][i].d-1)/sin(kx_sheared*x1+wky*x2);
}
int commit_parameters(){ int out_level = par_geti_def("log","out_level",0); int err_level = par_geti_def("log","err_level",0); ath_log_set_level(out_level, err_level); if(has_external_gravitational_potential) { StaticGravPot = grav_pot; } CourNo = par_getd("time","cour_no"); #ifdef ISOTHERMAL Iso_csound = par_getd("problem","iso_csound"); Iso_csound2 = Iso_csound*Iso_csound; #else Gamma = par_getd("problem","gamma"); Gamma_1 = Gamma - 1.0; Gamma_2 = Gamma - 2.0; #endif init_domain(&level0_Grid, &level0_Domain); init_grid(&level0_Grid, &level0_Domain); if ((Potentials = (Real***)calloc_3d_array( level0_Grid.Nx3 + 2 * nghost, level0_Grid.Nx2 + 2 * nghost, level0_Grid.Nx1 + 2 * nghost, sizeof(Real))) == NULL) { return -1; } return 0; }
/* Re-read problem parameters after a restart (stratified-disk particle
 * problem): potential, shear parameters, box geometry, viscous-drift
 * velocities, particle counts, and history enrollment. fp is unused;
 * parameters come from the database. */
void problem_read_restart(MeshS *pM, FILE *fp)
{
  DomainS *root_dom = (DomainS*)&(pM->Domain[0][0]);
  GridS *grid = root_dom->Grid;

  ShearingBoxPot = StratifiedDisk;

  /* shear and perturbation parameters */
  Omega_0 = par_getd("problem","omega");
  qshear  = par_getd_def("problem","qshear",1.5);
  ipert   = par_geti_def("problem","ipert",1);

  /* box extents: x1/x2 from this grid, x3 from the root mesh */
  x1min = grid->MinX[0];      x1max = grid->MaxX[0];      Lx = x1max - x1min;
  x2min = grid->MinX[1];      x2max = grid->MaxX[1];      Ly = x2max - x2min;
  x3min = pM->RootMinX[2];    x3max = pM->RootMaxX[2];    Lz = x3max - x3min;
  Lg = nghost*grid->dx3;      /* vertical size of the ghost zone */

  /* viscous drift speeds, given in units of the isothermal sound speed */
  vsc1 = par_getd_def("problem","vsc1",0.05);
  vsc2 = par_getd_def("problem","vsc2",0.0);
  vsc1 = vsc1 * Iso_csound;
  vsc2 = vsc2 * Iso_csound;

  /* particle counts and tracking/listing limits */
  Npar   = (int)(sqrt(par_geti("particle","parnumgrid")));
  nlis   = par_geti_def("problem","nlis",grid->Nx[0]*grid->Nx[1]*grid->Nx[2]);
  ntrack = par_geti_def("problem","ntrack",2000);

  dump_history_enroll(hst_rho_Vx_dVy, "<rho Vx dVy>");

  return;
}
/* Problem generator: planet in an unstratified shearing box. Initializes a
 * uniform-density state (with background shear unless FARGO), enrolls the
 * planet and shearing-box potentials, history variables (once), and
 * constant-state x1 boundary conditions unless periodic BCs are chosen. */
void problem(DomainS *pDomain)
{
  GridS *pGrid = pDomain->Grid;
  int is = pGrid->is, ie = pGrid->ie;
  int js = pGrid->js, je = pGrid->je;
  int ks = pGrid->ks, ke = pGrid->ke;
  int i, j, k, bcflag;
  Real x1, x2, x3;
  Real den = 1.0, pres = 1.0e-6;
  static int frst = 1;   /* enroll history variables only on the first call */

#ifdef SHEARING_BOX
  ShBoxCoord = xy;       /* shearing box lies in the xy (r-phi) plane */
#endif

  /* problem parameters; Omega_0 defaults to 1e-3 */
#ifdef SHEARING_BOX
  Omega_0 = par_getd_def("problem","omega",1.0e-3);
  qshear  = par_getd_def("problem","qshear",1.5);
#endif
  Mp          = par_getd_def("problem","Mplanet",0.0);
  Xplanet     = par_getd_def("problem","Xplanet",0.0);
  Yplanet     = par_getd_def("problem","Yplanet",0.0);
  Zplanet     = par_getd_def("problem","Zplanet",0.0);
  Rsoft       = par_getd_def("problem","Rsoft",0.1);
  ramp_time   = 0.0;
  insert_time = par_getd_def("problem","insert_time",0.0);

  /* isothermal runs use the sound speed squared as the pressure scale */
#ifdef ISOTHERMAL
  pres = Iso_csound2;
#endif

  /* initialize d, M, and E; with FARGO the background shear is omitted */
  for (k=ks; k<=ke; k++) {
  for (j=js; j<=je; j++) {
  for (i=is; i<=ie; i++) {
    cc_pos(pGrid,i,j,k,&x1,&x2,&x3);
    pGrid->U[k][j][i].d  = den;
    pGrid->U[k][j][i].M1 = 0.0;
    pGrid->U[k][j][i].M2 = 0.0;
#ifdef SHEARING_BOX
#ifndef FARGO
    pGrid->U[k][j][i].M2 -= den*(qshear*Omega_0*x1);
#endif
#endif
    pGrid->U[k][j][i].M3 = 0.0;
#ifdef ADIABATIC
    pGrid->U[k][j][i].E = pres/Gamma_1
      + 0.5*(SQR(pGrid->U[k][j][i].M1) + SQR(pGrid->U[k][j][i].M2)
           + SQR(pGrid->U[k][j][i].M3))/den;
#endif
  }}}

  /* gravitational potential of planet plus shearing-box potential */
  StaticGravPot  = PlanetPot;
  ShearingBoxPot = UnstratifiedDisk;

  /* enroll new history variables, only on the first invocation */
  if (frst == 1) {
    dump_history_enroll(hst_rho_Vx_dVy, "<rho Vx dVy>");
    dump_history_enroll(hst_rho_dVy2, "<rho dVy^2>");
#ifdef ADIABATIC
    dump_history_enroll(hst_E_total, "<E + rho Phi>");
#endif
    frst = 0;
  }

  /* diffusion coefficients */
#ifdef VISCOSITY
  nu_iso   = par_getd_def("problem","nu_iso",0.0);
  nu_aniso = par_getd_def("problem","nu_aniso",0.0);
#endif

  /* Enroll outflow-like BCs if periodic BCs (flag 4) were NOT selected;
   * assumes the root level is described by the <domain1> input block. */
  bcflag = par_geti_def("domain1","bc_ix1",0);
  if (bcflag != 4 && pDomain->Disp[0] == 0)
    bvals_mhd_fun(pDomain, left_x1, constant_iib);

  bcflag = par_geti_def("domain1","bc_ox1",0);
  if (bcflag != 4 && pDomain->MaxX[0] == pDomain->RootMaxX[0])
    bvals_mhd_fun(pDomain, right_x1, constant_oib);

  return;
}
/* Initialize the Mesh hierarchy from the input file: count levels and
 * Domains, set up the root level, allocate and fill the Domain array,
 * check Domain geometry (no overlaps, legal parent/child nesting), divide
 * each Domain into Grids assigned to MPI processes, allocate the local
 * Grid, and build the per-Domain and parent/child MPI communicators.
 *
 * Fixes vs. original: the "x%dmax < x%dmin" ath_error call passed only
 * one integer for two %d conversions (format/argument mismatch — UB);
 * the second (i+1) is now supplied. Unused local Nranks0 removed. */
void init_mesh(MeshS *pM)
{
  int nblock,num_domains,nd,nl,level,maxlevel=0,nd_this_level;
  int nDim,nDim_test,dim;
  int *next_domainid;
  char block[80];
  int ncd,ir,irefine,l,m,n,roffset;
  int i,Nx[3],izones;
  div_t xdiv[3];              /* divisor with quot and rem members */
  Real root_xmin[3], root_xmax[3]; /* min/max of x in each dir on root grid */
  int Nproc_Comm_world=1,nproc=0,next_procID;
  SideS D1,D2;
  DomainS *pD, *pCD;
#ifdef MPI_PARALLEL
  int ierr,child_found,groupn,Nranks,max_rank,irank,*ranks;
  MPI_Group world_group;

/* Get total # of processes, in MPI_COMM_WORLD */
  ierr = MPI_Comm_size(MPI_COMM_WORLD, &Nproc_Comm_world);
#endif

/* Start by initializing some quantities in the Mesh structure */
  pM->time = 0.0;
  pM->nstep = 0;
  pM->outfilename = par_gets("job","problem_id");

/*--- Step 1: Figure out how many levels and domains there are. -------------*/
/* read levels of each domain block in input file and calculate max level */

  num_domains = par_geti("job","num_domains");
#ifndef STATIC_MESH_REFINEMENT
  if (num_domains > 1)
    ath_error("[init_mesh]: num_domains=%d; for num_domains > 1 configure with --enable-smr\n",num_domains);
#endif
  for (nblock=1; nblock<=num_domains; nblock++){
    sprintf(block,"domain%d",nblock);
    if (par_exist(block,"level") == 0)
      ath_error("[init_mesh]: level does not exist in block %s\n",block);
    level = par_geti(block,"level");
    maxlevel = MAX(maxlevel,level);
  }

/* set number of levels in Mesh, and allocate DomainsPerLevel array */
  pM->NLevels = maxlevel + 1;  /* level counting starts at 0 */
  pM->DomainsPerLevel = (int*)calloc_1d_array(pM->NLevels,sizeof(int));
  if (pM->DomainsPerLevel == NULL)
    ath_error("[init_mesh]: malloc returned a NULL pointer\n");

/* Now figure out how many domains there are at each level */
  for (nl=0; nl<=maxlevel; nl++){
    nd_this_level=0;
    for (nblock=1; nblock<=num_domains; nblock++){
      sprintf(block,"domain%d",nblock);
      if (par_geti(block,"level") == nl) nd_this_level++;
    }
/* Error if there are any levels with no domains. Else set DomainsPerLevel */
    if (nd_this_level == 0) {
      ath_error("[init_mesh]: Level %d has zero domains\n",nl);
    } else {
      pM->DomainsPerLevel[nl] = nd_this_level;
    }
  }

/*--- Step 2: Set up root level. --------------------------------------------*/
/* Find the <domain> block in the input file corresponding to the root level,
 * and set root level properties in Mesh structure */

  if (pM->DomainsPerLevel[0] != 1)
    ath_error("[init_mesh]: Level 0 has %d domains\n",pM->DomainsPerLevel[0]);

  for (nblock=1; nblock<=num_domains; nblock++){
    sprintf(block,"domain%d",nblock);
    level = par_geti(block,"level");
    if (level == 0){
      root_xmin[0] = par_getd(block,"x1min");
      root_xmax[0] = par_getd(block,"x1max");
      root_xmin[1] = par_getd(block,"x2min");
      root_xmax[1] = par_getd(block,"x2max");
      root_xmin[2] = par_getd(block,"x3min");
      root_xmax[2] = par_getd(block,"x3max");
      Nx[0] = par_geti(block,"Nx1");
      Nx[1] = par_geti(block,"Nx2");
      Nx[2] = par_geti(block,"Nx3");

/* number of dimensions of root level, to test against all other inputs */
      nDim=0;
      for (i=0; i<3; i++) if (Nx[i]>1) nDim++;
      if (nDim==0) ath_error("[init_mesh] None of Nx1,Nx2,Nx3 > 1\n");

/* some error tests of root grid */
      for (i=0; i<3; i++) {
        if (Nx[i] < 1) {
          ath_error("[init_mesh]: Nx%d in %s must be >= 1\n",(i+1),block);
        }
        if(root_xmax[i] < root_xmin[i]) {
          /* BUGFIX: format has two %d conversions; pass (i+1) twice */
          ath_error("[init_mesh]: x%dmax < x%dmin in %s\n",(i+1),(i+1),block);
        }
      }
      if (nDim==1 && Nx[0]==1) {
        ath_error("[init_mesh]:1D requires Nx1>1: in %s Nx1=1,Nx2=%d,Nx3=%d\n",
          block,Nx[1],Nx[2]);
      }
      if (nDim==2 && Nx[2]>1) {ath_error(
        "[init_mesh]:2D requires Nx1,Nx2>1: in %s Nx1=%d,Nx2=%d,Nx3=%d\n",
        block,Nx[0],Nx[1],Nx[2]);
      }

/* Now that everything is OK, set root grid properties in Mesh structure */
      for (i=0; i<3; i++) {
        pM->Nx[i] = Nx[i];
        pM->RootMinX[i] = root_xmin[i];
        pM->RootMaxX[i] = root_xmax[i];
        pM->dx[i] = (root_xmax[i] - root_xmin[i])/(Real)(Nx[i]);
      }

/* Set BC flags on root domain */
      pM->BCFlag_ix1 = par_geti_def(block,"bc_ix1",0);
      pM->BCFlag_ix2 = par_geti_def(block,"bc_ix2",0);
      pM->BCFlag_ix3 = par_geti_def(block,"bc_ix3",0);
      pM->BCFlag_ox1 = par_geti_def(block,"bc_ox1",0);
      pM->BCFlag_ox2 = par_geti_def(block,"bc_ox2",0);
      pM->BCFlag_ox3 = par_geti_def(block,"bc_ox3",0);
    }
  }

/*--- Step 3: Allocate and initialize domain array. -------------------------*/
/* Allocate memory and set pointers for Domain array in Mesh. Since the
 * number of domains nd depends on the level nl, this is a strange array
 * because it is not [nl]x[nd]. Rather it is nl pointers to nd[nl] Domains.
 * Compare to the calloc_2d_array() function in ath_array.c */

  if((pM->Domain = (DomainS**)calloc((maxlevel+1),sizeof(DomainS*))) == NULL){
    ath_error("[init_mesh] failed to allocate memory for %d Domain pointers\n",
     (maxlevel+1));
  }

  if((pM->Domain[0]=(DomainS*)calloc(num_domains,sizeof(DomainS))) == NULL){
    ath_error("[init_mesh] failed to allocate memory for Domains\n");
  }

  for(nl=1; nl<=maxlevel; nl++)
    pM->Domain[nl] = (DomainS*)((unsigned char *)pM->Domain[nl-1] +
      pM->DomainsPerLevel[nl-1]*sizeof(DomainS));

/* Loop over every <domain> block in the input file, and initialize each Domain
 * in the mesh hierarchy (the Domain array), including the root level Domain */

  next_domainid = (int*)calloc_1d_array(pM->NLevels,sizeof(int));
  for(nl=0; nl<=maxlevel; nl++) next_domainid[nl] = 0;

  for (nblock=1; nblock<=num_domains; nblock++){
    sprintf(block,"domain%d",nblock);

/* choose nd coordinate in Domain array for this <domain> block according
 * to the order it appears in input */
    nl = par_geti(block,"level");
    if (next_domainid[nl] > (pM->DomainsPerLevel[nl])-1)
      ath_error("[init_mesh]: Exceeded available domain ids on level %d\n",nl);
    nd = next_domainid[nl];
    next_domainid[nl]++;
    irefine = 1;
    for (ir=1;ir<=nl;ir++) irefine *= 2;   /* C pow fn only takes doubles */

/* Initialize level, number, input <domain> block number, and total number of
 * cells in this Domain */
    pM->Domain[nl][nd].Level = nl;
    pM->Domain[nl][nd].DomNumber = nd;
    pM->Domain[nl][nd].InputBlock = nblock;

    pM->Domain[nl][nd].Nx[0] = par_geti(block,"Nx1");
    pM->Domain[nl][nd].Nx[1] = par_geti(block,"Nx2");
    pM->Domain[nl][nd].Nx[2] = par_geti(block,"Nx3");

/* error tests: dimensions of domain */
    nDim_test=0;
    for (i=0; i<3; i++) if (pM->Domain[nl][nd].Nx[i]>1) nDim_test++;
    if (nDim_test != nDim) {
      ath_error("[init_mesh]: in %s grid is %dD, but in root level it is %dD\n",
        block,nDim_test,nDim);
    }
    for (i=0; i<3; i++) {
      if (pM->Domain[nl][nd].Nx[i] < 1) {
        ath_error("[init_mesh]: %s/Nx%d = %d must be >= 1\n",
          block,(i+1),pM->Domain[nl][nd].Nx[i]);
      }
    }
    if (nDim==1 && pM->Domain[nl][nd].Nx[0]==1) {ath_error(
      "[init_mesh]: 1D requires Nx1>1 but in %s Nx1=1,Nx2=%d,Nx3=%d\n",
      block,pM->Domain[nl][nd].Nx[1],pM->Domain[nl][nd].Nx[2]);
    }
    if (nDim==2 && pM->Domain[nl][nd].Nx[2]>1) {ath_error(
      "[init_mesh]:2D requires Nx1,Nx2 > 1 but in %s Nx1=%d,Nx2=%d,Nx3=%d\n",
      block,pM->Domain[nl][nd].Nx[0],pM->Domain[nl][nd].Nx[1],
      pM->Domain[nl][nd].Nx[2]);
    }

/* active cells on this level must split evenly into root-level cells */
    for (i=0; i<nDim; i++) {
      xdiv[i] = div(pM->Domain[nl][nd].Nx[i], irefine);
      if (xdiv[i].rem != 0){
        ath_error("[init_mesh]: %s/Nx%d = %d must be divisible by %d\n",
          block,(i+1),pM->Domain[nl][nd].Nx[i],irefine);
      }
    }

/* Set cell size based on level of domain, but only if Ncell > 1 */
    for (i=0; i<3; i++) {
      if (pM->Domain[nl][nd].Nx[i] > 1) {
        pM->Domain[nl][nd].dx[i] = pM->dx[i]/(Real)(irefine);
      } else {
        pM->Domain[nl][nd].dx[i] = pM->dx[i];
      }
    }

/* Set displacement of Domain from origin. By definition, root level has 0
 * displacement, so only read for levels other than root */
    for (i=0; i<3; i++) pM->Domain[nl][nd].Disp[i] = 0;
    if (nl != 0) {
      if (par_exist(block,"iDisp") == 0)
        ath_error("[init_mesh]: iDisp does not exist in block %s\n",block);
      pM->Domain[nl][nd].Disp[0] = par_geti(block,"iDisp");

/* jDisp=0 if problem is only 1D */
      if (pM->Nx[1] > 1) {
        if (par_exist(block,"jDisp") == 0)
          ath_error("[init_mesh]: jDisp does not exist in block %s\n",block);
        pM->Domain[nl][nd].Disp[1] = par_geti(block,"jDisp");
      }

/* kDisp=0 if problem is only 2D */
      if (pM->Nx[2] > 1) {
        if (par_exist(block,"kDisp") == 0)
          ath_error("[init_mesh]: kDisp does not exist in block %s\n",block);
        pM->Domain[nl][nd].Disp[2] = par_geti(block,"kDisp");
      }
    }

    for (i=0; i<nDim; i++) {
      xdiv[i] = div(pM->Domain[nl][nd].Disp[i], irefine);
      if (xdiv[i].rem != 0){
        ath_error("[init_mesh]: %s/Disp%d = %d must be divisible by %d\n",
          block,(i+1),pM->Domain[nl][nd].Disp[i],irefine);
      }
    }

/* Use cell size and displacement from origin to compute min/max of x1/x2/x3 on
 * this domain. Ensure that if Domain touches root grid boundary, the min/max
 * of this Domain are set IDENTICAL to values in root grid */
    for (i=0; i<3; i++){
      if (pM->Domain[nl][nd].Disp[i] == 0) {
        pM->Domain[nl][nd].MinX[i] = root_xmin[i];
      } else {
        pM->Domain[nl][nd].MinX[i] = root_xmin[i]
          + ((Real)(pM->Domain[nl][nd].Disp[i]))*pM->Domain[nl][nd].dx[i];
      }
      izones= (pM->Domain[nl][nd].Disp[i] + pM->Domain[nl][nd].Nx[i])/irefine;
      if(izones == pM->Nx[i]){
        pM->Domain[nl][nd].MaxX[i] = root_xmax[i];
      } else {
        pM->Domain[nl][nd].MaxX[i] = pM->Domain[nl][nd].MinX[i]
          + ((Real)(pM->Domain[nl][nd].Nx[i]))*pM->Domain[nl][nd].dx[i];
      }
      pM->Domain[nl][nd].RootMinX[i] = root_xmin[i];
      pM->Domain[nl][nd].RootMaxX[i] = root_xmax[i];
    }

  }  /*---------- end loop over domain blocks in input file ----------------*/

/*--- Step 4: Check that domains on the same level are non-overlapping. -----*/
/* Compare the integer coordinates of the sides of Domains at the same level.
 * Print error if Domains overlap or touch. */

  for (nl=maxlevel; nl>0; nl--){     /* start at highest level, and skip root */
    for (nd=0; nd<(pM->DomainsPerLevel[nl])-1; nd++){
      for (i=0; i<3; i++) {
        D1.ijkl[i] = pM->Domain[nl][nd].Disp[i];
        D1.ijkr[i] = pM->Domain[nl][nd].Disp[i] + pM->Domain[nl][nd].Nx[i];
      }

      for (ncd=nd+1; ncd<(pM->DomainsPerLevel[nl]); ncd++) {
        for (i=0; i<3; i++) {
          D2.ijkl[i] = pM->Domain[nl][ncd].Disp[i];
          D2.ijkr[i] = pM->Domain[nl][ncd].Disp[i] + pM->Domain[nl][ncd].Nx[i];
        }

        if (D1.ijkl[0] <= D2.ijkr[0] && D1.ijkr[0] >= D2.ijkl[0] &&
            D1.ijkl[1] <= D2.ijkr[1] && D1.ijkr[1] >= D2.ijkl[1] &&
            D1.ijkl[2] <= D2.ijkr[2] && D1.ijkr[2] >= D2.ijkl[2]){
          ath_error("Domains %d and %d at same level overlap or touch\n",
            pM->Domain[nl][nd].InputBlock,pM->Domain[nl][ncd].InputBlock);
        }
      }
    }
  }

/*--- Step 5: Check for illegal geometry of child/parent Domains ------------*/

  for (nl=0; nl<maxlevel; nl++){
    for (nd=0; nd<pM->DomainsPerLevel[nl]; nd++){
      pD = (DomainS*)&(pM->Domain[nl][nd]);  /* set ptr to this Domain */

      for (i=0; i<3; i++) {
        D1.ijkl[i] = pD->Disp[i];
        D1.ijkr[i] = pD->Disp[i] + pD->Nx[i];
      }

      for (ncd=0; ncd<pM->DomainsPerLevel[nl+1]; ncd++){
        pCD = (DomainS*)&(pM->Domain[nl+1][ncd]); /* ptr to potential child */

        for (i=0; i<3; i++) {
          D2.ijkl[i] = pCD->Disp[i]/2;
          D2.ijkr[i] = 1;
          if (pCD->Nx[i] > 1) D2.ijkr[i] = (pCD->Disp[i] + pCD->Nx[i])/2;
        }

        if (D1.ijkl[0] <= D2.ijkr[0] && D1.ijkr[0] >= D2.ijkl[0] &&
            D1.ijkl[1] <= D2.ijkr[1] && D1.ijkr[1] >= D2.ijkl[1] &&
            D1.ijkl[2] <= D2.ijkr[2] && D1.ijkr[2] >= D2.ijkl[2]){

/* check for child Domains that touch edge of parent (and are not at edges of
 * root), extend past edge of parent, or are < nghost/2 from edge of parent */
          for (dim=0; dim<nDim; dim++){
            irefine = 1;
            for (i=1;i<=nl;i++) irefine *= 2;   /* parent refinement lev */
            roffset = (pCD->Disp[dim] + pCD->Nx[dim])/(2*irefine)
                      - pM->Nx[dim];

            if (((D2.ijkl[dim] == D1.ijkl[dim]) && (pD->Disp[dim] != 0)) ||
                ((D2.ijkr[dim] == D1.ijkr[dim]) && (roffset != 0))) {
              for (i=0; i<nDim; i++) {
                D1.ijkl[i] /= irefine;   /* report indices scaled to root */
                D1.ijkr[i] /= irefine;
                D2.ijkl[i] /= irefine;
                D2.ijkr[i] /= irefine;
              }
              ath_error("[init_mesh] child Domain D%d[is,ie,js,je,ks,ke]=[%d %d %d %d %d %d] touches parent D%d[is,ie,js,je,ks,ke]=[%d %d %d %d %d %d]\n",
                pCD->InputBlock,D2.ijkl[0],D2.ijkr[0],D2.ijkl[1],D2.ijkr[1],
                D2.ijkl[2],D2.ijkr[2],pD->InputBlock,D1.ijkl[0],D1.ijkr[0],
                D1.ijkl[1],D1.ijkr[1],D1.ijkl[2],D1.ijkr[2]);
            }

            if ((D2.ijkl[dim] < D1.ijkl[dim]) ||
                (D2.ijkr[dim] > D1.ijkr[dim])) {
              for (i=0; i<nDim; i++) {
                D1.ijkl[i] /= irefine;   /* report indices scaled to root */
                D1.ijkr[i] /= irefine;
                D2.ijkl[i] /= irefine;
                D2.ijkr[i] /= irefine;
              }
              ath_error("[init_mesh] child Domain D%d[is,ie,js,je,ks,ke]=[%d %d %d %d %d %d] extends past parent D%d[is,ie,js,je,ks,ke]=[%d %d %d %d %d %d]\n",
                pCD->InputBlock,D2.ijkl[0],D2.ijkr[0],D2.ijkl[1],D2.ijkr[1],
                D2.ijkl[2],D2.ijkr[2],pD->InputBlock,D1.ijkl[0],D1.ijkr[0],
                D1.ijkl[1],D1.ijkr[1],D1.ijkl[2],D1.ijkr[2]);
            }

            if (((2*(D2.ijkl[dim]-D1.ijkl[dim]) < nghost) &&
                 (2*(D2.ijkl[dim]-D1.ijkl[dim]) > 0 )) ||
                ((2*(D1.ijkr[dim]-D2.ijkr[dim]) < nghost) &&
                 (2*(D1.ijkr[dim]-D2.ijkr[dim]) > 0 ))) {
              for (i=0; i<nDim; i++) {
                D1.ijkl[i] /= irefine;   /* report indices scaled to root */
                D1.ijkr[i] /= irefine;
                D2.ijkl[i] /= irefine;
                D2.ijkr[i] /= irefine;
              }
              ath_error("[init_mesh] child Domain D%d[is,ie,js,je,ks,ke]=[%d %d %d %d %d %d] closer than nghost/2 to parent D%d[is,ie,js,je,ks,ke]=[%d %d %d %d %d %d]\n",
                pCD->InputBlock,D2.ijkl[0],D2.ijkr[0],D2.ijkl[1],D2.ijkr[1],
                D2.ijkl[2],D2.ijkr[2],pD->InputBlock,D1.ijkl[0],D1.ijkr[0],
                D1.ijkl[1],D1.ijkr[1],D1.ijkl[2],D1.ijkr[2]);
            }

          }
        }
      }
    }
  }

/*--- Step 6: Divide each Domain into Grids, and allocate to processor(s) ---*/
/* Get the number of Grids in each direction. These are given either in the
 * <domain?> block in the input file, or by automatic decomposition given the
 * number of processors desired for this domain. */

  next_procID = 0;  /* start assigning processors to Grids at ID=0 */

  for (nl=0; nl<=maxlevel; nl++){
    for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
      pD = (DomainS*)&(pM->Domain[nl][nd]);  /* set ptr to this Domain */
      sprintf(block,"domain%d",pD->InputBlock);

#ifndef MPI_PARALLEL
      for (i=0; i<3; i++) pD->NGrid[i] = 1;
#else
      nproc = par_geti_def(block,"AutoWithNProc",0);

/* Read layout of Grids from input file */
      if (nproc == 0){
        pD->NGrid[0] = par_geti_def(block,"NGrid_x1",1);
        pD->NGrid[1] = par_geti_def(block,"NGrid_x2",1);
        pD->NGrid[2] = par_geti_def(block,"NGrid_x3",1);
        if (pD->NGrid[0] == 0)
          ath_error("[init_mesh] Cannot enter NGrid_x1=0 in %s\n",block);
        if (pD->NGrid[1] == 0)
          ath_error("[init_mesh] Cannot enter NGrid_x2=0 in %s\n",block);
        if (pD->NGrid[2] == 0)
          ath_error("[init_mesh] Cannot enter NGrid_x3=0 in %s\n",block);
      }

/* Auto decompose Domain into Grids. To use this option, set "AutoWithNProc"
 * to number of processors desired for this Domain */
      else if (nproc > 0){
        if(dom_decomp(pD->Nx[0],pD->Nx[1],pD->Nx[2],nproc,
           &(pD->NGrid[0]),&(pD->NGrid[1]),&(pD->NGrid[2])))
          ath_error("[init_mesh]: Error in automatic Domain decomposition\n");

        /* Store the domain decomposition in the par database */
        par_seti(block,"NGrid_x1","%d",pD->NGrid[0],"x1 decomp");
        par_seti(block,"NGrid_x2","%d",pD->NGrid[1],"x2 decomp");
        par_seti(block,"NGrid_x3","%d",pD->NGrid[2],"x3 decomp");

      } else {
        ath_error("[init_mesh] invalid AutoWithNProc=%d in %s\n",nproc,block);
      }
#endif /* MPI_PARALLEL */

/* test for conflicts between number of grids and dimensionality */
      for (i=0; i<3; i++){
        if(pD->NGrid[i] > 1 && pD->Nx[i] <= 1)
          ath_error("[init_mesh]: %s/NGrid_x%d = %d and Nx%d = %d\n",block,
            (i+1),pD->NGrid[i],(i+1),pD->Nx[i]);
      }

/* check there are more processors than Grids needed by this Domain. */
      nproc = (pD->NGrid[0])*(pD->NGrid[1])*(pD->NGrid[2]);
      if(nproc > Nproc_Comm_world) ath_error(
        "[init_mesh]: %d Grids requested by block %s and only %d procs\n"
        ,nproc,block,Nproc_Comm_world);

/* Build 3D array to store data on Grids in this Domain */
      if ((pD->GData = (GridsDataS***)calloc_3d_array(pD->NGrid[2],pD->NGrid[1],
        pD->NGrid[0],sizeof(GridsDataS))) == NULL) ath_error(
        "[init_mesh]: GData calloc returned a NULL pointer\n");

/* Divide the domain into blocks */
      for (i=0; i<3; i++) {
        xdiv[i] = div(pD->Nx[i], pD->NGrid[i]);
      }

/* Distribute cells in Domain to Grids. Assign each Grid to a processor ID in
 * the MPI_COMM_WORLD communicator. For single-processor jobs, there is only
 * one ID=0, and the GData array will have only one element. */
      for(n=0; n<(pD->NGrid[2]); n++){
      for(m=0; m<(pD->NGrid[1]); m++){
      for(l=0; l<(pD->NGrid[0]); l++){
        for (i=0; i<3; i++) pD->GData[n][m][l].Nx[i] = xdiv[i].quot;
        pD->GData[n][m][l].ID_Comm_world = next_procID++;
        if (next_procID > ((Nproc_Comm_world)-1)) next_procID=0;
      }}}

/* If the Domain is not evenly divisible put the extra cells on the first
 * Grids in each direction, maintaining the load balance as much as possible */
      for(n=0; n<(pD->NGrid[2]); n++){
        for(m=0; m<(pD->NGrid[1]); m++){
          for(l=0; l<xdiv[0].rem; l++){
            pD->GData[n][m][l].Nx[0]++;
          }
        }
      }
      xdiv[0].rem=0;

      for(n=0; n<(pD->NGrid[2]); n++){
        for(m=0; m<xdiv[1].rem; m++) {
          for(l=0; l<(pD->NGrid[0]); l++){
            pD->GData[n][m][l].Nx[1]++;
          }
        }
      }
      xdiv[1].rem=0;

      for(n=0; n<xdiv[2].rem; n++){
        for(m=0; m<(pD->NGrid[1]); m++){
          for(l=0; l<(pD->NGrid[0]); l++){
            pD->GData[n][m][l].Nx[2]++;
          }
        }
      }
      xdiv[2].rem=0;

/* Initialize displacements from origin for each Grid */
      for(n=0; n<(pD->NGrid[2]); n++){
        for(m=0; m<(pD->NGrid[1]); m++){
          pD->GData[n][m][0].Disp[0] = pD->Disp[0];
          for(l=1; l<(pD->NGrid[0]); l++){
            pD->GData[n][m][l].Disp[0] = pD->GData[n][m][l-1].Disp[0] +
                                         pD->GData[n][m][l-1].Nx[0];
          }
        }
      }

      for(n=0; n<(pD->NGrid[2]); n++){
        for(l=0; l<(pD->NGrid[0]); l++){
          pD->GData[n][0][l].Disp[1] = pD->Disp[1];
          for(m=1; m<(pD->NGrid[1]); m++){
            pD->GData[n][m][l].Disp[1] = pD->GData[n][m-1][l].Disp[1] +
                                         pD->GData[n][m-1][l].Nx[1];
          }
        }
      }

      for(m=0; m<(pD->NGrid[1]); m++){
        for(l=0; l<(pD->NGrid[0]); l++){
          pD->GData[0][m][l].Disp[2] = pD->Disp[2];
          for(n=1; n<(pD->NGrid[2]); n++){
            pD->GData[n][m][l].Disp[2] = pD->GData[n-1][m][l].Disp[2] +
                                         pD->GData[n-1][m][l].Nx[2];
          }
        }
      }

    }  /* end loop over ndomains */
  }    /* end loop over nlevels */

/* check that total number of Grids was partitioned evenly over total number of
 * MPI processes available (equal to one for single processor jobs) */
  if (next_procID != 0)
    ath_error("[init_mesh]:total # of Grids != total # of MPI procs\n");

/*--- Step 7: Allocate a Grid for each Domain on this processor -------------*/

  for (nl=0; nl<=maxlevel; nl++){
    for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
      pD = (DomainS*)&(pM->Domain[nl][nd]);  /* set ptr to this Domain */
      sprintf(block,"domain%d",pD->InputBlock);
      pD->Grid = NULL;

/* Loop over GData array, and if there is a Grid assigned to this proc,
 * allocate it */
      for(n=0; n<(pD->NGrid[2]); n++){
      for(m=0; m<(pD->NGrid[1]); m++){
      for(l=0; l<(pD->NGrid[0]); l++){
        if (pD->GData[n][m][l].ID_Comm_world == myID_Comm_world) {
          if ((pD->Grid = (GridS*)malloc(sizeof(GridS))) == NULL)
            ath_error("[init_mesh]: Failed to malloc a Grid for %s\n",block);
        }
      }}}
    }
  }

/*--- Step 8: Create an MPI Communicator for each Domain --------------------*/

#ifdef MPI_PARALLEL
/* Allocate memory for ranks[] array */
  max_rank = 0;
  for (nl=0; nl<=maxlevel; nl++){
    for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
      pD = (DomainS*)&(pM->Domain[nl][nd]);  /* set ptr to this Domain */
      Nranks = (pD->NGrid[0])*(pD->NGrid[1])*(pD->NGrid[2]);
      max_rank = MAX(max_rank, Nranks);
    }}
  ranks = (int*)calloc_1d_array(max_rank,sizeof(int));

/* Extract handle of group defined by MPI_COMM_WORLD communicator */
  ierr = MPI_Comm_group(MPI_COMM_WORLD, &world_group);

  for (nl=0; nl<=maxlevel; nl++){
    for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
      pD = (DomainS*)&(pM->Domain[nl][nd]);  /* set ptr to this Domain */

/* Load integer array with ranks of processes in MPI_COMM_WORLD updating Grids
 * on this Domain. The ranks of these processes in the new Comm_Domain
 * communicator created below are equal to the indices of this array */
      Nranks = (pD->NGrid[0])*(pD->NGrid[1])*(pD->NGrid[2]);
      groupn = 0;
      for(n=0; n<(pD->NGrid[2]); n++){
      for(m=0; m<(pD->NGrid[1]); m++){
      for(l=0; l<(pD->NGrid[0]); l++){
        ranks[groupn] = pD->GData[n][m][l].ID_Comm_world;
        pD->GData[n][m][l].ID_Comm_Domain = groupn;
        groupn++;
      }}}

/* Create a new group for this Domain; use it to create a new communicator */
      ierr = MPI_Group_incl(world_group,Nranks,ranks,&(pD->Group_Domain));
      ierr = MPI_Comm_create(MPI_COMM_WORLD,pD->Group_Domain,
        &(pD->Comm_Domain));

    }}
  free_1d_array(ranks);
#endif /* MPI_PARALLEL */

/*--- Step 9: Create MPI Communicators for Child and Parent Domains ---------*/

#if defined(MPI_PARALLEL) && defined(STATIC_MESH_REFINEMENT)
/* Initialize communicators to NULL, since not all Domains use them, and
 * allocate memory for ranks[] array */
  for (nl=0; nl<=maxlevel; nl++){
    for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
      pM->Domain[nl][nd].Comm_Parent = MPI_COMM_NULL;
      pM->Domain[nl][nd].Comm_Children = MPI_COMM_NULL;
    }
  }

  if (maxlevel > 0) {
    ranks = (int*)calloc_1d_array(Nproc_Comm_world,sizeof(int));
  }

/* For each Domain up to (maxlevel-1), initialize communicator with children */
  for (nl=0; nl<maxlevel; nl++){
    for (nd=0; nd<pM->DomainsPerLevel[nl]; nd++){
      pD = (DomainS*)&(pM->Domain[nl][nd]);  /* set ptr to this Domain */
      child_found = 0;

/* Load integer array with ranks of processes in MPI_COMM_WORLD updating Grids
 * on this Domain, in case a child Domain is found. Set IDs in Comm_Children
 * communicator based on index in rank array, in case child found. If no
 * child is found these ranks will never be used. */
      Nranks = (pD->NGrid[0])*(pD->NGrid[1])*(pD->NGrid[2]);
      groupn = 0;
      for(n=0; n<(pD->NGrid[2]); n++){
      for(m=0; m<(pD->NGrid[1]); m++){
      for(l=0; l<(pD->NGrid[0]); l++){
        ranks[groupn] = pD->GData[n][m][l].ID_Comm_world;
        pD->GData[n][m][l].ID_Comm_Children = groupn;
        groupn++;
      }}}

/* edges of this Domain */
      for (i=0; i<3; i++) {
        D1.ijkl[i] = pD->Disp[i];
        D1.ijkr[i] = pD->Disp[i] + pD->Nx[i];
      }

/* Loop over all Domains at next level, looking for children of this Domain */
      for (ncd=0; ncd<pM->DomainsPerLevel[nl+1]; ncd++){
        pCD = (DomainS*)&(pM->Domain[nl+1][ncd]); /* ptr to potential child */

/* edges of potential child Domain */
        for (i=0; i<3; i++) {
          D2.ijkl[i] = pCD->Disp[i]/2;
          D2.ijkr[i] = 1;
          if (pCD->Nx[i] > 1) D2.ijkr[i] = (pCD->Disp[i] + pCD->Nx[i])/2;
        }

        if (D1.ijkl[0] < D2.ijkr[0] && D1.ijkr[0] > D2.ijkl[0] &&
            D1.ijkl[1] < D2.ijkr[1] && D1.ijkr[1] > D2.ijkl[1] &&
            D1.ijkl[2] < D2.ijkr[2] && D1.ijkr[2] > D2.ijkl[2]){
          child_found = 1;

/* Child found. Add child processors to ranks array, but only if they are
 * different from processes currently there (including parent and any
 * previously found children). Set IDs associated with Comm_Parent
 * communicator, since on the child Domain this is the same as the
 * Comm_Children communicator on the parent Domain */
          for(n=0; n<(pCD->NGrid[2]); n++){
          for(m=0; m<(pCD->NGrid[1]); m++){
          for(l=0; l<(pCD->NGrid[0]); l++){
            irank = -1;
            for (i=0; i<Nranks; i++) {
              if(pCD->GData[n][m][l].ID_Comm_world == ranks[i]) irank = i;
            }
            if (irank == -1) {
              ranks[groupn] = pCD->GData[n][m][l].ID_Comm_world;
              pCD->GData[n][m][l].ID_Comm_Parent = groupn;
              groupn++;
              Nranks++;
            }
            else {
              pCD->GData[n][m][l].ID_Comm_Parent = ranks[irank];
            }
          }}}
        }
      }

/* After looping over all potential child Domains, create a new communicator if
 * a child was found */
      if (child_found == 1) {
        ierr = MPI_Group_incl(world_group, Nranks, ranks,
          &(pD->Group_Children));
        ierr = MPI_Comm_create(MPI_COMM_WORLD,pD->Group_Children,
          &pD->Comm_Children);

/* Loop over children to set Comm_Parent communicators */
        for (ncd=0; ncd<pM->DomainsPerLevel[nl+1]; ncd++){
          pCD = (DomainS*)&(pM->Domain[nl+1][ncd]);

          for (i=0; i<3; i++) {
            D2.ijkl[i] = pCD->Disp[i]/2;
            D2.ijkr[i] = 1;
            if (pCD->Nx[i] > 1) D2.ijkr[i] = (pCD->Disp[i] + pCD->Nx[i])/2;
          }

          if (D1.ijkl[0] < D2.ijkr[0] && D1.ijkr[0] > D2.ijkl[0] &&
              D1.ijkl[1] < D2.ijkr[1] && D1.ijkr[1] > D2.ijkl[1] &&
              D1.ijkl[2] < D2.ijkr[2] && D1.ijkr[2] > D2.ijkl[2]){
            pCD->Comm_Parent = pD->Comm_Children;
          }
        }
      }
    }}
#endif /* MPI_PARALLEL & STATIC_MESH_REFINEMENT */

  free(next_domainid);
  return;
}
/*----------------------------------------------------------------------------*/
/* problem: sets up a circularly polarized Alfven wave propagating at an
 * arbitrary angle to the x1-axis on a 2D grid.  The in-plane interface
 * fields B1i/B2i are differenced from the vector potential A3 so that
 * div(B)=0 holds to machine precision.  The analytic solution is stored in
 * Soln, and copied to RootSoln on the root level for later error analysis.
 *
 * Fixes vs. previous version:
 *  - Soln was allocated with calloc_2d_array() and never freed (leak);
 *    it is now released before return.
 *  - loop index n for the NSCALARS copy was used but never declared.       */
void problem(DomainS *pDomain)
{
  GridS *pGrid = pDomain->Grid;
  ConsS **Soln;
  int i, is = pGrid->is, ie = pGrid->ie;
  int j, js = pGrid->js, je = pGrid->je;
  int k, ks = pGrid->ks, ke = pGrid->ke;
  int nx1, nx2;
  int dir;
#if (NSCALARS > 0)
  int n;                       /* passive-scalar index (was undeclared) */
#endif
  Real angle;    /* Angle the wave direction makes with the x1-direction */
  Real x1size,x2size,x1,x2,x3,cs,sn;
  Real v_par, v_perp, den, pres;
  Real lambda;   /* Wavelength */
#ifdef RESISTIVITY
  Real v_A, kva, omega_h, omega_l, omega_r;
#endif

/* array sizes include ghost zones */
  nx1 = (ie - is + 1) + 2*nghost;
  nx2 = (je - js + 1) + 2*nghost;

  if (pGrid->Nx[1] == 1) {
    ath_error("[problem] Grid must be 2D");
  }

  if ((Soln = (ConsS**)calloc_2d_array(nx2,nx1,sizeof(ConsS))) == NULL)
    ath_error("[problem]: Error allocating memory for Soln\n");

/* the analytic solution on the root Domain is kept for error computation */
  if (pDomain->Level == 0){
    if ((RootSoln =(ConsS**)calloc_2d_array(nx2,nx1,sizeof(ConsS)))==NULL)
      ath_error("[problem]: Error allocating memory for RootSoln\n");
  }

/* An angle =  0.0 is a wave aligned with the x1-direction. */
/* An angle = 90.0 is a wave aligned with the x2-direction. */
  angle = par_getd("problem","angle");
  x1size = pDomain->RootMaxX[0] - pDomain->RootMinX[0];
  x2size = pDomain->RootMaxX[1] - pDomain->RootMinX[1];

/* Compute the sin and cos of the angle and the wavelength. */
/* Put one wavelength in the grid */

  if (angle == 0.0) {
    sin_a = 0.0;
    cos_a = 1.0;
    lambda = x1size;
  }
  else if (angle == 90.0) {
    sin_a = 1.0;
    cos_a = 0.0;
    lambda = x2size;
  }
  else {

/* We put 1 wavelength in each direction.  Hence the wavelength
 * lambda = (pDomain->RootMaxX[0] - pDomain->RootMinX[0])*cos_a
 * AND   lambda = (pDomain->RootMaxX[1] - pDomain->RootMinX[1])*sin_a;
 * are both satisfied. */

    if(x1size == x2size){
      cos_a = sin_a = sqrt(0.5);
    }
    else{
      angle = atan((double)(x1size/x2size));
      sin_a = sin(angle);
      cos_a = cos(angle);
    }

/* Use the larger angle to determine the wavelength */
    if (cos_a >= sin_a) {
      lambda = x1size*cos_a;
    } else {
      lambda = x2size*sin_a;
    }
  }

/* Initialize k_parallel */
  k_par = 2.0*PI/lambda;
  b_par = par_getd("problem","b_par");
  den = 1.0;

  ath_pout(0,"va_parallel = %g\nlambda = %g\n",b_par/sqrt(den),lambda);

  b_perp = par_getd("problem","b_perp");
  v_perp = b_perp/sqrt((double)den);

  dir = par_geti_def("problem","dir",1); /* right(1)/left(2) polarization */
  if (dir == 1) /* right polarization */
    fac = 1.0;
  else          /* left polarization */
    fac = -1.0;

#ifdef RESISTIVITY
/* with Hall MHD the right/left modes are dispersive; rescale v_perp by the
 * appropriate branch (whistler/ion-cyclotron) of the dispersion relation */
  Q_Hall = par_getd("problem","Q_H");
  d_ind  = 0.0;
  v_A    = b_par/sqrt((double)den);
  if (Q_Hall > 0.0)
  {
    kva     = k_par*v_A;
    omega_h = 1.0/Q_Hall;

    omega_r = 0.5*SQR(kva)/omega_h*(sqrt(1.0+SQR(2.0*omega_h/kva)) + 1.0);
    omega_l = 0.5*SQR(kva)/omega_h*(sqrt(1.0+SQR(2.0*omega_h/kva)) - 1.0);

    if (dir == 1) /* right polarization (whistler wave) */
      v_perp = v_perp * kva / omega_r;
    else          /* left polarization */
      v_perp = v_perp * kva / omega_l;
  }
#endif

/* The gas pressure and parallel velocity are free parameters. */
  pres = par_getd("problem","pres");
  v_par = par_getd("problem","v_par");

/* Use the vector potential to initialize the interface magnetic fields.
 * The interface fields are located at the left grid cell face normal */
  for (k=ks; k<=ke; k++) {
    for (j=js; j<=je+1; j++) {
      for (i=is; i<=ie+1; i++) {
        cc_pos(pGrid,i,j,k,&x1,&x2,&x3);
        cs = cos(k_par*(x1*cos_a + x2*sin_a));

        /* shift to the lower-left cell corner for the A3 differences */
        x1 -= 0.5*pGrid->dx1;
        x2 -= 0.5*pGrid->dx2;

        pGrid->B1i[k][j][i] = -(A3(x1,(x2+pGrid->dx2)) - A3(x1,x2))/pGrid->dx2;
        pGrid->B2i[k][j][i] =  (A3((x1+pGrid->dx1),x2) - A3(x1,x2))/pGrid->dx1;
        pGrid->B3i[k][j][i] = b_perp*cs;
      }
    }
  }
  if (pGrid->Nx[2] > 1) {
    /* NOTE(review): k here is ke+1 (left over from the loop above); only x1
     * and x2 are used from cc_pos so the result is unaffected -- TODO confirm */
    for (j=js; j<=je+1; j++) {
      for (i=is; i<=ie+1; i++) {
        cc_pos(pGrid,i,j,k,&x1,&x2,&x3);
        cs = cos(k_par*(x1*cos_a + x2*sin_a));
        pGrid->B3i[ke+1][j][i] = b_perp*cs;
      }
    }
  }

/* Now initialize the cell centered quantities */
  for (k=ks; k<=ke; k++) {
    for (j=js; j<=je; j++) {
      for (i=is; i<=ie; i++) {
        cc_pos(pGrid,i,j,k,&x1,&x2,&x3);
        sn = sin(k_par*(x1*cos_a + x2*sin_a)) * fac;
        cs = cos(k_par*(x1*cos_a + x2*sin_a));

        Soln[j][i].d  = den;
        Soln[j][i].M1 = den*(v_par*cos_a + v_perp*sn*sin_a);
        Soln[j][i].M2 = den*(v_par*sin_a - v_perp*sn*cos_a);
        Soln[j][i].M3 = -den*v_perp*cs;
        pGrid->U[k][j][i].d  = Soln[j][i].d;
        pGrid->U[k][j][i].M1 = Soln[j][i].M1;
        pGrid->U[k][j][i].M2 = Soln[j][i].M2;
        pGrid->U[k][j][i].M3 = Soln[j][i].M3;

        /* cell-centered fields are face averages of the interface fields */
        Soln[j][i].B1c = 0.5*(pGrid->B1i[k][j][i] + pGrid->B1i[k][j][i+1]);
        Soln[j][i].B2c = 0.5*(pGrid->B2i[k][j][i] + pGrid->B2i[k][j+1][i]);
        Soln[j][i].B3c = b_perp*cs;
        pGrid->U[k][j][i].B1c = Soln[j][i].B1c;
        pGrid->U[k][j][i].B2c = Soln[j][i].B2c;
        pGrid->U[k][j][i].B3c = Soln[j][i].B3c;

#ifndef ISOTHERMAL
        Soln[j][i].E = pres/Gamma_1 + 0.5*(SQR(pGrid->U[k][j][i].B1c) +
                 SQR(pGrid->U[k][j][i].B2c) + SQR(pGrid->U[k][j][i].B3c) )
          + 0.5*(SQR(pGrid->U[k][j][i].M1) + SQR(pGrid->U[k][j][i].M2) +
                 SQR(pGrid->U[k][j][i].M3) )/den;
        pGrid->U[k][j][i].E = Soln[j][i].E;
#endif
      }
    }
  }

/* save solution on root grid */
  if (pDomain->Level == 0) {
    for (j=js; j<=je; j++) {
    for (i=is; i<=ie; i++) {
      RootSoln[j][i].d  = Soln[j][i].d ;
      RootSoln[j][i].M1 = Soln[j][i].M1;
      RootSoln[j][i].M2 = Soln[j][i].M2;
      RootSoln[j][i].M3 = Soln[j][i].M3;
#ifndef ISOTHERMAL
      RootSoln[j][i].E  = Soln[j][i].E ;
#endif /* ISOTHERMAL */
#ifdef MHD
      RootSoln[j][i].B1c = Soln[j][i].B1c;
      RootSoln[j][i].B2c = Soln[j][i].B2c;
      RootSoln[j][i].B3c = Soln[j][i].B3c;
#endif
#if (NSCALARS > 0)
      for (n=0; n<NSCALARS; n++) RootSoln[j][i].s[n] = Soln[j][i].s[n];
#endif
    }}
  }

  /* BUG FIX: Soln was leaked; its contents have been copied where needed */
  free_2d_array(Soln);

  return;
}
/*----------------------------------------------------------------------------*/
/* init_output: reads every <outputN> block from the input file into a
 * temporary OutputS and stores it in the global OutArray.  Blocks lacking
 * both out_fmt and name are reported and skipped.
 * NOTE(review): this excerpt is truncated; the remainder of the function
 * (closing of the outn loop etc.) lies outside the visible chunk.          */
void init_output(MeshS *pM)
{
  int i,j,outn,maxout;
  /* NOTE(review): defid[10] holds "out%d"; assumes outn stays below 7 digits
   * (maxout is small in practice) -- confirm if maxout can be large */
  char block[80], *fmt, defid[10];
  OutputS new_out;
  int usr_expr_flag;

  maxout = par_geti_def("job","maxout",MAXOUT_DEFAULT);

/* allocate output array */
  if((OutArray = (OutputS *)malloc(maxout*sizeof(OutputS))) == NULL){
    ath_error("[init_output]: Error allocating output array\n");
  }

/*--- loop over maxout output blocks, reading parameters into a temporary -----*
 *--- OutputS called new_out --------------------------------------------------*/

  for (outn=1; outn<=maxout; outn++) {

    sprintf(block,"output%d",outn);

/* An output format or output name is required.
 * If neither is present we write an error message and move on. */
    if((par_exist(block,"out_fmt") == 0) && (par_exist(block,"name") == 0)){
      ath_perr(-1,"[init_output]: neither %s/out_fmt, nor %s/name exist\n",
               block, block);
      continue;
    }

/* Zero (NULL) all members of the temporary OutputS structure "new_out" */
    memset(&new_out,0,sizeof(OutputS));

/* The next output time and number */
    new_out.t   = par_getd_def(block,"time",pM->time);
    new_out.num = par_geti_def(block,"num",0);

    new_out.dt = par_getd(block,"dt");   /* dt is the only required parameter */
    new_out.n  = outn;

/* level and domain number can be specified with SMR */
    new_out.nlevel  = par_geti_def(block,"level",-1);
    new_out.ndomain = par_geti_def(block,"domain",-1);

    if (par_exist(block,"dat_fmt")) new_out.dat_fmt = par_gets(block,"dat_fmt");

/* set id in output filename to input string if present, otherwise use "outN"
 * as default, where N is output number */
    sprintf(defid,"out%d",outn);
    new_out.id = par_gets_def(block,"id",defid);

    if(par_exist(block,"out_fmt"))
      fmt = new_out.out_fmt = par_gets(block,"out_fmt");

/* out:     controls what variable can be output (all, prim, or any of expr_*)
 * out_fmt: controls format of output (single variable) or dump (all cons/prim)
 * if "out" doesn't exist, we assume 'cons' variables are meant to be dumped */
    new_out.out = par_gets_def(block,"out","cons");

#ifdef PARTICLES
    /* check input for particle binning (=1, default) or not (=0) */
    new_out.out_pargrid = par_geti_def(block,"pargrid",
                                       check_particle_binning(new_out.out));
    if ((new_out.out_pargrid < 0) || (new_out.out_pargrid >1)) {
      ath_perr(-1,"[init_output]: %s/pargrid must be 0 or 1\n", block);
      continue;
    }

/* set particle property selection function. By default, will select all the
 * particles. Used only when particle output is called, otherwise useless. */
    if(par_exist(block,"par_prop")) {
      new_out.par_prop = get_usr_par_prop(par_gets(block,"par_prop"));
      if (new_out.par_prop == NULL) {
        ath_pout(0,"[init_output]: Particle selection function not found! \
Now use the default one.\n");
        new_out.par_prop = property_all;
      }
    }
/*----------------------------------------------------------------------------*/
/* problem: initializes a single linear-amplitude plane-wave perturbation
 * (nwx,nwy waves per box length) in an unstratified, self-gravitating,
 * magnetized shearing box, following Kim & Ostriker (2001).  The radial
 * wavenumber is sheared to the requested start time time0 via
 * kxt = kx + q*Omega*ky*time0, and the history variables used to track the
 * linear growth are enrolled at the end.
 *
 * Fixes vs. previous version:
 *  - removed leftover per-cell debug printf keyed to indices (58,16);
 *  - outermost interface fields at ie+1/je+1 now use the sheared kxt (the
 *    interior used kxt but the boundary recomputation used kx, inconsistent
 *    when time0 != 0) and fetch their positions into separate variables so
 *    x1 is not clobbered when both i==ie and j==je;
 *  - removed unused locals (xmin,xmax,Lx,Ly,Lz,kz,press,nwz,rval,rbz and an
 *    unused random-seed computation).                                       */
void problem(DomainS *pDomain)
{
  GridS *pG = pDomain->Grid;
  int is = pG->is, ie = pG->ie;
  int js = pG->js, je = pG->je;
  int ks = pG->ks, ke = pG->ke;
  int i,j,k;
  Real x1,x2,x3;
  Real rd, rp, rvx, rvy, rvz;
  Real rbx, rby;
  Real beta,B0,P0,kx,ky,amp;
  Real Q,nJ,cs,cs2;
  Real time0,kxt;
#ifdef SELF_GRAVITY
  Real Gcons;
#endif
  int nwx,nwy;  /* input number of waves per Lx,Ly [defaults: -6, 1] */

  if(pG->Nx[2] == 1) ShBoxCoord = xy; /* 2D xy-plane */

/* Read problem parameters. */
  Omega_0 = par_getd("problem","omega");
  qshear  = par_getd("problem","qshear");
  amp     = par_getd("problem","amp");

/* Read parameters for magnetic field */
  beta = par_getd("problem","beta");

/* Read parameters for self gravity */
  Q     = par_getd("problem","Q");
  nJ    = par_getd("problem","nJ");
  time0 = par_getd_def("problem","time0",0.0);

/* sound speed fixed by Q and nJ: cs = sqrt(4-2q)/(pi*nJ*Q) (Kim & Ostriker) */
  cs  = sqrt(4.0-2.0*qshear)/PI/nJ/Q;
  cs2 = SQR(cs);

#ifdef SELF_GRAVITY
  Gcons = nJ*cs2;
  grav_mean_rho = 1.0;
#ifndef SELF_GRAVITY_USING_FFT_DISK
  if(pG->Nx[2] >1) grav_mean_rho = 1.0;
#endif
/* Set gravity constant */
  four_pi_G = 4.0*PI*Gcons;
#endif /* SELF_GRAVITY */

  B0 = cs/sqrt(beta);
#ifndef BAROTROPIC
  P0 = cs2/Gamma;
#endif

/* initialize wavenumbers, given input number of waves per L */
  nwx = par_geti_def("problem","nwx",-6);
  nwy = par_geti_def("problem","nwy",1);
  ky = nwy*2.0*PI;
  kx = nwx*2.0*PI;
/* radial wavenumber sheared forward to the requested start time */
  kxt = kx+qshear*Omega_0*ky*time0;

  pG->time=time0;

  for (k=ks; k<=ke; k++) {
  for (j=js; j<=je; j++) {
  for (i=is; i<=ie; i++) {
    cc_pos(pG,i,j,k,&x1,&x2,&x3);
    rd  = 1.0+amp*cos(kxt*x1+ky*x2);
    rvx = amp*kx/ky*sin(kxt*x1+ky*x2);
    rvy = amp*sin(kxt*x1+ky*x2);
    rvz = 0.0;
    rp  = cs2*(rd-1.0);
    /* interface-field perturbations evaluated at the left cell faces */
    rbx = amp*nwy*cos(kxt*(x1-0.5*pG->dx1)+ky*x2);
    rby = -amp*nwx*cos(kxt*x1+ky*(x2-0.5*pG->dx2));

    pG->U[k][j][i].d  = rd;
    pG->U[k][j][i].M1 = rd*rvx;
    pG->U[k][j][i].M2 = rd*rvy;
#ifndef FARGO
    pG->U[k][j][i].M2 -= rd*(qshear*Omega_0*x1);
#endif
    pG->U[k][j][i].M3 = rd*rvz;
#ifdef ADIABATIC
    pG->U[k][j][i].E = (P0+rp)/Gamma_1
      + 0.5*(SQR(pG->U[k][j][i].M1) + SQR(pG->U[k][j][i].M2)
           + SQR(pG->U[k][j][i].M3))/rd;
#endif

#ifdef MHD
    pG->B1i[k][j][i] = rbx;
    pG->B2i[k][j][i] = B0+rby;
    pG->B3i[k][j][i] = 0.0;
    /* outermost interface fields: use kxt (not kx) for consistency with the
     * interior perturbation, and local position variables so x1/x2 of the
     * current cell are not clobbered (BUG FIX for the i==ie && j==je corner) */
    if (i==ie) {
      Real x1b,x2b,x3b;
      cc_pos(pG,ie+1,j,k,&x1b,&x2b,&x3b);
      pG->B1i[k][j][ie+1] = amp*nwy*cos(kxt*(x1b-0.5*pG->dx1)+ky*x2b);
    }
    if (j==je) {
      Real x1b,x2b,x3b;
      cc_pos(pG,i,je+1,k,&x1b,&x2b,&x3b);
      pG->B2i[k][je+1][i] = B0-amp*nwx*cos(kxt*x1b+ky*(x2b-0.5*pG->dx2));
    }
    if (pG->Nx[2] > 1 && k==ke) pG->B3i[ke+1][j][i] = 0.0;
#endif /* MHD */
  }}}

#ifdef MHD
/* cell-centered fields are averages of the surrounding interface fields */
  for (k=ks; k<=ke; k++) {
  for (j=js; j<=je; j++) {
  for (i=is; i<=ie; i++) {
    pG->U[k][j][i].B1c = 0.5*(pG->B1i[k][j][i]+pG->B1i[k][j][i+1]);
    pG->U[k][j][i].B2c = 0.5*(pG->B2i[k][j][i]+pG->B2i[k][j+1][i]);
    if (pG->Nx[2] >1)
      pG->U[k][j][i].B3c = 0.5*(pG->B3i[k][j][i]+pG->B3i[k+1][j][i]);
    else
      pG->U[k][j][i].B3c = pG->B3i[k][j][i];
#ifdef ADIABATIC
    pG->U[k][j][i].E += 0.5*(SQR(pG->U[k][j][i].B1c)
      + SQR(pG->U[k][j][i].B2c) + SQR(pG->U[k][j][i].B3c));
#endif
  }}}
#endif /* MHD */

/* enroll gravitational potential function */
  ShearingBoxPot = UnstratifiedDisk;

/* enroll new history variables, only once with SMR */
  dVol = pDomain->Nx[0]*pDomain->Nx[1]*pDomain->Nx[2];

/* history dump for linear perturbation amplitude. See Kim & Ostriker 2001 */
  dump_history_enroll(hst_sigma, "<sigma>");
  dump_history_enroll(hst_ux, "<ux>");
  dump_history_enroll(hst_uy, "<uy>");
#ifdef MHD
  dump_history_enroll(hst_m1, "<m1>");
  dump_history_enroll(hst_m2, "<m2>");
#endif

/* history dump for perturbed quantities at a specific grid point */
  dump_history_enroll(hst_dSigma, "<dSigma>");
  dump_history_enroll(hst_Vx, "<Vx>");
  dump_history_enroll(hst_dVy, "<dVy>");
#ifdef MHD
  dump_history_enroll(hst_Bx, "<Bx>");
  dump_history_enroll(hst_dBy, "<dBy>");
#endif /* MHD */
#ifdef SELF_GRAVITY
  dump_history_enroll(hst_Phi, "<Phi>");
  dump_history_enroll(hst_dPhi, "<dPhi>");
#endif
#ifdef ADIABATIC
  dump_history_enroll(hst_dE, "<dE>");
#endif

  /* use ath_pout (consistent with the rest of the file) instead of printf */
  ath_pout(0,"=== end of problem setting ===\n");

  return;
}
/*----------------------------------------------------------------------------*/
/* problem: sets up a 3D stratified disk of gas plus dust particles
 * (par_strat3d).  The gas gets a Gaussian vertical density profile; each
 * particle type gets its own scale height, stopping time and (optionally)
 * NSH equilibrium drift velocities from MultiNSH().
 *
 * Fixes vs. previous version:
 *  - FEEDBACK error message said "[par_strat2d]" in this 3D generator;
 *  - dpar_thresh (a Real) was read with par_geti_def, truncating
 *    non-integer input -- now par_getd_def;
 *  - particle drift velocity used SQR(x2p) (azimuthal coordinate) where the
 *    drift profile is a function of height (etavk = |vsc1+vsc2*h^2| above)
 *    -- now SQR(x3p);
 *  - removed unused local t.                                                */
void problem(DomainS *pDomain)
{
  GridS *pGrid = pDomain->Grid;
  int i,j,k,ks,pt,tsmode;
  long p,q;
  Real ScaleHg,tsmin,tsmax,tscrit,amin,amax,Hparmin,Hparmax;
  Real *ep,*ScaleHpar,epsum,mratio,pwind,rhoaconv,etavk;
  Real *epsilon,*uxNSH,*uyNSH,**wxNSH,**wyNSH;
  Real rhog,h,x1,x2,x3,x1p,x2p,x3p,zmin,zmax,dx3_1,b;
  long int iseed = myID_Comm_world; /* Initialize on the first call to ran2 */

  if (pDomain->Nx[2] == 1) {
    ath_error("[par_strat3d]: par_strat3d only works for 3D problem.\n");
  }
#ifdef MPI_PARALLEL
  if (pDomain->NGrid[2] > 2) {
    ath_error("[par_strat3d]: The z-domain can not be decomposed into more than 2 grids\n");
  }
#endif

/* Initialize boxsize */
  x1min = pGrid->MinX[0];  x1max = pGrid->MaxX[0];  Lx = x1max - x1min;
  x2min = pGrid->MinX[1];  x2max = pGrid->MaxX[1];  Ly = x2max - x2min;
  x3min = par_getd("domain1","x3min");
  x3max = par_getd("domain1","x3max");
  Lz = x3max - x3min;

  Lg = nghost*pGrid->dx3; /* size of the ghost zone */

  ks = pGrid->ks;

/* Read initial conditions */
  Omega_0 = par_getd("problem","omega");
  qshear  = par_getd_def("problem","qshear",1.5);
  ipert   = par_geti_def("problem","ipert",1);
  vsc1    = par_getd_def("problem","vsc1",0.05); /* in unit of iso_sound (N.B.!) */
  vsc2    = par_getd_def("problem","vsc2",0.0);

  vsc1 = vsc1 * Iso_csound;
  vsc2 = vsc2 * Iso_csound;

  ScaleHg = Iso_csound/Omega_0;   /* gas scale height */

/* particle number */
  Npar = (long)(par_geti("particle","parnumgrid"));

  pGrid->nparticle = Npar*npartypes;
  for (i=0; i<npartypes; i++) grproperty[i].num = Npar;

  if (pGrid->nparticle+2 > pGrid->arrsize)
    particle_realloc(pGrid, pGrid->nparticle+2);

  ep        = (Real*)calloc_1d_array(npartypes, sizeof(Real));
  ScaleHpar = (Real*)calloc_1d_array(npartypes, sizeof(Real));

  epsilon = (Real*)calloc_1d_array(npartypes, sizeof(Real));
  wxNSH   = (Real**)calloc_2d_array(pGrid->Nx[2]+1, npartypes,sizeof(Real));
  wyNSH   = (Real**)calloc_2d_array(pGrid->Nx[2]+1, npartypes,sizeof(Real));
  uxNSH   = (Real*)calloc_1d_array(pGrid->Nx[2]+1, sizeof(Real));
  uyNSH   = (Real*)calloc_1d_array(pGrid->Nx[2]+1, sizeof(Real));

/* particle stopping time */
  tsmode = par_geti("particle","tsmode");
  if (tsmode == 3) {/* fixed stopping time */
    tsmin = par_getd("problem","tsmin"); /* in code unit */
    tsmax = par_getd("problem","tsmax");
    tscrit= par_getd("problem","tscrit");

    for (i=0; i<npartypes; i++) {
      tstop0[i] = tsmin*exp(i*log(tsmax/tsmin)/MAX(npartypes-1,1.0));
      grproperty[i].rad = tstop0[i];
      /* use fully implicit integrator for well coupled particles */
      if (tstop0[i] < tscrit) grproperty[i].integrator = 3;
    }
  }
  else {
    amin = par_getd("problem","amin");
    amax = par_getd("problem","amax");

    for (i=0; i<npartypes; i++)
      grproperty[i].rad = amin*exp(i*log(amax/amin)/MAX(npartypes-1,1.0));

    if (tsmode <= 2) {/* Epstein/General regime */
      /* conversion factor for rhoa */
      rhoaconv = par_getd_def("problem","rhoaconv",1.0);

      for (i=0; i<npartypes; i++)
        grrhoa[i]=grproperty[i].rad*rhoaconv;
    }

    if (tsmode == 1) /* General drag formula */
      alamcoeff = par_getd("problem","alamcoeff");
  }

/* particle scale height */
  Hparmax = par_getd("problem","hparmax"); /* in unit of gas scale height */
  Hparmin = par_getd("problem","hparmin");
  for (i=0; i<npartypes; i++)
    ScaleHpar[i] = Hparmax*exp(-i*log(Hparmax/Hparmin)/MAX(npartypes-1,1.0));

#ifdef FEEDBACK
  mratio = par_getd_def("problem","mratio",0.0); /* total mass fraction */
  pwind  = par_getd_def("problem","pwind",0.0);  /* power law index */
  if (mratio < 0.0)
    ath_error("[par_strat3d]: mratio must be positive!\n");

  epsum = 0.0;
  for (i=0; i<npartypes; i++)
  {
    ep[i] = pow(grproperty[i].rad,pwind);
    epsum += ep[i];
  }

  for (i=0; i<npartypes; i++)
  {
    ep[i] = mratio*ep[i]/epsum;
    grproperty[i].m = sqrt(2.0*PI)*ScaleHg/Lz*ep[i]*
                      pGrid->Nx[0]*pGrid->Nx[1]*pGrid->Nx[2]/Npar;
  }
#else
  mratio = 0.0;
  for (i=0; i<npartypes; i++) ep[i] = 0.0;
#endif

/* NSH equilibrium: solve for gas (uxNSH,uyNSH) and particle (wxNSH,wyNSH)
 * drift velocities at each vertical interface */
  for (k=pGrid->ks; k<=pGrid->ke+1; k++) {

    h = pGrid->MinX[2] + (k-pGrid->ks)*pGrid->dx3;
    q = k - ks;
    etavk = fabs(vsc1+vsc2*SQR(h));   /* height-dependent drift speed */

    for (i=0; i<npartypes; i++) {
      epsilon[i] = ep[i]/ScaleHpar[i]*exp(-0.5*SQR(h/ScaleHg)
        *(SQR(1.0/ScaleHpar[i])-1.0))/erf(Lz/(sqrt(8.0)*ScaleHpar[i]*ScaleHg));

      if (tsmode != 3)
        tstop0[i] = get_ts(pGrid,i,exp(-0.5*SQR(h/ScaleHg)),Iso_csound,etavk);
    }

    MultiNSH(npartypes, tstop0, epsilon, etavk,
             &uxNSH[q], &uyNSH[q], wxNSH[q], wyNSH[q]);
  }

/* Now set initial conditions for the gas */
  for (k=pGrid->ks; k<=pGrid->ke; k++) {
  for (j=pGrid->js; j<=pGrid->je; j++) {
  for (i=pGrid->is; i<=pGrid->ie; i++) {
    cc_pos(pGrid,i,j,k,&x1,&x2,&x3);

    rhog = exp(-0.5*SQR(x3/ScaleHg));   /* Gaussian vertical profile */
    pGrid->U[k][j][i].d = rhog;

    if (ipert != 1) {/* NSH velocity */
      pGrid->U[k][j][i].M1 = 0.5*rhog*(uxNSH[k-ks]+uxNSH[k-ks+1]);
      pGrid->U[k][j][i].M2 = 0.5*rhog*(uyNSH[k-ks]+uyNSH[k-ks+1]);
    } else {
      pGrid->U[k][j][i].M1 = 0.0;
      pGrid->U[k][j][i].M2 = 0.0;
    }

    pGrid->U[k][j][i].M3 = 0.0;
#ifndef FARGO
    pGrid->U[k][j][i].M2 -= qshear*rhog*Omega_0*x1;
#endif
  }}}

/* Now set initial conditions for the particles */
  p = 0;
  dx3_1 = 1.0/pGrid->dx3;
  zmin = pGrid->MinX[2];
  zmax = pGrid->MaxX[2];

  for (q=0; q<Npar; q++) {
  for (pt=0; pt<npartypes; pt++) {

    x1p = x1min + Lx*ran2(&iseed);
    x2p = x2min + Ly*ran2(&iseed);
    /* Gaussian vertical distribution, rejection-sampled into the local grid */
    x3p = ScaleHpar[pt]*ScaleHg*Normal(&iseed);
    while ((x3p >= zmax) || (x3p < zmin))
      x3p = ScaleHpar[pt]*ScaleHg*Normal(&iseed);

    pGrid->particle[p].property = pt;
    pGrid->particle[p].x1 = x1p;
    pGrid->particle[p].x2 = x2p;
    pGrid->particle[p].x3 = x3p;

    if (ipert != 1) {/* NSH velocity: interpolate linearly in z */
      cellk(pGrid, x3p, dx3_1, &k, &b);
      k = k-pGrid->ks;  b = b - pGrid->ks;
      pGrid->particle[p].v1 = (k+1-b)*wxNSH[k][pt]+(b-k)*wxNSH[k+1][pt];
      pGrid->particle[p].v2 = (k+1-b)*wyNSH[k][pt]+(b-k)*wyNSH[k+1][pt];
    } else {
      pGrid->particle[p].v1 = 0.0;
      /* BUG FIX: drift is a function of height -- use x3p, not x2p */
      pGrid->particle[p].v2 = vsc1+vsc2*SQR(x3p);
    }

    pGrid->particle[p].v3 = 0.0;
#ifndef FARGO
    pGrid->particle[p].v2 -= qshear*Omega_0*x1p;
#endif

    pGrid->particle[p].pos = 1; /* grid particle */
    pGrid->particle[p].my_id = p;
#ifdef MPI_PARALLEL
    pGrid->particle[p].init_id = myID_Comm_world;
#endif
    p++;
  }}

/* enroll gravitational potential function, shearing sheet BC functions */
  ShearingBoxPot = StratifiedDisk;

  dump_history_enroll(hst_rho_Vx_dVy, "<rho Vx dVy>");

/* set the # of the particles in list output
 * (by default, output 1 particle per cell) */
  nlis = par_geti_def("problem","nlis",pGrid->Nx[0]*pGrid->Nx[1]*pGrid->Nx[2]);

/* set the number of particles to keep track of */
  ntrack = par_geti_def("problem","ntrack",2000);

/* set the threshold particle density (Real: read with par_getd_def) */
  dpar_thresh = par_getd_def("problem","dpar_thresh",10.0);

/* finalize */
  free(ep);  free(ScaleHpar);
  free(epsilon);
  free_2d_array(wxNSH);  free_2d_array(wyNSH);
  free(uxNSH);           free(uyNSH);

  return;
}