Example #1
/*! \fn void ludcmp(Real **a, int n, int *indx, Real *d)
 *  \brief LU decomposition from Numerical Recipes
 *
 * Using Crout's method with partial pivoting
 * a is the input matrix and is returned with its LU decomposition in place;
 * n is the matrix size; indx records the history of row permutations;
 * and d = +1 (-1) for an even (odd) number of row interchanges.
 */
void ludcmp(Real **a, int n, int *indx, Real *d)
{
  int i,imax,j,k;
  Real big,dum,sum,temp;
  Real *rowscale;  /* the implicit scaling of each row */

  rowscale = (Real*)calloc_1d_array(n, sizeof(Real));
  *d=1.0;  /* No row interchanges yet */

  for (i=0;i<n;i++)
  { /* Loop over rows to get the implicit scaling information */
    big=0.0;
    for (j=0;j<n;j++)
      if ((temp=fabs(a[i][j])) > big) big=temp;
    if (big == 0.0) ath_error("[LUdecomp]:Input matrix is singular!");
    rowscale[i]=1.0/big;  /* Save the scaling */
  }

  for (j=0;j<n;j++) { /* Loop over columns of Crout's method */
    /* Calculate the upper block */
    for (i=0;i<j;i++) {
      sum=a[i][j];
      for (k=0;k<i;k++) sum -= a[i][k]*a[k][j];
      a[i][j]=sum;
    }
    /* Calculate the lower block (first step) */
    big=0.0;
    for (i=j;i<n;i++) {
      sum=a[i][j];
      for (k=0;k<j;k++)
        sum -= a[i][k]*a[k][j];
      a[i][j]=sum;
      /* search for the largest pivot element */
      if ( (dum=rowscale[i]*fabs(sum)) >= big) {
        big=dum;
        imax=i;
      }
    }
    /* row interchange */
    if (j != imax) {
      for (k=0;k<n;k++) {
        dum=a[imax][k];
        a[imax][k]=a[j][k];
        a[j][k]=dum;
      }
      *d = -(*d);
      rowscale[imax]=rowscale[j];
    }
    indx[j]=imax; /* record row interchange history */
    /* Calculate the lower block (second step) */
    if (a[j][j] == 0.0) a[j][j]=TINY_NUMBER;
    dum=1.0/(a[j][j]);
    for (i=j+1;i<n;i++) a[i][j] *= dum;
  }
  free(rowscale);
}
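A minimal usage sketch (not part of the original source): ludcmp() factors A in place, and the companion Numerical Recipes routine lubksb() (called in Example #2 below; its signature, void lubksb(Real **a, int n, int *indx, Real *b), is assumed from that call) then solves A x = b by forward and back substitution.

void solve_demo(void)
{
  Real r0[2] = {4.0, 3.0}, r1[2] = {6.0, 3.0};  /* A = [[4,3],[6,3]] */
  Real *a[2] = {r0, r1};
  Real b[2]  = {10.0, 12.0};                    /* expect x = (1,2)  */
  int  indx[2];
  Real d;

  ludcmp(a, 2, indx, &d);  /* a now holds the LU factors of A */
  lubksb(a, 2, indx, b);   /* b now holds the solution x      */
}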
Example #2
/*! \fn void InverseMatrix(Real **a, int n, Real **b)
 *  \brief Inverse matrix solver
 *
 * a: input matrix; n: matrix size; b: output matrix (the inverse)
 * Note: the input matrix will be DESTROYED
 */
void InverseMatrix(Real **a, int n, Real **b)
{
  int i,j,*indx;
  Real *col,d;

  indx = (int*)calloc_1d_array(n, sizeof(int));
  col = (Real*)calloc_1d_array(n, sizeof(Real));

  ludcmp(a,n,indx,&d);

  for (j=0; j<n; j++) {
    for (i=0; i<n; i++) col[i]=0.0;
    col[j]=1.0;
    lubksb(a, n, indx, col);
    for (i=0; i<n; i++)    b[i][j] = col[i];
  }

  /* free temporary arrays */
  free_1d_array(indx);
  free_1d_array(col);

  return;
}
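Why the identity columns: solving A x = e_j for each unit vector e_j yields the j-th column of A^{-1}, so a single LU factorization serves all n back-substitutions. A hypothetical driver (values invented for illustration; recall that a is destroyed):

void inverse_demo(void)
{
  Real r0[2] = {4.0, 3.0}, r1[2] = {6.0, 3.0};  /* A = [[4,3],[6,3]] */
  Real s0[2], s1[2];
  Real *a[2] = {r0, r1}, *b[2] = {s0, s1};

  InverseMatrix(a, 2, b);  /* b = A^{-1} = [[-1/2, 1/2], [1, -2/3]] */
}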
Example #3
/*! \fn void MultiNSH(int n, Real *tstop, Real *mratio, Real etavk,
 *                   Real *uxNSH, Real *uyNSH, Real *wxNSH, Real *wyNSH)
 *  \brief Multi-species NSH equilibrium 
 *
 * Input: number of particle types (n), dust stopping time and mass ratio
 *        arrays, and drift speed etavk.
 * Output: gas NSH equilibrium velocity (u), and dust NSH equilibrium velocity
 *         array (w).
 */
void MultiNSH(int n, Real *tstop, Real *mratio, Real etavk,
                     Real *uxNSH, Real *uyNSH, Real *wxNSH, Real *wyNSH)
{
  int i,j;
  Real *Lambda1,**Lam1GamP1, **A, **B, **Tmp;

  Lambda1 = (Real*)calloc_1d_array(n, sizeof(Real));     /* Lambda^{-1} */
  Lam1GamP1=(Real**)calloc_2d_array(n, n, sizeof(Real)); /* Lambda1*(1+Gamma) */
  A       = (Real**)calloc_2d_array(n, n, sizeof(Real));
  B       = (Real**)calloc_2d_array(n, n, sizeof(Real));
  Tmp     = (Real**)calloc_2d_array(n, n, sizeof(Real));

  /* definitions */
  for (i=0; i<n; i++){
    for (j=0; j<n; j++)
      Lam1GamP1[i][j] = mratio[j];
    Lam1GamP1[i][i] += 1.0;
    Lambda1[i] = 1.0/(tstop[i]+1.0e-16);
    for (j=0; j<n; j++)
      Lam1GamP1[i][j] *= Lambda1[i];
  }

  /* Calculate A and B */
  MatrixMult(Lam1GamP1, Lam1GamP1, n,n,n, Tmp);
  for (i=0; i<n; i++) Tmp[i][i] += 1.0;
  InverseMatrix(Tmp, n, B);
  for (i=0; i<n; i++)
  for (j=0; j<n; j++)
    B[i][j] *= Lambda1[j];
  MatrixMult(Lam1GamP1, B, n,n,n, A);

  /* Obtain NSH velocities */
  *uxNSH = 0.0;  *uyNSH = 0.0;
  for (i=0; i<n; i++){
    wxNSH[i] = 0.0;
    wyNSH[i] = 0.0;
    for (j=0; j<n; j++){
      wxNSH[i] -= B[i][j];
      wyNSH[i] -= A[i][j];
    }
    wxNSH[i] *= 2.0*etavk;
    wyNSH[i] *= etavk;
    *uxNSH -= mratio[i]*wxNSH[i];
    *uyNSH -= mratio[i]*wyNSH[i];
    wyNSH[i] += etavk;
  }

  free(Lambda1);
  free_2d_array(A);         free_2d_array(B);
  free_2d_array(Lam1GamP1); free_2d_array(Tmp);

  return;
}
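A hypothetical call sketch (all values invented for illustration): two dust species with different stopping times and dust-to-gas ratios.

void nsh_demo(void)
{
  Real tstop[2]  = {0.1, 1.0};    /* dimensionless stopping times */
  Real mratio[2] = {0.01, 0.02};  /* dust-to-gas mass ratios      */
  Real etavk     = 0.05;          /* drift speed                  */
  Real ux, uy, wx[2], wy[2];

  MultiNSH(2, tstop, mratio, etavk, &ux, &uy, wx, wy);
  /* (ux,uy): gas equilibrium velocity; (wx[i],wy[i]): per-species dust */
}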
Example #4
/* problem: problem generator for the rotating magnetized cylindrical wind
 * (cylwindrotb) */
void problem(DomainS *pDomain)
{
  GridS *pG = pDomain->Grid;
  int i,j,k,n,converged;
  int is,ie,il,iu,js,je,jl,ju,ks,ke,kl,ku;
  int nx1, nx2, nx3;
  Real x1, x2, x3;
  Real a,b,c,d,xmin,xmax,ymin,ymax;
  Real x,y,xslow,yslow,xfast,yfast;
  Real R0,R1,R2,rho,Mdot,K,Omega,Pgas,beta,vR,BR,vphi,Bphi;
  ConsS *Wind=NULL;
  Real *pU=NULL,*pUl=NULL,*pUr=NULL;
  Real lsf,rsf;

  is = pG->is;  ie = pG->ie;  nx1 = ie-is+1;
  js = pG->js;  je = pG->je;  nx2 = je-js+1;
  ks = pG->ks;  ke = pG->ke;  nx3 = ke-ks+1;

  il = is-nghost*(nx1>1);  iu = ie+nghost*(nx1>1);  nx1 = iu-il+1;
  jl = js-nghost*(nx2>1);  ju = je+nghost*(nx2>1);  nx2 = ju-jl+1;
  kl = ks-nghost*(nx3>1);  ku = ke+nghost*(nx3>1);  nx3 = ku-kl+1;

#ifndef CYLINDRICAL
  ath_error("[cylwindrotb]: This problem only works in cylindrical!\n");
#endif
#ifndef MHD
  ath_error("[cylwindrotb]: This problem only works in MHD!\n");
#endif

  if (nx1==1) {
    ath_error("[cylwindrotb]: Only R can be used in 1D!\n");
  }
  else if (nx2==1 && nx3>1) {
    ath_error("[cylwindrotb]: Only (R,phi) can be used in 2D!\n");
  }

  /* Allocate memory for wind solution */
  if ((Wind = (ConsS*)calloc_1d_array(nx1+1,sizeof(ConsS))) == NULL)
    ath_error("[cylwindrotb]: Error allocating memory\n");

  /* Allocate memory for grid solution */
  if ((RootSoln = (ConsS***)calloc_3d_array(nx3,nx2,nx1,sizeof(ConsS))) == NULL)
    ath_error("[cylwindrotb]: Error allocating memory\n");

  theta = par_getd("problem","theta");
  omega = par_getd("problem","omega");
  vz    = par_getd("problem","vz");

  /* This numerical solution was obtained from MATLAB.
   * Ideally, we would replace this with a nonlinear solver... */
  xslow = 0.5243264128;
  yslow = 2.4985859152;
  xfast = 1.6383327831;
  yfast = 0.5373957134;
  E     = 7.8744739104;
  eta   = 2.3608500383;

  xmin = par_getd("domain1","x1min")/R_A;
  xmax = par_getd("domain1","x1max")/R_A;
  ymin = 0.45/rho_A;
  ymax = 2.6/rho_A;

  printf("theta = %f,\t omega = %f,\t eta = %f,\t E = %f\n", theta,omega,eta,E);
  printf("xslow = %f,\t yslow = %f,\t xfast = %f,\t yfast = %f\n", xslow,yslow,xfast,yfast);
  printf("xmin = %f,\t ymin = %f,\t xmax = %f,\t ymax = %f\n", xmin,ymin,xmax,ymax);


  /* Calculate the 1D wind solution at cell-interfaces */
  for (i=il; i<=iu+1; i++) {
    memset(&(Wind[i]),0,sizeof(ConsS));
    cc_pos(pG,i,js,ks,&x1,&x2,&x3);

    /* Want the solution at R-interfaces */
    R0 = x1 - 0.5*pG->dx1;
    x = R0/R_A;

    /* Look for a sign change interval */
    if (x < xslow) {
      sign_change(myfunc,yslow,10.0*ymax,x,&a,&b);
      sign_change(myfunc,b,10.0*ymax,x,&a,&b);
    } else if (x < 1.0) {
      sign_change(myfunc,1.0+TINY_NUMBER,yslow,x,&a,&b);
    } else if (x < xfast) {
      sign_change(myfunc,yfast,1.0-TINY_NUMBER,x,&a,&b);
      if (!sign_change(myfunc,b,1.0-TINY_NUMBER,x,&a,&b)) {
        a = yfast;
        b = 1.0-TINY_NUMBER;
      }
    } else {
      sign_change(myfunc,0.5*ymin,yfast,x,&a,&b);
    }

    /* Use bisection to find the root */
    converged = bisection(myfunc,a,b,x,&y);
    if(!converged) {
      ath_error("[cylwindrotb]:  Bisection did not converge!\n");
    }

    /* Construct the solution */
    rho = rho_A*y;
    Mdot = sqrt(R_A*SQR(rho_A)*GM*eta);
    Omega = sqrt((GM*omega)/pow(R_A,3));
    K = (GM*theta)/(Gamma*pow(rho_A,Gamma_1)*R_A);
    Pgas = K*pow(rho,Gamma);
    vR = Mdot/(R0*rho);
    beta = sqrt(1.0/rho_A);
    BR = beta*rho*vR;
    vphi = R0*Omega*(1.0/SQR(x)-y)/(1.0-y);
    Bphi = beta*rho*(vphi-R0*Omega);

    Wind[i].d   = rho;
    Wind[i].M1  = rho*vR;
    Wind[i].M2  = rho*vphi;
    Wind[i].M3  = rho*vz;
    Wind[i].B1c = BR;
    Wind[i].B2c = Bphi;
    Wind[i].B3c = 0.0;
    Wind[i].E   = Pgas/Gamma_1
      + 0.5*(SQR(Wind[i].B1c) + SQR(Wind[i].B2c) + SQR(Wind[i].B3c))
      + 0.5*(SQR(Wind[i].M1 ) + SQR(Wind[i].M2 ) + SQR(Wind[i].M3 ))/Wind[i].d;
  }

  /* Average the wind solution across the zone for cc variables */
  for (i=il; i<=iu; i++) {
    memset(&(pG->U[ks][js][i]),0,sizeof(ConsS));
    cc_pos(pG,i,js,ks,&x1,&x2,&x3);
    lsf = (x1 - 0.5*pG->dx1)/x1;
    rsf = (x1 + 0.5*pG->dx1)/x1;

    pU  = (Real*)&(pG->U[ks][js][i]);
    pUl = (Real*)&(Wind[i]);
    pUr = (Real*)&(Wind[i+1]);
    for (n=0; n<NWAVE; n++) {
      pU[n] = 0.5*(lsf*pUl[n] + rsf*pUr[n]);
    }

    pG->B1i[ks][js][i]   = Wind[i].B1c;
    pG->B2i[ks][js][i]   = 0.5*(lsf*Wind[i].B2c + rsf*Wind[i+1].B2c);
    pG->B3i[ks][js][i]   = 0.5*(lsf*Wind[i].B3c + rsf*Wind[i+1].B3c);
  }

  /* Copy 1D solution across the grid and save */
  for (k=kl; k<=ku; k++) {
    for (j=jl; j<=ju; j++) {
      for (i=il; i<=iu; i++) {
        pG->U[k][j][i] = pG->U[ks][js][i];
        pG->B1i[k][j][i] = pG->B1i[ks][js][i];
        pG->B2i[k][j][i] = pG->B2i[ks][js][i];
        pG->B3i[k][j][i] = pG->B3i[ks][js][i];
        RootSoln[k][j][i]  = pG->U[ks][js][i];
      }
    }
  }

  StaticGravPot = grav_pot;
  bvals_mhd_fun(pDomain,left_x1,do_nothing_bc);
  bvals_mhd_fun(pDomain,right_x1,do_nothing_bc);

  free_1d_array((void *)Wind);

  return;
}
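The root find above leans on two helpers not shown here: sign_change(), which brackets a sign change of myfunc, and bisection(), which refines it. A generic sketch of the latter, with an assumed signature (illustrative only, not the actual Athena implementation):

/* Bisection sketch: given f(x,a) and f(x,b) of opposite sign, halve [a,b]
 * until a root y of f(x,y)=0 is bracketed.  Returns 1 on convergence. */
int bisection_sketch(Real (*f)(Real x, Real y), Real a, Real b,
                     Real x, Real *y)
{
  int it;
  Real fa = f(x,a), fm;

  for (it=0; it<100; it++) {
    *y = 0.5*(a+b);
    fm = f(x,*y);
    if (fm == 0.0 || 0.5*(b-a) < 1.0e-12) return 1;  /* converged */
    if (fa*fm < 0.0) {b = *y;}             /* root lies in [a, *y] */
    else             {a = *y; fa = fm;}    /* root lies in [*y, b] */
  }
  return 0;  /* no convergence within 100 bisections */
}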
Example #5
void ionrad_prolong_snd(GridS *pGrid, int dim, int level, int domnumber)
{
  MeshS *pMesh = pGrid->Mesh;
  int ncg, ierr;
  GridOvrlpS *pCO;
  int fixed, arrsize;
  int i, j, k;
  int indexarith;

#ifdef MPI_PARALLEL
  MPI_Request *send_rq;
  send_rq = (MPI_Request*) calloc_1d_array(pGrid->NCGrid,sizeof(MPI_Request)) ;
  int tag3;
#endif

/* Loop over children grids to fill their buffer arrays*/
/* If not using MPI, this is the only operation that is performed*/
/* If using MPI, data will be sent to child grid in next step*/
  for (ncg=0; ncg<(pGrid->NCGrid); ncg++){
    pCO=(GridOvrlpS*)&(pGrid->CGrid[ncg]);


    switch(dim) {

    /*For the +x/-x direction*/
    case 0: case 1: {
      if (dim % 2 == 0) {
	fixed = pCO->ijks[0] - nghost;
      } else {
	fixed = pCO->ijke[0] + 1 - nghost;
      }

      if(pCO->ionFlx[dim] != NULL) {
	for (k=pCO->ijks[2] - nghost; k<= pCO->ijke[2]+1 - nghost; k++) {
	  for (j=pCO->ijks[1] - nghost; j<= pCO->ijke[1]+1 - nghost; j++) {
	    indexarith = (k-(pCO->ijks[2]-nghost))*(pCO->ijke[1] - pCO->ijks[1] + 2)+j-(pCO->ijks[1]-nghost);

	    /*Store data in the child grid overlap structure ionFlx (which is a 1D array for the purposes of MPI communication)*/
	    pCO->ionFlx[dim][indexarith] = pGrid->EdgeFlux[k][j][fixed];
	  }
	}
	/*TO_DO: Will need to check indexing to see if it's +1 or +2*/
	arrsize = (pCO->ijke[2] + 2 - pCO->ijks[2]) * (pCO->ijke[1] + 2 - pCO->ijks[1]);
      }
      break;
    }

    /*For the +y/-y direction*/
    case 2: case 3: {
      if (dim % 2 == 0) {
	fixed = pCO->ijks[1] - nghost;
      } else {
	fixed = pCO->ijke[1] + 1 - nghost;
      }
      
      if(pCO->ionFlx[dim] != NULL) {
	  for (k=pCO->ijks[2] - nghost; k<= pCO->ijke[2]+1 - nghost; k++) {
	    for (i=pCO->ijks[0] - nghost; i<= pCO->ijke[0]+1 - nghost; i++) {
	      pCO->ionFlx[dim][(k-(pCO->ijks[2]-nghost))*(pCO->ijke[0] - pCO->ijks[0] + 2)+i-(pCO->ijks[0]-nghost)] = pGrid->EdgeFlux[k][fixed][i];
	    }
	  }
	  arrsize = (pCO->ijke[2] + 2 - pCO->ijks[2]) * (pCO->ijke[0] + 2 - pCO->ijks[0]);
      }
      break;
    }

    /*For the +z/-z direction*/
    case 4: case 5: {
      if (dim % 2 == 0) {
	fixed = pCO->ijks[2] - nghost;
      } else {
	fixed = pCO->ijke[2] + 1 - nghost;
      }
      if(pCO->ionFlx[dim] != NULL) {
	for (j=pCO->ijks[1] - nghost; j<= pCO->ijke[1]+1 - nghost; j++) {
	  for (i=pCO->ijks[0] - nghost; i<= pCO->ijke[0]+1 - nghost; i++) {
	    pCO->ionFlx[dim][(j-(pCO->ijks[1]-nghost))*(pCO->ijke[0] - pCO->ijks[0] + 2)+i-(pCO->ijks[0]-nghost)] = pGrid->EdgeFlux[fixed][j][i];
	  }
	}
	arrsize = (pCO->ijke[0] + 2 - pCO->ijks[0]) * (pCO->ijke[1] + 2 - pCO->ijks[1]);
      }
      break;
    }
    }
     
    /*AT 9/26/12: I think the NULL check only needs to be here and needs to be modified above.*/
    /* The point of such a check is to ensure that we're not trying to communicate, when there's an overlapping grid not in the direction of propagation*/
    /*Send buffer arrays of radiation flux to children grids*/

    if(pCO->ionFlx[dim] != NULL) {
#ifdef MPI_PARALLEL

      tag3 = domnumber + 257;

      /*Send data to child grid*/
      ierr = MPI_Isend(pCO->ionFlx[dim], arrsize, MP_RL, pCO->ID, tag3, pMesh->Domain[level][domnumber].Comm_Children, &send_rq[ncg]);
      
#endif
    }
  }
}
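The indexarith expression above is a row-major flattening of a 2D face of EdgeFlux into the 1D ionFlx message buffer. The same arithmetic, pulled out as a hypothetical helper for clarity:

/* Flatten face indices (k,j), with k in [ks, ke+1] and j in [js, je+1],
 * into a 1D row-major buffer whose row width is (je - js + 2). */
int flatten_face_index(int k, int j, int ks, int js, int je)
{
  return (k - ks)*(je - js + 2) + (j - js);
}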
Example #6
void ionrad_prolong_rcv(GridS *pGrid, int dim, int level, int domnumber)
{

  MeshS *pMesh = pGrid->Mesh;
  int npg;
  int i, j, k, fixed, arrsize, indexarith;
  int is, js, ks;
  int err, ierr;
  GridOvrlpS *pPO, *pCO;

  DomainS *pDomain;
  pDomain = (DomainS*)&(pMesh->Domain[level][domnumber]);  /* ptr to Domain */

#ifdef MPI_PARALLEL
  int allrcv =0;
  int rcvd = 0;
  int receive_done;
  int *succtest;
  int tag3;
  MPI_Status stat;
  MPI_Request *rcv_rq;
  int *rcvdarrsize;
  int dy, dz;

  rcv_rq = (MPI_Request*) calloc_1d_array(pGrid->NPGrid,sizeof(MPI_Request));
  succtest = (int*) calloc_1d_array(pGrid->NPGrid,sizeof(int)) ;
  rcvdarrsize = (int*) calloc_1d_array(pGrid->NPGrid,sizeof(int)) ;
#else
  GridS *parentgrid; /*Parent grid of current grid*/
#endif


/*Find my parent grid overlap structure(s if MPI) to receive data from it*/
  for (npg=0; npg<(pGrid->NPGrid); npg++)
    {
      pPO=(GridOvrlpS*)&(pGrid->PGrid[npg]);


#ifdef MPI_PARALLEL
      if(pPO->ionFlx[dim] != NULL) {
	tag3 = pPO->DomN + 257;

	
	/*AT 9/21/12: TO_DO: Insert case statement, so that arrsize is pulled from the correct indices*/
	
	/*Find the size of the array of flux values being transferred*/
	/* coarse-buffer extents: floor((ijke - ijks)/2) + 2 interface values;
	 * integer division equals floor() here since the indices are ordered */
	dz = (pPO->ijke[2] - pPO->ijks[2])/2 + 2;
	dy = (pPO->ijke[1] - pPO->ijks[1])/2 + 2;

	rcvdarrsize[npg] = dy * dz;
	/*Initiate non-blocking receive*/
	ierr = MPI_Irecv(pPO->ionFlx[dim], rcvdarrsize[npg], MP_RL, pPO->ID, tag3, pMesh->Domain[level][domnumber].Comm_Parent, &(rcv_rq[npg]));

      } else {
	/*AT 9/26/12: Faking a successful test for grids that are not in the direction of propagation*/
	succtest[npg] = 1;
	rcvd++;
      }
    }

/*Loop over all possible communications to check which parent grids have been received from*/
  while(allrcv==0)
    {
      for (npg=0; npg<(pGrid->NPGrid); npg++)
	{
	  if (succtest[npg]==0) 
	    {
	      err = MPI_Test(&(rcv_rq[npg]), &receive_done, &stat);
	      if (receive_done)
		{
		  succtest[npg] = 1;
		  rcvd++;
		}
	    }
	}
      allrcv = (rcvd == pGrid->NPGrid) ? 1 : 0;
    }


/* Populate the cells on this (fine) grid with the values received from the coarse grid */
/*AT 2/28/13: TO_DO: Indexing needs to be fixed*/

  for (npg=0; npg<(pGrid->NPGrid); npg++)
    {
      pPO=(GridOvrlpS*)&(pGrid->PGrid[npg]);

      if(pPO->ionFlx[dim] != NULL) {

	switch(dim) { 
	case 0: case 1: { 
	  if (dim % 2 == 0) {
	    fixed = (pPO->ijks[0] - nghost) * 2;
	  } else {
	    fixed = (pPO->ijke[0] + 1 - nghost) * 2;
	  }
	  for (k=pPO->ijks[2] - nghost; k<= pPO->ijke[2]+1 - nghost; k++) {
	    for (j=pPO->ijks[1] - nghost; j<= pPO->ijke[1]+1 - nghost; j++) {

	      indexarith = floor((k - pPO->ijks[2] + nghost)/2.) * (floor((pPO->ijke[1] - pPO->ijks[1])/2.) + 2) + floor((j - pPO->ijks[1] + nghost)/2.);
	      

	      ks = k;
	      js = j;
	      pGrid->EdgeFlux[ks][js][fixed] = pPO->ionFlx[dim][indexarith];

	    }
	  }
	  break; 
	}

	case 2: case 3: {
	  if (dim % 2 == 0) {
	    fixed = pPO->ijks[1] - nghost;
	  } else {
	    fixed = pPO->ijke[1] + 1 - nghost;
	  }	  
	  for (k=pPO->ijks[2] - nghost; k<= pPO->ijke[2]+1 - nghost; k++) {
	    for (i=pPO->ijks[0] - nghost; i<= pPO->ijke[0]+1 - nghost; i++) {
	      indexarith = (k-(pPO->ijks[2]-nghost))*(pPO->ijke[0] - pPO->ijks[0] + 2)+i-(pPO->ijks[0]-nghost);
	      pGrid->EdgeFlux[k][fixed][i] = pPO->ionFlx[dim][indexarith];
	    }
	  }
	  break;
	}

	case 4: case 5: {
	  if (dim % 2 == 0) {
	    fixed = pPO->ijks[2] - nghost;
	  } else {
	    fixed = pPO->ijke[2] + 1 - nghost;
	  }
	  for (j=pPO->ijks[1] - nghost; j<= pPO->ijke[1]+1 - nghost; j++) {
	    for (i=pPO->ijks[0] - nghost; i<= pPO->ijke[0]+1 - nghost; i++) {
	      indexarith = (j-(pPO->ijks[1]-nghost))*(pPO->ijke[0] - pPO->ijks[0] + 2)+i-(pPO->ijks[0]-nghost);
	      pGrid->EdgeFlux[fixed][j][i] = pPO->ionFlx[dim][indexarith];
	    }
	  }
	  break;
	}
	}
      }
#else /*single processor*/

/*Let the parent grid be the grid whose domain is one level higher and has domain number matching pPO->DomN*/
      parentgrid = pMesh->Domain[level-1][pPO->DomN].Grid;
/*Find which child of my parent I am.  Assign that grid overlap structure to pCO*/
      for (i = 0; i < parentgrid->NCGrid; i++) {
	pCO=(GridOvrlpS*)&(parentgrid->CGrid[i]);
	if (pCO->DomN == domnumber) {
	  break;
	}
      }

/*Use the parent grid's child grid overlap ionFlx array to fill this grid's edgeflux array*/

      /*AT 1/11/13: Changing the if and else conditions to be reversed of what they originally were so that dimensions match*/
      /* AT 12/23/12 TO_DO Add case statement back in when 1 direction works to account for other directions of propagation*/

/*Find the face where radiation is incoming from the coarse grid*/
      /*If in the +x direction*/
      if (dim % 2 == 0) {
	fixed = (pPO->ijks[0] - nghost) * 2;
      }
      /*If in the -x direction*/
      else {
	fixed = (pPO->ijke[0] + 1 - nghost) * 2;
      }
      
/*Loop over all cells orthogonal to the direction of propagation */
/*Loop indices range over all the cells, as indexed by the coarse grid*/
      if(pCO->ionFlx[dim] != NULL) {

	for (k=pCO->ijks[2] - nghost; k<= pCO->ijke[2]+1 - nghost; k++) {
	  for (j=pCO->ijks[1] - nghost; j<= pCO->ijke[1]+1 - nghost; j++) {



	    /*Calculate the index on the ionFlx array corresponding to a given cell on this (fine) grid*/
	    /*Remember that ionFlx is a 1D array of values*/
	    indexarith = (k-(pCO->ijks[2]-nghost))*(pCO->ijke[1] - pCO->ijks[1] + 2)+j-(pCO->ijks[1]-nghost);

	    /*Find my location on the current (fine) grid*/
	    /*AT 12/23/12 BE CAREFUL WITH FACTORS OF 2 IN INDICES*/
	    ks = k*2 - pDomain->Disp[2];
	    js = j*2 - pDomain->Disp[1];


	    /*Assign my parent's ionizing flux to my EdgeFlux cell */
	    pGrid->EdgeFlux[ks][js][fixed] = pCO->ionFlx[dim][indexarith];

	    /*AT 01/14/13: The following is a clumsy way of linearly interpolating the values to 2 cells.*/
	    /*TO_DO: I also need to check that the if statements will hold up for grids of diff. size and that it works with MPI in the right area*/
	    
	    /*Assign the same value to the other 3 fine cells that make up the one coarse cell */
	    if (j < (pCO->ijke[1]+1 - nghost)) {
	      if (k < (pCO->ijke[2]+1 - nghost)) {
		pGrid->EdgeFlux[ks+1][js+1][fixed] = pCO->ionFlx[dim][indexarith];
		pGrid->EdgeFlux[ks][js+1][fixed]   = pCO->ionFlx[dim][indexarith];
		pGrid->EdgeFlux[ks+1][js][fixed]   = pCO->ionFlx[dim][indexarith];
	      }
	      /*Handle the k-edge case*/
	      else {
		pGrid->EdgeFlux[ks][js+1][fixed] = pCO->ionFlx[dim][indexarith];
	      }
	    }
	    /*Handle the j-edge case*/
	    else {
	      if (k < pCO->ijke[2]+1 - nghost) {
		pGrid->EdgeFlux[ks+1][js][fixed] = pCO->ionFlx[dim][indexarith];
	      }
	    }
	    
	  }
	}
      }

#endif
    }
#ifdef MPI_PARALLEL
  /* all receives have completed; release bookkeeping arrays */
  free_1d_array(rcv_rq);
  free_1d_array(succtest);
  free_1d_array(rcvdarrsize);
#endif
}
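In the MPI receive branch, fine-grid loop indices are mapped back into the coarse parent's buffer by halving, since the refinement ratio is 2. The mapping, isolated as a hypothetical helper (integer division matches floor() here because the offsets are nonnegative):

/* Map fine face indices (k,j), offsets measured from (ks,js), into the
 * coarse parent's 1D buffer of row width ((je - js)/2 + 2), mirroring
 * the indexarith computation in the MPI receive branch above. */
int coarse_buffer_index(int k, int j, int ks, int js, int je)
{
  return ((k - ks)/2)*((je - js)/2 + 2) + (j - js)/2;
}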
Example #7
void init_mesh(MeshS *pM)
{
  int nblock,num_domains,nd,nl,level,maxlevel=0,nd_this_level;
  int nDim,nDim_test,dim;
  int *next_domainid;
  char block[80];
  int ncd,ir,irefine,l,m,n,roffset;
  int i,Nx[3],izones;
  div_t xdiv[3];  /* divisor with quot and rem members */
  Real root_xmin[3], root_xmax[3];  /* min/max of x in each dir on root grid */
  int Nproc_Comm_world=1,nproc=0,next_procID;
  SideS D1,D2;
  DomainS *pD, *pCD;
#ifdef MPI_PARALLEL
  int ierr,child_found,groupn,Nranks,Nranks0,max_rank,irank,*ranks;
  MPI_Group world_group;

/* Get total # of processes, in MPI_COMM_WORLD */
  ierr = MPI_Comm_size(MPI_COMM_WORLD, &Nproc_Comm_world);
#endif

/* Start by initializing some quantities in the Mesh structure */

  pM->time = 0.0;
  pM->nstep = 0;
  pM->outfilename = par_gets("job","problem_id");

/*--- Step 1: Figure out how many levels and domains there are. --------------*/
/* read levels of each domain block in input file and calculate max level */

  num_domains = par_geti("job","num_domains");
#ifndef STATIC_MESH_REFINEMENT
  if (num_domains > 1) 
    ath_error("[init_mesh]: num_domains=%d; for num_domains > 1 configure with --enable-smr\n",num_domains);
#endif

  for (nblock=1; nblock<=num_domains; nblock++){
    sprintf(block,"domain%d",nblock);
    if (par_exist(block,"level") == 0)
      ath_error("[init_mesh]: level does not exist in block %s\n",block);
    level = par_geti(block,"level");
    maxlevel = MAX(maxlevel,level);
  }

/* set number of levels in Mesh, and allocate DomainsPerLevel array */

  pM->NLevels = maxlevel + 1;  /* level counting starts at 0 */

  pM->DomainsPerLevel = (int*)calloc_1d_array(pM->NLevels,sizeof(int));
  if (pM->DomainsPerLevel == NULL)
    ath_error("[init_mesh]: malloc returned a NULL pointer\n");

/* Now figure out how many domains there are at each level */

  for (nl=0; nl<=maxlevel; nl++){
    nd_this_level=0;
    for (nblock=1; nblock<=num_domains; nblock++){
      sprintf(block,"domain%d",nblock);
      if (par_geti(block,"level") == nl) nd_this_level++; 
    }

/* Error if there are any levels with no domains.  Else set DomainsPerLevel */

    if (nd_this_level == 0) {
      ath_error("[init_mesh]: Level %d has zero domains\n",nl);
    } else {
      pM->DomainsPerLevel[nl] = nd_this_level;
    }
  }

/*--- Step 2: Set up root level.  --------------------------------------------*/
/* Find the <domain> block in the input file corresponding to the root level,
 * and set root level properties in Mesh structure  */

  if (pM->DomainsPerLevel[0] != 1)
    ath_error("[init_mesh]: Level 0 has %d domains\n",pM->DomainsPerLevel[0]);

  for (nblock=1; nblock<=num_domains; nblock++){
    sprintf(block,"domain%d",nblock);
    level = par_geti(block,"level");
    if (level == 0){
      root_xmin[0] = par_getd(block,"x1min");
      root_xmax[0] = par_getd(block,"x1max");
      root_xmin[1] = par_getd(block,"x2min");
      root_xmax[1] = par_getd(block,"x2max");
      root_xmin[2] = par_getd(block,"x3min");
      root_xmax[2] = par_getd(block,"x3max");
      Nx[0] = par_geti(block,"Nx1");
      Nx[1] = par_geti(block,"Nx2");
      Nx[2] = par_geti(block,"Nx3");

/* number of dimensions of root level, to test against all other inputs */
      nDim=0;
      for (i=0; i<3; i++) if (Nx[i]>1) nDim++;
      if (nDim==0) ath_error("[init_mesh] None of Nx1,Nx2,Nx3 > 1\n");

/* some error tests of root grid */

      for (i=0; i<3; i++) {
        if (Nx[i] < 1) {
          ath_error("[init_mesh]: Nx%d in %s must be >= 1\n",(i+1),block);
        }
        if(root_xmax[i] < root_xmin[i]) {
          ath_error("[init_mesh]: x%dmax < x%dmin in %s\n",(i+1),block);
        }
      }
      if (nDim==1 && Nx[0]==1) {
        ath_error("[init_mesh]:1D requires Nx1>1: in %s Nx1=1,Nx2=%d,Nx3=%d\n",
        block,Nx[1],Nx[2]);
      }
      if (nDim==2 && Nx[2]>1) {ath_error(
        "[init_mesh]:2D requires Nx1,Nx2>1: in %s Nx1=%d,Nx2=%d,Nx3=%d\n",
        block,Nx[0],Nx[1],Nx[2]);
      }

/* Now that everything is OK, set root grid properties in Mesh structure  */

      for (i=0; i<3; i++) {
        pM->Nx[i] = Nx[i];
        pM->RootMinX[i] = root_xmin[i];
        pM->RootMaxX[i] = root_xmax[i];
        pM->dx[i] = (root_xmax[i] - root_xmin[i])/(Real)(Nx[i]);
      }

/* Set BC flags on root domain */

      pM->BCFlag_ix1 = par_geti_def(block,"bc_ix1",0);
      pM->BCFlag_ix2 = par_geti_def(block,"bc_ix2",0);
      pM->BCFlag_ix3 = par_geti_def(block,"bc_ix3",0);
      pM->BCFlag_ox1 = par_geti_def(block,"bc_ox1",0);
      pM->BCFlag_ox2 = par_geti_def(block,"bc_ox2",0);
      pM->BCFlag_ox3 = par_geti_def(block,"bc_ox3",0);
    }
  }

/*--- Step 3: Allocate and initialize domain array. --------------------------*/
/* Allocate memory and set pointers for Domain array in Mesh.  Since the
 * number of domains nd depends on the level nl, this is a strange array
 * because it is not [nl]x[nd].  Rather it is nl pointers to nd[nl] Domains.
 * Compare to the calloc_2d_array() function in ath_array.c
 */
      
  if((pM->Domain = (DomainS**)calloc((maxlevel+1),sizeof(DomainS*))) == NULL){
    ath_error("[init_mesh] failed to allocate memory for %d Domain pointers\n",
     (maxlevel+1));
  }

  if((pM->Domain[0]=(DomainS*)calloc(num_domains,sizeof(DomainS))) == NULL){
    ath_error("[init_mesh] failed to allocate memory for Domains\n");
  }

  for(nl=1; nl<=maxlevel; nl++)
    pM->Domain[nl] = (DomainS*)((unsigned char *)pM->Domain[nl-1] +
      pM->DomainsPerLevel[nl-1]*sizeof(DomainS));

/* Loop over every <domain> block in the input file, and initialize each Domain
 * in the mesh hierarchy (the Domain array), including the root level Domain  */

  next_domainid = (int*)calloc_1d_array(pM->NLevels,sizeof(int));
  for(nl=0; nl<=maxlevel; nl++) next_domainid[nl] = 0;

  for (nblock=1; nblock<=num_domains; nblock++){
    sprintf(block,"domain%d",nblock);

/* choose nd coordinate in Domain array for this <domain> block according
 * to the order it appears in input */

    nl = par_geti(block,"level");
    if (next_domainid[nl] > (pM->DomainsPerLevel[nl])-1)
      ath_error("[init_mesh]: Exceeded available domain ids on level %d\n",nl);
    nd = next_domainid[nl];
    next_domainid[nl]++;
    irefine = 1;
    for (ir=1;ir<=nl;ir++) irefine *= 2;   /* C pow fn only takes doubles !! */

/* Initialize level, number, input <domain> block number, and total number of
 * cells in this Domain */

    pM->Domain[nl][nd].Level = nl;
    pM->Domain[nl][nd].DomNumber = nd;
    pM->Domain[nl][nd].InputBlock = nblock;

    pM->Domain[nl][nd].Nx[0] = par_geti(block,"Nx1");
    pM->Domain[nl][nd].Nx[1] = par_geti(block,"Nx2");
    pM->Domain[nl][nd].Nx[2] = par_geti(block,"Nx3");

/* error tests: dimensions of domain */

    nDim_test=0;
    for (i=0; i<3; i++) if (pM->Domain[nl][nd].Nx[i]>1) nDim_test++;
    if (nDim_test != nDim) {
      ath_error("[init_mesh]: in %s grid is %dD, but in root level it is %dD\n",
      block,nDim_test,nDim);
    }
    for (i=0; i<3; i++) {
      if (pM->Domain[nl][nd].Nx[i] < 1) {
        ath_error("[init_mesh]: %s/Nx%d = %d must be >= 1\n",
          block,(i+1),pM->Domain[nl][nd].Nx[i]);
      }
    }
    if (nDim==1 && pM->Domain[nl][nd].Nx[0]==1) {ath_error(
      "[init_mesh]: 1D requires Nx1>1 but in %s Nx1=1,Nx2=%d,Nx3=%d\n",
      block,pM->Domain[nl][nd].Nx[1],pM->Domain[nl][nd].Nx[2]);
    }
    if (nDim==2 && pM->Domain[nl][nd].Nx[2]>1) {ath_error(
      "[init_mesh]:2D requires Nx1,Nx2 > 1 but in %s Nx1=%d,Nx2=%d,Nx3=%d\n",
      block,pM->Domain[nl][nd].Nx[0],pM->Domain[nl][nd].Nx[1],
      pM->Domain[nl][nd].Nx[2]);
    }
    for (i=0; i<nDim; i++) {
      xdiv[i] = div(pM->Domain[nl][nd].Nx[i], irefine);
      if (xdiv[i].rem != 0){
        ath_error("[init_mesh]: %s/Nx%d = %d must be divisible by %d\n",
          block,(i+1),pM->Domain[nl][nd].Nx[i],irefine);
      }
    }

/* Set cell size based on level of domain, but only if Ncell > 1 */

    for (i=0; i<3; i++) {
      if (pM->Domain[nl][nd].Nx[i] > 1) {
        pM->Domain[nl][nd].dx[i] = pM->dx[i]/(Real)(irefine);
      } else {
        pM->Domain[nl][nd].dx[i] = pM->dx[i];
      }
    }

/* Set displacement of Domain from origin. By definition, root level has 0
 * displacement, so only read for levels other than root  */

    for (i=0; i<3; i++) pM->Domain[nl][nd].Disp[i] = 0;
    if (nl != 0) {  
      if (par_exist(block,"iDisp") == 0)
        ath_error("[init_mesh]: iDisp does not exist in block %s\n",block);
      pM->Domain[nl][nd].Disp[0] = par_geti(block,"iDisp");

/* jDisp=0 if problem is only 1D */
      if (pM->Nx[1] > 1) {
        if (par_exist(block,"jDisp") == 0)
          ath_error("[init_mesh]: jDisp does not exist in block %s\n",block);
        pM->Domain[nl][nd].Disp[1] = par_geti(block,"jDisp");
      }

/* kDisp=0 if problem is only 2D */
      if (pM->Nx[2] > 1) {
        if (par_exist(block,"kDisp") == 0)
          ath_error("[init_mesh]: kDisp does not exist in block %s\n",block);
        pM->Domain[nl][nd].Disp[2] = par_geti(block,"kDisp");
      }
    }

    for (i=0; i<nDim; i++) {
      xdiv[i] = div(pM->Domain[nl][nd].Disp[i], irefine);
      if (xdiv[i].rem != 0){
        ath_error("[init_mesh]: %s/Disp%d = %d must be divisible by %d\n",
          block,(i+1),pM->Domain[nl][nd].Disp[i],irefine);
      }
    }

/* Use cell size and displacement from origin to compute min/max of x1/x2/x3 on
 * this domain.  Ensure that if Domain touches root grid boundary, the min/max
 * of this Domain are set IDENTICAL to values in root grid  */

    for (i=0; i<3; i++){

      if (pM->Domain[nl][nd].Disp[i] == 0) {
        pM->Domain[nl][nd].MinX[i] = root_xmin[i];
      } else { 
        pM->Domain[nl][nd].MinX[i] = root_xmin[i] 
          + ((Real)(pM->Domain[nl][nd].Disp[i]))*pM->Domain[nl][nd].dx[i];
      }

      izones= (pM->Domain[nl][nd].Disp[i] + pM->Domain[nl][nd].Nx[i])/irefine;
      if(izones == pM->Nx[i]){
        pM->Domain[nl][nd].MaxX[i] = root_xmax[i];
      } else {
        pM->Domain[nl][nd].MaxX[i] = pM->Domain[nl][nd].MinX[i] 
          + ((Real)(pM->Domain[nl][nd].Nx[i]))*pM->Domain[nl][nd].dx[i];
      }

      pM->Domain[nl][nd].RootMinX[i] = root_xmin[i];
      pM->Domain[nl][nd].RootMaxX[i] = root_xmax[i];
    }

  }  /*---------- end loop over domain blocks in input file ------------------*/
    
/*--- Step 4: Check that domains on the same level are non-overlapping. ------*/
/* Compare the integer coordinates of the sides of Domains at the same level.
 * Print error if Domains overlap or touch. */

  for (nl=maxlevel; nl>0; nl--){     /* start at highest level, and skip root */
  for (nd=0; nd<(pM->DomainsPerLevel[nl])-1; nd++){
    for (i=0; i<3; i++) {
      D1.ijkl[i] = pM->Domain[nl][nd].Disp[i];
      D1.ijkr[i] = pM->Domain[nl][nd].Disp[i] + pM->Domain[nl][nd].Nx[i];
    }

    for (ncd=nd+1; ncd<(pM->DomainsPerLevel[nl]); ncd++) {
      for (i=0; i<3; i++) {
        D2.ijkl[i] = pM->Domain[nl][ncd].Disp[i];
        D2.ijkr[i] = pM->Domain[nl][ncd].Disp[i] + pM->Domain[nl][ncd].Nx[i];
      }

      if (D1.ijkl[0] <= D2.ijkr[0] && D1.ijkr[0] >= D2.ijkl[0] &&
          D1.ijkl[1] <= D2.ijkr[1] && D1.ijkr[1] >= D2.ijkl[1] &&
          D1.ijkl[2] <= D2.ijkr[2] && D1.ijkr[2] >= D2.ijkl[2]){
          ath_error("Domains %d and %d at same level overlap or touch\n",
          pM->Domain[nl][nd].InputBlock,pM->Domain[nl][ncd].InputBlock);
      }
    }
  }}

/*--- Step 5: Check for illegal geometry of child/parent Domains -------------*/

  for (nl=0; nl<maxlevel; nl++){
  for (nd=0; nd<pM->DomainsPerLevel[nl]; nd++){
    pD = (DomainS*)&(pM->Domain[nl][nd]);  /* set ptr to this Domain */

    for (i=0; i<3; i++) {
      D1.ijkl[i] = pD->Disp[i];
      D1.ijkr[i] = pD->Disp[i] + pD->Nx[i];
    }

    for (ncd=0; ncd<pM->DomainsPerLevel[nl+1]; ncd++){
      pCD = (DomainS*)&(pM->Domain[nl+1][ncd]);  /* set ptr to potential child*/

      for (i=0; i<3; i++) {
        D2.ijkl[i] = pCD->Disp[i]/2;
        D2.ijkr[i] = 1;
        if (pCD->Nx[i] > 1) D2.ijkr[i] = (pCD->Disp[i] + pCD->Nx[i])/2;
      }

      if (D1.ijkl[0] <= D2.ijkr[0] && D1.ijkr[0] >= D2.ijkl[0] &&
          D1.ijkl[1] <= D2.ijkr[1] && D1.ijkr[1] >= D2.ijkl[1] &&
          D1.ijkl[2] <= D2.ijkr[2] && D1.ijkr[2] >= D2.ijkl[2]){

/* check for child Domains that touch the edge of the parent (and are not at
 * the edges of root), extend past the edge of the parent, or are closer than
 * nghost/2 to the edge of the parent  */

        for (dim=0; dim<nDim; dim++){
          irefine = 1;
          for (i=1;i<=nl;i++) irefine *= 2; /* parent refinement lev */
          roffset = (pCD->Disp[dim] + pCD->Nx[dim])/(2*irefine) - pM->Nx[dim];

          if (((D2.ijkl[dim] == D1.ijkl[dim]) && (pD->Disp[dim] != 0)) ||
              ((D2.ijkr[dim] == D1.ijkr[dim]) && (roffset != 0))) {
            for (i=0; i<nDim; i++) {
              D1.ijkl[i] /= irefine;  /* report indices scaled to root */
              D1.ijkr[i] /= irefine;
              D2.ijkl[i] /= irefine; 
              D2.ijkr[i] /= irefine;
            }
            ath_error("[init_mesh] child Domain D%d[is,ie,js,je,ks,ke]=[%d %d %d %d %d %d] touches parent D%d[is,ie,js,je,ks,ke]=[%d %d %d %d %d %d]\n",
              pCD->InputBlock,D2.ijkl[0],D2.ijkr[0],D2.ijkl[1],D2.ijkr[1],
              D2.ijkl[2],D2.ijkr[2],pD->InputBlock,D1.ijkl[0],D1.ijkr[0],
              D1.ijkl[1],D1.ijkr[1],D1.ijkl[2],D1.ijkr[2]);
          }

          if ((D2.ijkl[dim] < D1.ijkl[dim]) ||
              (D2.ijkr[dim] > D1.ijkr[dim])) {
            for (i=0; i<nDim; i++) {
              D1.ijkl[i] /= irefine;  /* report indices scaled to root */
              D1.ijkr[i] /= irefine;
              D2.ijkl[i] /= irefine; 
              D2.ijkr[i] /= irefine;
            }
            ath_error("[init_mesh] child Domain D%d[is,ie,js,je,ks,ke]=[%d %d %d %d %d %d] extends past parent D%d[is,ie,js,je,ks,ke]=[%d %d %d %d %d %d]\n",
              pCD->InputBlock,D2.ijkl[0],D2.ijkr[0],D2.ijkl[1],D2.ijkr[1],
              D2.ijkl[2],D2.ijkr[2],pD->InputBlock,D1.ijkl[0],D1.ijkr[0],
              D1.ijkl[1],D1.ijkr[1],D1.ijkl[2],D1.ijkr[2]);
          }

          if (((2*(D2.ijkl[dim]-D1.ijkl[dim]) < nghost) &&
               (2*(D2.ijkl[dim]-D1.ijkl[dim]) > 0     )) ||
              ((2*(D1.ijkr[dim]-D2.ijkr[dim]) < nghost) &&
               (2*(D1.ijkr[dim]-D2.ijkr[dim]) > 0     ))) {
            for (i=0; i<nDim; i++) {
              D1.ijkl[i] /= irefine;  /* report indices scaled to root */
              D1.ijkr[i] /= irefine;
              D2.ijkl[i] /= irefine; 
              D2.ijkr[i] /= irefine;
            }
            ath_error("[init_mesh] child Domain D%d[is,ie,js,je,ks,ke]=[%d %d %d %d %d %d] closer than nghost/2 to parent D%d[is,ie,js,je,ks,ke]=[%d %d %d %d %d %d]\n",
              pCD->InputBlock,D2.ijkl[0],D2.ijkr[0],D2.ijkl[1],D2.ijkr[1],
              D2.ijkl[2],D2.ijkr[2],pD->InputBlock,D1.ijkl[0],D1.ijkr[0],
              D1.ijkl[1],D1.ijkr[1],D1.ijkl[2],D1.ijkr[2]);
          }

        }
      }
    }
  }}

/*--- Step 6: Divide each Domain into Grids, and allocate to processor(s)  ---*/
/* Get the number of Grids in each direction.  These are given either in the
 * <domain?> block in the input file, or by automatic decomposition given the
 * number of processors desired for this domain.   */

  next_procID = 0;  /* start assigning processors to Grids at ID=0 */

  for (nl=0; nl<=maxlevel; nl++){
    for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
      pD = (DomainS*)&(pM->Domain[nl][nd]);  /* set ptr to this Domain */
      sprintf(block,"domain%d",pD->InputBlock);

#ifndef MPI_PARALLEL
      for (i=0; i<3; i++) pD->NGrid[i] = 1;
#else
      nproc = par_geti_def(block,"AutoWithNProc",0);

/* Read layout of Grids from input file */

      if (nproc == 0){
        pD->NGrid[0] = par_geti_def(block,"NGrid_x1",1);
        pD->NGrid[1] = par_geti_def(block,"NGrid_x2",1);
        pD->NGrid[2] = par_geti_def(block,"NGrid_x3",1);
        if (pD->NGrid[0] == 0)
          ath_error("[init_mesh] Cannot enter NGrid_x1=0 in %s\n",block);
        if (pD->NGrid[1] == 0)
          ath_error("[init_mesh] Cannot enter NGrid_x2=0 in %s\n",block);
        if (pD->NGrid[2] == 0)
          ath_error("[init_mesh] Cannot enter NGrid_x3=0 in %s\n",block);
      }

/* Auto decompose Domain into Grids.  To use this option, set "AutoWithNProc"
 * to number of processors desired for this Domain  */

      else if (nproc > 0){
        if(dom_decomp(pD->Nx[0],pD->Nx[1],pD->Nx[2],nproc,
           &(pD->NGrid[0]),&(pD->NGrid[1]),&(pD->NGrid[2])))
           ath_error("[init_mesh]: Error in automatic Domain decomposition\n");

        /* Store the domain decomposition in the par database */
        par_seti(block,"NGrid_x1","%d",pD->NGrid[0],"x1 decomp");
        par_seti(block,"NGrid_x2","%d",pD->NGrid[1],"x2 decomp");
        par_seti(block,"NGrid_x3","%d",pD->NGrid[2],"x3 decomp");

      } else {
        ath_error("[init_mesh] invalid AutoWithNProc=%d in %s\n",nproc,block);
      }
#endif /* MPI_PARALLEL */

/* test for conflicts between number of grids and dimensionality */

      for (i=0; i<3; i++){
        if(pD->NGrid[i] > 1 && pD->Nx[i] <= 1)
          ath_error("[init_mesh]: %s/NGrid_x%d = %d and Nx%d = %d\n",block,
          (i+1),pD->NGrid[i],(i+1),pD->Nx[i]);
      }

/* check that this Domain does not need more Grids than there are processors. */

      nproc = (pD->NGrid[0])*(pD->NGrid[1])*(pD->NGrid[2]);
      if(nproc > Nproc_Comm_world) ath_error(
        "[init_mesh]: %d Grids requested by block %s and only %d procs\n"
        ,nproc,block,Nproc_Comm_world); 

/* Build 3D array to store data on Grids in this Domain */

      if ((pD->GData = (GridsDataS***)calloc_3d_array(pD->NGrid[2],pD->NGrid[1],
        pD->NGrid[0],sizeof(GridsDataS))) == NULL) ath_error(
        "[init_mesh]: GData calloc returned a NULL pointer\n");

/* Divide the domain into blocks */

      for (i=0; i<3; i++) {
        xdiv[i] = div(pD->Nx[i], pD->NGrid[i]);
      }

/* Distribute cells in Domain to Grids.  Assign each Grid to a processor ID in
 * the MPI_COMM_WORLD communicator.  For single-processor jobs, there is only
 * one ID=0, and the GData array will have only one element. */

      for(n=0; n<(pD->NGrid[2]); n++){
      for(m=0; m<(pD->NGrid[1]); m++){
      for(l=0; l<(pD->NGrid[0]); l++){
        for (i=0; i<3; i++) pD->GData[n][m][l].Nx[i] = xdiv[i].quot;
        pD->GData[n][m][l].ID_Comm_world = next_procID++;
        if (next_procID > ((Nproc_Comm_world)-1)) next_procID=0;
      }}}

/* If the Domain is not evenly divisible, put the extra cells on the first
 * Grids in each direction, maintaining the load balance as much as possible */

      for(n=0; n<(pD->NGrid[2]); n++){
        for(m=0; m<(pD->NGrid[1]); m++){
          for(l=0; l<xdiv[0].rem; l++){
            pD->GData[n][m][l].Nx[0]++;
          }
        }
      }
      xdiv[0].rem=0;

      for(n=0; n<(pD->NGrid[2]); n++){
        for(m=0; m<xdiv[1].rem; m++) {
          for(l=0; l<(pD->NGrid[0]); l++){
            pD->GData[n][m][l].Nx[1]++;
          }
        }
      }
      xdiv[1].rem=0;

      for(n=0; n<xdiv[2].rem; n++){
        for(m=0; m<(pD->NGrid[1]); m++){
          for(l=0; l<(pD->NGrid[0]); l++){
            pD->GData[n][m][l].Nx[2]++;
          }
        }
      }
      xdiv[2].rem=0;

/* Initialize displacements from origin for each Grid */

      for(n=0; n<(pD->NGrid[2]); n++){
        for(m=0; m<(pD->NGrid[1]); m++){
          pD->GData[n][m][0].Disp[0] = pD->Disp[0];
          for(l=1; l<(pD->NGrid[0]); l++){
            pD->GData[n][m][l].Disp[0] = pD->GData[n][m][l-1].Disp[0] + 
                                         pD->GData[n][m][l-1].Nx[0];
          }
        }
      }

      for(n=0; n<(pD->NGrid[2]); n++){
        for(l=0; l<(pD->NGrid[0]); l++){
          pD->GData[n][0][l].Disp[1] = pD->Disp[1];
          for(m=1; m<(pD->NGrid[1]); m++){
            pD->GData[n][m][l].Disp[1] = pD->GData[n][m-1][l].Disp[1] + 
                                         pD->GData[n][m-1][l].Nx[1];
          }
        }
      }

      for(m=0; m<(pD->NGrid[1]); m++){
        for(l=0; l<(pD->NGrid[0]); l++){
          pD->GData[0][m][l].Disp[2] = pD->Disp[2];
          for(n=1; n<(pD->NGrid[2]); n++){
            pD->GData[n][m][l].Disp[2] = pD->GData[n-1][m][l].Disp[2] + 
                                         pD->GData[n-1][m][l].Nx[2];
          }
        }
      }

    }  /* end loop over ndomains */
  }    /* end loop over nlevels */

/* check that total number of Grids was partitioned evenly over total number of
 * MPI processes available (equal to one for single processor jobs) */ 

  if (next_procID != 0)
    ath_error("[init_mesh]:total # of Grids != total # of MPI procs\n");

/*--- Step 7: Allocate a Grid for each Domain on this processor --------------*/

  for (nl=0; nl<=maxlevel; nl++){
    for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
      pD = (DomainS*)&(pM->Domain[nl][nd]);  /* set ptr to this Domain */
      sprintf(block,"domain%d",pD->InputBlock);
      pD->Grid = NULL;

/* Loop over GData array, and if there is a Grid assigned to this proc, 
 * allocate it */

      for(n=0; n<(pD->NGrid[2]); n++){
      for(m=0; m<(pD->NGrid[1]); m++){
      for(l=0; l<(pD->NGrid[0]); l++){
        if (pD->GData[n][m][l].ID_Comm_world == myID_Comm_world) {
          if ((pD->Grid = (GridS*)malloc(sizeof(GridS))) == NULL)
            ath_error("[init_mesh]: Failed to malloc a Grid for %s\n",block);
        }
      }}}
    }
  }

/*--- Step 8: Create an MPI Communicator for each Domain ---------------------*/

#ifdef MPI_PARALLEL
/* Allocate memory for ranks[] array */

  max_rank = 0;
  for (nl=0; nl<=maxlevel; nl++){
  for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
    pD = (DomainS*)&(pM->Domain[nl][nd]);  /* set ptr to this Domain */
    Nranks = (pD->NGrid[0])*(pD->NGrid[1])*(pD->NGrid[2]);
    max_rank = MAX(max_rank, Nranks);
  }}
  ranks = (int*)calloc_1d_array(max_rank,sizeof(int));

/* Extract handle of group defined by MPI_COMM_WORLD communicator */

  ierr = MPI_Comm_group(MPI_COMM_WORLD, &world_group);

  for (nl=0; nl<=maxlevel; nl++){
  for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
    pD = (DomainS*)&(pM->Domain[nl][nd]);  /* set ptr to this Domain */

/* Load integer array with ranks of processes in MPI_COMM_WORLD updating Grids
 * on this Domain.  The ranks of these processes in the new Comm_Domain
 * communicator created below are equal to the indices of this array */

    Nranks = (pD->NGrid[0])*(pD->NGrid[1])*(pD->NGrid[2]);
    groupn = 0;

    for(n=0; n<(pD->NGrid[2]); n++){
    for(m=0; m<(pD->NGrid[1]); m++){
    for(l=0; l<(pD->NGrid[0]); l++){
      ranks[groupn] = pD->GData[n][m][l].ID_Comm_world;
      pD->GData[n][m][l].ID_Comm_Domain = groupn;
      groupn++;
    }}}

/* Create a new group for this Domain; use it to create a new communicator */

    ierr = MPI_Group_incl(world_group,Nranks,ranks,&(pD->Group_Domain));
    ierr = MPI_Comm_create(MPI_COMM_WORLD,pD->Group_Domain,&(pD->Comm_Domain));

  }}

  free_1d_array(ranks);
#endif /* MPI_PARALLEL */

/*--- Step 9: Create MPI Communicators for Child and Parent Domains ----------*/

#if defined(MPI_PARALLEL) && defined(STATIC_MESH_REFINEMENT)
/* Initialize communicators to NULL, since not all Domains use them, and
 * allocate memory for ranks[] array */

  for (nl=0; nl<=maxlevel; nl++){
    for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
      pM->Domain[nl][nd].Comm_Parent = MPI_COMM_NULL;
      pM->Domain[nl][nd].Comm_Children = MPI_COMM_NULL;
    }
  }

  if (maxlevel > 0) {
    ranks = (int*)calloc_1d_array(Nproc_Comm_world,sizeof(int));
  }

/* For each Domain up to (maxlevel-1), initialize communicator with children */

  for (nl=0; nl<maxlevel; nl++){
  for (nd=0; nd<pM->DomainsPerLevel[nl]; nd++){
    pD = (DomainS*)&(pM->Domain[nl][nd]);  /* set ptr to this Domain */
    child_found = 0;

/* Load integer array with ranks of processes in MPI_COMM_WORLD updating Grids
 * on this Domain, in case a child Domain is found.  Set IDs in Comm_Children
 * communicator based on index in rank array, in case child found.  If no
 * child is found these ranks will never be used. */

    Nranks = (pD->NGrid[0])*(pD->NGrid[1])*(pD->NGrid[2]);
    groupn = 0;

    for(n=0; n<(pD->NGrid[2]); n++){
    for(m=0; m<(pD->NGrid[1]); m++){
    for(l=0; l<(pD->NGrid[0]); l++){
      ranks[groupn] = pD->GData[n][m][l].ID_Comm_world;
      pD->GData[n][m][l].ID_Comm_Children = groupn;
      groupn++;
    }}}

/* edges of this Domain */
    for (i=0; i<3; i++) {
      D1.ijkl[i] = pD->Disp[i];
      D1.ijkr[i] = pD->Disp[i] + pD->Nx[i];
    }

/* Loop over all Domains at next level, looking for children of this Domain */

    for (ncd=0; ncd<pM->DomainsPerLevel[nl+1]; ncd++){
      pCD = (DomainS*)&(pM->Domain[nl+1][ncd]);  /* set ptr to potential child*/

/* edges of potential child Domain */
      for (i=0; i<3; i++) {
        D2.ijkl[i] = pCD->Disp[i]/2;
        D2.ijkr[i] = 1;
        if (pCD->Nx[i] > 1) D2.ijkr[i] = (pCD->Disp[i] + pCD->Nx[i])/2;
      }

      if (D1.ijkl[0] < D2.ijkr[0] && D1.ijkr[0] > D2.ijkl[0] &&
          D1.ijkl[1] < D2.ijkr[1] && D1.ijkr[1] > D2.ijkl[1] &&
          D1.ijkl[2] < D2.ijkr[2] && D1.ijkr[2] > D2.ijkl[2]){
        child_found = 1;

/* Child found.  Add child processors to ranks array, but only if they are
 * different from processes currently there (including parent and any previously
 * found children).  Set IDs associated with Comm_Parent communicator, since on 
 * the child Domain this is the same as the Comm_Children communicator on the
 * parent Domain  */

        for(n=0; n<(pCD->NGrid[2]); n++){
        for(m=0; m<(pCD->NGrid[1]); m++){
        for(l=0; l<(pCD->NGrid[0]); l++){
          irank = -1;
          for (i=0; i<Nranks; i++) {
            if(pCD->GData[n][m][l].ID_Comm_world == ranks[i]) irank = i;
          }
          if (irank == -1) {
            ranks[groupn] = pCD->GData[n][m][l].ID_Comm_world;
            pCD->GData[n][m][l].ID_Comm_Parent = groupn;
            groupn++;
            Nranks++;
          } else {
            pCD->GData[n][m][l].ID_Comm_Parent = irank; /* rank in the new
                                          communicator = index into ranks[] */
          }
        }}}
      }
    }

/* After looping over all potential child Domains, create a new communicator if
 * a child was found */

    if (child_found == 1) {
      ierr = MPI_Group_incl(world_group, Nranks, ranks, &(pD->Group_Children));
      ierr = MPI_Comm_create(MPI_COMM_WORLD,pD->Group_Children,
        &pD->Comm_Children);

/* Loop over children to set Comm_Parent communicators */

      for (ncd=0; ncd<pM->DomainsPerLevel[nl+1]; ncd++){
        pCD = (DomainS*)&(pM->Domain[nl+1][ncd]);  

        for (i=0; i<3; i++) {
          D2.ijkl[i] = pCD->Disp[i]/2;
          D2.ijkr[i] = 1;
          if (pCD->Nx[i] > 1) D2.ijkr[i] = (pCD->Disp[i] + pCD->Nx[i])/2;
        }

        if (D1.ijkl[0] < D2.ijkr[0] && D1.ijkr[0] > D2.ijkl[0] &&
            D1.ijkl[1] < D2.ijkr[1] && D1.ijkr[1] > D2.ijkl[1] &&
            D1.ijkl[2] < D2.ijkr[2] && D1.ijkr[2] > D2.ijkl[2]){
          pCD->Comm_Parent = pD->Comm_Children;
        }
      }
    }
  }}

#endif /* MPI_PARALLEL & STATIC_MESH_REFINEMENT  */

  free(next_domainid);
  return;
}
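Steps 4, 5, and 9 all rely on the same integer box test on Domain corner indices: two boxes intersect iff their index intervals overlap on every axis. Written out as a hypothetical helper (<=/>= detects overlap-or-touch as in Step 4; the strict </> comparisons of Step 9 would exclude mere touching):

/* Returns 1 if integer boxes b1 and b2 overlap or touch on all three axes. */
int boxes_overlap_or_touch(SideS *b1, SideS *b2)
{
  int i;
  for (i=0; i<3; i++) {
    if (b1->ijkl[i] > b2->ijkr[i] || b1->ijkr[i] < b2->ijkl[i]) return 0;
  }
  return 1;
}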
Example #8
void bvals_grav_init(MeshS *pM)
{
  GridS *pG;
  DomainS *pD;
  int i,nl,nd,irefine;
#ifdef MPI_PARALLEL
  int myL,myM,myN,l,m,n,nx1t,nx2t,nx3t,size;
  int x1cnt=0, x2cnt=0, x3cnt=0; /* Number of words passed in x1/x2/x3-dir. */
#endif /* MPI_PARALLEL */

/* Cycle through all the Domains that have active Grids on this proc */

  for (nl=0; nl<(pM->NLevels); nl++){
  for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
  if (pM->Domain[nl][nd].Grid != NULL) {
    pD = (DomainS*)&(pM->Domain[nl][nd]);  /* ptr to Domain */
    pG = pM->Domain[nl][nd].Grid;          /* ptr to Grid */
    irefine = 1;
    for (i=1;i<=nl;i++) irefine *= 2;   /* C pow fn only takes doubles !! */
#ifdef MPI_PARALLEL
/* get (l,m,n) coordinates of Grid being updated on this processor */
    get_myGridIndex(pD, myID_Comm_world, &myL, &myM, &myN);
#endif /* MPI_PARALLEL */

/* Set function pointers for physical boundaries in x1-direction */

    if(pG->Nx[0] > 1) {

/*---- ix1 boundary ----------------------------------------------------------*/

      if(pD->ix1_GBCFun == NULL){

/* Domain boundary is in interior of root */
        if(pD->Disp[0] != 0) {
          pD->ix1_GBCFun = ProlongateLater;

/* Domain is at L-edge of root Domain */
        } else {
          switch(pM->BCFlag_ix1){

          case 1: /* Reflecting */
            pD->ix1_GBCFun = reflect_Phi_ix1;
          break;

          case 2: /* Outflow */
            ath_error("[bvals_grav_init]: BCFlag_ix1 = 2 not implemented\n");
          break;

          case 4: /* Periodic */
            pD->ix1_GBCFun = periodic_Phi_ix1;
#ifdef MPI_PARALLEL
            if(pG->lx1_Gid < 0 && pD->NGrid[0] > 1){
              pG->lx1_Gid = pD->GData[myN][myM][pD->NGrid[0]-1].ID_Comm_Domain;
	    }
#endif /* MPI_PARALLEL */
          break;

          case 5: /* Reflecting, B_normal!=0 */
            pD->ix1_GBCFun = reflect_Phi_ix1;
          break;

          default:
            ath_error("[bvals_grav_init]: BCFlag_ix1 = %d unknown\n",
            pM->BCFlag_ix1);
          }
        }
      }

/*---- ox1 boundary ----------------------------------------------------------*/

      if(pD->ox1_GBCFun == NULL){

/* Domain boundary is in interior of root */
        if((pD->Disp[0] + pD->Nx[0])/irefine != pM->Nx[0]) {
          pD->ox1_GBCFun = ProlongateLater;

/* Domain is at R-edge of root Domain */
        } else {
          switch(pM->BCFlag_ox1){

          case 1: /* Reflecting */
            pD->ox1_GBCFun = reflect_Phi_ox1;
          break;

          case 2: /* Outflow */
            ath_error("[bvals_grav_init]: BCFlag_ox1 = 2 not implemented\n");
          break;

          case 4: /* Periodic */
            pD->ox1_GBCFun = periodic_Phi_ox1;
#ifdef MPI_PARALLEL
            if(pG->rx1_Gid < 0 && pD->NGrid[0] > 1){
              pG->rx1_Gid = pD->GData[myN][myM][0].ID_Comm_Domain;
            }
#endif /* MPI_PARALLEL */
          break;

          case 5: /* Reflecting, B_normal!=0 */
            pD->ox1_GBCFun = reflect_Phi_ox1;
          break;

          default:
            ath_error("[bvals_grav_init]: BCFlag_ox1 = %d unknown\n",
            pM->BCFlag_ox1);
          }
        }
      }
    }

/* Set function pointers for physical boundaries in x2-direction */

    if(pG->Nx[1] > 1) {

/*---- ix2 boundary ----------------------------------------------------------*/

      if(pD->ix2_GBCFun == NULL){

/* Domain boundary is in interior of root */
        if(pD->Disp[1] != 0) {
          pD->ix2_GBCFun = ProlongateLater;

/* Domain is at L-edge of root Domain */
        } else {
          switch(pM->BCFlag_ix2){

          case 1: /* Reflecting */
            pD->ix2_GBCFun = reflect_Phi_ix2;
          break;

          case 2: /* Outflow */
            ath_error("[bvals_grav_init]: BCFlag_ix2 = 2 not implemented\n");
          break;

          case 4: /* Periodic */
            pD->ix2_GBCFun = periodic_Phi_ix2;
#ifdef MPI_PARALLEL
            if(pG->lx2_Gid < 0 && pD->NGrid[1] > 1){
              pG->lx2_Gid = pD->GData[myN][pD->NGrid[1]-1][myL].ID_Comm_Domain;
            }
#endif /* MPI_PARALLEL */
          break;

          case 5: /* Reflecting, B_normal!=0 */
            pD->ix2_GBCFun = reflect_Phi_ix2;
          break;

          default:
            ath_error("[bvals_grav_init]: BCFlag_ix2 = %d unknown\n",
            pM->BCFlag_ix2);
          }
        }
      }

/*---- ox2 boundary ----------------------------------------------------------*/

      if(pD->ox2_GBCFun == NULL){

/* Domain boundary is in interior of root */
        if((pD->Disp[1] + pD->Nx[1])/irefine != pM->Nx[1]) {
          pD->ox2_GBCFun = ProlongateLater;

/* Domain is at R-edge of root Domain */
        } else {
          switch(pM->BCFlag_ox2){

          case 1: /* Reflecting */
            pD->ox2_GBCFun = reflect_Phi_ox2;
          break;

          case 2: /* Outflow */
            ath_error("[bvals_grav_init]: BCFlag_ox2 = 2 not implemented\n");
          break;

          case 4: /* Periodic */
            pD->ox2_GBCFun = periodic_Phi_ox2;
#ifdef MPI_PARALLEL
            if(pG->rx2_Gid < 0 && pD->NGrid[1] > 1){
              pG->rx2_Gid = pD->GData[myN][0][myL].ID_Comm_Domain;
            }
#endif /* MPI_PARALLEL */
          break;

          case 5: /* Reflecting, B_normal!=0 */
            pD->ox2_GBCFun = reflect_Phi_ox2;
          break;

          default:
            ath_error("[bvals_grav_init]: BCFlag_ox2 = %d unknown\n",
            pM->BCFlag_ox2);
          }
        }
      }
    }

/* Set function pointers for physical boundaries in x3-direction */

    if(pG->Nx[2] > 1) {

/*---- ix3 boundary ----------------------------------------------------------*/

      if(pD->ix3_GBCFun == NULL){

/* Domain boundary is in interior of root */
        if(pD->Disp[2] != 0) {
          pD->ix3_GBCFun = ProlongateLater;

/* Domain is at L-edge of root Domain */
        } else {
          switch(pM->BCFlag_ix3){

          case 1: /* Reflecting */
            pD->ix3_GBCFun = reflect_Phi_ix3;
          break;

          case 2: /* Outflow */
            ath_error("[bvals_grav_init]: BCFlag_ix3 = 2 not defined\n");
          break;

          case 4: /* Periodic */
            pD->ix3_GBCFun = periodic_Phi_ix3;
#ifdef MPI_PARALLEL
            if(pG->lx3_Gid < 0 && pD->NGrid[2] > 1){
              pG->lx3_Gid = pD->GData[pD->NGrid[2]-1][myM][myL].ID_Comm_Domain;
            }
#endif /* MPI_PARALLEL */
          break;

          case 5: /* Reflecting, B_normal!=0 */
            pD->ix3_GBCFun = reflect_Phi_ix3;
          break;

          default:
            ath_error("[bvals_grav_init]: BCFlag_ix3 = %d unknown\n",
            pM->BCFlag_ix3);
          }
        }
      }

/*---- ox3 boundary ----------------------------------------------------------*/

      if(pD->ox3_GBCFun == NULL){

/* Domain boundary is in interior of root */
        if((pD->Disp[2] + pD->Nx[2])/irefine != pM->Nx[2]) {
          pD->ox3_GBCFun = ProlongateLater;

/* Domain is at R-edge of root Domain */
        } else {
          switch(pM->BCFlag_ox3){

          case 1: /* Reflecting */
            pD->ox3_GBCFun = reflect_Phi_ox3;
          break;

          case 2: /* Outflow */
            ath_error("[bvals_grav_init]: BCFlag_ox3 = 2 not defined\n");
          break;

          case 4: /* Periodic */
            pD->ox3_GBCFun = periodic_Phi_ox3;
#ifdef MPI_PARALLEL
            if(pG->rx3_Gid < 0 && pD->NGrid[2] > 1){
              pG->rx3_Gid = pD->GData[0][myM][myL].ID_Comm_Domain;
            }
#endif /* MPI_PARALLEL */
          break;

          case 5: /* Reflecting, B_normal!=0 */
            pD->ox3_GBCFun = reflect_Phi_ox3;
          break;

          default:
            ath_error("[bvals_grav_init]: BCFlag_ox3 = %d unknown\n",
            pM->BCFlag_ox3);
          }
        }
      }
    }

/* Figure out largest size needed for send/receive buffers with MPI ----------*/
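/* For each Grid, count the cells on each face (including the extra
 * transverse cells the exchange carries) and keep the running maximum;
 * the buffers below are sized for the largest face in any direction. */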

#ifdef MPI_PARALLEL

    for (n=0; n<(pD->NGrid[2]); n++){
    for (m=0; m<(pD->NGrid[1]); m++){
      for (l=0; l<(pD->NGrid[0]); l++){

/* x1cnt is surface area of x1 faces */
        if(pD->NGrid[0] > 1){
          nx2t = pD->GData[n][m][l].Nx[1];
          if(nx2t > 1) nx2t += 1;

          nx3t = pD->GData[n][m][l].Nx[2];
          if(nx3t > 1) nx3t += 1;

          if(nx2t*nx3t > x1cnt) x1cnt = nx2t*nx3t;
        }

/* x2cnt is surface area of x2 faces */
        if(pD->NGrid[1] > 1){
          nx1t = pD->GData[n][m][l].Nx[0];
          if(nx1t > 1) nx1t += 2*nghost;

          nx3t = pD->GData[n][m][l].Nx[2];
          if(nx3t > 1) nx3t += 1;

          if(nx1t*nx3t > x2cnt) x2cnt = nx1t*nx3t;
        }

/* x3cnt is surface area of x3 faces */
        if(pD->NGrid[2] > 1){
          nx1t = pD->GData[n][m][l].Nx[0];
          if(nx1t > 1) nx1t += 2*nghost;

          nx2t = pD->GData[n][m][l].Nx[1];
          if(nx2t > 1) nx2t += 2*nghost;

          if(nx1t*nx2t > x3cnt) x3cnt = nx1t*nx2t;
        }
      }
    }}
#endif /* MPI_PARALLEL */

  }}}  /* End loop over all Domains with active Grids -----------------------*/

#ifdef MPI_PARALLEL
/* Allocate memory for send/receive buffers and MPI_Requests */

  size = x1cnt > x2cnt ? x1cnt : x2cnt;
  size = x3cnt >  size ? x3cnt : size;

  size *= nghost; /* multiply by the buffer depth (nghost layers normal to the face) */

  if (size > 0) {
    if((send_buf = (double**)calloc_2d_array(2,size,sizeof(double))) == NULL)
      ath_error("[bvals_init]: Failed to allocate send buffer\n");

    if((recv_buf = (double**)calloc_2d_array(2,size,sizeof(double))) == NULL)
      ath_error("[bvals_init]: Failed to allocate recv buffer\n");
  }

  if((recv_rq = (MPI_Request*) calloc_1d_array(2,sizeof(MPI_Request))) == NULL)
    ath_error("[bvals_init]: Failed to allocate recv MPI_Request array\n");
  if((send_rq = (MPI_Request*) calloc_1d_array(2,sizeof(MPI_Request))) == NULL)
    ath_error("[bvals_init]: Failed to allocate send MPI_Request array\n");

#endif /* MPI_PARALLEL */

  return;
}
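
The switch blocks earlier in this function only select and store function pointers; the boundary update itself invokes them later. A minimal sketch of that dispatch pattern, with a simplified pointer type and illustrative stub functions (BCFun_t, the stubs, and the flag value below are assumptions for illustration, not Athena's actual declarations):

#include <stdio.h>

typedef void (*BCFun_t)(void); /* simplified boundary-function pointer type */

static void reflect_Phi_ix1(void)  { puts("apply reflecting BC at ix1"); }
static void periodic_Phi_ix1(void) { puts("apply periodic BC at ix1"); }

int main(void)
{
  BCFun_t ix1_GBCFun = NULL;
  int BCFlag_ix1 = 4; /* assumed input: 4 = periodic, as in the switch above */

  switch (BCFlag_ix1) {
  case 1: /* reflecting */
  case 5: /* reflecting, B_normal != 0 */
    ix1_GBCFun = reflect_Phi_ix1;
    break;
  case 4: /* periodic */
    ix1_GBCFun = periodic_Phi_ix1;
    break;
  default:
    fprintf(stderr, "BCFlag_ix1 = %d unknown\n", BCFlag_ix1);
    return 1;
  }

  ix1_GBCFun(); /* later, the stored pointer applies the selected BC */
  return 0;
}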
Example #9
0
int main(int argc, char* argv[])
{
    /* argument variables */
    int f1=0,f2=0,fi=1;
    char *defdir = ".";
    char *inbase = NULL, *postname = NULL;
    char *indir = defdir;
    /* file variables */
    FILE *fid;
    char fname[100];
    /* data variables */
    int i,*cpuid,*property,ntype;
    long p,n,*pid,*order;
    float header[20],**data,time[2],*typeinfo=NULL;

    /* Read Arguments */
    for (i=1; i<argc; i++) {
        /* If argv[i] is a 2 character string of the form "-?" then: */
        if(*argv[i] == '-'  && *(argv[i]+1) != '\0' && *(argv[i]+2) == '\0') {
            switch(*(argv[i]+1)) {
            case 'd':                                /* -d <indir> */
                indir = argv[++i];
                break;
            case 'i':                                /* -i <basename>   */
                inbase = argv[++i];
                break;
            case 's':                                /* -s <post-name> */
                postname = argv[++i];
                break;
            case 'f':                                /* -f <# range(f1:f2:fi)>*/
                sscanf(argv[++i],"%d:%d:%d",&f1,&f2,&fi);
                if (f2 == 0) f2 = f1;
                break;
            case 'h':                                /* -h */
                usage(argv[0]);
                break;
            default:
                usage(argv[0]);
                break;
            }
        }
    }

    /* Checkpoints */
    if (inbase == NULL)
        sort_error("Please specify input file basename using -i option!\n");

    if (postname == NULL)
        sort_error("Please specify posterior file name using -s option!\n");

    if ((f1>f2) || (f2<0) || (fi<=0))
        sort_error("Wrong number sequence in the -f option!\n");

    /* ====================================================================== */

    for (i=f1; i<=f2; i+=fi)
    {
        fprintf(stderr,"Processing file number %d...\n",i);

        /* Step 1: Read the file */
        sprintf(fname,"%s/%s.%04d.%s.lis",indir,inbase,i,postname);

        fid = fopen(fname,"rb");
        if (fid == NULL)
            sort_error("Fail to open output file %s!\n",fname);

        /* read header */
        fread(header,sizeof(float),12,fid);
        fread(&ntype,sizeof(int),1,fid);

        if (i == f1)
            typeinfo = (float*)calloc_1d_array(ntype,sizeof(float));

        fread(typeinfo,sizeof(float),ntype,fid);
        fread(time,sizeof(float),2,fid);
        fread(&n,sizeof(long),1,fid);

        data  = (float**)calloc_2d_array(n,7,sizeof(float));
        property = (int*)calloc_1d_array(n,sizeof(int));
        pid   = (long*)calloc_1d_array(n,sizeof(long));
        cpuid = (int*)calloc_1d_array(n,sizeof(int));
        order = (long*)calloc_1d_array(n,sizeof(long));

        /* read data */
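        /* assumed per-particle record layout, mirrored by the fwrite
         * calls in Step 3: 7 floats of particle data, then property
         * (int), particle id (long), and originating cpu id (int) */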
        for (p=0; p<n; p++)
        {
            fread(data[p],sizeof(float),7,fid);
            fread(&(property[p]),sizeof(int),1,fid);
            fread(&(pid[p]),sizeof(long),1,fid);
            fread(&(cpuid[p]),sizeof(int),1,fid);
        }

        fclose(fid);

        /* Step 2: sort the particles */
        for (p=0; p<n; p++)
            order[p] = p;

        quicksort(cpuid, pid, order, 0, n-1);

        /* Step 3: write back the ordered particle list */
        fid = fopen(fname,"wb");

        /* write header */
        fwrite(header,sizeof(float),12,fid);
        fwrite(&ntype,sizeof(int),1,fid);
        fwrite(typeinfo,sizeof(float),ntype,fid);
        fwrite(time,sizeof(float),2,fid);
        fwrite(&n,sizeof(long),1,fid);

        /* write data */
        for (p=0; p<n; p++)
        {
            fwrite(data[order[p]],sizeof(float),7,fid);
            fwrite(&property[order[p]],sizeof(int),1,fid); /* sorted order, like the other fields */
            fwrite(&pid[order[p]],sizeof(long),1,fid);
            fwrite(&cpuid[order[p]],sizeof(int),1,fid);
        }

        free_2d_array(data);
        free(property);
        free(pid);
        free(cpuid);
        free(order);

        fclose(fid);
    }

    if (typeinfo != NULL)
        free(typeinfo);

    return 0;
}
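
The quicksort(cpuid, pid, order, 0, n-1) call above is presumably a two-key sort: it reorders order[] so particles are grouped by cpuid, with ties broken by pid. A minimal sketch of the same ordering built on the standard library (the SortRec struct and sort_order helper are illustrative stand-ins, not the tool's actual quicksort):

#include <stdlib.h>

/* Assumed sort key: primary cpuid, secondary pid */
typedef struct { int cpuid; long pid; long idx; } SortRec;

static int cmp_rec(const void *a, const void *b)
{
  const SortRec *x = a, *y = b;
  if (x->cpuid != y->cpuid)
    return (x->cpuid > y->cpuid) - (x->cpuid < y->cpuid);
  return (x->pid > y->pid) - (x->pid < y->pid);
}

/* Fill order[] with the permutation that sorts particles by (cpuid,pid) */
static void sort_order(const int *cpuid, const long *pid, long *order, long n)
{
  long p;
  SortRec *recs = (SortRec*)malloc(n*sizeof(SortRec));
  if (recs == NULL) return;
  for (p=0; p<n; p++) {
    recs[p].cpuid = cpuid[p];
    recs[p].pid   = pid[p];
    recs[p].idx   = p;
  }
  qsort(recs, (size_t)n, sizeof(SortRec), cmp_rec);
  for (p=0; p<n; p++) order[p] = recs[p].idx;
  free(recs);
}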
Example #10
0
void problem(DomainS *pDomain)
{
  GridS *pGrid = pDomain->Grid;
  int i,j,k,ks,pt,tsmode;
  long p,q;
  Real ScaleHg,tsmin,tsmax,tscrit,amin,amax,Hparmin,Hparmax;
  Real *ep,*ScaleHpar,epsum,mratio,pwind,rhoaconv,etavk;
  Real *epsilon,*uxNSH,*uyNSH,**wxNSH,**wyNSH;
  Real rhog,h,x1,x2,x3,t,x1p,x2p,x3p,zmin,zmax,dx3_1,b;
  long int iseed = myID_Comm_world; /* Initialize on the first call to ran2 */

  if (pDomain->Nx[2] == 1) {
    ath_error("[par_strat3d]: par_strat3d only works for 3D problem.\n");
  }
  
#ifdef MPI_PARALLEL
  if (pDomain->NGrid[2] > 2) {
    ath_error(
      "[par_strat3d]: The z-domain cannot be decomposed into more than 2 grids\n");
  }
#endif

/* Initialize boxsize */
  x1min = pGrid->MinX[0];
  x1max = pGrid->MaxX[0];
  Lx = x1max - x1min;

  x2min = pGrid->MinX[1];
  x2max = pGrid->MaxX[1];
  Ly = x2max - x2min;

  x3min = par_getd("domain1","x3min");
  x3max = par_getd("domain1","x3max");
  Lz = x3max - x3min;

  Lg = nghost*pGrid->dx3; /* size of the ghost zone */

  ks = pGrid->ks;

/* Read initial conditions */
  Omega_0 = par_getd("problem","omega");
  qshear = par_getd_def("problem","qshear",1.5);
  ipert = par_geti_def("problem","ipert",1);
  vsc1 = par_getd_def("problem","vsc1",0.05); /* in units of the isothermal sound speed */
  vsc2 = par_getd_def("problem","vsc2",0.0);

  vsc1 = vsc1 * Iso_csound;
  vsc2 = vsc2 * Iso_csound;

  ScaleHg = Iso_csound/Omega_0;

  /* particle number */
  Npar  = (long)(par_geti("particle","parnumgrid"));

  pGrid->nparticle = Npar*npartypes;
  for (i=0; i<npartypes; i++)
    grproperty[i].num = Npar;

  if (pGrid->nparticle+2 > pGrid->arrsize)
    particle_realloc(pGrid, pGrid->nparticle+2);

  ep = (Real*)calloc_1d_array(npartypes, sizeof(Real));
  ScaleHpar = (Real*)calloc_1d_array(npartypes, sizeof(Real));

  epsilon = (Real*)calloc_1d_array(npartypes, sizeof(Real));
  wxNSH   = (Real**)calloc_2d_array(pGrid->Nx[2]+1, npartypes,sizeof(Real));
  wyNSH   = (Real**)calloc_2d_array(pGrid->Nx[2]+1, npartypes,sizeof(Real));
  uxNSH   = (Real*)calloc_1d_array(pGrid->Nx[2]+1, sizeof(Real));
  uyNSH   = (Real*)calloc_1d_array(pGrid->Nx[2]+1, sizeof(Real));

  /* particle stopping time */
  tsmode = par_geti("particle","tsmode");
  if (tsmode == 3) {/* fixed stopping time */
    tsmin = par_getd("problem","tsmin"); /* in code unit */
    tsmax = par_getd("problem","tsmax");
    tscrit= par_getd("problem","tscrit");

    for (i=0; i<npartypes; i++) {
      tstop0[i] = tsmin*exp(i*log(tsmax/tsmin)/MAX(npartypes-1,1.0));
      grproperty[i].rad = tstop0[i];
      /* use fully implicit integrator for well coupled particles */
      if (tstop0[i] < tscrit) grproperty[i].integrator = 3;
    }
  }
  else { 
    amin = par_getd("problem","amin");
    amax = par_getd("problem","amax");

    for (i=0; i<npartypes; i++)
      grproperty[i].rad = amin*exp(i*log(amax/amin)/MAX(npartypes-1,1.0));

    if (tsmode <= 2) {/* Epstein/General regime */
      /* conversion factor for rhoa */
      rhoaconv = par_getd_def("problem","rhoaconv",1.0);

      for (i=0; i<npartypes; i++)
        grrhoa[i]=grproperty[i].rad*rhoaconv;
    }

    if (tsmode == 1)  /* General drag formula */
      alamcoeff = par_getd("problem","alamcoeff");
  }

  /* particle scale height */
  Hparmax = par_getd("problem","hparmax"); /* in unit of gas scale height */
  Hparmin = par_getd("problem","hparmin");
  for (i=0; i<npartypes; i++) 
    ScaleHpar[i] = Hparmax*
                   exp(-i*log(Hparmax/Hparmin)/MAX(npartypes-1,1.0));
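  /* Note: like tstop0 and grproperty[i].rad above, ScaleHpar is
   * geometrically spaced: npartypes equal logarithmic steps, here
   * running down from Hparmax to Hparmin. */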

#ifdef FEEDBACK
  mratio = par_getd_def("problem","mratio",0.0); /* total mass fraction */
  pwind = par_getd_def("problem","pwind",0.0);   /* power law index */
  if (mratio < 0.0)
    ath_error("[par_strat2d]: mratio must be positive!\n");

  epsum = 0.0;
  for (i=0; i<npartypes; i++)
  {
    ep[i] = pow(grproperty[i].rad,pwind);
    epsum += ep[i];
  }

  for (i=0; i<npartypes; i++)
  {
    ep[i] = mratio*ep[i]/epsum;
    grproperty[i].m = sqrt(2.0*PI)*ScaleHg/Lz*ep[i]*
                                   pGrid->Nx[0]*pGrid->Nx[1]*pGrid->Nx[2]/Npar;
  }
#else
  mratio = 0.0;
  for (i=0; i<npartypes; i++)
    ep[i] = 0.0;
#endif

  /* NSH equilibrium */
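  /* At each cell-face height h: compute the local dust-to-gas ratio
   * epsilon[i] of each type from its Gaussian vertical profile (the erf
   * factor normalizes over the finite box), refresh the stopping times
   * unless they are fixed (tsmode==3), then solve the multi-species NSH
   * equilibrium for the gas (u) and dust (w) velocities at that height. */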
  for (k=pGrid->ks; k<=pGrid->ke+1; k++) {

    h = pGrid->MinX[2] + (k-pGrid->ks)*pGrid->dx3;
    q = k - ks;
    etavk = fabs(vsc1+vsc2*SQR(h));

    for (i=0; i<npartypes; i++) {
      epsilon[i] = ep[i]/ScaleHpar[i]*exp(-0.5*SQR(h/ScaleHg)
         *(SQR(1.0/ScaleHpar[i])-1.0))/erf(Lz/(sqrt(8.0)*ScaleHpar[i]*ScaleHg));

      if (tsmode != 3)
        tstop0[i] = get_ts(pGrid,i,exp(-0.5*SQR(h/ScaleHg)),Iso_csound,etavk);
    }

    MultiNSH(npartypes, tstop0, epsilon, etavk,
                              &uxNSH[q], &uyNSH[q], wxNSH[q], wyNSH[q]);
  }

/* Now set initial conditions for the gas */
  for (k=pGrid->ks; k<=pGrid->ke; k++) {
  for (j=pGrid->js; j<=pGrid->je; j++) {
  for (i=pGrid->is; i<=pGrid->ie; i++) {
    cc_pos(pGrid,i,j,k,&x1,&x2,&x3);

    rhog = exp(-0.5*SQR(x3/ScaleHg));
    pGrid->U[k][j][i].d = rhog;

    if (ipert != 1) {/* NSH velocity */
      pGrid->U[k][j][i].M1 = 0.5*rhog*(uxNSH[k-ks]+uxNSH[k-ks+1]);
      pGrid->U[k][j][i].M2 = 0.5*rhog*(uyNSH[k-ks]+uyNSH[k-ks+1]);
    } else {
      pGrid->U[k][j][i].M1 = 0.0;
      pGrid->U[k][j][i].M2 = 0.0;
    }

    pGrid->U[k][j][i].M3 = 0.0;
#ifndef FARGO
    pGrid->U[k][j][i].M2 -= qshear*rhog*Omega_0*x1;
#endif

  }}}

/* Now set initial conditions for the particles */
  p = 0;
  dx3_1 = 1.0/pGrid->dx3;
  zmin = pGrid->MinX[2];
  zmax = pGrid->MaxX[2];

  for (q=0; q<Npar; q++) {

    for (pt=0; pt<npartypes; pt++) {

      x1p = x1min + Lx*ran2(&iseed);
      x2p = x2min + Ly*ran2(&iseed);
      x3p = ScaleHpar[pt]*ScaleHg*Normal(&iseed);
      while ((x3p >= zmax) || (x3p < zmin))
        x3p = ScaleHpar[pt]*ScaleHg*Normal(&iseed);

      pGrid->particle[p].property = pt;
      pGrid->particle[p].x1 = x1p;
      pGrid->particle[p].x2 = x2p;
      pGrid->particle[p].x3 = x3p;

      if (ipert != 1) {/* NSH velocity */

        cellk(pGrid, x3p, dx3_1, &k, &b);
        k = k-pGrid->ks;  b = b - pGrid->ks;

        pGrid->particle[p].v1 = (k+1-b)*wxNSH[k][pt]+(b-k)*wxNSH[k+1][pt];
        pGrid->particle[p].v2 = (k+1-b)*wyNSH[k][pt]+(b-k)*wyNSH[k+1][pt];

      } else {

        pGrid->particle[p].v1 = 0.0;
        pGrid->particle[p].v2 = vsc1+vsc2*SQR(x3p); /* drift depends on height, cf. etavk above */

      }

      pGrid->particle[p].v3 = 0.0;
#ifndef FARGO
      pGrid->particle[p].v2 -= qshear*Omega_0*x1p;
#endif

      pGrid->particle[p].pos = 1; /* grid particle */
      pGrid->particle[p].my_id = p;
#ifdef MPI_PARALLEL
      pGrid->particle[p].init_id = myID_Comm_world;
#endif
      p++;
  }}

/* enroll gravitational potential function, shearing sheet BC functions */
  ShearingBoxPot = StratifiedDisk;

  dump_history_enroll(hst_rho_Vx_dVy, "<rho Vx dVy>");

  /* set the # of the particles in list output
   * (by default, output 1 particle per cell)
   */
  nlis = par_geti_def("problem","nlis",pGrid->Nx[0]*pGrid->Nx[1]*pGrid->Nx[2]);

  /* set the number of particles to keep track of */
  ntrack = par_geti_def("problem","ntrack",2000);

  /* set the threshold particle density */
  dpar_thresh = par_getd_def("problem","dpar_thresh",10.0);

  /* finalize */
  free(ep);  free(ScaleHpar);
  free(epsilon);
  free_2d_array(wxNSH);  free_2d_array(wyNSH);
  free(uxNSH);           free(uyNSH);

  return;
}
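
The particle velocities above are linearly interpolated from the face-centered NSH profiles: cellk returns the fractional vertical coordinate b, and the weights (k+1-b) and (b-k) blend the two bracketing faces. A minimal sketch of that interpolation (the interp_face name and double type are illustrative):

/* Linear interpolation of a face-centered profile w[] at fractional
 * coordinate b, with k = (int)b the lower face, as used for wxNSH/wyNSH */
static double interp_face(const double *w, double b)
{
  int k = (int)b;
  return (k+1-b)*w[k] + (b-k)*w[k+1];
}

At b = k the particle sits on face k and receives w[k] exactly; at b = k+1 it receives w[k+1].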