Example No. 1
void bvals_grav_fun(DomainS *pD, enum BCDirection dir, VGFun_t prob_bc)
{
  switch(dir){
  case left_x1:
    pD->ix1_GBCFun = prob_bc;
    break;
  case right_x1:
    pD->ox1_GBCFun = prob_bc;
    break;
  case left_x2:
    pD->ix2_GBCFun = prob_bc;
    break;
  case right_x2:
    pD->ox2_GBCFun = prob_bc;
    break;
  case left_x3:
    pD->ix3_GBCFun = prob_bc;
    break;
  case right_x3:
    pD->ox3_GBCFun = prob_bc;
    break;
  default:
    ath_perr(-1,"[bvals_grav_fun]: Unknown direction = %d\n",dir);
    exit(EXIT_FAILURE);
  }
  return;
}
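
A problem generator enrolls its own gravitational-potential boundary condition through this setter. A minimal usage sketch, assuming a hypothetical user callback my_grav_ix1 with the VGFun_t signature (a void function of a GridS pointer):

static void my_grav_ix1(GridS *pG);  /* hypothetical: fill Phi in the ix1 ghost zones */

void problem(DomainS *pDomain)
{
  /* ... usual problem setup ... */

  /* enroll the user boundary function on the inner-x1 face */
  bvals_grav_fun(pDomain, left_x1, my_grav_ix1);
}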
Example No. 2
char *ath_strdup(const char *in)
{
  char *out = (char *)malloc((1+strlen(in))*sizeof(char));
  if(out == NULL) {
    ath_perr(-1,"ath_strdup: failed to alloc %d\n",(int)(1+strlen(in)));
    return NULL; /* malloc failed */
  }
  return strcpy(out,in);
}
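
The copy comes from malloc, so the caller owns it and must free it; a NULL return signals allocation failure. A short usage sketch (the wrapper name copy_output_id is hypothetical):

static char *copy_output_id(void)
{
  char *id = ath_strdup("out1");   /* caller owns the returned copy */
  if (id == NULL)
    ath_error("[copy_output_id]: ath_strdup failed\n");
  return id;                       /* caller calls free(id) when finished */
}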
Example No. 3
void Userwork_after_loop(MeshS *pM)
{
  GridS *pGrid;
  int i=0,j=0,k=0;
  int is,ie,js,je,ks,ke;
  Real rms_error=0.0;
  ConsS error,total_error;
  FILE *fp;
  char *fname;
  int Nx1, Nx2, Nx3, count, min_zones;
#if defined MPI_PARALLEL
  double err[8+(NSCALARS)], tot_err[8+(NSCALARS)];
  int ierr,myID;
#endif
#if (NSCALARS > 0)
  int n;
#endif

  int err_test = par_getd_def("problem","error_test",0);
  if (err_test == 0) return;

  total_error.d = 0.0;
  total_error.M1 = 0.0;
  total_error.M2 = 0.0;
  total_error.M3 = 0.0;
#ifdef MHD
  total_error.B1c = 0.0;
  total_error.B2c = 0.0;
  total_error.B3c = 0.0;
#endif /* MHD */
#ifndef ISOTHERMAL
  total_error.E = 0.0;
#endif /* ISOTHERMAL */
#if (NSCALARS > 0)
  for (n=0; n<NSCALARS; n++) total_error.s[n] = 0.0;
#endif

/* Compute error only on root Grid, which is in Domain[0][0] */

  pGrid=pM->Domain[0][0].Grid;
  if (pGrid == NULL) return;

/* compute L1 error in each variable, and rms total error */

  is = pGrid->is; ie = pGrid->ie;
  js = pGrid->js; je = pGrid->je;
  ks = pGrid->ks; ke = pGrid->ke;
  for (k=ks; k<=ke; k++) {
  for (j=js; j<=je; j++) {
    error.d = 0.0;
    error.M1 = 0.0;
    error.M2 = 0.0;
    error.M3 = 0.0;
#ifdef MHD
    error.B1c = 0.0;
    error.B2c = 0.0;
    error.B3c = 0.0;
#endif /* MHD */
#ifndef ISOTHERMAL
    error.E = 0.0;
#endif /* ISOTHERMAL */
#if (NSCALARS > 0)
    for (n=0; n<NSCALARS; n++) error.s[n] = 0.0;
#endif

    for (i=is; i<=ie; i++) {
      error.d   += fabs(pGrid->U[k][j][i].d   - RootSoln[k][j][i].d );
      error.M1  += fabs(pGrid->U[k][j][i].M1  - RootSoln[k][j][i].M1);
      error.M2  += fabs(pGrid->U[k][j][i].M2  - RootSoln[k][j][i].M2);
      error.M3  += fabs(pGrid->U[k][j][i].M3  - RootSoln[k][j][i].M3); 
#ifdef MHD
      error.B1c += fabs(pGrid->U[k][j][i].B1c - RootSoln[k][j][i].B1c);
      error.B2c += fabs(pGrid->U[k][j][i].B2c - RootSoln[k][j][i].B2c);
      error.B3c += fabs(pGrid->U[k][j][i].B3c - RootSoln[k][j][i].B3c);
#endif /* MHD */
#ifndef ISOTHERMAL
      error.E   += fabs(pGrid->U[k][j][i].E   - RootSoln[k][j][i].E );
#endif /* ISOTHERMAL */
#if (NSCALARS > 0)
      for (n=0; n<NSCALARS; n++)
        error.s[n] += fabs(pGrid->U[k][j][i].s[n] - RootSoln[k][j][i].s[n]);
#endif
    }

    total_error.d += error.d;
    total_error.M1 += error.M1;
    total_error.M2 += error.M2;
    total_error.M3 += error.M3;
#ifdef MHD
    total_error.B1c += error.B1c;
    total_error.B2c += error.B2c;
    total_error.B3c += error.B3c;
#endif /* MHD */
#ifndef ISOTHERMAL
    total_error.E += error.E;
#endif /* ISOTHERMAL */
#if (NSCALARS > 0)
    for (n=0; n<NSCALARS; n++) total_error.s[n] += error.s[n];
#endif
  }}

#ifdef MPI_PARALLEL
  Nx1 = pM->Domain[0][0].Nx[0];
  Nx2 = pM->Domain[0][0].Nx[1];
  Nx3 = pM->Domain[0][0].Nx[2];
#else
  Nx1 = ie - is + 1;
  Nx2 = je - js + 1;
  Nx3 = ke - ks + 1;
#endif
  count = Nx1*Nx2*Nx3;

#ifdef MPI_PARALLEL 
/* Now use an MPI_Reduce to sum the errors over all the MPI grids onto the root
 * process.  Begin by copying the errors into the err[] array */

  err[0] = total_error.d;
  err[1] = total_error.M1;
  err[2] = total_error.M2;
  err[3] = total_error.M3;
#ifdef MHD
  err[4] = total_error.B1c;
  err[5] = total_error.B2c;
  err[6] = total_error.B3c;
#endif /* MHD */
#ifndef ISOTHERMAL
  err[7] = total_error.E;
#endif /* ISOTHERMAL */
#if (NSCALARS > 0)
  for (n=0; n<NSCALARS; n++) err[8+n] = total_error.s[n];
#endif

  /* Sum up the Computed Error */
  ierr = MPI_Reduce(err,tot_err,(8+(NSCALARS)),MPI_DOUBLE,MPI_SUM,0,
    pM->Domain[0][0].Comm_Domain);

/* If I'm the parent, copy the sum back to the total_error variable */

  ierr = MPI_Comm_rank(pM->Domain[0][0].Comm_Domain, &myID);
  if(myID == 0){ /* I'm the parent */
    total_error.d   = tot_err[0];
    total_error.M1  = tot_err[1];
    total_error.M2  = tot_err[2];
    total_error.M3  = tot_err[3];
#ifdef MHD
    total_error.B1c = tot_err[4];
    total_error.B2c = tot_err[5];
    total_error.B3c = tot_err[6];
#endif /* MHD */
#ifndef ISOTHERMAL
    total_error.E   = tot_err[7];
#endif /* ISOTHERMAL */
#if (NSCALARS > 0)
    for (n=0; n<NSCALARS; n++) total_error.s[n] = tot_err[8+n];
#endif
  }
  else return; /* The child grids do not do any of the following code */

#endif /* MPI_PARALLEL */

/* Compute RMS error over all variables, and print out */

  rms_error = SQR(total_error.d) + SQR(total_error.M1) + SQR(total_error.M2)
                + SQR(total_error.M3);
#ifdef MHD
  rms_error += SQR(total_error.B1c) + SQR(total_error.B2c) 
               + SQR(total_error.B3c);
#endif /* MHD */
#ifndef ISOTHERMAL
  rms_error += SQR(total_error.E);
#endif /* ISOTHERMAL */
#if (NSCALARS > 0)
  for (n=0; n<NSCALARS; n++) rms_error += SQR(total_error.s[n]);
#endif
  rms_error = sqrt(rms_error)/(double)count;
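/* i.e. rms_error = sqrt( (sum_d)^2 + (sum_M1)^2 + ... ) / Ncells, where each
 * sum_* is the L1 error total accumulated over all cells above. */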

/* Print warning to stdout if rms_error exceeds estimate of 1st-order conv */
/* For 1D, assume shock propagates along direction with MAX number of zones */

  min_zones = Nx1;
  if (Nx2 > 1) min_zones = MAX(min_zones,Nx2);
  if (Nx3 > 1) min_zones = MAX(min_zones,Nx3);
  if (rms_error > 8.0/min_zones)
    printf("WARNING: rms_error=%e exceeds estimate\n",rms_error);

/* Print error to file "shock-errors.dat" */

#ifdef MPI_PARALLEL
  fname = "../shock-errors.dat";
#else
  fname = "shock-errors.dat";
#endif

/* The file exists -- reopen the file in append mode */
  if((fp=fopen(fname,"r")) != NULL){
    if((fp = freopen(fname,"a",fp)) == NULL){
      ath_perr(-1,"[Userwork_after_loop]: Unable to reopen file.\n");
      return;
    }
  }
/* The file does not exist -- open the file in write mode */
  else{
    if((fp = fopen(fname,"w")) == NULL){
      ath_perr(-1,"[Userwork_after_loop]: Unable to open file.\n");
      return;
    }
/* Now write out some header information */
    fprintf(fp,"# Nx1  Nx2  Nx3  RMS-Error  d  M1  M2  M3");
#ifndef ISOTHERMAL
    fprintf(fp,"  E");
#endif /* ISOTHERMAL */
#ifdef MHD
    fprintf(fp,"  B1c  B2c  B3c");
#endif /* MHD */
#if (NSCALARS > 0)
    for (n=0; n<NSCALARS; n++) {
      fprintf(fp,"  S[ %d ]",n);
    }
#endif
    fprintf(fp,"\n#\n");
  }

  fprintf(fp,"%d  %d  %d  %e",Nx1,Nx2,Nx3,rms_error);

  fprintf(fp,"  %e  %e  %e  %e",
	  (total_error.d/(double)count),
	  (total_error.M1/(double)count),
	  (total_error.M2/(double)count),
	  (total_error.M3/(double)count) );

#ifndef ISOTHERMAL
  fprintf(fp,"  %e",(total_error.E/(double)count) );
#endif /* ISOTHERMAL */

#ifdef MHD
  fprintf(fp,"  %e  %e  %e",
	  (total_error.B1c/(double)count),
	  (total_error.B2c/(double)count),
	  (total_error.B3c/(double)count));
#endif /* MHD */

#if (NSCALARS > 0)
  for (n=0; n<NSCALARS; n++) {
    fprintf(fp,"  %e",total_error.s[n]/(double)count);
  }
#endif

  fprintf(fp,"\n");

  fclose(fp);

  return;
}
Example No. 4
void dump_history(MeshS *pM, OutputS *pOut)
{
  GridS *pG;
  DomainS *pD;
  int i,j,k,is,ie,js,je,ks,ke,nl,nd;
  double dVol, scal[NSCAL + NSCALARS + MAX_USR_H_COUNT], d1;
  FILE *pfile;
  char *fname,*plev=NULL,*pdom=NULL,*pdir=NULL,fmt[80];
  char levstr[8],domstr[8],dirstr[20];
  int n, total_hst_cnt, mhst, myID_Comm_Domain=1;
#ifdef MPI_PARALLEL
  double my_scal[NSCAL + NSCALARS + MAX_USR_H_COUNT]; /* My Volume averages */
  int ierr;
#endif
#ifdef CYLINDRICAL
  Real x1,x2,x3;
#endif
#ifdef SPECIAL_RELATIVITY
  PrimS W;
  Real g, g2, g_2;
  Real bx, by, bz, vB, b2, Bmag2;
#endif


  total_hst_cnt = 9 + NSCALARS + usr_hst_cnt;
#ifdef ADIABATIC
  total_hst_cnt++;
#endif
#ifdef MHD
  total_hst_cnt += 3;
#endif
#ifdef SELF_GRAVITY
  total_hst_cnt += 1;
#endif
#ifdef CYLINDRICAL
  total_hst_cnt++;  /* for angular momentum */
#endif
#ifdef SPECIAL_RELATIVITY
  total_hst_cnt = 12 + usr_hst_cnt;
#ifdef MHD
   total_hst_cnt += 6;
#endif
#endif

/* Add a white space to the format */
  if(pOut->dat_fmt == NULL){
    sprintf(fmt," %%14.6e"); /* Use a default format */
  }
  else{
    sprintf(fmt," %s",pOut->dat_fmt);
  }

/* store time and dt in first two elements of output vector */

  scal[0] = pM->time;
  scal[1] = pM->dt;

/* Loop over all Domains in Mesh, and output Grid data */

  for (nl=0; nl<(pM->NLevels); nl++){

    for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){

      if (pM->Domain[nl][nd].Grid != NULL){
        //printf("calculating local sum ... %d\n",myID_Comm_world);
        pG = pM->Domain[nl][nd].Grid;
        pD = (DomainS*)&(pM->Domain[nl][nd]);
        is = pG->is, ie = pG->ie;
        js = pG->js, je = pG->je;
        ks = pG->ks, ke = pG->ke;

        for (i=2; i<total_hst_cnt; i++) {
          scal[i] = 0.0;
        }
 
/* Compute history variables */

        for (k=ks; k<=ke; k++) {
          for (j=js; j<=je; j++) {
            for (i=is; i<=ie; i++) {
              dVol = 1.0; 
              if (pG->dx1 > 0.0) dVol *= pG->dx1;
              if (pG->dx2 > 0.0) dVol *= pG->dx2;
              if (pG->dx3 > 0.0) dVol *= pG->dx3;
#ifndef SPECIAL_RELATIVITY
#ifdef CYLINDRICAL
              cc_pos(pG,i,j,k,&x1,&x2,&x3);
              dVol *= x1;
#endif

              mhst = 2;
              scal[mhst] += dVol*pG->U[k][j][i].d;
              d1 = 1.0/pG->U[k][j][i].d;
#ifndef BAROTROPIC
              mhst++;
              scal[mhst] += dVol*pG->U[k][j][i].E;
#endif
              mhst++;
              scal[mhst] += dVol*pG->U[k][j][i].M1;
              mhst++;
              scal[mhst] += dVol*pG->U[k][j][i].M2;
              mhst++;
              scal[mhst] += dVol*pG->U[k][j][i].M3;
              mhst++;
              scal[mhst] += dVol*0.5*SQR(pG->U[k][j][i].M1)*d1;
              mhst++;
              scal[mhst] += dVol*0.5*SQR(pG->U[k][j][i].M2)*d1;
              mhst++;
              scal[mhst] += dVol*0.5*SQR(pG->U[k][j][i].M3)*d1;
#ifdef MHD
              mhst++;
              scal[mhst] += dVol*0.5*SQR(pG->U[k][j][i].B1c);
              mhst++;
              scal[mhst] += dVol*0.5*SQR(pG->U[k][j][i].B2c);
              mhst++;
              scal[mhst] += dVol*0.5*SQR(pG->U[k][j][i].B3c);
#endif
#ifdef SELF_GRAVITY
              mhst++;
              scal[mhst] += dVol*pG->U[k][j][i].d*pG->Phi[k][j][i];
#endif
#if (NSCALARS > 0)
              for(n=0; n<NSCALARS; n++){
                mhst++;
                scal[mhst] += dVol*pG->U[k][j][i].s[n];
              }
#endif

#ifdef CYLINDRICAL
              mhst++;
              scal[mhst] += dVol*(x1*pG->U[k][j][i].M2);
#endif

#else /* SPECIAL_RELATIVITY */

              W = Cons_to_Prim (&(pG->U[k][j][i]));
        
              /* calculate gamma */
              g   = pG->U[k][j][i].d/W.d;
              g2  = SQR(g);
              g_2 = 1.0/g2;

              mhst = 2;
              scal[mhst] += dVol*pG->U[k][j][i].d;
              mhst++;
              scal[mhst] += dVol*pG->U[k][j][i].E;
              mhst++;
              scal[mhst] += dVol*pG->U[k][j][i].M1;
              mhst++;
              scal[mhst] += dVol*pG->U[k][j][i].M2;
              mhst++;
              scal[mhst] += dVol*pG->U[k][j][i].M3;

              mhst++;
              scal[mhst] += dVol*SQR(g);
              mhst++;
              scal[mhst] += dVol*SQR(g*W.V1);
              mhst++;
              scal[mhst] += dVol*SQR(g*W.V2);
              mhst++;
              scal[mhst] += dVol*SQR(g*W.V3);

	      mhst++;
	      scal[mhst] += dVol*W.P;

#ifdef MHD

              vB = W.V1*pG->U[k][j][i].B1c + W.V2*W.B2c + W.V3*W.B3c;
              Bmag2 = SQR(pG->U[k][j][i].B1c) + SQR(W.B2c) + SQR(W.B3c);
        
              bx = g*(pG->U[k][j][i].B1c*g_2 + vB*W.V1);
              by = g*(W.B2c*g_2 + vB*W.V2);
              bz = g*(W.B3c*g_2 + vB*W.V3);
        
              b2 = Bmag2*g_2 + vB*vB;

              mhst++;
              scal[mhst] += dVol*(g*vB*g*vB);
              mhst++;
              scal[mhst] += dVol*bx*bx;
              mhst++;
              scal[mhst] += dVol*by*by;
              mhst++;
              scal[mhst] += dVol*bz*bz;
              mhst++;
              scal[mhst] += dVol*b2;
              mhst++;
              scal[mhst] += dVol*(Bmag2*(1.0 - 0.5*g_2) - SQR(vB) / 2.0);

#endif /* MHD */

#endif  /* SPECIAL_RELATIVITY */

/* Calculate the user defined history variables */
              for(n=0; n<usr_hst_cnt; n++){
                mhst++;
                scal[mhst] += dVol*(*phst_fun[n])(pG, i, j, k);
              }
            }
          }
        }

/* Compute the sum over all Grids in Domain */
        //printf("calculating global sum ... %d\n",myID_Comm_world);

#ifdef MPI_PARALLEL 
        for(i=2; i<total_hst_cnt; i++){
          my_scal[i] = scal[i];
        }
        ierr = MPI_Reduce(&(my_scal[2]), &(scal[2]), (total_hst_cnt - 2),
          MPI_DOUBLE, MPI_SUM, 0, pD->Comm_Domain);
#endif

/* Only the parent (rank=0) process computes the average and writes output.
 * For single-processor jobs, myID_Comm_world is always zero. */

#ifdef MPI_PARALLEL
        ierr = MPI_Comm_rank(pD->Comm_Domain, &myID_Comm_Domain);
#endif
        if((myID_Comm_Domain==0) || (myID_Comm_world==0)){  /* I'm the parent */

/* Compute volume averages */
//          printf("dump history ... %d\n",myID_Comm_world);

          dVol = pD->MaxX[0] - pD->MinX[0];
#ifdef CYLINDRICAL
          dVol = 0.5*(SQR(pD->MaxX[0]) - SQR(pD->MinX[0]));
#endif
          if (pD->Nx[1] > 1) dVol *= (pD->MaxX[1] - pD->MinX[1]);
          if (pD->Nx[2] > 1) dVol *= (pD->MaxX[2] - pD->MinX[2]);
          for(i=2; i<total_hst_cnt; i++){
            scal[i] /= dVol;
          }

/* Create filename and open file.  History files are always written in lev#
 * directories of root process (rank=0 in MPI_COMM_WORLD) */
#ifdef MPI_PARALLEL
          if (nl>0) {
            plev = &levstr[0];
            sprintf(plev,"lev%d",nl);
            pdir = &dirstr[0];
            sprintf(pdir,"../id0/lev%d",nl);
          }
#else
          if (nl>0) {
            plev = &levstr[0];
            sprintf(plev,"lev%d",nl);
            pdir = &dirstr[0];
            sprintf(pdir,"lev%d",nl);
          }
#endif

          if (nd>0) {
            pdom = &domstr[0];
            sprintf(pdom,"dom%d",nd);
          }

          fname = ath_fname(pdir,pM->outfilename,plev,pdom,0,0,NULL,"hst");
          if(fname == NULL){
            ath_perr(-1,"[dump_history]: Unable to create history filename\n");
            return;
          }
          if(pOut->num == 0) pfile = fopen(fname,"w");
          else pfile = fopen(fname,"a");
          if(pfile == NULL){
            ath_perr(-1,"[dump_history]: Unable to open the history file\n");
            free(fname);
            return;
          }
          free(fname);

/* Write out column headers, but only for first dump */

          mhst = 0;
          if(pOut->num == 0){
            fprintf(pfile,
         "# Athena history dump for level=%i domain=%i volume=%e\n",nl,nd,dVol);
            mhst++;
            fprintf(pfile,"#   [%i]=time   ",mhst);
            mhst++;
            fprintf(pfile,"   [%i]=dt      ",mhst);
#ifndef SPECIAL_RELATIVITY
            mhst++;
            fprintf(pfile,"   [%i]=mass    ",mhst);
#ifdef ADIABATIC
            mhst++;
            fprintf(pfile,"   [%i]=total E ",mhst);
#endif
            mhst++;
            fprintf(pfile,"   [%i]=x1 Mom. ",mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x2 Mom. ",mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x3 Mom. ",mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x1-KE   ",mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x2-KE   ",mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x3-KE   ",mhst);
#ifdef MHD
            mhst++;
            fprintf(pfile,"   [%i]=x1-ME   ",mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x2-ME   ",mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x3-ME   ",mhst);
#endif
#ifdef SELF_GRAVITY
            mhst++;
            fprintf(pfile,"   [%i]=grav PE ",mhst);
#endif
#if (NSCALARS > 0)
            for(n=0; n<NSCALARS; n++){
              mhst++;
              fprintf(pfile,"  [%i]=scalar %i",mhst,n);
            }
#endif

#ifdef CYLINDRICAL
            mhst++;
            fprintf(pfile,"   [%i]=Ang.Mom.",mhst);
#endif

#else /* SPECIAL_RELATIVITY */
            mhst++;
            fprintf(pfile,"   [%i]=mass    ",mhst);
            mhst++;
            fprintf(pfile,"   [%i]=total E ",mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x1 Mom. ",mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x2 Mom. ",mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x3 Mom." ,mhst);
            mhst++;
            fprintf(pfile,"   [%i]=Gamma   ",mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x1-KE   ",mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x2-KE   ",mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x3-KE  " ,mhst);
            mhst++;
            fprintf(pfile,"   [%i]=Press  " ,mhst);
#ifdef MHD
            mhst++;
            fprintf(pfile,"   [%i]=x0-ME  " ,mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x1-ME  " ,mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x2-ME  " ,mhst);
            mhst++;
            fprintf(pfile,"   [%i]=x3-ME  " ,mhst);
            mhst++;
            fprintf(pfile,"   [%i]=bsq    " ,mhst);
            mhst++;
            fprintf(pfile,"   [%i]=T^00_EM" ,mhst);
#endif
#endif /* SPECIAL_RELATIVITY */

            for(n=0; n<usr_hst_cnt; n++){
              mhst++;
              fprintf(pfile,"  [%i]=%s",mhst,usr_label[n]);
            }
            fprintf(pfile,"\n#\n");
          }

/* Write out data, and close file */

          for (i=0; i<total_hst_cnt; i++) {
           //printf("dump history data %d ... %d\n",i,myID_Comm_world);
            fprintf(pfile,fmt,scal[i]);
          }
          fprintf(pfile,"\n");
          fclose(pfile);
  
        }
      }
    }
  }

  return;
}
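The per-cell work above reduces to one pattern: weight each cell by its volume element, sum over the local Grid, reduce across the Domain communicator, and normalize by the Domain volume. A minimal sketch of that pattern for a single quantity, assuming a Cartesian mesh (the helper name volume_averaged_density is hypothetical):

static double volume_averaged_density(const GridS *pG, const DomainS *pD)
{
  int i,j,k;
  double dVol, sum = 0.0, vol;
#ifdef MPI_PARALLEL
  double gsum;
  int ierr;
#endif

  for (k=pG->ks; k<=pG->ke; k++) {
    for (j=pG->js; j<=pG->je; j++) {
      for (i=pG->is; i<=pG->ie; i++) {
        dVol = 1.0;
        if (pG->dx1 > 0.0) dVol *= pG->dx1;
        if (pG->dx2 > 0.0) dVol *= pG->dx2;
        if (pG->dx3 > 0.0) dVol *= pG->dx3;
        sum += dVol*pG->U[k][j][i].d;
      }
    }
  }

#ifdef MPI_PARALLEL
  ierr = MPI_Reduce(&sum, &gsum, 1, MPI_DOUBLE, MPI_SUM, 0, pD->Comm_Domain);
  sum = gsum;   /* meaningful on rank 0 of the Domain communicator only */
#endif

  vol = pD->MaxX[0] - pD->MinX[0];
  if (pD->Nx[1] > 1) vol *= (pD->MaxX[1] - pD->MinX[1]);
  if (pD->Nx[2] > 1) vol *= (pD->MaxX[2] - pD->MinX[2]);
  return sum/vol;
}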
Example No. 5
void init_output(MeshS *pM)
{
  int i,j,outn,maxout;
  char block[80], *fmt, defid[10];
  OutputS new_out;
  int usr_expr_flag;

  maxout = par_geti_def("job","maxout",MAXOUT_DEFAULT);

/* allocate output array */

  if((OutArray = (OutputS *)malloc(maxout*sizeof(OutputS))) == NULL){
    ath_error("[init_output]: Error allocating output array\n");
  }

/*--- loop over maxout output blocks, reading parameters into a temporary -----*
 *--- OutputS called new_out --------------------------------------------------*/

  for (outn=1; outn<=maxout; outn++) {

    sprintf(block,"output%d",outn);

/* An output format or output name is required.
 * If neither is present we write an error message and move on. */
    if((par_exist(block,"out_fmt") == 0) && (par_exist(block,"name") == 0)){
      ath_perr(-1,"[init_output]: neither %s/out_fmt, nor %s/name exist\n",
	       block, block);
      continue;
    }

/* Zero (NULL) all members of the temporary OutputS structure "new_out" */
    memset(&new_out,0,sizeof(OutputS));

/* The next output time and number */
    new_out.t   = par_getd_def(block,"time",pM->time);
    new_out.num = par_geti_def(block,"num",0);

    new_out.dt  = par_getd(block,"dt");
    new_out.n   = outn;

/* level and domain number can be specified with SMR  */
    new_out.nlevel = par_geti_def(block,"level",-1);
    new_out.ndomain = par_geti_def(block,"domain",-1);

    if (par_exist(block,"dat_fmt")) new_out.dat_fmt = par_gets(block,"dat_fmt");

/* set id in output filename to input string if present, otherwise use "outN"
 * as default, where N is output number */
    sprintf(defid,"out%d",outn);
    new_out.id = par_gets_def(block,"id",defid);

    if(par_exist(block,"out_fmt")) 
      fmt = new_out.out_fmt = par_gets(block,"out_fmt");

/* out:     controls what variable can be output (all, prim, or any of expr_*)
 * out_fmt: controls format of output (single variable) or dump (all cons/prim)
 * if "out" doesn't exist, we assume 'cons' variables are meant to be dumped */

    new_out.out = par_gets_def(block,"out","cons");

#ifdef PARTICLES
    /* check input for particle binning (=1, default) or not (=0) */
    new_out.out_pargrid = par_geti_def(block,"pargrid",
                                       check_particle_binning(new_out.out));
    if ((new_out.out_pargrid < 0) || (new_out.out_pargrid >1)) {
      ath_perr(-1,"[init_output]: %s/pargrid must be 0 or 1\n", block);
      continue;
    }

/* Set the particle property selection function.  By default all particles are
 * selected; this is used only for particle output and is ignored otherwise. */
    if(par_exist(block,"par_prop")) {
      new_out.par_prop = get_usr_par_prop(par_gets(block,"par_prop"));
      if (new_out.par_prop == NULL) {
        ath_pout(0,"[init_output]: Particle selection function not found; "
                   "using the default (all particles).\n");
        new_out.par_prop = property_all;
      }
    }
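The parameters read in this loop come from numbered output blocks in the problem's input file. A hedged sketch of two blocks it would parse (the parameter names mirror the par_* calls above; the values are purely illustrative):

<output1>
out_fmt = hst        # history dump
dt      = 0.01       # simulation time between outputs

<output2>
out_fmt = bin        # binary dump
out     = prim       # dump primitive rather than conserved variables
id      = primvars
dt      = 0.1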
Example No. 6
void fluxes(const Cons1DS Ul, const Cons1DS Ur,
            const Prim1DS Wl, const Prim1DS Wr,
            const Real Bxi, Cons1DS *pFlux)
{
  Real sqrtdl,sqrtdr,isdlpdr,droe,v1roe,v2roe,v3roe;
#ifndef ISOTHERMAL
  Real hroe;
#endif /* ISOTHERMAL */
  Real ev[NWAVE];
  Real *pFl, *pFr, *pF;
  Cons1DS Fl,Fr;
  int n;
  Real cfl,cfr,bp,bm,tmp;
  Real al,ar; /* Min and Max wave speeds */
  Real am,cp; /* Contact wave speed and pressure */
  Real tl,tr,dl,dr,sl,sm,sr;

/*--- Step 1. ------------------------------------------------------------------
 * Convert left- and right-states from conserved to primitive variables
 * (not needed here, since Wl and Wr are passed in).
 */
/*
  pbl = Cons1D_to_Prim1D(&Ul,&Wl,&Bxi);
  pbr = Cons1D_to_Prim1D(&Ur,&Wr,&Bxi);
*/

/*--- Step 2. ------------------------------------------------------------------
 * Compute Roe-averaged data from left- and right-states
 */

  sqrtdl = sqrt((double)Wl.d);
  sqrtdr = sqrt((double)Wr.d);
  isdlpdr = 1.0/(sqrtdl + sqrtdr);

  droe  = sqrtdl*sqrtdr;
  v1roe = (sqrtdl*Wl.Vx + sqrtdr*Wr.Vx)*isdlpdr;
  v2roe = (sqrtdl*Wl.Vy + sqrtdr*Wr.Vy)*isdlpdr;
  v3roe = (sqrtdl*Wl.Vz + sqrtdr*Wr.Vz)*isdlpdr;

/*
 * Following Roe(1981), the enthalpy H=(E+P)/d is averaged for adiabatic flows,
 * rather than E or P directly.  sqrtdl*hl = sqrtdl*(el+pl)/dl = (el+pl)/sqrtdl
 */

#ifndef ISOTHERMAL
  hroe = ((Ul.E + Wl.P)/sqrtdl + (Ur.E + Wr.P)/sqrtdr)*isdlpdr;
#endif

/*--- Step 3. ------------------------------------------------------------------
 * Compute eigenvalues using Roe-averaged values
 */

#ifdef ISOTHERMAL
  esys_roe_iso_hyd(v1roe, v2roe, v3roe, ev, NULL, NULL);
#else
  esys_roe_adb_hyd(v1roe, v2roe, v3roe, hroe, ev, NULL, NULL);
#endif /* ISOTHERMAL */

/*--- Step 4. ------------------------------------------------------------------
 * Compute the max and min wave speeds
 */

#ifdef ISOTHERMAL
  cfl = cfr = Iso_csound;
#else
  cfl = sqrt((double)(Gamma*Wl.P/Wl.d));
  cfr = sqrt((double)(Gamma*Wr.P/Wr.d));
#endif

  ar = MAX(ev[NWAVE-1],(Wr.Vx + cfr));
  al = MIN(ev[0]      ,(Wl.Vx - cfl));

  bp = ar > 0.0 ? ar : 0.0;
  bm = al < 0.0 ? al : 0.0;

/*--- Step 5. ------------------------------------------------------------------
 * Compute the contact wave speed and Pressure
 */

#ifdef ISOTHERMAL
  tl = Wl.d*Iso_csound2 + (Wl.Vx - al)*Ul.Mx;
  tr = Wr.d*Iso_csound2 + (Wr.Vx - ar)*Ur.Mx;
#else
  tl = Wl.P + (Wl.Vx - al)*Ul.Mx;
  tr = Wr.P + (Wr.Vx - ar)*Ur.Mx;
#endif

  dl =   Ul.Mx - Ul.d*al;
  dr = -(Ur.Mx - Ur.d*ar);

  tmp = 1.0/(dl + dr);
/* Determine the contact wave speed... */
  am = (tl - tr)*tmp;
/* ...and the pressure at the contact surface */
  cp = (dl*tr + dr*tl)*tmp;
  if(cp < 0.0) ath_perr(1,"[hllc flux]: Contact Pressure = %g\n",cp);
  cp = cp > 0.0 ? cp : 0.0;

/*--- Step 6. ------------------------------------------------------------------
 * Compute L/R fluxes along the line bm, bp
 */

  Fl.d  = Ul.Mx - bm*Ul.d;
  Fr.d  = Ur.Mx - bp*Ur.d;

  Fl.Mx = Ul.Mx*(Wl.Vx - bm);
  Fr.Mx = Ur.Mx*(Wr.Vx - bp);

  Fl.My = Ul.My*(Wl.Vx - bm);
  Fr.My = Ur.My*(Wr.Vx - bp);

  Fl.Mz = Ul.Mz*(Wl.Vx - bm);
  Fr.Mz = Ur.Mz*(Wr.Vx - bp);

#ifdef ISOTHERMAL
  Fl.Mx += Wl.d*Iso_csound2;
  Fr.Mx += Wr.d*Iso_csound2;
#else
  Fl.Mx += Wl.P;
  Fr.Mx += Wr.P;

  Fl.E  = Ul.E*(Wl.Vx - bm) + Wl.P*Wl.Vx;
  Fr.E  = Ur.E*(Wr.Vx - bp) + Wr.P*Wr.Vx;
#endif /* ISOTHERMAL */

#if (NSCALARS > 0)
  for (n=0; n<NSCALARS; n++) {
    Fl.s[n] = Fl.d*Wl.r[n];
    Fr.s[n] = Fr.d*Wr.r[n];
  }
#endif

/*--- Step 7. ------------------------------------------------------------------
 * Compute flux weights or scales
 */

  if (am >= 0.0) {
    sl =  am/(am - bm);
    sr = 0.0;
    sm = -bm/(am - bm);
  }
  else {
    sl =  0.0;
    sr = -am/(bp - am);
    sm =  bp/(bp - am);
  }
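
/* With these weights, the loop in Step 8 evaluates the HLLC star-region flux
 * from the left state when the contact speed am >= 0 and from the right state
 * otherwise; the pressure contribution along the contact is added afterwards. */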

/*--- Step 8. ------------------------------------------------------------------
 * Compute the HLLC flux at interface
 */
  pFl = (Real *)&(Fl);
  pFr = (Real *)&(Fr);
  pF  = (Real *)pFlux;
  for (n=0; n<NWAVE; n++) pF[n] = sl*pFl[n] + sr*pFr[n];

/* Add the weighted contribution of the flux along the contact */
  pFlux->Mx += sm*cp;
#ifndef ISOTHERMAL
  pFlux->E  += sm*cp*am;
#endif /* ISOTHERMAL */

/* Fluxes of passively advected scalars, computed from density flux */
#if (NSCALARS > 0)
  if (pFlux->d >= 0.0) {
    for (n=0; n<NSCALARS; n++) pFlux->s[n] = pFlux->d*Wl.r[n];
  } else {
    for (n=0; n<NSCALARS; n++) pFlux->s[n] = pFlux->d*Wr.r[n];
  }
#endif

#ifdef CYLINDRICAL
  if (al > 0.0) {
#ifndef ISOTHERMAL
    pFlux->Pflux = Wl.P;
#else /* ISOTHERMAL */
    pFlux->Pflux = Wl.d*Iso_csound2;
#endif /* ISOTHERMAL */
  }
  else if (ar < 0.0) {
#ifndef ISOTHERMAL
    pFlux->Pflux = Wr.P;
#else /* ISOTHERMAL */
    pFlux->Pflux = Wr.d*Iso_csound2;
#endif /* ISOTHERMAL */
  }
  else {
#ifndef ISOTHERMAL
    pFlux->Pflux = cp;
#else /* ISOTHERMAL */
    if (am >= 0.0) {
      pFlux->Pflux = Wl.d*(al-Wl.Vx)/(al-am);
    }
    else {
      pFlux->Pflux = Wr.d*(ar-Wr.Vx)/(ar-am);
    }
#endif /* ISOTHERMAL */
  }
#endif /* CYLINDRICAL */

  return;
}
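
A usage sketch for a single interface, assuming an adiabatic hydro build with NSCALARS == 0 (the demo function and the Sod-type states are illustrative; Gamma_1 is the usual Gamma-1 global, and Bxi is unused without MHD):

static void hllc_interface_demo(Cons1DS *pFlux)
{
  Prim1DS Wl, Wr;
  Cons1DS Ul, Ur;

  /* Sod-type left/right primitive states (illustrative values) */
  Wl.d = 1.0;   Wl.Vx = 0.0; Wl.Vy = 0.0; Wl.Vz = 0.0; Wl.P = 1.0;
  Wr.d = 0.125; Wr.Vx = 0.0; Wr.Vy = 0.0; Wr.Vz = 0.0; Wr.P = 0.1;

  /* Matching conserved states: M = d*V, E = P/(Gamma-1) + kinetic energy */
  Ul.d = Wl.d;  Ul.Mx = Wl.d*Wl.Vx; Ul.My = Wl.d*Wl.Vy; Ul.Mz = Wl.d*Wl.Vz;
  Ul.E = Wl.P/Gamma_1 + 0.5*Wl.d*(SQR(Wl.Vx)+SQR(Wl.Vy)+SQR(Wl.Vz));
  Ur.d = Wr.d;  Ur.Mx = Wr.d*Wr.Vx; Ur.My = Wr.d*Wr.Vy; Ur.Mz = Wr.d*Wr.Vz;
  Ur.E = Wr.P/Gamma_1 + 0.5*Wr.d*(SQR(Wr.Vx)+SQR(Wr.Vy)+SQR(Wr.Vz));

  fluxes(Ul, Ur, Wl, Wr, 0.0, pFlux);   /* pFlux now holds the interface fluxes */
}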