Example #1
void Initialize()
{
        gGetVarStringPtr = &get_var_string;
        srand(time(NULL));

        /* set default solver */
        /* already done in globals.c */
        //    gSolver = NULL;

        /* set default go again */
        gGoAgain = DefaultGoAgain;
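
        /* default display names for the two players */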

        sprintf(gPlayerName[kPlayerOneTurn],"Player");
        sprintf(gPlayerName[kPlayerTwoTurn],"Computer");

        /* generic hash */
        //generic_hash_context_init();

        /* get the DB function table with all pointers to default */
        CreateDatabases();

        /* game-specific variables */
        InitializeGame();

        /* set the solver */
        SetSolver();
}
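
For orientation, here is a minimal, self-contained sketch of the pattern
Initialize() relies on. All names below are illustrative stand-ins, not
from the codebase above: generic defaults are wired first, the game then
sets its options, and SetSolver() binds a solver through a function
pointer based on those options.

#include <stdio.h>

typedef int (*SolverFn)(void);

static int SolveStandard(void) { puts("standard solve"); return 0; }
static int SolveLoopy(void)    { puts("loopy solve");    return 0; }

static SolverFn gSolver    = NULL;
static int      gLoopyGame = 0;   /* a game would set this in InitializeGame() */

static void SetSolver(void)
{
    /* pick the solver that matches the options configured by the game */
    gSolver = gLoopyGame ? SolveLoopy : SolveStandard;
}

int main(void)
{
    gLoopyGame = 1;    /* pretend InitializeGame() declared a loopy game */
    SetSolver();
    return gSolver();
}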
Example #2
 /// Equivalent to SetSolver(Solver).
 virtual void SetPreconditioner(Solver &solver) { SetSolver(solver); }
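
A self-contained sketch of the delegation shown above (illustrative names,
not tied to any particular library): SetPreconditioner() is a readability
alias that simply forwards to SetSolver(), so the two calls below are
interchangeable.

class Solver {
public:
    virtual ~Solver() = default;
};

class IterativeSolver : public Solver {
public:
    /// Store the preconditioner to be applied during iteration.
    virtual void SetSolver(Solver &solver) { prec = &solver; }
    /// Equivalent to SetSolver(Solver).
    virtual void SetPreconditioner(Solver &solver) { SetSolver(solver); }
private:
    Solver *prec = nullptr;
};

int main()
{
    Solver jacobi;                     // stands in for any concrete solver
    IterativeSolver krylov;
    krylov.SetPreconditioner(jacobi);  // same effect as krylov.SetSolver(jacobi)
    return 0;
}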
Example #3
/* ********************************************************************* */
int main (int argc, char *argv[])
/*!
 * Start PLUTO, initialize functions, define data structures and 
 * handle the main integration loop.
 *
 * \param [in] argc Argument count.
 * \param [in] argv Array of pointers to the argument strings.
 * \return This function returns 0 on normal exit.
 *
 *********************************************************************** */
{
  int    nv, idim, err;
  char   first_step=1, last_step=0;
  double scrh;
  Data   data;
  time_t  tbeg, tend;
  /* AYW -- 2012-06-19 10:18 JST
   * Time difference using difftime */
  double t_elapsed;
#ifdef PARALLEL
  double tbeg_mpi, tend_mpi;
#endif
  //struct tm * ptm;
  /* -- AYW */
  Riemann_Solver *Solver;
  Grid      grd[3];
  Time_Step Dts;
  Cmd_Line cmd_line;
  Input  ini;
  Output *output;

  #ifdef PARALLEL
   AL_Init (&argc, &argv);
   MPI_Comm_rank (MPI_COMM_WORLD, &prank);
  #endif

  Initialize (argc, argv, &data, &ini, grd, &cmd_line);
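
/* -- print the size of the basic data types (portability check) -- */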

  double *dbl_pnt;
  int    *int_pnt;
  print1 ("> Basic data type:\n");
  print1 ("  sizeof (char)     = %d\n", sizeof(char));
  print1 ("  sizeof (uchar)    = %d\n", sizeof(unsigned char));
  print1 ("  sizeof (int)      = %d\n", sizeof(int));
  print1 ("  sizeof (*int)     = %d\n", sizeof(int_pnt));
  print1 ("  sizeof (float)    = %d\n", sizeof(float));
  print1 ("  sizeof (double)   = %d\n", sizeof(double));
  print1 ("  sizeof (*double)  = %d\n", sizeof(dbl_pnt));
  
/*
  print1 ("\n> Structure data type:\n");
  print1 ("  sizeof (CMD_LINE)   = %d\n", sizeof(Cmd_Line));
  print1 ("  sizeof (DATA)       = %d\n", sizeof(Data));
  print1 ("  sizeof (STATE_1D)   = %d\n", sizeof(State_1D));
  print1 ("  sizeof (GRID)       = %d\n", sizeof(Grid));
  print1 ("  sizeof (TIME_STEP)  = %d\n", sizeof(Time_Step));
  print1 ("  sizeof (OUTPUT)     = %d\n", sizeof(Output));
  print1 ("  sizeof (INPUT)      = %d\n", sizeof(Input));
  print1 ("  sizeof (RUNTIME)    = %d\n", sizeof(Runtime));
  print1 ("  sizeof (RGB)        = %d\n", sizeof(RGB));
  print1 ("  sizeof (IMAGE)      = %d\n", sizeof(Image));
  print1 ("  sizeof (FLOAT_VECT) = %d\n", sizeof(Float_Vect));
  print1 ("  sizeof (INDEX)      = %d\n", sizeof(Index));
  print1 ("  sizeof (RBOX)       = %d\n", sizeof(RBox));
*/

/* -- initialize members of Time_Step structure -- */

  Dts.cmax     = ARRAY_1D(NMAX_POINT, double);
  Dts.inv_dta  = 0.0;
  Dts.inv_dtp  = 0.0;
  Dts.dt_cool  = 1.e38;
  Dts.cfl      = ini.cfl;
  Dts.cfl_par  = ini.cfl_par;
  Dts.rmax_par = ini.rmax_par;
  Dts.Nsts     = Dts.Nrkc = 0;
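
/* -- select the Riemann solver named in the input file -- */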
  
  Solver = SetSolver (ini.solv_type);

  /* AYW -- 2012-06-26 11:43 JST */
#ifdef PARALLEL  
    if (prank == 0) tbeg_mpi = MPI_Wtime();
    MPI_Bcast(&tbeg_mpi, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
#else
  time (&tbeg);
#endif
  /* -- AYW */

  /* AYW -- 2012-06-26 11:43 JST */
  //ptm = gmtime ( &tbeg );
  //print1("\n> First Timestamp: %2d:%02d:%02d.\n", (ptm->tm_hour)%24, ptm->tm_min, ptm->tm_sec);
  /* -- AYW */
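
/* -- reset the global step counter -- */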

  g_stepNumber = 0;

/* --------------------------------------------------------
    Check if restart is necessary. 
    If not, write initial condition to disk.
   ------------------------------------------------------- */
   
  if (cmd_line.restart == YES) {
    Restart (&ini, cmd_line.nrestart, DBL_OUTPUT, grd);
  }else if (cmd_line.h5restart == YES){
    Restart (&ini, cmd_line.nrestart, DBL_H5_OUTPUT, grd);
  }else if (cmd_line.write){
    /* AYW -- 2013-01-08 18:05 JST 
     * Arguments modified in CheckFor functions to include last_step*/
    CheckForOutput (&data, &ini, grd, last_step);
    CheckForAnalysis (&data, &ini, grd, last_step);
    //CheckForOutput (&data, &ini, grd);
    //CheckForAnalysis (&data, &ini, grd);
    /* -- AYW */

    #ifdef USE_ASYNC_IO
     Async_EndWriteData (&ini);
    #endif
  }

  print1 ("> Starting computation... \n\n");

/* =====================================================================
          M A I N      L O O P      S T A R T S      H E R E
   ===================================================================== */

#ifndef USE_ASYNC_IO  /* -- Standard loop, don't use Asynchronous I/O -- */

  while (!last_step){

  /* ------------------------------------------------------
      Check if this is the last integration step:
      - final tstop has been reached: adjust time step 
      - or max number of steps has been reached
     ------------------------------------------------------ */

    if ((g_time + g_dt) >= ini.tstop*(1.0 - 1.e-8)) {
      g_dt   = (ini.tstop - g_time);
      last_step = 1;
    }
    if (g_stepNumber == cmd_line.maxsteps && cmd_line.maxsteps > 0) {
      last_step = 1;
    }

  /* ------------------------------------------------------
                Dump log information
     ------------------------------------------------------ */

    if (g_stepNumber%ini.log_freq == 0) {
      print1 ("step:%d ; t = %10.4e ; dt = %10.4e ; %d %% ; [%f, %d",
               g_stepNumber, g_time, g_dt, (int)(100.0*g_time/ini.tstop), 
               g_maxMach, g_maxRiemannIter);
/*      if (g_maxRootIter > 0) print1 (", root it. # = %d",g_maxRootIter);  */
      #if (PARABOLIC_FLUX & SUPER_TIME_STEPPING)
       print1 (", Nsts = %d",Dts.Nsts);
      #endif
      #if (PARABOLIC_FLUX & RK_CHEBYSHEV)
       print1 (", Nrkc = %d",Dts.Nrkc);
      #endif
      print1 ("]\n");      
    }

  /* ------------------------------------------------------
       check if it's time to write or perform analysis
     ------------------------------------------------------ */

    if (!first_step && !last_step && cmd_line.write) {
      /* AYW -- 2013-01-08 18:05 JST 
       * Arguments modified in CheckFor functions to include last_step*/
      CheckForOutput (&data, &ini, grd, last_step);
      CheckForAnalysis (&data, &ini, grd, last_step);
      //CheckForOutput  (&data, &ini, grd);
      //CheckForAnalysis(&data, &ini, grd);
      /* -- AYW */
    }

  /* ------------------------------------------------------
      Advance solution array by a single time step
      g_dt = dt(n)
     ------------------------------------------------------ */

    if (cmd_line.jet != -1) SetJetDomain (&data, cmd_line.jet, ini.log_freq, grd); 
    err = Integrate (&data, Solver, &Dts, grd);
    if (cmd_line.jet != -1) UnsetJetDomain (&data, cmd_line.jet, grd); 

  /* ------------------------------------------------------
       Integration didn't go through. Step must
       be redone from previously saved solution.
     ------------------------------------------------------ */
/*
    if (err != 0){
      print1 ("! Step failed. Re-trying\n");
      zones with problems must be tagged with MINMOD_FLAG and HLL_FLAG
      time step should be halved
      GET_SOL(&data);
    }
*/

  /* ------------------------------------------------------
      Increment time, t(n+1) = t(n) + dt(n)
     ------------------------------------------------------ */

    g_time += g_dt;

  /* ------------------------------------------------------
      Show the time step ratios between the actual g_dt
      and the advection, diffusion and cooling time scales.
     ------------------------------------------------------ */

    #if SHOW_TIME_STEPS == YES
     if (g_stepNumber%ini.log_freq == 0) {
       double cg, dta, dtp, dtc;
       dta = 1.0/Dts.inv_dta;
       dtp = 0.5/Dts.inv_dtp;
       dtc = Dts.dt_cool;
       #ifdef PARALLEL
        MPI_Allreduce (&dta, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
        dta = cg;

        MPI_Allreduce (&dtp, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
        dtp = cg;

        MPI_Allreduce (&dtc, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
        dtc = cg;
       #endif
       /*
   print1 ("[dt/dt(adv) = %10.4e, dt/dt(par) = %10.4e, dt/dt(cool) = %10.4e]\n",
                g_dt/dta, g_dt/dtp, g_dt/dtc);
                */
       print1 ("  dt(adv)  = cfl x %10.4e;\n",dta);
       print1 ("  dt(par)  = cfl x %10.4e;\n",dtp);
       print1 ("  dt(cool) =       %10.4e;\n",dtc);
     }
    #endif

  /* ------------------------------------------------------
   * AYW -- 2013-01-08 15:05 JST
   * Check if wallclock time has been reached. Measure 
   * delta t every timestep.
     ------------------------------------------------------ */

#ifdef PARALLEL  
    if (prank == 0) tend_mpi = MPI_Wtime();
    MPI_Bcast(&tend_mpi, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
#else
    time(&tend);
#endif

#ifdef PARALLEL  
    t_elapsed = tend_mpi - tbeg_mpi;
#else
    t_elapsed = difftime(tend, tbeg);
#endif
    if (t_elapsed >= cmd_line.maxtime && cmd_line.maxtime > 0){
      print1("\n> Max time %f reached.\n", cmd_line.maxtime);
      last_step = 1;
    }
    //else{
    //  ptm = gmtime ( &tend );
    //  print1("\n> Timestamp: %2d:%02d:%02d.\n", (ptm->tm_hour)%24, ptm->tm_min, ptm->tm_sec);
    //  print1("\n> Wall time (s): %f / %f.\n", t_elapsed, cmd_line.maxtime);
    //}
    /* -- AYW */


  /* ------------------------------------------------------
      Get next time step dt(n+1).
      Do it every two steps if cooling or dimensional
      splitting are used.
     ------------------------------------------------------ */

    #if (COOLING == NO) && ((DIMENSIONS == 1) || (DIMENSIONAL_SPLITTING == NO))
      g_dt = NextTimeStep(&Dts, &ini, grd);
    #else
     if (g_stepNumber%2 == 1) g_dt = NextTimeStep(&Dts, &ini, grd);
    #endif

  /* ------------------------------------------------------
          Global MPI reduction operations
     ------------------------------------------------------ */
  
    #ifdef PARALLEL
     MPI_Allreduce (&g_maxMach, &scrh, 1, 
                    MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
     g_maxMach = scrh;

     MPI_Allreduce (&g_maxRiemannIter, &nv, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
     g_maxRiemannIter = nv;
    #endif

    g_stepNumber++;
    
    first_step = 0;
  }

#else /* Use Asynchronous I/O */

  while (!last_step){

  /* ------------------------------------------------------
      Check if this is the last integration step:
      - final tstop has been reached: adjust time step 
      - or max number of steps has been reached
     ------------------------------------------------------ */

    if ((g_time + g_dt) >= ini.tstop*(1.0 - 1.e-8)) {
      g_dt   = (ini.tstop - g_time);
      last_step = 1;
    }
    if (g_stepNumber == cmd_line.maxsteps && cmd_line.maxsteps > 0) {
      last_step = 1;
    }

  /* ------------------------------------------------------
       check if it's time to write or perform analysis
     ------------------------------------------------------ */

    if (!first_step && !last_step && cmd_line.write) {
      /* AYW -- 2013-01-08 18:05 JST 
       * Arguments modified in CheckFor functions to include last_step*/
      CheckForOutput (&data, &ini, grd, last_step);
      CheckForAnalysis (&data, &ini, grd, last_step);
      //CheckForOutput  (&data, &ini, grd);
      //CheckForAnalysis(&data, &ini, grd);
      /* -- AYW */
    }

  /* ------------------------------------------------------
      Show the time step ratios between the actual g_dt
      and the advection, diffusion and cooling time scales.
     ------------------------------------------------------ */

    #if SHOW_TIME_STEPS == YES
     if (!first_step && g_stepNumber%ini.log_freq == 0) {
       double cg, dta, dtp, dtc;
       dta = 1.0/Dts.inv_dta;
       dtp = 0.5/Dts.inv_dtp;
       dtc = Dts.dt_cool;
       #ifdef PARALLEL
        MPI_Allreduce (&dta, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
        dta = cg;

        MPI_Allreduce (&dtp, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
        dtp = cg;

        MPI_Allreduce (&dtc, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
        dtc = cg;
       #endif
       print1 ("\t[dt/dta = %10.4e, dt/dtp = %10.4e, dt/dtc = %10.4e \n",
                g_dt/dta, g_dt/dtp, g_dt/dtc);
     }
    #endif

  /* ------------------------------------------------------
          Global MPI reduction operations
     ------------------------------------------------------ */
  
    #ifdef PARALLEL
     MPI_Allreduce (&g_maxMach, &scrh, 1, 
                    MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
     g_maxMach = scrh;

     MPI_Allreduce (&g_maxRiemannIter, &nv, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
     g_maxRiemannIter = nv;
    #endif

  /* ------------------------------------------------------
             Finish writing using Async I/O
     ------------------------------------------------------ */

    #ifdef USE_ASYNC_IO
     Async_EndWriteData (&ini);
    #endif

  /* ------------------------------------------------------
                Dump log information
     ------------------------------------------------------ */

    if (g_stepNumber%ini.log_freq == 0) {
      print1 ("step:%d ; t = %10.4e ; dt = %10.4e ; %d %% ; [%f, %d",
               g_stepNumber, g_time, g_dt, (int)(100.0*g_time/ini.tstop), 
               g_maxMach, g_maxRiemannIter);
      #if (PARABOLIC_FLUX & SUPER_TIME_STEPPING)
       print1 (", Nsts = %d",Dts.Nsts);
      #endif
      #if (PARABOLIC_FLUX & RK_CHEBYSHEV)
       print1 (", Nrkc = %d",Dts.Nrkc);
      #endif
      print1 ("]\n");      
    }
    
  /* ------------------------------------------------------
      Advance solution array by a single time step
      g_dt = dt(n)
     ------------------------------------------------------ */

    if (cmd_line.jet != -1) SetJetDomain (&data, cmd_line.jet, ini.log_freq, grd); 
    err = Integrate (&data, Solver, &Dts, grd);
    if (cmd_line.jet != -1) UnsetJetDomain (&data, cmd_line.jet, grd); 

  /* ------------------------------------------------------
       Integration didn't go through. Step must
       be redone from previously saved solution.
     ------------------------------------------------------ */
/*
    if (err != 0){
      print1 ("! Step failed. Re-trying\n");
      zones with problems must be tagged with MINMOD_FLAG and HLL_FLAG
      time step should be halved
      GET_SOL(&data);
    }
*/
  /* ------------------------------------------------------
      Increment time, t(n+1) = t(n) + dt(n)
     ------------------------------------------------------ */

    g_time += g_dt;

  
  /* ------------------------------------------------------
   * AYW -- 2013-01-08 15:05 JST
   * Check if wallclock time has been reached. Measure 
   * delta t every timestep.
     ------------------------------------------------------ */

#ifdef PARALLEL  
    if (prank == 0) tend_mpi = MPI_Wtime();
    MPI_Bcast(&tend_mpi, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
#else
    time(&tend);
#endif

#ifdef PARALLEL  
    t_elapsed = tend_mpi - tbeg_mpi;
#else
    t_elapsed = difftime(tend, tbeg);
#endif
    if (t_elapsed >= cmd_line.maxtime && cmd_line.maxtime > 0){
      print1("\n> Max time %f reached.\n", cmd_line.maxtime);
      last_step = 1;
    }
    //else{
    //  ptm = gmtime ( &tend );
    //  print1("\n> Timestamp: %2d:%02d:%02d.\n", (ptm->tm_hour)%24, ptm->tm_min, ptm->tm_sec);
    //  print1("\n> Wall time (s): %f / %f.\n", t_elapsed, cmd_line.maxtime);
    //}
    /* -- AYW */


  /* ------------------------------------------------------
                Get next time step dt(n+1)
     ------------------------------------------------------ */

    g_dt = NextTimeStep(&Dts, &ini, grd);
    g_stepNumber++;
    first_step = 0;
  }
#endif /* USE_ASYNC_IO */

/* =====================================================================
          M A I N       L O O P      E N D S       H E R E 
   ===================================================================== */

  if (cmd_line.write){

    /* AYW -- 2013-01-08 18:05 JST 
     * Arguments modified in CheckFor functions to include last_step*/
    CheckForOutput (&data, &ini, grd, last_step);
    CheckForAnalysis (&data, &ini, grd, last_step);
    //CheckForOutput (&data, &ini, grd);
    //CheckForAnalysis (&data, &ini, grd);
    /* -- AYW */

    #ifdef USE_ASYNC_IO
     Async_EndWriteData (&ini);
    #endif
  }

  #ifdef PARALLEL
   MPI_Barrier (MPI_COMM_WORLD);
   print1  ("\n> Total allocated memory  %6.2f Mb (proc #%d)\n",
             (float)g_usedMemory/1.e6,prank);
   MPI_Barrier (MPI_COMM_WORLD);
  #else
   print1  ("\n> Total allocated memory  %6.2f Mb\n",(float)g_usedMemory/1.e6);
  #endif

  time(&tend);
  g_dt = difftime(tend, tbeg);
  print1("> Elapsed time             %s\n", TotalExecutionTime(g_dt));
  print1("> Average time/step       %10.2e  (sec)  \n", 
          difftime(tend,tbeg)/(double)g_stepNumber);
  print1("> Local time                %s",asctime(localtime(&tend)));
  print1("> Done\n");

  FreeArray4D ((void *) data.Vc);
  #ifdef PARALLEL
   MPI_Barrier (MPI_COMM_WORLD);
   AL_Finalize ();
  #endif

  return (0);
}
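
The wall-clock limiter added to the main loop (the AYW blocks above)
reduces to the following standalone sketch. Only the serial difftime()
branch is shown; the variable names and the 2-second limit are chosen
here for illustration.

#include <stdio.h>
#include <time.h>

int main(void)
{
    time_t tbeg, tend;
    double maxtime   = 2.0;   /* seconds; PLUTO reads this from the command line */
    int    last_step = 0;
    long   steps     = 0;

    time(&tbeg);
    while (!last_step) {
        steps++;                       /* stands in for one Integrate() call */

        /* measure elapsed wall-clock time every step and stop when the
           limit is exceeded, exactly as in the loop above */
        time(&tend);
        if (difftime(tend, tbeg) >= maxtime && maxtime > 0) {
            printf("\n> Max time %f reached after %ld steps.\n", maxtime, steps);
            last_step = 1;
        }
    }
    return 0;
}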