Example #1
  // Receive and process a message while this process is actively working.
  // Returns true if some process has reported finding the best possible
  // result, i.e. there is no point in working any further.
  bool activeRecv(stack<string>& workStack) {
    string recvMsg;
    
    // Has a message arrived?
    boost::optional<mpi::status> inc = com.iprobe();

    // Nothing arrived, back to work!
    if (!inc) {
      return false;
    }
       
    mpi::status status = *inc;

    // A token arrived; store it for now and process it only after my own work is done
    if (status.tag() == TOKEN) {
      bool isWhiteToken;
      //cout << "active: recv token from " << status.source() << endl;
      com.recv(status.source(), status.tag(), isWhiteToken);
      if (isWhiteToken) {
        token = WHITE_TOKEN;
      } else {
        token = BLACK_TOKEN;
      }
    } 
    
    // A work request arrived
    else if (status.tag() == WORK_REQUEST) {
      //cout << "active: recv work_req from " << status.source() << endl;
      com.recv(status.source(), status.tag());
      handleWorkRequest(status.source(), workStack);
    }
    
    // A notification arrived that some subsequence was found.
    // If it is longer than my best one found so far, store it.
    else if (status.tag() == FOUND) {
      cout << "active: recv found from " << status.source() << endl;
      com.recv(status.source(), status.tag(), recvMsg);
      if (recvMsg.length() > myLongest.length()) {
        myLongest = recvMsg;
      }
    }
        
    // Some process has already found the best possible result
    else if (status.tag() == FOUND_BEST) {
      cout << "active: recv found best from " << status.source() << endl;
      com.recv(status.source(), status.tag(), myLongest);
      return true;
    } 
    
    // A message arrived that I cannot handle right now; receive it and discard it
    else {
      //cout << "active: recv from " << status.source() << endl;
      com.recv(status.source(), status.tag());
    }
    
    return false;
  }
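
The routine above assumes surrounding state that is not shown here: a boost::mpi communicator com, the message tags TOKEN, WORK_REQUEST, FOUND and FOUND_BEST, and the members token and myLongest. Below is a minimal, hypothetical sketch of how such a receive routine might be driven from the worker's main loop; the names workLoop and expandTop are illustrative and not part of the original code.

  // Hypothetical driver (a sketch, not taken from the original source):
  // interleave local work with message handling so that requests from other
  // processes are answered while this process is still busy.
  void workLoop(stack<string>& workStack) {
    while (!workStack.empty()) {
      // Poll for messages; stop immediately if the best possible result
      // has already been found by someone else.
      if (activeRecv(workStack)) {
        return;
      }
      expandTop(workStack);  // hypothetical: pop one item and push its successors
    }
    // Local work exhausted: switch to the idle phase (work requests and
    // token passing for termination detection), which is not shown here.
  }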
Example #2
File: mccrun.cpp  Project: liangjj/hVMC
MCCResults mccrun_master(
  const Options& opts, const Eigen::VectorXd& vpar, unsigned int num_bins,
  const set<observables_t>& obs, const mpi::communicator& mpicomm )
{
  cout << "========== NEW MONTE CARLO CYCLE ==========" << endl;
  cout << ":: Preparing the simulation" << endl;

  HubbardModelVMC model = prepare_model( opts, vpar, mpicomm );

  vector< unique_ptr<Observable> > obscalc = prepare_obscalcs( obs, opts );
  ObservableCache obscache;

  unsigned int finished_workers = 0;
  unsigned int scheduled_bins = 0;
  unsigned int completed_bins = 0;
  unsigned int enqueued_bins  = num_bins;

  // define procedure to query the slaves for new work requests
  function<void()> mpiquery_work_requests( [&]() {
    while ( boost::optional<mpi::status> status
            = mpicomm.iprobe( mpi::any_source, MSGTAG_S_M_REQUEST_BINS ) ) {
      // receive the request and hand out new bins to the source
      mpicomm.recv( status->source(), MSGTAG_S_M_REQUEST_BINS );
      if ( enqueued_bins > 0 ) {
        mpicomm.send( status->source(), MSGTAG_M_S_DISPATCHED_BINS, 1 );
        scheduled_bins += 1;
        enqueued_bins  -= 1;
      } else {
        mpicomm.send( status->source(), MSGTAG_M_S_DISPATCHED_BINS, 0 );
        ++finished_workers;
      }
    }
  } );

  // define procedure to query the slaves for finished work
  // (the message tag is hard-coded as 2 here, while the drain loop after the
  //  main cycle uses MSGTAG_S_M_FINISHED_BINS for the same kind of message)
  function<void()> mpiquery_finished_work( [&]() {
    while ( boost::optional<mpi::status> status
            = mpicomm.iprobe( mpi::any_source, 2 ) ) {
      mpicomm.recv( status->source(), 2 );
      --scheduled_bins;
      ++completed_bins;
    }
  } );

  cout << ":: Equilibrating the system" << endl;

  for (
    unsigned int mcs = 0;
    mcs < opts["calc.num-mcs-equil"].as<unsigned int>();
    ++mcs ) {
    // take care of the slaves
    mpiquery_finished_work();
    mpiquery_work_requests();

    // perform a Monte Carlo step
    model.mcs();
  }

  unsigned int completed_bins_master = 0;

  cout << ":: Performing Monte Carlo cycle" << endl;
  cout << endl;
  cout << "   Progress:" << endl;

  while ( enqueued_bins > 0 ) {

    cout << '\r' << "     Bin "
         << completed_bins << "/" << num_bins;
    cout.flush();

    --enqueued_bins;
    ++scheduled_bins;

    for (
      unsigned int mcs = 0;
      mcs < opts["calc.num-binmcs"].as<unsigned int>();
      ++mcs ) {
      // take care of the slaves
      mpiquery_finished_work();
      mpiquery_work_requests();

      // perform a Monte Carlo step
      model.mcs();

      // measure observables
      for ( const unique_ptr<Observable>& o : obscalc ) {
        o->measure( model, obscache );
      }
      obscache.clear();
    }

    // tell the observables that a bin has been completed
    for ( const unique_ptr<Observable>& o : obscalc ) {
      o->completebin();
    }

    --scheduled_bins;
    ++completed_bins_master;
    ++completed_bins;
  }
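  // the bin queue is empty; the master itself now counts as a finished worker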
  ++finished_workers;

  while ( completed_bins != num_bins ||
          static_cast<int>( finished_workers ) < mpicomm.size() ) {
    if ( boost::optional<mpi::status> status
         = mpicomm.iprobe( mpi::any_source, MSGTAG_S_M_FINISHED_BINS ) ) {
      mpicomm.recv( status->source(), MSGTAG_S_M_FINISHED_BINS );
      --scheduled_bins;
      ++completed_bins;

      cout << '\r' << "     Bin " << completed_bins << "/" << num_bins;
      cout.flush();
    }

    if ( boost::optional<mpi::status> status
         = mpicomm.iprobe( mpi::any_source, MSGTAG_S_M_REQUEST_BINS ) ) {
      // receive the request for more work
      mpicomm.recv( status->source(), MSGTAG_S_M_REQUEST_BINS );
      // tell it that there is no more work
      mpicomm.send( status->source(), MSGTAG_M_S_DISPATCHED_BINS, 0 );
      ++finished_workers;
    }
  }
  assert( enqueued_bins == 0 );
  assert( scheduled_bins == 0 );

  cout << '\r' << "     Bin " << completed_bins << "/" << num_bins << endl;
  cout.flush();

  // check for floating point precision problems

  cout << endl;
  cout << "   Floating point precision control" << endl;

  vector<FPDevStat> W_devstats;
  assert( mpicomm.rank() == 0 );
  mpi::gather( mpicomm, model.get_W_devstat(), W_devstats, 0 );
  FPDevStat W_devstat_combined =
    accumulate(
      W_devstats.begin(), W_devstats.end(),
      FPDevStat( opts["fpctrl.W-deviation-target"].as<double>() )
    );
  cout << "     W: " << W_devstat_combined.recalcs
       << "/" << W_devstat_combined.misses
       << "/" << W_devstat_combined.mag1_misses << endl;

  vector<FPDevStat> T_devstats;
  assert( mpicomm.rank() == 0 );
  mpi::gather( mpicomm, model.get_T_devstat(), T_devstats, 0 );
  FPDevStat T_devstat_combined =
    accumulate(
      T_devstats.begin(), T_devstats.end(),
      FPDevStat( opts["fpctrl.T-deviation-target"].as<double>() )
    );
  cout << "     T: " << T_devstat_combined.recalcs
       << "/" << T_devstat_combined.misses
       << "/" << T_devstat_combined.mag1_misses << endl;

  if ( W_devstat_combined.mag1_misses > 0 ||
       T_devstat_combined.mag1_misses > 0 ) {
    cout << "   Precision targets missed by more than an order of magnitude!" << endl
         << "   WARNING: Your results might be unreliable!!!" << endl << endl;
  } else if ( W_devstat_combined.misses > 0 ||
              T_devstat_combined.misses > 0 ) {
    cout << "   Some precision targets were missed, but your results should be fine."
         << endl << endl;
  } else {
    cout << "   No missed precision targets." << endl << endl;
  }

  // collect results from the slaves and return them to the scheduler
  MCCResults results;
  for ( const unique_ptr<Observable>& o : obscalc ) {
    o->collect_and_write_results( mpicomm, results );
  }
  results.success = true;
  return results;
}
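
The master above implies a simple request/dispatch protocol for the slaves: a slave asks for work with MSGTAG_S_M_REQUEST_BINS, the master answers with MSGTAG_M_S_DISPATCHED_BINS carrying 1 (one more bin) or 0 (no more work), and finished bins are reported back with MSGTAG_S_M_FINISHED_BINS. The following is a minimal sketch of the slave-side counterpart written from that protocol alone; it is not the project's actual slave routine and omits observable measurement and result collection.

void mccrun_slave_sketch( const Options& opts, HubbardModelVMC& model,
                          const mpi::communicator& mpicomm )
{
  while ( true ) {
    // ask the master (rank 0) for another bin
    mpicomm.send( 0, MSGTAG_S_M_REQUEST_BINS );
    int dispatched = 0;
    mpicomm.recv( 0, MSGTAG_M_S_DISPATCHED_BINS, dispatched );
    if ( dispatched == 0 ) {
      break;  // master has no more work for us
    }

    // run one bin worth of Monte Carlo steps
    for ( unsigned int mcs = 0;
          mcs < opts["calc.num-binmcs"].as<unsigned int>(); ++mcs ) {
      model.mcs();
      // ... measure observables here, as the master does ...
    }

    // report the completed bin back to the master
    mpicomm.send( 0, MSGTAG_S_M_FINISHED_BINS );
  }

  // join the collective gathers that the master performs at the end
  mpi::gather( mpicomm, model.get_W_devstat(), 0 );
  mpi::gather( mpicomm, model.get_T_devstat(), 0 );
}

Whether the slave also equilibrates before its first bin, and how the observables communicate their data back for collect_and_write_results, depends on the actual implementation and is not covered by this sketch.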