Example #1
  // Receive and process a message while the process is actively working
  // Returns true if some process has reported finding the best possible
  // result = no point in working any further
  bool activeRecv(stack<string>& workStack) {
    string recvMsg;
    
    // Has any message arrived?
    boost::optional<mpi::status> inc = com.iprobe();

    // Nothing arrived, back to work!
    if (!inc) {
      return false;
    }
       
    mpi::status status = *inc;

    // A token arrived; store it for now and process it only after finishing my own work
    if (status.tag() == TOKEN) {
      bool isWhiteToken;
      //cout << "active: recv token from " << status.source() << endl;
      com.recv(status.source(), status.tag(), isWhiteToken);
      if (isWhiteToken) {
        token = WHITE_TOKEN;
      } else {
        token = BLACK_TOKEN;
      }
    } 
    
    // A work request arrived
    else if (status.tag() == WORK_REQUEST) {
      //cout << "active: recv work_req from " << status.source() << endl;
      com.recv(status.source(), status.tag());
      handleWorkRequest(status.source(), workStack);
    }
    
    // A notification arrived that some subsequence was found
    // If it is longer than my best one so far, store it
    else if (status.tag() == FOUND) {
      cout << "active: recv found from " << status.source() << endl;
      com.recv(status.source(), status.tag(), recvMsg);
      if (recvMsg.length() > myLongest.length()) {
        myLongest = recvMsg;
      }
    }
        
    // Some process has already found the best possible result
    else if (status.tag() == FOUND_BEST) {
      cout << "active: recv found best from " << status.source() << endl;
      com.recv(status.source(), status.tag(), myLongest);
      return true;
    } 
    
    // A message arrived that cannot be handled right now; receive it and discard it
    else {
      //cout << "active: recv from " << status.source() << endl;
      com.recv(status.source(), status.tag());
    }
    
    return false;
  }
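handleWorkRequest is called above but is not part of the snippet. A minimal sketch of what it could look like, assuming the DATA/NO_WORK tags visible in Example #12 and a give-away-one-item policy; the splitting policy is an assumption, not the original code:

  // Hypothetical sketch -- not the original helper. Answers a WORK_REQUEST
  // either with one unit of work (DATA) or with a NO_WORK refusal.
  void handleWorkRequest(int requester, stack<string>& workStack) {
    if (workStack.size() > 1) {
      // keep at least one item for myself, hand the top one over
      string donated = workStack.top();
      workStack.pop();
      com.send(requester, DATA, donated);
    } else {
      // nothing to spare; the requester will ask another process
      com.send(requester, NO_WORK);
    }
  }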
Example #2
void collect_data(mpi::communicator local, mpi::communicator world)
{
  // The rank of the collector within the world communicator
  int master_collector = world.size() - local.size();

  if (world.rank() == master_collector) {
    while (true) {
      // Wait for a message
      mpi::status msg = world.probe();
      if (msg.tag() == msg_data_packet) {
        // Receive the packet of data
        std::vector<int> data;
        world.recv(msg.source(), msg.tag(), data);

        // Tell each of the collectors that we'll be broadcasting some data
        for (int dest = 1; dest < local.size(); ++dest)
          local.send(dest, msg_broadcast_data, msg.source());

        // Broadcast the actual data.
        broadcast(local, data, 0);
      } else if (msg.tag() == msg_finished) {
        // Receive the message
        world.recv(msg.source(), msg.tag());

        // Tell each of the collectors that we're finished
        for (int dest = 1; dest < local.size(); ++dest)
          local.send(dest, msg_finished);

        break;
      }
    }
  } else {
    while (true) {
      // Wait for a message from the master collector
      mpi::status msg = local.probe();
      if (msg.tag() == msg_broadcast_data) {
        // Receive the broadcast message
        int originator;
        local.recv(msg.source(), msg.tag(), originator);

        // Receive the data broadcasted from the master collector
        std::vector<int> data;
        broadcast(local, data, 0);

        std::cout << "Collector #" << local.rank()
                  << " is processing data from generator #" << originator
                  << "." << std::endl;
      } else if (msg.tag() == msg_finished) {
        // Receive the message
        local.recv(msg.source(), msg.tag());

        break;
      }
    }
  }
}
Example #3
void bi::MarginalSISHandler<B,A,S>::handleAdapterSamples(
    boost::mpi::communicator child, boost::mpi::status status) {
  typedef typename temp_host_matrix<real>::type matrix_type;

  static const int N = B::NP;

  /* add samples */
  boost::optional<int> n = status.template count<real>();
  if (n) {
    matrix_type Z(N + T, *n / (N + T));
    child.recv(status.source(), status.tag(), Z.buf(), *n);

    for (int j = 0; j < Z.size2(); ++j) {
      adapter.add(subrange(column(Z,j), 0, N), subrange(column(Z,j), N, T));
    }
  }

  /* send new proposal if necessary */
  if (adapter.stop(t)) {
    adapter.adapt(t);
    BOOST_AUTO(q, adapter.get(t));
    BOOST_AUTO(iter, node.children.begin());
    for (; iter != node.children.end(); ++iter) {
      node.requests.push_front(iter->isend(0, MPI_TAG_ADAPTER_PROPOSAL, q));
    }
    ///@todo Serialize q into archive just once, then send to all. This may
    ///be how broadcast is already implemented in Boost.MPI.
  }
}
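The ///@todo at the end of handleAdapterSamples suggests serializing q only once. A minimal sketch of that idea, assuming q is Boost.Serialization-compatible and that the children are adjusted to receive a std::string; this is not the handler's actual code:

  /* serialize the proposal once, then post one isend of the packed string per
     child (requires <sstream> and <boost/archive/text_oarchive.hpp>) */
  std::ostringstream oss;
  {
    boost::archive::text_oarchive oa(oss);
    oa << q;                       // pack the proposal exactly once
  }
  const std::string packed = oss.str();
  BOOST_AUTO(iter, node.children.begin());
  for (; iter != node.children.end(); ++iter) {
    // every child receives the same pre-packed string
    node.requests.push_front(iter->isend(0, MPI_TAG_ADAPTER_PROPOSAL, packed));
  }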
void runMaster(mpi::communicator world, int size, int grid_dimension)
{
  // Start timer and go.
  boost::chrono::system_clock::time_point start = boost::chrono::system_clock::now();

  // Send
  Matrix      A(Size(size, size));
  Matrix result(Size(size, size));

  for(int row = 0; row < A.size.rows; ++row){
    for(int col = 0; col < A.size.cols; ++col){
      A.data[row][col] = (row % 11) + (col % 11);
    }
  }
  //cout << A << endl;
  //cout << "\nProduct:\n" << A*A << endl;

  // Do sequential
  if (grid_dimension == 0)
    A.square(result);

  // Else parallel
  else{
    // Split matrix up and send to slaves
    int slave_id = 1;
    int sub_matrix_sizes = size / grid_dimension;

    for(int i = 0; i < size; i += sub_matrix_sizes){
      for(int j = 0; j < size; j += sub_matrix_sizes){
        MatrixCrossSection cs = getCrossSection( A, i, j, sub_matrix_sizes);
        world.send(slave_id, 0, cs);
        slave_id++;
      }
    }

    // Receive
    std::vector<Matrix> saved;
    int num_slaves = world.size() - 1;

    for(int i = 1; i <= num_slaves; ++i){
      Matrix r;
      world.recv(i, 0, r);
      result.insertSubMatrix(r);
    }
  }

  // Done
  boost::chrono::duration<double> sec = boost::chrono::system_clock::now() - start;
  cout << sec.count() << endl;

  // Print Result
  //cout << "\nResult:\n" << result << endl;

  //assert ( result == A*A);
}
void runSlave(mpi::communicator world)
{
  // Receive
  MatrixCrossSection cs;
  world.recv(0, 0, cs);

  Matrix subMatrix(Size(cs.row_data.size(), cs.row_data.size()));
  cs.calculateVectorProduct(subMatrix);

  world.send(0, 0, subMatrix);
}
Example #6
template <typename T>
boost::mpi::status mpi_recv_workaround(int source, int tag, T& value,
                                       boost::mpi::communicator & comm)
{
    // receive a string
    std::string s;
    boost::mpi::status st = comm.recv(source, tag, s);
    // deserialize the string into T
    std::istringstream iss(s);
    boost::archive::text_iarchive ia(iss);
    ia >> value;
    return st;
}
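A plausible sending-side counterpart of the workaround above (not part of the original snippet): pack the value into a string with the same text archive so the receiver can unpack it with mpi_recv_workaround. Requires <sstream>, <boost/archive/text_oarchive.hpp> and <boost/mpi.hpp>.

template <typename T>
void mpi_send_workaround(int dest, int tag, const T& value,
                         boost::mpi::communicator & comm)
{
    // serialize T into a string
    std::ostringstream oss;
    boost::archive::text_oarchive oa(oss);
    oa << value;
    // send the string
    comm.send(dest, tag, oss.str());
}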
Example #7
static void master(mpi::communicator world){
  int ntasks, rank;
  vector<int> data;
  int work;
  int result;

  for(int i = 0; i < 10; i++){
    data.push_back(i);
  }

  const int size_work = (int)data.size();

  rank = world.rank();    // rank (ID) of this process
  ntasks = world.size();  // total number of processes

  // hand an initial work item to every slave
  for (rank = 1; rank < ntasks; ++rank) {
    get_next_work_item(work, size_work, data);
    world.send(rank, WORKTAG, work);
  }

  int ret = get_next_work_item(work, size_work, data);

  // keep feeding whichever slave returns a result until the work runs out
  while (ret == 0){
    mpi::status status = world.recv(mpi::any_source, mpi::any_tag, result);

    world.send(status.source(), WORKTAG, work);

    ret = get_next_work_item(work, size_work, data);
  }

  // collect the outstanding results
  for (rank = 1; rank < ntasks; ++rank) {
    world.recv(mpi::any_source, mpi::any_tag, result);
  }

  // tell every slave to shut down
  for (rank = 1; rank < ntasks; ++rank) {
    world.send(rank, DIETAG, 0);
  }
}
Example #8
static void slave(mpi::communicator world) {

  int work;
  int result;

  while (1) {

    mpi::status status = world.recv(mpi::any_source, mpi::any_tag, work);
    if (status.tag() == DIETAG) {
      return;
    }
    do_work(work, result);
    world.send(0, 0, result);
  }
}
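get_next_work_item and do_work are used by master() and slave() above but are not included in the snippets. One purely illustrative interpretation that matches the call sites (a return value of 0 meaning "a work item was assigned"):

// Hypothetical helpers -- placeholders consistent with the calls above.
static int get_next_work_item(int& work, const int size_work, vector<int>& data) {
  static int next = 0;              // index of the next unassigned item
  if (next >= size_work) {
    return 1;                       // no work left
  }
  work = data[next++];
  return 0;                         // a work item was assigned
}

static void do_work(int work, int& result) {
  result = work * work;             // placeholder computation
}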
Example #9
void bi::MarginalSISHandler<B,A,S>::handleStopperLogWeights(
    boost::mpi::communicator child, boost::mpi::status status) {
  typedef typename temp_host_vector<real>::type vector_type;

  double maxlw = BI_INF;

  /* add weights */
  boost::optional<int> n = status.template count<real>();
  if (n) {
    vector_type lws(*n);
    child.recv(status.source(), status.tag(), lws.buf(), *n);
    stopper.add(lws, maxlw);
  }

  /* signal stop if necessary */
  if (stopper.stop()) {
    BOOST_AUTO(iter, node.children.begin());
    for (; iter != node.children.end(); ++iter) {
      node.requests.push_front(iter->isend(0, MPI_TAG_STOPPER_STOP));
    }
  }
}
Example #10
MCCResults mccrun_master(
  const Options& opts, const Eigen::VectorXd& vpar, unsigned int num_bins,
  const set<observables_t>& obs, const mpi::communicator& mpicomm )
{
  cout << "========== NEW MONTE CARLO CYCLE ==========" << endl;
  cout << ":: Preparing the simulation" << endl;

  HubbardModelVMC model = prepare_model( opts, vpar, mpicomm );

  vector< unique_ptr<Observable> > obscalc = prepare_obscalcs( obs, opts );
  ObservableCache obscache;

  unsigned int finished_workers = 0;
  unsigned int scheduled_bins = 0;
  unsigned int completed_bins = 0;
  unsigned int enqueued_bins  = num_bins;

  // define procedure to query the slaves for new work requests
  function<void()> mpiquery_work_requests( [&]() {
    while ( boost::optional<mpi::status> status
            = mpicomm.iprobe( mpi::any_source, MSGTAG_S_M_REQUEST_BINS ) ) {
      // receive the request and hand out new bins to the source
      mpicomm.recv( status->source(), MSGTAG_S_M_REQUEST_BINS );
      if ( enqueued_bins > 0 ) {
        mpicomm.send( status->source(), MSGTAG_M_S_DISPATCHED_BINS, 1 );
        scheduled_bins += 1;
        enqueued_bins  -= 1;
      } else {
        mpicomm.send( status->source(), MSGTAG_M_S_DISPATCHED_BINS, 0 );
        ++finished_workers;
      }
    }
  } );

  // define procedure to query the slaves for finished work
  function<void()> mpiquery_finished_work( [&]() {
    while ( boost::optional<mpi::status> status
            = mpicomm.iprobe( mpi::any_source, 2 ) ) {
      mpicomm.recv( status->source(), 2 );
      --scheduled_bins;
      ++completed_bins;
    }
  } );

  cout << ":: Equilibrating the system" << endl;

  for (
    unsigned int mcs = 0;
    mcs < opts["calc.num-mcs-equil"].as<unsigned int>();
    ++mcs ) {
    // take care of the slaves
    mpiquery_finished_work();
    mpiquery_work_requests();

    // perform a Monte Carlo step
    model.mcs();
  }

  unsigned int completed_bins_master = 0;

  cout << ":: Performing Monte Carlo cycle" << endl;
  cout << endl;
  cout << "   Progress:" << endl;

  while ( enqueued_bins > 0 ) {

    cout << '\r' << "     Bin "
         << completed_bins << "/" << num_bins;
    cout.flush();

    --enqueued_bins;
    ++scheduled_bins;

    for (
      unsigned int mcs = 0;
      mcs < opts["calc.num-binmcs"].as<unsigned int>();
      ++mcs ) {
      // take care of the slaves
      mpiquery_finished_work();
      mpiquery_work_requests();

      // perform a Monte Carlo step
      model.mcs();

      // measure observables
      for ( const unique_ptr<Observable>& o : obscalc ) {
        o->measure( model, obscache );
      }
      obscache.clear();
    }

    // tell the observables that a bin has been completed
    for ( const unique_ptr<Observable>& o : obscalc ) {
      o->completebin();
    }

    --scheduled_bins;
    ++completed_bins_master;
    ++completed_bins;
  }
  ++finished_workers;

  while ( completed_bins != num_bins ||
          static_cast<int>( finished_workers ) < mpicomm.size() ) {
    if ( boost::optional<mpi::status> status
         = mpicomm.iprobe( mpi::any_source, MSGTAG_S_M_FINISHED_BINS ) ) {
      mpicomm.recv( status->source(), MSGTAG_S_M_FINISHED_BINS );
      --scheduled_bins;
      ++completed_bins;

      cout << '\r' << "     Bin " << completed_bins << "/" << num_bins;
      cout.flush();
    }

    if ( boost::optional<mpi::status> status
         = mpicomm.iprobe( mpi::any_source, MSGTAG_S_M_REQUEST_BINS ) ) {
      // receive the request for more work
      mpicomm.recv( status->source(), MSGTAG_S_M_REQUEST_BINS );
      // tell it that there is no more work
      mpicomm.send( status->source(), MSGTAG_M_S_DISPATCHED_BINS, 0 );
      ++finished_workers;
    }
  }
  assert( enqueued_bins == 0 );
  assert( scheduled_bins == 0 );

  cout << '\r' << "     Bin " << completed_bins << "/" << num_bins << endl;
  cout.flush();

  // check for floating point precision problems

  cout << endl;
  cout << "   Floating point precision control" << endl;

  vector<FPDevStat> W_devstats;
  assert( mpicomm.rank() == 0 );
  mpi::gather( mpicomm, model.get_W_devstat(), W_devstats, 0 );
  FPDevStat W_devstat_combined =
    accumulate(
      W_devstats.begin(), W_devstats.end(),
      FPDevStat( opts["fpctrl.W-deviation-target"].as<double>() )
    );
  cout << "     W: " << W_devstat_combined.recalcs
       << "/" << W_devstat_combined.misses
       << "/" << W_devstat_combined.mag1_misses << endl;

  vector<FPDevStat> T_devstats;
  assert( mpicomm.rank() == 0 );
  mpi::gather( mpicomm, model.get_T_devstat(), T_devstats, 0 );
  FPDevStat T_devstat_combined =
    accumulate(
      T_devstats.begin(), T_devstats.end(),
      FPDevStat( opts["fpctrl.T-deviation-target"].as<double>() )
    );
  cout << "     T: " << T_devstat_combined.recalcs
       << "/" << T_devstat_combined.misses
       << "/" << T_devstat_combined.mag1_misses << endl;

  if ( W_devstat_combined.mag1_misses > 0 ||
       T_devstat_combined.mag1_misses > 0 ) {
    cout << "   Precision targets missed by more than an order of magnitude!" << endl
         << "   WARNING: Your results might be unreliable!!!" << endl << endl;
  } else if ( W_devstat_combined.misses > 0 ||
              T_devstat_combined.misses > 0 ) {
    cout << "   Some precision targets were missed, but your results should be fine."
         << endl << endl;
  } else {
    cout << "   No missed precision targets." << endl << endl;
  }

  // collect results from the slaves and return the results to the scheduler
  MCCResults results;
  for ( const unique_ptr<Observable>& o : obscalc ) {
    o->collect_and_write_results( mpicomm, results );
  }
  results.success = true;
  return results;
}
Example #11
void mccrun_slave(
  const Options& opts, const Eigen::VectorXd& vpar,
  const set<observables_t>& obs, const mpi::communicator& mpicomm )
{
  // prepare the simulation

  HubbardModelVMC model = prepare_model( opts, vpar, mpicomm );
  vector< unique_ptr<Observable> > obscalc = prepare_obscalcs( obs, opts );
  ObservableCache obscache;

  // equilibrate the system

  for (
    unsigned int mcs = 0;
    mcs < opts["calc.num-mcs-equil"].as<unsigned int>();
    ++mcs )
  {
    model.mcs();
  }

  // run this slave's part of the Monte Carlo cycle

  unsigned int completed_bins_thisslave = 0;
  bool master_out_of_work = false;
  unsigned int scheduled_bins_thisslave;
  mpicomm.send( 0, MSGTAG_S_M_REQUEST_BINS );
  mpicomm.recv( 0, MSGTAG_M_S_DISPATCHED_BINS, scheduled_bins_thisslave );
  master_out_of_work = ( scheduled_bins_thisslave == 0 );

  while ( scheduled_bins_thisslave > 0 ) {

    unsigned int new_scheduled_bins_thisslave;
    mpi::request master_answer;
    if ( !master_out_of_work ) {
      // ask the master for more work
      mpicomm.send( 0, MSGTAG_S_M_REQUEST_BINS );
      master_answer = mpicomm.irecv(
        0, MSGTAG_M_S_DISPATCHED_BINS,
        new_scheduled_bins_thisslave
      );
    }

    for (
      unsigned int mcs = 0;
      mcs < opts["calc.num-binmcs"].as<unsigned int>();
      ++mcs )
    {
      // perform a Monte Carlo step
      model.mcs();

      // measure observables
      for ( const unique_ptr<Observable>& o : obscalc ) {
        o->measure( model, obscache );
      }
      obscache.clear();
    }

    // tell the observables that a bin has been completed
    for ( const unique_ptr<Observable>& o : obscalc ) {
      o->completebin();
    }

    // report completion of the work
    mpicomm.send( 0, 2 );
    ++completed_bins_thisslave;
    --scheduled_bins_thisslave;

    if ( !master_out_of_work ) {
      // wait for answer from master concerning the next bin
      master_answer.wait();
      if ( new_scheduled_bins_thisslave == 1 ) {
        ++scheduled_bins_thisslave;
      } else {
        master_out_of_work = true;
      }
    }
  }

  // send floating point precision control data to master
  mpi::gather( mpicomm, model.get_W_devstat(), 0 );
  mpi::gather( mpicomm, model.get_T_devstat(), 0 );

  // send observables to master
  for ( const unique_ptr<Observable>& o : obscalc ) {
    o->send_results_to_master( mpicomm );
  }
}
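The MSGTAG_* constants used by mccrun_master() and mccrun_slave() are not defined in these snippets. A hypothetical, consistent assignment; note that the "finished bin" messages in the code above use the literal tag 2, so MSGTAG_S_M_FINISHED_BINS would have to equal 2 for the two functions to interoperate:

// Hypothetical tag values -- assumptions, not taken from the original sources.
enum {
  MSGTAG_S_M_REQUEST_BINS    = 1,  // slave -> master: "please give me a bin"
  MSGTAG_S_M_FINISHED_BINS   = 2,  // slave -> master: "one bin completed"
  MSGTAG_M_S_DISPATCHED_BINS = 3   // master -> slave: number of bins handed out
};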
Example #12
  // Receive and process incoming messages while the process has no work
  void inactiveRecvLoop() {
    string recvMsg;
    
    while (true) {
                
      // Blocking wait until a message arrives
      mpi::status status = com.probe();

      // Data arrived - off to work!
      if (status.tag() == DATA) {
        //cout << "inactive: recv data from " << status.source() << endl;
        com.recv(status.source(), status.tag(), data.startString);
        work();
      } 
                
      // The process we asked has no work to give away
      // A new work request is sent to a random process
      else if (status.tag() == NO_WORK) {
        //cout << "inactive: recv no_work from " << status.source() << endl;
        com.recv(status.source(), status.tag());
        sendRequest();
      } 
      
      // A work request arrived, but I have no work myself
      else if (status.tag() == WORK_REQUEST) {
        //cout << "inactive: recv work_req from " << status.source() << endl;
        com.recv(status.source(), status.tag());
        //cout << "Sending NO_WORK to " << status.source() << endl;
        com.send(status.source(), NO_WORK);
      } 
      
      // A token arrived; handling differs depending on whether I am the master or a slave process
      else if (status.tag() == TOKEN) {
        bool isWhiteToken;
        //cout << "inactive: recv token from " << status.source() << endl;
        com.recv(status.source(), status.tag(), isWhiteToken);
        if (handleToken(isWhiteToken)) {
          return;
        }
      } 

      // A notification arrived that some subsequence was found
      // If it is longer than my best one so far, store it
      else if (status.tag() == FOUND) {
        cout << "inactive: recv found from " << status.source() << endl;
        com.recv(status.source(), status.tag(), recvMsg);
        if (recvMsg.length() > myLongest.length()) {
          myLongest = recvMsg;
        }
      }
                
      // Some process found the best possible result; store it and stop waiting/working
      else if (status.tag() == FOUND_BEST) {
        cout << "inactive: recv found best from " << status.source() << endl;
        com.recv(status.source(), status.tag(), myLongest);
        return;
      }
      
      // Notice from the master process that the computation finished successfully - safe to shut down
      else if (status.tag() == END) {
        //cout << "inactive: recv end from " << status.source() << endl;
        return;
      }
      
      // A message arrived that cannot be handled right now; receive it and discard it
      else {
        //cout << "inactive: recv from " << status.source() << endl;
        com.recv(status.source(), status.tag());
      }
    } 
  }
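handleToken is called above but is not included. A sketch of the white/black token termination scheme the tags suggest (Dijkstra-style detection on a ring of com.size() processes, assuming at least two processes); the bookkeeping of the process's own colour is omitted, so this is an assumption about the structure, not the original code:

  // Hypothetical sketch -- not the original helper. Returns true when the
  // computation may terminate.
  bool handleToken(bool isWhiteToken) {
    int rank = com.rank();
    int size = com.size();
    if (rank == 0) {
      if (isWhiteToken) {
        // a white token made a full round: everyone is idle, shut down
        for (int p = 1; p < size; ++p) {
          com.send(p, END);
        }
        return true;
      }
      // the round was "dirty": start a fresh white token
      com.send(1, TOKEN, true);
    } else {
      // an idle slave simply forwards the token colour to its neighbour
      com.send((rank + 1) % size, TOKEN, isWhiteToken);
    }
    return false;
  }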