void generate_data(mpi::communicator local, mpi::communicator world)
{
  using std::srand;
  using std::rand;

  // The rank of the collector within the world communicator
  int master_collector = local.size();

  srand(time(0) + world.rank());

  // Send out several blocks of random data to the collectors.
  int num_data_blocks = rand() % 3 + 1;
  for (int block = 0; block < num_data_blocks; ++block) {
    // Generate some random data
    int num_samples = rand() % 1000;
    std::vector<int> data;
    for (int i = 0; i < num_samples; ++i) {
      data.push_back(rand());
    }

    // Send our data to the master collector process.
    std::cout << "Generator #" << local.rank() << " sends some data..."
              << std::endl;
    world.send(master_collector, msg_data_packet, data);
  }

  // Wait for all of the generators to complete
  (local.barrier)();

  // The first generator will send the message to the master collector
  // indicating that we're done.
  if (local.rank() == 0)
    world.send(master_collector, msg_finished);
}
void collect_data(mpi::communicator local, mpi::communicator world)
{
  // The rank of the collector within the world communicator
  int master_collector = world.size() - local.size();

  if (world.rank() == master_collector) {
    while (true) {
      // Wait for a message
      mpi::status msg = world.probe();
      if (msg.tag() == msg_data_packet) {
        // Receive the packet of data
        std::vector<int> data;
        world.recv(msg.source(), msg.tag(), data);

        // Tell each of the collectors that we'll be broadcasting some data
        for (int dest = 1; dest < local.size(); ++dest)
          local.send(dest, msg_broadcast_data, msg.source());

        // Broadcast the actual data.
        broadcast(local, data, 0);
      } else if (msg.tag() == msg_finished) {
        // Receive the message
        world.recv(msg.source(), msg.tag());

        // Tell each of the collectors that we're finished
        for (int dest = 1; dest < local.size(); ++dest)
          local.send(dest, msg_finished);

        break;
      }
    }
  } else {
    while (true) {
      // Wait for a message from the master collector
      mpi::status msg = local.probe();
      if (msg.tag() == msg_broadcast_data) {
        // Receive the broadcast message
        int originator;
        local.recv(msg.source(), msg.tag(), originator);

        // Receive the data broadcast from the master collector
        std::vector<int> data;
        broadcast(local, data, 0);

        std::cout << "Collector #" << local.rank()
                  << " is processing data from generator #" << originator
                  << "." << std::endl;
      } else if (msg.tag() == msg_finished) {
        // Receive the message
        local.recv(msg.source(), msg.tag());

        break;
      }
    }
  }
}
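
A possible driver for these two routines (a minimal sketch, not part of the original snippet): the master_collector arithmetic above implies that the generators occupy the lower ranks of the world communicator, so the world can be split into a generator group and a collector group with communicator::split. The two-thirds split below is only an example choice.

#include <boost/mpi.hpp>
namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  // e.g. two thirds of the processes generate data, the rest collect it
  bool is_generator = world.rank() < 2 * world.size() / 3;
  mpi::communicator local = world.split(is_generator ? 0 : 1);

  if (is_generator)
    generate_data(local, world);
  else
    collect_data(local, world);
}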
Example #3
void DocumentDecoder::manageTranslators(
	boost::mpi::communicator comm,
	NistXmlCorpus &testset
) {
	namespace mpi = boost::mpi;

	mpi::request reqs[2];
	int stopped = 0;

	NumberedOutputDocument translation;
	reqs[0] = comm.irecv(mpi::any_source, TAG_COLLECT, translation);
	reqs[1] = comm.irecv(mpi::any_source, TAG_STOP_COLLECTING);

	NistXmlCorpus::const_iterator it = testset.begin();
	uint docno = 0;
	for(int i = 0; i < comm.size() && it != testset.end(); ++i, ++docno, ++it) {
		LOG(logger_, debug, "S: Sending document " << docno << " to translator " << i);
		comm.send(i, TAG_TRANSLATE, std::make_pair(docno, *(*it)->asMMAXDocument()));
	}

	for(;;) {
		std::pair<mpi::status, mpi::request *> wstat = mpi::wait_any(reqs, reqs + 2);
		if(wstat.first.tag() == TAG_STOP_COLLECTING) {
			stopped++;
			LOG(logger_, debug, "C: Received STOP_COLLECTING from translator "
				<< wstat.first.source() << ", now " << stopped << " stopped translators.");
			if(stopped == comm.size()) {
				reqs[0].cancel();
				return;
			}
			*wstat.second = comm.irecv(mpi::any_source, TAG_STOP_COLLECTING);
		} else {
			LOG(logger_, debug, "C: Received translation of document " <<
				translation.first << " from translator " << wstat.first.source());
			reqs[0] = comm.irecv(mpi::any_source, TAG_COLLECT, translation);
			if(it != testset.end()) {
				LOG(logger_, debug, "S: Sending document " << docno <<
					" to translator " << wstat.first.source());
				comm.send(wstat.first.source(), TAG_TRANSLATE,
					std::make_pair(docno, *(*it)->asMMAXDocument()));
				++docno; ++it;
			} else {
				LOG(logger_, debug,
					"S: Sending STOP_TRANSLATING to translator " << wstat.first.source());
				comm.send(wstat.first.source(), TAG_STOP_TRANSLATING);
			}
			testset[translation.first]->setTranslation(translation.second);
		}
	}
}
Example #4
  // Handle an incoming work request
  void handleWorkRequest(int source, stack<string>& workStack) {
    string response = splitStack(workStack);

    // Nothing to send
    if (response.empty()) {
      //cout << "Sending NO_WORK to " << source << endl;
      com.send(source, NO_WORK);
    } else {
      // If sending data to a process with a lower index, recolor myself black
      if (source < com.rank()) {
        isWhiteProcess = false;
      }
      //cout << "Sending DATA to " << source << endl;
      com.send(source, DATA, response);
    }
  }
Example #5
 // Send a message to all other processes
 void broadcastMessage(int msgType) {
   for (int i = 0; i < com.size(); i++) {
     // Don't send anything to myself
     if (i == com.rank()) { continue; }

     // If a result was found, send it
     if (msgType == FOUND || msgType == FOUND_BEST) {
       //cout << "Sending (BEST)FOUND to " << i << endl;
       com.send(i, msgType, myLongest);
     }

     // When announcing the end of the computation, no data needs to be sent
     else if (msgType == END) {
       //cout << "Sending end to " << i << endl;
       com.send(i, msgType);
     }
   }
 }
Example #6
template <typename T>
void mpi_send_workaround(int dest, int tag, const T& value,
                         boost::mpi::communicator & comm)
{
    // serialize T into a string
    std::ostringstream oss;
    boost::archive::text_oarchive oa(oss);
    oa << value;
    // send the string
    comm.send(dest, tag, oss.str());
}
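
The matching receive side is not shown in this snippet. A minimal counterpart sketch under the same text-archive convention (the name mpi_recv_workaround and the helper itself are hypothetical; it needs <string>, <sstream>, boost/archive/text_iarchive.hpp and boost/mpi/communicator.hpp):

// Hypothetical counterpart to mpi_send_workaround (not in the original
// snippet): receive the serialized string and deserialize it back into T.
template <typename T>
void mpi_recv_workaround(int source, int tag, T& value,
                         boost::mpi::communicator & comm)
{
    // receive the serialized payload as a string
    std::string buffer;
    comm.recv(source, tag, buffer);
    // deserialize the string back into T
    std::istringstream iss(buffer);
    boost::archive::text_iarchive ia(iss);
    ia >> value;
}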
void runMaster(mpi::communicator world, int size, int grid_dimension)
{
  // Start timer and go.
  boost::chrono::system_clock::time_point start = boost::chrono::system_clock::now();

  // Send
  Matrix      A(Size(size, size));
  Matrix result(Size(size, size));

  for(int row = 0; row < A.size.rows; ++row){
    for(int col = 0; col < A.size.cols; ++col){
      A.data[row][col] = (row % 11) + (col % 11);
    }
  }
  //cout << A << endl;
  //cout << "\nProduct:\n" << A*A << endl;

  // Do sequential
  if (grid_dimension == 0)
    A.square(result);

  // Else parallel
  else{
    // Split matrix up and send to slaves
    int slave_id = 1;
    int sub_matrix_sizes = size / grid_dimension;

    for(int i = 0; i < size; i += sub_matrix_sizes){
      for(int j = 0; j < size; j += sub_matrix_sizes){
        MatrixCrossSection cs = getCrossSection( A, i, j, sub_matrix_sizes);
        world.send(slave_id, 0, cs);
        slave_id++;
      }
    }

    // Receive
    std::vector<Matrix> saved;
    int num_slaves = world.size() - 1;

    for(int i = 1; i <= num_slaves; ++i){
      Matrix r;
      world.recv(i, 0, r);
      result.insertSubMatrix(r);
    }
  }

  // Done
  boost::chrono::duration<double> sec = boost::chrono::system_clock::now() - start;
  cout << sec.count() << endl;

  // Print Result
  //cout << "\nResult:\n" << result << endl;

  //assert ( result == A*A);
}
void runSlave(mpi::communicator world)
{
  // Receive
  MatrixCrossSection cs;
  world.recv(0, 0, cs);

  Matrix subMatrix(Size(cs.row_data.size(), cs.row_data.size()));
  cs.calculateVectorProduct(subMatrix);

  world.send(0, 0, subMatrix);
}
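
A minimal driver sketch for this pair (not in the original snippet): rank 0 acts as the master, every other rank as a slave. It assumes the communicator contains exactly grid_dimension * grid_dimension slaves plus the master, since runMaster sends one cross section to each slave_id; the matrix size and grid dimension below are hypothetical values.

#include <boost/mpi.hpp>
namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  int size = 8;            // hypothetical matrix dimension
  int grid_dimension = 2;  // hypothetical grid; 0 would run the sequential path

  if (world.rank() == 0)
    runMaster(world, size, grid_dimension);
  else
    runSlave(world);
}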
Example #9
 // Send a work request to a randomly chosen process
 // (rand() is assumed to be seeded once at startup, e.g. via srand(time(NULL)))
 void sendRequest() {
   int pn = rand() % com.size();

   // If the random number hits my own rank, draw again
   while (pn == com.rank()) {
     pn = rand() % com.size();
   }

   //cout << "Sending WORK_REQUEST to " << pn << endl;
   com.send(pn, WORK_REQUEST);
 }
Example #10
static void master(mpi::communicator world){
  int ntasks, rank;
  vector<int> data;
  int work;
  int result;

  for (int i = 0; i < 10; i++) {
    data.push_back(i);
  }

  const int size_work = (int)data.size();

  rank = world.rank();   // rank (ID) of this processor
  ntasks = world.size(); // total number of processors

  // Seed every slave with an initial work item.
  for (rank = 1; rank < ntasks; ++rank) {
    get_next_work_item(work, size_work, data);
    world.send(rank, WORKTAG, work);
  }

  // Hand out the remaining work as results come back.
  int ret = get_next_work_item(work, size_work, data);

  while (ret == 0) {
    mpi::status status = world.recv(mpi::any_source, mpi::any_tag, result);
    world.send(status.source(), WORKTAG, work);
    ret = get_next_work_item(work, size_work, data);
  }

  // Collect the outstanding results.
  for (rank = 1; rank < ntasks; ++rank) {
    world.recv(mpi::any_source, mpi::any_tag, result);
  }

  // Tell all the slaves to shut down.
  for (rank = 1; rank < ntasks; ++rank) {
    world.send(rank, DIETAG, 0);
  }
}
Example #11
static void slave(mpi::communicator world) {

  int work;
  int result;

  while (1) {
    mpi::status status = world.recv(mpi::any_source, mpi::any_tag, work);
    if (status.tag() == DIETAG) {
      return;
    }
    do_work(work, result);
    world.send(0, 0, result);
  }
}
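
get_next_work_item is referenced by the master/slave pair above but not shown. Judging from the call sites, a return value of 0 appears to mean that work now holds a valid item and a nonzero value means the queue is exhausted. A purely hypothetical sketch under that assumption:

// Hypothetical helper (not in the original snippets), matching the call sites
// above: write the next item into `work` and return 0, or return 1 once all
// items have been handed out. `size_work` is kept only to match the signature.
static int get_next_work_item(int& work, int size_work, std::vector<int>& data)
{
  (void)size_work;
  if (data.empty())
    return 1;          // queue exhausted
  work = data.back();  // hand out the next item
  data.pop_back();
  return 0;            // `work` now holds a valid item
}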
Example #12
void DocumentDecoder::translate() {
	namespace mpi = boost::mpi;

	mpi::request reqs[2];
	reqs[1] = communicator_.irecv(0, TAG_STOP_TRANSLATING);
	NumberedInputDocument input;
	for(;;) {
		reqs[0] = communicator_.irecv(0, TAG_TRANSLATE, input);
		std::pair<mpi::status, mpi::request *> wstat = mpi::wait_any(reqs, reqs + 2);
		if(wstat.first.tag() == TAG_STOP_TRANSLATING) {
			LOG(logger_, debug, "T: Received STOP_TRANSLATING.");
			reqs[0].cancel();
			communicator_.send(0, TAG_STOP_COLLECTING);
			return;
		} else {
			NumberedOutputDocument output;
			LOG(logger_, debug, "T: Received document " << input.first << " for translation.");
			output.first = input.first;
			output.second = runDecoder(input);
			LOG(logger_, debug, "T: Sending translation of document " << input.first << " to collector.");
			communicator_.send(0, TAG_COLLECT, output);
		}
	}
}
Example #13
MCCResults mccrun_master(
  const Options& opts, const Eigen::VectorXd& vpar, unsigned int num_bins,
  const set<observables_t>& obs, const mpi::communicator& mpicomm )
{
  cout << "========== NEW MONTE CARLO CYCLE ==========" << endl;
  cout << ":: Preparing the simulation" << endl;

  HubbardModelVMC model = prepare_model( opts, vpar, mpicomm );

  vector< unique_ptr<Observable> > obscalc = prepare_obscalcs( obs, opts );
  ObservableCache obscache;

  unsigned int finished_workers = 0;
  unsigned int scheduled_bins = 0;
  unsigned int completed_bins = 0;
  unsigned int enqueued_bins  = num_bins;

  // define procedure to query the slaves for new work requests
  function<void()> mpiquery_work_requests( [&]() {
    while ( boost::optional<mpi::status> status
            = mpicomm.iprobe( mpi::any_source, MSGTAG_S_M_REQUEST_BINS ) ) {
      // receive the request and hand out new bins to the source
      mpicomm.recv( status->source(), MSGTAG_S_M_REQUEST_BINS );
      if ( enqueued_bins > 0 ) {
        mpicomm.send( status->source(), MSGTAG_M_S_DISPATCHED_BINS, 1 );
        scheduled_bins += 1;
        enqueued_bins  -= 1;
      } else {
        mpicomm.send( status->source(), MSGTAG_M_S_DISPATCHED_BINS, 0 );
        ++finished_workers;
      }
    }
  } );

  // define procedure to query the slaves for finished work
  function<void()> mpiquery_finished_work( [&]() {
    // tag 2: "bin finished" notifications from the slaves
    // (presumably the same tag as MSGTAG_S_M_FINISHED_BINS used further below)
    while ( boost::optional<mpi::status> status
            = mpicomm.iprobe( mpi::any_source, 2 ) ) {
      mpicomm.recv( status->source(), 2 );
      --scheduled_bins;
      ++completed_bins;
    }
  } );

  cout << ":: Equilibrating the system" << endl;

  for (
    unsigned int mcs = 0;
    mcs < opts["calc.num-mcs-equil"].as<unsigned int>();
    ++mcs ) {
    // take care of the slaves
    mpiquery_finished_work();
    mpiquery_work_requests();

    // perform a Monte Carlo step
    model.mcs();
  }

  unsigned int completed_bins_master = 0;

  cout << ":: Performing Monte Carlo cycle" << endl;
  cout << endl;
  cout << "   Progress:" << endl;

  while ( enqueued_bins > 0 ) {

    cout << '\r' << "     Bin "
         << completed_bins << "/" << num_bins;
    cout.flush();

    --enqueued_bins;
    ++scheduled_bins;

    for (
      unsigned int mcs = 0;
      mcs < opts["calc.num-binmcs"].as<unsigned int>();
      ++mcs ) {
      // take care of the slaves
      mpiquery_finished_work();
      mpiquery_work_requests();

      // perform a Monte Carlo step
      model.mcs();

      // measure observables
      for ( const unique_ptr<Observable>& o : obscalc ) {
        o->measure( model, obscache );
      }
      obscache.clear();
    }

    // tell the observables that a bin has been completed
    for ( const unique_ptr<Observable>& o : obscalc ) {
      o->completebin();
    }

    --scheduled_bins;
    ++completed_bins_master;
    ++completed_bins;
  }
  ++finished_workers;

  while ( completed_bins != num_bins ||
          static_cast<int>( finished_workers ) < mpicomm.size() ) {
    if ( boost::optional<mpi::status> status
         = mpicomm.iprobe( mpi::any_source, MSGTAG_S_M_FINISHED_BINS ) ) {
      mpicomm.recv( status->source(), MSGTAG_S_M_FINISHED_BINS );
      --scheduled_bins;
      ++completed_bins;

      cout << '\r' << "     Bin " << completed_bins << "/" << num_bins;
      cout.flush();
    }

    if ( boost::optional<mpi::status> status
         = mpicomm.iprobe( mpi::any_source, MSGTAG_S_M_REQUEST_BINS ) ) {
      // receive the request for more work
      mpicomm.recv( status->source(), MSGTAG_S_M_REQUEST_BINS );
      // tell it that there is no more work
      mpicomm.send( status->source(), MSGTAG_M_S_DISPATCHED_BINS, 0 );
      ++finished_workers;
    }
  }
  assert( enqueued_bins == 0 );
  assert( scheduled_bins == 0 );

  cout << '\r' << "     Bin " << completed_bins << "/" << num_bins << endl;
  cout.flush();

  // check for floating point precision problems

  cout << endl;
  cout << "   Floating point precision control" << endl;

  vector<FPDevStat> W_devstats;
  assert( mpicomm.rank() == 0 );
  mpi::gather( mpicomm, model.get_W_devstat(), W_devstats, 0 );
  FPDevStat W_devstat_combined =
    accumulate(
      W_devstats.begin(), W_devstats.end(),
      FPDevStat( opts["fpctrl.W-deviation-target"].as<double>() )
    );
  cout << "     W: " << W_devstat_combined.recalcs
       << "/" << W_devstat_combined.misses
       << "/" << W_devstat_combined.mag1_misses << endl;

  vector<FPDevStat> T_devstats;
  assert( mpicomm.rank() == 0 );
  mpi::gather( mpicomm, model.get_T_devstat(), T_devstats, 0 );
  FPDevStat T_devstat_combined =
    accumulate(
      T_devstats.begin(), T_devstats.end(),
      FPDevStat( opts["fpctrl.T-deviation-target"].as<double>() )
    );
  cout << "     T: " << T_devstat_combined.recalcs
       << "/" << T_devstat_combined.misses
       << "/" << T_devstat_combined.mag1_misses << endl;

  if ( W_devstat_combined.mag1_misses > 0 ||
       T_devstat_combined.mag1_misses > 0 ) {
    cout << "   Precision targets missed by more than an order of magnitude!" << endl
         << "   WARNING: Your results might be unreliable!!!" << endl << endl;
  } else if ( W_devstat_combined.misses > 0 ||
              T_devstat_combined.misses > 0 ) {
    cout << "   Some precision targets were missed, but your results should be fine."
         << endl << endl;
  } else {
    cout << "   No missed precision targets." << endl << endl;
  }

  // collect results from the slaves and return the results to the scheduler
  MCCResults results;
  for ( const unique_ptr<Observable>& o : obscalc ) {
    o->collect_and_write_results( mpicomm, results );
  }
  results.success = true;
  return results;
}
Example #14
void mccrun_slave(
  const Options& opts, const Eigen::VectorXd& vpar,
  const set<observables_t>& obs, const mpi::communicator& mpicomm )
{
  // prepare the simulation

  HubbardModelVMC model = prepare_model( opts, vpar, mpicomm );
  vector< unique_ptr<Observable> > obscalc = prepare_obscalcs( obs, opts );
  ObservableCache obscache;

  // equilibrate the system

  for (
    unsigned int mcs = 0;
    mcs < opts["calc.num-mcs-equil"].as<unsigned int>();
    ++mcs )
  {
    model.mcs();
  }

  // run this slave's part of the Monte Carlo cycle

  unsigned int completed_bins_thisslave = 0;
  bool master_out_of_work = false;
  unsigned int scheduled_bins_thisslave;
  mpicomm.send( 0, MSGTAG_S_M_REQUEST_BINS );
  mpicomm.recv( 0, MSGTAG_M_S_DISPATCHED_BINS, scheduled_bins_thisslave );
  master_out_of_work = ( scheduled_bins_thisslave == 0 );

  while ( scheduled_bins_thisslave > 0 ) {

    unsigned int new_scheduled_bins_thisslave;
    mpi::request master_answer;
    if ( !master_out_of_work ) {
      // ask the master for more work
      mpicomm.send( 0, MSGTAG_S_M_REQUEST_BINS );
      master_answer = mpicomm.irecv(
        0, MSGTAG_M_S_DISPATCHED_BINS,
        new_scheduled_bins_thisslave
      );
    }

    for (
      unsigned int mcs = 0;
      mcs < opts["calc.num-binmcs"].as<unsigned int>();
      ++mcs )
    {
      // perform a Monte Carlo step
      model.mcs();

      // measure observables
      for ( const unique_ptr<Observable>& o : obscalc ) {
        o->measure( model, obscache );
      }
      obscache.clear();
    }

    // tell the observables that a bin has been completed
    for ( const unique_ptr<Observable>& o : obscalc ) {
      o->completebin();
    }

    // report completion of the work to the master
    // (tag 2, presumably matching MSGTAG_S_M_FINISHED_BINS on the master side)
    mpicomm.send( 0, 2 );
    ++completed_bins_thisslave;
    --scheduled_bins_thisslave;

    if ( !master_out_of_work ) {
      // wait for answer from master concerning the next bin
      master_answer.wait();
      if ( new_scheduled_bins_thisslave == 1 ) {
        ++scheduled_bins_thisslave;
      } else {
        master_out_of_work = true;
      }
    }
  }

  // send floating point precision control data to master
  mpi::gather( mpicomm, model.get_W_devstat(), 0 );
  mpi::gather( mpicomm, model.get_T_devstat(), 0 );

  // send observables to master
  for ( const unique_ptr<Observable>& o : obscalc ) {
    o->send_results_to_master( mpicomm );
  }
}
Example #15
  // Receive and handle incoming messages while the process has no work
  void inactiveRecvLoop() {
    string recvMsg;

    while (true) {

      // Block until a message arrives
      mpi::status status = com.probe();

      // Data arrived - off to work!
      if (status.tag() == DATA) {
        //cout << "inactive: recv data from " << status.source() << endl;
        com.recv(status.source(), status.tag(), data.startString);
        work();
      }

      // The asked process has no work to hand out;
      // send a new work request to a random process
      else if (status.tag() == NO_WORK) {
        //cout << "inactive: recv no_work from " << status.source() << endl;
        com.recv(status.source(), status.tag());
        sendRequest();
      }

      // A work request arrived, but I have no work myself
      else if (status.tag() == WORK_REQUEST) {
        //cout << "inactive: recv work_req from " << status.source() << endl;
        com.recv(status.source(), status.tag());
        //cout << "Sending NO_WORK to " << status.source() << endl;
        com.send(status.source(), NO_WORK);
      }

      // A token arrived; handling differs depending on whether this is
      // the master or a slave process
      else if (status.tag() == TOKEN) {
        bool isWhiteToken;
        //cout << "inactive: recv token from " << status.source() << endl;
        com.recv(status.source(), status.tag(), isWhiteToken);
        if (handleToken(isWhiteToken)) {
          return;
        }
      }

      // A notification arrived that some subsequence was found;
      // if it is longer than my best so far, keep it
      else if (status.tag() == FOUND) {
        cout << "inactive: recv found from " << status.source() << endl;
        com.recv(status.source(), status.tag(), recvMsg);
        if (recvMsg.length() > myLongest.length()) {
          myLongest = recvMsg;
        }
      }

      // Some process found the overall best result; store it and stop waiting/working
      else if (status.tag() == FOUND_BEST) {
        cout << "inactive: recv found best from " << status.source() << endl;
        com.recv(status.source(), status.tag(), myLongest);
        return;
      }

      // Notice from the master process that the computation finished
      // successfully - it is safe to shut down
      else if (status.tag() == END) {
        //cout << "inactive: recv end from " << status.source() << endl;
        return;
      }

      // A message arrived that cannot be handled right now; receive it and discard it
      else {
        //cout << "inactive: recv from " << status.source() << endl;
        com.recv(status.source(), status.tag());
      }
    }
  }