Example #1
template<typename Generator>
void
all_gatherv_test(const mpi::communicator& comm, Generator generator,
                 std::string kind)
{
  typedef typename Generator::result_type value_type;
  using boost::mpi::all_gatherv;

  std::vector<value_type> myvalues, expected, values;
  std::vector<int>        sizes;
  for(int r = 0; r < comm.size(); ++r) {
    value_type value = generator(r);
    sizes.push_back(r+1);
    for (int k=0; k < r+1; ++k) {
      expected.push_back(value);
      if(comm.rank() == r) {
        myvalues.push_back(value);
      }
    }
  }
  if (comm.rank() == 0) {
    std::cout << "Gathering " << kind << "...";
    std::cout.flush();
  }
  
  mpi::all_gatherv(comm, myvalues, values, sizes);
  
  BOOST_CHECK(values == expected);
  
  if (comm.rank() == 0 && values == expected)
    std::cout << "OK." << std::endl;
  
  (comm.barrier)();
}
Example #2
std::string run_model(Properties& props, boost::mpi::communicator& world) {

	try {
		repast::relogo::SimulationRunner runner(&world);

		if (world.rank() == 0) {
			std::string time;
			repast::timestamp(time);
			std::cout << "Start Time: " << time << std::endl;
		}

		repast::Timer timer;
		timer.start();
		runner.run<ZombieObserver, repast::relogo::Patch>(props);

		if (world.rank() == 0) {
			std::string time;
			repast::timestamp(time);
			std::cout << "End Time: " << time << "\nElapsed Time: "
					<< timer.stop() << std::endl;
		}

	} catch (std::exception& exp) {
		// catch any exception (e.g. if data files couldn't be opened) and
		// print out the errors.
		std::cerr << "ERROR: " << exp.what() << std::endl;
	}
	return props.getProperty(OUTPUT_KEY);
}
Example #3
void TwoParticleGF::compute(bool clear, const boost::mpi::communicator & comm)
{
    if (Status < Prepared) throw (exStatusMismatch());
    if (Status >= Computed) return;
    if (!Vanishing) {
        // Create a "skeleton" class with pointers to parts that can call a compute method
        pMPI::mpi_skel<ComputeAndClearWrap> skel;
        bool fill_container = m_data_.NBosonic() > 0 && m_data_.NFermionic() > 0;
        skel.parts.reserve(parts.size());
        for (size_t i = 0; i < parts.size(); i++) {
            skel.parts.push_back(ComputeAndClearWrap(&m_data_, parts[i], clear, fill_container, 1));
        }
        std::map<pMPI::JobId, pMPI::WorkerId> job_map = skel.run(comm, true); // actual run - very costly
        int rank = comm.rank();

        // Start distributing data
        //DEBUG(comm.rank() << getIndex(0) << getIndex(1) << getIndex(2) << getIndex(3) << " Start distributing data");
        comm.barrier();

        if (!clear) {
            for (size_t p = 0; p < parts.size(); p++) {
                boost::mpi::broadcast(comm, parts[p]->NonResonantTerms, job_map[p]);
                boost::mpi::broadcast(comm, parts[p]->ResonantTerms, job_map[p]);
                if (rank == job_map[p]) {
                    parts[p]->Status = TwoParticleGFPart::Computed;
                }
            }
            comm.barrier();
        }
    }
    Status = Computed;
}
Example #4
void runMaster(mpi::communicator world, int size, int grid_dimension)
{
  // Start timer and go.
  boost::chrono::system_clock::time_point start = boost::chrono::system_clock::now();

  // Send
  Matrix      A(Size(size, size));
  Matrix result(Size(size, size));

  for(int row = 0; row < A.size.rows; ++row){
    for(int col = 0; col < A.size.cols; ++col){
      A.data[row][col] = (row % 11) + (col % 11);
    }
  }
  //cout << A << endl;
  //cout << "\nProduct:\n" << A*A << endl;

  // Do sequential
  if (grid_dimension == 0)
    A.square(result);

  // Else parallel
  else{
    // Split matrix up and send to slaves
    int slave_id = 1;
    int sub_matrix_sizes = size / grid_dimension;

    for(int i = 0; i < size; i += sub_matrix_sizes){
      for(int j = 0; j < size; j += sub_matrix_sizes){
        MatrixCrossSection cs = getCrossSection( A, i, j, sub_matrix_sizes);
        world.send(slave_id, 0, cs);
        slave_id ++;
      }
    }

    // Receive results from the slaves
    int num_slaves = world.size() - 1;

    for(int i = 1; i <= num_slaves; ++i){
      Matrix r;
      world.recv(i, 0, r);
      result.insertSubMatrix(r);
    }
  }

  // Done
  boost::chrono::duration<double> sec = boost::chrono::system_clock::now() - start;
  cout << sec.count() << endl;

  // Print Result
  //cout << "\nResult:\n" << result << endl;

  //assert ( result == A*A);
}
Example #5
void runSlave(mpi::communicator world)
{
  // Receive
  MatrixCrossSection cs;
  world.recv(0, 0, cs);

  Matrix subMatrix(Size(cs.row_data.size(), cs.row_data.size()));
  cs.calculateVectorProduct(subMatrix);

  world.send(0, 0, subMatrix);
}
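A minimal driver for the master/slave pair above (a sketch, not part of the original project: the parameter values are hypothetical, and grid_dimension must satisfy world.size() >= grid_dimension*grid_dimension + 1):

#include <boost/mpi.hpp>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  // Hypothetical parameters: square a 4x4 matrix on a 2x2 grid of slaves,
  // which needs one master plus four slave processes.
  int size = 4;
  int grid_dimension = 2;

  if (world.rank() == 0)
    runMaster(world, size, grid_dimension);
  else
    runSlave(world);

  return 0;
}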
Example #6
 // Send a work request to a randomly chosen process
 void sendRequest() {
   // Note: rand() should be seeded once at program start-up,
   // not re-seeded on every request.
   int pn = rand() % com.size();

   // If the random number hits my own rank, draw again
   while (pn == com.rank()) {
     pn = rand() % com.size();
   }

   //cout << "Sending WORK_REQUEST to " << pn << endl;
   com.send(pn, WORK_REQUEST);
 }
Example #7
int main()
{
  int numClients = 2;
  
  // client settings
  float minClientActionDelay = 0.2f;	// min wait time for a client before starting new actions
  float maxClientActionDelay = 0.4f;	// max wait time for a client before it has to complete a new action
  float clientQueryWeight = 30.0f;		// possibility of a query to happen
  float clientReplyWeight = 30.0f;		// possibility of a reply to happen
  float clientPostWeight = 40.0f;		// possibility of a new post (update) to happen
  
  // set the global MPI variables
  gRank = gWorld.rank();
  gNumFE = numClients;
  gNumRM = gWorld.size() - numClients;

  // early out if there are not enough nodes for at least one RM;
  // every rank must exit here, not just rank 0, or the other ranks would hang
  if (numClients + 1 > gWorld.size()) {
      if (gWorld.rank() == 0) {
          std::cout << "ERROR: there are not enough nodes for at least 1 RM, please increase the number of nodes" << std::endl;
      }
      exit(-1);
  }

  if (gWorld.rank() == 0) {
      std::cout << " num RM: " << gNumRM << " num FE: " << gNumFE << std::endl;
  }

  Log::singleton().open("Bulletin_" + std::to_string(gRank) + ".log"); // set log file
  Log::singleton().setVerbosity(LV_Normal);

  //the last 'numClients' ranks are front ends
  if (gWorld.rank() >= gWorld.size() - numClients) {
	  std::cout << "P" << gWorld.rank() << ": assigned as a client" << std::endl;

	  // create client instance
	  Client client;

	  // set client variables as defined above
	  client.setMinActionDelay(minClientActionDelay);
	  client.setMaxActionDelay(maxClientActionDelay);
	  client.setQueryWeight(clientQueryWeight);
	  client.setReplyWeight(clientReplyWeight);
	  client.setPostWeight(clientPostWeight);

	  // run the client
	  // the client will now call the Frontend classes specific functions
	  // whenever it wants to complete an action.
	  client.run();
  }
  else
  {
	  std::cout << "P" << gWorld.rank() << ": assigned as a replicator" << std::endl;
      ReplicaManager RM;

      RM.run();
  }

  return 0;
}
Example #8
template<typename T>
void
printit(const boost::mpi::communicator& comm,
        const std::vector<T>& things,
        const std::string& caption)
{
    if (!caption.empty() && comm.rank() == 0) {
        std::cout << caption << std::endl;
        std::cout.flush();
    }
    for (int p = 0; p < comm.size(); ++p) {
        if (comm.rank() == p) {
            std::cout << p << ": ";
            std::copy(things.begin(), things.end(),
                      std::ostream_iterator<T>(std::cout, ","));
            std::cout << std::endl;
            std::cout.flush();
        }
        comm.barrier();
    }

    comm.barrier();
    size_t global_size;
    boost::mpi::reduce(comm, things.size(), global_size, std::plus<size_t>(), 0);
    if (comm.rank() == 0) {
        if (!caption.empty()) {
            std::cout << caption;
        }
        std::cout << "Number of things: " << global_size << std::endl;
    }
    comm.barrier();
    std::cout << comm.rank() << ": leaving printit()" << std::endl;
}
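A hypothetical call site for printit (illustrative only; the vector contents are made up):

// Every rank contributes rank+1 copies of its own rank, so ranks print
// slices of different lengths and rank 0 reports the summed size.
std::vector<int> local(comm.rank() + 1, comm.rank());
printit(comm, local, "per-rank values");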
Example #9
static void slave(mpi::communicator world) {

  int work;
  int result;

  while (1) {
    mpi::status status = world.recv(mpi::any_source, mpi::any_tag, work);
    if (status.tag() == DIETAG) {
      return;
    }
    do_work(work, result);
    world.send(0, 0, result);
  }
}
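The matching master side is not part of this example. A sketch of what it could look like, assuming the same DIETAG convention and one int work item per job:

static void master(mpi::communicator world, const std::vector<int>& jobs) {
  // Seed every slave with one job.
  std::size_t next = 0;
  for (int rank = 1; rank < world.size() && next < jobs.size(); ++rank)
    world.send(rank, 0, jobs[next++]);

  // For each result received, hand the now-idle slave the next job, if any.
  int result;
  for (std::size_t received = 0; received < jobs.size(); ++received) {
    mpi::status s = world.recv(mpi::any_source, mpi::any_tag, result);
    if (next < jobs.size())
      world.send(s.source(), 0, jobs[next++]);
  }

  // Tell every slave to shut down.
  for (int rank = 1; rank < world.size(); ++rank)
    world.send(rank, DIETAG, 0);
}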
Example #10
  // Handle a work request
  void handleWorkRequest(int source, stack<string>& workStack) {
    string response = splitStack(workStack);

    // I have nothing to send
    if (response.empty()) {
      //cout << "Sending NO_WORK to " << source << endl;
      com.send(source, NO_WORK);
    } else {
      // If I am about to send data to a process with a lower rank, I turn black
      if (source < com.rank()) {
        isWhiteProcess = false;
      }
      //cout << "Sending DATA to " << source << endl;
      com.send(source, DATA, response);
    }
  }
Example #11
template<class B, class A, class S>
void bi::MarginalSISHandler<B,A,S>::handleAdapterSamples(
    boost::mpi::communicator child, boost::mpi::status status) {
  typedef typename temp_host_matrix<real>::type matrix_type;

  static const int N = B::NP;

  /* add samples */
  boost::optional<int> n = status.template count<real>();
  if (n) {
    matrix_type Z(N + T, *n / (N + T));
    child.recv(status.source(), status.tag(), Z.buf(), *n);

    for (int j = 0; j < Z.size2(); ++j) {
      adapter.add(subrange(column(Z,j), 0, N), subrange(column(Z,j), N, T));
    }
  }

  /* send new proposal if necessary */
  if (adapter.stop(t)) {
    adapter.adapt(t);
    BOOST_AUTO(q, adapter.get(t));
    BOOST_AUTO(iter, node.children.begin());
    for (; iter != node.children.end(); ++iter) {
      node.requests.push_front(iter->isend(0, MPI_TAG_ADAPTER_PROPOSAL, q));
    }
    ///@todo Serialize q into archive just once, then send to all. This may
    ///be how broadcast is already implemented in Boost.MPI.
  }
}
Example #12
void print_section (const std::string& str)
{
    if (!comm.rank()) {
        std::cout << std::string(str.size(),'=') << std::endl;
        std::cout << str << std::endl;
        std::cout << std::string(str.size(),'=') << std::endl;
    }
}
Example #13
template<typename T>
void
broadcast_test(const mpi::communicator& comm, const T& bc_value,
               std::string const& kind)
{
  for (int root = 0; root < comm.size(); ++root) {
    broadcast_test(comm, bc_value, kind, root);
  }
}
Example #14
    void init(const mpi::communicator& comm)
    {
        PetscErrorCode ierr;
        PetscInt lo, hi;
        ierr = VecCreate(comm,&x); // CHKERRQ(ierr);
        ierr = VecSetSizes(x, PETSC_DECIDE, 5*comm.size()); // CHKERRQ(ierr);
        ierr = VecSetFromOptions(x); // CHKERRQ(ierr);

        ierr = VecGetOwnershipRange(x, &lo, &hi);
        for (PetscInt i = lo; i < hi; ++i) {  // hi is one past the last locally owned index
            std::complex<double> v(i, 5*comm.size() - i - 1);
            ierr = VecSetValue(x, i, v, INSERT_VALUES);
        }

        ierr = VecAssemblyBegin(x); // CHKERRQ(ierr);
        ierr = VecAssemblyEnd(x); // CHKERRQ(ierr);
    }
Example #15
void DocumentDecoder::manageTranslators(
	boost::mpi::communicator comm,
	NistXmlCorpus &testset
) {
	namespace mpi = boost::mpi;

	mpi::request reqs[2];
	int stopped = 0;

	NumberedOutputDocument translation;
	reqs[0] = comm.irecv(mpi::any_source, TAG_COLLECT, translation);
	reqs[1] = comm.irecv(mpi::any_source, TAG_STOP_COLLECTING);

	NistXmlCorpus::const_iterator it = testset.begin();
	uint docno = 0;
	for(int i = 0; i < comm.size() && it != testset.end(); ++i, ++docno, ++it) {
		LOG(logger_, debug, "S: Sending document " << docno << " to translator " << i);
		comm.send(i, TAG_TRANSLATE, std::make_pair(docno, *(*it)->asMMAXDocument()));
	}

	for(;;) {
		std::pair<mpi::status, mpi::request *> wstat = mpi::wait_any(reqs, reqs + 2);
		if(wstat.first.tag() == TAG_STOP_COLLECTING) {
			stopped++;
			LOG(logger_, debug, "C: Received STOP_COLLECTING from translator "
				<< wstat.first.source() << ", now " << stopped << " stopped translators.");
			if(stopped == comm.size()) {
				reqs[0].cancel();
				return;
			}
			*wstat.second = comm.irecv(mpi::any_source, TAG_STOP_COLLECTING);
		} else {
			LOG(logger_, debug, "C: Received translation of document " <<
				translation.first << " from translator " << wstat.first.source());
			reqs[0] = comm.irecv(mpi::any_source, TAG_COLLECT, translation);
			if(it != testset.end()) {
				LOG(logger_, debug, "S: Sending document " << docno <<
					" to translator " << wstat.first.source());
				comm.send(wstat.first.source(), TAG_TRANSLATE,
					std::make_pair(docno, *(*it)->asMMAXDocument()));
				++docno; ++it;
			} else {
				LOG(logger_, debug,
					"S: Sending STOP_TRANSLATING to translator " << wstat.first.source());
				comm.send(wstat.first.source(), TAG_STOP_TRANSLATING);
			}
			testset[translation.first]->setTranslation(translation.second);
		}
	}
}
Example #16
 // Send a message to all other processes
 void broadcastMessage(int msgType) {
   for (int i = 0; i < com.size(); i++) {
     // Do not send anything to myself
     if (i == com.rank()) { continue; }

     // If I have found a result, send it
     if (msgType == FOUND || msgType == FOUND_BEST) {
       //cout << "Sending (BEST)FOUND to " << i << endl;
       com.send(i, msgType, myLongest);
     }

     // When announcing the end of the computation there is no data to send
     else if (msgType == END) {
       //cout << "Sending end to " << i << endl;
       com.send(i, msgType);
     }
   }
 }
Example #17
 void gather_resample_weight() const
 {
     weight_.resize(this->size());
     this->read_weight(weight_.data());
     if (world_.rank() == 0)
         ::boost::mpi::gather(world_, weight_, weight_all_, 0);
     else
         ::boost::mpi::gather(world_, weight_, 0);
 }
Example #18
void reduce_and_check(const boost::mpi::communicator &comm, bool local_value) {
  if(comm.rank() == 0) {
    bool total;
    boost::mpi::reduce(comm, local_value, total, std::logical_and<bool>(), 0);
    BOOST_CHECK(total);
  } else {
    boost::mpi::reduce(comm, local_value, std::logical_and<bool>(), 0);
  }
}
Example #19
template<typename I>
void
random_scattered_vector(const boost::mpi::communicator& comm, 
                        const int& global_size,
                        std::vector<I>& local_values)
{
  int me(comm.rank());
  int nproc(comm.size());

  std::vector< std::vector<I> > toscatter(nproc);

  if (me == 0) {
    // The distribution is fixed, so construct it once outside the loop
    boost::random::uniform_int_distribution<> dist(0, nproc-1);
    for (int i = 0; i < global_size; ++i) {
      int p(dist(gen));
      toscatter[p].push_back(i);
    }
  }
  scatter(comm, toscatter, local_values, 0);
}
Example #20
template<typename T>
void mpi_send_workaround(int dest, int tag, const T& value,
                         boost::mpi::communicator & comm)
{
    // serialize T into a string
    std::ostringstream oss;
    boost::archive::text_oarchive oa(oss);
    oa << value;
    // send the string
    comm.send(dest, tag, oss.str());
}
Example #21
void
report_features(mpi::communicator const& comm) {
  if (comm.rank() == 0) {
    std::cout << "Assuming working MPI_Improbe:" <<
#if defined(BOOST_MPI_USE_IMPROBE)
      "yes" << '\n';
#else
      "no"  << '\n';
#endif
  }
}
Example #22
    /// Reduce the results of the measures, and reports some statistics
    void collect_results(boost::mpi::communicator const & c) {

     uint64_t nmeasures_tot;
     MCSignType sum_sign_tot;
     boost::mpi::reduce(c, nmeasures, nmeasures_tot, std::plus<uint64_t>(), 0);
     boost::mpi::reduce(c, sum_sign, sum_sign_tot, std::plus<MCSignType>(), 0);

     report(3) << "[Node "<<c.rank()<<"] Acceptance rate for all moves:\n" << AllMoves.get_statistics(c);
     report(3) << "[Node "<<c.rank()<<"] Simulation lasted: " << double(Timer) << " seconds" << std::endl;
     report(3) << "[Node "<<c.rank()<<"] Number of measures: " << nmeasures  << std::endl;
     report(3) << "[Node "<<c.rank()<<"] Average sign: " << sum_sign / double(nmeasures) << std::endl << std::endl << std::flush;
  
     if (c.rank()==0) {
      sign_av = sum_sign_tot / double(nmeasures_tot);
      report(2) << "Total number of measures: " << nmeasures_tot << std::endl;
      report(2) << "Average sign: " << sign_av << std::endl << std::endl << std::flush;
     }
     boost::mpi::broadcast(c, sign_av, 0);
     AllMeasures.collect_results(c);

    }
Example #23
template<typename T>
boost::mpi::status mpi_recv_workaround(int source, int tag, T& value,
                                       boost::mpi::communicator & comm)
{
    // receive a string
    std::string s;
    boost::mpi::status st = comm.recv(source, tag, s);
    // deserialize the string into T
    std::istringstream iss(s);
    boost::archive::text_iarchive ia(iss);
    ia >> value;
    return st;
}
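The two workarounds above are meant to be used as a pair. A usage sketch (MyPayload is a hypothetical Boost.Serialization-enabled type, not from the original source):

if (comm.rank() == 0) {
    MyPayload payload;                         // hypothetical type
    mpi_send_workaround(1, 0, payload, comm);  // serialized into a string
} else if (comm.rank() == 1) {
    MyPayload payload;
    mpi_recv_workaround(0, 0, payload, comm);  // deserialized on arrival
}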
Example #24
void generate_data(mpi::communicator local, mpi::communicator world)
{
  using std::srand;
  using std::rand;

  // The rank of the collector within the world communicator
  int master_collector = local.size();

  srand(time(0) + world.rank());

  // Send out several blocks of random data to the collectors.
  int num_data_blocks = rand() % 3 + 1;
  for (int block = 0; block < num_data_blocks; ++block) {
    // Generate some random data
    int num_samples = rand() % 1000;
    std::vector<int> data;
    for (int i = 0; i < num_samples; ++i) {
      data.push_back(rand());
    }

    // Send our data to the master collector process.
    std::cout << "Generator #" << local.rank() << " sends some data..."
              << std::endl;
    world.send(master_collector, msg_data_packet, data);
  }

  // Wait for all of the generators to complete
  (local.barrier)();

  // The first generator will send the message to the master collector
  // indicating that we're done.
  if (local.rank() == 0)
    world.send(master_collector, msg_finished);
}
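generate_data assumes that local groups the generator processes while world spans everything. One common way to obtain such a pair, sketched here with an assumed cutoff num_generators, is communicator splitting:

mpi::communicator world;
// Assumption: the first num_generators ranks generate, the rest collect.
bool is_generator = world.rank() < num_generators;
mpi::communicator local = world.split(is_generator ? 0 : 1);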
Example #25
  // Receive and process a message while actively working.
  // Returns true if some process announced the best possible result,
  // i.e. there is no point in working any further.
  bool activeRecv(stack<string>& workStack) {
    string recvMsg;

    // Did a message arrive?
    boost::optional<mpi::status> inc = com.iprobe();

    // It did not; back to work!
    if (!inc) {
      return false;
    }

    mpi::status status = *inc;

    // A token arrived; store it for now and process it after finishing my own work
    if (status.tag() == TOKEN) {
      bool isWhiteToken;
      //cout << "active: recv token from " << status.source() << endl;
      com.recv(status.source(), status.tag(), isWhiteToken);
      if (isWhiteToken) {
        token = WHITE_TOKEN;
      } else {
        token = BLACK_TOKEN;
      }
    }

    // A work request arrived
    else if (status.tag() == WORK_REQUEST) {
      //cout << "active: recv work_req from " << status.source() << endl;
      com.recv(status.source(), status.tag());
      handleWorkRequest(status.source(), workStack);
    }

    // An announcement that some subsequence was found.
    // If it is longer than my best find so far, keep it.
    else if (status.tag() == FOUND) {
      cout << "active: recv found from " << status.source() << endl;
      com.recv(status.source(), status.tag(), recvMsg);
      if (recvMsg.length() > myLongest.length()) {
        myLongest = recvMsg;
      }
    }

    // Some process has already found the best possible result
    else if (status.tag() == FOUND_BEST) {
      cout << "active: recv found best from " << status.source() << endl;
      com.recv(status.source(), status.tag(), myLongest);
      return true;
    }

    // A message I cannot handle right now; receive it and throw it away
    else {
      //cout << "active: recv from " << status.source() << endl;
      com.recv(status.source(), status.tag());
    }

    return false;
  }
Example #26
void DocumentDecoder::translate() {
	namespace mpi = boost::mpi;

	mpi::request reqs[2];
	reqs[1] = communicator_.irecv(0, TAG_STOP_TRANSLATING);
	NumberedInputDocument input;
	for(;;) {
		reqs[0] = communicator_.irecv(0, TAG_TRANSLATE, input);
		std::pair<mpi::status, mpi::request *> wstat = mpi::wait_any(reqs, reqs + 2);
		if(wstat.first.tag() == TAG_STOP_TRANSLATING) {
			LOG(logger_, debug, "T: Received STOP_TRANSLATING.");
			reqs[0].cancel();
			communicator_.send(0, TAG_STOP_COLLECTING);
			return;
		} else {
			NumberedOutputDocument output;
			LOG(logger_, debug, "T: Received document " << input.first << " for translation.");
			output.first = input.first;
			output.second = runDecoder(input);
			LOG(logger_, debug, "T: Sending translation of document " << input.first << " to collector.");
			communicator_.send(0, TAG_COLLECT, output);
		}
	}
}
Example #27
  // Main subsequence-search method.
  // Generates new strings from the initial string and checks whether they are subsequences
  void findAndGenerate() {
    stack<string> inputStack;
    unsigned int counter = 0;

    // Push the initial string onto the stack
    inputStack.push(data.startString);

    cout << com.rank() << ": starting on: " << data.startString << endl;
    while (!inputStack.empty()) {

      // Check incoming messages once in a while
      counter = (counter + 1) % 100;
      if (counter == 0) {
        // If a message announcing the best possible result arrives, stop working
        if (activeRecv(inputStack)) {
          return;
        }
      }

      string current = inputStack.top();
      inputStack.pop();

      // The longest possible subsequence was found during the search.
      if (findSubsequence(current, inputStack)) {
        cout << com.rank() << ": found best: " << myLongest << endl;
        broadcastMessage(FOUND_BEST);
        return;
      }

      // Generate longer strings until the length of the shortest input string is reached
      if (current.length() < data.shortestLen) {
        generateSubsequences(current, inputStack);
      }
    }
    cout << com.rank() << ": found : " << myLongest << endl;
  }
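The fragments in Examples #6, #10, #16, #25 and #27 all refer to the same surrounding state. A rough skeleton of the class they appear to belong to (member names are taken from the fragments; everything else is assumed):

class SubsequenceSolver {
  mpi::communicator com;   // communicator used by all the fragments
  string myLongest;        // longest subsequence found so far
  int token;               // WHITE_TOKEN or BLACK_TOKEN (termination detection)
  bool isWhiteProcess;     // process color for the termination algorithm
  InputData data;          // assumed struct: startString, shortestLen, ...

  void sendRequest();
  void handleWorkRequest(int source, stack<string>& workStack);
  void broadcastMessage(int msgType);
  bool activeRecv(stack<string>& workStack);
  void findAndGenerate();
};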
Example #28
template<typename T>
void
broadcast_test(const mpi::communicator& comm, const T& bc_value,
               std::string const& kind, int root) {
  using boost::mpi::broadcast;
  
  T value;
  if (comm.rank() == root) {
    value = bc_value;
    std::cout << "Broadcasting " << kind << " from root " << root << "...";
    std::cout.flush();
  }
  
  
  broadcast(comm, value, root);
  BOOST_CHECK(value == bc_value);
  if (comm.rank() == root) {
    if (value == bc_value) {
      std::cout << "OK." << std::endl;
    } else {
      std::cout << "FAIL." << std::endl;
    }
  }
  comm.barrier();
}
Example #29
template<typename Generator>
void
all_gather_test(const mpi::communicator& comm, Generator generator,
                std::string kind)
{
  typedef typename Generator::result_type value_type;
  value_type value = generator(comm.rank());

  std::vector<value_type> values;
  if (comm.rank() == 0) {
    std::cout << "Gathering " << kind << "...";
    std::cout.flush();
  }

  mpi::all_gather(comm, value, values);

  std::vector<value_type> expected_values;
  for (int p = 0; p < comm.size(); ++p)
    expected_values.push_back(generator(p));
  BOOST_CHECK(values == expected_values);
  if (comm.rank() == 0 && values == expected_values)
    std::cout << "OK." << std::endl;

  (comm.barrier)();
}
Example #30
void
test_version(mpi::communicator const& comm) {
#if defined(MPI_VERSION)
  int mpi_version    = MPI_VERSION;
  int mpi_subversion = MPI_SUBVERSION;
#else
  int mpi_version = 0;
  int mpi_subversion = 0;
#endif
  
  std::pair<int,int> version = mpi::environment::version();
  if (comm.rank() == 0) {
    std::cout << "MPI Version: " << version.first << ',' << version.second << '\n';
  }
  BOOST_CHECK(version.first == mpi_version);
  BOOST_CHECK(version.second == mpi_subversion);
}