Example #1
void Communicator::waitForMessage()
{
	if(!initialized) return;

	MPI_Status status;
	MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
}
void MPICommunicator::communicate(DataField* field){
	std::string controlName = "CS";
	MPI_Comm csComm = clientNameCommMap[controlName];
	MPI_Status status;

	// Receiving the client name to communicate with
	MPI_Probe(0, MPI_ANY_TAG, csComm, &status);
	int lengthOfClientName;
	MPI_Get_count(&status, MPI_CHAR, &lengthOfClientName);

	char *buf = new char[lengthOfClientName];
	MPI_Recv(buf,lengthOfClientName,MPI_CHAR,0,MPI_ANY_TAG,csComm, &status);
	std::string clientName(buf, lengthOfClientName);
	delete [] buf;

	// Receiving what to do with the client
	int whatToDo;
	MPI_Recv(&whatToDo,1,MPI_INT,0,MPI_ANY_TAG,csComm, &status);

	switch(whatToDo){
	    case 1: { // Receive
		MPI_Comm clientToRecvFrom = clientNameCommMap[clientName];
		field->receive(clientToRecvFrom);
		break;
	    }
	    case 2: { // Send
		MPI_Comm clientToSendTo = clientNameCommMap[clientName];
		field->send(clientToSendTo);
		break;
	    }
	}
}
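The control protocol implied by communicate() is that rank 0 on the "CS" communicator first sends the client name (sized on the worker side via MPI_Probe/MPI_Get_count) and then a single int command. A minimal control-side sketch of that handshake is shown below; the function name, the workerRank parameter, and the use of tag 0 are illustrative assumptions, not part of the original source.

// Control side (sketch): tell a worker which client to talk to and what to do.
// whatToDo: 1 = worker receives from that client, 2 = worker sends to it.
void sendControlCommand(MPI_Comm csComm, int workerRank, const std::string& clientName, int whatToDo)
{
	MPI_Send((void*)clientName.data(), (int)clientName.size(), MPI_CHAR, workerRank, 0, csComm);
	MPI_Send(&whatToDo, 1, MPI_INT, workerRank, 0, csComm);
}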
Example #3
template <typename T>
int MPI_Recv_vector(int id, std::vector<T> &buf)
{
	MPI_Status status;
	MPI_Datatype mpitype;
	int count;
	long ind;

	MPI_Probe(id, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
	
	if (status.MPI_TAG == TAG_LDOUBLE_COMPLEX_VEC)
	{
		if (typeid(T) != typeid(long double)) throw std::runtime_error("Types don't match");
		mpitype = MPI_LONG_DOUBLE;
	} else if (status.MPI_TAG == TAG_DOUBLE_COMPLEX_VEC) {
		if (typeid(T) != typeid(double)) throw std::runtime_error("Types don't match");
		mpitype = MPI_DOUBLE;
	} else if (status.MPI_TAG == TAG_FLOAT_COMPLEX_VEC) {
		if (typeid(T) != typeid(float)) throw std::runtime_error("Types don't match");
		mpitype = MPI_FLOAT;
	} else {
		throw std::runtime_error("Unknown message tag");
	}

	MPI_Get_count(&status, mpitype, &count);
	buf.resize(count);

	if (buf.size() < count) throw std::runtime_error("Vector smaller than count.");
	MPI_Recv(buf.data(), count, mpitype, id, status.MPI_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

	return 0;
}
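The tag constants above imply a sender that picks the tag from the element type. A minimal sketch of such a counterpart is shown below; the name MPI_Send_vector, the use of MPI_COMM_WORLD, and the assumption that the same TAG_* constants are visible are illustrative and not part of the original source.

template <typename T>
int MPI_Send_vector(int id, const std::vector<T> &buf)
{
	int tag;
	MPI_Datatype mpitype;

	// Choose the tag/datatype pair the receiver above expects.
	if (typeid(T) == typeid(long double)) { tag = TAG_LDOUBLE_COMPLEX_VEC; mpitype = MPI_LONG_DOUBLE; }
	else if (typeid(T) == typeid(double)) { tag = TAG_DOUBLE_COMPLEX_VEC; mpitype = MPI_DOUBLE; }
	else if (typeid(T) == typeid(float))  { tag = TAG_FLOAT_COMPLEX_VEC; mpitype = MPI_FLOAT; }
	else throw std::runtime_error("Unsupported element type");

	// Cast only to stay compatible with pre-MPI-3 const-unaware signatures.
	MPI_Send((void*)buf.data(), (int)buf.size(), mpitype, id, tag, MPI_COMM_WORLD);
	return 0;
}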
Example #4
File: vsgpackedmsg.c Project: pigay/vsg
/**
 * vsg_packed_msg_recv:
 * @pm: a #VsgPackedMsg.
 * @src: the source task id. Can be %MPI_ANY_SOURCE.
 * @tag: an integer message tag. Can be %MPI_ANY_TAG.
 *
 * Receives a message from source @src with @tag message tag and stores it in
 * @pm. Any previously stored data will be lost.
 */
void vsg_packed_msg_recv (VsgPackedMsg *pm, gint src, gint tag)
{
  MPI_Status status;
  gint ierr;
  gint rsize = 0;

  g_assert (pm->own_buffer == TRUE);

  MPI_Probe (src, tag, pm->communicator, &status);

  MPI_Get_count (&status, MPI_PACKED, &rsize);

  if (rsize > pm->allocated)
    {
      pm->buffer = g_realloc (pm->buffer, rsize);
      pm->allocated = rsize;
    }

  pm->size = rsize;

  ierr = MPI_Recv (pm->buffer, rsize, MPI_PACKED, status.MPI_SOURCE,
                   status.MPI_TAG, pm->communicator, &status);

  _recv_count ++;
  _recv_size += rsize;

  _trace_write_msg_recv (pm, "recv", status.MPI_SOURCE, status.MPI_TAG);

  pm->position = _PM_BEGIN_POS;

  if (ierr != MPI_SUCCESS) vsg_mpi_error_output (ierr);
}
Example #5
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(int argc, char** argv){
	MPI_Init(NULL, NULL);
	
	int world_size;
	MPI_Comm_size(MPI_COMM_WORLD, &world_size);

	int world_rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

	const int MAX_NUMBERS = 100;
	int numbers[MAX_NUMBERS];
	int number_count;

	if(world_rank == 0){
		srand(time(NULL));
		number_count = (rand() / (float)RAND_MAX) * MAX_NUMBERS;
		// Send a random number of ints; the contents are uninitialized, only the count matters here.
		MPI_Send(numbers, number_count, MPI_INT, 1, 0, MPI_COMM_WORLD);
		printf("Number count: %d\n", number_count);

	}else if(world_rank == 1){
		MPI_Status status;
		MPI_Probe(0, 0, MPI_COMM_WORLD, &status);
		MPI_Get_count(&status, MPI_INT, &number_count);
		int* number_buff = (int*)malloc(sizeof(int) * number_count);
		MPI_Recv(number_buff, number_count, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
		printf("Dynamically received: %d from 0.\n", number_count);
		free(number_buff);
	}
	MPI_Finalize();

}
Example #6
void Foam::mpiIPstreamImpl::init
(
    const PstreamImpl::commsTypes commsType,
    const label bufSize,
    int& fromProcNo,
    label& messageSize,
    List<char>& buf
)
{
    MPI_Status status;

    // If the buffer size is not specified, probe the incoming message
    // and set it
    if (!bufSize)
    {
        MPI_Probe(Pstream::procID(fromProcNo), Pstream::msgType(), MPI_COMM_WORLD, &status);
        MPI_Get_count(&status, MPI_BYTE, &messageSize);

        buf.setSize(messageSize);
    }

    messageSize = read(commsType, fromProcNo, buf.begin(), buf.size());

    if (!messageSize)
    {
        FatalErrorIn
        (
            "mpiIPstreamImpl::mpiIPstreamImpl(const commsTypes commsType, const label bufSize, "
            "const int fromProcNo, label& messageSize, List<char>& buf)"
        )   << "read failed"
            << Foam::abort(FatalError);
    }
}
Example #7
int main( int argc, char *argv[] )
{
    int rank, size;
    int provided;
    char buffer[100];
    MPI_Status status;

    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (provided != MPI_THREAD_MULTIPLE)
    {
	if (rank == 0)
	{
	    printf("MPI_Init_thread must return MPI_THREAD_MULTIPLE in order for this test to run.\n");
	    fflush(stdout);
	}
	MPI_Finalize();
	return -1;
    }

    MTest_Start_thread(send_thread, NULL);

    MPI_Probe(MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&status);

    MPI_Recv(buffer, sizeof(buffer), MPI_CHAR, rank, 0, MPI_COMM_WORLD, &status);

    MTest_Join_threads();

    MTest_Finalize(0);
    MPI_Finalize();
    return 0;
}
Example #8
template <typename T>
int MPI_Slave_Recv_buf_from_Scalar_real(T *buf, const ptrdiff_t local_n0, const ptrdiff_t local_0_start, const ptrdiff_t N0, const ptrdiff_t N1)
{
	MPI_Status status;
	auto mpitype = ionsim::convert_typeid_to_mpi<T>();
	int count;

	MPI_Send_local(local_n0, local_0_start, N0, N1);

	MPI_Probe(0, TAG_MASTER_SLAVE, MPI_COMM_WORLD, &status);
	MPI_Get_count(&status, mpitype, &count);

	if (count != local_n0*N1) throw std::runtime_error("Receiving incommensurate number of counts!");

	MPI_Recv(buf, local_n0*N1, mpitype, 0, TAG_MASTER_SLAVE, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

	JTF_PRINTVAL(buf[0]);

	T sum = 0;
	std::vector<ptrdiff_t> x_pts = {local_n0, N1};
	for (ptrdiff_t i=0; i<local_n0; i++)
	{
		for (ptrdiff_t j=0; j<N1; j++)
		{
			sum += buf[ionsim::row_major(x_pts, i, j)];
		}
	}
	JTF_PRINT_NOEND(Sum of received: ) << sum << std::endl;
	return 0;
}
int edaMpiWrapperControl::polling( int &nodeID )
{
  MPI_Status mpiStat;
  MPI_Probe( MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &mpiStat );
  nodeID = mpiStat.MPI_SOURCE;
  return POLLING_FINISHED;
}
Example #10
/**
 * Performs map reduce using mpi message passing.
 * Assumes numprocs is a power of 2. The result will
 * be stored in the map on process with rank 0.
 * @param map local map to reduce
 * @param rank mpi process rank
 * @param numprocs number of mpi processes.
 */
void mpiMapReduce(HashMap* map, int rank, int numprocs) {
  int s;
  int count;
  MPI_Status status;
  char* buffer;
  //printf("starting map reduce\n");
  for (s = numprocs / 2; s > 0; s = s/2) {
    if (rank < s) {
      // Get the number of elements being sent to us.
      //printf("getting next reduce for rank %d\n", rank);
      MPI_Probe(rank + s, 0, MPI_COMM_WORLD, &status);
      MPI_Get_count(&status, MPI_CHAR, &count);
      // receive from higher process. merge data.
      buffer = (char*)malloc(sizeof(char) * count);
      //printf("received %d bytes at %d from %d\n", count, rank, rank + s);
      MPI_Recv(buffer, count, MPI_CHAR, rank  + s, 0,
	       MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      // do stuff with data
      addSerializedToMap(map, (unsigned char*) buffer, count);
      //printf("finished reduce at %d\n", rank);
      free(buffer);
    } else if (rank < 2 * s) {
      // serialize hashmap for sending.
      uint32_t nBytes;
      //printf("sending data from %d to %d\n", rank, rank - s);
      buffer = (char*)serializeMap(map, &nBytes);
      //printf("serialized map for sending in rank %d\n", rank);
      MPI_Send(buffer, nBytes, MPI_CHAR, rank - s, 0, MPI_COMM_WORLD);
      free(buffer);
    }
  }
}
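For example, with numprocs = 8 the loop runs with s = 4, 2, 1: in the first pass ranks 0-3 probe, size, and receive the serialized maps from ranks 4-7 and merge them; in the second pass ranks 0-1 receive from ranks 2-3; in the last pass rank 0 receives from rank 1, so the fully reduced map ends up on rank 0 as documented.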
Example #11
File: srpmpi.c Project: blami/mi-par
/**
 * MPI receive of a stack and merge with the local stack.
 */
void mpi_recv_stack(const int src) {
	assert(t);
	assert(s);
	MPI_Status mpi_status;
	unsigned int l;
	char *b = NULL;

	MPI_Recv(&l, 1, MPI_UNSIGNED, src, MSG_STACK,
		MPI_COMM_WORLD, &mpi_status);

	srpdebug("mpi", node, "prijeti zasobnik <l=%db, src=%d>",
		l, mpi_status.MPI_SOURCE);

	// allocate the buffer and start a blocking wait for MSG_STACK_DATA
	srpdebug("mpi", node, "cekani na zpravu <delka=%db>", l);
	MPI_Probe(src, MSG_STACK_DATA, MPI_COMM_WORLD,
		&mpi_status);
	b = (char *)utils_malloc(l * sizeof(char));
	MPI_Recv(b, l, MPI_PACKED, src, MSG_STACK_DATA,
		MPI_COMM_WORLD, &mpi_status);

	stack_t *sn = stack_mpiunpack(b, t, l);
	free(b);

	srpdebug("mpi", node, "prijeti zasobnik <s=%d>", sn->s);

	// merge the stacks
	stack_merge(s, sn);
}
Example #12
void BIL_Sched_swap_and_merge_unique_group_names(
    int is_sender, int swapper_rank, char** unique_group_names_ret,
    int* num_unique_groups_ret) {
  char* unique_group_names = *unique_group_names_ret;
  int num_unique_groups = *num_unique_groups_ret;

  int group_name_size = BIL_MAX_FILE_NAME_SIZE + BIL_MAX_VAR_NAME_SIZE;
  if (is_sender == 1) {
    MPI_Send((char*)unique_group_names, group_name_size * num_unique_groups,
             MPI_CHAR, swapper_rank, 0, BIL->world_comm);
  } else {
    // Probe for the message size.
    MPI_Status status;
    MPI_Probe(swapper_rank, 0, BIL->world_comm, &status);
    int group_name_recv_amount;
    MPI_Get_count(&status, MPI_CHAR, &group_name_recv_amount);
    unique_group_names = BIL_Misc_realloc(unique_group_names,
                                          num_unique_groups * group_name_size +
                                          group_name_recv_amount);
    // Receive group names from the outer processes.
    MPI_Recv(unique_group_names + (num_unique_groups * group_name_size),
             group_name_recv_amount, MPI_CHAR,
             swapper_rank, 0, BIL->world_comm, MPI_STATUS_IGNORE);

    num_unique_groups += group_name_recv_amount / group_name_size;
    // Merge the group names into an array of unique names.
    BIL_Sched_merge_unique_group_names(&unique_group_names, 
                                       &num_unique_groups);
    *num_unique_groups_ret = num_unique_groups;
    *unique_group_names_ret = unique_group_names;
  }
}
Example #13
// unsafe MProbe
inline int MPI_Mprobe(int source, int tag, MPI_Comm comm,
    MPI_Message *message, MPI_Status *status){
	message->__tag = tag;
	message->__comm = comm;
	message->__source = source;
	return MPI_Probe(source, tag, comm, status);
}
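This shim is only useful together with a matching receive that consumes the envelope stashed in the MPI_Message handle. A possible counterpart, assuming the same __source/__tag/__comm members and the standard MPI_Mrecv signature, is sketched below; it inherits the same wildcard race that makes the probe "unsafe" and is not part of the original source.

// unsafe MRecv: re-issues a plain MPI_Recv using the stashed envelope
inline int MPI_Mrecv(void *buf, int count, MPI_Datatype datatype,
    MPI_Message *message, MPI_Status *status){
	return MPI_Recv(buf, count, datatype, message->__source,
		message->__tag, message->__comm, status);
}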
Example #14
int wait_for_signal(MPI_Comm comm, int signal_num) {
    int new_failures, ret;
    MPI_Status status;



    while(cur_epoch < signal_num) {
        /*
         * MPI_Probe is a blocking call that will unblock when it encounters an
         * error on the indicated peer. By passing MPI_ANY_SOURCE, it will cause
         * MPI_Probe to unblock on the first un-recognized failed process.
         *
         * The error handler will be called before MPI_Probe returns.
         *
         * In this example, MPI_Probe will only return when there is an error
         * since we do not send messages in this example. But we check the
         * return code anyway, just for good form.
         */
        ret = MPI_Probe(MPI_ANY_SOURCE, TAG_FT_DETECT, comm, &status);

        if( MPI_SUCCESS != ret ) {
            printf("%2d of %2d) MPI_Probe() Error: Some rank failed (error = %3d)\n",
                   mpi_mcw_rank, mpi_mcw_size,
                   status.MPI_ERROR);

            /* Recognize the failure and move on */
            OMPI_Comm_failure_ack(comm);
        }
        sleep(1);
    }

    return 0;
}
Example #15
void scribe_process() {
  MPI_Status status;

  int array_buffer_size;
  int * work_array;
  int current_largest_size = 0;
  
  work_item work;

  while (TRUE) {
    MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    
    if (status.MPI_TAG == TERMINATE)
      return;

    MPI_Get_count(&status, MPI_INT, &array_buffer_size);
    work_array = (int *) malloc(sizeof(int) * array_buffer_size);

    MPI_Recv(work_array, array_buffer_size, MPI_INT, status.MPI_SOURCE, SEQUENCE_TERMINATED, MPI_COMM_WORLD, &status);
    work = convert_array_to_work(work_array);
    
    if (work->size > current_largest_size) {
      current_largest_size = work->size;
      display_work_item(work);
    }

    free(work);
    free(work_array);
  }
}
void master() {
    MPI_Status stat , stat2 ;

    int j;
    int intervals = 1000000000;
    double delta,x, pi = 0.0;
    delta = 1.0 / (double)intervals;
    int job = 0;
    //loop

    while ( intervals >= 0 || job > 0 ) {
        // Wait for any incoming message
        MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &stat);
        // Store rank of receiver into slave_rank
        int slave_rank = stat.MPI_SOURCE;
            // Decide according to the tag which type of message we have got
        if ( stat.MPI_TAG == TAG_ASK_FOR_JOB ) {
            MPI_Recv (... , slave_rank , TAG_ASK_FOR_JOB , MPI_COMM_WORLD , & stat2 ) ;
            if (/* there are unprocessed jobs */) {
                // here we have unprocessed jobs , we send one job to the slave
                job++;
                    /* pack data of job into the buffer msg_buffer */
                MPI_Send ( msg_buffer , 1 , slave_rank , TAG_JOB_DATA , MPI_COMM_WORLD );
                /* mark slave with rank my_rank as working on a job */
            } else {
                // send stop msg to slave
                MPI_Send (... , slave_rank , TAG_STOP , MPI_COMM_WORLD);
                job--;
            }
        } else {
Example #17
/* Function:  p7_hmm_mpi_Recv()
 * Synopsis:  Receives an HMM as a work unit from an MPI sender.
 *
 * Purpose:   Receive a work unit that consists of a single HMM
 *            sent by MPI <source> (<0..nproc-1>, or
 *            <MPI_ANY_SOURCE>) tagged as <tag> for MPI communicator <comm>.
 *            
 *            Work units are prefixed by a status code that gives the
 *            number of HMMs to follow; here, 0 or 1 (but in the future,
 *            we could easily extend to sending several HMMs in one 
 *            packed buffer). If we receive a 1 code and we successfully
 *            unpack an HMM, this routine will return <eslOK> and a non-<NULL> <*ret_hmm>.
 *            If we receive a 0 code (a shutdown signal), 
 *            this routine returns <eslEOD> and <*ret_hmm> is <NULL>.
 *   
 *            Caller provides a working buffer <*buf> of size
 *            <*nalloc> characters. These are passed by reference, so
 *            that <*buf> can be reallocated and <*nalloc> increased
 *            if necessary. As a special case, if <*buf> is <NULL> and
 *            <*nalloc> is 0, the buffer will be allocated
 *            appropriately, but the caller is still responsible for
 *            free'ing it.
 *            
 *            Caller may or may not already know what alphabet the HMM
 *            is expected to be in.  A reference to the current
 *            alphabet is passed in <byp_abc>. If the alphabet is unknown,
 *            pass <*byp_abc = NULL>, and when the HMM is received, an
 *            appropriate new alphabet object is allocated and passed
 *            back to the caller via <*abc>.  If the alphabet is
 *            already known, <*byp_abc> is that alphabet, and the new
 *            HMM's alphabet type is verified to agree with it. This
 *            mechanism allows an application to let the first HMM
 *            determine the alphabet type for the application, while
 *            still keeping the alphabet under the application's scope
 *            of control.
 *
 * Args:      source  - index of MPI sender, 0..nproc-1 (0=master), or MPI_ANY_SOURCE
 *            tag     - MPI message tag;  MPI_ANY_TAG, or a specific message tag (0..32767 will work on any MPI)
 *            comm    - MPI communicator; MPI_COMM_WORLD, or a specific MPI communicator
 *            buf     - working buffer (for receiving packed message);
 *                      if <*buf> == NULL, a <*buf> is allocated and returned;
 *                      if <*buf> != NULL, it is used (and may be reallocated)
 *            nalloc  - allocation size of <*buf> in bytes; pass 0 if <*buf==NULL>.           
 *            byp_abc - BYPASS: <*byp_abc> == ESL_ALPHABET *> if known;
 *                              <*byp_abc> == NULL> if alphabet unknown.
 *            ret_hmm  - RETURN: newly allocated/received profile
 *
 * Returns:   <eslOK> on success. <*ret_hmm> contains the received HMM;
 *            it is allocated here, and the caller is responsible for
 *            free'ing it.  <*buf> may have been reallocated to a
 *            larger size, and <*nalloc> may have been increased.  If
 *            <*abc> was passed as <NULL>, it now points to an
 *            <ESL_ALPHABET> object that was allocated here; caller is
 *            responsible for free'ing this.
 *            
 *            Returns <eslEOD> if an end-of-data signal was received.
 *            In this case, <*buf>, <*nalloc>, and <*abc> are left unchanged,
 *            and <*ret_hmm> is <NULL>.
 *            
 *            Returns <eslEINCOMPAT> if the HMM is in a different alphabet
 *            than <*abc> said to expect. In this case, <*abc> is unchanged,
 *            <*buf> and <*nalloc> may have been changed, and <*ret_hmm> is
 *            <NULL>.
 *            
 * Throws:    <eslEMEM> on allocation error, and <eslESYS> on MPI communication
 *            errors; in either case <*ret_hmm> is <NULL>.           
 */
int
p7_hmm_mpi_Recv(int source, int tag, MPI_Comm comm, char **buf, int *nalloc, ESL_ALPHABET **byp_abc, P7_HMM **ret_hmm)
{
  int         pos = 0;
  int         code;
  int         n;
  MPI_Status  mpistatus;
  int         status;

  /* Probe first, because we need to know if our buffer is big enough. */
  if ( MPI_Probe(source, tag, comm, &mpistatus)  != MPI_SUCCESS) ESL_EXCEPTION(eslESYS, "mpi probe failed");
  if ( MPI_Get_count(&mpistatus, MPI_PACKED, &n) != MPI_SUCCESS) ESL_EXCEPTION(eslESYS, "mpi get count failed");

  /* Make sure the buffer is allocated appropriately */
  if (*buf == NULL || n > *nalloc) 
    {
      ESL_REALLOC(*buf, sizeof(char) * n);
      *nalloc = n; 
    }

  /* Receive the entire packed work unit */
  if (MPI_Recv(*buf, n, MPI_PACKED, source, tag, comm, &mpistatus) != MPI_SUCCESS) ESL_EXCEPTION(eslESYS, "mpi recv failed");

  /* Unpack the status code prefix */
  if (MPI_Unpack(*buf, n, &pos, &code, 1, MPI_INT, comm) != MPI_SUCCESS) ESL_EXCEPTION(eslESYS, "mpi unpack failed");

  if      (code == 0) { status = eslEOD; *ret_hmm = NULL; }
  else if (code == 1)   status = p7_hmm_mpi_Unpack(*buf, *nalloc, &pos, comm, byp_abc, ret_hmm);
  else                  ESL_EXCEPTION(eslESYS, "bad mpi buffer transmission code");
  return status;

 ERROR: /* from ESL_REALLOC only */
  *ret_hmm = NULL;
  return status;
}
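A typical worker-side loop built on this routine starts with <*buf> == NULL and <*nalloc> == 0 so the first call allocates the buffer, and keeps receiving until the end-of-data code arrives. The sketch below assumes the usual HMMER/Easel destructors (p7_hmm_Destroy, esl_alphabet_Destroy); it is an illustration, not code from the original file.

char         *buf    = NULL;
int           nalloc = 0;
ESL_ALPHABET *abc    = NULL;   /* first HMM received determines the alphabet */
P7_HMM       *hmm    = NULL;
int           status;

while ((status = p7_hmm_mpi_Recv(0, MPI_ANY_TAG, MPI_COMM_WORLD,
                                 &buf, &nalloc, &abc, &hmm)) == eslOK)
{
  /* ... work on <hmm> ... */
  p7_hmm_Destroy(hmm);
}
if (status != eslEOD) { /* handle eslEINCOMPAT, eslEMEM, eslESYS */ }

free(buf);
esl_alphabet_Destroy(abc);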
Example #18
File: mpi.c Project: Denis84/EPA-WorkBench
/* Function:  p7_oprofile_MPIRecv()
 * Synopsis:  Receives an OPROFILE as a work unit from an MPI sender.
 * Incept:    MSF, Wed Oct 21, 2009 [Janelia]
 *
 * Purpose:   Receive a work unit that consists of a single OPROFILE
 *            sent by MPI <source> (<0..nproc-1>, or
 *            <MPI_ANY_SOURCE>) tagged as <tag> for MPI communicator <comm>.
 *            
 *            Work units are prefixed by a status code. If the unit's
 *            code is <eslOK> and no errors are encountered, this
 *            routine will return <eslOK> and a non-<NULL> <*ret_om>.
 *            If the unit's code is <eslEOD> (a shutdown signal), 
 *            this routine returns <eslEOD> and <*ret_om> is <NULL>.
 *   
 *            Caller provides a working buffer <*buf> of size
 *            <*nalloc> characters. These are passed by reference, so
 *            that <*buf> can be reallocated and <*nalloc> increased
 *            if necessary. As a special case, if <*buf> is <NULL> and
 *            <*nalloc> is 0, the buffer will be allocated
 *            appropriately, but the caller is still responsible for
 *            free'ing it.
 *            
 *            Caller may or may not already know what alphabet the OPROFILE
 *            is expected to be in.  A reference to the current
 *            alphabet is passed in <abc>. If the alphabet is unknown,
 *            pass <*abc = NULL>, and when the OPROFILE is received, an
 *            appropriate new alphabet object is allocated and passed
 *            back to the caller via <*abc>.  If the alphabet is
 *            already known, <*ret_abc> is that alphabet, and the new
 *            OPROFILE's alphabet type is verified to agree with it. This
 *            mechanism allows an application to let the first OPROFILE
 *            determine the alphabet type for the application, while
 *            still keeping the alphabet under the application's scope
 *            of control.
 *
 * Returns:   <eslOK> on success. <*ret_om> contains the received OPROFILE;
 *            it is allocated here, and the caller is responsible for
 *            free'ing it.  <*buf> may have been reallocated to a
 *            larger size, and <*nalloc> may have been increased.  If
 *            <*abc> was passed as <NULL>, it now points to an
 *            <ESL_ALPHABET> object that was allocated here; caller is
 *            responsible for free'ing this.
 *            
 *            Returns <eslEOD> if an end-of-data signal was received.
 *            In this case, <*buf>, <*nalloc>, and <*abc> are left unchanged,
 *            and <*ret_om> is <NULL>.
 *            
 *            Returns <eslEINCOMPAT> if the OPROFILE is in a different alphabet
 *            than <*abc> said to expect. In this case, <*abc> is unchanged,
 *            <*buf> and <*nalloc> may have been changed, and <*ret_om> is
 *            <NULL>.
 *            
 * Throws:    <eslEMEM> on allocation error, in which case <*ret_om> is 
 *            <NULL>.           
 */
int
p7_oprofile_MPIRecv(int source, int tag, MPI_Comm comm, char **buf, int *nalloc, ESL_ALPHABET **abc, P7_OPROFILE **ret_om)
{
  int         status;
  int         code;
  P7_OPROFILE     *om     = NULL;
  int         n;
  int         pos;
  MPI_Status  mpistatus;

  /* Probe first, because we need to know if our buffer is big enough. */
  MPI_Probe(source, tag, comm, &mpistatus);
  MPI_Get_count(&mpistatus, MPI_PACKED, &n);

  /* Make sure the buffer is allocated appropriately */
  if (*buf == NULL || n > *nalloc) {
    void *tmp;
    ESL_RALLOC(*buf, tmp, sizeof(char) * n);
    *nalloc = n; 
  }

  /* Receive the packed work unit */
  MPI_Recv(*buf, n, MPI_PACKED, source, tag, comm, &mpistatus);

  /* Unpack it, looking at the status code prefix for EOD/EOK  */
  pos = 0;
  if (MPI_Unpack(*buf, n, &pos, &code, 1, MPI_INT, comm) != 0) ESL_XEXCEPTION(eslESYS, "mpi unpack failed");
  if (code == eslEOD)  { *ret_om = NULL;  return eslEOD; }

  return p7_oprofile_MPIUnpack(*buf, *nalloc, &pos, comm, abc, ret_om);

 ERROR:
  if (om != NULL) p7_oprofile_Destroy(om);
  return status;
}
Example #19
void Thread::receivePendingMessages() {
  MPI_Status status;
  bool* myLeftRecvBorder = (bool*) malloc(chunkWidth * sizeof(bool));
  bool* myRightRecvBorder = (bool*) malloc(chunkWidth * sizeof(bool));
  std::vector<bool> leftBorder,rightBorder;
  int source;
  int tag;
  while((leftBorders.empty() ||rightBorders.empty() || stopped) && !quit) {
    MPI_Probe(MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
    source = status.MPI_SOURCE;
    tag = status.MPI_TAG;
    if (source == leftThread) {
      //std::cout << threadNumber << ": receiving from left thread" << std::endl;
      MPI_Recv(myLeftRecvBorder,chunkWidth,MPI::BOOL,source,tag,MPI_COMM_WORLD,&status);
      leftBorder.clear();
      for (int i = 0; i< chunkWidth;i++) {
        leftBorder.push_back(myLeftRecvBorder[i]);
      }
      leftBorders.push(leftBorder);
    } else if (source == rightThread) {
      //std::cout << threadNumber << ": receiving from right thread" << std::endl;
      MPI_Recv(myRightRecvBorder,chunkWidth,MPI::BOOL,source,tag,MPI_COMM_WORLD,&status);
      rightBorder.clear();
      for (int i = 0; i< chunkWidth;i++) {
        rightBorder.push_back(myRightRecvBorder[i]);
      }
      rightBorders.push(rightBorder);
    } else { //it is master.
      //std::cout << threadNumber << ": receiving from master" << std::endl;
      if (tag == STOP_TAG) {
        //std::cout << threadNumber << ": stop tag" << std::endl;
        int value;
        MPI_Recv(&value,0,MPI_INT,source,tag,MPI_COMM_WORLD,&status);
        stopped = true;
        MPI_Send(&currentIteration,1,MPI_LONG_LONG,0,SYNC_TAG,MPI_COMM_WORLD);
        //std::cout << threadNumber << ": sent iter" << std::endl;
      } /*else if (tag == STOP_TAG_2){
        std::cout << threadNumber << ": stop tag 2" << std::endl;
        int value;
        MPI_Recv(&value,0,MPI_INT,source,tag,MPI_COMM_WORLD,&status);
        stopped = true;
        sendComputedPart();
      }*/ else if (tag == QUIT_TAG) {
        //std::cout << threadNumber << ": quit tag" << std::endl;
        int value;
        MPI_Recv(&value,0,MPI_INT,source,tag,MPI_COMM_WORLD,&status);
        quit = true;
        sendComputedPart();
      } else { //it is run.
        ll newIters;
        MPI_Recv(&newIters,1,MPI_LONG_LONG,source,tag,MPI_COMM_WORLD,&status);
        updateIterations(newIters);
        //std::cout << threadNumber << ": run tag" << newIters << std::endl;
        stopped = false;
      }
    }
  }
  free(myRightRecvBorder);
  free(myLeftRecvBorder);
}
Example #20
int RecvMPIMessage(char** buf_, int* size_, int* who_, int* tag_, bool block)	{
	int& who = *who_;
	int& tag = *tag_;
	int& size = *size_;
	char*& buf = *buf_;
	
	int flag;
	MPI_Status status;
	
	MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, &status);
	
	if (flag == 0)	{
		if (block == false)
			return 0;
		else
			MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
	}

	MPI_Get_count(&status, MPI_CHAR, &size);
	buf = new char[size];
	MPI_Recv(buf, size, MPI_CHAR, status.MPI_SOURCE, status.MPI_TAG, MPI_COMM_WORLD, &status);
	who = status.MPI_SOURCE;
	tag = status.MPI_TAG;
	
	return 1;
}
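The caller owns the buffer allocated by RecvMPIMessage and must delete[] it after use. A minimal polling usage sketch (variable names are illustrative, not from the original) is:

char *msg  = NULL;
int   size = 0, who = 0, tag = 0;

// Non-blocking poll: returns 0 immediately when nothing is pending.
if (RecvMPIMessage(&msg, &size, &who, &tag, false)) {
	// ... use msg[0..size) from rank <who> with tag <tag> ...
	delete [] msg;
}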
Example #21
void root_node(shared_ptr<Net<Dtype> > net, int iters, Dtype lr)
{
    const std::vector<Blob<Dtype>*>& result = net -> output_blobs();

    boost::posix_time::ptime timer = boost::posix_time::microsec_clock::local_time();

    MPI_Status status;
    std::vector<Blob<Dtype>*> bottom_vec;
    float loss;

    init_buffer(iters, FLAGS_snapshot_intv, net);

    for (int i = 0; i < iters; i++) {

        MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);

        ApplyUpdate(net, lr, status.MPI_SOURCE);
        std::cout << i << std::endl;

        if (i % FLAGS_snapshot_intv == 0)
            snapshot(net, i, (boost::posix_time::microsec_clock::local_time() - timer).total_milliseconds());
    }

    save_snapshot(FLAGS_snap_path);
}
Example #22
std::unordered_set<std::string>
computeDefunctWellNames(const std::vector<std::vector<int> >& wells_on_proc,
                        const Opm::EclipseStateConstPtr eclipseState,
                        const CollectiveCommunication<MPI_Comm>& cc,
                        int root)
{
    std::vector<const Opm::Well*>  wells  = eclipseState->getSchedule()->getWells();
    std::vector<int> my_well_indices;
    const int well_information_tag = 267553;

    if( root == cc.rank() )
    {
        std::vector<MPI_Request> reqs(cc.size(), MPI_REQUEST_NULL);
        my_well_indices = wells_on_proc[root];
        for ( int i=0; i < cc.size(); ++i )
        {
            if(i==root)
            {
                continue;
            }
            MPI_Isend(const_cast<int*>(wells_on_proc[i].data()),
                      wells_on_proc[i].size(),
                      MPI_INT, i, well_information_tag, cc, &reqs[i]);
        }
        std::vector<MPI_Status> stats(reqs.size());
        MPI_Waitall(reqs.size(), reqs.data(), stats.data());
    }
    else
    {
        MPI_Status stat;
        MPI_Probe(root, well_information_tag, cc, &stat);
        int msg_size;
        MPI_Get_count(&stat, MPI_INT, &msg_size);
        my_well_indices.resize(msg_size);
        MPI_Recv(my_well_indices.data(), msg_size, MPI_INT, root,
                 well_information_tag, cc, &stat);
    }

    // Compute defunct wells in parallel run.
    std::vector<int> defunct_wells(wells.size(), true);

    for(auto well_index : my_well_indices)
    {
        defunct_wells[well_index] = false;
    }

    // We need to use well names as only they are consistent.
    std::unordered_set<std::string> defunct_well_names;

    for(auto defunct = defunct_wells.begin(); defunct != defunct_wells.end(); ++defunct)
    {
        if ( *defunct )
        {
            defunct_well_names.insert(wells[defunct-defunct_wells.begin()]->name());
        }
    }

    return defunct_well_names;
}
Example #23
template <>
int Communicator::probeSize<double>(int source, int tag) const
{
    MPI_Status status;
    int count;
    MPI_Probe(source, tag, comm_, &status);
    MPI_Get_count(&status, MPI_DOUBLE, &count);
    return count;
}
Example #24
void mpi_probe (MPI_Fint *source, MPI_Fint *tag, MPI_Fint *comm,
		MPI_Fint *status, MPI_Fint *__ierr)
{
  MPI_Status c_status;
  *__ierr = MPI_Probe ((int) *source, (int) *tag, MPI_Comm_f2c (*comm),
		       &c_status );
  MPI_Status_c2f (&c_status, status);
}
Example #25
template <>
int Communicator::probeSize<unsigned long>(int source, int tag) const
{
    MPI_Status status;
    int count;
    MPI_Probe(source, tag, comm_, &status);
    MPI_Get_count(&status, MPI_UNSIGNED_LONG, &count);
    return count;
}
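The two probeSize specializations (Examples #23 and #25) pair with a member function template declared in the Communicator class. A minimal sketch of such a declaration, assuming the comm_ member used above (the rest of the class is not shown in the original), is:

class Communicator
{
public:
    // Probe a pending message and return its element count interpreted as T.
    template <typename T>
    int probeSize(int source, int tag) const;

private:
    MPI_Comm comm_;
};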
Example #26
HYPRE_Int
hypre_MPI_Probe( HYPRE_Int         source,
                 HYPRE_Int         tag,
                 hypre_MPI_Comm    comm,
                 hypre_MPI_Status *status )
{
   return (HYPRE_Int) MPI_Probe((hypre_int)source, (hypre_int)tag, comm, status);
}
Example #27
void
mergeNonLocal(Prof::CallPath::Profile* profile, int rank_x, int rank_y,
	      int myRank, MPI_Comm comm)
{
  int tag = rank_y; // sender

  uint8_t* profileBuf = NULL;

  Prof::CallPath::Profile* profile_x = NULL;
  Prof::CallPath::Profile* profile_y = NULL;

  if (myRank == rank_x) {
    profile_x = profile;

    // rank_x probes rank_y
    int profileBufSz = 0;
    MPI_Status mpistat;
    MPI_Probe(rank_y, tag, comm, &mpistat);
    MPI_Get_count(&mpistat, MPI_BYTE, &profileBufSz);
    profileBuf = new uint8_t[profileBufSz];

    // rank_x receives profile from rank_y
    MPI_Recv(profileBuf, profileBufSz, MPI_BYTE, rank_y, tag, comm, &mpistat);

    profile_y = unpackProfile(profileBuf, (size_t)profileBufSz);
    delete[] profileBuf;

    if (DBG_CCT_MERGE) {
      string pfx0 = "[" + StrUtil::toStr(rank_x) + "]";
      string pfx1 = "[" + StrUtil::toStr(rank_y) + "]";
      DIAG_DevMsgIf(1, profile_x->metricMgr()->toString(pfx0.c_str()));
      DIAG_DevMsgIf(1, profile_y->metricMgr()->toString(pfx1.c_str()));
    }
    
    int mergeTy = Prof::CallPath::Profile::Merge_MergeMetricByName;
    profile_x->merge(*profile_y, mergeTy);

    if (DBG_CCT_MERGE) {
      string pfx = ("[" + StrUtil::toStr(rank_y)
		    + " => " + StrUtil::toStr(rank_x) + "]");
      DIAG_DevMsgIf(1, profile_x->metricMgr()->toString(pfx.c_str()));
    }

    delete profile_y;
  }

  if (myRank == rank_y) {
    profile_y = profile;

    size_t profileBufSz = 0;
    packProfile(*profile_y, &profileBuf, &profileBufSz);

    // rank_y sends profile to rank_x
    MPI_Send(profileBuf, (int)profileBufSz, MPI_BYTE, rank_x, tag, comm);

    free(profileBuf);
  }
}
    status operator()(MPI_Comm comm, int source, int tag, std::vector<U>& x) const
    {
      status s;

      MPI_Probe(source, tag, comm, &s.s);
      x.resize(s.count<U>());
      MPI_Recv(&x[0], x.size(), get_mpi_datatype<U>(), source, tag, comm, &s.s);
      return s;
    }
Example #29
/**
 * @brief Wrapper around MPI_Probe
 *
 * We check the error code to detect MPI errors and use the default communicator
 * MPI_WORLD.
 *
 * @param source Rank of the source process
 * @param tag Tag identifying the message
 * @param status MPI status object to store information in
 */
inline void MyMPI_Probe(int source, int tag, MPI_Status* status) {
    MPIGlobal::idletimer.start();
    int result = MPI_Probe(source, tag, MPI_COMM_WORLD, status);
    if(result != MPI_SUCCESS) {
        std::cerr << "Error in MPI_Probe!" << std::endl;
        my_exit();
    }
    MPIGlobal::idletimer.stop();
}
 inline void MyMPI_Recv (string & s, int src, int tag)
 {
   MPI_Status status;
   int len;
   MPI_Probe (src, tag, MPI_COMM_WORLD, &status);
   MPI_Get_count (&status, MPI_CHAR, &len);
   s.assign (len, ' ');
   MPI_Recv( &s[0], len, MPI_CHAR, src, tag, MPI_COMM_WORLD, &status);
 }
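The string receiver above sizes its buffer from MPI_Probe/MPI_Get_count, so a matching sender only needs to ship the raw characters; the length travels implicitly in the message size. A possible counterpart (an assumption, not part of the original) is:

 inline void MyMPI_Send (const string & s, int dest, int tag)
 {
   // Receiver recovers the length by probing, so no explicit length message is needed.
   MPI_Send ( (void*)s.c_str(), (int)s.size(), MPI_CHAR, dest, tag, MPI_COMM_WORLD);
 }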