Example #1
        int MpiChannelInterface::sendBytes( void * data, size_type headerSize, size_type bodySize, int rcverLocalId )
        {
            VT_FUNC_I( "MPI::sendBytes" );
            CNC_ASSERT( 0 <= rcverLocalId && rcverLocalId < numProcs() );
            if( bodySize + headerSize > INT_MAX ) {
                std::cerr << "MPI_Get_count doesn't allow a count > " << INT_MAX << ". No workaround implemented yet." << std::endl;
                MPI_Abort( m_communicator, 1234 );
            }
            char* header_data = static_cast<char*>( data );
            MPI_Request request = MPI_REQUEST_NULL;
            if( headerSize+bodySize < BUFF_SIZE ) {
                // small message: header and body fit into a single transfer
                MPI_Isend( header_data, headerSize+bodySize, MPI_CHAR, rcverLocalId, FIRST_MSG, m_communicator, &request );
            } else {
                CNC_ASSERT( bodySize > 0 );
                // large message: announce via the header first, then ship the body separately
                // header Tag should not be equal to localId()
                MPI_Send( header_data, headerSize, MPI_CHAR, rcverLocalId, FIRST_MSG, m_communicator );
                char * body_data = header_data+headerSize;
                MPI_Isend( body_data, bodySize, MPI_CHAR, rcverLocalId, SECOND_MSG, m_communicator, &request );
            }
            }
            //            { Speaker oss; oss << "sendBytes " << headerSize << " " << bodySize; }
#ifdef PRE_SEND_MSGS
            return request;
#else
            MPI_Wait( &request, MPI_STATUS_IGNORE );
            return 0;
#endif
        }
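
The branch above is an eager/rendezvous split: anything under BUFF_SIZE travels in a single FIRST_MSG transfer, while larger payloads announce themselves with the header and ship the body under SECOND_MSG. A minimal blocking sketch of the same two-part pattern in plain MPI (kBuffSize and the tag values are illustrative, not taken from the sources above):

#include <mpi.h>

enum { FIRST_MSG = 1, SECOND_MSG = 2 }; // illustrative tag values
const int kBuffSize = 1024;             // illustrative eager threshold

// Send header+body as one message if small, otherwise header first,
// then the body under a second tag (mirroring sendBytes above).
void send_two_part( const char * data, int headerSize, int bodySize,
                    int dest, MPI_Comm comm )
{
    if( headerSize + bodySize < kBuffSize ) {
        MPI_Send( const_cast< char * >( data ), headerSize + bodySize, MPI_CHAR, dest, FIRST_MSG, comm );
    } else {
        MPI_Send( const_cast< char * >( data ), headerSize, MPI_CHAR, dest, FIRST_MSG, comm );
        MPI_Send( const_cast< char * >( data ) + headerSize, bodySize, MPI_CHAR, dest, SECOND_MSG, comm );
    }
}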
Example #2
        serializer * MpiChannelInterface::waitForAnyClient( int & senderLocalId )
        {
            VT_FUNC_I( "MPI::waitForAnyClient" );

            MPI_Status status;
            MPI_Wait( &m_request, &status );
            senderLocalId = status.MPI_SOURCE;
            CNC_ASSERT( 0 <= senderLocalId && senderLocalId < numProcs() );
            int _cnt;
            MPI_Get_count( &status, MPI_CHAR, &_cnt );
            
            size_type _bodySize = m_ser1->unpack_header(); // throws an exception in case of error
            CNC_ASSERT( _bodySize + m_ser1->get_header_size() == _cnt || m_ser1->get_header_size() == _cnt );

            // if we did not receive the body yet, we need to do so now
            if( _bodySize != 0 ) {
                CNC_ASSERT( _bodySize != Buffer::invalid_size );
                BufferAccess::acquire( *m_ser1, _bodySize ); // this is needed even if all is received: sets current pointer in buffer
                if( _cnt == m_ser1->get_header_size() ) {
                    // only the header arrived in the first message; receive the
                    // body separately (acquire above already sized the buffer)
                    MPI_Recv( m_ser1->get_body(), _bodySize, MPI_CHAR, senderLocalId, SECOND_MSG, m_communicator, MPI_STATUS_IGNORE );
                }
            }

            std::swap( m_ser1, m_ser2 ); // double buffer exchange
            m_ser1->set_mode_unpack();
            MPI_Irecv( m_ser1->get_header(), BufferAccess::capacity( *m_ser1 ), MPI_CHAR, MPI_ANY_SOURCE, FIRST_MSG, m_communicator, &m_request );
            
            //            { Speaker oss; oss << "recvBytes " << _bodySize; }

            return _bodySize != 0 ? m_ser2 : NULL;
        }
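
waitForAnyClient overlaps communication with deserialization: the swap hands the just-received serializer (now m_ser2) to the caller while a fresh MPI_Irecv is already posted on m_ser1. A reduced sketch of that double-buffer pattern with raw byte buffers (receive_loop, the buffer size, and the tag are illustrative):

#include <mpi.h>
#include <utility>
#include <vector>

// Reduced sketch of the double-buffered receive: one buffer always has a
// pending MPI_Irecv while the other is handed to the consumer.
void receive_loop( MPI_Comm comm, int numMsgs )
{
    std::vector< char > bufA( 1024 ), bufB( 1024 );
    std::vector< char > * posted = &bufA; // buffer with the pending Irecv
    std::vector< char > * ready  = &bufB; // buffer the consumer may read

    MPI_Request req;
    MPI_Irecv( posted->data(), posted->size(), MPI_CHAR, MPI_ANY_SOURCE, 1, comm, &req );

    for( int i = 0; i < numMsgs; ++i ) {
        MPI_Status status;
        MPI_Wait( &req, &status );  // the posted buffer now holds a message
        std::swap( posted, ready ); // hand it over ...
        MPI_Irecv( posted->data(), posted->size(), MPI_CHAR, MPI_ANY_SOURCE, 1, comm, &req ); // ... and re-post
        // consume( *ready, status.MPI_SOURCE ); // runs while the next receive is in flight
    }
    MPI_Cancel( &req );                  // retire the extra posted receive
    MPI_Wait( &req, MPI_STATUS_IGNORE );
}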
Example #3
        void GenericCommunicator::fini()
        {
            VT_FUNC_I( "Dist::GenComm::fini" );
            // Anything to do at all?
            if ( ! m_hasBeenInitialized ) {
                return;
            } else {
                m_hasBeenInitialized = false;
            }

            // Host sends termination requests to the remote clients.
            // Each client will send a response.
            if( m_channel->localId() == 0 ) {
                for ( int client = 1; client < numProcs(); ++client ) {
                    send_termination_request( client );
                }
            }

            // Stop sender and receiver threads:
            if ( m_recvThread ) {
                m_recvThread->stop();
            }
            if ( m_sendThread ) {
                m_sendThread->stop();
            }

            // Cleanup:
            delete m_recvThread;
            m_recvThread = NULL;
            delete m_sendThread;
            m_sendThread = NULL;

            // Cleanup ITAC stuff:
            VT_FINALIZE();
        }
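
fini only triggers send_termination_request and relies on the threads to drain the clients' responses before they are stopped. As a rough illustration of such a shutdown handshake, assuming a dedicated tag and an integer token (none of these names come from GenericCommunicator):

#include <mpi.h>

// Hypothetical host-side shutdown handshake: one request per client,
// then one acknowledgement per client before tearing the channel down.
void terminate_clients( MPI_Comm comm, int numProcs )
{
    const int TERM_TAG = 99; // illustrative tag
    int token = -1;
    for( int client = 1; client < numProcs; ++client ) {
        MPI_Send( &token, 1, MPI_INT, client, TERM_TAG, comm );
    }
    for( int client = 1; client < numProcs; ++client ) {
        MPI_Recv( &token, 1, MPI_INT, client, TERM_TAG, comm, MPI_STATUS_IGNORE );
    }
}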
Example #4
 int compute_on( const int & p, my_context & ) const
 {
     // map blocks of 50000 consecutive tags ( 1000000 / 20 ) to consecutive ranks, wrapping around
     return ( 1 + p / ( 1000000 / 20 ) ) % numProcs(); // alternative mapping: ( p * 5 / 3 + 1 ) % 5;
 }
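
Since 1000000 / 20 == 50000, consecutive tags are assigned to ranks in blocks of 50000, starting at rank 1 and wrapping around at numProcs(). A quick standalone check (numProcs() is stubbed here for illustration):

#include <cstdio>

static int numProcs() { return 4; } // stub for illustration

static int compute_on( int p ) { return ( 1 + p / ( 1000000 / 20 ) ) % numProcs(); }

int main()
{
    // blocks of 50000 tags visit ranks 1, 2, 3, 0, 1, ...
    std::printf( "%d %d %d %d\n",
                 compute_on( 0 ), compute_on( 50000 ),
                 compute_on( 100000 ), compute_on( 150000 ) ); // prints: 1 2 3 0
    return 0;
}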
Example #5
 void MpiChannelInterface::recvBodyBytes( void * body, size_type bodySize, int senderLocalId )
 {
     VT_FUNC_I( "MPI::recvBodyBytes" );
     CNC_ASSERT( 0 <= senderLocalId && senderLocalId < numProcs() );
     MPI_Recv( body, bodySize, MPI_CHAR, senderLocalId, SECOND_MSG, m_communicator, MPI_STATUS_IGNORE );
 }
Example #6
void distributor::recv_msg( serializer *ser, int pid )
{
    const int terminationId = -111111;

    // Host initialization stuff:
    if ( ! remote() && g_host_data == 0 ) {
        CNC_ASSERT( ! g_host_communication_done );
        g_host_data = new HostData( g_mySocketComm.numProcs() - 1 );
    }

    // Unpack message:
    BufferAccess::initUnpack( *ser );
    int dummy;
    (*ser) & dummy;
    printf( "PROC %d: received %d from %d\n", myPid(), dummy, pid );
    fflush( stdout );

    // Host checks for finalization messages:
    if ( ! remote() && dummy == terminationId ) { // indicates that client is finished
        CNC_ASSERT( 0 < pid && pid <= g_host_data->m_numClients );
        CNC_ASSERT( g_host_data->m_clientsFinished[pid] == false );
        g_host_data->m_clientsFinished[pid] = true;
        ++g_host_data->m_numClientsFinished;
        if ( g_host_data->m_numClientsFinished == g_host_data->m_numClients ) {
            g_host_communication_done = true;
            // wakes up host application thread
        }
    }

    // SENDS:
    if ( g_recv_counter == 0 )
    {
        // Clients send some messages:
        // [0 --> 1], then
        // 1 --> 2, 2 --> 3, ..., N-1 --> N, N --> 0 [=host]
        if ( remote() ) {
            int dummy = ( myPid() + 1 ) * 123;
            int recver = ( myPid() + 1 ) % numProcs();
            serializer * ser = new_serializer( 0 );
            (*ser) & dummy;
            send_msg( ser, recver );
        }

        // last client makes a bcast:
        if ( myPid() == numProcs() - 1 ) {
            int dummy = 5555;
            serializer * ser = new_serializer( 0 );
            (*ser) & dummy;
#if 1
            // bcast to all others:
            bcast_msg( ser );
#else
            // restricted bcast variant:
            std::vector< int > rcverArr;
            rcverArr.push_back( 0 );
            bcast_msg( ser, rcverArr );
#endif
        }

        // Finally each client sends a termination message to the host:
        if ( remote() ) {
            int dummy = terminationId;
            serializer * ser = new_serializer( 0 );
            (*ser) & dummy;
            send_msg( ser, 0 );
        }
    }

    // Adjust recv counter:
    ++g_recv_counter;
}
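
The test's message flow is a ring: every client forwards to ( myPid() + 1 ) % numProcs(), and a bcast plus per-client termination messages let the host detect completion. The same ring shape in a few lines of plain MPI, independent of the serializer machinery (tag and payload values are illustrative):

#include <mpi.h>
#include <cstdio>

// Each rank sends one token to its right neighbour and receives one
// from its left neighbour: 0 -> 1 -> ... -> N-1 -> 0.
int main( int argc, char ** argv )
{
    MPI_Init( &argc, &argv );
    int rank, size;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    int token = ( rank + 1 ) * 123;
    int right = ( rank + 1 ) % size;
    int left  = ( rank + size - 1 ) % size;

    int incoming = 0;
    MPI_Sendrecv( &token, 1, MPI_INT, right, 0,
                  &incoming, 1, MPI_INT, left, 0,
                  MPI_COMM_WORLD, MPI_STATUS_IGNORE );
    std::printf( "PROC %d: received %d from %d\n", rank, incoming, left );

    MPI_Finalize();
    return 0;
}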
Example #7
//------------------------------------------------------------------------
int mirrorCommPattern(MPI_Comm comm, comm_map* inPattern, comm_map*& outPattern)
{
#ifdef FEI_SER
  (void)inPattern;
  (void)outPattern;
#else
  int localP = localProc(comm);
  int numP  = numProcs(comm);

  if (numP < 2) return(0);

  std::vector<int> buf(numP*2, 0);

  int numInProcs = inPattern->getMap().size();
  std::vector<int> inProcs(numInProcs);
  fei::copyKeysToVector(inPattern->getMap(), inProcs);

  std::vector<int> outProcs;

  int err = mirrorProcs(comm, inProcs, outProcs);
  if (err != 0) ERReturn(-1);

  std::vector<int> recvbuf(outProcs.size(), 0);

  outPattern = new comm_map(0,1);

  MPI_Datatype mpi_ttype = fei::mpiTraits<int>::mpi_type();

  //now recv a length (the contents of buf[i]) from each "out-proc", which
  //will be the length of the equation data that will also be recvd from that
  //proc.
  std::vector<MPI_Request> mpiReqs(outProcs.size());
  std::vector<MPI_Status> mpiStss(outProcs.size());
  MPI_Request* requests = &mpiReqs[0];
  MPI_Status* statuses = &mpiStss[0];

  int firsttag = 11117;
  int offset = 0;
  int* outProcsPtr = &outProcs[0];
  for(unsigned i=0; i<outProcs.size(); ++i) {
    if (MPI_Irecv(&(recvbuf[i]), 1, MPI_INT, outProcsPtr[i], firsttag,
                  comm, &requests[offset++]) != MPI_SUCCESS) ERReturn(-1);
  }

  comm_map::map_type& in_row_map = inPattern->getMap();
  comm_map::map_type::iterator
    in_iter = in_row_map.begin(),
    in_end  = in_row_map.end();
 
  int* inProcsPtr = &inProcs[0];
  for(int ii=0; in_iter!= in_end; ++in_iter, ++ii) {
    comm_map::row_type* in_row = in_iter->second;
    buf[ii] = in_row->size();
    if (MPI_Send(&(buf[ii]), 1, MPI_INT, inProcsPtr[ii], firsttag,
                 comm) != MPI_SUCCESS) ERReturn(-1);
  }

  int numOutProcs = outProcs.size();

  MPI_Waitall(numOutProcs, requests, statuses);
  std::vector<int> lengths(numOutProcs);
  int totalRecvLen = 0;
  offset = 0;
  //collect the received lengths; every out-proc is expected to announce a
  //positive length, which keeps lengths[ii] aligned with outProcs[ii] below
  for(int ii=0; ii<numOutProcs; ++ii) {
    if (recvbuf[ii] > 0) {
      lengths[offset++] = recvbuf[ii];
      totalRecvLen += recvbuf[ii];
    }
  }

  //now we need to create the space into which we'll receive the
  //lists that other procs send to us.
  std::vector<int> recvData(totalRecvLen, 999999);

  int tag2 = 11118;
  offset = 0;
  for(int ii=0; ii<numOutProcs; ++ii) {
    CHK_MPI(MPI_Irecv(&(recvData[offset]), lengths[ii], mpi_ttype,
                      outProcs[ii], tag2, comm, &requests[ii]) );
    offset += lengths[ii];
  }

  std::vector<int> sendList;

  in_iter = in_row_map.begin();

  for(int ii=0; in_iter != in_end; ++in_iter,++ii) {
    if (inProcs[ii] == localP) {
      continue;
    }
    sendList.resize(in_iter->second->size());
    fei::copySetToArray(*(in_iter->second), sendList.size(), &sendList[0]);

    CHK_MPI(MPI_Send(&sendList[0], sendList.size(), mpi_ttype,
                     inProcs[ii], tag2, comm) );
  }

  //our final communication operation is to catch the Irecvs we started above.
  for(int ii=0; ii<numOutProcs; ++ii) {
    MPI_Wait(&requests[ii], &statuses[ii]);
  }

  //now we've completed all the communication, so we're ready to put the data
  //we received into the outPattern object.
  offset = 0;
  for(int ii=0; ii<numOutProcs; ++ii) {
    outPattern->addIndices(outProcs[ii], lengths[ii],
                           &(recvData[offset]));
    offset += lengths[ii];
  }

#endif
  return(0);
}
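
mirrorCommPattern is a two-phase exchange: partners first trade one MPI_INT length each under firsttag, then the index lists themselves follow under tag2, and only afterwards is outPattern filled. A reduced sketch of the length phase alone (exchange_lengths and its argument layout are mine, not FEI's):

#include <mpi.h>
#include <vector>

// Phase 1 of the mirror exchange: post one Irecv per expected sender,
// send one length per receiver, then wait for all incoming lengths.
std::vector<int> exchange_lengths(MPI_Comm comm,
                                  const std::vector<int>& sendTo,
                                  const std::vector<int>& sendLens,
                                  const std::vector<int>& recvFrom)
{
  const int tag = 11117;
  std::vector<int> recvLens(recvFrom.size(), 0);
  std::vector<MPI_Request> reqs(recvFrom.size());

  for(unsigned i=0; i<recvFrom.size(); ++i) {   //receives first ...
    MPI_Irecv(&recvLens[i], 1, MPI_INT, recvFrom[i], tag, comm, &reqs[i]);
  }
  for(unsigned i=0; i<sendTo.size(); ++i) {     //... then the sends
    int len = sendLens[i];
    MPI_Send(&len, 1, MPI_INT, sendTo[i], tag, comm);
  }
  MPI_Waitall((int)reqs.size(), reqs.data(), MPI_STATUSES_IGNORE);
  return recvLens;
}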