Example #1
void Module_DCREATE::receive_from_master() const {
    size_t length;
    MPI::Status status;
    int k;
    int blockLength;

    MPI::COMM_WORLD.Recv(&k,1,MPI::INT,0,COMMUNICATION_CHANNEL);

    MPI::COMM_WORLD.Recv(&blockLength,1,MPI::INT,0,COMMUNICATION_CHANNEL);

    MPI::COMM_WORLD.Probe(0,COMMUNICATION_CHANNEL,status);
    length = status.Get_count(MPI::CHAR);
    char input[length];
    MPI::COMM_WORLD.Recv(input,length,MPI::CHAR,0,COMMUNICATION_CHANNEL);

    MPI::COMM_WORLD.Probe(0,COMMUNICATION_CHANNEL,status);
    length = status.Get_count(MPI::CHAR);
    char output[length];
    MPI::COMM_WORLD.Recv(output,length,MPI::CHAR,0,COMMUNICATION_CHANNEL);

    DEFAULT_CHANNEL << "Information from master received by node " << my_rank << endl;

    if (strlen(input) != 0 and strlen(output) != 0)
    	compute_hash(k,blockLength,input,output,false); //TODO handle methyl_hash

}
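The probe-then-receive idiom above can be condensed into a safer form; a minimal sketch using std::vector instead of variable-length arrays (recv_string is a hypothetical helper, and the plain source/tag parameters stand in for this module's rank 0 and COMMUNICATION_CHANNEL):

#include <mpi.h>
#include <string>
#include <vector>

// Sketch: receive a message whose length is unknown in advance.
// Probe blocks until a matching message is pending; Get_count then
// reports the exact payload size, so the buffer is sized exactly.
std::string recv_string(int source, int tag) {
    MPI::Status status;
    MPI::COMM_WORLD.Probe(source, tag, status);
    int length = status.Get_count(MPI::CHAR);
    std::vector<char> buffer(length);
    MPI::COMM_WORLD.Recv(buffer.data(), length, MPI::CHAR, source, tag);
    return std::string(buffer.begin(), buffer.end());
}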
Example #2
    bool recvData(std::vector<double>& receivedData)
    {
        bool isDataReceived = false;
        if ( intraComm != MPI::COMM_NULL)
        {
            MPI::Status status;
            double buffer[100];
            intraComm.Recv(buffer, 100,
                           MPI::DOUBLE,
                           MPI::ANY_SOURCE,
                           /*tag*/ 100,
                           status);

            int count = status.Get_count(MPI::DOUBLE);
            receivedData = std::vector<double>(buffer, buffer+count);

            log.Info() << "RECV [ " << getRank()
                        << " <-- "
                        << status.Get_source()
                        << " ] data : "
                        << receivedData
                        << std::endl;
            isDataReceived = true;
        }else
        {
            log.Err() << "PID " << getProcessId()
                      << " failed to RECV"
                      << std::endl;
        }
        return isDataReceived;
    }
Example #3
void HPC::MPICommunication::waitReception(Request::Handle ioRequest) const
{
	Beagle_StackTraceBeginM();
	Beagle_NonNullPointerAssertM(ioRequest);
	MPI::Status lStatus;
	ioRequest->mSizeRequest.Wait(lStatus);
	if(lStatus.Is_cancelled()) return;
	int lRank = lStatus.Get_source();
	int lMsgSize = ioRequest->mSize;
	std::string lStringTag = ioRequest->mTag + "_str";
	MPI::COMM_WORLD.Probe(lRank,hashTag(lStringTag),lStatus);
	Beagle_AssertM(lStatus.Get_count(MPI::CHAR) == lMsgSize);
	//constructing a string of the right size.
	std::string lMsg(lMsgSize, ' ');
	MPI::COMM_WORLD.Recv(&lMsg[0], lMsgSize, MPI::CHAR, lRank, hashTag(lStringTag));
#ifdef BEAGLE_HAVE_LIBZ
	if(mCompressionLevel->getWrappedValue() > 0){
		ioRequest->mMessage = new Beagle::String;
		decompressString(lMsg, ioRequest->mMessage->getWrappedValue());
	} else {
		ioRequest->mMessage = new Beagle::String(lMsg);
	}
#else
	ioRequest->mMessage = new Beagle::String(lMsg);
#endif
	Beagle_HPC_StackTraceEndM("void HPC::MPICommunication::waitReception(Request::Handle) const");
}
Example #4
void HPC::MPICommunication::waitSending(Request::Handle ioRequest) const
{
	Beagle_StackTraceBeginM();
	Beagle_NonNullPointerAssertM(ioRequest);
	MPI::Status lStatus;
	ioRequest->mSizeRequest.Wait(lStatus);
	if(lStatus.Is_cancelled()) return;
	ioRequest->mMsgRequest.Wait();
	Beagle_HPC_StackTraceEndM("void HPC::MPICommunication::waitSending(Request::Handle) const");
}
Example #5
void
ParaCommMpiWorld::probe(
   int* source,
   int* tag
   )
{
   MPI::Status mpiStatus;
   MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, mpiStatus);
   *source = mpiStatus.Get_source();
   *tag = mpiStatus.Get_tag();
   TAG_TRACE (Probe, From, *source, *tag);
}
Example #6
//#####################################################################
// Function Recv_Columns
//#####################################################################
template<class T_GRID> template<class T_ARRAYS_HORIZONTAL_COLUMN> void MPI_RLE_GRID<T_GRID>::
Recv_Columns(T_ARRAYS_HORIZONTAL_COLUMN& columns,const ARRAY<T_BOX_HORIZONTAL_INT>& regions,const int tag,const MPI::Status& probe_status) const
{
    ARRAY<char> buffer(probe_status.Get_count(MPI::PACKED));
    int position=0;
    comm->Recv(&buffer(1),buffer.m,MPI::PACKED,probe_status.Get_source(),tag);
    TV_HORIZONTAL_INT direction;
    MPI_UTILITIES::Unpack(direction,buffer,position,*comm);
    int neighbor=0;
    all_neighbor_directions.Find(-direction,neighbor);
    for(typename T_HORIZONTAL_GRID::CELL_ITERATOR iterator(local_grid.horizontal_grid,regions(neighbor)); iterator.Valid(); iterator.Next())
        MPI_UTILITIES::Unpack(columns(iterator.Cell_Index()),buffer,position,*comm);
}
Example #7
bool Neatzsche_MPI::readPopulation(Phenotypes * p, Coevolution * c, TransferFunctions * tfs)
{
  MPI::Status status;
  MPI::Datatype nodetype,genetype;//struct types built below for nodes and genes
  int genomes,genes,nodes,id;
  MPI::COMM_WORLD.Recv(&genomes,1,MPI::INT,0,0);//Receive the number of genomes
  NeuralNodeSmall * nns;
  GeneSmall * gs;
  Genome * genome = NULL;
  int stringc=0; 
  char *strbuf;
  vector<string> * ftypes = NULL;
  for(int i=0;i<genomes;i++){
    ftypes = new vector<string>();
    MPI::COMM_WORLD.Recv(&id,1,MPI::INT,0,0);
    MPI::COMM_WORLD.Recv(&nodes,1,MPI::INT,0,0);
    MPI::COMM_WORLD.Recv(&genes,1,MPI::INT,0,0);
//     nns = (NeuralNodeSmall*)malloc(sizeof(NeuralNodeSmall)*nodes);
//     gs = (GeneSmall*)malloc(sizeof(GeneSmall)*genes);
    nns = new NeuralNodeSmall [nodes];
    gs = new GeneSmall[genes];

    nodetype = Build_neuralnode_type(&nns[0]);
    MPI::COMM_WORLD.Recv(nns,nodes,nodetype,0,0);
    for(int j=0;j<nodes;j++){//blargh, 1 int would be more useful in this case :P
      MPI::COMM_WORLD.Probe(0, MPI_Cont, status);
      stringc = status.Get_count(MPI::CHAR);
      strbuf = (char*) malloc(sizeof(char)*stringc);
      MPI::COMM_WORLD.Recv(strbuf,stringc,MPI::CHAR,0,0);//receive the ftype of the node
      ftypes->push_back(string(strbuf).substr(0,stringc));
      free(strbuf);
    }
    genetype = Build_gene_type(&gs[0]);
    MPI::COMM_WORLD.Recv(gs,genes,genetype,0,0);

    genome = new Genome(tfs);
    genome->fromSmall(id,nodes,nns,genes,gs,ftypes);
    delete ftypes;
    p->push_back(new Phenotype(genome));
    if(nodes>0)
      delete[] nns; 
    if(genes>0)
      delete[] gs;
  }
  unsigned int cont;
  MPI::COMM_WORLD.Recv(&cont,1,MPI::INT,0,0);//continue or stop?
  return cont == MPI_Cont;

}
Example #8
bool
ParaCommMpiWorld::iProbe(
   int* source,
   int* tag
   )
{
   bool flag;
   MPI::Status mpiStatus;
   flag = MPI::COMM_WORLD.Iprobe(MPI::ANY_SOURCE, MPI::ANY_TAG, mpiStatus);
   if( flag )
   {
      *source = mpiStatus.Get_source();
      *tag = mpiStatus.Get_tag();
      TAG_TRACE (Iprobe, From, *source, *tag);
   }
   return flag;
}
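Unlike the blocking Probe of the previous examples, Iprobe returns immediately, so it is typically polled in a loop that overlaps local work with waiting. A minimal sketch of that pattern (do_background_work is a hypothetical placeholder, not part of ParaCommMpiWorld):

#include <mpi.h>

static void do_background_work() { /* hypothetical: useful local computation */ }

// Sketch: poll with Iprobe and receive only once a message is pending.
void poll_then_recv() {
    MPI::Status status;
    int payload = 0;
    while (!MPI::COMM_WORLD.Iprobe(MPI::ANY_SOURCE, MPI::ANY_TAG, status)) {
        do_background_work();   // keep busy while nothing is queued
    }
    MPI::COMM_WORLD.Recv(&payload, 1, MPI::INT,
                         status.Get_source(), status.Get_tag());
}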
Example #9
int HPC::MPICommunication::waitAny(Request::Bag& ioRequests) const
{
	Beagle_StackTraceBeginM();
	unsigned int lSize = ioRequests.size();
	std::vector<MPI::Request> lRequests;
	lRequests.reserve(lSize);
	for(unsigned int i = 0; i < lSize; ++i){
		lRequests.push_back(ioRequests[i]->mSizeRequest);
	}
	MPI::Status lStatus;
	int lIndex = MPI::Request::Waitany(lSize, &lRequests[0], lStatus);
	ioRequests[lIndex]->mSizeRequest = lRequests[lIndex];
	if(lStatus.Is_cancelled()) return -1;

	if(ioRequests[lIndex]->mType == Request::RECEPTION){
		int lRank = lStatus.Get_source();
		std::string lStringTag = ioRequests[lIndex]->mTag + "_str";
		int lMsgSize = ioRequests[lIndex]->mSize;
		MPI::COMM_WORLD.Probe(lRank,hashTag(lStringTag),lStatus);
		Beagle_AssertM(lStatus.Get_count(MPI::CHAR) == lMsgSize);

		//constructing a string of the right size.
		std::string lMsg(lMsgSize,' ');
		MPI::COMM_WORLD.Recv(&lMsg[0], lMsgSize, MPI::CHAR, lRank, hashTag(lStringTag));
#ifdef BEAGLE_HAVE_LIBZ
		if(mCompressionLevel->getWrappedValue() > 0){
			ioRequests[lIndex]->mMessage = new Beagle::String;
			decompressString(lMsg, ioRequests[lIndex]->mMessage->getWrappedValue());
		} else {
			ioRequests[lIndex]->mMessage = new Beagle::String(lMsg);
		}
#else
		ioRequests[lIndex]->mMessage = new Beagle::String(lMsg);
#endif
	} else if(ioRequests[lIndex]->mType == Request::SENDING){
		ioRequests[lIndex]->mMsgRequest.Wait();
	} else {
		throw RunTimeException("MPI::Communication::waitAny(Request::Bag&) the request "+uint2str(lIndex)+
				       " is invalid",__FILE__,__LINE__);
	}
	return lIndex;
	Beagle_HPC_StackTraceEndM("int HPC::MPICommunication::waitAny(Request::Bag&) const");
}
Example #10
int
ParaCommMpiWorld::waitSpecTagFromSpecSource(
      const int source,
      const int datatypeId,
      const int tag,
      int *receivedTag
      )
{
   MPI::Status mpiStatus;
   MPI::COMM_WORLD.Probe(source, MPI::ANY_TAG, mpiStatus);
   (*receivedTag) = mpiStatus.Get_tag();
   TAG_TRACE (Probe, From, source, (*receivedTag));
   if( tag == (*receivedTag) )
   {
      return 0;
   }
   else
   {
      return 1;
   }
}
Example #11
/*!
 * \brief Receive message from a specific node rank via MPI
 * \param outMessage Message received.
 * \param inTag Tag associated to the message to be received.
 * \param inRank Node rank of the sending node.
 */
void HPC::MPICommunication::receive(std::string& outMessage, const std::string& inTag, int inRank) const
{
	Beagle_StackTraceBeginM();
	MPI::Status lStatus;

	int lSize = 0;
	MPI::COMM_WORLD.Recv(&lSize, 1, MPI::INT, inRank, hashTag(inTag+"_size"));
	MPI::COMM_WORLD.Probe(inRank,hashTag(inTag+"_str"),lStatus);
	Beagle_AssertM(lStatus.Get_count(MPI::CHAR) == lSize);
	outMessage.resize(lSize);
	MPI::COMM_WORLD.Recv(&outMessage[0], lSize, MPI::CHAR, lStatus.Get_source(), hashTag(inTag+"_str"));

#ifdef BEAGLE_HAVE_LIBZ
	if(mCompressionLevel->getWrappedValue() > 0){
		std::string lString;
		decompressString(outMessage, lString);
		outMessage = lString;
	}
#endif
	Beagle_HPC_StackTraceEndM("void HPC::MPICommunication::receive(std::string&, const std::string&, int) const");
}
Example #12
bool
ParaCommMpiWorld::waitToken(
      int tempRank
      )
{
   pthread_mutex_lock(&tokenAccessLock);
   if( token[0] == myRank )
   {
      pthread_mutex_unlock(&tokenAccessLock);
      return true;
   }
   else
   {
      int previousRank = myRank - 1;
      if( previousRank == 0 )
      {
         if( token[0] != -1 )
         {
            previousRank = comSize - 1;
         }
      }
      int receivedTag;
      MPI::Status mpiStatus;
      MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, mpiStatus);
      receivedTag = mpiStatus.Get_tag();
      TAG_TRACE (Probe, From, mpiStatus.Get_source(), receivedTag);
      if( receivedTag == TagToken )
      {
         receive(token, 2, ParaINT, 0, TagToken);
         assert(token[0] == myRank);
         pthread_mutex_unlock(&tokenAccessLock);
         return true;
      }
      else
      {
         pthread_mutex_unlock(&tokenAccessLock);
         return false;
      }
   }
}
Example #13
void PSO::Swarm::evaluate_slave() {
  double f(log(0.0)); // log(0) = -infinity: sentinel meaning "no value yet"
  int id(0);
  int flag(0);
  int tag(0);
  int dest(0);
  Point position(numParams);
  MPI::Status status;
//  fprintf(stderr,"Slave %d ready.\n",mpi_rank);
  while (1) {
//    flag = MPI::COMM_WORLD.Iprobe(0,MPI::ANY_TAG,status);
//    if (flag) {
//      tag = status.Get_tag();
    MPI::COMM_WORLD.Recv(&id,1,MPI::INT,0,MPI::ANY_TAG,status);
    if (status.Get_tag() == 0) break;
    MPI::COMM_WORLD.Recv(position.data(),numParams,MPI::DOUBLE,0,MPI::ANY_TAG,status);
    f = p->evalFunc(position,p->evalParams);
    MPI::COMM_WORLD.Send(&id,1,MPI::INT,0,2);
    MPI::COMM_WORLD.Send(&f,1,MPI::DOUBLE,0,2);
//    }
  }
//  fprintf(stderr,"Slave %d done.\n",mpi_rank);
}
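The loop above exits when a message arrives with tag 0, so the tag, not the payload, carries the command. The matching master-side done signal (Example #21 below issues exactly such a zero-count send) might look like this sketch, where worker_rank is a placeholder:

#include <mpi.h>

// Sketch: a zero-count send whose tag (0) is the entire message.
void send_done_signal(int worker_rank) {
    int dummy = 0;
    MPI::COMM_WORLD.Send(&dummy, 0, MPI::INT, worker_rank, /*tag=*/0);
}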
Example #14
int main ( int argc, char *argv[] )

//****************************************************************************80
//
//  Purpose:
//
//    MAIN is the main program for DAY1.
//
//  Discussion:
//
//    DAY1 is exercise 3 for first day of the MPI workshop
//
//    The instructions say:
//
//    Process 1 computes the squares of the first 200 integers.
//    It sends this data to process 3.
//
//    Process 3 should divide the integers between 20 and 119 by 53,
//    getting a real result, and passes this data back to process 1.
//
//    * I presume the first 200 integers are the numbers 0 through 199.
//
//    * The instructions literally mean that process 3 should look
//      at integers whose VALUES are between 20 and 119.  I doubt that
//      is what the instructor meant, but it's more interesting than
//      simply picking the entries with index between 20 and 119,
//      so that's what I'll do.
//
//    * It is also not completely clear whether only the selected data
//      should be sent back, or the entire array.  Again, it is more
//      interesting to send back only part of the data.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license. 
//
//  Reference:
//
//    William Gropp, Ewing Lusk, Anthony Skjellum,
//    Using MPI: Portable Parallel Programming with the
//    Message-Passing Interface,
//    Second Edition,
//    MIT Press, 1999,
//    ISBN: 0262571323.
//
//  Modified:
//
//    26 October 2011
//
//  Author:
//
//    John Burkardt
//
{
# define I_DIM 200
# define R_DIM 200

  int count;
  int count2;
  int dest;
  int i;
  int i_buffer[I_DIM];
  int id;
  int p;
  float r_buffer[R_DIM];
  int source;
  MPI::Status status;
  int tag;
//
//  Initialize MPI.
//
  MPI::Init ( argc, argv );
//
//  Determine this process's rank.
//
  id = MPI::COMM_WORLD.Get_rank ( );
//
//  Get the number of processes.
//
  p = MPI::COMM_WORLD.Get_size ( );
//
//  Have Process 0 say hello.
//
  if ( id == 0 )
  {
    timestamp ( );
    cout << "\n";
    cout << "DAY1:\n";
    cout << "  C++ version\n";
    cout << "  An MPI example program.\n";
    cout << "\n";
    cout << "  Compiled on " << __DATE__ << " at " << __TIME__ << "\n";
    cout << "\n";
    cout << "  The number of processes available is " << p << "\n";
  }
//
//  If we don't have at least 4 processes, then bail out now.
//
  if ( p < 4 )
  {
    cout << "\n";
    cout << "DAY1 - Process " << id << ".\n";
    cout << "  Not enough processes for this task!\n";
    cout << "  Bailing out now!\n";
    MPI::Finalize ( );
    return 1;
  }
//
//  Process 1 knows that it will generate 200 integers, and may receive no more
//  than 200 reals.
//
  if ( id == 1 )
  {
    count = 200;

    for ( i = 0; i < count; i++ ) 
    {
      i_buffer[i] = i * i;
    }

    dest = 3;
    tag = 1;

    MPI::COMM_WORLD.Send ( i_buffer, count, MPI::INT, dest, tag );

    cout << "P:" << id << " sent " << count 
         << " integers to process " << dest << ".\n";

    source = 3;
    tag = 2;

    MPI::COMM_WORLD.Recv ( r_buffer, R_DIM, MPI::FLOAT, source, tag, status );

    cout << "P:" << id << " received real values from process 3.\n";

    count = status.Get_count ( MPI::FLOAT );

    cout << "P:" << id << " Number of real values received is "
         << count << ".\n";

    cout << "P:" << id << " First 3 values = "
         << r_buffer[0] << "  "
         << r_buffer[1] << "  "
         << r_buffer[2] << "\n";
  }
//
//  Process 3 receives the integer data from process 1, selects some of the data, does
//  a real computation on it, and sends that part back to process 1.
//
  else if ( id == 3 ) 
  {
    source = 1;
    tag = 1;

    MPI::COMM_WORLD.Recv ( i_buffer, I_DIM, MPI::INT, source, tag, status );

    cout << "\n";
    cout << "P:" << id << " received integer values from process 1.\n";

    count = status.Get_count ( MPI::INT );

    cout << "P:" << id << " - Number of integers received is " 
         << count << ".\n";

    cout << "P:" << id << " First 3 values = "
         << i_buffer[0] << "  "
         << i_buffer[1] << "  "
         << i_buffer[2] << "\n";

    count2 = 0;
     
    for ( i = 0; i < count; i++ ) 
    {
      if ( 20 <= i_buffer[i] && i_buffer[i] <= 119 ) 
      {

        r_buffer[count2] = ( float ) i_buffer[i] / 53.0E+00;
        count2 = count2 + 1;

        if ( count2 <= 3 ) 
        {
          cout << "P:" << id << " Input integer " << i_buffer[i]
               << " becomes " << r_buffer[count2-1] << ".\n";
        }

      }
    }

    dest = 1;
    tag = 2;
  
    MPI::COMM_WORLD.Send ( r_buffer, count2, MPI::FLOAT, dest, tag );

    cout << "P:" << id << " sent " << count2 << " reals to process "
         << dest << ".\n";
  }
  else
  {
    cout << "\n";
    cout << "P:" << id << " - MPI has no work for me!\n";
  }
//
//  Terminate MPI.
//
  MPI::Finalize ( );
//
//  Terminate.
//
  if ( id == 0 )
  {
    cout << "\n";
    cout << "DAY1:\n";
    cout << "  Normal end of execution.\n";
    cout << "\n";
    timestamp ( );
  }
  return 0;

# undef I_DIM
# undef R_DIM
}
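Both branches above rely on the receive-then-count idiom: the count passed to Recv (I_DIM or R_DIM) is only a capacity, and status.Get_count reports how many elements actually arrived. A minimal sketch of the idiom in isolation (recv_up_to is a hypothetical helper):

#include <mpi.h>

// Sketch: receive up to `capacity` floats; return the actual count.
int recv_up_to(float* buffer, int capacity, int source, int tag) {
    MPI::Status status;
    MPI::COMM_WORLD.Recv(buffer, capacity, MPI::FLOAT, source, tag, status);
    return status.Get_count(MPI::FLOAT);   // anywhere from 0 to capacity
}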
Example #15
Module_DMAP::Transmitting_Result Module_DMAP::recv_output(int node, int id) {
	int count;
	unsigned long int * positions;
	unsigned long int * global_positions;
	int * contigs;
	//t_alignment * types;
	int * NMs;
	int * lengths;
	int * algn;
	unsigned short int * bools;
	unsigned int * trim_info;
	char * informations;
	unsigned short int bool_temp;
	Mask * reads;

	{
		mutex::scoped_lock lock(mpi_mutex);
		//DEFAULT_CHANNEL << '[' << my_rank << ',' << id << "] Waiting info from node " << node << " to node " << my_rank << endl;
		if (finished)
			return Transmitting_Result(NULL,0);
		MPI::COMM_WORLD.Recv(&count,1,MPI::INT,my_rank-1,COMMUNICATION_CHANNEL);
		//DEFAULT_CHANNEL << '[' << my_rank << ',' << id << "] Receive " << count << " OUTPUTs from node " << node << " to node " << my_rank << endl;
		if (count == 0) {
			finished = true;
			return Transmitting_Result(NULL,0);
		}

		positions = new unsigned long int[count*2];
		global_positions = new unsigned long int[count*2];
		contigs = new int[count];
		//types = new t_alignment[count];
		NMs = new int[count*2];
		lengths = new int[count*2];
		algn = new int[count];
		bools = new unsigned short int[count];
		trim_info = new unsigned int[count*2];

		size_t sum;
		MPI::Status status;
		MPI::COMM_WORLD.Probe(node,DATA_CHANNEL,status);
		sum = status.Get_count(MPI::CHAR);
		informations = new char[sum];
		MPI::COMM_WORLD.Recv(informations,sum,MPI::CHAR,node,DATA_CHANNEL);

		MPI::COMM_WORLD.Recv(positions,count*2,MPI::UNSIGNED_LONG,node,DATA_CHANNEL);
		MPI::COMM_WORLD.Recv(global_positions,count*2,MPI::UNSIGNED_LONG,node,DATA_CHANNEL);
		MPI::COMM_WORLD.Recv(contigs,count,MPI::INT,node,DATA_CHANNEL);
		//MPI::COMM_WORLD.Recv(types,count*sizeof(t_alignment),MPI::CHAR,node,DATA_CHANNEL);
		MPI::COMM_WORLD.Recv(NMs,count*2,MPI::INT,node,DATA_CHANNEL);
		MPI::COMM_WORLD.Recv(lengths,count*2,MPI::INT,node,DATA_CHANNEL);
		MPI::COMM_WORLD.Recv(algn,count,MPI::INT,node,DATA_CHANNEL);
		MPI::COMM_WORLD.Recv(bools,count,MPI::UNSIGNED_SHORT,node,DATA_CHANNEL);
		MPI::COMM_WORLD.Recv(trim_info,count*2,MPI::UNSIGNED,node,DATA_CHANNEL);
	}
	reads = new Mask[count];
	char * h = informations;
	for (int i = 0; i < count; i++) {
		Mask & r = reads[i];
		r.id = string(h);
		h += r.id.size() + 1;
		r.sequence = string(h);
		h += r.sequence.size() + 1;
		r.quality = string(h);
		h += r.quality.size() + 1;

		r.position = positions[i*2];
		r.position_gap = positions[i*2+1];
		r.globalPosition = global_positions[i*2];
		r.globalPosition_gap = global_positions[i*2+1];
		r.contig = contigs[i];
		r.length1_gap = lengths[i*2];
		r.length2_gap = lengths[i*2+1];
		//r.type = types[i];
		r.NM = NMs[i*2];
		r.NM_gap = NMs[i*2+1];
		r.algn = algn[i];
		r.good_region_start = trim_info[i*2];
		r.good_region_stop  = trim_info[i*2+1];

		bool_temp = bools[i];
		r.strand = bool_temp & 0x01;
		r.masked = bool_temp & 0x02;
		r.low_quality = bool_temp & 0x04;
		r.trimmed = bool_temp & 0x08;
		r.discarded = bool_temp & 0x10;
		r.low_complexity = bool_temp & 0x20;
		r.contaminated = bool_temp & 0x40;
		r.gapped = bool_temp & 0x80;
	}
	delete [] positions;
	delete [] contigs;
	//delete [] types;
	delete [] NMs;
	delete [] algn;
	delete [] bools;
	delete [] trim_info;
	delete [] lengths;
	delete [] global_positions;
	delete [] informations;

	//DEFAULT_CHANNEL << '[' << my_rank << ',' << id << "] Received " << count << " OUTPUTs from node " << node << " to node " << my_rank << endl;
	return Transmitting_Result(reads,count);
}
Example #16
int main(int argc, char * argv[]){

	int tag, send_tag;//tags for MPI_Recv and MPI_Send
	int to,from;//destination and source of MPI send/receive
	int st_count, st_source, st_tag;
	double start_time = 0.0;//start and end times for MPI_Wtime()
	double end_time = 0.0;
	MPI::Status status;

	MPI::Init(argc, argv);//start MPI
	int rank = MPI::COMM_WORLD.Get_rank();//the rank of this machine
	int size = MPI::COMM_WORLD.Get_size();//the number of processes
//	MPI_Barrier(MPI_COMM_WORLD);
	int option;

	opterr = 0;
	int N = 0;
	string directory;

	while ((option = getopt(argc, argv, "d:n:"))!= -1)//getopt parses the parameters of commands, -n is the first n words that occur most frequently in files, -d is the directory which contains the files that need to be parsed.
	{
		switch (option)
		{
			case 'n':
				N = atoi(optarg);//the first N words
			break;
			case 'd':
				directory = string(optarg);// parameter of the directory
//				cout << dir <<endl;
			break;
			case '?'://when the parameter of option n is wrong, show the error information
				if (optopt == 'n')
					cerr<< "Option -"<<char(optopt)<<" requires an argument." <<endl;
			        else if (isprint (optopt))
					cerr<< "Unknown option `-"<<char(optopt)<<"'.\n"<<endl;
				else
					cerr<<  "Unknown option character `"<<std::hex<<optopt<<"'."<<endl;
		}
	}

	vector<string> filenames;//use this vector to store file names
	char buffer[1024];
	
	if(rank == 0)//Machine 0 reads the directory and collects the file names.
	{	
		struct dirent *ptr;
		DIR *dir;
		dir = opendir(directory.c_str());//open the directory

		while((ptr = readdir(dir))!=NULL)//read the name of the directory
		{
			if(ptr->d_name[0]=='.')
				continue;
			strcpy(buffer,directory.c_str());
			strcat(buffer,ptr->d_name);
//			cout<<buffer<<endl;
			
			filenames.push_back(string(buffer));//put the file names of the directory in the vector filenames
		};
	}

	if(rank == 0)//machine 0 sends messages and assigns tasks to all the machines, including itself.
	{
		start_time = MPI_Wtime();//start time stamp
		to = 0;
		send_tag = 0;
		int round = 0;

		while(round * size < filenames.size())
		{
			for(int i = round * size; i < (round + 1) * size && i < filenames.size(); i++)
			{
				sprintf(buffer, "%s", filenames[i].c_str());
				
//				cout << rank << ":"<< "sending " << buffer << endl;
				MPI::COMM_WORLD.Send(buffer,1024, MPI::CHAR, i%size, send_tag);//send a filename to each machine, including rank 0 itself
				to++;
				send_tag++;
			}

		
			tag = MPI::ANY_TAG;
			from = MPI::ANY_SOURCE;
			MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);//rank 0 receives a parsing result from any machine, including itself
			st_count = status.Get_count(MPI::CHAR);
			st_source = status.Get_source();
			st_tag = status.Get_tag();
			
			string result("");
			result = parse(buffer, N);
			strcpy(buffer,result.c_str());

			MPI::COMM_WORLD.Send(buffer,1024, MPI::CHAR, 0, st_tag);//rank 0 sends its own result to itself
			
			for(int i = round * size; i < (round + 1) * size && i < filenames.size(); i++)
			{
				tag = MPI::ANY_TAG;
				from = MPI::ANY_SOURCE;
				MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);
				st_count = status.Get_count(MPI::CHAR);
				st_source = status.Get_source();
				st_tag = status.Get_tag();

//				cout << rank <<":" << "received from "<<st_source<<endl<< buffer << endl;
				cout << buffer << endl;
			}

			round++;
		}

		for (int i = 1; i < size; ++i)
		{
			strcpy(buffer, "Finish");
			MPI::COMM_WORLD.Send(buffer,1024, MPI::CHAR, i, 0);//rank 0 sends the Finish message to the other machines
		}
		end_time = MPI_Wtime();
		printf("The running time is : %lf \n",end_time-start_time);
	}
	else
	{
		while(1)
		{
			tag = MPI::ANY_TAG;
			from = MPI::ANY_SOURCE;
			MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);//receive a filename or the Finish message from rank 0
			st_count = status.Get_count(MPI::CHAR);
			st_source = status.Get_source();
			st_tag = status.Get_tag();
//			cout<<" rank " << rank <<": " << "st_count:"<<st_count<<" st_source"<< st_source << " st_tag "<< st_tag << endl;
//			cout<<"         " << buffer <<endl;

			if (strcmp(buffer, "Finish") == 0)//if the machine receives the finish information, stop receive and send
				break;

			string result("");
			result = parse(buffer, N);//parse the file received from rank 0
			strcpy(buffer,result.c_str());

			MPI::COMM_WORLD.Send(buffer,1024, MPI::CHAR, 0, st_tag);//send information back to rank 0
		}
	}

//	cout << "rank " << rank <<": "<<"I am dying, goodbye!"<<endl;
//	MPI_Barrier(MPI_COMM_WORLD);
	MPI::Finalize();//MPI finalize
	return 0;
}
Example #17
int main ( int argc, char *argv[] )

//****************************************************************************80
//
//  Purpose:
//
//    MAIN is the main program for MONTE_CARLO.
//
//  Discussion:
//
//    MONTE_CARLO illustrates the use of MPI with a Monte Carlo algorithm.
//
//    Generate N random points in the unit square.  Count M, the number
//    of points that are in the quarter circle.  Then PI is approximately
//    equal to the ratio 4 * M / N.
//
//    It's important that each processor use DIFFERENT random numbers.
//    One way to ensure this is to have a single master processor
//    generate all the random numbers, and then divide them up.
//
//    (A second way, not explored here, is simply to ensure that each
//    processor uses a different seed, either chosen by a master processor,
//    or generated from the processor ID.)
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license. 
//
//  Modified:
//
//    26 February 2007
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    William Gropp, Ewing Lusk, Anthony Skjellum,
//    Using MPI: Portable Parallel Programming with the
//    Message-Passing Interface,
//    Second Edition,
//    MIT Press, 1999,
//    ISBN: 0262571323.
//
{
  double calculatedPi;
  int dest;
  int done;
  double error;
  int i;
  int id;
  int in;
  int max;
  MPI::Status mesgStatus;
  int num_procs;
  int out;
  int point_max = 1000000;
  int randServer;
  int randNums[CHUNKSIZE];
  int ranks[1];
  int request;
  int temp;
  double tolerance;
  int totalin;
  int totalout;
  MPI::Group worker_group;
  MPI::Intracomm worker_comm;
  MPI::Group world_group;
  double x;
  double y;
//
//  Initialize MPI.
//
  MPI::Init ( argc, argv );
//
//  Get the number of processors.
//
  num_procs = MPI::COMM_WORLD.Get_size ( );
//
//  Get the rank of this processor.
//
  id = MPI::COMM_WORLD.Get_rank ( );

  if ( id == 0 ) 
  {
    timestamp ( );
    cout << "\n";
    cout << "MONTE_CARLO - Master process:\n";
    cout << "  C++ version\n";
    cout << "  Estimate pi by the Monte Carlo method, using MPI.\n";
    cout << "\n";
    cout << "  Compiled on : " << __DATE__ << " at " << __TIME__ << ".\n";
    cout << "\n";
    cout << "  The number of processes is " << num_procs << ".\n";
    cout << "\n";
    cout << "  Points in the unit square will be tested\n";
    cout << "  to see if they lie in the unit quarter circle.\n";
  }
//
//  Pretend that the tolerance TOLERANCE is supplied externally
//  to the master process, which must then broadcast it to all
//  other processes.
//
  if ( id == 0 )
  {
    tolerance = 0.0001;

    cout << "\n";
    cout << "  The method will continue to improve the estimate until:\n";
    cout << "  PI is computed to within a tolerance = " << tolerance << "\n";
    cout << "  or the number of points examined reaches " << point_max << ".\n";
  }

  MPI::COMM_WORLD.Bcast ( &tolerance, 1, MPI::DOUBLE_PRECISION, 0 );

  cout << "  Process " << id << " is active.\n";
//
//  Start by getting the group corresponding to the world communicator.
//
  world_group = MPI::COMM_WORLD.Get_group ( );  
//
//  Put SERVER on the list of processes to exclude, and create the new
//  worker group.
//
  randServer = num_procs-1;
  ranks[0] = randServer;
  worker_group = world_group.Excl ( 1, ranks );
// 
//  Use the worker group to create the new worker communicator.
//
  worker_comm = MPI::COMM_WORLD.Create ( worker_group );
//
//  Since we only needed the worker group to create the worker
//  communicator, we can free the worker group now.
//
  worker_group.Free ( );
//
//  Here is where the computation is carried out.
//

// 
//  I am the rand server.
//
  if ( id == randServer ) 
  {
# if RANDOM_SEED
    struct timeval time;
    gettimeofday ( &time, 0 );
// 
//  Initialize the random number generator 
//
    srandom ( (int)(time.tv_sec*1000000+time.tv_usec) );
# endif
    do
    {
      MPI::COMM_WORLD.Recv ( &request, 1, MPI::INT, MPI::ANY_SOURCE,
        NEED_NUMBERS, mesgStatus );

      if ( request ) 
      {
        for ( i = 0; i < CHUNKSIZE; i++) 
        {
          randNums[i] = random();
        }
        dest = mesgStatus.Get_source ( );

        MPI::COMM_WORLD.Send ( randNums, CHUNKSIZE, MPI::INT,
          dest, RANDOM_NUMBERS );
      }
    } while ( 0 < request );
  }
// 
//  I am a worker process.
//
  else  
  {
    request = 1;
    done = 0;
    in = 0;
    out = 0;
//
//  Find the maximum integer for normalization.
//
    max = 2147483647;
//
//  Request a chunk of random numbers.
//
    MPI::COMM_WORLD.Send ( &request, 1, MPI::INT, randServer, NEED_NUMBERS );

    while ( !done ) 
    {
      request = 1;
      MPI::COMM_WORLD.Recv ( randNums, CHUNKSIZE, MPI::INT, randServer,
        RANDOM_NUMBERS, mesgStatus );

      for ( i = 0; i < CHUNKSIZE; ) 
      {
        x = ( ( float ) randNums[i++] ) / max;
        y = ( ( float ) randNums[i++] ) / max;

        if ( x * x + y * y < 1.0E+00 ) 
        {
          in++;
        } 
        else 
        {
          out++;
        }
      }
// 
//  Total the number of points that are within the circle.
// 
      temp = in;
      worker_comm.Reduce ( &temp, &totalin, 1, MPI::INT, MPI::SUM, 0 );
// 
//  Total the number of points that are outside the circle.
//
      temp = out;
      worker_comm.Reduce ( &temp, &totalout, 1, MPI::INT, MPI::SUM, 0 );

      if ( id == 0 ) 
      {
        calculatedPi = ( 4.0E+00 * totalin ) / ( totalin + totalout );
        error = fabs ( calculatedPi - 3.141592653589793238462643E+00 );
        done = ( error < tolerance ) || point_max < ( totalin + totalout );
        cout << "pi = " << setw(24) << setprecision(16) << calculatedPi << "\n";

        if ( done )
        {
          request = 0;
        }
        else
        {
          request = 1;
        }
        MPI::COMM_WORLD.Send ( &request, 1, MPI::INT, randServer, 
          NEED_NUMBERS );

        worker_comm.Bcast ( &done, 1, MPI::INT, 0 );
      } 
      else
      {
        worker_comm.Bcast ( &done, 1, MPI::INT, 0 );

        if ( !done ) 
        {
          request = 1;
          MPI::COMM_WORLD.Send ( &request, 1, MPI::INT, randServer,
            NEED_NUMBERS );
        }
      }
    }
  }

  if ( id == 0 ) 
  {
    cout << "\n";
    cout << "Number of points: " << totalin + totalout << "\n";
    cout << "Number inside:    " << totalin << "\n";
    cout << "Number outside:   " << totalout << "\n";
  }
//
//  Terminate MPI.
//
  MPI::Finalize ( );
//
//  Terminate.
//
  if ( id == 0 )
  {
    cout << "\n";
    cout << "MONTE_CARLO - Master process:\n";
    cout << "  Normal end of execution.\n";
    cout << "\n";
    timestamp ( );
  }
  return 0;
}
Example #18
void PPS::start(){

    //Define parameters struct for mpi
    //Refer to this as an example http://lists.mcs.anl.gov/pipermail/mpich-discuss/2009-April/004880.html
    MPI::Datatype MPIPPSTRUCT;
    int blockcounts[2];
    MPI::Aint offsets[2];
    MPI::Datatype datatypes[2];
    MPI::Aint extent,lb;

    blockcounts[0] = 9; //Number of ints
    blockcounts[1] = 13; //number of __fpv
    datatypes[0] = MPI::INT;
    datatypes[1] = MPIFPV;
    offsets[0] = 0;

    MPI::INT.Get_extent(lb, extent);

    offsets[1] = blockcounts[0] * extent;

    MPIPPSTRUCT = MPI::Datatype::Create_struct(2,blockcounts,offsets, datatypes);
    MPIPPSTRUCT.Commit();

    if(PPS::pid == 0){

        struct parameters temp;
        int start,i,countdown = PPS::comm_size-1;
        bool ready = false;
        MPI::Status status;

        //Logs
        std::ofstream logsfile;
        logsfile.open("tslogs.txt",  std::fstream::out | std::fstream::trunc);

        while(true){


            if(countdown == 0) break;

            //Check first ready-to-compute process
            MPI::COMM_WORLD.Recv(&ready, 1, MPI::BOOL, MPI::ANY_SOURCE, 0, status);

            //Logs
            logsfile << "Remaining sims: " << PPS::plist.size()  << " process countdown: " << countdown << std::endl;

            //If a process is ready, either stop it (no sims left) or send it the next parameter set
            if(ready){
                if(PPS::plist.size() == 0 ){
                    start = EXIT_PROCESS;
                    MPI::COMM_WORLD.Send(&start, 1, MPI::INT, status.Get_source(), 0);
                    countdown = countdown - 1;
                }else{
                    //Prepare him to receive the params and start the sim (an int that contains the simulation number (-1 = exit))
                    start = PPS::plist.size() - 1;
                    MPI::COMM_WORLD.Send(&start, 1, MPI::INT, status.Get_source(), 0);

                    temp = PPS::plist.back();

                    //temp.N = status.Get_source() * 10;

                    //Deploy the parameterer struct
                    MPI::COMM_WORLD.Send(&temp, 1, MPIPPSTRUCT, status.Get_source(), 0);

                    //Pullout the parameter struct from the list
                    plist.pop_back();
                }
            }
            ready = false;
        }

        logsfile.close();



    }else{

        int status;
        bool ready = true;
        struct parameters recvparams;

        while(true){
            status = EXIT_PROCESS;
            //Tell the master via a point-to-point send that this process is free
            MPI::COMM_WORLD.Send(&ready, 1, MPI::BOOL, 0, 0);

            //receive status value to exit or to receive a new params struct to start new sim
            MPI::COMM_WORLD.Recv(&status, 1, MPI::INT, 0, 0);

            if(status != EXIT_PROCESS){
                //wait to receive parameters


                //std::this_thread::sleep_for(std::chrono::seconds(PPS::pid));

                MPI::COMM_WORLD.Recv(&recvparams, 1, MPIPPSTRUCT, 0, 0);
                //Start sim
                //std::cout << "//////////////////////////////////////////////////////////////////////////////////"<< std::endl;
                //std::cout << "SAY HI: "<< PPS::pid << std::endl;
                //print_params(recvparams);
                //std::cout << "STARTING REAL SIM"<< std::endl;
                PottsSim(recvparams,"output/"+ std::to_string(PPS::pid) + "_proc_output.dat", status);
                //old_code( PPS::pid );
                //std::cout << "//////////////////////////////////////////////////////////////////////////////////"<< std::endl;
            }else{
                std::cout << "I'm the process "<< PPS::pid << ", ready to die." << std::endl;
                break;
            }

        }

    }

    MPIPPSTRUCT.Free();


}
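The datatype construction above can also be written with offsetof instead of a computed extent. A sketch with a hypothetical payload (the example's parameters struct and MPIFPV type are project-specific, so plain ints and doubles stand in for the 9-int and 13-value blocks):

#include <mpi.h>
#include <cstddef>

// Hypothetical payload mirroring the example's int-block + value-block layout.
struct Payload { int counters[9]; double values[13]; };

// Sketch: describe Payload to MPI field by field and commit the type.
MPI::Datatype make_payload_type() {
    int blockcounts[2] = { 9, 13 };
    MPI::Datatype types[2] = { MPI::INT, MPI::DOUBLE };
    MPI::Aint offsets[2] = { offsetof(Payload, counters), offsetof(Payload, values) };
    MPI::Datatype t = MPI::Datatype::Create_struct(2, blockcounts, offsets, types);
    t.Commit();   // required before the type can be used in Send/Recv
    return t;
}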
Example #19
template<class T_GRID> int FLOOD_FILL_MPI<T_GRID>::
Synchronize_Colors()
{
    if(mpi_grid.threaded_grid) return Synchronize_Colors_Threaded();
    ARRAY<RANGE<typename T_PARALLEL_GRID::VECTOR_INT> > boundary_regions;
    mpi_grid.Find_Boundary_Regions(boundary_regions,RANGE<typename T_PARALLEL_GRID::VECTOR_INT>::Zero_Box(),false,RANGE<VECTOR<int,1> >(-1,0),false,true,local_grid);
    // figure out which colors are global
    int global_color_count=0;
    ARRAY<int,VECTOR<int,1> > color_map(-1,number_of_regions);color_map(-1)=-1;color_map(0)=0;
    {ARRAY<bool,VECTOR<int,1> > color_is_global(-1,number_of_regions);
    Find_Global_Colors(color_is_global,RANGE<typename T_PARALLEL_GRID::VECTOR_INT>::Centered_Box());
    for(int color=1;color<=number_of_regions;color++)if(color_is_global(color)) color_map(color)=++global_color_count;}

    // send numbers of global colors to everyone
    ARRAY<int> global_color_counts(mpi_grid.number_of_processes);
    mpi_grid.comm->Allgather(&global_color_count,1,MPI_UTILITIES::Datatype<int>(),&global_color_counts(1),1,MPI_UTILITIES::Datatype<int>());
    int total_global_colors=ARRAYS_COMPUTATIONS::Sum(global_color_counts);
    int global_color_offset=ARRAYS_COMPUTATIONS::Sum(global_color_counts.Prefix(mpi_grid.rank));
    LOG::cout<<"initial colors: "<<number_of_regions<<" total, "<<global_color_count<<" out of "<<total_global_colors<<" global"<<std::endl;
    if(!total_global_colors){color_ranks.Clean_Memory();return 0;}

    ARRAY<MPI_PACKAGE> packages;
    ARRAY<T_ARRAYS_INT> colors_copy(boundary_regions.m);
    // send left (front) colors
    ARRAY<MPI::Request> send_requests;
    for(int side=1;side<=T_PARALLEL_GRID::number_of_faces_per_cell;side+=2)if(mpi_grid.side_neighbor_ranks(side)!=MPI::PROC_NULL){
        Resize_Helper(colors_copy(side),local_grid,boundary_regions(side));
        Translate_Local_Colors_To_Global_Colors(color_map,colors_copy(side),boundary_regions(side),global_color_offset);
        MPI_PACKAGE package=mpi_grid.Package_Cell_Data(colors_copy(side),boundary_regions(side));
        packages.Append(package);
        send_requests.Append(package.Isend(*mpi_grid.comm,mpi_grid.side_neighbor_ranks(side),mpi_grid.Get_Send_Tag(mpi_grid.side_neighbor_directions(side))));}
    // receive right (back) colors and initialize union find
    UNION_FIND<> union_find(total_global_colors);
    {ARRAY<MPI::Request> recv_requests;
    for(int side=2;side<=T_PARALLEL_GRID::number_of_faces_per_cell;side+=2)if(mpi_grid.side_neighbor_ranks(side)!=MPI::PROC_NULL){
        Resize_Helper(colors_copy(side),local_grid,boundary_regions(side));
        MPI_PACKAGE package=mpi_grid.Package_Cell_Data(colors_copy(side),boundary_regions(side));
        packages.Append(package);
        recv_requests.Append(package.Irecv(*mpi_grid.comm,mpi_grid.side_neighbor_ranks(side),mpi_grid.Get_Recv_Tag(mpi_grid.side_neighbor_directions(side))));}
    MPI::Status status;
    while(MPI_UTILITIES::Wait_Any(recv_requests,status)){
        int side;for(side=2;side<=T_PARALLEL_GRID::number_of_faces_per_cell;side+=2)if(mpi_grid.Get_Recv_Tag(mpi_grid.side_neighbor_directions(side))==status.Get_tag()) break;
        Find_Color_Matches(color_map,union_find,colors_copy(side),boundary_regions(side),global_color_offset);}}

    // synchronize union find
    UNION_FIND<> final_union_find;
    {ARRAY<char> union_find_buffer(MPI_UTILITIES::Pack_Size(union_find,*mpi_grid.comm)+1);
    {int position=0;MPI_UTILITIES::Pack(union_find,union_find_buffer,position,*mpi_grid.comm);}
    MPI::Datatype union_find_type=MPI::PACKED.Create_contiguous(union_find_buffer.m);union_find_type.Commit();
    MPI::Op union_find_merge_op;union_find_merge_op.Init(Union_Find_Merge_Op,true);
    ARRAY<char> final_union_find_buffer(union_find_buffer.m);
    union_find_merge_op_comm=mpi_grid.comm;
    mpi_grid.comm->Allreduce(union_find_buffer.Get_Array_Pointer(),final_union_find_buffer.Get_Array_Pointer(),1,union_find_type,union_find_merge_op);
    {int position=0;MPI_UTILITIES::Unpack(final_union_find,final_union_find_buffer,position,*mpi_grid.comm);}
    union_find_type.Free();union_find_merge_op.Free();}

    // fix color map for global colors
    number_of_regions=0;
    ARRAY<int> global_to_final_color_map(total_global_colors);
    for(int i=1;i<=total_global_colors;i++){
        int root=final_union_find.Find(i);
        if(!global_to_final_color_map(root)) global_to_final_color_map(root)=++number_of_regions;
        global_to_final_color_map(i)=global_to_final_color_map(root);}
    for(int i=1;i<=color_map.domain.max_corner.x;i++)if(color_map(i)>0) color_map(i)=global_to_final_color_map(color_map(i)+global_color_offset);

    // find list of processes corresponding to each color
    int end=0;
    color_ranks.Clean_Memory();
    color_ranks.Resize(number_of_regions);
    for(int r=0;r<mpi_grid.number_of_processes;r++){
        int start=end+1;end+=global_color_counts(r+1);
        for(int i=start;i<=end;i++)color_ranks(global_to_final_color_map(i)).Append_Unique(r);}
    for(int color=1;color<=color_ranks.m;color++) assert(color_ranks(color).m>1 || mpi_grid.side_neighbor_ranks.Contains(mpi_grid.rank));

    // remap colors
    Remap_Colors(color_map,RANGE<typename T_PARALLEL_GRID::VECTOR_INT>::Centered_Box());

    LOG::cout<<"final colors: "<<color_ranks.m<<" global, "<<number_of_regions-color_ranks.m<<" local"<<std::endl;

    // remap color_touches_uncolorable
    if(color_touches_uncolorable){
        ARRAY<bool> new_color_touches_uncolorable(number_of_regions);
        for(int i=1;i<=color_touches_uncolorable->m;i++)if(color_map(i)>0) new_color_touches_uncolorable(color_map(i))|=(*color_touches_uncolorable)(i);
        color_touches_uncolorable->Exchange(new_color_touches_uncolorable);
        // synchronize color_touches_uncolorable, TODO: this could be merged with above communication
        ARRAY<bool> global_color_touches_uncolorable(color_ranks.m);
        ARRAY<bool>::Get(global_color_touches_uncolorable,*color_touches_uncolorable);
        mpi_grid.comm->Allreduce(&global_color_touches_uncolorable(1),&(*color_touches_uncolorable)(1),color_ranks.m,MPI_UTILITIES::Datatype<bool>(),MPI::LOR);}

    // finish
    MPI_UTILITIES::Wait_All(send_requests);
    MPI_PACKAGE::Free_All(packages);

    return color_ranks.m;
}
Example #20
int main(int argc, char * argv[]){

	int tag, send_tag;
	int to,from;
	int st_count, st_source, st_tag;
	double start_time = 0.0;
	double end_time = 0.0;
	MPI::Status status;

	MPI::Init(argc, argv);
	int rank = MPI::COMM_WORLD.Get_rank();
	int size = MPI::COMM_WORLD.Get_size();
	MPI_Barrier(MPI_COMM_WORLD);
	start_time = MPI_Wtime();
	int option;

	opterr = 0;
	int N = 0;
	string web_file;

	while ((option = getopt(argc, argv, "l:n:"))!= -1)
	{
		switch (option)
		{
			case 'n':
				N = atoi(optarg);
			break;
			case 'l':
				web_file = string(optarg);
			break;
			case '?':
				if (optopt == 'n')
					cerr<< "Option -"<<char(optopt)<<" requires an argument." <<endl;
			        else if (isprint (optopt))
					cerr<< "Unknown option `-"<<char(optopt)<<"'.\n"<<endl;
				else
					cerr<<  "Unknown option character `"<<std::hex<<optopt<<"'."<<endl;
		}
	}

	vector<string> URLs;
	char buffer[1024];
	string line;
	system("rm -fr /tmp/xiw412/");
	system("mkdir /tmp/xiw412/");

	if(rank == 0)
	{	
		fstream fread_file(web_file.c_str(), ios::in);
		while (getline(fread_file, line)){
		URLs.push_back(line);
		}
	}

	if(rank == 0)
	{
		to = 0;
		send_tag = 0;
		int round = 0;

		while(round * size < URLs.size())
		{
			for(int i = round * size; i < (round + 1) * size && i < URLs.size(); i++)
			{
				sprintf(buffer, "%s", URLs[i].c_str());
				
				cout << rank << ":"<< "sending " << buffer << endl;
				MPI::COMM_WORLD.Send(buffer,1024, MPI::CHAR, i%size, send_tag);
				to++;
				send_tag++;
			}

		
			tag = MPI::ANY_TAG;
			from = MPI::ANY_SOURCE;
			MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);
			st_count = status.Get_count(MPI::CHAR);
			st_source = status.Get_source();
			st_tag = status.Get_tag();
			
			string result("");
			result = parse(buffer, N);
			strcpy(buffer,result.c_str());

			MPI::COMM_WORLD.Send(buffer,1024, MPI::CHAR, 0, st_tag);
			
			for(int i = round * size; i < (round + 1) * size && i < URLs.size(); i++)
			{
				tag = MPI::ANY_TAG;
				from = MPI::ANY_SOURCE;
				MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);
				st_count = status.Get_count(MPI::CHAR);
				st_source = status.Get_source();
				st_tag = status.Get_tag();

				cout << rank <<":" << "received from "<<st_source<<endl<< buffer << endl;
			}

			round++;
		}

		for (int i = 1; i < size; ++i)
		{
			strcpy(buffer, "Finish");
			MPI::COMM_WORLD.Send(buffer,1024, MPI::CHAR, i, 0);
		}
	}
	else
	{
		while(1)
		{
			tag = MPI::ANY_TAG;
			from = MPI::ANY_SOURCE;
			MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);
			st_count = status.Get_count(MPI::CHAR);
			st_source = status.Get_source();
			st_tag = status.Get_tag();
//			cout<<" rank " << rank <<": " << "st_count:"<<st_count<<" st_source"<< st_source << " st_tag "<< st_tag << endl;
//			cout<<"         " << buffer <<endl;

			if (strcmp(buffer, "Finish") == 0)
				break;

			string result("");
			result = parse(buffer, N);
			strcpy(buffer,result.c_str());

			MPI::COMM_WORLD.Send(buffer,1024, MPI::CHAR, 0, st_tag);
		}
	}

	cout << "rank " << rank <<": "<<"I am dying, goodbye!"<<endl;
	MPI_Barrier(MPI_COMM_WORLD);
	end_time = MPI_Wtime();
	printf("The running time is : %lf \n",end_time-start_time);
	MPI::Finalize();
	return 0;
}
Example #21
void PSO::Swarm::run_master(int numIt, int vflag, ostream* out, ostream* hist) {
  double f(-INFINITY);
  int id(0);
  int j(0);
  double* particlePos(NULL);
  MPI::Status status;
  int flag;
  int src;
  int idle(0);
  int iter(0);
  queue<int> evalQueue;

  for (int i(0); i < swarm.size(); ++i) evalQueue.push(i);

  if (vflag) cerr << "Sending particles to slaves..." << endl;
  // initialize slaves
  for (int k(1); k < mpi_ntasks && (iter < numIt || numIt < 0); ++k) {
    j = evalQueue.front();
    evalQueue.pop();
    if (numIt > 0) {
      // numIt < 0 => evaluate at current position
      updateVelocity(j);
      updatePosition(j);
    }
    if (vflag) cerr << j << " " << (*swarm[j]) << endl;
    if (vflag) fprintf(stderr,"Sending particle %d to process %d.\n",j,k);
    MPI::COMM_WORLD.Send(&j,1,MPI::INT,k,1);
    MPI::COMM_WORLD.Send(swarm[j]->position.data(),numParams,MPI::DOUBLE,k,1);
    ++iter;
  }

  while (1) {
    flag = MPI::COMM_WORLD.Iprobe(MPI::ANY_SOURCE,MPI::ANY_TAG,status);
    if (flag) {
      // get function value
      src = status.Get_source();
      MPI::COMM_WORLD.Recv(&id,1,MPI::INT,MPI::ANY_SOURCE,MPI::ANY_TAG,status);
      if (vflag) fprintf(stderr,"Receiving particle %d from process %d.\n",id,src);
      MPI::COMM_WORLD.Recv(&f,1,MPI::DOUBLE,src,MPI::ANY_TAG,status);

      // update particle information
      swarm[id]->value = f;
      if (f >= swarm[id]->bestValue) {
        swarm[id]->bestPosition = swarm[id]->position;
        swarm[id]->bestValue = f;
      }
      ++numEvals;

      if (hist != NULL) {
         *hist << id << " " << (*swarm[id]) << endl;
      }

      // check for new best value
      if (f >= bestVal) {
        bestPos = swarm[id]->position;
        bestVal = f;
        bestParticle = id;
        if (out != NULL) {
          *out << numEvals << " " << bestVal << " ";
          for (int j(0); j < bestPos.size(); ++j) *out << bestPos[j] << " ";
          *out << endl;
        }
      }

      if (numIt > 0) {
        // update velocity and position
        updateVelocity(id);
        updatePosition(id);
        evalQueue.push(id);
      }

      // send new work to slave
      // if (iter < numIt) {
      if ((iter < numIt || numIt < 0) && ! evalQueue.empty()) {
        j = evalQueue.front();
        evalQueue.pop();
        if (vflag) fprintf(stderr,"Sending particle %d to process %d.\n",j,src);
        MPI::COMM_WORLD.Send(&j,1,MPI::INT,src,1);
        MPI::COMM_WORLD.Send(swarm[j]->position.data(),numParams,MPI::DOUBLE,src,1);
        ++iter;
      } else {
        ++idle;
        if (vflag) fprintf(stderr,"Sending done signal to process %d.\n",src);
        MPI::COMM_WORLD.Send(0,0,MPI::INT,src,0);
      }

      if (idle == mpi_ntasks-1) break;
    }
  }
}
Example #22
void manager_process(const MPI::Intracomm &comm_world, const int manager_rank, const int worker_size, std::string &maskName, std::string &imgDir, std::string &outDir, bool overwrite) {
	// first get the list of files to process
   	std::vector<std::string> filenames;
	std::vector<std::string> seg_output;
	std::vector<std::string> features_output;
	uint64_t t1, t0;

	t0 = cci::common::event::timestampInUS();
	getFiles(maskName, imgDir, outDir, filenames, seg_output, features_output, overwrite);

	t1 = cci::common::event::timestampInUS();
	printf("Manager ready at %d, file read took %lu us\n", manager_rank, t1 - t0);
	comm_world.Barrier();

	// now start the loop to listen for messages
	int curr = 0;
	int total = filenames.size();
	MPI::Status status;
	int worker_id;
	char ready;
	char *input;
	char *mask;
	char *output;
	int inputlen;
	int masklen;
	int outputlen;
	while (curr < total) {
		usleep(1000);

		if (comm_world.Iprobe(MPI_ANY_SOURCE, TAG_CONTROL, status)) {
/* where is it coming from */
			worker_id=status.Get_source();
			comm_world.Recv(&ready, 1, MPI::CHAR, worker_id, TAG_CONTROL);
//			printf("manager received request from worker %d\n",worker_id);
			if (worker_id == manager_rank) continue;

			if(ready == WORKER_READY) {
				// tell worker that manager is ready
				comm_world.Send(&MANAGER_READY, 1, MPI::CHAR, worker_id, TAG_CONTROL);
//				printf("manager signal transfer\n");
/* send real data */
				inputlen = filenames[curr].size() + 1;  // add one to create the zero-terminated string
				masklen = seg_output[curr].size() + 1;
				outputlen = features_output[curr].size() + 1;
				input = new char[inputlen];
				memset(input, 0, sizeof(char) * inputlen);
				strncpy(input, filenames[curr].c_str(), inputlen);
				mask = new char[masklen];
				memset(mask, 0, sizeof(char) * masklen);
				strncpy(mask, seg_output[curr].c_str(), masklen);
				output = new char[outputlen];
				memset(output, 0, sizeof(char) * outputlen);
				strncpy(output, features_output[curr].c_str(), outputlen);

				comm_world.Send(&inputlen, 1, MPI::INT, worker_id, TAG_METADATA);
				comm_world.Send(&masklen, 1, MPI::INT, worker_id, TAG_METADATA);
				comm_world.Send(&outputlen, 1, MPI::INT, worker_id, TAG_METADATA);

				// now send the actual string data
				comm_world.Send(input, inputlen, MPI::CHAR, worker_id, TAG_DATA);
				comm_world.Send(mask, masklen, MPI::CHAR, worker_id, TAG_DATA);
				comm_world.Send(output, outputlen, MPI::CHAR, worker_id, TAG_DATA);
				curr++;

				delete [] input;
				delete [] mask;
				delete [] output;

			}

			if (curr % 100 == 1) {
				printf("[ MANAGER STATUS ] %d tasks remaining.\n", total - curr);
			}

		}
	}
/* tell everyone to quit */
	int active_workers = worker_size;
	while (active_workers > 0) {
		usleep(1000);

		if (comm_world.Iprobe(MPI_ANY_SOURCE, TAG_CONTROL, status)) {
		/* where is it coming from */
			worker_id=status.Get_source();
			comm_world.Recv(&ready, 1, MPI::CHAR, worker_id, TAG_CONTROL);
//			printf("manager received request from worker %d\n",worker_id);
			if (worker_id == manager_rank) continue;

			if(ready == WORKER_READY) {
				comm_world.Send(&MANAGER_FINISHED, 1, MPI::CHAR, worker_id, TAG_CONTROL);
//				printf("manager signal finished\n");
				--active_workers;
			}
		}
	}
}
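The manager's control channel implies a simple worker-side handshake: announce readiness, then learn whether metadata and data follow or the run is over. A sketch of that counterpart (the constants mirror the snippet's conventions; their values here are assumptions):

#include <mpi.h>

// Assumed values standing in for the example's project-defined constants.
static const int  TAG_CONTROL      = 1;
static const char WORKER_READY     = 'R';
static const char MANAGER_READY    = 'Y';
static const char MANAGER_FINISHED = 'F';

// Sketch: one round of the ready/answer handshake on the control tag.
char request_work(const MPI::Intracomm& comm, int manager_rank) {
    char answer = MANAGER_FINISHED;
    comm.Send(&WORKER_READY, 1, MPI::CHAR, manager_rank, TAG_CONTROL);
    comm.Recv(&answer, 1, MPI::CHAR, manager_rank, TAG_CONTROL);
    return answer;   // MANAGER_READY means file names follow on other tags
}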
Example #23
int main(int argc, char* argv[])
{
  int pid; //For rank of current process
  int no_of_process; //To find the total number of processes
  int size; //Size of processes to be allocated for each process.

  //Initializing the MPI environment
  MPI::Init ( argc, argv );

  //Getting the number of processes
  no_of_process = MPI::COMM_WORLD.Get_size();

  //Handling if run as a single application.
  if(no_of_process<2){
    cout<<"\n ERROR: You'll need atleast 2 processes to run this application.\n\n";
    MPI::Finalize();
    return 0;
  }

  //argv[1] - PERCENT OF KEYWORDS REQUIRED ; argv[2] - FOLDER PATH
  if(!argv[1] || !argv[2]){
    cout<<"\n\n Parameter not provided. Quitting\n";
    MPI::Finalize();
    return 0;
  }

  //Get the process ID
  pid = MPI::COMM_WORLD.Get_rank();

  // Process ID 0 => Initial Process
  if(pid==0){
  	  queue<string> que;
      que.push(string(argv[2],strlen(argv[2])));

      /********* INITIAL STRUCTURE TO HAVE SOME VALUES IN THE QUEUE ***************/

      string dir = que.front();
      que.pop();
      DIR *dp;
      struct dirent *dirp;

      if((dp = opendir(dir.c_str())) == NULL) {
        cout << "Error(" << errno << ") opening " << dir << endl;
        return errno;
      }

      while ((dirp = readdir(dp)) != NULL) {
        if(((string)dirp->d_name).compare(".")==0||((string)dirp->d_name).compare("..")==0){
          continue;
        }

        que.push(dir+"/"+string(dirp->d_name)); //If only this statement is present, we push all the files into the queue
      }
      closedir(dp);

      /********* INITIAL STRUCTURE TO HAVE SOME VALUES IN THE QUEUE ***************/


      while(!que.empty()){

        // ======== FUNCTION TO PRINT QUEUE VALUES ========

        queue<string> que3;
        que3=que;
        cout<<"\n\n PARENT Queue : "<<endl;
        //Temp function to print the value of the queue
        while(!que3.empty()){
          cout<<que3.front()<<endl;
          que3.pop();
        }

        //Allocate work to processes equally.
        int i=0;
        size=1; //By default, allocating one directory per process
        string buf; //Buffer to send the folders to the subordinate processes
        if(que.size()>(no_of_process-1)){
          size=ceil((float)que.size()/(no_of_process-1));
        }

        /************* PARENT SENDER PROCESS ***********************/
        /************* ===================== ***********************/
        while(!que.empty() && i<=no_of_process-1){
            int j=0;
            buf="";
            while(j<size && !que.empty()){
              buf+=que.front();
              que.pop();
              buf+=";";
              j++;
            }

         // MPI::Comm::Send(const void* buf, int count, MPI::Datatype& datatype, int dest, int tag)
            MPI::COMM_WORLD.Send(buf.c_str(), buf.length(), MPI::CHAR, i+1, i+1);
            i++;
        }

        /************* PARENT RECEIVER PROCESS ***********************/
        /************* ======================= ***********************/
        while(i>0){
          // cout<<"\n\n Process 0 Waiting to receive from child";
	        MPI::Status status;

          //Probe for values first
	        MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, status);

	        int l = status.Get_count(MPI::CHAR);
	        char *buf = new char[l];
	        const auto sender = status.Get_source();
	        const auto tag = status.Get_tag();

        //MPI::Comm::Recv(void* buf, int count, MPI::Datatype& datatype, int source, int tag, MPI::Status* status)
          MPI::COMM_WORLD.Recv(buf, l, MPI::CHAR, sender, tag, status);

	        string fname(buf, l);
	        delete [] buf;
	        vector<string> fnames;
	        boost::split(fnames, fname, boost::is_any_of(";"));

	        for(int k=0;k<fnames.size();k++){
            if(fnames[k].length())
            que.push(fnames[k]);
	        }
	        i--;
	       }
       }


       vector<int> processes_with_files; //Vector to store only the files with ranks
       set<string> queue_values;
       vector<string> vec_queue_values;

      /************* IF QUEUE EMPTY, PROCEED TO QUERY PROCESSING ***********************/
      /************* =========================================== ***********************/
      if(que.empty()){


        //Message asking children to send their file availability
        string send_rank_message="SEND IF YOU HAVE";

        //Send message to children to send if they have files with them
        for(int rank_values=1;rank_values<no_of_process;rank_values++){
          //MPI::Comm::Send(const void* buf, int count, MPI::Datatype& datatype, int dest, int tag)
          MPI::COMM_WORLD.Send(send_rank_message.c_str(), send_rank_message.length(), MPI::CHAR, rank_values, rank_values);
        }

        //Values for reception
        int rank_received[no_of_process];
        rank_received[0]=0; //Parent process - So excluding it.

        for(int rank_values=1;rank_values<no_of_process;rank_values++){
          //For probe status store
          MPI::Status status;
          //Probe for incoming values
          MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, status);
          //Get source and tag
          const auto sender = status.Get_source();
          const auto tag = status.Get_tag();
          //MPI::Comm::Recv(void* buf, int count, MPI::Datatype& datatype, int source, int tag, MPI::Status* status)
          MPI::COMM_WORLD.Recv(&rank_received[sender], 1, MPI::INT, sender, tag, status);
        }

        //String for rank values to be sent to all the child processes
        string processes_with_files_str="";

        //Storing the rank of processes that have files
        for(int i=1;i<no_of_process;i++){
          if(rank_received[i]==1){
            processes_with_files_str+= to_string(i) + ";";
            processes_with_files.push_back(i);
          }
        }

        string process_list_message = "ABOUT TO SEND PROCESS VALUES";
        for(int i=0;i<processes_with_files.size();i++){
          //MPI::Comm::Send(const void* buf, int count, MPI::Datatype& datatype, int dest, int tag)
          MPI::COMM_WORLD.Send(process_list_message.c_str(), process_list_message.length(), MPI::CHAR, processes_with_files[i], processes_with_files[i]);
          MPI::COMM_WORLD.Send(processes_with_files_str.c_str(), processes_with_files_str.length(), MPI::CHAR, processes_with_files[i], processes_with_files[i]);
          // cout<<"\n\n Parent has sent the value!\n";
        }


      }//End of queue empty condition

      int val_recv;

      //Expecting reply from all child processes
      for(int i=0;i<processes_with_files.size();i++){
        MPI::Status status;
        //Probe for incoming values
        MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, status);
        //Get source and tag
        auto sender = status.Get_source();
        auto tag = status.Get_tag();
        //MPI::Comm::Recv(void* buf, int count, MPI::Datatype& datatype, int source, int tag, MPI::Status* status)
        MPI::COMM_WORLD.Recv(&val_recv, 1, MPI::INT, sender, tag, status);
      }

      while(1){

        int choice;
        string task_message;
        char whatfile[400];

        cout<<"\n\n Graph Processed. What do you want to do now? \n 1. Find all the files related to another file\n 2. Find the Transitive Closure of a file\n 3. Exit\n 4. Choice : ";
        cin>>choice;
        cin.ignore (std::numeric_limits<std::streamsize>::max(), '\n');

        switch(choice) {
          case 1: cout<<"\n Enter the file name : ";
                  cin.getline(whatfile,400);
                  task_message=string(whatfile,strlen(whatfile))+";Related Files";
                  cout<<"\n"<<task_message;
                  break;
          case 2: cout<<"\n Enter the file name you wish to find the transitive closure for : ";
                  cin.getline(whatfile,400);
                  task_message=string(whatfile,strlen(whatfile))+";Transitive Closure;Just Tell";
                  queue_values.insert(whatfile);
                  vec_queue_values.push_back(whatfile);
                  break;
          case 3: task_message="EXIT NOW";

          default:;
        }

        for(int rank_values=1;rank_values<no_of_process;rank_values++){
          //MPI::Comm::Send(const void* buf, int count, MPI::Datatype& datatype, int dest, int tag)
          MPI::COMM_WORLD.Send(task_message.c_str(), task_message.length(), MPI::CHAR, rank_values, rank_values);
        }

        if(choice==3){
          cout<<"\n PARENT : QUITTING. BYE!";
          break;
        }

        else if (choice==2){
          
          int send_flag=1;
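
          //Iterative frontier expansion: each round broadcasts the newly
          //discovered files to every child, collects the files reachable
          //from them, and stops once a round discovers nothing new
          //(send_flag stays 0).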


          while(send_flag) {

            send_flag=0;

            // cout<<"\n\n ********************************** Value Sent for Transitive closure!";

            //One reply is expected per frontier file that was queried; accumulate
            //every reply instead of keeping only the last one, and free each
            //temporary buffer so it does not leak
            string recd_string;

            for(int i=0;i<vec_queue_values.size();i++){

              MPI::Status status;
              //Probe for incoming values
              MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, status);
              //Get the length, source and tag
              int char_length = status.Get_count(MPI::CHAR);
              char *char_value = new char[char_length];
              auto sender = status.Get_source();
              auto tag = status.Get_tag();
              //MPI::Comm::Recv(void* buf, int count, MPI::Datatype& datatype, int source, int tag, MPI::Status* status)
              // cout<<"\n\n Parent waiting to receive!\n\n ";
              MPI::COMM_WORLD.Recv(char_value, char_length, MPI::CHAR, sender, tag, status);

              recd_string.append(char_value, char_length);
              recd_string += ";";
              delete [] char_value;
            }

          vector<string> recd_file_vector;
          string send_string_val="";

          //Clear the vector queue value
          vec_queue_values.clear();

          boost::split(recd_file_vector, recd_string, boost::is_any_of(";"));


          for(int i=0;i<recd_file_vector.size();i++){

            if(recd_file_vector[i].length()){
              
              if(queue_values.find(recd_file_vector[i])==queue_values.end()){
                send_flag=1;
                queue_values.insert(recd_file_vector[i]);
                vec_queue_values.push_back(recd_file_vector[i]);
                send_string_val += recd_file_vector[i] + ";";
              
              }
            }
          }

          send_string_val += "Transitive Closure;Find One";

          if(send_flag){

            for(int rank_values=1;rank_values<no_of_process;rank_values++){  
              // cout<<"\n\n Sending value to "<<rank_values;
               MPI::COMM_WORLD.Send(send_string_val.c_str(), send_string_val.length(), MPI::CHAR, rank_values, rank_values);
            }
          }

          else{
            cout<<"\n\n Connected File Names : \n";
            queue_values.erase(whatfile);
            for(auto x: queue_values){
              cout<<x<<"\n";
            }
            queue_values.clear();
            vec_queue_values.clear();
          }
        }

        }

        else{

          MPI::Status status;
          //Probe for incoming values
          MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, status);
          //Get source and tag
          auto sender = status.Get_source();
          auto tag = status.Get_tag();
          //MPI::Comm::Recv(void* buf, int count, MPI::Datatype& datatype, int source, int tag, MPI::Status* status)
          // cout<<"\n\n Parent waiting to receive!\n\n ";
          MPI::COMM_WORLD.Recv(&val_recv, 1, MPI::INT, sender, tag, status);
          // cout<<"\n Parent received!";
        }

      } // End of While Loop


    } //END OF PROCESS 0
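
A note on the receive pattern used throughout this example: because the
parent never knows how long a child's file list will be, every receive is a
Probe (to learn the size via Get_count), an exact-size allocation, and then
the matching Recv. A minimal, self-contained sketch of that idiom (the
function name and tag parameter are illustrative, not from the code above):

#include <mpi.h>
#include <string>

// Receive a variable-length string from any rank on the given tag:
// probe first so the buffer can be sized exactly, then Recv.
std::string recv_any_string(int tag) {
    MPI::Status status;
    MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, tag, status);

    int length = status.Get_count(MPI::CHAR);        // bytes waiting
    std::string msg(length, '\0');
    MPI::COMM_WORLD.Recv(&msg[0], length, MPI::CHAR,
                         status.Get_source(), tag);
    return msg;
}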
Example No. 24
0
int main ( int argc, char *argv[] )

//****************************************************************************80
//
//  Purpose:
//
//    MAIN is the main program for SEARCH.
//
//  Discussion:
//
//    SEARCH demonstrates the use of MPI routines to carry out a search.
//
//    An array of given size is to be searched for occurrences of a
//    specific value.
//
//    The search is done in parallel.  A master process generates the
//    array and the target value, then distributes the information among
//    a set of worker processes, and waits for them to communicate back
//    the (global) index values at which occurrences of the target value
//    were found.
//
//    An interesting feature of this program is the use of dynamically
//    allocated arrays, which allows the master program to set aside just
//    enough memory for the whole array, and each worker program to set
//    aside just enough memory for its own part of the array.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license. 
//
//  Modified:
//
//    01 September 2009
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    William Gropp, Ewing Lusk, Anthony Skjellum,
//    Using MPI: Portable Parallel Programming with the
//    Message-Passing Interface,
//    Second Edition,
//    MIT Press, 1999,
//    ISBN: 0262571323.
//
{
  int *a;
  int dest;
  float factor;
  int global;
  int i;
  int id;
  int n;
  int npart;
  int p;
  int source;
  int start;
  MPI::Status status;
  int tag;
  int tag_target = 1;
  int tag_size = 2;
  int tag_data = 3;
  int tag_found = 4;
  int tag_done = 5;
  int target;
  int workers_done;
  int x;
//
//  Initialize MPI.
//
  MPI::Init ( argc, argv );
//
//  Get this process's rank.
//
  id = MPI::COMM_WORLD.Get_rank ( );
//
//  Find out how many processes are available.
//
  p = MPI::COMM_WORLD.Get_size ( );

  if ( id == 0 )
  {
    timestamp ( );
    cout << "\n";
    cout << "SEARCH - Master process:\n";
    cout << "  C++ version\n";
    cout << "  An example MPI program to search an array.\n";
    cout << "\n";
    cout << "  Compiled on " << __DATE__ << " at " << __TIME__ << ".\n";
    cout << "\n";
    cout << "  The number of processes is " << p << "\n";
  }

  cout << "\n";
  cout << "Process " << id << " is active.\n";
//
//  Have the master process generate the target and data.  In a more 
//  realistic application, the data might be in a file which the master 
//  process would read.  Here, the master process decides.
//
  if ( id == 0 )
  {
//
//  Pick the number of data items per process, and set the total.
//
    factor = ( float ) rand ( ) / ( float ) RAND_MAX;
    npart = 50 + ( int ) ( factor * 100.0E+00 );
    n = npart * p;

    cout << "\n";
    cout << "SEARCH - Master process:\n";
    cout << "  The number of data items per process is " << npart << "\n";
    cout << "  The total number of data items is       " << n << ".\n";
//
//  Now allocate the master copy of A, fill it with values, and pick 
//  a value for the target.
//
    a = new int[n];

    factor = ( float ) n / 10.0E+00 ;

    for ( i = 0; i < n; i++ ) 
    {
      a[i] = ( int ) ( factor * ( float ) rand ( ) / ( float ) RAND_MAX );
    }
    target = a[n/2];

    cout << "  The target value is " << target << ".\n";
//
//  The worker processes need to have the target value, the number of data items,
//  and their individual chunk of the data vector.
//
    for ( i = 1; i <= p-1; i++ )
    {
      dest = i;
      tag = tag_target;

      MPI::COMM_WORLD.Send ( &target, 1, MPI::INT, dest, tag );

      tag = tag_size;

      MPI::COMM_WORLD.Send ( &npart, 1, MPI::INT, dest, tag );

      start = ( i - 1 ) * npart;
      tag = tag_data;

      MPI::COMM_WORLD.Send ( a+start, npart, MPI::INT, dest, tag );
    }
//
//  Now the master process simply waits for each worker process to report that 
//  it is done.
//
    workers_done = 0;

    while ( workers_done < p-1 )
    {
      MPI::COMM_WORLD.Recv ( &x, 1, MPI::INT, MPI::ANY_SOURCE, MPI::ANY_TAG, status );

      source = status.Get_source ( );
      tag = status.Get_tag ( );
    
      if ( tag == tag_done )
      {
        workers_done = workers_done + 1;
      }
      else if ( tag == tag_found )
      {
        cout << "P" << source << "  " << x << "  " << a[x] << "\n";
      }
      else
      {
        cout << "  Master process received message with unknown tag = "
             << tag << ".\n";
      }

    }
//
//  The master process can throw away A now.
//
    delete [] a;
  }
//
//  Each worker process expects to receive the target value, the number of data
//  items, and the data vector.
//
  else 
  {
    source = 0;
    tag = tag_target;

    MPI::COMM_WORLD.Recv ( &target, 1, MPI::INT, source, tag, status );
 
    source = 0;
    tag = tag_size;

    MPI::COMM_WORLD.Recv ( &npart, 1, MPI::INT, source, tag, status );

    a = new int[npart];

    source = 0;
    tag = tag_data;

    MPI::COMM_WORLD.Recv ( a, npart, MPI::INT, source, tag, status );
//
//  The worker simply checks each entry to see if it is equal to the target
//  value.
//
    for ( i = 0; i < npart; i++ )
    {
      if ( a[i] == target )
      {
        global = ( id - 1 ) * npart + i;
        dest = 0;
        tag = tag_found;

        MPI::COMM_WORLD.Send ( &global, 1, MPI::INT, dest, tag );
      }
    }
//
//  When the worker is finished with the loop, it sends a dummy data value with
//  the tag "TAG_DONE" indicating that it is done.
//
    dest = 0;
    tag = tag_done;

    MPI::COMM_WORLD.Send ( &target, 1, MPI::INT, dest, tag );

    delete [] ( a );
     
  }
//
//  Terminate MPI.
//
  MPI::Finalize ( );
//
//  Terminate.
//
  if ( id == 0 )
  {
    cout << "\n";
    cout << "SEARCH - Master process:\n";
    cout << "  Normal end of execution.\n";
    cout << "\n";
    timestamp ( );
  } 
  return 0;
}
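
The example above reports every match individually with point-to-point
messages and tags. When only an aggregate (say, the number of matches) is
needed, the same distribution can be written with collectives. A compact
sketch under the assumption that the process count divides n evenly
(parallel_count is an illustrative name, not part of the example):

#include <mpi.h>

// Count occurrences of target: scatter equal chunks from rank 0,
// count locally, then sum the partial counts back at rank 0.
int parallel_count(const int *a, int n, int target) {
    int p = MPI::COMM_WORLD.Get_size();
    int npart = n / p;                       // assumes p divides n
    int *chunk = new int[npart];

    MPI::COMM_WORLD.Scatter(a, npart, MPI::INT, chunk, npart, MPI::INT, 0);

    int local = 0;
    for (int i = 0; i < npart; i++)
        if (chunk[i] == target)
            local = local + 1;

    int total = 0;
    MPI::COMM_WORLD.Reduce(&local, &total, 1, MPI::INT, MPI::SUM, 0);

    delete [] chunk;
    return total;                            // meaningful on rank 0 only
}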
Example No. 25
0
void *mpi_thread(void *arg) {
    struct State *state = (struct State *)arg;

    int buf;
    MPI::Status status;
    set<int> queue;

    bool inside = false;

    while (1) {
        MPI::COMM_WORLD.Recv(&buf, 1, MPI::INT, MPI::ANY_SOURCE, MPI::ANY_TAG, status);
        state->lamport = max(state->lamport, buf) + 1;
        switch (status.Get_tag()) {
            case INSIDE_TAG: // enter/exit
                if (!inside) {
                    for (int i = 0; i < state->size; i++) {
                        if (i != state->rank) {
                            MPI::COMM_WORLD.Send(&state->lamport, 1, MPI::INT, i, REQUEST_TAG);
                        }
                    }
                    int request_clock = state->lamport;
                    int replies_received = 0;
                    while (replies_received < state->size - 1) {
                        MPI::COMM_WORLD.Recv(&buf, 1, MPI::INT, MPI::ANY_SOURCE, MPI::ANY_TAG, status);
                        state->lamport = max(state->lamport, buf) + 1;
                        switch (status.Get_tag()) {
                            case REQUEST_TAG:
                                if (request_clock < buf || (buf == request_clock && state->rank < status.Get_source())) {
                                    // current process has higher priority
                                    queue.insert(status.Get_source());
                                } else {
                                    // other process has higher priority
                                    MPI::COMM_WORLD.Send(&state->lamport, 1, MPI::INT, status.Get_source(), AGREE_TAG);
                                }
                                break;
                            case AGREE_TAG:
                                if (buf > request_clock) {
                                    replies_received++;
                                    log(state, "comm: Agree %d received from %d", buf, status.Get_source());
                                }
                                break;
                            default:
                                log(state, "comm: Unknown message tag %d", status.Get_tag());
                        }
                    }
                    inside = true;
                    unique_lock<mutex> lck(state->mtx);
                    state->ready = true;
                    state->cv.notify_all();
                    lck.unlock();
                } else {
                    // broadcast agree to all in queue
                    char *repr = (char *)malloc(1024);
                    *repr = '\0';
                    for (int p : queue) {
                        sprintf(repr + strlen(repr), "%d, ", p);
                    }
                    state->lamport++;
                    log(state, "comm: !!! LEFT, %s", repr);
                    free(repr);
                    for (int p : queue) {
                        MPI::COMM_WORLD.Send(&state->lamport, 1, MPI::INT, p, AGREE_TAG);
                    }
                    queue.clear();
                    inside = false;
                }
                break;
            case REQUEST_TAG:
                if (inside) {
                    queue.insert(status.Get_source());
                } else {
                    MPI::COMM_WORLD.Send(&state->lamport, 1, MPI::INT, status.Get_source(), AGREE_TAG);
                    state->lamport++;
                }
                break;
            case AGREE_TAG:
                break;
            default:
                log(state, "comm: Unknown message tag %d", status.Get_tag());
        }
    }
}
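
Two pieces of the algorithm above are worth isolating: the Lamport clock
update applied on every receive, and the deferral rule that decides whether
an incoming REQUEST is answered immediately or queued. A small sketch of
both (function names are illustrative):

#include <algorithm>

// Lamport clock rule on message receipt: L = max(L, received) + 1.
int on_receive(int local_clock, int msg_clock) {
    return std::max(local_clock, msg_clock) + 1;
}

// Ricart-Agrawala deferral: return true if our pending request wins,
// i.e. the reply is queued instead of an immediate AGREE being sent.
bool defer_reply(int my_request_clock, int my_rank,
                 int their_clock, int their_rank) {
    if (my_request_clock != their_clock)
        return my_request_clock < their_clock;  // earlier timestamp wins
    return my_rank < their_rank;                // ties broken by rank
}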
Example No. 26
0
void DataBus::createDynamicTypes(int bodyNum)
{
    LOG_DEBUG("Building dynamic MPI types for fast node sync");
    auto& engine = Engine::getInstance();
    GCMDispatcher* dispatcher = engine.getDispatcher();
    Body* body = engine.getBody(bodyNum);//ById( engine.getDispatcher()->getMyBodyId() );
    TetrMeshSecondOrder* mesh = (TetrMeshSecondOrder*)body->getMeshes();

    // TODO add more cleanup code here to prevent memory leaks
    if (MPI_NODE_TYPES != NULL) {
        LOG_TRACE("Cleaning old types");
        for (int i = 0; i < numberOfWorkers; i++)
        {
            for (int j = 0; j < numberOfWorkers; j++)
            {
                LOG_TRACE("Cleaning type " << i << " " << j );
                LOG_TRACE("Size " << i << " " << j << " = " << local_numbers[i][j].size());
                if (local_numbers[i][j].size() > 0)
                    MPI_NODE_TYPES[i][j].Free();
            }
            // release each per-worker row as well, not just the outer array
            delete[] MPI_NODE_TYPES[i];
        }
        delete[] MPI_NODE_TYPES;
    }

    if (local_numbers != NULL) {
        for (int i = 0; i < numberOfWorkers; i++)
            delete[] local_numbers[i];
        delete[] local_numbers;
    }

    // FIXME - reallocating these tables on every call is overhead
    local_numbers = new vector<int>*[numberOfWorkers];
    vector<int> **remote_numbers = new vector<int>*[numberOfWorkers];
    MPI_NODE_TYPES = new MPI::Datatype*[numberOfWorkers];

    for (int i = 0; i < numberOfWorkers; i++)
    {
        local_numbers[i] = new vector<int>[numberOfWorkers];
        remote_numbers[i] = new vector<int>[numberOfWorkers];
        MPI_NODE_TYPES[i] = new MPI::Datatype[numberOfWorkers];
    }

    BARRIER("DataBus::createDynamicTypes#0");

    // find all remote nodes
    for (int j = 0; j < mesh->getNodesNumber(); j++)
    {
        CalcNode& node = mesh->getNodeByLocalIndex(j);
        if ( node.isRemote() )
        {
            //LOG_DEBUG("N: " << j);
            //LOG_DEBUG("R1: " << j << " " << mesh->getBody()->getId());
            int owner = dispatcher->getOwner(node.coords/*, mesh->getBody()->getId()*/);
            //LOG_DEBUG("R2: " << owner);
            assert_ne(owner, rank );
            local_numbers[rank][owner].push_back( mesh->nodesMap[node.number] );
            remote_numbers[rank][owner].push_back(node.number);
        }
    }

    BARRIER("DataBus::createDynamicTypes#1");

    LOG_DEBUG("Requests prepared:");
    for (int i = 0; i < numberOfWorkers; i++)
        for (int j = 0; j < numberOfWorkers; j++)
            LOG_DEBUG("Request size from #" << i << " to #" << j << ": " << local_numbers[i][j].size());

    // sync types
    unsigned int max_len = 0;
    for (int i = 0; i < numberOfWorkers; i++)
        for (int j = 0; j < numberOfWorkers; j++)
            if (local_numbers[i][j].size() > max_len)
                max_len = local_numbers[i][j].size();

    vector<int> lengths;
    for (unsigned int i = 0; i < max_len; i++)
        lengths.push_back(1);

    int info[3];

    vector<MPI::Request> reqs;

    for (int i = 0; i < numberOfWorkers; i++)
        for (int j = 0; j < numberOfWorkers; j++)
            if (local_numbers[i][j].size() > 0)
            {
                info[0] = remote_numbers[i][j].size();
                info[1] = i;
                info[2] = j;
                MPI_NODE_TYPES[i][j] =  MPI_ELNODE.Create_indexed(
                    local_numbers[i][j].size(),
                    &lengths[0],
                    &local_numbers[i][j][0]
                );
                MPI_NODE_TYPES[i][j].Commit();
                reqs.push_back(
                    MPI::COMM_WORLD.Isend(
                        &remote_numbers[i][j][0],
                        remote_numbers[i][j].size(),
                        MPI::INT,
                        j,
                        TAG_SYNC_NODE_TYPES
                    )
                );
                reqs.push_back(
                    MPI::COMM_WORLD.Isend(
                        info,
                        3,
                        MPI::INT,
                        j,
                        TAG_SYNC_NODE_TYPES_I
                    )
                );
            }

    BARRIER("DataBus::createDynamicTypes#2");

    MPI::Status status;

    while (MPI::COMM_WORLD.Iprobe(MPI::ANY_SOURCE, TAG_SYNC_NODE_TYPES_I, status))
    {
        MPI::COMM_WORLD.Recv(
            info,
            3,
            MPI::INT,
            status.Get_source(),
            TAG_SYNC_NODE_TYPES_I
        );
        local_numbers[info[1]][info[2]].resize(info[0]);
        MPI::COMM_WORLD.Recv(
            &local_numbers[info[1]][info[2]][0],
            info[0],
            MPI::INT,
            status.Get_source(),
            TAG_SYNC_NODE_TYPES
        );
        if (lengths.size() < (unsigned)info[0])
            for (int i = lengths.size(); i < info[0]; i++)
                lengths.push_back(1);
        for(int i = 0; i < info[0]; i++)
            local_numbers[info[1]][info[2]][i] = mesh->nodesMap[ local_numbers[info[1]][info[2]][i] ];
        MPI_NODE_TYPES[info[1]][info[2]] =  MPI_ELNODE.Create_indexed(
            info[0],
            &lengths[0],
            &local_numbers[info[1]][info[2]][0]
        );
        MPI_NODE_TYPES[info[1]][info[2]].Commit();
    }

    if (!reqs.empty())
        MPI::Request::Waitall(reqs.size(), &reqs[0]);
    BARRIER("DataBus::createDynamicTypes#3");

    for (int i = 0 ; i < numberOfWorkers; i++)
        delete[] remote_numbers[i];
    delete[] remote_numbers;
    LOG_DEBUG("Building dynamic MPI types for fast node sync done");
}
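
The core trick above is Create_indexed: it lets a single Send describe
elements scattered through an array, so remote nodes are transferred in one
message without manual packing. A minimal sketch with MPI::INT (the
positions and the function name are illustrative):

#include <mpi.h>

// Send elements 0, 3 and 7 of an int array in one message by
// describing their positions with an indexed datatype.
void send_scattered(const int *data, int dest, int tag) {
    int blocklens[3] = { 1, 1, 1 };
    int displs[3]    = { 0, 3, 7 };

    MPI::Datatype scattered = MPI::INT.Create_indexed(3, blocklens, displs);
    scattered.Commit();

    // One element of the derived type covers all three ints.
    MPI::COMM_WORLD.Send(data, 1, scattered, dest, tag);
    scattered.Free();
}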
Example No. 27
0
int main(int argc, char** argv)
{
	//_CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );

	//_CrtMemState s1;
	//_CrtMemCheckpoint( &s1 );

	/* run multiple trials of Develep 
	   input: trial text file
	   first column: number of trials to run
	   second column: parameter file for trials
	   third column: data for trials
	*/
	
		string trialsetup(argv[1]);
       // cout << "trialsetup: " + trialsetup + "\n";
		int totaltrials = 0;
		vector<int> trialset; 
		vector<string> paramfile;
		vector<string> datafile;

		ifstream fs(trialsetup);
		getTrialSetup(fs,totaltrials,trialset,paramfile,datafile);
		
		int numsent=0;

		//MPI stuff
		int master=0;
		MPI::Init();
		int numprocs = MPI::COMM_WORLD.Get_size();
		int myid = MPI::COMM_WORLD.Get_rank();
		//cout << "I am process " + to_string(static_cast<long long>(myid)) + " of " + to_string(static_cast<long long>(numprocs)) + "\n";
		MPI::Status status;
		//const char * pbuff,dbuff;
     try
	 {   
		if (myid==master){
        	//cout << "total trials: " + to_string(static_cast<long long>(totaltrials)) + "\n";
        	//cout << "In master loop\n";
        	cout << "Running trials of ellenGP: \n Number of trials: " + to_string(static_cast<long long>(totaltrials)) +"\n Number of processors: " + to_string(static_cast<long long>(numprocs)) + "\n";

        	// schedule tasks from master node
        	for (int i=0;i<min(numprocs-1,totaltrials);i++){
        		//cout << "sending " + paramfile.at(i) + " to process " + to_string(static_cast<long long>(i)) + "\n";
        		MPI::COMM_WORLD.Send(paramfile.at(i).c_str(),paramfile.at(i).length(),MPI::CHAR,i+1,i+1);
        		//cout << "sending " + datafile.at(i) + " to process " + to_string(static_cast<long long>(i)) + "\n";
				MPI::COMM_WORLD.Send(datafile.at(i).c_str(),datafile.at(i).length(),MPI::CHAR,i+1,i+1);
				numsent++;
				//cout << "numsent: " + to_string(static_cast<long long>(numsent)) + "\n";
        	}
        	//int curnumsent=numsent;
        	int stops =0;
        	// wait for exactly as many stop-acknowledgements as there are active
        	// workers; waiting for numprocs-1 would deadlock when there are
        	// fewer trials than workers
        	while(stops<min(numprocs-1,totaltrials)){
        		int ans;
        		MPI::COMM_WORLD.Recv(&ans,1,MPI::INT,MPI::ANY_SOURCE,MPI::ANY_TAG,status);
        		const int sender = status.Get_source();
        		//int anstype = status.Get_tag();
        		if (numsent < totaltrials){
        			MPI::COMM_WORLD.Send(paramfile.at(numsent).c_str(),paramfile.at(numsent).length(),MPI::CHAR,sender,numsent+1);
					MPI::COMM_WORLD.Send(datafile.at(numsent).c_str(),datafile.at(numsent).length(),MPI::CHAR,sender,numsent+1);
					++numsent;
        		}
        		else{
        			//cout << "sending stop command to process " + to_string(static_cast<long long>(sender)) + "\n";
        			MPI::COMM_WORLD.Send(MPI::BOTTOM,0,MPI::CHAR,sender,0);
        			++stops;
        		}

        	} cout << "out of master while loop\n";
        }
        else{
        	//cout << "in slave task \n";
        	// receive tasks and send completion messages to master
        	//cout << "in slave task. myid is " + to_string(static_cast<long long>(myid)) + " and totaltrials is " + to_string(static_cast<long long>(totaltrials)) + "\n";
        	bool cont = true;
        	while (cont){
				if (myid <= totaltrials){
					//char * pbuff,dbuff;
					//cout << "probe master status\n";
					MPI::COMM_WORLD.Probe(master, MPI::ANY_TAG, status);
					int l1 = status.Get_count(MPI::CHAR);
					char * pbuff = new char[l1];
					//cout << "Receive packet\n";
					MPI::COMM_WORLD.Recv(pbuff,l1,MPI::CHAR,master, MPI::ANY_TAG,status);
					//cout << "received pbuff value: " + string(pbuff) + "\n";
					if(status.Get_tag() !=0 ){

						MPI::COMM_WORLD.Probe(master, MPI::ANY_TAG, status);
						int l2 = status.Get_count(MPI::CHAR);
						char * dbuff = new char[l2];
						MPI::COMM_WORLD.Recv(dbuff,l2,MPI::CHAR,master, MPI::ANY_TAG,status);
						//cout << "received dbuff value: " + string(dbuff) + "\n";
						if(status.Get_tag() !=0 ){
							int tag = status.Get_tag();
							string pfile(pbuff,l1);
							string dfile(dbuff,l2);
							cout << "running process " + to_string(static_cast<long long>(tag)) + " of " + to_string(static_cast<long long>(totaltrials)) + " on processor " + to_string(static_cast<long long>(myid)) + " : " + pfile.substr(pfile.rfind('/')+1,pfile.size()) + ", " + dfile.substr(dfile.rfind('/')+1,dfile.size())  + "\n";
							//run develep
							runEllenGP(pfile,dfile,1,myid);
							//cout << "hello\n";
							cout << "\nfinished process " + to_string(static_cast<long long>(tag)) + " of " + to_string(static_cast<long long>(totaltrials)) + " on processor " + to_string(static_cast<long long>(myid)) + " : " + pfile.substr(pfile.rfind('/')+1,pfile.size()) + ", " + dfile.substr(dfile.rfind('/')+1,dfile.size())  + "\n";

							// send message when finished
							int tmp = 1;
							MPI::COMM_WORLD.Send(&tmp,1,MPI::INT,master,myid);
						}
						else{
							//cout << "status tag is zero on process " + to_string(static_cast<long long>(myid)) + "\n";
							cont=false;

						}


						delete [] dbuff;
					}
					else{
						//cout << "status tag is zero on process " + to_string(static_cast<long long>(myid)) + "\n";
						cont=false;
					}

					delete [] pbuff;

				}
				else{
					// more processes than trials: no task will ever arrive, so quit
					cont = false;
				}
        	}


        }





		MPI::Finalize();
		if(myid==master)
			cout << "All trials completed. Exiting..." << endl;
		//key = getchar();
	}
	catch(const std::bad_alloc&)
	{
		cout << "bad allocation error from processor " << to_string(static_cast<long long>(myid)) << "\n";
		exit(1);
	}
	catch(exception& er) 
	{
		cout << "Error: " << er.what() << endl;
		exit(1);

	}
	catch(...)
	{
		cout << "Exception Occurred."<<endl;
		exit(1);
	}
	return 0;
}
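
Stripped of the application details, the master loop above is the classic
dynamic task scheduler: seed one task per worker, hand the next task to
whichever worker reports back, and answer the final reports with an empty
message on tag 0 as the stop signal. A bare-bones sketch with task payloads
reduced to a single int (names are illustrative):

#include <mpi.h>
#include <algorithm>

// Master side of the scheduler: tags > 0 carry task ids, tag 0 stops.
void master(int ntasks) {
    int p = MPI::COMM_WORLD.Get_size();
    int next = 0;

    // Seed every worker (or as many workers as there are tasks).
    for (int w = 1; w <= std::min(p - 1, ntasks); w++) {
        MPI::COMM_WORLD.Send(&next, 1, MPI::INT, w, next + 1);
        next++;
    }
    // One reply arrives per completed task; answer each with either a
    // fresh task or, once the list is exhausted, a stop message.
    for (int done = 0; done < ntasks; done++) {
        int result;
        MPI::Status status;
        MPI::COMM_WORLD.Recv(&result, 1, MPI::INT,
                             MPI::ANY_SOURCE, MPI::ANY_TAG, status);
        if (next < ntasks) {
            MPI::COMM_WORLD.Send(&next, 1, MPI::INT, status.Get_source(), next + 1);
            next++;
        } else {
            MPI::COMM_WORLD.Send(MPI::BOTTOM, 0, MPI::INT, status.Get_source(), 0);
        }
    }
}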
Example No. 28
0
// int reducethread(Wordtoword *allwords, int totalwordsize, map_stringtostringint &localmap, WordFrequency *allwordsfreq, int totalwordsizefreq, std::unordered_map<string,int> &frequencymap)
// {
//     reduce(allwords, totalwordsize, localmap);  
//             reducefreq(allwordsfreq, totalwordsizefreq, frequencymap);
//         if(allwords!=nullptr)delete(allwords);
//         if(allwordsfreq!=nullptr)delete(allwordsfreq);                  
//         return 0;
// }
int main(int argc, char *argv[]) 
{ 

    MPI::Init(argc, argv); 
/*//////Datatype for Sending words*/
    MPI_Datatype MPI_Customword;
    MPI_Datatype type0[1] = { MPI_CHAR };
    int blocklen0[1] = { STRING_LENGTH};
    MPI_Aint disp0[1];
 
    disp0[0] = 0;
    
    MPI_Type_create_struct(1, blocklen0, disp0, type0, &MPI_Customword);
    MPI_Type_commit(&MPI_Customword);
/*//////Datatype for Sending words*/

/*//////Datatype for Sending words*/
    MPI_Datatype MPI_Custom_byte;
    MPI_Datatype type2[1] = { MPI_BYTE };
    int blocklen2[1] = { 1000 };
    MPI_Aint disp2[1];
 
    disp2[0] = 0;
    
    MPI_Type_create_struct(1, blocklen2, disp2, type2, &MPI_Custom_byte);
    MPI_Type_commit(&MPI_Custom_byte);
/*//////Datatype for Sending words*/

/*//////Datatype for Sending word cooccurances*/
    MPI_Datatype MPI_SingleWordtoWord;
    MPI_Datatype type[3] = { MPI_CHAR, MPI_CHAR, MPI_INT };
    int blocklen[3] = { STRING_LENGTH, STRING_LENGTH , 1 };
    MPI_Aint disp[3];
 
    disp[0] = 0;
    disp[1] = STRING_LENGTH;
    disp[2] = STRING_LENGTH*2;
    MPI_Type_create_struct(3, blocklen, disp, type, &MPI_SingleWordtoWord);
    MPI_Type_commit(&MPI_SingleWordtoWord);
/*//////Datatype for Sending word cooccurances*/

/*//////Datatype for Sending word frequencies*/
    MPI_Datatype MPI_SingleWordFrequency;
    MPI_Datatype type1[2] = { MPI_CHAR, MPI_INT };
    int blocklen1[2] = { STRING_LENGTH, 1 };
    MPI_Aint disp1[2];
 
    disp1[0] = 0;
    disp1[1] = STRING_LENGTH;    
    MPI_Type_create_struct(2, blocklen1, disp1, type1, &MPI_SingleWordFrequency);
    MPI_Type_commit(&MPI_SingleWordFrequency);
/*//////Datatype for Sending word frequencies*/

/*//////Declarations//////*/
    MPI::Status status; 
    int myrank = MPI::COMM_WORLD.Get_rank(); 
    int size = MPI::COMM_WORLD.Get_size(); 
    std::vector<string> files=getallfilenames("/work/scratch/vv52zasu/inputfiles/");

    std::unordered_map<string,int> frequencymap;
    std::unordered_map<string,std::unordered_map<string, int>> localmap;
    std::unordered_map<string,std::unordered_map<string, int>> localsecondlevelmap;
    int bufsize, *buf;
    string bufchar1;
    char filename[128]; 
/*//////Declarations//////*/    

/*//////Read files in a loop and write initial data to localmap/////*/
    std::clock_t fileprocessing_timestart = clock();
    for(std::vector<string>::iterator it = files.begin(); it != files.end(); ++it)
    {
        if(myrank ==0)
            std::cout<<"Processing file:"<<(*it).c_str()<<endl;

        MPI::File thefile = MPI::File::Open(MPI::COMM_WORLD, (*it).c_str(), MPI::MODE_RDONLY, MPI::INFO_NULL); 
        MPI::Offset filesize = thefile.Get_size();
       
        char *bufchar;  
        int CHUNKSIZE = (filesize/size)+1;
        CHUNKSIZE = std::max(CHUNKSIZE, 10000);
        bufchar =  new char[CHUNKSIZE+2]; 

        MPI_Status status1;
        int i=0;

        MPI_File_seek(thefile, (myrank)*CHUNKSIZE, MPI_SEEK_SET);
        MPI_File_read( thefile, bufchar, CHUNKSIZE, MPI_CHAR, &status1);
        int count=0;
        MPI_Get_count( &status1, MPI_CHAR, &count );
        string str(bufchar,bufchar+count);
        delete [] bufchar;   //bufchar is copied into str and never needed again
        thefile.Close();     //the file is reopened on every iteration, so close it here
        
        MPI::COMM_WORLD.Barrier();
        
        int occurrences = 0;
        string::size_type start = 0;
        
        int from =0, to=0,index=0;
        string tosend(str);
        while(1)
        {
            index=index+to+1;
            to = tosend.find("\n\n");      
            if(to==string::npos) break;        
            tosend=tosend.substr(to+1);
        }
        string trimstr=str.substr(0,index-1);        
        
        str="";
        MPI::COMM_WORLD.Barrier();

        int length = tosend.length();
        char *recvptr;
        recvptr = new char[CHUNKSIZE];
        
        int dest=0,src=0;
        if(myrank==size-1)
        {
            dest=0;
            src=myrank-1;
        }
        else if(myrank==0)
        {
            dest=1;src=size-1;
        }
        else 
        {
            dest=myrank+1;src=myrank-1;
        }
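
        //Ring exchange of chunk boundaries: each rank forwards the partial
        //record after its last "\n\n" to the next rank and prepends whatever
        //it receives from the previous rank, so records split across chunk
        //boundaries are reassembled exactly once.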
        
        MPI::COMM_WORLD.Sendrecv(tosend.c_str(), length, MPI_CHAR, dest, 123, recvptr, CHUNKSIZE, MPI_CHAR, src, 123, status);
        

        string finalstr="";
        for (int i=0; i<size; i++)
        {
            if (i == myrank) {
                string recvstr;
                if(myrank !=0)
                {        
                    recvstr.assign(recvptr,recvptr+status.Get_count(MPI_CHAR)); 
                    finalstr = recvstr+trimstr; 
                }
                else finalstr=trimstr;
            }
            MPI::COMM_WORLD.Barrier();
        }
        
        delete [] recvptr;   //allocated with new[], so release with delete[]
        str="";
        trimstr="";
        tosend="";

        process_string(finalstr, localmap, frequencymap);
        if(myrank ==0)
            std::cout<<"Processing file Ended: "<<(*it).c_str()<<endl;
    }
    MPI::COMM_WORLD.Barrier();
    if(myrank==0) cout<<" Time taken to process the files: "<< (clock()-fileprocessing_timestart)/(double) CLOCKS_PER_SEC<<"\n";
/*//////Read files in a loop and write initial data to localmap/////*/    
/*////////////Sending the strings to hash%size process//////////*/
vector<vector<string>> vecgrouped(size, vector<string>(0));

for(auto &entry: localmap){
    vecgrouped[std::hash<string>()(entry.first)%size].push_back(entry.first);    
}

     // total words across all hash groups (works for any process count, not just 4)
     int grouped_total = 0;
     for(auto &group: vecgrouped)
         grouped_total += group.size();

cout << "Process: " << myrank << " Local map size: " << localmap.size() << " Grouped words: " << grouped_total << endl;


char* recvdata = NULL;   //set during the round where this rank is the Gatherv root
int sizeofint = sizeof(int);
int recvsize_me=0;
for( int i=0 ; i < size ; i++ )
{
    //if ( myrank ==0 ) 
        //cout << "Process: " << myrank << " Entered the loop. Round: "<< i << endl;
    auto &tosend_twords = vecgrouped[i];
    int data_size = 0;
    for( auto& entry: tosend_twords ) 
    {
        data_size+=localmap[entry].size();
    }
    //Allocate memory
    int allocsize = (tosend_twords.size() + data_size) * (STRING_LENGTH + sizeofint) + ( tosend_twords.size() * sizeofint );
    char* memptr_head = new char[ allocsize ];
    char* memptr = memptr_head;
    
    //Add the data to the allocated memory
    const int N = tosend_twords.size();
    // summing every rank's record count at root i tells rank i how many
    // serialized entries it must deserialize after the Gatherv below
    MPI_Reduce(&N ,    &recvsize_me,    1,    MPI_INT,    MPI_SUM,  i,    MPI_COMM_WORLD);
    if ( myrank == i ) 
        cout << "Process: " << myrank << " receive size: "<< recvsize_me << endl;
    //memcpy( memptr , &N , sizeof(int));
    //memptr += sizeofint;
    
    for( auto &entry : tosend_twords )
    {
        memcpy( memptr , entry.c_str() , entry.size() + 1);
        memptr += STRING_LENGTH;
        
        int N = frequencymap[entry];        
        memcpy( memptr , &N , sizeof(int) );
        memptr += sizeofint;

        N = localmap[entry].size();
        memcpy( memptr , &N , sizeof(int) );
        memptr += sizeofint;
        
        for( auto &entry_cword : localmap[entry] )
        {
            memcpy( memptr , entry_cword.first.c_str() , entry_cword.first.size() + 1);            
            memptr += STRING_LENGTH;
            
            const int N = entry_cword.second;
            memcpy( memptr , &N , sizeofint );
            memptr += sizeofint;
        }
    }
    //After copying data in the ptr, the corresponding data in localmap can be deleted
    for( auto& entry: tosend_twords ) 
    {
        localmap.erase(entry);
    }
    tosend_twords.clear();
    
    // Two-phase shuffle: gather each rank's buffer size at root i first,
    // then collect the variable-length buffers themselves with Gatherv.
    int *allocsizes= new int[size];
    MPI_Gather(&allocsize,  1,  MPI_INT,  allocsizes,  1,  MPI_INT,  i,  MPI_COMM_WORLD);

    // allocsizes is only filled in at the root, so compute the displacements
    // there alone, and start at 1 to avoid reading allocdisplacements[-1]
    int *allocdisplacements = new int[size];
    if ( myrank == i )
    {
        allocdisplacements[0] = 0;
        for( int k=1 ; k < size ; k++ )
        {
            allocdisplacements[k]=allocdisplacements[k-1]+allocsizes[k-1];
        }
        int totalrecvsize = allocdisplacements[size-1]+ allocsizes[size-1];
        recvdata = new char[totalrecvsize];
    }

    MPI_Gatherv(memptr_head, allocsize, MPI_CHAR, recvdata,  allocsizes, allocdisplacements, MPI_CHAR , i ,  MPI_COMM_WORLD);

    delete[] allocsizes;
    delete[] allocdisplacements;
    
    delete[](memptr_head);
    if ( myrank == 0 ) 
        cout << "Process: " << myrank << "Finished round: " << i << "\n";
}
localmap.clear();
frequencymap.clear();
//copy the received data into localmap
char *recvdata_head = recvdata;
for( int i=0 ; i < recvsize_me ; i++ )
{
    char tword[STRING_LENGTH];
    memcpy( tword , recvdata , STRING_LENGTH);
    recvdata += STRING_LENGTH;
    
    int frequency;
    memcpy( &frequency , recvdata , sizeof(int) );
    recvdata += sizeofint; 

    frequencymap[tword] += frequency;       

    int cwords_size;
    memcpy( &cwords_size , recvdata , sizeof(int) );
    recvdata += sizeofint;
    
    auto &submap = localmap[tword];
    for( int i=0 ; i < cwords_size ; i++ )
    {
        char cword[STRING_LENGTH];
        memcpy( cword , recvdata , STRING_LENGTH);            
        recvdata += STRING_LENGTH;
        
        int ccount;
        memcpy( &ccount , recvdata , sizeofint );
        recvdata += sizeofint;
        
        submap[cword] += ccount;
    }
}

delete[] recvdata_head;
cout << "Process: " << myrank << " Localmap size:" << localmap.size() << endl;


    for(auto &entry: localmap)
    {
        auto &submap = entry.second;
        for(auto it=submap.begin();it!=submap.end();)
            if(it->second<5)submap.erase(it++);else it++;
    }
/*//////////broadcasting rootwords and getting the entries in other processes////////*/

    //----------------------------------------------------------------------------------------------------------
    ////broadcasting first level cooccurances and getting entries of second level coccurances from other processes
    //----------------------------------------------------------------------------------------------------------
    auto it=localmap.begin();
    while(1)
    {       
        std::clock_t whilestart=clock();   
        MPI::COMM_WORLD.Barrier();
        if(myrank==0)cout<<"while start lo"<<endl; 
        int totalwordsize=0;
        int totalwordsizefreq=0;            
        Wordtoword *allwords=nullptr;
        WordFrequency *allwordsfreq=nullptr;
        int subwordscount=0;
        for(int i=0;i<size;i++)
        {
            subwordscount=0;
            int mapsize = localmap.size();
            
            std::clock_t start=clock();
            std::set<string> uniquestrings;
            if(myrank==i)
            {
                for( ;it!=localmap.end();it++)
                {
                    if((it->second).size()==0)continue;  
                    for (auto &entry : it->second)
                    {                 
                        if(localmap.find(entry.first)==localmap.end()  && localsecondlevelmap.find(entry.first)==localsecondlevelmap.end())
                            uniquestrings.insert(entry.first);
                    }

                    subwordscount= uniquestrings.size();
                    //break doesn't increment `it` before quitting the loop, so advance it manually
                    if(subwordscount>=2000){cout<<"\n";it++;break;}
                }
                //+mapsize;
              //  cout<< "Came1:"<<myrank<<" and subcount:"<<subwordscount;
            }
      
            int j=0;
               
            MPI_Bcast(  &subwordscount,  1,  MPI_INT,  i,  MPI_COMM_WORLD);
            if(subwordscount==0)continue;
            //VLAs are non-standard C++ and subwordscount can reach ~2000, so
            //allocate the 2-D word buffer on the heap instead of the stack
            char (*rootwords)[STRING_LENGTH] = new char[subwordscount][STRING_LENGTH];
        
            if(myrank==i)
            {
                for (auto entry : uniquestrings)
                {
                    std::size_t length = entry.copy(rootwords[j],STRING_LENGTH-1);
                    rootwords[j][length]='\0';    
                    j++;        
                }
            }
            //broadcasting all the words
            MPI_Bcast(  rootwords, subwordscount,  MPI_Customword,  i,  MPI_COMM_WORLD);
            
            int numofwordstosend=0;
            int numofwordfreqstosend=0;
            Wordtoword *wordtoword_ptr=nullptr;
            WordFrequency *wordfrequency_ptr=nullptr;
          
            //sending the number of words to the broadcaster process
            if(myrank != i)
            {
                //get all the word entries matching in the local map
                wordtoword_ptr = get_wordtoword_ifpresent(localmap,numofwordstosend, rootwords,  subwordscount);   
                wordfrequency_ptr = get_wordfrequency_ifpresent(frequencymap,numofwordfreqstosend, rootwords, subwordscount);  
            }          
            
            int *recvcounts= new int[size];
            int *recvdisplacements= new int[size];
            MPI::COMM_WORLD.Barrier();
            int *recvcountsfreq= new int[size];
            int *recvdisplacementsfreq= new int[size];        
        
            MPI_Gather(&numofwordstosend,  1,  MPI_INT,  recvcounts,  1,  MPI_INT,  i,  MPI_COMM_WORLD);
            MPI_Gather(&numofwordfreqstosend,  1,  MPI_INT,  recvcountsfreq,  1,  MPI_INT, i,  MPI_COMM_WORLD);
            
            //cout<<" STEP1 "<<(clock()-start)/ (double) CLOCKS_PER_SEC;
            if(myrank==i)
            {  
                recvdisplacements[0]=0;
                for(int x=0;x<size;x++)
                {
                    if(x!=0)
                    {recvdisplacements[x]=recvdisplacements[x-1]+recvcounts[x-1];}
                    totalwordsize+=recvcounts[x];         
                }

                allwords = new Wordtoword[totalwordsize];
                
                recvdisplacementsfreq[0]=0;          
                for(int x=0;x<size;x++)
                {
                    if(x!=0)
                    {
                        recvdisplacementsfreq[x]=recvdisplacementsfreq[x-1]+recvcountsfreq[x-1];
                    }
                    totalwordsizefreq+=recvcountsfreq[x];
                }
                allwordsfreq= new WordFrequency[totalwordsizefreq];
           //     cout<<" STEP1 "<<(clock()-start)/ (double) CLOCKS_PER_SEC<<" Totalwordssize:"<<totalwordsize;
            }
            
          
          
            MPI_Gatherv(wordtoword_ptr, numofwordstosend, MPI_SingleWordtoWord, allwords,  recvcounts, recvdisplacements, MPI_SingleWordtoWord,i ,  MPI_COMM_WORLD);
            MPI_Gatherv(wordfrequency_ptr, numofwordfreqstosend, MPI_SingleWordFrequency, allwordsfreq,  recvcountsfreq, recvdisplacementsfreq, MPI_SingleWordFrequency , i, MPI_COMM_WORLD);
            
                //cout<<" STEP3 "<<(clock()-start)/ (double) CLOCKS_PER_SEC;
            //everything here was allocated with new[] (including, presumably,
            //the buffers returned by get_*_ifpresent), so release with delete[];
            //delete[] on a null pointer is a safe no-op
            delete [] recvcounts;
            delete [] recvdisplacements;
            delete [] recvcountsfreq;
            delete [] recvdisplacementsfreq;
            delete [] wordtoword_ptr;
            delete [] wordfrequency_ptr;
            delete [] rootwords;
         //       cout<<" STEP4 "<<(clock()-start)/ (double) CLOCKS_PER_SEC<<"\n";
        
            
        }
        //cout<<"Process Memory: "<<myrank<<" Mapsize"<<mapsize(localmap)+mapsize(localsecondlevelmap)<<endl;
         if(allwords!=nullptr){
             reduce(allwords, totalwordsize, localsecondlevelmap,1);
             delete [] allwords;
         }
        //cout<<"Process : "<< myrank <<" Reduce ended "<<(clock()-whilestart)/ (double) CLOCKS_PER_SEC<<endl;
         if(allwordsfreq!=nullptr){
             reducefreq(allwordsfreq, totalwordsizefreq, frequencymap,1);
             delete [] allwordsfreq;
         }
         //cout<<"Process Memory: "<<myrank<<" Mapsize"<<mapsize(localmap)+mapsize(localsecondlevelmap)<<endl;
         MPI::COMM_WORLD.Barrier();
         //ll
         cout<<"Process: "<< myrank <<" While ended "<<(clock()-whilestart)/ (double) CLOCKS_PER_SEC<<endl;
        
        int arewedone=0;
        MPI_Allreduce(&subwordscount,    &arewedone,    1,    MPI_INT,    MPI_SUM,    MPI_COMM_WORLD);
        if(arewedone==0)break;
            
    }
    
    MPI::COMM_WORLD.Barrier();


    if ( myrank == 0 )
        cout << "Process: " << myrank << " writing to file started.." << "\n";
     //----------------------------------------------------------------------------------------------------------
    ////writing to file
    //----------------------------------------------------------------------------------------------------------

    std::fstream fs;
    string filenamefs ="file"+to_string(myrank)+".txt";
    fs.open (filenamefs.c_str(), std::fstream::in | std::fstream::out | std::fstream::app);

    int maxlocalmapsize=0;
    int localmap_size = localmap.size();
    MPI_Allreduce(&localmap_size,    &maxlocalmapsize,    1,    MPI_INT,    MPI_MAX,    MPI_COMM_WORLD);

    int counter=0;
    it = localmap.begin();
    char *localstr= new char[20];
    int length=0;
    for(it = localmap.begin();it!=localmap.end();it++)
     {  
        if((it->first).substr( (it->first).length() - 2 ) == "NN")  
        {   
            string rootentry ="";//= it->first +"    ";
             auto &submap = it->second;

            string secondlevelstring="(";
            for(auto& secondlevelentry : submap)
            {
                //if(secondlevelentry.second>=6)
                    secondlevelstring+=secondlevelentry.first+"::"+std::to_string(secondlevelentry.second)+",";
            }                        
            secondlevelstring.replace(secondlevelstring.end()-1,secondlevelstring.end(),")\n"); 
            rootentry+=it->first +"    "+it->first+"::"+ std::to_string(frequencymap[it->first]) + secondlevelstring;
                        
            for(auto &firstlevelentry : submap)
            {
                //if(firstlevelentry.second>=6)
                {
                    //cout<<firstlevelentry.second<<"came"<<endl;
                    
                    if(localmap.find(firstlevelentry.first)!=localmap.end())
                    {   
                        auto &secondlevelsubmap = localmap[firstlevelentry.first];
                        string secondlevelstring="(";
                        
                        for(auto& secondlevelentry : secondlevelsubmap)
                        {
                            //if(secondlevelentry.second>=5)
                                secondlevelstring+=secondlevelentry.first+"::"+std::to_string(secondlevelentry.second)+",";
                        }     
                        secondlevelstring.replace(secondlevelstring.end()-1,secondlevelstring.end(),")\n"); 
                        rootentry+=it->first +"    "+firstlevelentry.first+"::"+ std::to_string(frequencymap[firstlevelentry.first]) + secondlevelstring;
                       
                    }
                    else if(localsecondlevelmap.find(firstlevelentry.first)!=localsecondlevelmap.end())
                    {
                        auto &secondlevelsubmap = localsecondlevelmap[firstlevelentry.first];
                        string secondlevelstring="(";
                        for(auto& secondlevelentry : secondlevelsubmap)
                        {
                            //if(secondlevelentry.second>=5)
                                secondlevelstring+=secondlevelentry.first+"::"+std::to_string(secondlevelentry.second)+",";
                        }
                        secondlevelstring.replace(secondlevelstring.end()-1,secondlevelstring.end(),")\n");
                        rootentry+=it->first +"    "+firstlevelentry.first+"::"+std::to_string(frequencymap[firstlevelentry.first]) +secondlevelstring;
                        
                    }
                    // else
                    //     if((it->first).compare("absence::NN")==0)counter1++;
                }
            }
            length = rootentry.length();
            delete [] localstr;   //was allocated with new[]; calling free() on it is undefined behaviour

            localstr = new char[length+1];
            strcpy(localstr, rootentry.c_str());
            if(myrank==0)
                counter++;
            fs<<localstr;
        }
    }
    
    delete [] localstr;   //release the last buffer allocated in the loop
    fs.close();

    if(myrank==0)cout<<"writing to file done..size="<<sizeofstr<<"\n";
        
    MPI::Finalize(); 
    return 0; 
}
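
The shuffle at the heart of this example generalizes: serialize the local
map into a byte buffer, gather every rank's buffer size at the destination,
then collect the buffers themselves with Gatherv. A condensed sketch of
that two-phase pattern using the C API like the example does, with the
payload reduced to raw ints for brevity (gather_variable is an illustrative
name):

#include <mpi.h>
#include <vector>

// Gather variable-sized int buffers from all ranks at `root`.
// Phase 1 gathers the sizes; phase 2 gathers the payloads with Gatherv.
std::vector<int> gather_variable(const std::vector<int>& mine, int root) {
    int p, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &p);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int mysize = (int)mine.size();
    std::vector<int> sizes(p), displs(p, 0);
    MPI_Gather(&mysize, 1, MPI_INT, &sizes[0], 1, MPI_INT,
               root, MPI_COMM_WORLD);

    int total = 0;
    if (rank == root) {
        for (int k = 1; k < p; k++)          // displs[0] stays 0
            displs[k] = displs[k - 1] + sizes[k - 1];
        total = displs[p - 1] + sizes[p - 1];
    }
    std::vector<int> all(total);             // stays empty on non-root ranks

    MPI_Gatherv((void *)mine.data(), mysize, MPI_INT,
                all.data(), &sizes[0], &displs[0], MPI_INT,
                root, MPI_COMM_WORLD);
    return all;                              // filled at root only
}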