void HPC::MPICommunication::waitReception(Request::Handle ioRequest) const
{
    Beagle_StackTraceBeginM();
    Beagle_NonNullPointerAssertM(ioRequest);
    MPI::Status lStatus;
    ioRequest->mSizeRequest.Wait(lStatus);
    if(lStatus.Is_cancelled()) return;
    int lRank = lStatus.Get_source();
    int lMsgSize = ioRequest->mSize;
    std::string lStringTag = ioRequest->mTag + "_str";
    MPI::COMM_WORLD.Probe(lRank, hashTag(lStringTag), lStatus);
    Beagle_AssertM(lStatus.Get_count(MPI::CHAR) == lMsgSize);
    // constructing a string of the right size.
    std::string lMsg(lMsgSize, ' ');
    MPI::COMM_WORLD.Recv(&lMsg[0], lMsgSize, MPI::CHAR, lRank, hashTag(lStringTag));
#ifdef BEAGLE_HAVE_LIBZ
    if(mCompressionLevel->getWrappedValue() > 0) {
        ioRequest->mMessage = new Beagle::String;
        decompressString(lMsg, ioRequest->mMessage->getWrappedValue());
    } else {
        ioRequest->mMessage = new Beagle::String(lMsg);
    }
#else
    ioRequest->mMessage = new Beagle::String(lMsg);
#endif
    Beagle_HPC_StackTraceEndM("void HPC::MPICommunication::waitReception(Request::Handle) const");
}
bool recvData(std::vector<double>& receivedData)
{
    bool isDataReceived = false;
    if (intraComm != MPI::COMM_NULL) {
        MPI::Status status;
        double buffer[100];
        intraComm.Recv(buffer, 100, MPI::DOUBLE, MPI::ANY_SOURCE, /*tag*/ 100, status);
        int count = status.Get_count(MPI::DOUBLE);
        receivedData = std::vector<double>(buffer, buffer + count);
        log.Info() << "RECV [ " << getRank() << " <-- " << status.Get_source()
                   << " ] data : " << receivedData << std::endl;
        isDataReceived = true;
    } else {
        log.Err() << "PID " << getProcessId() << " failed to RECV" << std::endl;
    }
    return isDataReceived;
}
void ParaCommMpiWorld::probe( int* source, int* tag )
{
    MPI::Status mpiStatus;
    MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, mpiStatus);
    *source = mpiStatus.Get_source();
    *tag = mpiStatus.Get_tag();
    TAG_TRACE (Probe, From, *source, *tag);
}
//#####################################################################
// Function Recv_Columns
//#####################################################################
template<class T_GRID> template<class T_ARRAYS_HORIZONTAL_COLUMN>
void MPI_RLE_GRID<T_GRID>::
Recv_Columns(T_ARRAYS_HORIZONTAL_COLUMN& columns, const ARRAY<T_BOX_HORIZONTAL_INT>& regions, const int tag, const MPI::Status& probe_status) const
{
    ARRAY<char> buffer(probe_status.Get_count(MPI::PACKED));
    int position = 0;
    comm->Recv(&buffer(1), buffer.m, MPI::PACKED, probe_status.Get_source(), tag);
    TV_HORIZONTAL_INT direction;
    MPI_UTILITIES::Unpack(direction, buffer, position, *comm);
    int neighbor = 0;
    all_neighbor_directions.Find(-direction, neighbor);
    for(typename T_HORIZONTAL_GRID::CELL_ITERATOR iterator(local_grid.horizontal_grid, regions(neighbor)); iterator.Valid(); iterator.Next())
        MPI_UTILITIES::Unpack(columns(iterator.Cell_Index()), buffer, position, *comm);
}
bool ParaCommMpiWorld::iProbe( int* source, int* tag )
{
    bool flag;
    MPI::Status mpiStatus;
    flag = MPI::COMM_WORLD.Iprobe(MPI::ANY_SOURCE, MPI::ANY_TAG, mpiStatus);
    if( flag )
    {
        *source = mpiStatus.Get_source();
        *tag = mpiStatus.Get_tag();
        TAG_TRACE (Iprobe, From, *source, *tag);
    }
    return flag;
}
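The examples in this listing all use the MPI C++ bindings (MPI::Status, MPI::COMM_WORLD), which were deprecated in MPI-2.2 and removed from the standard in MPI-3.0. For comparison, here is a minimal sketch of the same probe-then-receive pattern against the plain C API; the function name and the use of MPI_CHAR as the element type are illustrative assumptions, not taken from any of the projects above.

#include <mpi.h>
#include <vector>

// Block until any message arrives, size the buffer from the probed status,
// then receive from exactly the source/tag pair that was probed.
std::vector<char> probe_and_receive()
{
    MPI_Status status;
    MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);

    int count = 0;
    MPI_Get_count(&status, MPI_CHAR, &count);      // length of the pending message
    std::vector<char> buffer(count);

    MPI_Recv(buffer.data(), count, MPI_CHAR,
             status.MPI_SOURCE, status.MPI_TAG,    // C equivalents of Get_source()/Get_tag()
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    return buffer;
}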
int HPC::MPICommunication::waitAny(Request::Bag& ioRequests) const
{
    Beagle_StackTraceBeginM();
    unsigned int lSize = ioRequests.size();
    std::vector<MPI::Request> lRequests;
    lRequests.reserve(lSize);
    for(unsigned int i = 0; i < lSize; ++i) {
        lRequests.push_back(ioRequests[i]->mSizeRequest);
    }
    MPI::Status lStatus;
    int lIndex = MPI::Request::Waitany(lSize, &lRequests[0], lStatus);
    ioRequests[lIndex]->mSizeRequest = lRequests[lIndex];
    if(lStatus.Is_cancelled()) return -1;
    if(ioRequests[lIndex]->mType == Request::RECEPTION) {
        int lRank = lStatus.Get_source();
        std::string lStringTag = ioRequests[lIndex]->mTag + "_str";
        int lMsgSize = ioRequests[lIndex]->mSize;
        MPI::COMM_WORLD.Probe(lRank, hashTag(lStringTag), lStatus);
        Beagle_AssertM(lStatus.Get_count(MPI::CHAR) == lMsgSize);
        // constructing a string of the right size.
        std::string lMsg(lMsgSize, ' ');
        MPI::COMM_WORLD.Recv(&lMsg[0], lMsgSize, MPI::CHAR, lRank, hashTag(lStringTag));
#ifdef BEAGLE_HAVE_LIBZ
        if(mCompressionLevel->getWrappedValue() > 0) {
            ioRequests[lIndex]->mMessage = new Beagle::String;
            decompressString(lMsg, ioRequests[lIndex]->mMessage->getWrappedValue());
        } else {
            ioRequests[lIndex]->mMessage = new Beagle::String(lMsg);
        }
#else
        ioRequests[lIndex]->mMessage = new Beagle::String(lMsg);
#endif
    } else if(ioRequests[lIndex]->mType == Request::SENDING) {
        ioRequests[lIndex]->mMsgRequest.Wait();
    } else {
        throw RunTimeException("MPICommunication::waitAny(Request::Bag&) the request "+uint2str(lIndex)+" is invalid", __FILE__, __LINE__);
    }
    return lIndex;
    Beagle_HPC_StackTraceEndM("int HPC::MPICommunication::waitAny(Request::Bag&) const");
}
/*!
 * \brief Receive a message from a specific node rank via MPI.
 * \param outMessage Message received.
 * \param inTag Tag associated with the message to be received.
 * \param inRank Node rank of the sending node.
 */
void HPC::MPICommunication::receive(std::string& outMessage, const std::string& inTag, int inRank) const
{
    Beagle_StackTraceBeginM();
    MPI::Status lStatus;
    int lSize = 0;
    MPI::COMM_WORLD.Recv(&lSize, 1, MPI::INT, inRank, hashTag(inTag+"_size"));
    MPI::COMM_WORLD.Probe(inRank, hashTag(inTag+"_str"), lStatus);
    Beagle_AssertM(lStatus.Get_count(MPI::CHAR) == lSize);
    outMessage.resize(lSize);
    MPI::COMM_WORLD.Recv(&outMessage[0], lSize, MPI::CHAR, lStatus.Get_source(), hashTag(inTag+"_str"));
#ifdef BEAGLE_HAVE_LIBZ
    if(mCompressionLevel->getWrappedValue() > 0) {
        std::string lString;
        decompressString(outMessage, lString);
        outMessage = lString;
    }
#endif
    Beagle_HPC_StackTraceEndM("void HPC::MPICommunication::receive(std::string&, const std::string&, int) const");
}
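The Beagle receive above learns the payload length from a separate size message and only asserts that it matches the probed count. A minimal alternative sketch, not part of the library's API, that sizes the buffer directly from the probed status so no size message is needed; it reuses the hashTag helper and the "_str" tag suffix from the example above as assumptions.

// Hypothetical variant: derive the message length from the probe itself.
void receiveByProbe(std::string& outMessage, const std::string& inTag, int inRank)
{
    MPI::Status lStatus;
    MPI::COMM_WORLD.Probe(inRank, hashTag(inTag + "_str"), lStatus);
    const int lSize = lStatus.Get_count(MPI::CHAR);   // length of the pending message
    outMessage.resize(lSize);
    MPI::COMM_WORLD.Recv(&outMessage[0], lSize, MPI::CHAR,
                         lStatus.Get_source(), lStatus.Get_tag());
}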
bool ParaCommMpiWorld::waitToken( int tempRank )
{
    pthread_mutex_lock(&tokenAccessLock);
    if( token[0] == myRank )
    {
        pthread_mutex_unlock(&tokenAccessLock);
        return true;
    }
    else
    {
        int previousRank = myRank - 1;
        if( previousRank == 0 )
        {
            if( token[0] != -1 )
            {
                previousRank = comSize - 1;
            }
        }
        int receivedTag;
        MPI::Status mpiStatus;
        MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, mpiStatus);
        receivedTag = mpiStatus.Get_tag();
        TAG_TRACE (Probe, From, mpiStatus.Get_source(), receivedTag);
        if( receivedTag == TagToken )
        {
            receive(token, 2, ParaINT, 0, TagToken);
            assert(token[0] == myRank);
            pthread_mutex_unlock(&tokenAccessLock);
            return true;
        }
        else
        {
            pthread_mutex_unlock(&tokenAccessLock);
            return false;
        }
    }
}
int main(int argc, char * argv[])
{
    int tag, send_tag;              // tag in MPI_Recv
    int to, from;                   // destination and source of MPI send/receive
    int st_count, st_source, st_tag;
    double start_time = 0.0;        // start and end time for MPI_Wtime()
    double end_time = 0.0;
    MPI::Status status;
    MPI::Init(argc, argv);          // start MPI
    int rank = MPI::COMM_WORLD.Get_rank();  // the rank label of this machine
    int size = MPI::COMM_WORLD.Get_size();  // the number of tasks to be done
    // MPI_Barrier(MPI_COMM_WORLD);
    int option;
    opterr = 0;
    int N = 0;
    string directory;
    // getopt parses the command-line options: -n is the number of most frequent
    // words to report per file, -d is the directory containing the files to be parsed.
    while ((option = getopt(argc, argv, "d:n:")) != -1)
    {
        switch (option) {
        case 'n':
            N = atoi(optarg);            // the first N words
            break;
        case 'd':
            directory = string(optarg);  // directory parameter
            break;
        case '?':   // when an option argument is malformed, show the error information
            if (optopt == 'n')
                cerr << "Option -" << char(optopt) << " requires an argument." << endl;
            else if (isprint(optopt))
                cerr << "Unknown option `-" << char(optopt) << "'.\n" << endl;
            else
                cerr << "Unknown option character `" << std::hex << optopt << "'." << endl;
        }
    }
    vector<string> filenames;  // use this vector to store file names
    char buffer[1024];
    if(rank == 0)  // machine 0 parses the directory name and lists the files in it
    {
        struct dirent *ptr;
        DIR *dir;
        dir = opendir(directory.c_str());       // open the directory
        while((ptr = readdir(dir)) != NULL)     // read the directory entries
        {
            if(ptr->d_name[0] == '.') continue;
            strcpy(buffer, directory.c_str());
            strcat(buffer, ptr->d_name);
            filenames.push_back(string(buffer)); // store the file names in the vector filenames
        }
    }
    if(rank == 0)  // machine 0 sends messages and assigns tasks to all the machines, including itself
    {
        start_time = MPI_Wtime();  // start time stamp
        to = 0;
        send_tag = 0;
        int round = 0;
        while(round * size < filenames.size())
        {
            for(int i = round * size; i < (round + 1) * size && i < filenames.size(); i++)
            {
                sprintf(buffer, "%s", filenames[i].c_str());
                // send file names to the machines (including rank 0 itself) so they can parse the files
                MPI::COMM_WORLD.Send(buffer, 1024, MPI::CHAR, i % size, send_tag);
                to++;
                send_tag++;
            }
            tag = MPI::ANY_TAG;
            from = MPI::ANY_SOURCE;
            // rank 0 receives its own work item, parses it, and sends the result to itself
            MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);
            st_count = status.Get_count(MPI::CHAR);
            st_source = status.Get_source();
            st_tag = status.Get_tag();
            string result("");
            result = parse(buffer, N);
            strcpy(buffer, result.c_str());
            MPI::COMM_WORLD.Send(buffer, 1024, MPI::CHAR, 0, st_tag);
            for(int i = round * size; i < (round + 1) * size && i < filenames.size(); i++)
            {
                tag = MPI::ANY_TAG;
                from = MPI::ANY_SOURCE;
                // rank 0 collects parsing results from all machines, including itself
                MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);
                st_count = status.Get_count(MPI::CHAR);
                st_source = status.Get_source();
                st_tag = status.Get_tag();
                cout << buffer << endl;
            }
            round++;
        }
        for (int i = 1; i < size; ++i)
        {
            strcpy(buffer, "Finish");
            MPI::COMM_WORLD.Send(buffer, 1024, MPI::CHAR, i, 0);  // rank 0 sends the Finish message to the other machines
        }
        end_time = MPI_Wtime();
        printf("The running time is : %lf \n", end_time - start_time);
    }
    else
    {
        while(1)
        {
            tag = MPI::ANY_TAG;
            from = MPI::ANY_SOURCE;
            MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);  // receive a file name or the Finish message from rank 0
            st_count = status.Get_count(MPI::CHAR);
            st_source = status.Get_source();
            st_tag = status.Get_tag();
            if (strcmp(buffer, "Finish") == 0)  // on the Finish message, stop receiving and sending
                break;
            string result("");
            result = parse(buffer, N);          // parse the file received from rank 0
            strcpy(buffer, result.c_str());
            MPI::COMM_WORLD.Send(buffer, 1024, MPI::CHAR, 0, st_tag);  // send the result back to rank 0
        }
    }
    // cout << "rank " << rank << ": " << "I am dying, goodbye!" << endl;
    // MPI_Barrier(MPI_COMM_WORLD);
    MPI::Finalize();  // MPI finalize
    return 0;
}
void PSO::Swarm::run_master(int numIt, int vflag, ostream* out, ostream* hist) {
    double f(-INFINITY);
    int id(0);
    int j(0);
    double* particlePos(NULL);
    MPI::Status status;
    int flag;
    int src;
    int idle(0);
    int iter(0);
    queue<int> evalQueue;
    for (int i(0); i < swarm.size(); ++i) evalQueue.push(i);
    if (vflag) cerr << "Sending particles to slaves..." << endl;
    // initialize slaves
    for (int k(1); k < mpi_ntasks && (iter < numIt || numIt < 0); ++k) {
        j = evalQueue.front();
        evalQueue.pop();
        if (numIt > 0) {
            // numIt < 0 => evaluate at current position
            updateVelocity(j);
            updatePosition(j);
        }
        if (vflag) cerr << j << " " << (*swarm[j]) << endl;
        if (vflag) fprintf(stderr, "Sending particle %d to process %d.\n", j, k);
        MPI::COMM_WORLD.Send(&j, 1, MPI::INT, k, 1);
        MPI::COMM_WORLD.Send(swarm[j]->position.data(), numParams, MPI::DOUBLE, k, 1);
        ++iter;
    }
    while (1) {
        flag = MPI::COMM_WORLD.Iprobe(MPI::ANY_SOURCE, MPI::ANY_TAG, status);
        if (flag) {
            // get function value
            src = status.Get_source();
            MPI::COMM_WORLD.Recv(&id, 1, MPI::INT, MPI::ANY_SOURCE, MPI::ANY_TAG, status);
            if (vflag) fprintf(stderr, "Receiving particle %d from process %d.\n", id, src);
            MPI::COMM_WORLD.Recv(&f, 1, MPI::DOUBLE, src, MPI::ANY_TAG, status);
            // update particle information
            swarm[id]->value = f;
            if (f >= swarm[id]->bestValue) {
                swarm[id]->bestPosition = swarm[id]->position;
                swarm[id]->bestValue = f;
            }
            ++numEvals;
            if (hist != NULL) {
                *hist << id << " " << (*swarm[id]) << endl;
            }
            // check for new best value
            if (f >= bestVal) {
                bestPos = swarm[id]->position;
                bestVal = f;
                bestParticle = id;
                if (out != NULL) {
                    *out << numEvals << " " << bestVal << " ";
                    for (int j(0); j < bestPos.size(); ++j) *out << bestPos[j] << " ";
                    *out << endl;
                }
            }
            if (numIt > 0) {
                // update velocity and position
                updateVelocity(id);
                updatePosition(id);
                evalQueue.push(id);
            }
            // send new work to slave
            // if (iter < numIt) {
            if ((iter < numIt || numIt < 0) && ! evalQueue.empty()) {
                j = evalQueue.front();
                evalQueue.pop();
                if (vflag) fprintf(stderr, "Sending particle %d to process %d.\n", j, src);
                MPI::COMM_WORLD.Send(&j, 1, MPI::INT, src, 1);
                MPI::COMM_WORLD.Send(swarm[j]->position.data(), numParams, MPI::DOUBLE, src, 1);
                ++iter;
            } else {
                ++idle;
                if (vflag) fprintf(stderr, "Sending done signal to process %d.\n", src);
                MPI::COMM_WORLD.Send(0, 0, MPI::INT, src, 0);
            }
            if (idle == mpi_ntasks - 1) break;
        }
    }
}
void manager_process(const MPI::Intracomm &comm_world, const int manager_rank, const int worker_size,
                     std::string &maskName, std::string &imgDir, std::string &outDir, bool overwrite)
{
    // first get the list of files to process
    std::vector<std::string> filenames;
    std::vector<std::string> seg_output;
    std::vector<std::string> features_output;
    uint64_t t1, t0;
    t0 = cci::common::event::timestampInUS();
    getFiles(maskName, imgDir, outDir, filenames, seg_output, features_output, overwrite);
    t1 = cci::common::event::timestampInUS();
    printf("Manager ready at %d, file read took %lu us\n", manager_rank, t1 - t0);
    comm_world.Barrier();

    // now start the loop to listen for messages
    int curr = 0;
    int total = filenames.size();
    MPI::Status status;
    int worker_id;
    char ready;
    char *input;
    char *mask;
    char *output;
    int inputlen;
    int masklen;
    int outputlen;
    while (curr < total) {
        usleep(1000);
        if (comm_world.Iprobe(MPI_ANY_SOURCE, TAG_CONTROL, status)) {
            /* where is it coming from */
            worker_id = status.Get_source();
            comm_world.Recv(&ready, 1, MPI::CHAR, worker_id, TAG_CONTROL);
            // printf("manager received request from worker %d\n", worker_id);
            if (worker_id == manager_rank) continue;
            if (ready == WORKER_READY) {
                // tell worker that manager is ready
                comm_world.Send(&MANAGER_READY, 1, MPI::CHAR, worker_id, TAG_CONTROL);
                // printf("manager signal transfer\n");
                /* send real data */
                inputlen = filenames[curr].size() + 1;  // add one to create the zero-terminated string
                masklen = seg_output[curr].size() + 1;
                outputlen = features_output[curr].size() + 1;
                input = new char[inputlen];
                memset(input, 0, sizeof(char) * inputlen);
                strncpy(input, filenames[curr].c_str(), inputlen);
                mask = new char[masklen];
                memset(mask, 0, sizeof(char) * masklen);
                strncpy(mask, seg_output[curr].c_str(), masklen);
                output = new char[outputlen];
                memset(output, 0, sizeof(char) * outputlen);
                strncpy(output, features_output[curr].c_str(), outputlen);
                comm_world.Send(&inputlen, 1, MPI::INT, worker_id, TAG_METADATA);
                comm_world.Send(&masklen, 1, MPI::INT, worker_id, TAG_METADATA);
                comm_world.Send(&outputlen, 1, MPI::INT, worker_id, TAG_METADATA);
                // now send the actual string data
                comm_world.Send(input, inputlen, MPI::CHAR, worker_id, TAG_DATA);
                comm_world.Send(mask, masklen, MPI::CHAR, worker_id, TAG_DATA);
                comm_world.Send(output, outputlen, MPI::CHAR, worker_id, TAG_DATA);
                curr++;
                delete [] input;
                delete [] mask;
                delete [] output;
            }
            if (curr % 100 == 1) {
                printf("[ MANAGER STATUS ] %d tasks remaining.\n", total - curr);
            }
        }
    }
    /* tell everyone to quit */
    int active_workers = worker_size;
    while (active_workers > 0) {
        usleep(1000);
        if (comm_world.Iprobe(MPI_ANY_SOURCE, TAG_CONTROL, status)) {
            /* where is it coming from */
            worker_id = status.Get_source();
            comm_world.Recv(&ready, 1, MPI::CHAR, worker_id, TAG_CONTROL);
            // printf("manager received request from worker %d\n", worker_id);
            if (worker_id == manager_rank) continue;
            if (ready == WORKER_READY) {
                comm_world.Send(&MANAGER_FINISHED, 1, MPI::CHAR, worker_id, TAG_CONTROL);
                // printf("manager signal finished\n");
                --active_workers;
            }
        }
    }
}
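Only the manager side of the handshake is shown above. A hypothetical sketch of what a matching worker loop could look like, assuming the same TAG_CONTROL/TAG_METADATA/TAG_DATA tags and WORKER_READY/MANAGER_READY/MANAGER_FINISHED control bytes, with process_tile() standing in for the real segmentation/feature work; none of this is taken from the original project.

// Hypothetical worker-side counterpart to manager_process().
void worker_process(const MPI::Intracomm &comm_world, const int manager_rank)
{
    comm_world.Barrier();                       // matches the manager's Barrier()
    while (true) {
        // announce readiness and wait for the manager's verdict
        char ready = WORKER_READY;
        comm_world.Send(&ready, 1, MPI::CHAR, manager_rank, TAG_CONTROL);
        char flag;
        comm_world.Recv(&flag, 1, MPI::CHAR, manager_rank, TAG_CONTROL);
        if (flag == MANAGER_FINISHED) break;    // no more work

        // receive the three string lengths, then the three strings
        int inputlen, masklen, outputlen;
        comm_world.Recv(&inputlen,  1, MPI::INT, manager_rank, TAG_METADATA);
        comm_world.Recv(&masklen,   1, MPI::INT, manager_rank, TAG_METADATA);
        comm_world.Recv(&outputlen, 1, MPI::INT, manager_rank, TAG_METADATA);
        std::vector<char> input(inputlen), mask(masklen), output(outputlen);
        comm_world.Recv(input.data(),  inputlen,  MPI::CHAR, manager_rank, TAG_DATA);
        comm_world.Recv(mask.data(),   masklen,   MPI::CHAR, manager_rank, TAG_DATA);
        comm_world.Recv(output.data(), outputlen, MPI::CHAR, manager_rank, TAG_DATA);

        process_tile(input.data(), mask.data(), output.data());  // placeholder for the real work
    }
}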
int main(int argc, char * argv[])
{
    int tag, send_tag;
    int to, from;
    int st_count, st_source, st_tag;
    double start_time = 0.0;
    double end_time = 0.0;
    MPI::Status status;
    MPI::Init(argc, argv);
    int rank = MPI::COMM_WORLD.Get_rank();
    int size = MPI::COMM_WORLD.Get_size();
    MPI_Barrier(MPI_COMM_WORLD);
    start_time = MPI_Wtime();
    int option;
    opterr = 0;
    int N = 0;
    string web_file;
    while ((option = getopt(argc, argv, "l:n:")) != -1)
    {
        switch (option) {
        case 'n':
            N = atoi(optarg);
            break;
        case 'l':
            web_file = string(optarg);
            break;
        case '?':
            if (optopt == 'n')
                cerr << "Option -" << char(optopt) << " requires an argument." << endl;
            else if (isprint(optopt))
                cerr << "Unknown option `-" << char(optopt) << "'.\n" << endl;
            else
                cerr << "Unknown option character `" << std::hex << optopt << "'." << endl;
        }
    }
    vector<string> URLs;
    char buffer[1024];
    string line;
    system("rm -fr /tmp/xiw412/");
    system("mkdir /tmp/xiw412/");
    if(rank == 0)
    {
        fstream fread_file(web_file.c_str(), ios::in);
        while (getline(fread_file, line)) {
            URLs.push_back(line);
        }
    }
    if(rank == 0)
    {
        to = 0;
        send_tag = 0;
        int round = 0;
        while(round * size < URLs.size())
        {
            for(int i = round * size; i < (round + 1) * size && i < URLs.size(); i++)
            {
                sprintf(buffer, "%s", URLs[i].c_str());
                cout << rank << ":" << "sending " << buffer << endl;
                MPI::COMM_WORLD.Send(buffer, 1024, MPI::CHAR, i % size, send_tag);
                to++;
                send_tag++;
            }
            tag = MPI::ANY_TAG;
            from = MPI::ANY_SOURCE;
            MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);
            st_count = status.Get_count(MPI::CHAR);
            st_source = status.Get_source();
            st_tag = status.Get_tag();
            string result("");
            result = parse(buffer, N);
            strcpy(buffer, result.c_str());
            MPI::COMM_WORLD.Send(buffer, 1024, MPI::CHAR, 0, st_tag);
            for(int i = round * size; i < (round + 1) * size && i < URLs.size(); i++)
            {
                tag = MPI::ANY_TAG;
                from = MPI::ANY_SOURCE;
                MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);
                st_count = status.Get_count(MPI::CHAR);
                st_source = status.Get_source();
                st_tag = status.Get_tag();
                cout << rank << ":" << "received from " << st_source << endl << buffer << endl;
            }
            round++;
        }
        for (int i = 1; i < size; ++i)
        {
            strcpy(buffer, "Finish");
            MPI::COMM_WORLD.Send(buffer, 1024, MPI::CHAR, i, 0);
        }
    }
    else
    {
        while(1)
        {
            tag = MPI::ANY_TAG;
            from = MPI::ANY_SOURCE;
            MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);
            st_count = status.Get_count(MPI::CHAR);
            st_source = status.Get_source();
            st_tag = status.Get_tag();
            if (strcmp(buffer, "Finish") == 0)
                break;
            string result("");
            result = parse(buffer, N);
            strcpy(buffer, result.c_str());
            MPI::COMM_WORLD.Send(buffer, 1024, MPI::CHAR, 0, st_tag);
        }
    }
    cout << "rank " << rank << ": " << "I am dying, goodbye!" << endl;
    MPI_Barrier(MPI_COMM_WORLD);
    end_time = MPI_Wtime();
    printf("The running time is : %lf \n", end_time - start_time);
    MPI::Finalize();
    return 0;
}
int main ( int argc, char *argv[] )

//****************************************************************************80
//
//  Purpose:
//
//    MAIN is the main program for MONTE_CARLO.
//
//  Discussion:
//
//    MONTE_CARLO illustrates the use of MPI with a Monte Carlo algorithm.
//
//    Generate N random points in the unit square.  Count M, the number
//    of points that are in the quarter circle.  Then PI is approximately
//    equal to the ratio 4 * M / N.
//
//    It's important that each processor use DIFFERENT random numbers.
//    One way to ensure this is to have a single master processor
//    generate all the random numbers, and then divide them up.
//
//    (A second way, not explored here, is simply to ensure that each
//    processor uses a different seed, either chosen by a master processor,
//    or generated from the processor ID.)
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    26 February 2007
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    William Gropp, Ewing Lusk, Anthony Skjellum,
//    Using MPI: Portable Parallel Programming with the
//    Message-Passing Interface,
//    Second Edition,
//    MIT Press, 1999,
//    ISBN: 0262571323.
//
{
  double calculatedPi;
  int dest;
  int done;
  double error;
  int i;
  int id;
  int in;
  int max;
  MPI::Status mesgStatus;
  int num_procs;
  int out;
  int point_max = 1000000;
  int randServer;
  int randNums[CHUNKSIZE];
  int ranks[1];
  int request;
  int temp;
  double tolerance;
  int totalin;
  int totalout;
  MPI::Group worker_group;
  MPI::Intracomm worker_comm;
  MPI::Group world_group;
  double x;
  double y;
//
//  Initialize MPI.
//
  MPI::Init ( argc, argv );
//
//  Get the number of processors.
//
  num_procs = MPI::COMM_WORLD.Get_size ( );
//
//  Get the rank of this processor.
//
  id = MPI::COMM_WORLD.Get_rank ( );

  if ( id == 0 )
  {
    timestamp ( );
    cout << "\n";
    cout << "MONTE_CARLO - Master process:\n";
    cout << " C++ version\n";
    cout << " Estimate pi by the Monte Carlo method, using MPI.\n";
    cout << "\n";
    cout << " Compiled on : " << __DATE__ << " at " << __TIME__ << ".\n";
    cout << "\n";
    cout << " The number of processes is " << num_procs << ".\n";
    cout << "\n";
    cout << " Points in the unit square will be tested\n";
    cout << " to see if they lie in the unit quarter circle.\n";
  }
//
//  Pretend that the tolerance TOLERANCE is supplied externally
//  to the master process, which must then broadcast it to all
//  other processes.
//
  if ( id == 0 )
  {
    tolerance = 0.0001;
    cout << "\n";
    cout << " The method will continue to improve the estimate until:\n";
    cout << " PI is computed to within a tolerance = " << tolerance << "\n";
    cout << " or the number of points examined reaches " << point_max << ".\n";
  }
  MPI::COMM_WORLD.Bcast ( &tolerance, 1, MPI::DOUBLE_PRECISION, 0 );

  cout << " Process " << id << " is active.\n";
//
//  Start by getting the group corresponding to the world communicator.
//
  world_group = MPI::COMM_WORLD.Get_group ( );
//
//  Put SERVER on the list of processes to exclude, and create the new
//  worker group.
//
  randServer = num_procs - 1;
  ranks[0] = randServer;
  worker_group = world_group.Excl ( 1, ranks );
//
//  Use the worker group to create the new worker communicator.
//
  worker_comm = MPI::COMM_WORLD.Create ( worker_group );
//
//  Since we only needed the worker group to create the worker
//  communicator, we can free the worker group now.
//
  worker_group.Free ( );
//
//  Here is where the computation is carried out.
//
//  I am the rand server.
//
  if ( id == randServer )
  {
# if RANDOM_SEED
    struct timeval time;
    gettimeofday ( &time, 0 );
//
//  Initialize the random number generator
//
    srandom ( (int)(time.tv_usec*1000000+time.tv_sec) );
# endif
    do
    {
      MPI::COMM_WORLD.Recv ( &request, 1, MPI::INT, MPI::ANY_SOURCE,
        NEED_NUMBERS, mesgStatus );
      if ( request )
      {
        for ( i = 0; i < CHUNKSIZE; i++)
        {
          randNums[i] = random();
        }
        dest = mesgStatus.Get_source ( );
        MPI::COMM_WORLD.Send ( randNums, CHUNKSIZE, MPI::INT, dest, RANDOM_NUMBERS );
      }
    } while ( 0 < request );
  }
//
//  I am a worker process.
//
  else
  {
    request = 1;
    done = 0;
    in = 0;
    out = 0;
    max = 2147483647;
//
//  Find the maximum integer for normalization.
//
    MPI::COMM_WORLD.Send ( &request, 1, MPI::INT, randServer, NEED_NUMBERS );
//
//  Request a string of random numbers.
//
    while ( !done )
    {
      request = 1;
      MPI::COMM_WORLD.Recv ( randNums, CHUNKSIZE, MPI::INT, randServer,
        RANDOM_NUMBERS, mesgStatus );
      for ( i = 0; i < CHUNKSIZE; )
      {
        x = ( ( float ) randNums[i++] ) / max;
        y = ( ( float ) randNums[i++] ) / max;
        if ( x * x + y * y < 1.0E+00 )
        {
          in++;
        }
        else
        {
          out++;
        }
      }
//
//  Total the number of points that are within the circle.
//
      temp = in;
      worker_comm.Reduce ( &temp, &totalin, 1, MPI::INT, MPI::SUM, 0 );
//
//  Total the number of points that are outside the circle.
//
      temp = out;
      worker_comm.Reduce ( &temp, &totalout, 1, MPI::INT, MPI::SUM, 0 );

      if ( id == 0 )
      {
        calculatedPi = ( 4.0E+00 * totalin ) / ( totalin + totalout );
        error = fabs ( calculatedPi - 3.141592653589793238462643E+00 );
        done = ( error < tolerance ) || point_max < ( totalin + totalout );
        cout << "pi = " << setw(24) << setprecision(16) << calculatedPi << "\n";
        if ( done )
        {
          request = 0;
        }
        else
        {
          request = 1;
        }
        MPI::COMM_WORLD.Send ( &request, 1, MPI::INT, randServer, NEED_NUMBERS );
        worker_comm.Bcast ( &done, 1, MPI::INT, 0 );
      }
      else
      {
        worker_comm.Bcast ( &done, 1, MPI::INT, 0 );
        if ( !done )
        {
          request = 1;
          MPI::COMM_WORLD.Send ( &request, 1, MPI::INT, randServer, NEED_NUMBERS );
        }
      }
    }
  }

  if ( id == 0 )
  {
    cout << "\n";
    cout << "Number of points: " << totalin + totalout << "\n";
    cout << "Number inside: " << totalin << "\n";
    cout << "Number outside: " << totalout << "\n";
  }
//
//  Terminate MPI.
//
  MPI::Finalize ( );
//
//  Terminate.
//
  if ( id == 0 )
  {
    cout << "\n";
    cout << "MONTE_CARLO - Master process:\n";
    cout << " Normal end of execution.\n";
    cout << "\n";
    timestamp ( );
  }
  return 0;
}
void PPS::start() {
    // Define a parameters struct for MPI.
    // See http://lists.mcs.anl.gov/pipermail/mpich-discuss/2009-April/004880.html for an example.
    MPI::Datatype MPIPPSTRUCT;
    int blockcounts[2];
    MPI::Aint offsets[2];
    MPI::Datatype datatypes[2];
    MPI::Aint extent, lb;
    blockcounts[0] = 9;   // number of ints
    blockcounts[1] = 13;  // number of __fpv
    datatypes[0] = MPI::INT;
    datatypes[1] = MPIFPV;
    offsets[0] = 0;
    MPI::INT.Get_extent(lb, extent);
    offsets[1] = blockcounts[0] * extent;
    MPIPPSTRUCT = MPI::Datatype::Create_struct(2, blockcounts, offsets, datatypes);
    MPIPPSTRUCT.Commit();

    if(PPS::pid == 0) {
        struct parameters temp;
        int start, i, countdown = PPS::comm_size - 1;
        bool ready = false;
        MPI::Status status;
        // Logs
        std::ofstream logsfile;
        logsfile.open("tslogs.txt", std::fstream::out | std::fstream::trunc);
        while(true) {
            if(countdown == 0) break;
            // Check the first ready-to-compute process
            MPI::COMM_WORLD.Recv(&ready, 1, MPI::BOOL, MPI_ANY_SOURCE, 0, status);
            // Logs
            logsfile << "Remaining sims: " << PPS::plist.size() << " process countdown: " << countdown << std::endl;
            // Send a stop status to the process if there is nothing left to simulate
            if(ready) {
                if(PPS::plist.size() == 0) {
                    start = EXIT_PROCESS;
                    MPI::COMM_WORLD.Send(&start, 1, MPI::INT, status.Get_source(), 0);
                    countdown = countdown - 1;
                } else {
                    // Prepare it to receive the params and start the sim
                    // (an int that contains the simulation number, -1 = exit)
                    start = PPS::plist.size() - 1;
                    MPI::COMM_WORLD.Send(&start, 1, MPI::INT, status.Get_source(), 0);
                    temp = PPS::plist.back();
                    // Deploy the parameter struct
                    MPI::COMM_WORLD.Send(&temp, 1, MPIPPSTRUCT, status.Get_source(), 0);
                    // Pull the parameter struct out of the list
                    plist.pop_back();
                }
            }
            ready = false;
        }
        logsfile.close();
    } else {
        int status;
        bool ready = true;
        struct parameters recvparams;
        while(true) {
            status = EXIT_PROCESS;
            // Tell the master with a point-to-point send that this process is free
            MPI::COMM_WORLD.Send(&ready, 1, MPI::BOOL, 0, 0);
            // Receive either an exit status or the index of a new simulation to start
            MPI::COMM_WORLD.Recv(&status, 1, MPI::INT, 0, 0);
            if(status != EXIT_PROCESS) {
                // Wait to receive the parameters, then start the simulation
                MPI::COMM_WORLD.Recv(&recvparams, 1, MPIPPSTRUCT, 0, 0);
                PottsSim(recvparams, "output/" + std::to_string(PPS::pid) + "_proc_output.dat", status);
            } else {
                std::cout << "I'm the process " << PPS::pid << ", ready to die." << std::endl;
                break;
            }
        }
    }
    MPIPPSTRUCT.Free();
}
int main(int argc, char* argv[])
{
    int pid;            // rank of the current process
    int no_of_process;  // total number of processes
    int size;           // number of queue entries to allocate to each process

    // Initializing the MPI environment
    MPI::Init ( argc, argv );

    // Getting the number of processes
    no_of_process = MPI::COMM_WORLD.Get_size();

    // Handle being run as a single process.
    if(no_of_process < 2) {
        cout << "\n ERROR: You'll need at least 2 processes to run this application.\n\n";
        MPI_Finalize();
        return 0;
    }

    // argv[1] - PERCENT OF KEYWORDS REQUIRED ; argv[2] - FOLDER PATH
    if(!argv[1] || !argv[2]) {
        cout << "\n\n Parameter not provided. Quitting\n";
        MPI_Finalize();
        return 0;
    }

    // Get the process ID
    pid = MPI::COMM_WORLD.Get_rank();

    // Process ID 0 => Initial Process
    if(pid == 0) {
        queue<string> que;
        que.push(string(argv[2], strlen(argv[2])));

        /********* INITIAL STRUCTURE TO HAVE SOME VALUES IN THE QUEUE ***************/
        string dir = que.front();
        que.pop();
        DIR *dp;
        struct dirent *dirp;
        if((dp = opendir(dir.c_str())) == NULL) {
            cout << "Error(" << errno << ") opening " << dir << endl;
            return errno;
        }
        while ((dirp = readdir(dp)) != NULL) {
            if(((string)dirp->d_name).compare(".") == 0 || ((string)dirp->d_name).compare("..") == 0) {
                continue;
            }
            // Push all the files into the queue
            que.push(dir + "/" + string(dirp->d_name));
        }
        closedir(dp);
        /********* INITIAL STRUCTURE TO HAVE SOME VALUES IN THE QUEUE ***************/

        while(!que.empty()) {
            // ======== FUNCTION TO PRINT QUEUE VALUES ========
            queue<string> que3;
            que3 = que;
            cout << "\n\n PARENT Queue : " << endl;
            // Temporarily print the values in the queue
            while(!que3.empty()) {
                cout << que3.front() << endl;
                que3.pop();
            }

            // Allocate work to processes equally.
            int i = 0;
            size = 1;   // by default, allocate one directory per process
            string buf; // buffer used to send the folders to the subordinate processes
            if(que.size() > (no_of_process - 1)) {
                size = ceil((float)que.size() / (no_of_process - 1));
            }

            /************* PARENT SENDER PROCESS ***********************/
            /************* ===================== ***********************/
            while(!que.empty() && i <= no_of_process - 1) {
                int j = 0;
                buf = "";
                while(j < size && !que.empty()) {
                    buf += que.front();
                    que.pop();
                    buf += ";";
                    j++;
                }
                // MPI::Comm::Send(const void* buf, int count, MPI::Datatype& datatype, int dest, int tag)
                MPI::COMM_WORLD.Send(buf.c_str(), buf.length(), MPI::CHAR, i + 1, i + 1);
                i++;
            }

            /************* PARENT RECEIVER PROCESS ***********************/
            /************* ======================= ***********************/
            while(i > 0) {
                MPI::Status status;
                // Probe for values first
                MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, status);
                int l = status.Get_count(MPI::CHAR);
                char *buf = new char[l];
                const auto sender = status.Get_source();
                const auto tag = status.Get_tag();
                // MPI::Comm::Recv(void* buf, int count, MPI::Datatype& datatype, int source, int tag, MPI::Status& status)
                MPI::COMM_WORLD.Recv(buf, l, MPI::CHAR, sender, tag, status);
                string fname(buf, l);
                delete [] buf;
                vector<string> fnames;
                boost::split(fnames, fname, boost::is_any_of(";"));
                for(int k = 0; k < fnames.size(); k++) {
                    if(fnames[k].length())
                        que.push(fnames[k]);
                }
                i--;
            }
        }

        vector<int> processes_with_files;  // ranks of the processes that hold files
        set<string> queue_values;
        vector<string> vec_queue_values;

        /************* IF QUEUE EMPTY, PROCEED TO QUERY PROCESSING ***********************/
        /************* =========================================== ***********************/
        if(que.empty()) {
            // Message asking children to send their file availability
            string send_rank_message = "SEND IF YOU HAVE";
            // Ask each child to report whether it holds files
            for(int rank_values = 1; rank_values < no_of_process; rank_values++) {
                MPI::COMM_WORLD.Send(send_rank_message.c_str(), send_rank_message.length(), MPI::CHAR, rank_values, rank_values);
            }
            // Values for reception
            int rank_received[no_of_process];
            rank_received[0] = 0;  // parent process, so excluding it
            for(int rank_values = 1; rank_values < no_of_process; rank_values++) {
                MPI::Status status;
                // Probe for incoming values
                MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, status);
                // Get source and tag
                const auto sender = status.Get_source();
                const auto tag = status.Get_tag();
                MPI::COMM_WORLD.Recv(&rank_received[sender], 1, MPI::INT, sender, tag, status);
            }
            // String of rank values to be sent to all the child processes
            string processes_with_files_str = "";
            // Store the ranks of the processes that have files
            for(int i = 1; i < no_of_process; i++) {
                if(rank_received[i] == 1) {
                    processes_with_files_str += to_string(i) + ";";
                    processes_with_files.push_back(i);
                }
            }
            string process_list_message = "ABOUT TO SEND PROCESS VALUES";
            for(int i = 0; i < processes_with_files.size(); i++) {
                MPI::COMM_WORLD.Send(process_list_message.c_str(), process_list_message.length(), MPI::CHAR, processes_with_files[i], processes_with_files[i]);
                MPI::COMM_WORLD.Send(processes_with_files_str.c_str(), processes_with_files_str.length(), MPI::CHAR, processes_with_files[i], processes_with_files[i]);
            }
        }  // End of queue empty condition

        int val_recv;
        // Expecting a reply from all child processes
        for(int i = 0; i < processes_with_files.size(); i++) {
            MPI::Status status;
            // Probe for incoming values
            MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, status);
            // Get source and tag
            auto sender = status.Get_source();
            auto tag = status.Get_tag();
            MPI::COMM_WORLD.Recv(&val_recv, 1, MPI::INT, sender, tag, status);
        }

        while(1) {
            int choice;
            string task_message;
            char whatfile[400];
            cout << "\n\n Graph Processed. What do you want to do now? \n 1. Find all the files related to another file\n 2. Find the Transitive Closure of a file\n 3. Exit\n 4. Choice : ";
            cin >> choice;
            cin.ignore (std::numeric_limits<std::streamsize>::max(), '\n');
            switch(choice) {
            case 1:
                cout << "\n Enter the file name : ";
                cin.getline(whatfile, 400);
                task_message = string(whatfile, strlen(whatfile)) + ";Related Files";
                cout << "\n" << task_message;
                break;
            case 2:
                cout << "\n Enter the file name you wish to find the transitive closure for : ";
                cin.getline(whatfile, 400);
                task_message = string(whatfile, strlen(whatfile)) + ";Transitive Closure;Just Tell";
                queue_values.insert(whatfile);
                vec_queue_values.push_back(whatfile);
                break;
            case 3:
                task_message = "EXIT NOW";
            default:;
            }
            for(int rank_values = 1; rank_values < no_of_process; rank_values++) {
                MPI::COMM_WORLD.Send(task_message.c_str(), task_message.length(), MPI::CHAR, rank_values, rank_values);
            }
            if(choice == 3) {
                cout << "\n PARENT : QUITTING. BYE!";
                break;
            }
            else if (choice == 2) {
                int send_flag = 1;
                while(send_flag) {
                    send_flag = 0;
                    char* char_value = NULL;
                    int char_length;
                    for(int i = 0; i < vec_queue_values.size(); i++) {
                        MPI::Status status;
                        // Probe for incoming values
                        MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, status);
                        // Get length, source and tag
                        char_length = status.Get_count(MPI::CHAR);
                        char_value = new char[char_length];
                        auto sender = status.Get_source();
                        auto tag = status.Get_tag();
                        MPI::COMM_WORLD.Recv(char_value, char_length, MPI::CHAR, sender, tag, status);
                    }
                    string recd_string(char_value, char_length);
                    delete [] char_value;
                    vector<string> recd_file_vector;
                    string send_string_val = "";
                    // Clear the vector of queue values
                    vec_queue_values.clear();
                    boost::split(recd_file_vector, recd_string, boost::is_any_of(";"));
                    for(int i = 0; i < recd_file_vector.size(); i++) {
                        if(recd_file_vector[i].length()) {
                            if(queue_values.find(recd_file_vector[i]) == queue_values.end()) {
                                send_flag = 1;
                                queue_values.insert(recd_file_vector[i]);
                                vec_queue_values.push_back(recd_file_vector[i]);
                                send_string_val += recd_file_vector[i] + ";";
                            }
                        }
                    }
                    send_string_val += "Transitive Closure;Find One";
                    if(send_flag) {
                        for(int rank_values = 1; rank_values < no_of_process; rank_values++) {
                            MPI::COMM_WORLD.Send(send_string_val.c_str(), send_string_val.length(), MPI::CHAR, rank_values, rank_values);
                        }
                    }
                    else {
                        cout << "\n\n Connected File Names : \n";
                        queue_values.erase(whatfile);
                        for(auto x : queue_values) {
                            cout << x << "\n";
                        }
                        queue_values.clear();
                        vec_queue_values.clear();
                    }
                }
            }
            else {
                MPI::Status status;
                // Probe for incoming values
                MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, status);
                // Get source and tag
                auto sender = status.Get_source();
                auto tag = status.Get_tag();
                MPI::COMM_WORLD.Recv(&val_recv, 1, MPI::INT, sender, tag, status);
            }
        }  // End of While Loop
    }  // END OF PROCESS 0
void *mpi_thread(void *arg)
{
    struct State *state = (struct State *)arg;
    int buf;
    MPI::Status status;
    set<int> queue;
    bool inside = false;
    while (1) {
        MPI::COMM_WORLD.Recv(&buf, 1, MPI::INT, MPI::ANY_SOURCE, MPI::ANY_TAG, status);
        state->lamport = max(state->lamport, buf) + 1;
        switch (status.Get_tag()) {
        case INSIDE_TAG: // enter/exit
            if (!inside) {
                for (int i = 0; i < state->size; i++) {
                    if (i != state->rank) {
                        MPI::COMM_WORLD.Send(&state->lamport, 1, MPI::INT, i, REQUEST_TAG);
                    }
                }
                int request_clock = state->lamport;
                int replies_received = 0;
                while (replies_received < state->size - 1) {
                    MPI::COMM_WORLD.Recv(&buf, 1, MPI::INT, MPI::ANY_SOURCE, MPI::ANY_TAG, status);
                    state->lamport = max(state->lamport, buf) + 1;
                    switch (status.Get_tag()) {
                    case REQUEST_TAG:
                        if (request_clock < buf || (buf == request_clock && state->rank < status.Get_source())) {
                            // current process has higher priority
                            queue.insert(status.Get_source());
                        } else {
                            // other process has higher priority
                            MPI::COMM_WORLD.Send(&state->lamport, 1, MPI::INT, status.Get_source(), AGREE_TAG);
                        }
                        break;
                    case AGREE_TAG:
                        if (buf > request_clock) {
                            replies_received++;
                            log(state, "comm: Agree %d received from %d", buf, status.Get_source());
                        }
                        break;
                    default:
                        log(state, "comm: Unknown message tag %d", status.Get_tag());
                    }
                }
                inside = true;
                unique_lock<mutex> lck(state->mtx);
                state->ready = true;
                state->cv.notify_all();
                lck.unlock();
            } else {
                // broadcast agree to all in queue
                char *repr = (char *)malloc(1024);
                *repr = '\0';
                for (int p : queue) {
                    sprintf(repr + strlen(repr), "%d, ", p);
                }
                state->lamport++;
                log(state, "comm: !!! LEFT, %s", repr);
                free(repr);
                for (int p : queue) {
                    MPI::COMM_WORLD.Send(&state->lamport, 1, MPI::INT, p, AGREE_TAG);
                }
                queue.clear();
                inside = false;
            }
            break;
        case REQUEST_TAG:
            if (inside) {
                queue.insert(status.Get_source());
            } else {
                MPI::COMM_WORLD.Send(&state->lamport, 1, MPI::INT, status.Get_source(), AGREE_TAG);
                state->lamport++;
            }
            break;
        case AGREE_TAG:
            break;
        default:
            log(state, "comm: Unknown message tag %d", status.Get_tag());
        }
    }
}
int main ( int argc, char *argv[] )

//****************************************************************************80
//
//  Purpose:
//
//    MAIN is the main program for SEARCH.
//
//  Discussion:
//
//    SEARCH demonstrates the use of MPI routines to carry out a search.
//
//    An array of given size is to be searched for occurrences of a
//    specific value.
//
//    The search is done in parallel.  A master process generates the
//    array and the target value, then distributes the information among
//    a set of worker processes, and waits for them to communicate back
//    the (global) index values at which occurrences of the target value
//    were found.
//
//    An interesting feature of this program is the use of allocatable
//    arrays, which allows the master program to set aside just enough
//    memory for the whole array, and for each worker program to set aside
//    just enough memory for its own part of the array.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    01 September 2009
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    William Gropp, Ewing Lusk, Anthony Skjellum,
//    Using MPI: Portable Parallel Programming with the
//    Message-Passing Interface,
//    Second Edition,
//    MIT Press, 1999,
//    ISBN: 0262571323.
//
{
  int *a;
  int dest;
  float factor;
  int global;
  int i;
  int id;
  int ierr;
  int n;
  int npart;
  int p;
  int source;
  int start;
  MPI::Status status;
  int tag;
  int tag_target = 1;
  int tag_size = 2;
  int tag_data = 3;
  int tag_found = 4;
  int tag_done = 5;
  int target;
  int workers_done;
  int x;
//
//  Initialize MPI.
//
  MPI::Init ( argc, argv );
//
//  Get this processes's rank.
//
  id = MPI::COMM_WORLD.Get_rank ( );
//
//  Find out how many processes are available.
//
  p = MPI::COMM_WORLD.Get_size ( );

  if ( id == 0 )
  {
    timestamp ( );
    cout << "\n";
    cout << "SEARCH - Master process:\n";
    cout << " C++ version\n";
    cout << " An example MPI program to search an array.\n";
    cout << "\n";
    cout << " Compiled on " << __DATE__ << " at " << __TIME__ << ".\n";
    cout << "\n";
    cout << " The number of processes is " << p << "\n";
  }
  cout << "\n";
  cout << "Process " << id << " is active.\n";
//
//  Have the master process generate the target and data.  In a more
//  realistic application, the data might be in a file which the master
//  process would read.  Here, the master process decides.
//
  if ( id == 0 )
  {
//
//  Pick the number of data items per process, and set the total.
//
    factor = ( float ) rand ( ) / ( float ) RAND_MAX;
    npart = 50 + ( int ) ( factor * 100.0E+00 );
    n = npart * p;
    cout << "\n";
    cout << "SEARCH - Master process:\n";
    cout << " The number of data items per process is " << npart << "\n";
    cout << " The total number of data items is " << n << ".\n";
//
//  Now allocate the master copy of A, fill it with values, and pick
//  a value for the target.
//
    a = new int[n];
    factor = ( float ) n / 10.0E+00 ;
    for ( i = 0; i < n; i++ )
    {
      a[i] = ( int ) ( factor * ( float ) rand ( ) / ( float ) RAND_MAX );
    }
    target = a[n/2];
    cout << " The target value is " << target << ".\n";
//
//  The worker processes need to have the target value, the number of data items,
//  and their individual chunk of the data vector.
//
    for ( i = 1; i <= p-1; i++ )
    {
      dest = i;
      tag = tag_target;
      MPI::COMM_WORLD.Send ( &target, 1, MPI::INT, dest, tag );
      tag = tag_size;
      MPI::COMM_WORLD.Send ( &npart, 1, MPI::INT, dest, tag );
      start = ( i - 1 ) * npart;
      tag = tag_data;
      MPI::COMM_WORLD.Send ( a+start, npart, MPI::INT, dest, tag );
    }
//
//  Now the master process simply waits for each worker process to report that
//  it is done.
//
    workers_done = 0;
    while ( workers_done < p-1 )
    {
      MPI::COMM_WORLD.Recv ( &x, 1, MPI::INT, MPI::ANY_SOURCE, MPI::ANY_TAG, status );
      source = status.Get_source ( );
      tag = status.Get_tag ( );
      if ( tag == tag_done )
      {
        workers_done = workers_done + 1;
      }
      else if ( tag == tag_found )
      {
        cout << "P" << source << " " << x << " " << a[x] << "\n";
      }
      else
      {
        cout << " Master process received message with unknown tag = " << tag << ".\n";
      }
    }
//
//  The master process can throw away A now.
//
    delete [] a;
  }
//
//  Each worker process expects to receive the target value, the number of data
//  items, and the data vector.
//
  else
  {
    source = 0;
    tag = tag_target;
    MPI::COMM_WORLD.Recv ( &target, 1, MPI::INT, source, tag, status );

    source = 0;
    tag = tag_size;
    MPI::COMM_WORLD.Recv ( &npart, 1, MPI::INT, source, tag, status );

    a = new int[npart];

    source = 0;
    tag = tag_data;
    MPI::COMM_WORLD.Recv ( a, npart, MPI::INT, source, tag, status );
//
//  The worker simply checks each entry to see if it is equal to the target
//  value.
//
    for ( i = 0; i < npart; i++ )
    {
      if ( a[i] == target )
      {
        global = ( id - 1 ) * npart + i;
        dest = 0;
        tag = tag_found;
        MPI::COMM_WORLD.Send ( &global, 1, MPI::INT, dest, tag );
      }
    }
//
//  When the worker is finished with the loop, it sends a dummy data value with
//  the tag "TAG_DONE" indicating that it is done.
//
    dest = 0;
    tag = tag_done;
    MPI::COMM_WORLD.Send ( &target, 1, MPI::INT, dest, tag );
    delete [] ( a );
  }
//
//  Terminate MPI.
//
  MPI::Finalize ( );
//
//  Terminate.
//
  if ( id == 0 )
  {
    cout << "\n";
    cout << "SEARCH - Master process:\n";
    cout << " Normal end of execution.\n";
    cout << "\n";
    timestamp ( );
  }
  return 0;
}
int main(int argc, char** argv)
{
    /* run multiple trials of Develep
       input: trial text file
       first column: number of trials to run
       second column: parameter file for trials
       third column: data for trials
    */
    string trialsetup(argv[1]);
    int totaltrials = 0;
    vector<int> trialset;
    vector<string> paramfile;
    vector<string> datafile;
    ifstream fs(trialsetup);
    getTrialSetup(fs, totaltrials, trialset, paramfile, datafile);
    int numsent = 0;

    // MPI stuff
    int master = 0;
    int ierr;
    MPI::Init();
    int numprocs = MPI::COMM_WORLD.Get_size();
    int myid = MPI::COMM_WORLD.Get_rank();
    MPI::Status status;

    try {
        if (myid == master) {
            cout << "Running trials of ellenGP: \n Number of trials: " + to_string(static_cast<long long>(totaltrials))
                 + "\n Number of processors: " + to_string(static_cast<long long>(numprocs)) + "\n";
            // schedule tasks from master node
            for (int i = 0; i < min(numprocs - 1, totaltrials); i++) {
                MPI::COMM_WORLD.Send(paramfile.at(i).c_str(), paramfile.at(i).length(), MPI::CHAR, i + 1, i + 1);
                MPI::COMM_WORLD.Send(datafile.at(i).c_str(), datafile.at(i).length(), MPI::CHAR, i + 1, i + 1);
                numsent++;
            }
            int stops = 0;
            while (numsent <= totaltrials && stops < numprocs - 1) {
                int ans;
                MPI::COMM_WORLD.Recv(&ans, 1, MPI::INT, MPI::ANY_SOURCE, MPI::ANY_TAG, status);
                const int sender = status.Get_source();
                if (numsent < totaltrials) {
                    MPI::COMM_WORLD.Send(paramfile.at(numsent).c_str(), paramfile.at(numsent).length(), MPI::CHAR, sender, numsent + 1);
                    MPI::COMM_WORLD.Send(datafile.at(numsent).c_str(), datafile.at(numsent).length(), MPI::CHAR, sender, numsent + 1);
                    ++numsent;
                }
                else { // send the stop command: an empty message with tag 0
                    MPI::COMM_WORLD.Send(MPI::BOTTOM, 0, MPI::CHAR, sender, 0);
                    ++stops;
                }
            }
            cout << "out of master while loop\n";
        }
        else {
            // receive tasks and send completion messages to master
            bool cont = true;
            while (cont) {
                if (myid <= totaltrials) {
                    MPI::COMM_WORLD.Probe(master, MPI::ANY_TAG, status);
                    int l1 = status.Get_count(MPI::CHAR);
                    char * pbuff = new char[l1];
                    MPI::COMM_WORLD.Recv(pbuff, l1, MPI::CHAR, master, MPI::ANY_TAG, status);
                    if (status.Get_tag() != 0) {
                        MPI::COMM_WORLD.Probe(master, MPI::ANY_TAG, status);
                        int l2 = status.Get_count(MPI::CHAR);
                        char * dbuff = new char[l2];
                        MPI::COMM_WORLD.Recv(dbuff, l2, MPI::CHAR, master, MPI::ANY_TAG, status);
                        if (status.Get_tag() != 0) {
                            int tag = status.Get_tag();
                            string pfile(pbuff, l1);
                            string dfile(dbuff, l2);
                            cout << "running process " + to_string(static_cast<long long>(tag)) + " of "
                                 + to_string(static_cast<long long>(totaltrials)) + " on processor "
                                 + to_string(static_cast<long long>(myid)) + " : "
                                 + pfile.substr(pfile.rfind('/') + 1, pfile.size()) + ", "
                                 + dfile.substr(dfile.rfind('/') + 1, dfile.size()) + "\n";
                            // run develep
                            runEllenGP(pfile, dfile, 1, myid);
                            cout << "\nfinished process " + to_string(static_cast<long long>(tag)) + " of "
                                 + to_string(static_cast<long long>(totaltrials)) + " on processor "
                                 + to_string(static_cast<long long>(myid)) + " : "
                                 + pfile.substr(pfile.rfind('/') + 1, pfile.size()) + ", "
                                 + dfile.substr(dfile.rfind('/') + 1, dfile.size()) + "\n";
                            // send message when finished
                            int tmp = 1;
                            MPI::COMM_WORLD.Send(&tmp, 1, MPI::INT, master, myid);
                        }
                        else {
                            cont = false;
                        }
                        delete [] dbuff;
                    }
                    else {
                        cont = false;
                    }
                    delete [] pbuff;
                }
            }
        }
        MPI::Finalize();
        char key;
        if (myid == master)
            cout << "All trials completed. Exiting..." << endl;
    }
    catch (const std::bad_alloc&) {
        cout << "bad allocation error from processor " << to_string(static_cast<long long>(myid)) << "\n";
        exit(1);
    }
    catch (exception& er) {
        cout << "Error: " << er.what() << endl;
        exit(1);
    }
    catch (...) {
        cout << "Exception Occurred." << endl;
        exit(1);
    }
    return 0;
}
void DataBus::createDynamicTypes(int bodyNum)
{
    LOG_DEBUG("Building dynamic MPI types for fast node sync");
    auto& engine = Engine::getInstance();
    GCMDispatcher* dispatcher = engine.getDispatcher();
    Body* body = engine.getBody(bodyNum);
    TetrMeshSecondOrder* mesh = (TetrMeshSecondOrder*)body->getMeshes();

    // TODO add more cleanup code here to prevent memory leaks
    if (MPI_NODE_TYPES != NULL) {
        LOG_TRACE("Cleaning old types");
        for (int i = 0; i < numberOfWorkers; i++) {
            for (int j = 0; j < numberOfWorkers; j++) {
                LOG_TRACE("Cleaning type " << i << " " << j);
                LOG_TRACE("Size " << i << " " << j << " = " << local_numbers[i][j].size());
                if (local_numbers[i][j].size() > 0)
                    MPI_NODE_TYPES[i][j].Free();
            }
        }
        delete[] MPI_NODE_TYPES;
    }
    if (local_numbers != NULL) {
        for (int i = 0; i < numberOfWorkers; i++)
            delete[] local_numbers[i];
        delete[] local_numbers;
    }

    // FIXME - it's overhead
    local_numbers = new vector<int>*[numberOfWorkers];
    vector<int> **remote_numbers = new vector<int>*[numberOfWorkers];
    MPI_NODE_TYPES = new MPI::Datatype*[numberOfWorkers];
    for (int i = 0; i < numberOfWorkers; i++) {
        local_numbers[i] = new vector<int>[numberOfWorkers];
        remote_numbers[i] = new vector<int>[numberOfWorkers];
        MPI_NODE_TYPES[i] = new MPI::Datatype[numberOfWorkers];
    }

    BARRIER("DataBus::createDynamicTypes#0");

    // find all remote nodes
    for (int j = 0; j < mesh->getNodesNumber(); j++) {
        CalcNode& node = mesh->getNodeByLocalIndex(j);
        if ( node.isRemote() ) {
            int owner = dispatcher->getOwner(node.coords);
            assert_ne(owner, rank);
            local_numbers[rank][owner].push_back( mesh->nodesMap[node.number] );
            remote_numbers[rank][owner].push_back(node.number);
        }
    }

    BARRIER("DataBus::createDynamicTypes#1");

    LOG_DEBUG("Requests prepared:");
    for (int i = 0; i < numberOfWorkers; i++)
        for (int j = 0; j < numberOfWorkers; j++)
            LOG_DEBUG("Request size from #" << i << " to #" << j << ": " << local_numbers[i][j].size());

    // sync types
    unsigned int max_len = 0;
    for (int i = 0; i < numberOfWorkers; i++)
        for (int j = 0; j < numberOfWorkers; j++)
            if (local_numbers[i][j].size() > max_len)
                max_len = local_numbers[i][j].size();
    vector<int> lengths;
    for (unsigned int i = 0; i < max_len; i++)
        lengths.push_back(1);
    int info[3];
    vector<MPI::Request> reqs;
    for (int i = 0; i < numberOfWorkers; i++)
        for (int j = 0; j < numberOfWorkers; j++)
            if (local_numbers[i][j].size() > 0)
            {
                info[0] = remote_numbers[i][j].size();
                info[1] = i;
                info[2] = j;
                MPI_NODE_TYPES[i][j] = MPI_ELNODE.Create_indexed(
                        local_numbers[i][j].size(), &lengths[0], &local_numbers[i][j][0]);
                MPI_NODE_TYPES[i][j].Commit();
                reqs.push_back( MPI::COMM_WORLD.Isend(
                        &remote_numbers[i][j][0], remote_numbers[i][j].size(),
                        MPI::INT, j, TAG_SYNC_NODE_TYPES) );
                reqs.push_back( MPI::COMM_WORLD.Isend(
                        info, 3, MPI::INT, j, TAG_SYNC_NODE_TYPES_I) );
            }

    BARRIER("DataBus::createDynamicTypes#2");

    MPI::Status status;
    while (MPI::COMM_WORLD.Iprobe(MPI::ANY_SOURCE, TAG_SYNC_NODE_TYPES_I, status))
    {
        MPI::COMM_WORLD.Recv(info, 3, MPI::INT, status.Get_source(), TAG_SYNC_NODE_TYPES_I);
        local_numbers[info[1]][info[2]].resize(info[0]);
        MPI::COMM_WORLD.Recv(&local_numbers[info[1]][info[2]][0], info[0],
                MPI::INT, status.Get_source(), TAG_SYNC_NODE_TYPES);
        if (lengths.size() < (unsigned)info[0])
            for (int i = lengths.size(); i < info[0]; i++)
                lengths.push_back(1);
        // translate the received global node numbers into local indices
        for (int i = 0; i < info[0]; i++)
            local_numbers[info[1]][info[2]][i] = mesh->nodesMap[ local_numbers[info[1]][info[2]][i] ];
        MPI_NODE_TYPES[info[1]][info[2]] = MPI_ELNODE.Create_indexed(
                info[0], &lengths[0], &local_numbers[info[1]][info[2]][0]);
        MPI_NODE_TYPES[info[1]][info[2]].Commit();
    }
    MPI::Request::Waitall(reqs.size(), &reqs[0]);

    BARRIER("DataBus::createDynamicTypes#3");

    for (int i = 0; i < numberOfWorkers; i++)
        delete[] remote_numbers[i];
    delete[] remote_numbers;

    LOG_DEBUG("Building dynamic MPI types for fast node sync done");
}