Example #1
void LUResolveMPI(int n,float resolving[][maxN])
{
//First create a contiguous datatype describing one matrix row; the matrix is assumed to be initialized on a single (root) process
	array=MPI::FLOAT.Create_contiguous(n);
	array.Commit();

//Scatter the rows of resolving across the processes (cyclic row distribution)
	for(int i=0;i<n;i+=totalProcNum)
		MPI::COMM_WORLD.Scatter(resolving+i, 1, array, resolving+i+nowProcID, 1, array, 0);

//Here we assume the number of processes evenly divides the matrix size, so the scatter covers the whole matrix

//Start the elimination rounds

	for(int nowRound=0;nowRound<n;nowRound++)
	{
		int master=nowRound%totalProcNum;
		if(nowProcID==master)
			for(int i=nowRound+1;i<n;i++)
				buffer[i]=(resolving[nowRound][i]/=resolving[nowRound][nowRound]);
		//Broadcast the scaled pivot row to every process
		MPI::COMM_WORLD.Bcast(buffer+nowRound+1,n-nowRound-1,MPI::FLOAT,master);
		//Use the broadcast pivot row to update the rows this process owns
		for(int row=n-totalProcNum+nowProcID;row>=nowRound+1;row-=totalProcNum)
			for(int column=nowRound+1;column<n;column++)
				resolving[row][column]=resolving[row][column]-resolving[row][nowRound]*buffer[column];
	}
//Elimination finished

//Gather the distributed rows back onto the root process
	for(int i=0;i<n;i+=totalProcNum)
		MPI::COMM_WORLD.Gather(resolving+i+nowProcID,1,array,resolving+i,1,array,0);
//Done gathering; release the row datatype
	array.Free();
}
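The function above relies on globals (array, buffer, totalProcNum, nowProcID, maxN) that the excerpt does not show. A minimal driver sketch that supplies them (the declarations and values below are assumptions, not part of the original; maxN is kept equal to n because the Scatter/Gather calls use a row type of length n while rows are maxN floats apart):

// Hypothetical globals and driver for LUResolveMPI (assumed, not in the excerpt)
#include <mpi.h>

const int maxN = 8;               // kept equal to n below (see note above)

MPI::Datatype array;              // contiguous datatype for one row of n floats
float buffer[maxN];               // holds the broadcast pivot row
int totalProcNum, nowProcID;

void LUResolveMPI(int n, float resolving[][maxN]);

int main(int argc, char* argv[])
{
	MPI::Init(argc, argv);
	totalProcNum = MPI::COMM_WORLD.Get_size();
	nowProcID = MPI::COMM_WORLD.Get_rank();

	static float resolving[maxN][maxN];
	int n = maxN;                 // assumed to be a multiple of totalProcNum
	if (nowProcID == 0) {
		// ... fill resolving on the root process ...
	}

	LUResolveMPI(n, resolving);

	MPI::Finalize();
	return 0;
}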
Example #2
void runSimulation()
{
    int outputFrequency = 1;
    int maxSteps = 200;
    double factor = pow(MPILayer().size(), 1.0/3.0);
    Coord<3> baseDim(15, 15, 15);
    Coord<3> dim(factor * baseDim.x(), factor * baseDim.y(), factor * baseDim.z());

    MPI::Aint displacements[] = {0};
    MPI::Datatype memberTypes[] = {MPI::CHAR};
    int lengths[] = { sizeof(CELL) };
    MPI::Datatype objType;
    objType =
        MPI::Datatype::Create_struct(1, lengths, displacements, memberTypes);
    objType.Commit();


    NBodyInitializer<CELL> *init = new NBodyInitializer<CELL>(dim, maxSteps);

    // HiParSimulator::HiParSimulator<CELL, HiParSimulator::RecursiveBisectionPartition<3> > sim(
    //     init,
    //     MPILayer().rank() ? 0 : new TracingBalancer(new NoOpBalancer()),
    //     maxSteps,
    //     1,
    //     objType);

    SerialSimulator<CELL> sim(init);

    if (MPILayer().rank() == 0) {
        std::cout << "ranks: " << MPILayer().size() << "\n"
                  << "dim: " << dim << "\n"
                  << "serial1\n";
        sim.addWriter(
            new TracingWriter<CELL>(outputFrequency, init->maxSteps()));
    }

    long long tStart = Chronometer::timeUSec();
    sim.run();
    long long tEnd = Chronometer::timeUSec();

    double seconds = 1e-6 * (tEnd - tStart);
    double flops =
        // time steps * grid size
        1.0 * maxSteps * dim.prod() *
        // interactions per container update
        27 * CELL::SIZE * CELL::SIZE *
        // FLOPs per interaction
        (3 + 6 + 1 + 6);
    double gflops = 1e-9 * flops / seconds;
    std::cout << "GFLOPS: " << gflops << "\n"
              << "----------------------------------------------------------------------\n";
}
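As a rough worked check of the FLOP estimate above (CELL::SIZE = 64 is an assumed value, not taken from the original): with 8 ranks, factor = 2 and dim = 30x30x30 = 27,000 cells, so flops = 200 * 27,000 * 27 * 64^2 * (3 + 6 + 1 + 6) ≈ 9.6e12; a run taking 10 seconds would then report roughly 960 GFLOPS.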
void
ParaSolverTerminationStateMpi::receive(
      ParaComm *comm,
      int source,
      int tag
      )
{
   DEF_PARA_COMM( commMpi, comm);

   MPI::Datatype datatype;
   datatype = createDatatype();
   datatype.Commit();
   PARA_COMM_CALL(
      commMpi->receive(&interrupted, 1, datatype, source, tag)
   );
   datatype.Free();
}
void
ParaSolverTerminationStateMpi::send(
      ParaComm *comm,
      int destination,
      int tag
      )
{
   DEF_PARA_COMM( commMpi, comm);

   MPI::Datatype datatype;
   datatype = createDatatype();
   datatype.Commit();
   PARA_COMM_CALL(
      commMpi->send(&interrupted, 1, datatype, destination, tag)
   );
   datatype.Free();
}
void
ParaCalculationStateMpi::receive(
      ParaComm *comm,
      int source,
      int tag
      )
{
   DEF_PARA_COMM( commMpi, comm);

   MPI::Datatype datatype;
   datatype = createDatatype();
   datatype.Commit();
   PARA_COMM_CALL(
      commMpi->receive(&compTime, 1, datatype, source, tag)
   );
   datatype.Free();
}
void
ParaCalculationStateMpi::send(
      ParaComm *comm,
      int destination,
      int tag
      )
{
   DEF_PARA_COMM( commMpi, comm);

   MPI::Datatype datatype;
   datatype = createDatatype();
   datatype.Commit();
   PARA_COMM_CALL(
      commMpi->send(&compTime, 1, datatype, destination, tag)
   );
   datatype.Free();
}
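The four methods above share one pattern: build a derived datatype via createDatatype(), commit it, transfer a single element rooted at the object's first member (interrupted or compTime), and free the type. A hedged sketch of what such a createDatatype() could look like for ParaCalculationStateMpi; the member names and layout here are illustrative assumptions, not the actual UG fields:

// Hypothetical sketch only; nSolved and the two-member layout are assumptions.
MPI::Datatype
ParaCalculationStateMpi::createDatatype()
{
   const int nBlocks = 2;
   int           blockLengths[nBlocks];
   MPI::Aint     displacements[nBlocks];
   MPI::Datatype types[nBlocks];

   MPI::Aint base = MPI::Get_address(&compTime);

   blockLengths[0]  = 1;            // double compTime (first member, displacement 0)
   displacements[0] = 0;
   types[0]         = MPI::DOUBLE;

   blockLengths[1]  = 1;            // int nSolved (assumed member)
   displacements[1] = MPI::Get_address(&nSolved) - base;
   types[1]         = MPI::INT;

   return MPI::Datatype::Create_struct(nBlocks, blockLengths, displacements, types);
}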
Example #7
 /*
 * Build and commit a user-defined MPI Struct datatype.
 *
 * \param mpiType new MPI datatype (on output).
 */
 void MpiStructBuilder::commit(MPI::Datatype& mpiType) 
 {
    for (int i = 0; i < nMember_; ++i) {
       addresses_[i] = addresses_[i] - base_;
    }
    mpiType = 
          MPI::Datatype::Create_struct(nMember_, counts_, addresses_, types_);
    mpiType.Commit();
 }
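A usage sketch for the builder above; only commit() appears in the excerpt, so setBase() and addMember() are assumed method names:

// Hypothetical usage; setBase()/addMember() are assumed, only commit() is shown above.
struct Atom {
   double position[3];
   int    typeId;
};

void buildAtomType(MPI::Datatype& atomType)
{
   Atom a;
   MpiStructBuilder builder;
   builder.setBase(&a);                            // record the base address
   builder.addMember(&a.position, MPI::DOUBLE, 3);
   builder.addMember(&a.typeId, MPI::INT, 1);
   builder.commit(atomType);                       // build and commit the struct datatype
}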
Example #8
int main(int argc, char* argv[]) {
	MPI::Init(argc, argv);
	
	rank = MPI::COMM_WORLD.Get_rank();
	size = MPI::COMM_WORLD.Get_size();
	if (size < 2 || size > 32) MPI::COMM_WORLD.Abort(1);
	
	int brojVrsta = size - 1, 
		brojKolona;
		
	
	if (0 == rank) {
	// master
		pline(); cout << "broj kolona matrice: ";
		fflush(stdout); cin >> brojKolona;
		
		if (brojKolona <= 0 || brojKolona > 100) MPI::COMM_WORLD.Abort(1);
		for (int i=1; i<brojVrsta+1; i++)
			MPI::COMM_WORLD.Send(&brojKolona, 1, MPI::INT, i, 0);
		
		pline(); cout << "ucitaj elemente matrice " << "[" << brojVrsta << "][" << brojKolona << "]" << endl;
		
		MPI::Datatype VRSTA = MPI::DOUBLE.Create_contiguous(brojKolona);
		VRSTA.Commit();
		
		double matrica[brojVrsta][brojKolona]; // variable-length array: relies on a compiler extension (e.g. GCC/Clang)

		
		for (int i=0; i<brojVrsta; i++) {
			for (int j=0; j<brojKolona; j++) {
				fflush(stdout);
				cin >> matrica[i][j];
			}
		}
	
		for (int i=0; i<brojVrsta; i++) {
			MPI::COMM_WORLD.Send(matrica[i], 1, VRSTA, i+1, 0);
		}
		
		VRSTA.Free();
	}
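The excerpt above ends before the worker branch. A minimal sketch of the matching worker side (assumed, not part of the original): each worker receives the column count, builds the same contiguous row type, and receives its row.

// Hypothetical worker-side counterpart (rank > 0); not part of the excerpt.
#include <vector>

void workerReceiveRow()
{
	int brojKolona;
	MPI::COMM_WORLD.Recv(&brojKolona, 1, MPI::INT, 0, 0);

	MPI::Datatype VRSTA = MPI::DOUBLE.Create_contiguous(brojKolona);
	VRSTA.Commit();

	std::vector<double> vrsta(brojKolona);   // this worker's row of the matrix
	MPI::COMM_WORLD.Recv(&vrsta[0], 1, VRSTA, 0, 0);

	// ... compute on vrsta ...
	VRSTA.Free();
}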
Example #9
static void Union_Find_Merge_Op(const void* in,void* inout,int len,const MPI::Datatype& datatype)
{
    // unpack union finds
    UNION_FIND<> union_find_in,union_find_inout;
    int m=datatype.Get_size();assert(len==1);
    {int position=0;MPI_UTILITIES::Unpack(union_find_in,ARRAY_VIEW<const char>(m,static_cast<const char*>(in)),position,*union_find_merge_op_comm);}
    {int position=0;MPI_UTILITIES::Unpack(union_find_inout,ARRAY_VIEW<const char>(m,static_cast<char*>(inout)),position,*union_find_merge_op_comm);}
    // merge
    union_find_inout.Merge(union_find_in);
    // pack output 
    {int position=0;MPI_UTILITIES::Pack(union_find_inout,ARRAY_VIEW<char>(m,static_cast<char*>(inout)),position,*union_find_merge_op_comm);}
}
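Before it can be used, a user-defined function like this must be registered as an MPI::Op and passed to a reduction. A sketch of that step (buffer and pack_size are assumed parameters; the excerpt shows only the op itself):

// Hypothetical registration and use; assumes buffer holds a packed UNION_FIND of
// pack_size bytes on every rank and union_find_merge_op_comm has already been set.
void Merge_All_Union_Finds(char* buffer,int pack_size)
{
    MPI::Op merge_op;
    merge_op.Init(Union_Find_Merge_Op,/*commute=*/true);

    MPI::Datatype packed_type=MPI::PACKED.Create_contiguous(pack_size);
    packed_type.Commit();

    // In-place reduction: every rank ends up with the merged union-find
    MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE,buffer,1,packed_type,merge_op);

    packed_type.Free();
    merge_op.Free();
}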
Example #10
File: main.cpp Project: jotrk/vanDouken
int main(int argc, char **argv)
{
    MPI::Init(argc, argv);
    QApplication app(argc, argv);
    LibGeoDecomp::Typemaps::initializeMaps();

    MPI::Aint displacements[] = {0};
    MPI::Datatype memberTypes[] = {MPI::CHAR};
    int lengths[] = {sizeof(CanvasCell)};
    MPI::Datatype objType;
    objType = 
        MPI::Datatype::Create_struct(1, lengths, displacements, memberTypes);
    objType.Commit();

    MPI::Datatype particleType;
    lengths[0] = sizeof(Particle);
    particleType = 
        MPI::Datatype::Create_struct(1, lengths, displacements, memberTypes);
    particleType.Commit();

    ParticleWidget *widget = 0;
    CanvasInitializer *init = getInit1();
    
    HiParSimulator<CanvasCell, RecursiveBisectionPartition<2> > *sim = new HiParSimulator<CanvasCell, RecursiveBisectionPartition<2> >(
        init,
        MPILayer().rank() ? 0 : new NoOpBalancer(), 
        1000,
        1,
        particleType);
    std::cout << "Simulator created ...\n";
    // HiParSimulator<CanvasCell, RecursiveBisectionPartition<2> > sim(
    //     new CanvasInitializer(dim.x(), dim.y()),
    //     MPILayer().rank() ? 0 : new NoOpBalancer(), 
    //     1000,
    //     1,
    //     particleType);

    ForceSteerer *steerer = new ForceSteerer(1);
    sim->addSteerer(steerer);
    std::cout << "Steerer added ...\n";

    Storage * storage = 0;
    if (MPILayer().rank() == 0) {
        storage = new Storage;
        widget = new ParticleWidget(*storage, init->gridDimensions(), Qt::black);
        widget->resize(1000, 500);
        widget->show();
        std::cout << "Widget created ...\n";
    }

    sim->addWriter(new TracingWriter<CanvasCell>(100, 100000));
    sim->addWriter(new ParticleWriter(storage, init->gridDimensions(), 1, particleType));

    FakeSimulator fakeSim(sim);

    // DebugOutput logger;
    QTimer timer;
    QTimer timerGL;

    QObject::connect(&timer, SIGNAL(timeout()),
                     &fakeSim, SLOT(step()));
    // QObject::connect(&widget, SIGNAL(newFrame()),
    //                  &logger, SLOT(newFrame()));
    // QObject::connect(&widget, SIGNAL(forceRecorded(QPoint, QPoint)),
    //                  &logger, SLOT(addForce(QPoint, QPoint)));
    QObject::connect(storage,     SIGNAL(forceRecorded(QVector2D, QVector2D)),
                     steerer,    SLOT(addForce(QVector2D, QVector2D))); 
    QObject::connect(&timerGL, SIGNAL(timeout()), widget, SLOT(updateGL()));

    timer.start(10);
    timerGL.start(10);
    app.exec();

    delete sim;
    MPI::Finalize();
}
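Both objType and particleType above use the same trick: a struct datatype made of a single MPI::CHAR block of sizeof(T) bytes, which ships the object as raw bytes. A small helper capturing that pattern (a sketch; assumes T is trivially copyable):

// Sketch of the raw-bytes datatype pattern used above; assumes T is trivially copyable.
template<typename T>
MPI::Datatype makeOpaqueType()
{
    MPI::Aint displacements[] = {0};
    MPI::Datatype memberTypes[] = {MPI::CHAR};
    int lengths[] = {static_cast<int>(sizeof(T))};

    MPI::Datatype t =
        MPI::Datatype::Create_struct(1, lengths, displacements, memberTypes);
    t.Commit();
    return t;
}

// e.g. MPI::Datatype particleType = makeOpaqueType<Particle>();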
Example #11
static vector<int> kmeans(const vector<string> &dnas, int k) {
    int rank = MPI::COMM_WORLD.Get_rank();
    int size = MPI::COMM_WORLD.Get_size();
    int dnaLength = dnas[0].length();

    //Define the string for dna as MPI type
    MPI::Datatype dnaType = MPI::CHAR.Create_contiguous(dnaLength + 1);
    dnaType.Commit();

    //Define the class Bases as MPI type
    MPI::Datatype basesType, oldTypes[1];
    MPI::Aint offsets[1];
    int blockCounts[1];
    oldTypes[0] = MPI::INT;
    offsets[0] = 0;
    blockCounts[0] = 4;

    basesType = MPI::Datatype::Create_struct(1, blockCounts, offsets, oldTypes);
    basesType.Commit();

	vector<pair<int, int> > tasks(devideTasks(size, dnas.size()));
	int startIndex = tasks[rank].first;
	int endIndex = tasks[rank].second;
	vector<string> prevCenters(k, string(dnaLength, 'A'));
	vector<string> centers(k);
    if (isMaster(rank)) {
    	centers = select_centers(dnas, k);
    }

	vector<int> belonging(dnas.size());
	char *buffer = new char[dnaLength + 1];
	while (true) {
		//Send the new centers to all tasks
		if (isMaster(rank)) {
			for (int i = 0; i < size; i++) {
				if (!isMaster(i)) {
					for (int j = 0; j < k; j++) {
						MPI::COMM_WORLD.Send(centers[j].c_str(), 1, dnaType, i, DEFAULT_TAG);
					}
				}
			}
		}
		//All tasks update their centers
		if (!isMaster(rank)) {
			for (int j = 0; j < k; j++) {
				MPI::COMM_WORLD.Recv(buffer, 1, dnaType, MASTER_RANK, DEFAULT_TAG);
				centers[j] = buffer;
			}
		}
		MPI::COMM_WORLD.Barrier();

		if (equal(centers.begin(), centers.end(), prevCenters.begin())) {
			break;
		}
		prevCenters = centers;
		vector<vector<Bases> > sumClusters(k, vector<Bases>(dnaLength));
		for (int i = startIndex; i < endIndex; i++) {
			int min_dis = numeric_limits<int>::max();
			int belong;
			for (int j = 0; j < k; ++j) {
				int dis = cal_dis(dnas[i], centers[j]);
				if (dis < min_dis) {
					min_dis = dis;
					belong = j;
				}
			}
			belonging[i] = belong;
			for (int j = 0; j < dnaLength; j++) {
				int idx = get_idx(dnas[i][j]);
				sumClusters[belong][j].increase(idx);
			}
		}
		if (!isMaster(rank)) {
			for (int i = 0; i < k; i++) {
				MPI::COMM_WORLD.Send(&(sumClusters[i][0]), dnaLength, basesType, MASTER_RANK, DEFAULT_TAG);
			}
		}
		if (isMaster(rank)) {
			vector<vector<Bases> > aggrSumClusters(k, vector<Bases>(dnaLength));
			for (int i = 0; i < k; i++) {
				for (int j = 0; j < dnaLength; j++) {
					aggrSumClusters[i][j] += sumClusters[i][j];
					//cout << "cluster " << i << " dna " << j << " count " << aggrSumClusters[i][j].get_max() << endl;
				}
			}
			for (int i = 0; i < size; i++) {
				if (isMaster(i)) {
					continue;
				}
				for (int j = 0; j < k; j++) {
					MPI::COMM_WORLD.Recv(&(sumClusters[j][0]), dnaLength, basesType, i, DEFAULT_TAG);
				}

				for (int j = 0; j < k; j++) {
					for (int n = 0; n < dnaLength; n++) {
						aggrSumClusters[j][n] += sumClusters[j][n];
					}
				}
			}
			for (int i = 0; i < centers.size(); ++i) {
				string center;
				for (int j = 0; j < dnas[0].size(); ++j) {
					center += get_base(aggrSumClusters[i][j].get_max());
				}
				centers[i] = center;
			}
		}
		MPI::COMM_WORLD.Barrier();
	}

	delete[] buffer;
	if (!isMaster(rank)) {
		MPI::COMM_WORLD.Send(&belonging[startIndex], endIndex - startIndex, MPI::INT, MASTER_RANK, DEFAULT_TAG);
	}
	if (isMaster(rank)) {
		for (int i = 0; i < size; i++) {
			if (isMaster(i)) {
				continue;
			}
			int taskStart = tasks[i].first;
			int taskEnd = tasks[i].second;
			MPI::COMM_WORLD.Recv(&belonging[taskStart], taskEnd - taskStart, MPI::INT, i, DEFAULT_TAG);
		}
	}

	return belonging;
}
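basesType above describes a single block of four MPI::INT values, so the Bases objects it transfers must be layout-compatible with four ints. A hypothetical sketch of such a class (member names and method bodies are assumptions, consistent with how the code calls increase(), operator+= and get_max()):

// Hypothetical Bases layout implied by basesType (one block of 4 MPI::INT):
// one counter per nucleotide. Member names are assumed.
struct Bases {
	int count[4];                                // counts of A, C, G, T

	Bases() { count[0] = count[1] = count[2] = count[3] = 0; }
	void increase(int idx) { ++count[idx]; }
	Bases& operator+=(const Bases& o) {
		for (int i = 0; i < 4; ++i) count[i] += o.count[i];
		return *this;
	}
	int get_max() const {                        // index of the most frequent base
		int best = 0;
		for (int i = 1; i < 4; ++i) if (count[i] > count[best]) best = i;
		return best;
	}
};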
void PPS::start(){

    //Define parameters struct for mpi
    //Refer to this as an example http://lists.mcs.anl.gov/pipermail/mpich-discuss/2009-April/004880.html
    MPI::Datatype MPIPPSTRUCT;
    int blockcounts[2];
    MPI::Aint offsets[2];
    MPI::Datatype datatypes[2];
    MPI::Aint extent,lb;

    blockcounts[0] = 9; //Number of ints
    blockcounts[1] = 13; //number of __fpv
    datatypes[0] = MPI::INT;
    datatypes[1] = MPIFPV;
    offsets[0] = 0;

    MPI::INT.Get_extent(lb, extent);

    offsets[1] = blockcounts[0] * extent;

    MPIPPSTRUCT = MPI::Datatype::Create_struct(2,blockcounts,offsets, datatypes);
    MPIPPSTRUCT.Commit();

    if(PPS::pid == 0){

        struct parameters temp;
        int start,i,countdown = PPS::comm_size-1;
        bool ready = false;
        MPI::Status status;

        //Logs
        std::ofstream logsfile;
        logsfile.open("tslogs.txt",  std::fstream::out | std::fstream::trunc);

        while(true){


            if(countdown == 0) break;

            //Check first ready-to-compute process
            MPI::COMM_WORLD.Recv(&ready, 1, MPI::BOOL, MPI::ANY_SOURCE, 0, status);

            //Logs
            logsfile << "Remaining sims: " << PPS::plist.size()  << " process countdown: " << countdown << std::endl;

            //When no simulations remain, send an exit status to each process as it reports ready
            if(ready){
                if(PPS::plist.size() == 0 ){
                    start = EXIT_PROCESS;
                    MPI::COMM_WORLD.Send(&start, 1, MPI::INT, status.Get_source(), 0);
                    countdown = countdown - 1;
                }else{
                    //Tell the worker to receive the params and start the sim (the int is the simulation number; EXIT_PROCESS means exit)
                    start = PPS::plist.size() - 1;
                    MPI::COMM_WORLD.Send(&start, 1, MPI::INT, status.Get_source(), 0);

                    temp = PPS::plist.back();

                    //temp.N = status.Get_source() * 10;

                    //Send the parameter struct to the worker
                    MPI::COMM_WORLD.Send(&temp, 1, MPIPPSTRUCT, status.Get_source(), 0);

                    //Remove the parameter struct from the list
                    plist.pop_back();
                }
            }
            ready = false;
        }

        logsfile.close();



    }else{

        int status;
        bool ready = true;
        struct parameters recvparams;

        while(true){
            status = EXIT_PROCESS;
            //Tell the master (point-to-point) that this process is free
            MPI::COMM_WORLD.Send(&ready, 1, MPI::BOOL, 0, 0);

            //receive status value to exit or to receive a new params struct to start new sim
            MPI::COMM_WORLD.Recv(&status, 1, MPI::INT, 0, 0);

            if(status != EXIT_PROCESS){
                //wait to receive parameters


                //std::this_thread::sleep_for(std::chrono::seconds(PPS::pid));

                MPI::COMM_WORLD.Recv(&recvparams, 1, MPIPPSTRUCT, 0, 0);
                //Start sim
                //std::cout << "//////////////////////////////////////////////////////////////////////////////////"<< std::endl;
                //std::cout << "SAY HI: "<< PPS::pid << std::endl;
                //print_params(recvparams);
                //std::cout << "STARTING REAL SIM"<< std::endl;
                PottsSim(recvparams,"output/"+ std::to_string(PPS::pid) + "_proc_output.dat", status);
                //old_code( PPS::pid );
                //std::cout << "//////////////////////////////////////////////////////////////////////////////////"<< std::endl;
            }else{
                std::cout << "I'm the process "<< PPS::pid << ", ready to die." << std::endl;
                break;
            }

        }

    }

    MPIPPSTRUCT.Free();


}
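The blockcounts above imply that struct parameters begins with 9 ints followed by 13 __fpv values, and that MPIFPV is the MPI datatype matching __fpv. A hypothetical layout consistent with those counts (field names invented for the sketch; assuming __fpv is float, which makes offsets[1] = 9 * sizeof(int) line up without padding):

// Hypothetical layout implied by blockcounts/datatypes above; names are invented.
// Assumption: __fpv is float and MPIFPV is MPI::FLOAT.
struct parameters {
    int   iparams[9];        // 9 integer parameters (blockcounts[0])
    __fpv fparams[13];       // 13 floating-point parameters (blockcounts[1])
};

If __fpv were double instead, alignment padding would push the floating-point block to a larger offset, and offsets[1] would be safer computed from member addresses (MPI::Get_address) rather than from the int extent.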