コード例 #1
0
ファイル: mpiTest.cpp プロジェクト: wjcskqygj2015/MPI
// Parallel LU decomposition of an n x n matrix using a cyclic row
// distribution over MPI ranks.
//
// Preconditions (from the surrounding code, not checked here):
//  - totalProcNum evenly divides n (assumed below, see scatter loop);
//  - rank 0 holds the fully initialized `resolving` matrix on entry;
//  - `array`, `buffer`, `totalProcNum`, `nowProcID` are file-scope globals.
// No pivoting is performed, so a zero diagonal element divides by zero.
void LUResolveMPI(int n,float resolving[][maxN])
{
	// Derived datatype covering one matrix row of n floats, so a whole
	// row travels as a single element in Scatter/Gather/Bcast calls.
	array=MPI::FLOAT.Create_contiguous(n);
	array.Commit();

	// Distribute rows cyclically: in step i, rank 0 scatters rows
	// i..i+totalProcNum-1, one per process; this rank keeps row i+nowProcID.
	for(int i=0;i<n;i+=totalProcNum)
		MPI::COMM_WORLD.Scatter(resolving+i, 1, array, resolving+i+nowProcID, 1, array, 0);

	// Elimination rounds: one per pivot row.
	for(int nowRound=0;nowRound<n;nowRound++)
	{
		// Rank that owns the pivot row under the cyclic distribution.
		int master=nowRound%totalProcNum;
		if(nowProcID==master)
			for(int i=nowRound+1;i<n;i++)
				buffer[i]=(resolving[nowRound][i]/=resolving[nowRound][nowRound]);
		// Broadcast the normalized pivot-row tail to all ranks.
		// BUG FIX: the buffer holds float values (assigned from the float
		// division above), so the datatype must be MPI::FLOAT — the
		// original used MPI::INT, which violates MPI type-matching and
		// breaks on any heterogeneous or size-mismatched configuration.
		MPI::COMM_WORLD.Bcast(buffer+nowRound+1,n-nowRound-1,MPI::FLOAT,master);
		// Update the locally-owned rows below the pivot.
		for(int row=n-totalProcNum+nowProcID;row>=nowRound+1;row-=totalProcNum)
			for(int column=nowRound+1;column<n;column++)
				resolving[row][column]=resolving[row][column]-resolving[row][nowRound]*buffer[column];
	}

	// Collect the distributed rows back into the full matrix on rank 0,
	// mirroring the scatter pattern above.
	for(int i=0;i<n;i+=totalProcNum)
		MPI::COMM_WORLD.Gather(resolving+i+nowProcID,1,array,resolving+i,1,array,0);
	array.Free();
}
コード例 #2
0
/// Receive this termination-state object from `source` in a single message.
/// The derived datatype built by createDatatype() describes the object's
/// fields starting at &interrupted; it is committed for the transfer and
/// freed immediately afterwards.
void
ParaSolverTerminationStateMpi::receive(
      ParaComm *comm,
      int source,
      int tag
      )
{
   // Narrow the generic communicator to the MPI implementation.
   DEF_PARA_COMM( commMpi, comm);

   MPI::Datatype datatype = createDatatype();
   datatype.Commit();
   PARA_COMM_CALL(
      commMpi->receive(&interrupted, 1, datatype, source, tag)
   );
   datatype.Free();
}
コード例 #3
0
/// Send this termination-state object to `destination` in a single message.
/// Mirrors receive(): the derived datatype from createDatatype() packs the
/// fields starting at &interrupted, and is freed once the send returns.
void
ParaSolverTerminationStateMpi::send(
      ParaComm *comm,
      int destination,
      int tag
      )
{
   // Narrow the generic communicator to the MPI implementation.
   DEF_PARA_COMM( commMpi, comm);

   MPI::Datatype datatype = createDatatype();
   datatype.Commit();
   PARA_COMM_CALL(
      commMpi->send(&interrupted, 1, datatype, destination, tag)
   );
   datatype.Free();
}
コード例 #4
0
/// Receive this calculation-state object from `source` in a single message.
/// The derived datatype built by createDatatype() describes the object's
/// fields starting at &compTime; it is committed for the transfer and freed
/// immediately afterwards.
void
ParaCalculationStateMpi::receive(
      ParaComm *comm,
      int source,
      int tag
      )
{
   // Narrow the generic communicator to the MPI implementation.
   DEF_PARA_COMM( commMpi, comm);

   MPI::Datatype datatype = createDatatype();
   datatype.Commit();
   PARA_COMM_CALL(
      commMpi->receive(&compTime, 1, datatype, source, tag)
   );
   datatype.Free();
}
コード例 #5
0
/// Send this calculation-state object to `destination` in a single message.
/// Mirrors receive(): the derived datatype from createDatatype() packs the
/// fields starting at &compTime, and is freed once the send returns.
void
ParaCalculationStateMpi::send(
      ParaComm *comm,
      int destination,
      int tag
      )
{
   // Narrow the generic communicator to the MPI implementation.
   DEF_PARA_COMM( commMpi, comm);

   MPI::Datatype datatype = createDatatype();
   datatype.Commit();
   PARA_COMM_CALL(
      commMpi->send(&compTime, 1, datatype, destination, tag)
   );
   datatype.Free();
}
コード例 #6
0
ファイル: dz5z1.cpp プロジェクト: aaleksandar/si4mps
// Master/worker matrix distribution: rank 0 reads a (size-1) x brojKolona
// matrix from stdin and sends one row to each worker rank.
// NOTE: this snippet is truncated — main() continues past the visible end
// (the worker branch and MPI::Finalize are not shown here).
int main(int argc, char* argv[]) {
	MPI::Init(argc, argv);
	
	rank = MPI::COMM_WORLD.Get_rank();
	size = MPI::COMM_WORLD.Get_size();
	// Require at least one worker; cap at 32 processes.
	if (size < 2 || size > 32) MPI::COMM_WORLD.Abort(1);
	
	// One matrix row per worker rank (ranks 1..size-1).
	int brojVrsta = size - 1, 
		brojKolona;
		
	
	if (0 == rank) {
	// master
		pline(); cout << "broj kolona matrice: ";
		fflush(stdout); cin >> brojKolona;
		
		// NOTE(review): the column count is sent to the workers BEFORE it is
		// validated below — on bad input the master aborts after the workers
		// already received an out-of-range value. Validation should precede
		// the sends; left unchanged because the worker branch is not visible.
		for (int i=1; i<brojVrsta+1; i++)
			MPI::COMM_WORLD.Send(&brojKolona, 1, MPI::INT, i, 0);
		if (brojKolona <= 0 || brojKolona > 100) MPI::COMM_WORLD.Abort(1);
		
		pline(); cout << "ucitaj elemente matrice " << "[" << brojVrsta << "][" << brojKolona << "]" << endl;
		
		// Derived datatype: one matrix row of brojKolona doubles.
		MPI::Datatype VRSTA = MPI::DOUBLE.Create_contiguous(brojKolona);
		VRSTA.Commit();
		
		// NOTE(review): variable-length array — a GCC/Clang extension, not
		// standard C++.
		double matrica[brojVrsta][brojKolona];

		
		// Read the matrix elements row by row from stdin.
		for (int i=0; i<brojVrsta; i++) {
			for (int j=0; j<brojKolona; j++) {
				fflush(stdout);
				cin >> matrica[i][j];
			}
		}
	
		// Row i goes to worker rank i+1 as a single VRSTA element.
		for (int i=0; i<brojVrsta; i++) {
			MPI::COMM_WORLD.Send(matrica[i], 1, VRSTA, i+1, 0);
		}
		
		VRSTA.Free();
	}
コード例 #7
0
void PPS::start(){

    //Define parameters struct for mpi
    //Refer to this as an example http://lists.mcs.anl.gov/pipermail/mpich-discuss/2009-April/004880.html
    MPI::Datatype MPIPPSTRUCT;
    int blockcounts[2];
    MPI::Aint offsets[2];
    MPI::Datatype datatypes[2];
    MPI::Aint extent,lb;

    blockcounts[0] = 9; //Number of ints
    blockcounts[1] = 13; //number of __fpv
    datatypes[0] = MPI::INT;
    datatypes[1] = MPIFPV;
    offsets[0] = 0;

    MPI::INT.Get_extent(lb, extent);

    offsets[1] = blockcounts[0] * extent;

    MPIPPSTRUCT = MPIPPSTRUCT.Create_struct(2,blockcounts,offsets, datatypes);
    MPIPPSTRUCT.Commit();

    if(PPS::pid == 0){

        struct parameters temp;
        int start,i,countdown = PPS::comm_size-1;
        bool ready = false;
        MPI::Status status;

        //Logs
        std::ofstream logsfile;
        logsfile.open("tslogs.txt",  std::fstream::out | std::fstream::trunc);

        while(true){


            if(countdown == 0) break;

            //Check first ready-to-compute process
            MPI::COMM_WORLD.Recv(&ready, 1, MPI::BOOL, MPI_ANY_SOURCE, 0, status);

            //Logs
            logsfile << "Remaining sims: " << PPS::plist.size()  << " process countdown: " << countdown << std::endl;

            //Send a 0 status to all the process to stop
            if(ready){
                if(PPS::plist.size() == 0 ){
                    start = EXIT_PROCESS;
                    MPI::COMM_WORLD.Send(&start, 1, MPI::INT, status.Get_source(), 0);
                    countdown = countdown - 1;
                }else{
                    //Prepare him to receive the params and start the sim (an int that contains the simulation number (-1 = exit))
                    start = PPS::plist.size() - 1;
                    MPI::COMM_WORLD.Send(&start, 1, MPI::INT, status.Get_source(), 0);

                    temp = PPS::plist.back();

                    //temp.N = status.Get_source() * 10;

                    //Deploy the parameterer struct
                    MPI::COMM_WORLD.Send(&temp, 1, MPIPPSTRUCT, status.Get_source(), 0);

                    //Pullout the parameter struct from the list
                    plist.pop_back();
                }
            }
            ready = false;
        }

        logsfile.close();



    }else{

        int status;
        bool ready = true;
        struct parameters recvparams;

        while(true){
            status == EXIT_PROCESS;
            //Send with a point to point that you are free
            MPI::COMM_WORLD.Send(&ready, 1, MPI::BOOL, 0, 0);

            //receive status value to exit or to receive a new params struct to start new sim
            MPI::COMM_WORLD.Recv(&status, 1, MPI::INT, 0, 0);

            if(status != EXIT_PROCESS){
                //wait to receive parameters


                //std::this_thread::sleep_for(std::chrono::seconds(PPS::pid));

                MPI::COMM_WORLD.Recv(&recvparams, 1, MPIPPSTRUCT, 0, 0);
                //Start sim
                //std::cout << "//////////////////////////////////////////////////////////////////////////////////"<< std::endl;
                //std::cout << "SAY HI: "<< PPS::pid << std::endl;
                //print_params(recvparams);
                //std::cout << "STARTING REAL SIM"<< std::endl;
                PottsSim(recvparams,"output/"+ std::to_string(PPS::pid) + "_proc_output.dat", status);
                //old_code( PPS::pid );
                //std::cout << "//////////////////////////////////////////////////////////////////////////////////"<< std::endl;
            }else{
                std::cout << "I'm the process "<< PPS::pid << ", ready to die." << std::endl;
                break;
            }

        }

    }

    MPIPPSTRUCT.Free();


}