Code Example #1
    void finishProcessing()
    {
        // Wait for the task distributor to finish
        distributor->wait();

        // All nodes wait for each other
        node->barrierWait();
        if (node->isMaster())
            ProgNmaAlignment::finishProcessing();
        node->barrierWait();
    }
Code Example #2
    /** Main body */
    void createWorkFiles()
    {
        // The master node prepares the work files before processing starts
        MetaData &mdIn = *getInputMd(); // get a reference to the input metadata

        if (node->isMaster())
        {
            ProgNmaAlignment::createWorkFiles();
            mdIn.write(fnOutDir + "/nmaTodo.xmd");
        }
        node->barrierWait(); // sync all nodes before starting work
        mdIn.read(fnOutDir + "/nmaTodo.xmd");
        mdIn.findObjects(imgsId); // get the object ids
        rangen = node->rank;
        distributor = new MpiTaskDistributor(mdIn.size(), 1, node);
    }
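
The pattern shown here is generic MPI usage: the master prepares a shared file, a barrier synchronizes all processes, and then every rank reads the same file. Below is a minimal sketch of that idea in plain MPI; the MpiNode wrapper, the MetaData class, and the nmaTodo.xmd file are Xmipp/Scipion specifics, so the sketch uses raw MPI calls and a hypothetical todo.txt file instead, and it assumes all ranks see a shared filesystem.

    #include <mpi.h>
    #include <fstream>
    #include <string>

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        const std::string todoFile = "todo.txt"; // hypothetical work file

        if (rank == 0)
        {
            // Only the master prepares the shared work file
            std::ofstream out(todoFile.c_str());
            out << "task 1\ntask 2\n";
        }
        MPI_Barrier(MPI_COMM_WORLD); // sync all ranks before they start working

        std::ifstream in(todoFile.c_str()); // every rank reads the same file
        std::string task;
        while (std::getline(in, task))
        {
            // ... distribute and process tasks here ...
        }

        MPI_Finalize();
        return 0;
    }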
Code Example #3
    /** Override read() to initialize the MPI environment */
    void read(int argc, char **argv)
    {
        node = new MpiNode(argc, argv);
        if (!node->isMaster())
            verbose = 0; // only the master reports progress
        fileMutex = new MpiFileMutex(node);
        ProgNmaAlignment::read(argc, argv);
    }
Code Example #4
File: mpi.cpp Project: shy3u/GeRelion
void printMpiNodesMachineNames(MpiNode &node, int nthreads)
{
    char nodename[64] = "undefined";
    gethostname(nodename, sizeof(nodename));

    if (node.isMaster())
    {
        std::cout << " === GERELION MPI setup ===" << std::endl;
        std::cout << " + Number of MPI processes             = " << node.size << std::endl;
        if (nthreads > 1)
        {
            std::cout << " + Number of threads per MPI process  = " << nthreads << std::endl;
            std::cout << " + Total number of threads therefore  = " << nthreads * node.size << std::endl;
        }
        std::cout << " + Master  (0) runs on host            = " << nodename << std::endl;
        std::cout.flush();
    }
    node.barrierWait();

    // Each slave prints on its own turn; the barrier serializes the output
    for (int slave = 1; slave < node.size; slave++)
    {
        if (slave == node.rank)
        {
            std::cout << " + Slave ";
            std::cout.width(5);
            std::cout << slave;
            std::cout << " runs on host            = " << nodename << std::endl;
            std::cout.flush();
        }
        node.barrierWait();
    }

    if (node.isMaster())
    {
        std::cout << " =================" << std::endl;
    }
    std::cout.flush();

    // Try to flush all std::cout of all MPI processes before proceeding...
    sleep(1);
    node.barrierWait();
}
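
The barrier-in-a-loop idiom above is what serializes the per-node output: each rank prints only on its own turn, and every rank passes the same barrier once per iteration. A minimal, self-contained sketch of the same technique in plain MPI (without the MpiNode wrapper) might look like the following; note that even with barriers, the interleaving of stdout across ranks is ultimately up to the MPI launcher, which is why the original also sleeps before its final barrier.

    #include <mpi.h>
    #include <unistd.h>   // gethostname
    #include <iostream>

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);
        int rank, size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        char hostname[64] = "undefined";
        gethostname(hostname, sizeof(hostname));

        // Each rank prints only on its own turn; the barrier inside the
        // loop keeps the ranks in lockstep so the lines come out in order
        for (int turn = 0; turn < size; ++turn)
        {
            if (turn == rank)
                std::cout << "rank " << rank << " runs on host " << hostname << std::endl;
            MPI_Barrier(MPI_COMM_WORLD);
        }

        MPI_Finalize();
        return 0;
    }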
Code Example #5
    // Only the master shows the progress bar
    void showProgress()
    {
        if (node->isMaster())
            ProgNmaAlignment::showProgress();
    }
Code Example #6
    // Only the master performs the progress-bar start-up
    void startProcessing()
    {
        if (node->isMaster())
            ProgNmaAlignment::startProcessing();
    }
Code Example #7
File: mpi_run_main.cpp Project: I2PC/scipion
    /** Constructor: create the MPI node; only the master is verbose */
    ProgMPIRun(int argc, char **argv)
    {
        node = new MpiNode(argc, argv);
        if (!node->isMaster())
            verbose = 0;
    }
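
Examples #3 and #7 share the same idiom: construct the MPI node first, then set verbose = 0 on every rank except the master, so progress messages are printed exactly once. A minimal sketch of that idiom in plain MPI, with a hypothetical verbose flag standing in for the program's member variable:

    #include <mpi.h>
    #include <iostream>

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);
        int rank, size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        // Non-master ranks stay silent so output appears exactly once
        int verbose = (rank == 0) ? 1 : 0;

        if (verbose)
            std::cout << "Running with " << size << " MPI processes" << std::endl;

        MPI_Finalize();
        return 0;
    }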