Example #1
void TwoParticleGF::compute(bool clear, const boost::mpi::communicator & comm)
{
    if (Status < Prepared) throw (exStatusMismatch());
    if (Status >= Computed) return;
    if (!Vanishing) {
        // Create a "skeleton" object holding pointers to the parts so that a compute method can be called on each
        pMPI::mpi_skel<ComputeAndClearWrap> skel;
        bool fill_container = m_data_.NBosonic() > 0 && m_data_.NFermionic() > 0; 
        skel.parts.reserve(parts.size());
        for (size_t i = 0; i < parts.size(); i++) {
            skel.parts.push_back(ComputeAndClearWrap(&m_data_, parts[i], clear, fill_container, 1));
        }
        std::map<pMPI::JobId, pMPI::WorkerId> job_map = skel.run(comm, true); // actual running - very costly
        int rank = comm.rank();
        int comm_size = comm.size(); 

        // Start distributing data
        comm.barrier();
        if (!clear) {
            for (size_t p = 0; p < parts.size(); p++) {
                boost::mpi::broadcast(comm, parts[p]->NonResonantTerms, job_map[p]);
                boost::mpi::broadcast(comm, parts[p]->ResonantTerms, job_map[p]);
                if (rank == job_map[p]) {
                    parts[p]->Status = TwoParticleGFPart::Computed;
                }
            }
            comm.barrier();
        }
    }
    Status = Computed;
}
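For orientation, here is a minimal self-contained sketch of the owner-broadcast pattern used in the distribution step above: each job's result is broadcast from the rank that computed it, as job_map records there. The plain vector payload and the static j % size job map are stand-ins for illustration, not the original data structures.

#include <boost/mpi.hpp>
#include <boost/serialization/vector.hpp>
#include <vector>

int main(int argc, char* argv[])
{
    boost::mpi::environment env(argc, argv);
    boost::mpi::communicator comm;

    const int njobs = 8;
    std::vector<std::vector<double> > results(njobs);

    // Static job map: job j is owned by rank j % comm.size().
    for (int j = 0; j < njobs; ++j)
        if (comm.rank() == j % comm.size())
            results[j].assign(100, double(j)); // "compute" job j locally

    comm.barrier(); // make sure all owners have finished computing

    // Every rank receives each job's result from the rank that computed it,
    // mirroring the broadcast(comm, parts[p]->..., job_map[p]) calls above.
    for (int j = 0; j < njobs; ++j)
        boost::mpi::broadcast(comm, results[j], j % comm.size());

    return 0;
}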
Example #2
template <typename T>
void
printit(const boost::mpi::communicator& comm,
        const std::vector<T>& things,
        const std::string& caption)
{
    if (!caption.empty() && comm.rank() == 0) {
        std::cout << caption << std::endl;
        std::cout.flush();
    }
    for (int p = 0; p < comm.size(); ++p) {
        if (comm.rank() == p) {
            std::cout << p << ": ";
            std::copy(things.begin(), things.end(),
                      std::ostream_iterator<T>(std::cout, ","));
            std::cout << std::endl;
            std::cout.flush();
        }
        comm.barrier();
    }

    comm.barrier();
    size_t global_size;
    boost::mpi::reduce(comm, things.size(), global_size, std::plus<size_t>(), 0);
    if (comm.rank() == 0) {
        if (!caption.empty()) {
            std::cout << caption;
        }
        std::cout << "Number of things: " << global_size << std::endl;
    }
    comm.barrier();
    std::cout << comm.rank() << ": leaving printit()" << std::endl;
}
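A hypothetical driver for printit, for illustration only (it assumes printit and its headers are in scope; the element type must be streamable and Boost.MPI-serializable):

#include <boost/mpi.hpp>
#include <vector>

int main(int argc, char* argv[])
{
    boost::mpi::environment env(argc, argv);
    boost::mpi::communicator comm;

    // Each rank contributes rank+1 copies of its own rank number, so the
    // per-rank lines differ and the reduced total is 1 + 2 + ... + comm.size().
    std::vector<int> things(comm.rank() + 1, comm.rank());
    printit<int>(comm, things, "per-rank contents");
    return 0;
}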
Example #3
void TwoParticleGFContainer::computeAll_split(bool clearTerms, const boost::mpi::communicator & comm)
{
    // split communicator
    size_t ncomponents = NonTrivialElements.size();
    size_t ncolors = std::min(int(comm.size()), int(NonTrivialElements.size()));
    RealType color_size = 1.0 * comm.size() / ncolors;
    std::map<int,int> proc_colors;
    std::map<int,int> elem_colors;
    std::map<int,int> color_roots;
    for (int p = 0; p < comm.size(); p++) {
        int color = int(1.0 * p / color_size);
        proc_colors[p] = color;
        color_roots[color] = p;
    }
    for (size_t i = 0; i < ncomponents; i++) {
        int color = i * ncolors / ncomponents;
        elem_colors[i] = color;
    }

    if (!comm.rank()) {
        INFO("Splitting " << ncomponents << " components among " << ncolors << " communicators");
        for (size_t i = 0; i < ncomponents; i++)
            INFO("2pgf " << i << " color: " << elem_colors[i] << " color_root: " << color_roots[elem_colors[i]]);
    }
    comm.barrier();
    int comp = 0;

    boost::mpi::communicator comm_split = comm.split(proc_colors[comm.rank()]);

    for (std::map<IndexCombination4, boost::shared_ptr<TwoParticleGF> >::iterator iter = NonTrivialElements.begin(); iter != NonTrivialElements.end(); iter++, comp++) {
        if (elem_colors[comp] == proc_colors[comm.rank()]) {
            INFO("C" << elem_colors[comp] << "p" << comm.rank() << ": computing 2PGF for " << iter->first);
            static_cast<TwoParticleGF&>(*(iter->second)).compute(clearTerms, comm_split);
        }
    }
    comm.barrier();
    // distribute data
    if (!comm.rank()) INFO_NONEWLINE("Distributing 2PGF container...");
    comp = 0;
    for (std::map<IndexCombination4, boost::shared_ptr<TwoParticleGF> >::iterator iter = NonTrivialElements.begin(); iter != NonTrivialElements.end(); iter++, comp++) {
        int sender = color_roots[elem_colors[comp]];
        TwoParticleGF& chi = *(iter->second);
        for (size_t p = 0; p < chi.parts.size(); p++) {
            boost::mpi::broadcast(comm, chi.parts[p]->NonResonantTerms, sender);
            boost::mpi::broadcast(comm, chi.parts[p]->ResonantTerms, sender);
            if (comm.rank() != sender) {
                chi.setStatus(TwoParticleGF::Computed);
            }
        }
    }
    comm.barrier();
    if (!comm.rank()) INFO("done.");
}
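The key call above is boost::mpi::communicator::split: all ranks that pass the same color land in the same subcommunicator. A minimal sketch of that call in isolation, using a simple even/odd coloring rather than the component-based coloring above:

#include <boost/mpi.hpp>
#include <iostream>

int main(int argc, char* argv[])
{
    boost::mpi::environment env(argc, argv);
    boost::mpi::communicator comm;

    // Ranks passing the same color end up in the same subcommunicator.
    int color = comm.rank() % 2;
    boost::mpi::communicator sub = comm.split(color);

    std::cout << "world rank " << comm.rank() << " -> color " << color
              << ", sub rank " << sub.rank() << "/" << sub.size() << std::endl;

    sub.barrier();  // synchronizes only the ranks sharing this color
    comm.barrier(); // synchronizes the whole world
    return 0;
}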
Example #4
 /**
  * workerloop runs on all workers and executes worker functions.
  *
  * The worker loop waits for wrapper objects to be broadcast to all
  * workers. If the wrapper is a quit object it shuts down; if the wrapper is
  * a work object it executes it.
  *
  * @param comm the communicator object the workers and the master live in
  *
  * @param root the id of the root or master node that distributes the jobs
  */
 inline void workerloop(boost::mpi::communicator & comm, int root)
 {
   while(true)
   {
     masterworker::wrapper w;
     mpi_broadcast_workaround(comm, w, root);
     if(w.quit())
     {
       comm.barrier();
       return;
     }
     else
     {
       w.execute(comm, root);
     }
   }
 }
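The master side of this handshake is not part of the excerpt. A hypothetical sketch of the protocol, with a plain int standing in for masterworker::wrapper and plain boost::mpi::broadcast standing in for mpi_broadcast_workaround:

#include <boost/mpi.hpp>

// Toy worker loop: 0 = work item, 1 = quit.
void toy_workerloop(boost::mpi::communicator& comm, int root)
{
    while (true) {
        int command = 0;
        boost::mpi::broadcast(comm, command, root); // wait for the next "wrapper"
        if (command == 1) {   // quit object received
            comm.barrier();   // match the master's final barrier
            return;
        }
        // a work object would be executed here
    }
}

int main(int argc, char* argv[])
{
    boost::mpi::environment env(argc, argv);
    boost::mpi::communicator comm;
    const int root = 0;

    if (comm.rank() == root) {
        int quit = 1;
        boost::mpi::broadcast(comm, quit, root); // tell the workers to shut down
        comm.barrier();
    } else {
        toy_workerloop(comm, root);
    }
    return 0;
}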
Example #5
template <typename T>
void
broadcast_test(const mpi::communicator& comm, const T& bc_value,
               std::string const& kind, int root) {
  using boost::mpi::broadcast;
  
  T value;
  if (comm.rank() == root) {
    value = bc_value;
    std::cout << "Broadcasting " << kind << " from root " << root << "...";
    std::cout.flush();
  }

  broadcast(comm, value, root);
  BOOST_CHECK(value == bc_value);
  if (comm.rank() == root) {
    if (value == bc_value) {
      std::cout << "OK." << std::endl;
    } else {
      std::cout << "FAIL." << std::endl;
    }
  }
  comm.barrier();
}
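A hypothetical driver for broadcast_test (mpi is assumed to alias the boost::mpi namespace, as in the surrounding test code):

// Hypothetical driver; names here are illustrative only.
namespace mpi = boost::mpi;

void run_broadcast_tests(const mpi::communicator& comm)
{
    // Broadcast a few kinds of values from different roots; every rank
    // checks that it received the root's value.
    broadcast_test(comm, 17, "integers", 0);
    broadcast_test(comm, std::string("Hello, World!"), "strings", 0);
    if (comm.size() > 1)
        broadcast_test(comm, 3.14159, "doubles", 1);
}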
Example #6
 inline void barrier(void) { world->barrier(); }
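For context, a minimal sketch of the kind of wrapper class this one-liner could live in; the class name and the world member here are assumptions, since the surrounding class is not shown:

// Hypothetical surrounding class; the real class holding `world` is not
// part of this excerpt.
#include <boost/mpi.hpp>
#include <boost/shared_ptr.hpp>

class mpi_environment_wrapper {
public:
    explicit mpi_environment_wrapper(const boost::mpi::communicator& comm)
        : world(new boost::mpi::communicator(comm)) {}

    // Forwarding one-liner, as in the example above.
    inline void barrier(void) { world->barrier(); }

private:
    boost::shared_ptr<boost::mpi::communicator> world;
};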
Example #7
int run(const boost::mpi::communicator& comm, skylark::base::context_t& context,
    hilbert_options_t& options) {

    int rank = comm.rank();

    InputType X, Xv, Xt;
    LabelType Y, Yv, Yt;

    if(!options.trainfile.empty()) { // training mode

        read(comm, options.fileformat, options.trainfile, X, Y);
        int dimensions = skylark::base::Height(X);
        int targets = GetNumTargets<LabelType>(comm, Y);
        bool shift = false;

        if ((options.lossfunction == LOGISTIC) && (targets == 1)) {
            ShiftForLogistic(Y);
            targets = 2;
            shift = true;
        }

        BlockADMMSolver<InputType>* Solver =
            GetSolver<InputType>(context, options, dimensions);

        if(!options.valfile.empty()) {
            comm.barrier();
            if(rank == 0) std::cout << "Loading validation data." << std::endl;

            read(comm, options.fileformat, options.valfile, Xv, Yv,
                 skylark::base::Height(X));

            if ((options.lossfunction == LOGISTIC) && shift) {
                ShiftForLogistic(Yv);
            }
        }

        skylark::ml::model_t<InputType, LabelType>* model =
            Solver->train(X, Y, Xv, Yv, comm);

        if (comm.rank() == 0)
            model->save(options.modelfile, options.print());
    }

    else {

        std::cout << "Testing Mode (currently loads test data in memory)" << std::endl;
        skylark::ml::model_t<InputType, LabelType> model(options.modelfile);
        read(comm, options.fileformat, options.testfile, Xt, Yt,
             model.get_input_size());
        LabelType DecisionValues(Yt.Height(), model.get_num_outputs());
        LabelType PredictedLabels(Yt.Height(), 1);
        El::Zero(DecisionValues);
        El::Zero(PredictedLabels);

        std::cout << "Starting predictions" << std::endl;
        model.predict(Xt, PredictedLabels, DecisionValues, options.numthreads);
        double accuracy = model.evaluate(Yt, DecisionValues, comm);
        if(rank == 0)
            std::cout << "Test Accuracy = " << accuracy << " %" << std::endl;

        // fix logistic case -- provide mechanism to dump predictions -- clean up evaluate

    }

    return 0;
}