Example #1
File: RNDMCOMP.cpp  Project: jyamu/qmc
bool RNDMCOMP::run()
{
  resetUpdateEngines();
  //enable collection mode: estimator data are gathered across nodes
  Estimators->setCollectionMode(true);
  Estimators->start(nBlocks);
  for(int ip=0; ip<NumThreads; ip++)
    Movers[ip]->startRun(nBlocks,false);
  Timer myclock;
  IndexType block = 0;
  IndexType updatePeriod=(QMCDriverMode[QMC_UPDATE_MODE])?Period4CheckProperties:(nBlocks+1)*nSteps;
  do // block
  {
    Estimators->startBlock(nSteps);
    for(int ip=0; ip<NumThreads; ip++)
      Movers[ip]->startBlock(nSteps);
    for(IndexType step=0; step< nSteps; ++step, CurrentStep+=BranchInterval)
    {
      #pragma omp parallel for
      for(int ip=0; ip<NumThreads; ++ip)
      {
        int now=CurrentStep;
        MCWalkerConfiguration::iterator
        wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]);
        for(int interval = 0; interval<BranchInterval-1; ++interval,++now)
          Movers[ip]->advanceWalkers(wit,wit_end,false);
        wClones[ip]->resetCollectables();
        Movers[ip]->advanceWalkers(wit,wit_end,false);
        Movers[ip]->setReleasedNodeMultiplicity(wit,wit_end);
        if(QMCDriverMode[QMC_UPDATE_MODE] && now%updatePeriod == 0)
          Movers[ip]->updateWalkers(wit, wit_end);
      }//#pragma omp parallel
      for(int ip=1; ip<NumThreads; ++ip)
      {
        for(int j=0; j<W.Collectables.size(); ++j)
          W.Collectables[j]+=wClones[ip]->Collectables[j];
      }
      branchEngine->branch(CurrentStep,W, branchClones);
      FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode);
    }
//       #pragma omp parallel for
//       for(int ip=0; ip<NumThreads; ++ip)
//       {
//         MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]);
//         Movers[ip]->resetbuffers(wit, wit_end);
//       }
    Estimators->stopBlock(acceptRatio());
    block++;
    recordBlock(block);
  }
  while(block<nBlocks && myclock.elapsed()<MaxCPUSecs);
  //for(int ip=0; ip<NumThreads; ip++) Movers[ip]->stopRun();
  for(int ip=0; ip<NumThreads; ip++)
    *(RandomNumberControl::Children[ip])=*(Rng[ip]);
  Estimators->stop();
  return finalize(block);
}
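After every branch step the surviving walkers are repartitioned with FairDivideLow so that thread ip again owns the contiguous range [wPerNode[ip], wPerNode[ip+1]). A minimal stand-alone sketch of such a fair partition (fair_divide_low is a hypothetical name, not the QMCPACK routine):

#include <cstdio>
#include <vector>

// Hypothetical stand-alone version of a FairDivideLow-style partition:
// split ntot walkers into npart contiguous ranges whose sizes differ by
// at most one; thread ip owns [bounds[ip], bounds[ip+1]).
void fair_divide_low(int ntot, int npart, std::vector<int>& bounds)
{
  bounds.resize(npart + 1);
  const int base = ntot / npart, extra = ntot % npart;
  bounds[0] = 0;
  for (int p = 0; p < npart; ++p)
    bounds[p + 1] = bounds[p] + base + (p < extra ? 1 : 0);
}

int main()
{
  std::vector<int> wPerNode;
  fair_divide_low(10, 4, wPerNode);              // 10 walkers over 4 threads
  for (int p = 0; p < 4; ++p)
    std::printf("thread %d owns walkers [%d,%d)\n",
                p, wPerNode[p], wPerNode[p + 1]);
  return 0;
}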
Example #2
  bool VMCSingleOMP::run() 
  { 
    resetRun();

    //start the main estimator
    Estimators->start(nBlocks);

#pragma omp parallel
    {
      int now=0;

#pragma omp for 
      for(int ip=0; ip<NumThreads; ++ip) 
        Movers[ip]->startRun(nBlocks,false);

      for(int block=0;block<nBlocks; ++block)
      {
#pragma omp for 
        for(int ip=0; ip<NumThreads; ++ip)
        {
          IndexType updatePeriod=(QMCDriverMode[QMC_UPDATE_MODE])?Period4CheckProperties:(nBlocks+1)*nSteps;
          //assign the iterators and reuse them
          MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]);

          Movers[ip]->startBlock(nSteps);
          int now_loc=now;
          for(int step=0; step<nSteps;++step)
          {
            Movers[ip]->advanceWalkers(wit,wit_end,false);
            Movers[ip]->accumulate(wit,wit_end);
            ++now_loc;
            if(now_loc%updatePeriod==0) Movers[ip]->updateWalkers(wit,wit_end);
            if(now_loc%myPeriod4WalkerDump==0) wClones[ip]->saveEnsemble(wit,wit_end);
          } 
          Movers[ip]->stopBlock();
        }//end-of-parallel for

        //increase now
        now+=nSteps;
#pragma omp master
        {
          CurrentStep+=nSteps;
          Estimators->stopBlock(estimatorClones);
          recordBlock(block+1);
        }//end of master
      }//block
    }//end of parallel
    Estimators->stop(estimatorClones);

    //copy back the random states
    for(int ip=0; ip<NumThreads; ++ip) 
      *(RandomNumberControl::Children[ip])=*(Rng[ip]);

    //finalize a qmc section
    return finalize(nBlocks);
  }
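The VMC driver keeps a single OpenMP parallel region alive for the whole run: a #pragma omp for hands one mover to each thread, and per-block bookkeeping runs in a master section. A minimal sketch of that skeleton with illustrative names only (note that #pragma omp master carries no implied barrier, so the sketch adds an explicit one):

#include <omp.h>
#include <cstdio>

int main()
{
  const int nBlocks = 3, nSteps = 4, NumThreads = omp_get_max_threads();
  int CurrentStep = 0;                    // shared; written only by master
  #pragma omp parallel
  {
    for (int block = 0; block < nBlocks; ++block)
    {
      #pragma omp for
      for (int ip = 0; ip < NumThreads; ++ip)      // one mover per thread
        for (int step = 0; step < nSteps; ++step)
        { /* advance and accumulate the walkers owned by thread ip */ }
      // the omp for ends with an implicit barrier
      #pragma omp master
      {
        CurrentStep += nSteps;                     // per-block bookkeeping
        std::printf("block %d done, CurrentStep=%d\n", block, CurrentStep);
      }
      #pragma omp barrier                // master has no implied barrier
    }
  }
  return 0;
}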
Example #3
 void WalkerControlBase::setWalkerID(MCWalkerConfiguration& walkers)
 {
   start(); //do the normal start
   MCWalkerConfiguration::iterator wit(walkers.begin());
   MCWalkerConfiguration::iterator wit_end(walkers.end());
   for(; wit != wit_end; ++wit)
   {
     if((*wit)->ID==0) 
     {
       (*wit)->ID=(++NumWalkersCreated)*NumContexts+MyContext;
       (*wit)->ParentID=(*wit)->ID;
     }
   }
 }
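setWalkerID makes IDs globally unique across MPI ranks by striding the per-rank counter: rank MyContext of NumContexts only ever produces values congruent to MyContext modulo NumContexts, so two ranks cannot collide. A toy sketch of the arithmetic:

#include <cstdio>

int main()
{
  const int NumContexts = 4;                       // number of MPI ranks
  for (int MyContext = 0; MyContext < NumContexts; ++MyContext)
  {
    int NumWalkersCreated = 0;                     // per-rank counter
    for (int n = 0; n < 3; ++n)                    // three new walkers
      std::printf("rank %d assigns ID %d\n", MyContext,
                  (++NumWalkersCreated) * NumContexts + MyContext);
  }
  return 0;
}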
Example #4
  void RNDMCOMP::resetUpdateEngines() 
  {

    ReportEngine PRE("RNDMCOMP","resetUpdateEngines");
 
    Timer init_timer;

    makeClones(W,Psi,H);

    if(Movers.empty()) 
    {
      //load walkers
      W.loadEnsemble();
      for(int ip=1;ip<NumThreads;++ip) wClones[ip]->loadEnsemble(W);

      //alternative signature kept for reference: branchEngine->initWalkerController(W,Tau,false,...)
      if (useAlternate=="yes")
        branchEngine->initWalkerController(W,false,false);
      else
        branchEngine->initWalkerController(W,false,true);
      branchEngine->setRN(true);

      //if(QMCDriverMode[QMC_UPDATE_MODE]) W.clearAuxDataSet();
      Movers.resize(NumThreads,0);
      branchClones.resize(NumThreads,0);
      Rng.resize(NumThreads,0);
      estimatorClones.resize(NumThreads,0);
      FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode);
      bool klw=(KillWalker=="yes");

      {//log file
        ostringstream o;
        o << "  Initial partition of walkers on a node: ";
        std::copy(wPerNode.begin(),wPerNode.end(),ostream_iterator<int>(o," "));
        o << "\n";
        o << "Killing Walkers at nodes " << (useAlternate!="yes") <<endl;

        o << "Running the released node driver."<<endl;

        app_log() << o.str();
      }

#pragma omp parallel for
      for(int ip=0; ip<NumThreads; ++ip)
      {
        estimatorClones[ip]= new EstimatorManager(*Estimators);
        estimatorClones[ip]->setCollectionMode(false);

        Rng[ip]=new RandomGenerator_t(*RandomNumberControl::Children[ip]);
        hClones[ip]->setRandomGenerator(Rng[ip]);

        branchClones[ip] = new BranchEngineType(*branchEngine);
//         Movers[ip] = new RNDMCUpdatePbyPWithRejectionFast(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]); 
        
        if (useAlternate=="yes")
          Movers[ip] = new RNDMCUpdatePbyPAlternate(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);  
        else
          Movers[ip] = new RNDMCUpdatePbyPCeperley(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]); 
        Movers[ip]->put(qmcNode);
        Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);   
        MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]);
        Movers[ip]->initWalkersForPbyP(wit, wit_end);
      }
    }
//     branchEngine->checkParameters(W);
    if(BranchInterval<0) BranchInterval=1;
    {
      ostringstream o;
      if (useAlternate=="yes") o << "  Using Alternate Mover"<<endl;
      else o << "  Using Ceperley Mover"<<endl;
      o << "  BranchInterval = " << BranchInterval << "\n";
      o << "  Steps per block = " << nSteps << "\n";
      o << "  Number of blocks = " << nBlocks << "\n";
      app_log() << endl << o.str() << endl;
    }
        
    app_log() << " RNDMC Engine Initialization = " << init_timer.elapsed() << " secs " << endl;
  }
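The setup above clones the estimator, branch engine, and random generator once per thread so that the move loop can run lock-free. A minimal sketch of the RNG-per-thread pattern using only the standard library (the names are illustrative, not the QMCPACK types):

#include <omp.h>
#include <cstdio>
#include <random>
#include <vector>

int main()
{
  const int NumThreads = omp_get_max_threads();
  std::vector<std::mt19937> Rng(NumThreads);       // one generator per thread
  for (int ip = 0; ip < NumThreads; ++ip)
    Rng[ip].seed(1000 + ip);                       // distinct stream per thread
  std::vector<double> acc(NumThreads, 0.0);        // per-thread scratch
  #pragma omp parallel
  {
    const int ip = omp_get_thread_num();
    std::uniform_real_distribution<double> u(0.0, 1.0);
    for (int step = 0; step < 1000; ++step)
      acc[ip] += u(Rng[ip]);                       // touches only its own clone
  }
  for (int ip = 0; ip < NumThreads; ++ip)
    std::printf("thread %d accumulated %.3f\n", ip, acc[ip]);
  return 0;
}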
Example #5
bool DMCOMPOPT::run()
{
  bool variablePop = (Reconfiguration == "no");
  resetUpdateEngines();
  //enable collection mode: estimator data are gathered across nodes
  Estimators->setCollectionMode(true);
  Estimators->start(nBlocks);
  for(int ip=0; ip<NumThreads; ip++)
    Movers[ip]->startRun(nBlocks,false);
  Timer myclock;
  IndexType block = 0;
  IndexType updatePeriod=(QMCDriverMode[QMC_UPDATE_MODE])?Period4CheckProperties:(nBlocks+1)*nSteps;
  int nsampls(0);
  int g_nsampls(0);
  do // block
  {
    Estimators->startBlock(nSteps);
    for(int ip=0; ip<NumThreads; ip++)
      Movers[ip]->startBlock(nSteps);
    for(IndexType step=0; step< nSteps; ++step, CurrentStep+=BranchInterval)
    {
      #pragma omp parallel
      {
        int ip=omp_get_thread_num();
        int now=CurrentStep;
        MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]), wit_first(W.begin()+wPerNode[ip]),
                                        wit2(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]);

        for(int interval = 0; interval<BranchInterval-1; ++interval,++now)
        {
          Movers[ip]->advanceWalkers(wit,wit_end,false);
          while(wit2!=wit_end)
          {
            (**wit2).addPropertyHistoryPoint(Eindx,(**wit2).getPropertyBase()[LOCALENERGY]);
            wit2++;
          }
          wit2=wit_first;
          if (myPeriod4WalkerDump&&((now+1)%myPeriod4WalkerDump==0))
          {
            wClones[ip]->saveEnsemble(wit2,wit_end);
            #pragma omp master
            nsampls+=W.getActiveWalkers();
          }
        }

        wClones[ip]->resetCollectables();

        Movers[ip]->advanceWalkers(wit,wit_end,false);
        wit2=wit_first;
        while(wit2!=wit_end)
        {
          (**wit2).addPropertyHistoryPoint(Eindx,(**wit2).getPropertyBase()[LOCALENERGY]);
          wit2++;
        }
        wit2=wit_first;
        if (myPeriod4WalkerDump&&((now+1)%myPeriod4WalkerDump==0))
        {
          wClones[ip]->saveEnsemble(wit2,wit_end);
          #pragma omp master
          nsampls+=W.getActiveWalkers();
        }

        Movers[ip]->setMultiplicity(wit,wit_end);

        if(QMCDriverMode[QMC_UPDATE_MODE] && now%updatePeriod == 0)
          Movers[ip]->updateWalkers(wit, wit_end);

      }//#pragma omp parallel
    }
//       branchEngine->debugFWconfig();
    #pragma omp parallel for
    for (int ip=0; ip<NumThreads; ++ip)
    {
      MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]);
      while (wit!=wit_end)
      {
        Walker_t& thisWalker(**wit);
        Walker_t::Buffer_t& w_buffer(thisWalker.DataSet);
        wClones[ip]->loadWalker(thisWalker,true);
        psiClones[ip]->copyFromBuffer(*wClones[ip],w_buffer);
        vector<RealType> Dsaved(NumOptimizables,0);
        vector<RealType> HDsaved(NumOptimizables,0);
        psiClones[ip]->evaluateDerivatives(*wClones[ip],dummyOptVars[ip],Dsaved,HDsaved,true);//SH like deriv style
        #pragma omp critical
        fillComponentMatrices(Dsaved,HDsaved,thisWalker);
        wit++;
      }
    }
    branchEngine->branch(CurrentStep,W, branchClones);
    if(variablePop)
      FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode);
    Estimators->stopBlock(acceptRatio());
    block++;
    recordBlock(block);
    g_nsampls=nsampls;
    myComm->allreduce(g_nsampls);
  }
  while(((block<nBlocks) || (g_nsampls<nTargetSamples)) && myclock.elapsed()<MaxCPUSecs);
  //for(int ip=0; ip<NumThreads; ip++) Movers[ip]->stopRun();
  for(int ip=0; ip<NumThreads; ip++)
    *(RandomNumberControl::Children[ip])=*(Rng[ip]);
  //release the per-walker property-history buffers
  MCWalkerConfiguration::iterator wit(W.begin()), wit_end(W.end());
  while(wit!=wit_end)
  {
    (**wit).deletePropertyHistory();
    wit++;
  }
  Estimators->stop();
  return finalize(block);
}
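The block loop above keeps running until the globally summed sample count reaches nTargetSamples; myComm->allreduce(g_nsampls) performs an MPI sum. A minimal sketch of that termination test written against raw MPI (the per-block sample counts are made up):

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  const int nTargetSamples = 1000;
  int nsampls = 0, g_nsampls = 0, block = 0;
  do
  {
    nsampls += 100;                      // pretend this block dumped samples
    g_nsampls = nsampls;                 // local tally ...
    MPI_Allreduce(MPI_IN_PLACE, &g_nsampls, 1, MPI_INT, MPI_SUM,
                  MPI_COMM_WORLD);       // ... summed over all ranks
    ++block;
  } while (g_nsampls < nTargetSamples);
  if (rank == 0)
    std::printf("reached %d samples after %d blocks\n", g_nsampls, block);
  MPI_Finalize();
  return 0;
}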
Example #6
void DMCOMPOPT::resetUpdateEngines()
{
  ReportEngine PRE("DMCOMPOPT","resetUpdateEngines");
  resetComponents(qmcNode);
  bool fixW = (Reconfiguration == "yes");
  Timer init_timer;
//     HACK HACK HACK
// This is so ugly it's probably a crime. It must be fixed.
  if(firsttime)
  {
//       for(int ip=1; ip<NumThreads; ++ip)
//       {
//         delete wClones[ip];
//         delete psiClones[ip];
//         delete hClones[ip];
//       }
//       wClones.resize(0);
    wClones.clear();
    psiClones.clear();
    hClones.clear();
    firsttime=false;
  }
  for (int ip=0; ip<NumThreads; ++ip)
  {
    opt_variables_type dummy;
    psiClones[ip]->checkInVariables(dummy);
    dummy.resetIndex();
    psiClones[ip]->checkOutVariables(dummy);
    dummyOptVars.push_back(dummy);
  }
  NumOptimizables=dummyOptVars[0].size();
  resizeForOpt(NumOptimizables);
  makeClones(W,Psi,H);
  if(Movers.empty())
  {
//       //load walkers
    Eindx = W.addPropertyHistory(wlen);
    W.loadEnsemble(wClones);
    for(int ip=1; ip<NumThreads; ++ip)
      wClones[ip]->addPropertyHistory(wlen);
//       m_param.put(qmcNode);
//       put(qmcNode);
//       //app_log()<<"DMCOMPOPT::resetComponents"<<endl;
//       Estimators->reset();
//       branchEngine->resetRun(qmcNode);
//       branchEngine->checkParameters(W);
    branchEngine->initWalkerController(W,fixW,false);
    //if(QMCDriverMode[QMC_UPDATE_MODE]) W.clearAuxDataSet();
    Movers.resize(NumThreads,0);
    branchClones.resize(NumThreads,0);
    Rng.resize(NumThreads,0);
    estimatorClones.resize(NumThreads,0);
    FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode);
    {
      //log file
      ostringstream o;
      o << "  Initial partition of walkers on a node: ";
      std::copy(wPerNode.begin(),wPerNode.end(),ostream_iterator<int>(o," "));
      o << "\n";
      if(QMCDriverMode[QMC_UPDATE_MODE])
        o << "  Updates by particle-by-particle moves";
      else
        o << "  Updates by walker moves";
      if(UseFastGrad == "yes")
        o << " using fast gradient version ";
      else
        o << " using full-ratio version ";
      if(KillNodeCrossing)
        o << "\n  Walkers are killed when a node crossing is detected";
      else
        o << "\n  DMC moves are rejected when a node crossing is detected";
      app_log() << o.str() << endl;
    }
    #pragma omp parallel for
    for(int ip=0; ip<NumThreads; ++ip)
    {
      estimatorClones[ip]= new EstimatorManager(*Estimators);
      estimatorClones[ip]->setCollectionMode(false);
      Rng[ip]=new RandomGenerator_t(*RandomNumberControl::Children[ip]);
      hClones[ip]->setRandomGenerator(Rng[ip]);
      branchClones[ip] = new BranchEngineType(*branchEngine);
      if(QMCDriverMode[QMC_UPDATE_MODE])
      {
        if(UseFastGrad == "yes")
          Movers[ip] = new DMCUpdatePbyPWithRejectionFast(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
        else
          Movers[ip] = new DMCUpdatePbyPWithRejection(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
//           Movers[ip]->put(qmcNode);
//           Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
//           Movers[ip]->initWalkersForPbyP(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
      }
      else
      {
        if(KillNodeCrossing)
          Movers[ip] = new DMCUpdateAllWithKill(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
        else
          Movers[ip] = new DMCUpdateAllWithRejection(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
//           Movers[ip]->put(qmcNode);
//           Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
//           Movers[ip]->initWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
      }
    }
//       #pragma omp parallel for
//       for (int ip=0; ip<NumThreads; ++ip)
//       {
//         MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]);
//         while (wit!=wit_end)
//         {
//           Walker_t& thisWalker(**wit);
//           Walker_t::Buffer_t& w_buffer(thisWalker.DataSet);
//           wClones[ip]->loadWalker(thisWalker,true);
//           psiClones[ip]->copyFromBuffer(*wClones[ip],w_buffer);
//           psiClones[ip]->updateBuffer(*wClones[ip],w_buffer,true);
//           wit++;
//         }
//       }
  }
  #pragma omp parallel for
  for(int ip=0; ip<NumThreads; ++ip)
  {
    if(QMCDriverMode[QMC_UPDATE_MODE])
    {
      Movers[ip]->put(qmcNode);
      Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
      Movers[ip]->initWalkersForPbyP(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
    }
    else
    {
      Movers[ip]->put(qmcNode);
      Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
      Movers[ip]->initWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
    }
  }
  //attach a property-history buffer (weight index) to every walker
  MCWalkerConfiguration::iterator wit(W.begin()), wit_end(W.end());
  while(wit!=wit_end)
  {
    (**wit).addPropertyHistory(wlen);
    wit++;
  }
  std::vector<IndexType> samples_th(omp_get_max_threads(),0);
  myPeriod4WalkerDump=(Period4WalkerDump>0)?Period4WalkerDump:(nBlocks+1)*nSteps;
  samples_this_node = nTargetSamples/myComm->size();
  if (nTargetSamples%myComm->size() > myComm->rank())
    samples_this_node+=1;
  int samples_each_thread = samples_this_node/omp_get_max_threads();
  for (int ip=0; ip<omp_get_max_threads(); ++ip)
    samples_th[ip]=samples_each_thread;
  if(samples_this_node%omp_get_max_threads())
    for (int ip=0; ip < samples_this_node%omp_get_max_threads(); ++ip)
      samples_th[ip] +=1;
  int ndumps = std::max(samples_this_node/W.getActiveWalkers() + 1,2);
  myPeriod4WalkerDump = nBlocks*nSteps/ndumps;
  app_log() << "  Samples are dumped every " << myPeriod4WalkerDump << " steps " << endl;
  app_log() << "  Total Sample Size =" << nTargetSamples << endl;
  app_log() << "  Nodes Sample Size =" << samples_this_node << endl;
  for (int ip=0; ip<NumThreads; ++ip)
    app_log()  << "    Sample size for thread " <<ip<<" = " << samples_th[ip] << endl;
  #pragma omp critical
  for(int ip=0; ip<NumThreads; ++ip)
  {
    wClones[ip]->clearEnsemble();
    wClones[ip]->setNumSamples(samples_th[ip]);
  }
  t= Movers[0]->getTau();
  clearComponentMatrices();
  branchEngine->checkParameters(W);
  int mxage=mover_MaxAge;
  if(fixW)
  {
    if(BranchInterval<0)
      BranchInterval=nSteps;
    mxage=(mover_MaxAge<0)?0:mover_MaxAge;
    for(int ip=0; ip<Movers.size(); ++ip)
      Movers[ip]->MaxAge=mxage;
  }
  else
  {
    if(BranchInterval<0)
      BranchInterval=1;
    int miage=(QMCDriverMode[QMC_UPDATE_MODE])?1:5;
    mxage=(mover_MaxAge<0)?miage:mover_MaxAge;
    for(int ip=0; ip<Movers.size(); ++ip)
      Movers[ip]->MaxAge=mxage;
  }
  {
    ostringstream o;
    if(fixW)
      o << "  Fixed population using reconfiguration method\n";
    else
      o << "  Fluctuating population\n";
    o << "  Persisent walkers are killed after " << mxage << " MC sweeps\n";
    o << "  BranchInterval = " << BranchInterval << "\n";
    o << "  Steps per block = " << nSteps << "\n";
    o << "  Number of blocks = " << nBlocks << "\n";
    app_log() << o.str() << endl;
  }
  app_log() << "  DMC Engine Initialization = " << init_timer.elapsed() << " secs " << endl;
}
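The sample bookkeeping above apportions nTargetSamples in two stages: first across MPI ranks, with low ranks absorbing the remainder, then across each rank's threads in the same way. A toy sketch that prints the per-thread quotas for assumed sizes:

#include <cstdio>
#include <vector>

int main()
{
  const int nTargetSamples = 10, nRanks = 3, nThreads = 4;
  for (int rank = 0; rank < nRanks; ++rank)
  {
    // stage 1: split over MPI ranks, low ranks absorb the remainder
    int samples_this_node = nTargetSamples / nRanks
                          + (nTargetSamples % nRanks > rank ? 1 : 0);
    // stage 2: split the node share over threads the same way
    std::vector<int> samples_th(nThreads, samples_this_node / nThreads);
    for (int ip = 0; ip < samples_this_node % nThreads; ++ip)
      samples_th[ip] += 1;
    std::printf("rank %d: %d samples ->", rank, samples_this_node);
    for (int ip = 0; ip < nThreads; ++ip)
      std::printf(" %d", samples_th[ip]);
    std::printf("\n");
  }
  return 0;
}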
Example #7
  bool DMCOMP::run() {

    bool variablePop = (Reconfiguration == "no");
    resetUpdateEngines();
    //enable collection mode: estimator data are gathered across nodes
    Estimators->setCollectionMode(true);
    Estimators->start(nBlocks);
    for(int ip=0; ip<NumThreads; ip++) Movers[ip]->startRun(nBlocks,false);

    Timer myclock;
    IndexType block = 0;
    IndexType updatePeriod=(QMCDriverMode[QMC_UPDATE_MODE])?Period4CheckProperties:(nBlocks+1)*nSteps;

    do // block
    {
      Estimators->startBlock(nSteps);
      for(int ip=0; ip<NumThreads; ip++) Movers[ip]->startBlock(nSteps);
      for(IndexType step=0; step< nSteps; ++step, CurrentStep+=BranchInterval)
      {
        if(storeConfigs && (CurrentStep%storeConfigs == 0)) {
          ForwardWalkingHistory.storeConfigsForForwardWalking(W);
          W.resetWalkerParents();
        }
#pragma omp parallel for
        for(int ip=0; ip<NumThreads; ++ip)
        {
          int now=CurrentStep;
          MCWalkerConfiguration::iterator 
            wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]);

          for(int interval = 0;interval<BranchInterval-1; ++interval,++now)
            Movers[ip]->advanceWalkers(wit,wit_end,false);

          wClones[ip]->resetCollectables();
          Movers[ip]->advanceWalkers(wit,wit_end,false);

          Movers[ip]->setMultiplicity(wit,wit_end);

          if(QMCDriverMode[QMC_UPDATE_MODE] && now%updatePeriod == 0) Movers[ip]->updateWalkers(wit, wit_end);
        }//#pragma omp parallel
        
        Estimators->accumulateCollectables(wClones,1.0);
        branchEngine->branch(CurrentStep,W, branchClones);
//         if(storeConfigs && (CurrentStep%storeConfigs == 0)) {
//           ForwardWalkingHistory.storeConfigsForForwardWalking(W);
//           W.resetWalkerParents();
//         }
        if(variablePop) FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode);
      } 
//       branchEngine->debugFWconfig();
      

      Estimators->stopBlock(acceptRatio());
      block++;
      recordBlock(block);

    } while(block<nBlocks && myclock.elapsed()<MaxCPUSecs);

    //for(int ip=0; ip<NumThreads; ip++) Movers[ip]->stopRun();
    for(int ip=0; ip<NumThreads; ip++) 
      *(RandomNumberControl::Children[ip])=*(Rng[ip]);

    Estimators->stop();
    return finalize(block);
  }
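Each outer step of the DMC drivers really performs BranchInterval walker advances: BranchInterval-1 quiet ones, then a final advance whose results feed branchEngine->branch, after which CurrentStep jumps by BranchInterval. A toy sketch of the counter arithmetic:

#include <cstdio>

int main()
{
  const int nSteps = 3, BranchInterval = 4;
  int CurrentStep = 0;
  for (int step = 0; step < nSteps; ++step, CurrentStep += BranchInterval)
  {
    int now = CurrentStep;
    for (int interval = 0; interval < BranchInterval - 1; ++interval, ++now)
    { /* advanceWalkers(wit, wit_end, false): quiet advance */ }
    /* final advance here, then branch(CurrentStep, ...) */
    std::printf("step %d: %d quiet advances, branch at CurrentStep=%d (now=%d)\n",
                step, BranchInterval - 1, CurrentStep, now);
  }
  return 0;
}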
Example #8
File: WFMCSingleOMP.cpp  Project: jyamu/qmc
bool WFMCSingleOMP::run()
{
  resetRun();
  //start the main estimator
  Estimators->start(nBlocks);
  ///Keep a copy of the current walkers; each block reloads one stored walker's configuration.
  MCWalkerConfiguration Keeper(W);
  Keeper.createWalkers(W.begin(),W.end());
  MCWalkerConfiguration::iterator Kit=(Keeper.begin()), Kit_end(Keeper.end());
  int block=0;
  while ((Kit!=Kit_end)&&(nBlocks>block))
  {
    MCWalkerConfiguration::iterator Wit(W.begin()), Wit_end(W.end());
    while ((Wit!=Wit_end))
    {
      (*Wit)->R=(*Kit)->R;
      (*Wit)->G=(*Kit)->G;
      (*Wit)->L=(*Kit)->L;
      //(*Wit)->Drift=(*Kit)->Drift;
      (*Wit)->reset();
      (*Wit)->resetPropertyHistory();
      ++Wit;
    }
    #pragma omp parallel for
    for (int ip=0; ip<NumThreads; ++ip)
      Movers[ip]->initWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
    Wit=W.begin();
    while ((Wit!=Wit_end))
    {
      //       app_log()<<std::exp((*Wit)->Properties(LOGPSI))<<endl;
      (*Wit)->PropertyHistory[0][0]=std::exp((*Wit)->Properties(LOGPSI));
      ++Wit;
    }
    #pragma omp parallel
    {
      #pragma omp for
      for (int ip=0; ip<NumThreads; ++ip)
        Movers[ip]->startRun(nBlocks,false);
      #pragma omp for
      for (int ip=0; ip<NumThreads; ++ip)
      {
        //assign the iterators and reuse them
        MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]);
        Movers[ip]->startBlock(nSteps);
        for (int step=0; step<nSteps; ++step)
        {
          Movers[ip]->advanceWalkers(wit,wit_end,false);
          //      Movers[ip]->updateWalkers(wit,wit_end);
          //      wClones[ip]->saveEnsemble(wit,wit_end);
        }
        Movers[ip]->accumulate(wit,wit_end);
        Movers[ip]->stopBlock();
      }
      #pragma omp master
      {
        Estimators->stopBlock(estimatorClones);
        recordBlock(block+1);
        block++;
        ++Kit;
      }
    }
  }//end of loop over stored walkers
  Estimators->stop(estimatorClones);
  //copy back the random states
  for (int ip=0; ip<NumThreads; ++ip)
    *(RandomNumberControl::Children[ip])=*(Rng[ip]);
  //finalize a qmc section
  return finalize(nBlocks);
}
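WFMCSingleOMP replays stored configurations: each block copies one walker from Keeper into every working walker before the moves run. A sketch of that replay pattern with plain structs standing in for the walker class (all names illustrative):

#include <cstdio>
#include <vector>

struct Walker { double R; };             // illustrative: R = configuration

int main()
{
  std::vector<Walker> Keeper = {{0.1}, {0.2}, {0.3}};   // stored walkers
  std::vector<Walker> W(4);                             // working walkers
  const int nBlocks = 3;
  int block = 0;
  auto Kit = Keeper.begin();
  while (Kit != Keeper.end() && block < nBlocks)
  {
    for (auto& w : W)
      w.R = Kit->R;                      // all walkers restart from *Kit
    /* run one block of moves and accumulate estimators here */
    std::printf("block %d replayed configuration R=%.1f\n", block, Kit->R);
    ++block;
    ++Kit;
  }
  return 0;
}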
Example #9
File: RNDMCOMP.cpp  Project: jyamu/qmc
void RNDMCOMP::resetUpdateEngines()
{
  ReportEngine PRE("RNDMCOMP","resetUpdateEngines");
  Timer init_timer;
  makeClones(W,Psi,H);
  makeClones(W,Guide);
  if(Movers.empty())
  {
    W.loadEnsemble(wClones);
    branchEngine->initWalkerController(W,false,true);
    branchEngine->setRN(true);
    //if(QMCDriverMode[QMC_UPDATE_MODE]) W.clearAuxDataSet();
    Movers.resize(NumThreads,0);
    branchClones.resize(NumThreads,0);
    Rng.resize(NumThreads,0);
    estimatorClones.resize(NumThreads,0);
    FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode);
    bool klw=(KillWalker=="yes");
    {
      //log file
      ostringstream o;
      o << "  Initial partition of walkers on a node: ";
      std::copy(wPerNode.begin(),wPerNode.end(),ostream_iterator<int>(o," "));
      o << "\n";
      o << "Killing Walkers at nodes " << (useAlternate!="yes") <<endl;
      o << "Running the released node driver."<<endl;
      app_log() << o.str();
    }
    #pragma omp parallel for
    for(int ip=0; ip<NumThreads; ++ip)
    {
      estimatorClones[ip]= new EstimatorManager(*Estimators);
      estimatorClones[ip]->setCollectionMode(false);
      Rng[ip]=new RandomGenerator_t(*RandomNumberControl::Children[ip]);
      hClones[ip]->setRandomGenerator(Rng[ip]);
      branchClones[ip] = new BranchEngineType(*branchEngine);
//         Movers[ip] = new RNDMCUpdatePbyPWithRejectionFast(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
      Movers[ip] = new RNDMCUpdatePbyPFast(*wClones[ip],*wgClones[ip],*psiClones[ip],*guideClones[ip],*hClones[ip],*Rng[ip]);
      Movers[ip]->put(qmcNode);
      Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
      MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]);
      Movers[ip]->initWalkersForPbyP(wit, wit_end);
    }
  }
//     branchEngine->checkParameters(W);
  if(BranchInterval<0)
    BranchInterval=1;
  {
    ostringstream o;
//       if (useAlternate=="yes") o << "  Using Alternate Mover"<<endl;
//       else o << "  Using Ceperley Mover"<<endl;
    o << "  BranchInterval = " << BranchInterval << "\n";
    o << "  Steps per block = " << nSteps << "\n";
    o << "  Number of blocks = " << nBlocks << "\n";
    app_log() << endl << o.str() << endl;
  }
//     RealType avg_w(0);
//     RealType n_w(0);
//     RealType logepsilon(0);
// #pragma omp parallel
//       {
//         int ip=omp_get_thread_num();
//
//         for (int step=0; step<nWarmupSteps; ++step)
//         {
//           avg_w=0;
//           n_w=0;
//           for (int prestep=0; prestep<myRNWarmupSteps; ++prestep)
//           {
//             Movers[ip]->advanceWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1],true);
//             #pragma omp single
//             {
//               MCWalkerConfiguration::iterator wit(W.begin()), wit_end(W.end());
//               while (wit!=wit_end)
//               {
//                 avg_w += (*wit)->Weight;
//                 n_w +=1;
//                 wit++;
//               }
//             }
//             #pragma omp barrier
//            }
//            #pragma omp single
//            {
//              avg_w *= 1.0/n_w;
//              RealType w_m = avg_w/(1.0-avg_w);
//              w_m = std::log(0.5+0.5*w_m);
//              if (std::abs(w_m)>0.01)
//                logepsilon += w_m;
//            }
//            #pragma omp barrier
//            Movers[ip]->setLogEpsilon(logepsilon);
//         }
//
//       }
  app_log() << " RNDMC Engine Initialization = " << init_timer.elapsed() << " secs " << endl;
}
Example #10
File: VMCSingleOMP.cpp  Project: jyamu/qmc
bool VMCSingleOMP::run()
{
  resetRun();
  //start the main estimator
  Estimators->start(nBlocks);
  for (int ip=0; ip<NumThreads; ++ip)
    Movers[ip]->startRun(nBlocks,false);
  Traces->startRun(nBlocks,traceClones);
  const bool has_collectables=W.Collectables.size();
  ADIOS_PROFILE::profile_adios_init(nBlocks);
  for (int block=0; block<nBlocks; ++block)
  {
    ADIOS_PROFILE::profile_adios_start_comp(block);
    #pragma omp parallel
    {
      int ip=omp_get_thread_num();
      //IndexType updatePeriod=(QMCDriverMode[QMC_UPDATE_MODE])?Period4CheckProperties:(nBlocks+1)*nSteps;
      IndexType updatePeriod=(QMCDriverMode[QMC_UPDATE_MODE])?Period4CheckProperties:0;
      //assign the iterators and reuse them
      MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]);
      Movers[ip]->startBlock(nSteps);
      int now_loc=CurrentStep;
      RealType cnorm=1.0/static_cast<RealType>(wPerNode[ip+1]-wPerNode[ip]);
      for (int step=0; step<nSteps; ++step)
      {
        Movers[ip]->set_step(now_loc);
        //collectables are reset, it is accumulated while advancing walkers
        wClones[ip]->resetCollectables();
        Movers[ip]->advanceWalkers(wit,wit_end,false);
        if(has_collectables)
          wClones[ip]->Collectables *= cnorm;
        Movers[ip]->accumulate(wit,wit_end);
        ++now_loc;
        //if (updatePeriod&& now_loc%updatePeriod==0) Movers[ip]->updateWalkers(wit,wit_end);
        if (Period4WalkerDump&& now_loc%Period4WalkerDump==0)
          wClones[ip]->saveEnsemble(wit,wit_end);
//           if(storeConfigs && (now_loc%storeConfigs == 0))
//             ForwardWalkingHistory.storeConfigsForForwardWalking(*wClones[ip]);
      }
      Movers[ip]->stopBlock(false);
    }//end of parallel region
    //Estimators->accumulateCollectables(wClones,nSteps);
    CurrentStep+=nSteps;
    Estimators->stopBlock(estimatorClones);
    ADIOS_PROFILE::profile_adios_end_comp(block);
    ADIOS_PROFILE::profile_adios_start_trace(block);
    Traces->write_buffers(traceClones, block);
    ADIOS_PROFILE::profile_adios_end_trace(block);
    ADIOS_PROFILE::profile_adios_start_checkpoint(block);
    if(storeConfigs)
      recordBlock(block);
    ADIOS_PROFILE::profile_adios_end_checkpoint(block);
  }//block
  ADIOS_PROFILE::profile_adios_finalize(myComm, nBlocks);
  Estimators->stop(estimatorClones);
  for (int ip=0; ip<NumThreads; ++ip)
    Movers[ip]->stopRun2();
  Traces->stopRun();
  //copy back the random states
  for (int ip=0; ip<NumThreads; ++ip)
    *(RandomNumberControl::Children[ip])=*(Rng[ip]);
  ///write samples to a file
  bool wrotesamples=DumpConfig;
  if(DumpConfig)
  {
    wrotesamples=W.dumpEnsemble(wClones,wOut,myComm->size(),nBlocks);
    if(wrotesamples)
      app_log() << "  samples are written to the config.h5" << endl;
  }
  //finalize a qmc section
  return finalize(nBlocks,!wrotesamples);
}
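When collectables are present, each thread scales its per-step sums by cnorm = 1/(number of walkers the thread owns), so the per-thread averages can later be added directly. A toy sketch of that normalization under an assumed two-thread partition:

#include <cstdio>
#include <vector>

int main()
{
  std::vector<int> wPerNode = {0, 3, 5};         // 2 threads own 3 and 2 walkers
  for (int ip = 0; ip < 2; ++ip)
  {
    double cnorm = 1.0 / static_cast<double>(wPerNode[ip + 1] - wPerNode[ip]);
    double sum = 0.0;
    for (int w = wPerNode[ip]; w < wPerNode[ip + 1]; ++w)
      sum += 1.0 + w;                            // stand-in per-walker value
    std::printf("thread %d normalized collectable %.3f\n", ip, sum * cnorm);
  }
  return 0;
}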