void mmTestSet(char *setName, int mapSize, int cloneSize, int cloneCount, double finRatio,
	double endRatio, double repRatio, double noiseRatio)
/* mmTestSet - Make test set for map-mender: a repeat map, a clone list,
 * overlap info, and a sorted answer file, written to "<setName>.<ext>".
 * Uses a fixed random seed so generated test sets are reproducible. */
{
char fileName[512];
int *repMap;
struct clone *clones, *cloneList = NULL;

//srand( (unsigned)time( NULL ) );
srand( 1235 );	/* fixed seed: reproducible test sets */

/* Make repeat map. snprintf guards against overflow from a long setName. */
snprintf(fileName, sizeof(fileName), "%s.repMap", setName);
repMap = makeRepMap(mapSize, repRatio, fileName);
printf("Made repeat map of %d kb in %s\n", mapSize, fileName);

/* Make clone list. */
snprintf(fileName, sizeof(fileName), "%s.clones", setName);
clones = makeClones(mapSize, cloneSize, cloneCount, finRatio, endRatio, repMap, fileName);
cloneList = cloneListFromArray(clones, cloneCount);
printf("Made %d clones in %s\n", cloneCount, fileName);

/* Make up overlap info. */
snprintf(fileName, sizeof(fileName), "%s.overlap", setName);
makeOverlaps(repMap, cloneList, fileName, noiseRatio);

/* Write out answer sorted by clone start position. */
slSort(&cloneList, cloneCmpStart);
snprintf(fileName, sizeof(fileName), "%s.answer", setName);
writeAnswer(cloneList, fileName);
}
Example #2
0
  // Forward-walking driver: either computes and stores forward-walking
  // weights (doWeights==1) or recovers weightLength from an existing HDF5
  // weight file; then optionally replays stored walker coordinates to
  // accumulate per-step observables; finally feeds the weight and
  // observable files to the estimators.  Always returns true.
  bool FWSingleOMP::run() 
  {
    hdf_WGT_data.setFileName(xmlrootName);
    hdf_OBS_data.setFileName(xmlrootName);

    if (doWeights==1)
    {
      fillIDMatrix();        
      hdf_WGT_data.makeFile();
      hdf_WGT_data.openFile();
      hdf_WGT_data.addFW(0);
      for (int i=0;i<Weights.size();i++) hdf_WGT_data.addStep(i,Weights[i]);
      hdf_WGT_data.closeFW();
      hdf_WGT_data.closeFile();
      // One generation per weight step: propagate parents, recompute
      // weights, and append them to the weight file.
      for(int ill=1;ill<weightLength;ill++)
      {
        transferParentsOneGeneration();
        FWOneStep();
        //       WeightHistory.push_back(Weights);
        hdf_WGT_data.openFile();
        hdf_WGT_data.addFW(ill);
        for (int i=0;i<Weights.size();i++) hdf_WGT_data.addStep(i,Weights[i]);
        hdf_WGT_data.closeFW();
        hdf_WGT_data.closeFile();
      }
    }
    else
    {
      fillIDMatrix();
      //           find weight length from the weight file
      hid_t f_file = H5Fopen(hdf_WGT_data.getFileName().c_str(),H5F_ACC_RDONLY,H5P_DEFAULT);
      hsize_t numGrps = 0;
      H5Gget_num_objs(f_file, &numGrps);
      // One HDF5 group per stored step, minus one non-step group.
      weightLength = static_cast<int> (numGrps)-1;
      if (H5Fclose(f_file)>-1) f_file=-1;
      if (verbose>0) app_log()<<" weightLength "<<weightLength<<endl;
    }
    if (verbose>0) app_log()<<" Done Computing Weights"<<endl;


    if (doObservables==1)
    {
      int nprops = H.sizeOfObservables();//local energy, local potnetial and all hamiltonian elements
      int FirstHamiltonian = H.startIndex();
      //     vector<vector<vector<RealType> > > savedValues;

      int nelectrons = W[0]->R.size();
      int nfloats=OHMMS_DIM*nelectrons;   // flat coordinate count per walker


      //     W.clearEnsemble();
      makeClones(W,Psi,H);

      // One scratch walker record per thread.
      // NOTE(review): these are allocated with new and never deleted in
      // this function — confirm cleanup happens elsewhere.
      vector<ForwardWalkingData* > FWvector;
      for(int ip=0; ip<NumThreads; ip++) FWvector.push_back(new ForwardWalkingData(nelectrons));


      if (myComm->rank()==0) hdf_OBS_data.makeFile();
      hdf_float_data.openFile(fname.str());

      for(int step=0;step<numSteps;step++)
      {
        hdf_float_data.setStep(step);


        // Per walker: eloc, local potential, then nprops observables.
        vector<RealType> stepObservables(walkersPerBlock[step]*(nprops+2), 0);
        for(int wstep=0; wstep<walkersPerBlock[step];)
        {
          vector<float> ThreadsCoordinate(NumThreads*nfloats);
          int nwalkthread = hdf_float_data.getFloat(wstep*nfloats, (wstep+NumThreads)*nfloats, ThreadsCoordinate) / nfloats;
          //         for(int j=0;j<ThreadsCoordinate.size();j++)cout<<ThreadsCoordinate[j]<<" ";
          //         cout<<endl;
#pragma omp parallel for
          for(int ip=0; ip<nwalkthread; ip++) 
          {
            vector<float> SINGLEcoordinate(0);
            vector<float>::iterator TCB1(ThreadsCoordinate.begin()+ip*nfloats), TCB2(ThreadsCoordinate.begin()+(1+ip)*nfloats);

            SINGLEcoordinate.insert(SINGLEcoordinate.begin(),TCB1,TCB2);
            FWvector[ip]->fromFloat(SINGLEcoordinate);
            wClones[ip]->R=FWvector[ip]->Pos;
            wClones[ip]->update();
            // Re-evaluate wavefunction and Hamiltonian for the restored
            // configuration on this thread's clones.
            RealType logpsi(psiClones[ip]->evaluateLog(*wClones[ip]));
            RealType eloc=hClones[ip]->evaluate( *wClones[ip] );
            hClones[ip]->auxHevaluate(*wClones[ip]);
            int indx=(wstep+ip)*(nprops+2);
            stepObservables[indx]= eloc;
            stepObservables[indx+1]= hClones[ip]->getLocalPotential();
            for(int i=0;i<nprops;i++) stepObservables[indx+i+2] = hClones[ip]->getObservable(i) ;
          }
          wstep+=nwalkthread;
          for(int ip=0; ip<NumThreads; ip++)  wClones[ip]->resetCollectables();
        }
        hdf_OBS_data.openFile();
        hdf_OBS_data.addStep(step, stepObservables);
        hdf_OBS_data.closeFile();
        //       savedValues.push_back(stepObservables);
        hdf_float_data.endStep();
        if (verbose >1) cout<<"Done with step: "<<step<<endl;
      }
    }


    if(doDat>=1)
    {
      vector<int> Dimensions(4);
      hdf_WGT_data.openFile();
      hdf_OBS_data.openFile();
      Estimators->start(weightLength,1);
      int nprops;
      if (doObservables==1) nprops = H.sizeOfObservables()+2;
      else
      {
        // Infer the property count from the ratio of observable entries
        // to weight entries at step 0.
        int Noo = hdf_OBS_data.numObsStep(0);
        int Nwl = hdf_WGT_data.numWgtStep(0);
        nprops = Noo/Nwl;
      }
      for(int ill=0;ill<weightLength;ill++)
      {    
        Dimensions[0]=ill;
        Dimensions[1]= nprops ;
        Dimensions[2]=numSteps;
        Dimensions[3]=startStep;
        Estimators->startBlock(1);
        Estimators->accumulate(hdf_OBS_data,hdf_WGT_data,Dimensions);
        Estimators->stopBlock(getNumberOfSamples(ill));
      }
      hdf_OBS_data.closeFile();
      hdf_WGT_data.closeFile();
      Estimators->stop();
    }
    return true;
  }
Example #3
0
// Build (on first call only) per-thread DMC movers plus branch-engine,
// estimator, and RNG clones, partitioning walkers across threads; on
// every call reset MaxAge and BranchInterval according to whether
// fixed-population reconfiguration is in use.
void DMCOMP::resetUpdateEngines()
{
  bool fixW = (Reconfiguration == "yes");  // fixed-population (reconfiguration) mode
  makeClones(W,Psi,H);
  if(Movers.empty())
  {
    branchEngine->initWalkerController(Tau,fixW);
    //if(QMCDriverMode[QMC_UPDATE_MODE]) W.clearAuxDataSet();
    Movers.resize(NumThreads,0);
    branchClones.resize(NumThreads,0);
    estimatorClones.resize(NumThreads,0);
    Rng.resize(NumThreads,0);
    FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode);
    app_log() << "  Initial partition of walkers ";
    std::copy(wPerNode.begin(),wPerNode.end(),ostream_iterator<int>(app_log()," "));
    app_log() << endl;
    #pragma omp parallel
    {
      int ip = omp_get_thread_num();
      // Thread 0 works on the originals; only the clones (ip>0) need
      // the walker properties registered.
      if(ip)
        hClones[ip]->add2WalkerProperty(*wClones[ip]);
      estimatorClones[ip]= new ScalarEstimatorManager(*Estimators,*hClones[ip]);
      Rng[ip]=new RandomGenerator_t();
      Rng[ip]->init(ip,NumThreads,-1);
      hClones[ip]->setRandomGenerator(Rng[ip]);
      branchClones[ip] = new BranchEngineType(*branchEngine);
      if(QMCDriverMode[QMC_UPDATE_MODE])
      {
        if(NonLocalMove == "yes")
        {
          DMCNonLocalUpdatePbyP* nlocMover=
            new DMCNonLocalUpdatePbyP(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
          nlocMover->put(qmcNode);
          Movers[ip]=nlocMover;
        }
        else
        {
          Movers[ip]= new DMCUpdatePbyPWithRejection(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
        }
        Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
        //Movers[ip]->initWalkersForPbyP(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
      }
      else
      {
        // NOTE(review): unlike the PbyP branch above, this branch builds
        // movers from the shared W/Psi/H/Random inside an omp parallel
        // region — confirm this sharing is intentional.
        if(NonLocalMove == "yes")
        {
          app_log() << "  Non-local update is used." << endl;
          DMCNonLocalUpdate* nlocMover= new DMCNonLocalUpdate(W,Psi,H,Random);
          nlocMover->put(qmcNode);
          Movers[ip]=nlocMover;
        }
        else
        {
          if(KillNodeCrossing)
          {
            Movers[ip] = new DMCUpdateAllWithKill(W,Psi,H,Random);
          }
          else
          {
            Movers[ip] = new DMCUpdateAllWithRejection(W,Psi,H,Random);
          }
        }
        Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
        //Movers[ip]->initWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
      }
    }
  }
  if(fixW)
  {
    if(QMCDriverMode[QMC_UPDATE_MODE])
      app_log() << "  DMCOMP PbyP Update with reconfigurations" << endl;
    else
      app_log() << "  DMCOMP walker Update with reconfigurations" << endl;
    // With reconfiguration, walkers never age out.
    for(int ip=0; ip<Movers.size(); ip++)
      Movers[ip]->MaxAge=0;
    if(BranchInterval<0)
    {
      BranchInterval=nSteps;
    }
  }
  else
  {
    if(QMCDriverMode[QMC_UPDATE_MODE])
    {
      app_log() << "  DMCOMP PbyP Update with a fluctuating population" << endl;
      for(int ip=0; ip<Movers.size(); ip++)
        Movers[ip]->MaxAge=1;
    }
    else
    {
      app_log() << "  DMCOMP walker Update with a fluctuating population" << endl;
      for(int ip=0; ip<Movers.size(); ip++)
        Movers[ip]->MaxAge=3;
    }
    if(BranchInterval<0)
      BranchInterval=1;
  }
  app_log() << "  BranchInterval = " << BranchInterval << endl;
  app_log() << "  Steps per block = " << nSteps << endl;
  app_log() << "  Number of blocks = " << nBlocks << endl;
}
Example #4
0
  // Released-node DMC setup: load the walker ensemble, configure the
  // branch engine for released-node running, and (on first call) build
  // per-thread movers — Alternate or Ceperley flavour — together with
  // their estimator/RNG/branch clones.
  void RNDMCOMP::resetUpdateEngines() 
  {

    ReportEngine PRE("RNDMCOMP","resetUpdateEngines");
 
    Timer init_timer;

    makeClones(W,Psi,H);

    if(Movers.empty()) 
    {
      //load walkers
      W.loadEnsemble();
      for(int ip=1;ip<NumThreads;++ip) wClones[ip]->loadEnsemble(W);

      // Third argument controls killing walkers at node crossings;
      // the Alternate mover runs with it disabled.
      if (useAlternate=="yes")
        branchEngine->initWalkerController(W,false,false);
        //branchEngine->initWalkerController(W,Tau,false,false);
      else
        branchEngine->initWalkerController(W,false,true);
        //branchEngine->initWalkerController(W,Tau,false,true);
      branchEngine->setRN(true);

      //if(QMCDriverMode[QMC_UPDATE_MODE]) W.clearAuxDataSet();
      Movers.resize(NumThreads,0);
      branchClones.resize(NumThreads,0);
      Rng.resize(NumThreads,0);
      estimatorClones.resize(NumThreads,0);
      FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode);
      bool klw=(KillWalker=="yes");  // NOTE(review): computed but not used in this function — confirm.

      {//log file
        ostringstream o;
        o << "  Initial partition of walkers on a node: ";
        std::copy(wPerNode.begin(),wPerNode.end(),ostream_iterator<int>(o," "));
        o << "\n";
        o << "Killing Walkers at nodes " << (useAlternate!="yes") <<endl;

        o << "Running the released node driver."<<endl;

        app_log() << o.str();
      }

#pragma omp parallel for
      for(int ip=0; ip<NumThreads; ++ip)
      {
        estimatorClones[ip]= new EstimatorManager(*Estimators);
        estimatorClones[ip]->setCollectionMode(false);

        Rng[ip]=new RandomGenerator_t(*RandomNumberControl::Children[ip]);
        hClones[ip]->setRandomGenerator(Rng[ip]);

        branchClones[ip] = new BranchEngineType(*branchEngine);
//         Movers[ip] = new RNDMCUpdatePbyPWithRejectionFast(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]); 
        
        if (useAlternate=="yes")
          Movers[ip] = new RNDMCUpdatePbyPAlternate(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);  
        else
          Movers[ip] = new RNDMCUpdatePbyPCeperley(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]); 
        Movers[ip]->put(qmcNode);
        Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);   
        MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]);
          Movers[ip]->initWalkersForPbyP(wit, wit_end);
        // NOTE: the indentation below is misleading but the braces are
        // balanced — the next '}' closes the loop body, the one after
        // closes if(Movers.empty()).
        }
      } 
//     branchEngine->checkParameters(W);
    if(BranchInterval<0) BranchInterval=1;
    {
      ostringstream o;
      if (useAlternate=="yes") o << "  Using Alternate Mover"<<endl;
      else o << "  Using Ceperley Mover"<<endl;
      o << "  BranchInterval = " << BranchInterval << "\n";
      o << "  Steps per block = " << nSteps << "\n";
      o << "  Number of blocks = " << nBlocks << "\n";
      app_log() << endl << o.str() << endl;
    }
        
    app_log() << " RNDMC Engine Initialization = " << init_timer.elapsed() << " secs " << endl;
  }
Example #5
0
// DMC-with-optimization setup: rebuilds clone sets on first use, creates
// per-thread movers and their support clones, registers per-walker
// property history, distributes sample-dump quotas across threads, and
// configures MaxAge/BranchInterval for the chosen population mode.
void DMCOMPOPT::resetUpdateEngines()
{
  ReportEngine PRE("DMCOMPOPT","resetUpdateEngines");
  resetComponents(qmcNode);
  bool fixW = (Reconfiguration == "yes");  // fixed-population mode?
  Timer init_timer;
//     HACK HACK HACK
// This is so ugly it's probably a crime. It must be fixed.
  if(firsttime)
  {
//       for(int ip=1; ip<NumThreads; ++ip)
//       {
//         delete wClones[ip];
//         delete psiClones[ip];
//         delete hClones[ip];
//       }
//       wClones.resize(0);
    wClones.clear();
    psiClones.clear();
    hClones.clear();
    firsttime=false;
  }
  // NOTE(review): this loop indexes psiClones[ip] even though the block
  // above may have just cleared the clone vectors and makeClones() is
  // only called afterwards — confirm the intended ordering.
  for (int ip=0; ip<NumThreads; ++ip)
  {
    opt_variables_type dummy;
    psiClones[ip]->checkInVariables(dummy);
    dummy.resetIndex();
    psiClones[ip]->checkOutVariables(dummy);
    dummyOptVars.push_back(dummy);
  }
  NumOptimizables=dummyOptVars[0].size();
  resizeForOpt(NumOptimizables);
  makeClones(W,Psi,H);
  if(Movers.empty())
  {
//       //load walkers
    Eindx = W.addPropertyHistory(wlen);
    W.loadEnsemble(wClones);
    for(int ip=1; ip<NumThreads; ++ip)
      wClones[ip]->addPropertyHistory(wlen);
//       m_param.put(qmcNode);
//       put(qmcNode);
//       //app_log()<<"DMCOMPOPT::resetComponents"<<endl;
//       Estimators->reset();
//       branchEngine->resetRun(qmcNode);
//       branchEngine->checkParameters(W);
    branchEngine->initWalkerController(W,fixW,false);
    //if(QMCDriverMode[QMC_UPDATE_MODE]) W.clearAuxDataSet();
    Movers.resize(NumThreads,0);
    branchClones.resize(NumThreads,0);
    Rng.resize(NumThreads,0);
    estimatorClones.resize(NumThreads,0);
    FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode);
    {
      //log file
      ostringstream o;
      o << "  Initial partition of walkers on a node: ";
      std::copy(wPerNode.begin(),wPerNode.end(),ostream_iterator<int>(o," "));
      o << "\n";
      if(QMCDriverMode[QMC_UPDATE_MODE])
        o << "  Updates by particle-by-particle moves";
      else
        o << "  Updates by walker moves";
      if(UseFastGrad == "yes")
        o << " using fast gradient version ";
      else
        o << " using full-ratio version ";
      if(KillNodeCrossing)
        o << "\n  Walkers are killed when a node crossing is detected";
      else
        o << "\n  DMC moves are rejected when a node crossing is detected";
      app_log() << o.str() << endl;
    }
    #pragma omp parallel for
    for(int ip=0; ip<NumThreads; ++ip)
    {
      estimatorClones[ip]= new EstimatorManager(*Estimators);
      estimatorClones[ip]->setCollectionMode(false);
      Rng[ip]=new RandomGenerator_t(*RandomNumberControl::Children[ip]);
      hClones[ip]->setRandomGenerator(Rng[ip]);
      branchClones[ip] = new BranchEngineType(*branchEngine);
      if(QMCDriverMode[QMC_UPDATE_MODE])
      {
        if(UseFastGrad == "yes")
          Movers[ip] = new DMCUpdatePbyPWithRejectionFast(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
        else
          Movers[ip] = new DMCUpdatePbyPWithRejection(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
//           Movers[ip]->put(qmcNode);
//           Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
//           Movers[ip]->initWalkersForPbyP(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
      }
      else
      {
        if(KillNodeCrossing)
          Movers[ip] = new DMCUpdateAllWithKill(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
        else
          Movers[ip] = new DMCUpdateAllWithRejection(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
//           Movers[ip]->put(qmcNode);
//           Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
//           Movers[ip]->initWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
      }
    }
//       #pragma omp parallel for
//       for (int ip=0; ip<NumThreads; ++ip)
//       {
//         MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]);
//         while (wit!=wit_end)
//         {
//           Walker_t& thisWalker(**wit);
//           Walker_t::Buffer_t& w_buffer(thisWalker.DataSet);
//           wClones[ip]->loadWalker(thisWalker,true);
//           psiClones[ip]->copyFromBuffer(*wClones[ip],w_buffer);
//           psiClones[ip]->updateBuffer(*wClones[ip],w_buffer,true);
//           wit++;
//         }
//       }
  }
  // (Re)initialize every mover on every call, not just the first.
  #pragma omp parallel for
  for(int ip=0; ip<NumThreads; ++ip)
  {
    if(QMCDriverMode[QMC_UPDATE_MODE])
    {
      Movers[ip]->put(qmcNode);
      Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
      Movers[ip]->initWalkersForPbyP(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
    }
    else
    {
      Movers[ip]->put(qmcNode);
      Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
      Movers[ip]->initWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
    }
  }
  //       adding weight index
  // NOTE(review): assumes W is non-empty; the first walker is handled
  // before the loop, the remainder inside it.
  MCWalkerConfiguration::iterator wit(W.begin()), wit_end(W.end());
  (**wit).addPropertyHistory(wlen);
  wit++;
  while(wit!=wit_end)
  {
    (**wit).addPropertyHistory(wlen);
    wit++;
  }
  // Distribute the node's sample quota as evenly as possible over
  // threads; the first (quota % nthreads) threads take one extra.
  std::vector<IndexType> samples_th(omp_get_max_threads(),0);
  myPeriod4WalkerDump=(Period4WalkerDump>0)?Period4WalkerDump:(nBlocks+1)*nSteps;
  samples_this_node = nTargetSamples/myComm->size();
  if (nTargetSamples%myComm->size() > myComm->rank())
    samples_this_node+=1;
  int samples_each_thread = samples_this_node/omp_get_max_threads();
  for (int ip=0; ip<omp_get_max_threads(); ++ip)
    samples_th[ip]=samples_each_thread;
  if(samples_this_node%omp_get_max_threads())
    for (int ip=0; ip < samples_this_node%omp_get_max_threads(); ++ip)
      samples_th[ip] +=1;
  int ndumps = std::max(samples_this_node/W.getActiveWalkers() + 1,2);
  myPeriod4WalkerDump = nBlocks*nSteps/ndumps;
  app_log() << "  Samples are dumped every " << myPeriod4WalkerDump << " steps " << endl;
  app_log() << "  Total Sample Size =" << nTargetSamples << endl;
  app_log() << "  Nodes Sample Size =" << samples_this_node << endl;
  for (int ip=0; ip<NumThreads; ++ip)
    app_log()  << "    Sample size for thread " <<ip<<" = " << samples_th[ip] << endl;
  // NOTE(review): 'omp critical' outside a parallel region has no
  // synchronization effect here — confirm it is intentional.
  #pragma omp critical
  for(int ip=0; ip<NumThreads; ++ip)
  {
    wClones[ip]->clearEnsemble();
    wClones[ip]->setNumSamples(samples_th[ip]);
  }
  t= Movers[0]->getTau();
  clearComponentMatrices();
  branchEngine->checkParameters(W);
  int mxage=mover_MaxAge;
  if(fixW)
  {
    if(BranchInterval<0)
      BranchInterval=nSteps;
    mxage=(mover_MaxAge<0)?0:mover_MaxAge;
    for(int ip=0; ip<Movers.size(); ++ip)
      Movers[ip]->MaxAge=mxage;
  }
  else
  {
    if(BranchInterval<0)
      BranchInterval=1;
    int miage=(QMCDriverMode[QMC_UPDATE_MODE])?1:5;
    mxage=(mover_MaxAge<0)?miage:mover_MaxAge;
    for(int ip=0; ip<Movers.size(); ++ip)
      Movers[ip]->MaxAge=mxage;
  }
  {
    ostringstream o;
    if(fixW)
      o << "  Fixed population using reconfiguration method\n";
    else
      o << "  Fluctuating population\n";
    o << "  Persisent walkers are killed after " << mxage << " MC sweeps\n";
    o << "  BranchInterval = " << BranchInterval << "\n";
    o << "  Steps per block = " << nSteps << "\n";
    o << "  Number of blocks = " << nBlocks << "\n";
    app_log() << o.str() << endl;
  }
  app_log() << "  DMC Engine Initialization = " << init_timer.elapsed() << " secs " << endl;
}
Example #6
0
  // DMC setup variant: on first call loads the walker ensemble, builds
  // per-thread movers (fast-gradient or full-ratio PbyP, or walker moves
  // with kill/reject at node crossings) and their support clones; on
  // every call refreshes MaxAge and BranchInterval from the population
  // mode, then logs the run configuration.
  void DMCOMP::resetUpdateEngines() 
  {

    ReportEngine PRE("DMCOMP","resetUpdateEngines");

    bool fixW = (Reconfiguration == "yes");  // fixed-population mode?
    
    Timer init_timer;

    makeClones(W,Psi,H);

    if(Movers.empty()) 
    {
      //load walkers
      W.loadEnsemble();
      for(int ip=1;ip<NumThreads;++ip) wClones[ip]->loadEnsemble(W);

      branchEngine->initWalkerController(W,fixW,false);

      //if(QMCDriverMode[QMC_UPDATE_MODE]) W.clearAuxDataSet();
      Movers.resize(NumThreads,0);
      branchClones.resize(NumThreads,0);
      Rng.resize(NumThreads,0);
      estimatorClones.resize(NumThreads,0);
      FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode);

      {//log file
        ostringstream o;
        o << "  Initial partition of walkers on a node: ";
        std::copy(wPerNode.begin(),wPerNode.end(),ostream_iterator<int>(o," "));
        o << "\n";

        if(QMCDriverMode[QMC_UPDATE_MODE]) 
          o << "  Updates by particle-by-particle moves";
        else 
          o << "  Updates by walker moves";

        if(UseFastGrad == "yes") 
          o << " using fast gradient version ";
        else 
          o << " using full-ratio version ";

        if(KillNodeCrossing)  
          o << "\n  Walkers are killed when a node crossing is detected";
        else
          o << "\n  DMC moves are rejected when a node crossing is detected";

        app_log() << o.str() << endl;
      }

#pragma omp parallel for
      for(int ip=0; ip<NumThreads; ++ip)
      {
        estimatorClones[ip]= new EstimatorManager(*Estimators);
        estimatorClones[ip]->setCollectionMode(false);

        Rng[ip]=new RandomGenerator_t(*RandomNumberControl::Children[ip]);
        hClones[ip]->setRandomGenerator(Rng[ip]);

        branchClones[ip] = new BranchEngineType(*branchEngine);
        if(QMCDriverMode[QMC_UPDATE_MODE])
        {
          if(UseFastGrad == "yes")
            Movers[ip] = new DMCUpdatePbyPWithRejectionFast(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]); 
          else
            Movers[ip] = new DMCUpdatePbyPWithRejection(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]); 
          Movers[ip]->put(qmcNode);
          Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
          Movers[ip]->initWalkersForPbyP(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
        }
        else
        {
          if(KillNodeCrossing) 
            Movers[ip] = new DMCUpdateAllWithKill(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
          else 
            Movers[ip] = new DMCUpdateAllWithRejection(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
          Movers[ip]->put(qmcNode);
          Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
          Movers[ip]->initWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
        }
      }
    }

    branchEngine->checkParameters(W);

    // MaxAge policy: 0 under reconfiguration (walkers never age out);
    // otherwise default 1 for PbyP, 5 for walker moves, unless
    // mover_MaxAge was set explicitly.
    int mxage=mover_MaxAge;
    if(fixW) 
    {
      if(BranchInterval<0) BranchInterval=nSteps;
      mxage=(mover_MaxAge<0)?0:mover_MaxAge;
      for(int ip=0; ip<Movers.size(); ++ip) Movers[ip]->MaxAge=mxage;
    } 
    else 
    {
      if(BranchInterval<0) BranchInterval=1;
      int miage=(QMCDriverMode[QMC_UPDATE_MODE])?1:5;
      mxage=(mover_MaxAge<0)?miage:mover_MaxAge;
      for(int ip=0; ip<Movers.size(); ++ip) Movers[ip]->MaxAge=mxage;
    }

    {
      ostringstream o;
      if(fixW)
        o << "  Fixed population using reconfiguration method\n";
      else
        o << "  Fluctuating population\n";
      o << "  Persisent walkers are killed after " << mxage << " MC sweeps\n";
      o << "  BranchInterval = " << BranchInterval << "\n";
      o << "  Steps per block = " << nSteps << "\n";
      o << "  Number of blocks = " << nBlocks << "\n";
      app_log() << o.str() << endl;
    }
    app_log() << "  DMC Engine Initialization = " << init_timer.elapsed() << " secs " << endl;
  }
Example #7
0
  // VMC run setup: determine the walker-dump period from the sample
  // target, allocate per-thread sample storage, build movers (with or
  // without drift, PbyP or whole-walker) on first call, then initialize
  // walkers and run the warmup sweeps.
  void VMCSingleOMP::resetRun() 
  {

    makeClones(W,Psi,H);

    //determine dump period for walkers
    int samples_tot=W.getActiveWalkers()*nBlocks*nSteps*myComm->size();
    myPeriod4WalkerDump=(nTargetSamples>0)?samples_tot/nTargetSamples:Period4WalkerDump;
    //fall back to the default
    if(myPeriod4WalkerDump==0) myPeriod4WalkerDump=Period4WalkerDump;
    // During warmup, dump only at the very end.
    if(QMCDriverMode[QMC_WARMUP]) myPeriod4WalkerDump=nBlocks*nSteps;
    int samples_th=nTargetSamples/myComm->size()/NumThreads;
    for(int ip=0; ip<NumThreads;++ip)
    {
      wClones[ip]->clearEnsemble();
      wClones[ip]->setNumSamples(samples_th);
    }
    app_log() << "  Samples are dumped at every " << myPeriod4WalkerDump << " step " << endl;
    app_log() << "  Total Sample Size =" << nTargetSamples
      << "\n  Sample size per node per thread = " << samples_th << endl;
    app_log() << "  Warmup Steps " << myWarmupSteps << endl;

    if(Movers.empty()) 
    {
      Movers.resize(NumThreads,0);
      branchClones.resize(NumThreads,0);
      estimatorClones.resize(NumThreads,0);
      Rng.resize(NumThreads,0);
      // Truncate to a multiple of NumThreads so each thread gets an
      // equal share of walkers.
      int nwtot=(W.getActiveWalkers()/NumThreads)*NumThreads;
      FairDivideLow(nwtot,NumThreads,wPerNode);

      app_log() << "  Initial partition of walkers ";
      std::copy(wPerNode.begin(),wPerNode.end(),ostream_iterator<int>(app_log()," "));
      app_log() << endl;

//#pragma omp parallel for
      for(int ip=0; ip<NumThreads; ++ip)
      {
        estimatorClones[ip]= new EstimatorManager(*Estimators);//,*hClones[ip]);  
        estimatorClones[ip]->resetTargetParticleSet(*wClones[ip]);
        estimatorClones[ip]->setCollectionMode(false);
        Rng[ip]=new RandomGenerator_t(*(RandomNumberControl::Children[ip]));
        hClones[ip]->setRandomGenerator(Rng[ip]);

        branchClones[ip] = new BranchEngineType(*branchEngine);

        if(QMCDriverMode[QMC_UPDATE_MODE])
        {
          if(UseDrift == "yes")
            Movers[ip]=new VMCUpdatePbyPWithDrift(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]); 
          else
            Movers[ip]=new VMCUpdatePbyP(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]); 
          Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
        }
        else
        {
          if(UseDrift == "yes")
            Movers[ip]=new VMCUpdateAllWithDrift(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]); 
          else
            Movers[ip]=new VMCUpdateAll(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]); 
          Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
        }
      }
    }

//#pragma omp parallel  for
    for(int ip=0; ip<NumThreads; ++ip)
    {
      if(QMCDriverMode[QMC_UPDATE_MODE])
        Movers[ip]->initWalkersForPbyP(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
      else
        Movers[ip]->initWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);

      for(int prestep=0; prestep<myWarmupSteps; ++prestep)
        Movers[ip]->advanceWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1],true); 

      if(myWarmupSteps && QMCDriverMode[QMC_UPDATE_MODE])
        Movers[ip]->updateWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]); 
    }
    // Warmup runs only once: subsequent resetRun calls skip it.
    myWarmupSteps=0;
    //Used to debug and benchmark opnemp
    //#pragma omp parallel for
    //    for(int ip=0; ip<NumThreads; ip++)
    //    {
    //      Movers[ip]->benchMark(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1],ip);
    //    }
  }
Example #8
0
// Released-node DMC setup variant using a guide wavefunction: clones
// both the trial and guide functions, builds one RNDMCUpdatePbyPFast
// mover per thread, and initializes walkers for particle-by-particle
// moves.  A disabled log-epsilon warmup loop is kept for reference.
void RNDMCOMP::resetUpdateEngines()
{
  ReportEngine PRE("RNDMCOMP","resetUpdateEngines");
  Timer init_timer;
  makeClones(W,Psi,H);
  makeClones(W,Guide);  // guide-function clones (wgClones/guideClones) for the movers
  if(Movers.empty())
  {
    W.loadEnsemble(wClones);
    branchEngine->initWalkerController(W,false,true);
    branchEngine->setRN(true);
    //if(QMCDriverMode[QMC_UPDATE_MODE]) W.clearAuxDataSet();
    Movers.resize(NumThreads,0);
    branchClones.resize(NumThreads,0);
    Rng.resize(NumThreads,0);
    estimatorClones.resize(NumThreads,0);
    FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode);
    bool klw=(KillWalker=="yes");  // NOTE(review): computed but not used in this function — confirm.
    {
      //log file
      ostringstream o;
      o << "  Initial partition of walkers on a node: ";
      std::copy(wPerNode.begin(),wPerNode.end(),ostream_iterator<int>(o," "));
      o << "\n";
      o << "Killing Walkers at nodes " << (useAlternate!="yes") <<endl;
      o << "Running the released node driver."<<endl;
      app_log() << o.str();
    }
    #pragma omp parallel for
    for(int ip=0; ip<NumThreads; ++ip)
    {
      estimatorClones[ip]= new EstimatorManager(*Estimators);
      estimatorClones[ip]->setCollectionMode(false);
      Rng[ip]=new RandomGenerator_t(*RandomNumberControl::Children[ip]);
      hClones[ip]->setRandomGenerator(Rng[ip]);
      branchClones[ip] = new BranchEngineType(*branchEngine);
//         Movers[ip] = new RNDMCUpdatePbyPWithRejectionFast(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
      Movers[ip] = new RNDMCUpdatePbyPFast(*wClones[ip],*wgClones[ip],*psiClones[ip],*guideClones[ip],*hClones[ip],*Rng[ip]);
      Movers[ip]->put(qmcNode);
      Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
      MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]);
      Movers[ip]->initWalkersForPbyP(wit, wit_end);
    }
  }
//     branchEngine->checkParameters(W);
  if(BranchInterval<0)
    BranchInterval=1;
  {
    ostringstream o;
//       if (useAlternate=="yes") o << "  Using Alternate Mover"<<endl;
//       else o << "  Using Ceperley Mover"<<endl;
    o << "  BranchInterval = " << BranchInterval << "\n";
    o << "  Steps per block = " << nSteps << "\n";
    o << "  Number of blocks = " << nBlocks << "\n";
    app_log() << endl << o.str() << endl;
  }
//     RealType avg_w(0);
//     RealType n_w(0);
//     RealType logepsilon(0);
// #pragma omp parallel
//       {
//         int ip=omp_get_thread_num();
//
//         for (int step=0; step<nWarmupSteps; ++step)
//         {
//           avg_w=0;
//           n_w=0;
//           for (int prestep=0; prestep<myRNWarmupSteps; ++prestep)
//           {
//             Movers[ip]->advanceWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1],true);
//             #pragma omp single
//             {
//               MCWalkerConfiguration::iterator wit(W.begin()), wit_end(W.end());
//               while (wit!=wit_end)
//               {
//                 avg_w += (*wit)->Weight;
//                 n_w +=1;
//                 wit++;
//               }
//             }
//             #pragma omp barrier
//            }
//            #pragma omp single
//            {
//              avg_w *= 1.0/n_w;
//              RealType w_m = avg_w/(1.0-avg_w);
//              w_m = std::log(0.5+0.5*w_m);
//              if (std::abs(w_m)>0.01)
//                logepsilon += w_m;
//            }
//            #pragma omp barrier
//            Movers[ip]->setLogEpsilon(logepsilon);
//         }
//
//       }
  app_log() << " RNDMC Engine Initialization = " << init_timer.elapsed() << " secs " << endl;
}
Example #9
0
/** Prepare the VMC driver for a run.
 *
 *  Steps:
 *   1. Optionally override the branch engine's target walker count.
 *   2. Clone (walkers, wavefunction, hamiltonian) once per OpenMP thread
 *      and partition the walkers across threads (wPerNode).
 *   3. On the first call only (Movers.empty()): allocate per-thread
 *      estimator managers, trace clones, RNGs, branch-engine clones, and
 *      the appropriate update ("Mover") object — drift vs. |psi|^2
 *      sampling, particle-by-particle vs. whole-walker moves.
 *   4. On subsequent calls: refresh each thread's trace state.
 *   5. Initialize walkers per thread and run nWarmupSteps of equilibration.
 *
 *  No parameters, no return value; mutates driver state (Movers, Rng,
 *  wClones, nWarmupSteps is consumed and reset to 0).
 */
void VMCSingleOMP::resetRun()
{
  ////only VMC can overwrite this
  if(nTargetPopulation>0)
    branchEngine->iParam[SimpleFixedNodeBranch::B_TARGETWALKERS]=static_cast<int>(std::ceil(nTargetPopulation));
  // One clone of (W, Psi, H) per thread; wPerNode[ip]..wPerNode[ip+1] is
  // thread ip's contiguous slice of the walker list.
  makeClones(W,Psi,H);
  FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode);
  app_log() << "  Initial partition of walkers ";
  std::copy(wPerNode.begin(),wPerNode.end(),ostream_iterator<int>(app_log()," "));
  app_log() << endl;
  if (Movers.empty())
  {
    // First call: build all per-thread machinery.
    Movers.resize(NumThreads,0);
    branchClones.resize(NumThreads,0);
    estimatorClones.resize(NumThreads,0);
    traceClones.resize(NumThreads,0);
    Rng.resize(NumThreads,0);
#if !defined(BGP_BUG)
    #pragma omp parallel for
#endif
    for(int ip=0; ip<NumThreads; ++ip)
    {
      ostringstream os;
      estimatorClones[ip]= new EstimatorManager(*Estimators);//,*hClones[ip]);
      estimatorClones[ip]->resetTargetParticleSet(*wClones[ip]);
      estimatorClones[ip]->setCollectionMode(false);
      traceClones[ip] = Traces->makeClone();
      // Each thread gets its own RNG stream seeded from the global control.
      Rng[ip]=new RandomGenerator_t(*(RandomNumberControl::Children[ip]));
      hClones[ip]->setRandomGenerator(Rng[ip]);
      branchClones[ip] = new BranchEngineType(*branchEngine);
      //         if(reweight=="yes")
      //         {
      //           if (ip== 0) app_log() << "  WFMCUpdateAllWithReweight"<<endl;
      //           Movers[ip]=new WFMCUpdateAllWithReweight(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip],weightLength,Eindex);
      //         }
      //         else
      //           if (reweight=="psi")
      //           {
      //             os << "  Sampling Psi to increase number of walkers near nodes"<<endl;
      //             if (QMCDriverMode[QMC_UPDATE_MODE]) Movers[ip]=new VMCUpdatePbyPSamplePsi(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
      //             else Movers[ip]=new VMCUpdateAllSamplePsi(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
      //           }
      //           else
      // Select the update algorithm: PbyP (particle-by-particle) vs
      // whole-walker moves, each with or without drift.
      if (QMCDriverMode[QMC_UPDATE_MODE])
      {
        //             if (UseDrift == "rn")
        //             {
        //               os <<"  PbyP moves with RN, using VMCUpdatePbyPSampleRN"<<endl;
        //               Movers[ip]=new VMCUpdatePbyPSampleRN(*wClones[ip],*psiClones[ip],*guideClones[ip],*hClones[ip],*Rng[ip]);
        //               Movers[ip]->setLogEpsilon(logepsilon);
        //               // Movers[ip]=new VMCUpdatePbyPWithDrift(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
        //             }
        //             else
        if (UseDrift == "yes")
        {
          os <<"  PbyP moves with drift, using VMCUpdatePbyPWithDriftFast"<<endl;
          Movers[ip]=new VMCUpdatePbyPWithDriftFast(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
          // Movers[ip]=new VMCUpdatePbyPWithDrift(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
        }
        else
        {
          // BUGFIX: message previously read "|psi^2|"; the sampled density
          // is |psi|^2, matching the walker-move branch below.
          os <<"  PbyP moves with |psi|^2, using VMCUpdatePbyP"<<endl;
          Movers[ip]=new VMCUpdatePbyP(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
        }
        //Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
      }
      else
      {
        //             if (UseDrift == "rn")
        //             {
        //               os <<"  walker moves with RN, using VMCUpdateAllSampleRN"<<endl;
        //               Movers[ip]=new VMCUpdateAllSampleRN(*wClones[ip],*psiClones[ip],*guideClones[ip],*hClones[ip],*Rng[ip]);
        //               Movers[ip]->setLogEpsilon(logepsilon);
        //             }
        //             else
        if (UseDrift == "yes")
        {
          // BUGFIX: message previously claimed "VMCUpdateAllWithDriftFast"
          // but the class instantiated is VMCUpdateAllWithDrift.
          os <<"  walker moves with drift, using VMCUpdateAllWithDrift"<<endl;
          Movers[ip]=new VMCUpdateAllWithDrift(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
        }
        else
        {
          os <<"  walker moves with |psi|^2, using VMCUpdateAll"<<endl;
          Movers[ip]=new VMCUpdateAll(*wClones[ip],*psiClones[ip],*hClones[ip],*Rng[ip]);
        }
        //Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip]);
      }
      Movers[ip]->nSubSteps=nSubSteps;
      // Only thread 0 reports, to avoid duplicated log lines.
      if(ip==0)
        app_log() << os.str() << endl;
    }
  }
  else
  {
    // Movers already exist: only the trace state needs refreshing.
#if !defined(BGP_BUG)
    #pragma omp parallel for
#endif
    for(int ip=0; ip<NumThreads; ++ip)
    {
      traceClones[ip]->transfer_state_from(*Traces);
    }
  }
  app_log() << "  Total Sample Size   =" << nTargetSamples << endl;
  app_log() << "  Walker distribution on root = ";
  std::copy(wPerNode.begin(),wPerNode.end(),ostream_iterator<int>(app_log()," "));
  app_log() << endl;
  //app_log() << "  Sample Size per node=" << samples_this_node << endl;
  //for (int ip=0; ip<NumThreads; ++ip)
  //  app_log()  << "    Sample size for thread " <<ip<<" = " << samples_th[ip] << endl;
  app_log().flush();
  // Per-thread initialization + warm-up; each thread touches only its own
  // walker slice [wPerNode[ip], wPerNode[ip+1]).
#if !defined(BGP_BUG)
  #pragma omp parallel for
#endif
  for(int ip=0; ip<NumThreads; ++ip)
  {
    //int ip=omp_get_thread_num();
    Movers[ip]->put(qmcNode);
    Movers[ip]->resetRun(branchClones[ip],estimatorClones[ip],traceClones[ip]);
    if (QMCDriverMode[QMC_UPDATE_MODE])
      Movers[ip]->initWalkersForPbyP(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
    else
      Movers[ip]->initWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
//       if (UseDrift != "rn")
//       {
    for (int prestep=0; prestep<nWarmupSteps; ++prestep)
      Movers[ip]->advanceWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1],true);
    if (nWarmupSteps && QMCDriverMode[QMC_UPDATE_MODE])
      Movers[ip]->updateWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
//       }
  }
//     //JNKIM: THIS IS BAD AND WRONG
//     if (UseDrift == "rn")
//     {
//       RealType avg_w(0);
//       RealType n_w(0);
// #pragma omp parallel
//       {
//         int ip=omp_get_thread_num();
//         for (int step=0; step<nWarmupSteps; ++step)
//         {
//           avg_w=0;
//           n_w=0;
//           for (int prestep=0; prestep<myRNWarmupSteps; ++prestep)
//           {
//             Movers[ip]->advanceWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1],true);
//             #pragma omp single
//             {
//               MCWalkerConfiguration::iterator wit(W.begin()), wit_end(W.end());
//               while (wit!=wit_end)
//               {
//                 avg_w += (*wit)->Weight;
//                 n_w +=1;
//                 wit++;
//               }
//             }
//             #pragma omp barrier
//            }
//            #pragma omp single
//            {
//              avg_w *= 1.0/n_w;
//              RealType w_m = avg_w/(1.0-avg_w);
//              w_m = std::log(0.5+0.5*w_m);
//              if (std::abs(w_m)>0.01)
//                logepsilon += w_m;
//            }
//            #pragma omp barrier
//            Movers[ip]->setLogEpsilon(logepsilon);
//           }
//
//         for (int prestep=0; prestep<nWarmupSteps; ++prestep)
//           Movers[ip]->advanceWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1],true);
//
//         if (nWarmupSteps && QMCDriverMode[QMC_UPDATE_MODE])
//           Movers[ip]->updateWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
//       }
//     }
  // Drop any accumulated samples and (optionally) reserve space for the
  // samples this run will collect.
  for(int ip=0; ip<NumThreads; ++ip)
    wClones[ip]->clearEnsemble();
  if(nSamplesPerThread)
    for(int ip=0; ip<NumThreads; ++ip)
      wClones[ip]->setNumSamples(nSamplesPerThread);
  // Warm-up has been consumed; a subsequent resetRun must not re-equilibrate.
  nWarmupSteps=0;
  //Used to debug and benchmark opnemp
  //#pragma omp parallel for
  //    for(int ip=0; ip<NumThreads; ip++)
  //    {
  //      Movers[ip]->benchMark(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1],ip);
  //    }
}