bool RNDMCOMP::run() { resetUpdateEngines(); //estimator does not need to collect data Estimators->setCollectionMode(true); Estimators->start(nBlocks); for(int ip=0; ip<NumThreads; ip++) Movers[ip]->startRun(nBlocks,false); Timer myclock; IndexType block = 0; IndexType updatePeriod=(QMCDriverMode[QMC_UPDATE_MODE])?Period4CheckProperties:(nBlocks+1)*nSteps; do // block { Estimators->startBlock(nSteps); for(int ip=0; ip<NumThreads; ip++) Movers[ip]->startBlock(nSteps); IndexType step = 0; for(IndexType step=0; step< nSteps; ++step, CurrentStep+=BranchInterval) { #pragma omp parallel for for(int ip=0; ip<NumThreads; ++ip) { int now=CurrentStep; MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]); for(int interval = 0; interval<BranchInterval-1; ++interval,++now) Movers[ip]->advanceWalkers(wit,wit_end,false); wClones[ip]->resetCollectables(); Movers[ip]->advanceWalkers(wit,wit_end,false); Movers[ip]->setReleasedNodeMultiplicity(wit,wit_end); if(QMCDriverMode[QMC_UPDATE_MODE] && now%updatePeriod == 0) Movers[ip]->updateWalkers(wit, wit_end); }//#pragma omp parallel for(int ip=1; ip<NumThreads; ++ip) { for(int j=0; j<W.Collectables.size(); ++j) W.Collectables[j]+=wClones[ip]->Collectables[j]; } branchEngine->branch(CurrentStep,W, branchClones); FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode); } // #pragma omp parallel for // for(int ip=0; ip<NumThreads; ++ip) // { // MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]); // Movers[ip]->resetbuffers(wit, wit_end); // } Estimators->stopBlock(acceptRatio()); block++; recordBlock(block); } while(block<nBlocks && myclock.elapsed()<MaxCPUSecs); //for(int ip=0; ip<NumThreads; ip++) Movers[ip]->stopRun(); for(int ip=0; ip<NumThreads; ip++) *(RandomNumberControl::Children[ip])=*(Rng[ip]); Estimators->stop(); return finalize(block); }
/** Run the DMC driver (legacy do-while variant): nBlocks blocks of nSteps
 *  branch intervals, with per-thread walker advancement inside an OpenMP
 *  parallel region and a per-step branch.
 *  @return result of finalize(block)
 */
bool DMCOMP::run()
{
  // With branching ("no" reconfiguration) the walker population fluctuates,
  // so walkers must be re-divided among threads after every branch.
  bool variablePop = (Reconfiguration == "no");
  resetUpdateEngines();
  //set the collection mode for the estimator
  Estimators->setCollectionMode(branchEngine->SwapMode);
  Estimators->reportHeader(AppendRun);
  Estimators->reset();
  IndexType block = 0;
  do // block
  {
    Estimators->startBlock();
    for(int ip=0; ip<NumThreads; ip++)
      Movers[ip]->startBlock();
    IndexType step = 0;
    do //step
    {
      // Each thread advances its own slice [wPerNode[ip], wPerNode[ip+1])
      // for BranchInterval sub-steps before the serial branch below.
      #pragma omp parallel
      {
        int ip = omp_get_thread_num();
        IndexType interval = 0;
        do // interval
        {
          Movers[ip]->advanceWalkers(W.begin()+wPerNode[ip],W.begin()+wPerNode[ip+1]);
          ++interval;
        } while(interval<BranchInterval);
      }//#pragma omp parallel
      // Serial section: multiplicity over the full population, then
      // accumulate estimators and branch.
      Movers[0]->setMultiplicity(W.begin(),W.end());
      Estimators->accumulate(W);
      branchEngine->branch(CurrentStep,W, branchClones);
      if(variablePop)
        FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode);
      ++step;
      CurrentStep+=BranchInterval;
    } while(step<nSteps);
    Estimators->stopBlock(acceptRatio());
    block++;
    recordBlock(block);
    // Periodic walker refresh in particle-by-particle update mode.
    // NOTE(review): 100 is a hard-coded period here, unlike the other
    // drivers in this file which use Period4CheckProperties — confirm.
    if(QMCDriverMode[QMC_UPDATE_MODE] && CurrentStep%100 == 0)
    {
      #pragma omp parallel
      {
        int ip = omp_get_thread_num();
        Movers[ip]->updateWalkers(W.begin()+wPerNode[ip], W.begin()+wPerNode[ip+1]);
      }
    }
  } while(block<nBlocks);
  return finalize(block);
}
/** Run the DMC driver variant used during wavefunction optimization:
 *  advances walkers, records per-walker local-energy history, periodically
 *  saves ensembles as optimization samples, and accumulates parameter
 *  derivative matrices once per block.
 *  @return result of finalize(block)
 */
bool DMCOMPOPT::run()
{
  // Variable population when branching without reconfiguration.
  bool variablePop = (Reconfiguration == "no");
  resetUpdateEngines();
  //estimator does not need to collect data
  Estimators->setCollectionMode(true);
  Estimators->start(nBlocks);
  for(int ip=0; ip<NumThreads; ip++)
    Movers[ip]->startRun(nBlocks,false);
  Timer myclock;
  IndexType block = 0;
  IndexType updatePeriod=(QMCDriverMode[QMC_UPDATE_MODE])?Period4CheckProperties:(nBlocks+1)*nSteps;
  // nsampls counts samples saved locally; g_nsampls is the MPI-reduced total
  // used in the loop-termination test.
  int nsampls(0);
  int g_nsampls(0);
  do // block
  {
    Estimators->startBlock(nSteps);
    for(int ip=0; ip<NumThreads; ip++)
      Movers[ip]->startBlock(nSteps);
    IndexType step = 0; // NOTE(review): shadowed by the loop variable below and never read.
    for(IndexType step=0; step< nSteps; ++step, CurrentStep+=BranchInterval)
    {
      #pragma omp parallel
      {
        int ip=omp_get_thread_num();
        int now=CurrentStep;
        // wit/wit_end bound this thread's walker slice; wit_first is kept so
        // wit2 can be rewound for repeated passes over the same slice.
        MCWalkerConfiguration::iterator
          wit(W.begin()+wPerNode[ip]),
          wit_first(W.begin()+wPerNode[ip]),
          wit2(W.begin()+wPerNode[ip]),
          wit_end(W.begin()+wPerNode[ip+1]);
        for(int interval = 0; interval<BranchInterval-1; ++interval,++now)
        {
          Movers[ip]->advanceWalkers(wit,wit_end,false);
          // Append each walker's current local energy to its property history.
          while(wit2!=wit_end)
          {
            (**wit2).addPropertyHistoryPoint(Eindx,(**wit2).getPropertyBase()[LOCALENERGY]);
            wit2++;
          }
          wit2=wit_first;
          // NOTE(review): this guard tests Period4WalkerDump but the modulus
          // uses myPeriod4WalkerDump, while the post-loop dump below tests
          // myPeriod4WalkerDump — confirm which flag is intended.
          if (Period4WalkerDump&&((now+1)%myPeriod4WalkerDump==0))
          {
            wClones[ip]->saveEnsemble(wit2,wit_end);
            // Only the master thread bumps the shared sample counter.
            #pragma omp master
            nsampls+=W.getActiveWalkers();
          }
        }
        // Final sub-step of the branch interval: clear per-thread
        // collectables first so only this sub-step's contribution survives.
        wClones[ip]->resetCollectables();
        Movers[ip]->advanceWalkers(wit,wit_end,false);
        wit2=wit_first;
        while(wit2!=wit_end)
        {
          (**wit2).addPropertyHistoryPoint(Eindx,(**wit2).getPropertyBase()[LOCALENERGY]);
          wit2++;
        }
        wit2=wit_first;
        if (myPeriod4WalkerDump&&((now+1)%myPeriod4WalkerDump==0))
        {
          wClones[ip]->saveEnsemble(wit2,wit_end);
          #pragma omp master
          nsampls+=W.getActiveWalkers();
        }
        Movers[ip]->setMultiplicity(wit,wit_end);
        if(QMCDriverMode[QMC_UPDATE_MODE] && now%updatePeriod == 0)
          Movers[ip]->updateWalkers(wit, wit_end);
      }//#pragma omp parallel
    }
    // branchEngine->debugFWconfig();
    // Once per block: accumulate wavefunction-parameter derivative matrices
    // from every walker for the optimizer.
    #pragma omp parallel for
    for (int ip=0; ip<NumThreads; ++ip)
    {
      MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]),
      wit_end(W.begin()+wPerNode[ip+1]);
      while (wit!=wit_end)
      {
        Walker_t& thisWalker(**wit);
        Walker_t::Buffer_t& w_buffer(thisWalker.DataSet);
        // Restore the walker's state into this thread's clones before
        // evaluating derivatives.
        wClones[ip]->loadWalker(thisWalker,true);
        psiClones[ip]->copyFromBuffer(*wClones[ip],w_buffer);
        vector<RealType> Dsaved(NumOptimizables,0);
        vector<RealType> HDsaved(NumOptimizables,0);
        psiClones[ip]->evaluateDerivatives(*wClones[ip],dummyOptVars[ip],Dsaved,HDsaved,true);//SH like deriv style
        // Shared accumulation — serialized across threads.
        #pragma omp critical
        fillComponentMatrices(Dsaved,HDsaved,thisWalker);
        wit++;
      }
    }
    branchEngine->branch(CurrentStep,W, branchClones);
    if(variablePop)
      FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode);
    Estimators->stopBlock(acceptRatio());
    block++;
    recordBlock(block);
    // Sum sample counts over MPI ranks to decide whether to keep running.
    g_nsampls=nsampls;
    myComm->allreduce(g_nsampls);
  } while(((block<nBlocks) || (g_nsampls<nTargetSamples)) && myclock.elapsed()<MaxCPUSecs);
  //for(int ip=0; ip<NumThreads; ip++) Movers[ip]->stopRun();
  // Persist each thread's RNG state for restarts.
  for(int ip=0; ip<NumThreads; ip++)
    *(RandomNumberControl::Children[ip])=*(Rng[ip]);
  // adding weight index
  // Drop the per-walker property histories allocated for this run.
  MCWalkerConfiguration::iterator wit(W.begin()), wit_end(W.end());
  while(wit!=wit_end)
  {
    (**wit).deletePropertyHistory();
    wit++;
  }
  Estimators->stop();
  return finalize(block);
}
bool DMCOMP::run() { bool variablePop = (Reconfiguration == "no"); resetUpdateEngines(); //estimator does not need to collect data Estimators->setCollectionMode(true); Estimators->start(nBlocks); for(int ip=0; ip<NumThreads; ip++) Movers[ip]->startRun(nBlocks,false); Timer myclock; IndexType block = 0; IndexType updatePeriod=(QMCDriverMode[QMC_UPDATE_MODE])?Period4CheckProperties:(nBlocks+1)*nSteps; do // block { Estimators->startBlock(nSteps); for(int ip=0; ip<NumThreads; ip++) Movers[ip]->startBlock(nSteps); IndexType step = 0; for(IndexType step=0; step< nSteps; ++step, CurrentStep+=BranchInterval) { if(storeConfigs && (CurrentStep%storeConfigs == 0)) { ForwardWalkingHistory.storeConfigsForForwardWalking(W); W.resetWalkerParents(); } #pragma omp parallel for for(int ip=0; ip<NumThreads; ++ip) { int now=CurrentStep; MCWalkerConfiguration::iterator wit(W.begin()+wPerNode[ip]), wit_end(W.begin()+wPerNode[ip+1]); for(int interval = 0;interval<BranchInterval-1; ++interval,++now) Movers[ip]->advanceWalkers(wit,wit_end,false); wClones[ip]->resetCollectables(); Movers[ip]->advanceWalkers(wit,wit_end,false); Movers[ip]->setMultiplicity(wit,wit_end); if(QMCDriverMode[QMC_UPDATE_MODE] && now%updatePeriod == 0) Movers[ip]->updateWalkers(wit, wit_end); }//#pragma omp parallel Estimators->accumulateCollectables(wClones,1.0); branchEngine->branch(CurrentStep,W, branchClones); // if(storeConfigs && (CurrentStep%storeConfigs == 0)) { // ForwardWalkingHistory.storeConfigsForForwardWalking(W); // W.resetWalkerParents(); // } if(variablePop) FairDivideLow(W.getActiveWalkers(),NumThreads,wPerNode); } // branchEngine->debugFWconfig(); Estimators->stopBlock(acceptRatio()); block++; recordBlock(block); } while(block<nBlocks && myclock.elapsed()<MaxCPUSecs); //for(int ip=0; ip<NumThreads; ip++) Movers[ip]->stopRun(); for(int ip=0; ip<NumThreads; ip++) *(RandomNumberControl::Children[ip])=*(Rng[ip]); Estimators->stop(); return finalize(block); }