//----------------------------------------------------------------------
// Evaluate the local-energy pieces (kinetic, local potential, nonlocal
// pseudopotential) at the sample's current position and, via the T-move
// scheme, possibly displace electrons according to the sign-violating
// nonlocal matrix elements returned by calcNonlocTmove.
//
// pt         : filled with wf value, kinetic, potential, nonlocal terms;
//              pt.weight is a placeholder (reset later by the caller).
// tmov       : proposed T-moves; each carries the electron index e, the
//              proposed position pos, and a matrix element vxx (< 0).
// tmoves     : member flag selecting the non-size-consistent variant
//              (one move chosen over all electrons at once) versus the
//              size-consistent variant (one move chosen per electron).
void Dmc_method::doTmove(Properties_point & pt,Pseudopotential * pseudo, System * sys, Wavefunction_data * wfdata, Wavefunction * wf, Sample_point * sample, Guiding_function * guideingwf) {
  vector <Tmove> tmov;
  pt.setSize(nwf);
  wf->getVal(wfdata,0,pt.wf_val);
  sys->calcKinetic(wfdata,sample,wf,pt.kinetic);
  pt.potential=sys->calcLoc(sample);
  pt.weight=1.0; //this gets set later anyway
  pt.count=1;
  // Nonlocal energy evaluation that also collects candidate T-moves.
  pseudo->calcNonlocTmove(wfdata,sys,sample,wf,pt.nonlocal,tmov);
  // Normalization: sum = 1 + tau*sum(|vxx|), since every vxx < 0.
  doublevar sum=1;
  for(vector<Tmove>::iterator mov=tmov.begin(); mov!=tmov.end(); mov++) {
    assert(mov->vxx < 0);
    sum-=timestep*mov->vxx;
  }
  // Remove the T-move contribution from the reported nonlocal energy so
  // the branching weight is consistent with the proposed moves.
  pt.nonlocal(0)-=(sum-1)/timestep;
  //subtract_out_enwt=-(sum-1)/timestep;
  assert(sum >= 0);
  if(tmoves) { ///Non-size consistent
    // Draw rand uniformly in [0,sum); the segment [0,1) corresponds to
    // "stay put", each move owns a segment of length tau*|vxx|.
    doublevar rand=rng.ulec()*sum;
    sum=1; //reset to choose the move
    if(rand > sum) {
      for(vector<Tmove>::iterator mov=tmov.begin(); mov!=tmov.end(); mov++) {
        sum-=timestep*mov->vxx;
        if(rand < sum) {
          // Accept this single move for the whole walker.
          sample->translateElectron(mov->e,mov->pos);
          break;
        }
      }
    }
  }
  else { // Size-consistent
    // Group proposed moves by electron so each electron makes an
    // independent stay/move decision.
    vector < vector<Tmove> > tmv_by_e(nelectrons);
    for(vector<Tmove>::iterator mov=tmov.begin(); mov!=tmov.end(); mov++) {
      tmv_by_e[mov->e].push_back(*mov);
    }
    for(int e=0; e< nelectrons; e++) {
      // Per-electron normalization: 1 + tau*sum(|vxx|) over e's moves.
      doublevar sum_e=1.0;
      for(vector<Tmove>::iterator mov=tmv_by_e[e].begin(); mov!=tmv_by_e[e].end(); mov++) {
        sum_e-=timestep*mov->vxx;
      }
      doublevar rand=rng.ulec()*sum_e;
      doublevar sel_sum=1;
      if(rand > sel_sum) {
        // rand fell past the "stay" segment: walk the move segments and
        // take the first whose cumulative boundary exceeds rand.
        for(vector<Tmove>::iterator mov=tmv_by_e[e].begin(); mov!=tmv_by_e[e].end(); mov++) {
          sel_sum-=timestep*mov->vxx;
          if(rand < sel_sum) {
            sample->translateElectron(e,mov->pos);
            break;
          }
        }
      }
    }
  }
}
//----------------------------------------------------------------------
// Copy the properties of the reptile's central bead into pt.  The
// middle of the path is the best-sampled point for pure (unbiased)
// estimators.  Only a single wavefunction (nwf==1) is supported.
// pt.count/pt.weight are reset to 1 so the point enters averages with
// unit weight.
void Reptation_method::get_center_avg(deque <Reptile_point> & reptile, Properties_point & pt) {
  int nwf=reptile[0].prop.kinetic.GetDim(0);
  int size=reptile.size();
  // Fixed: message used to say "nwf > 0", contradicting the condition.
  if(nwf >1) error("nwf > 1 not supported yet");
  pt.setSize(nwf);
  // Index of the central bead of the path.
  int num=size/2+1;
  pt=reptile[num].prop;
  pt.count=1;
  pt.weight=1;
}
//----------------------------------------------------------------------
// Copy the properties of the reptile's last bead into pt (mixed
// estimator at the growing end).  Only a single wavefunction (nwf==1)
// is supported.  Endpoint averaging (mixing first and last beads) is
// deliberately disabled — see the commented-out code below — because it
// interferes with correlated sampling; DMC is preferred for plain
// energies.
void Reptation_method::get_avg(deque <Reptile_point> & reptile, Properties_point & pt) {
  int size=reptile.size();
  int nwf=reptile[0].prop.kinetic.GetDim(0);
  // Fixed: message used to say "nwf > 0", contradicting the condition.
  if(nwf >1) error("nwf > 1 not supported yet");
  pt.setSize(nwf);
  Reptile_point & last(reptile[size-1]);
  //How to do averaging at either end.  Not doing this right
  //now because of correlated sampling..if we really want energies,
  //usually DMC is a better choice.
  pt=last.prop;
  //Reptile_point & first(reptile[0]);
  //pt.kinetic(0)=.5*(first.prop.kinetic(0)+last.prop.kinetic(0));
  //pt.nonlocal(0)=.5*(first.prop.nonlocal(0)+last.prop.nonlocal(0));
  //pt.potential(0)=.5*(first.prop.potential(0) + last.prop.potential(0));
  pt.count=1;
  pt.weight=1;
}
//---------------------------------------------------------------------- int Postprocess_method::worker(Wavefunction * wf, Sample_point * sample) { #ifdef USE_MPI Config_save_point tmpconfig; doublevar weight; Properties_point pt; pt.setSize(1); tmpconfig.mpiReceive(0); MPI_Recv(weight,0); MPI_Status status; while(true) { gen_point(wf,sample,tmpconfig,weight,pt); int done=1; MPI_Send(done,0); pt.mpiSend(0); MPI_Recv(done,0); if(done==0) break; tmpconfig.mpiReceive(0); MPI_Recv(weight,0); } cout << mpi_info.node << " : done " << endl; #endif //USE_MPI }
void Postprocess_method::run(Program_options & options, ostream & output) { Sample_point * sample=NULL; Wavefunction * wf=NULL; sys->generateSample(sample); wfdata->generateWavefunction(wf); sample->attachObserver(wf); Properties_gather gather; Primary guide; int nelec=sample->electronSize(); int ndim=3; int npoints_tot=0; FILE * f; if(mpi_info.node==0) { f=fopen(configfile.c_str(),"r"); if(ferror(f)) error("Could not open",configfile); fseek(f,0,SEEK_END); long int lSize=ftell(f); rewind(f); npoints_tot=lSize/(sizeof(doublevar)*(nelec*3+1+4)); output << "Estimated number of samples in this file: " << npoints_tot << endl; output << "We are skipping the first " << nskip << " of these " << endl; Config_save_point tmpconfig; for(int i=0; i< nskip; i++) { doublevar weight; tmpconfig.readBinary(f,nelec,ndim,weight); // doublevar weight; // if(!fread(&weight,sizeof(doublevar),1,f)) error("Misformatting in binary file",configfile, " perhaps nskip is too large?"); } } #ifdef USE_MPI if(mpi_info.nprocs<2) error("POSTPROCESS must be run with at least 2 processes if it is run in parallel."); if(mpi_info.node==0) { master(wf,sample,f,output); } else { worker(wf,sample); } #else Config_save_point tmpconfig; Properties_point pt; pt.setSize(1); int npoints=0; Postprocess_average postavg(average_var.GetDim(0)); doublevar weight; while(tmpconfig.readBinary(f,nelec,ndim,weight)) { tmpconfig.restorePos(sample); gen_point(wf,sample,tmpconfig,weight,pt); postavg.update_average(pt); npoints++; doublevar progress=doublevar(npoints)/doublevar(npoints_tot); if(fabs(progress*10-int(progress*10)) < 0.5/npoints_tot) { cout << "progress: " << progress*100 << "% done" << endl; } } postavg.print(average_var,output); #endif //USE_MPI for(int i=0; i< densplt.GetDim(0); i++) densplt(i)->write(); if(mpi_info.node==0) fclose(f); delete sample; delete wf; }
int Postprocess_method::master(Wavefunction * wf, Sample_point * sample,FILE * f, ostream & os) { #ifdef USE_MPI Config_save_point tmpconfig; doublevar weight; Properties_point pt; pt.setSize(1); int nelec=sample->electronSize(); int ndim=3; MPI_Status status; Postprocess_average postavg(average_var.GetDim(0)); //Get everyone started with data cout << "master: sending initial data" << endl; for(int r=1; r < mpi_info.nprocs; r++) { if(!tmpconfig.readBinary(f,nelec,ndim,weight)) { error("Binary file may not contain enough walkers; finished after ",r); } tmpconfig.mpiSend(r); MPI_Send(weight,r); } int totcount=0; cout << "master : going through file " << endl; while(tmpconfig.readBinary(f,nelec,ndim,weight)) { // doublevar weight; // if(!fread(&weight,sizeof(doublevar),1,f)) error("Misformatting in binary file",configfile); //Is anyone done? //When done, receive completed point and send out new point int done; MPI_Recv(&done,1,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_Comm_grp,&status); //cout << "master: received " << done << " from " << status.MPI_SOURCE << endl; done=1; pt.mpiReceive(status.MPI_SOURCE); MPI_Send(done,status.MPI_SOURCE); tmpconfig.mpiSend(status.MPI_SOURCE); MPI_Send(weight,status.MPI_SOURCE); //introduce completed point into the average //cout << "master: updating average " << endl; postavg.update_average(pt); totcount++; if(totcount%1000==0) cout << "Completed " << totcount << " walkers " << endl; } cout << "master: collecting final averages " << endl; //Loop through all the nodes and collect their last points, adding them in //Write out the final averages. for(int r=1; r < mpi_info.nprocs; r++) { int done; MPI_Recv(done,r); done=0; pt.mpiReceive(r); MPI_Send(done,r); postavg.update_average(pt); } postavg.print(average_var,os); #endif //USE_MPI }