/** Tcl interface for the "ghmc" command: with no arguments it reports the
 *  current GHMC status, with "statistics" it reports acceptance statistics.
 *  Any other keyword yields an error plus the usage message. */
int tclcommand_ghmc(ClientData data, Tcl_Interp *interp, int argc, char **argv)
{
#ifdef GHMC
  THERMO_TRACE(fprintf(stderr,"%d: ghmc:\n",this_node));
  Tcl_ResetResult(interp);

  int status;
  if (argc == 1) {
    /* bare "ghmc": print current status */
    status = tclcommand_ghmc_print_status(interp);
  } else if (ARG1_IS_S("statistics")) {
    status = tclcommand_ghmc_print_statistics(interp);
  } else {
    Tcl_AppendResult(interp, "Unknown keyword: \n", (char *)NULL);
    status = tclcommand_ghmc_print_usage(interp);
  }
  return status;
#else
  /* feature not compiled in: report usage as an error */
  INTEG_TRACE(fprintf(stderr,"%d: call to ghmc but not compiled in!\n",this_node));
  return tclcommand_ghmc_print_usage(interp);
#endif
}
void rescale_forces() { Particle *p; int i, np, c; Cell *cell; double scale ; INTEG_TRACE(fprintf(stderr,"%d: rescale_forces:\n",this_node)); scale = 0.5 * time_step * time_step; for (c = 0; c < local_cells.n; c++) { cell = local_cells.cell[c]; p = cell->part; np = cell->n; for(i = 0; i < np; i++) { check_particle_force(&p[i]); p[i].f.f[0] *= scale/PMASS(p[i]); p[i].f.f[1] *= scale/PMASS(p[i]); p[i].f.f[2] *= scale/PMASS(p[i]); ONEPART_TRACE(if(p[i].p.identity==check_id) fprintf(stderr,"%d: OPT: SCAL f = (%.3e,%.3e,%.3e) v_old = (%.3e,%.3e,%.3e)\n",this_node,p[i].f.f[0],p[i].f.f[1],p[i].f.f[2],p[i].m.v[0],p[i].m.v[1],p[i].m.v[2])); } } }
/** Initialize all data structures for nemd. */ void nemd_init(int n_slabs, int n_exchange, double shear_rate) { int i; INTEG_TRACE(fprintf(stderr,"%d: nemd_init: n_slabs=%d n_exchange=%d\n",this_node, n_slabs, n_exchange)); /* check node grid */ if( n_nodes > 1 ) { runtimeErrorMsg() <<"NEMD is a single node feature"; return; } /* first free old structures befor initializing new ones */ if(nemddata.n_slabs > -1) nemd_free(); /* exit nemd integration */ if(n_slabs == 0) return; /* fill nemd structure */ nemddata.n_slabs = n_slabs; nemddata.top_slab = 0; nemddata.mid_slab = n_slabs/2; nemddata.thickness = box_l[2]/(double)nemddata.n_slabs; nemddata.invthickness = 1.0 / nemddata.thickness; nemddata.shear_rate = shear_rate; nemddata.slab_vel = time_step*shear_rate*box_l[2]/4.0; nemddata.n_exchange = n_exchange; nemddata.slab = (Slab *)Utils::malloc(nemddata.n_slabs*sizeof(Slab)); nemddata.velocity_profile = (double *)Utils::malloc(nemddata.n_slabs*sizeof(double)); nemddata.momentum = 0.0; nemddata.momentum_norm = 0; /* initialize slabs and velocity profile */ for(i=0;i<nemddata.n_slabs;i++) { nemddata.velocity_profile[i] = 0.0; nemddata.slab[i].v_mean = 0.0; nemddata.slab[i].n_parts_in_slab = 0; nemddata.slab[i].v_min = 0.0; nemddata.slab[i].ind_min = 0; nemddata.slab[i].fastest = NULL; nemddata.slab[i].n_fastest = 0; nemddata.slab[i].vel_diff = 0.0; } /* allocate arrays for indices of fastest particles in slab */ if(nemddata.n_exchange > 0) { nemddata.slab[nemddata.top_slab].fastest = (int *)Utils::malloc(nemddata.n_exchange*sizeof(int)); nemddata.slab[nemddata.mid_slab].fastest = (int *)Utils::malloc(nemddata.n_exchange*sizeof(int)); } for(i=0;i<nemddata.n_exchange;i++) { nemddata.slab[nemddata.top_slab].fastest[i] = -1; nemddata.slab[nemddata.mid_slab].fastest[i] = -1; } nemddata.slab[nemddata.top_slab].v_min = -1e10; nemddata.slab[nemddata.mid_slab].v_min = +1e10; }
/** Gives back the calculated viscosity.
 *  Computes viscosity = mean_force / (shear_rate * area) from the momentum
 *  accumulated by the active NEMD method and resets the accumulators. */
int tclcommand_nemd_parse_and_print_viscosity(Tcl_Interp *interp)
{
  double shear_rate=0.0, mean_force, viscosity;
  char buffer[TCL_DOUBLE_SPACE];
  INTEG_TRACE(fprintf(stderr,"%d: tclcommand_nemd_parse_and_print_viscosity:\n",this_node));

  /* determine the shear rate of the active method */
  switch (nemd_method) {
  case NEMD_METHOD_OFF:
    Tcl_AppendResult(interp, "nemd is off", (char *)NULL);
    return (TCL_OK);
  case NEMD_METHOD_EXCHANGE:
    /* BUGFIX: a missing break here made this case fall through and
       overwrite shear_rate with nemddata.shear_rate */
    shear_rate = 1.0;
    break;
  case NEMD_METHOD_SHEARRATE:
    shear_rate = nemddata.shear_rate;
    break;
  }

  /* rescale momentum exchange (vel_internal = time_step * vel) */
  nemddata.momentum /= time_step;
  /* average force := momentum transfer per time unit */
  mean_force = nemddata.momentum / (nemddata.momentum_norm*time_step);
  /* viscosity := mean_force / (shear_rate * area); the factor 4 accounts
     for the two shear planes with two surfaces each */
  viscosity = mean_force / (shear_rate*4.0*box_l[0]*box_l[1]);
  Tcl_PrintDouble(interp, viscosity, buffer);
  Tcl_AppendResult(interp, buffer, (char *)NULL);

  /* reset the accumulators for the next measurement interval */
  nemddata.momentum = 0.0;
  nemddata.momentum_norm = 0;
  return (TCL_OK);
}
/** Finalize a GHMC run: fold the per-run attempt and acceptance counters
 *  into the global GHMC statistics. */
void ghmc_close()
{
  INTEG_TRACE(fprintf(stderr,"%d: ghmc_close:\n",this_node));
  ghmc_att += ghmcdata.att;
  ghmc_acc += ghmcdata.acc;
}
/** Start a GHMC run: reset the per-run attempt/acceptance counters and
 *  record the current system state as the fallback configuration. */
void ghmc_init()
{
  INTEG_TRACE(fprintf(stderr,"%d: ghmc_init:\n",this_node));
  ghmcdata.att = 0;
  ghmcdata.acc = 0;
  save_last_state();
}
/* monte carlo step of ghmc - evaluation stage.
   Computes the Hamiltonian of the trial configuration, performs the
   Metropolis accept/reject decision on the master node, broadcasts the
   result, and either keeps (accept) or restores (reject) the state. */
void ghmc_mc()
{
  INTEG_TRACE(fprintf(stderr,"%d: ghmc_mc:\n",this_node));

  double boltzmann;
  /* flag 0: full Hamiltonian (potential + kinetic) must be recomputed */
  int ekin_update_flag = 0;
  hamiltonian_calc(ekin_update_flag);

  //make MC decision only on the master
  if (this_node==0) {
    ghmcdata.att++;

    //metropolis algorithm
    /* boltzmann first holds dE = H_new - H_old, then the acceptance prob. */
    boltzmann = ghmcdata.hmlt_new - ghmcdata.hmlt_old;
    if (boltzmann < 0)
      /* energy decreased: accept unconditionally */
      boltzmann = 1.0;
    else if (boltzmann > 30)
      /* dE so large that exp(-beta*dE) underflows: reject
         (NOTE(review): threshold ignores beta — assumes beta ~ O(1); confirm) */
      boltzmann = 0.0;
    else
      boltzmann = exp(-beta*boltzmann);

    //fprintf(stderr,"old hamiltonian : %f, new hamiltonian: % f, boltzmann factor: %f\n",ghmcdata.hmlt_old,ghmcdata.hmlt_new,boltzmann);

    if ( d_random() < boltzmann) {
      ghmcdata.acc++;
      ghmc_mc_res = GHMC_MOVE_ACCEPT;
    } else {
      ghmc_mc_res = GHMC_MOVE_REJECT;
    }
  }

  //let all nodes know about the MC decision result
  mpi_bcast_parameter(FIELD_GHMC_RES);

  if (ghmc_mc_res == GHMC_MOVE_ACCEPT) {
    /* accepted: the trial state becomes the new fallback state */
    save_last_state();
    //fprintf(stderr,"%d: mc move accepted\n",this_node);
  } else {
    /* rejected: restore the previously saved state */
    load_last_state();
    //fprintf(stderr,"%d: mc move rejected\n",this_node);

    //if the move is rejected we might need to resort particles
    //according to the loaded configurations
    cells_resort_particles(CELL_GLOBAL_EXCHANGE);
    invalidate_obs();

    /* optionally flip momenta on rejection (always or with prob. 1/2) */
    if (ghmc_mflip == GHMC_MFLIP_ON) {
      momentum_flip();
    } else if (ghmc_mflip == GHMC_MFLIP_RAND) {
      if (d_random() < 0.5) momentum_flip();
    }
  }

  //fprintf(stderr,"%d: temp after mc: %f\n",this_node,calc_local_temp());
}
/** Rescale the freshly computed forces f(t+dt) into internal units and
 *  perform the second Velocity-Verlet half step
 *  v(t+dt) = v(t+0.5*dt) + 0.5*dt * f(t+dt)/m
 *  (the prefactor is folded into the rescaled force).  Under NPT the
 *  kinetic contribution to the instantaneous pressure is accumulated. */
void rescale_forces_propagate_vel()
{
  Cell *cell;
  Particle *p;
  int i, j, np, c;
  double scale;

#ifdef NPT
  /* reset the kinetic-pressure accumulator before the sweep */
  if(integ_switch == INTEG_METHOD_NPT_ISO){
    nptiso.p_vel[0] = nptiso.p_vel[1] = nptiso.p_vel[2] = 0.0;}
#endif

  scale = 0.5 * time_step * time_step;
  INTEG_TRACE(fprintf(stderr,"%d: rescale_forces_propagate_vel:\n",this_node));

  for (c = 0; c < local_cells.n; c++) {
    cell = local_cells.cell[c];
    p  = cell->part;
    np = cell->n;
    for(i = 0; i < np; i++) {
      check_particle_force(&p[i]);
      /* Rescale forces: f_rescaled = 0.5*dt*dt * f_calculated * (1/mass) */
      p[i].f.f[0] *= scale/PMASS(p[i]);
      p[i].f.f[1] *= scale/PMASS(p[i]);
      p[i].f.f[2] *= scale/PMASS(p[i]);

      ONEPART_TRACE(if(p[i].p.identity==check_id) fprintf(stderr,"%d: OPT: SCAL f = (%.3e,%.3e,%.3e) v_old = (%.3e,%.3e,%.3e)\n",this_node,p[i].f.f[0],p[i].f.f[1],p[i].f.f[2],p[i].m.v[0],p[i].m.v[1],p[i].m.v[2]));
#ifdef VIRTUAL_SITES
      // Virtual sites are not propagated during integration
      if (ifParticleIsVirtual(&p[i])) continue;
#endif
      for(j = 0; j < 3 ; j++) {
#ifdef EXTERNAL_FORCES
        /* skip coordinates frozen by "fix" */
        if (!(p[i].l.ext_flag & COORD_FIXED(j))) {
#endif
#ifdef NPT
          if(integ_switch == INTEG_METHOD_NPT_ISO && ( nptiso.geometry & nptiso.nptgeom_dir[j] )) {
            /* accumulate m*v^2 for the instantaneous pressure, then add
               the rescaled force plus the NPT friction/noise term */
            nptiso.p_vel[j] += SQR(p[i].m.v[j])*PMASS(p[i]);
            p[i].m.v[j] += p[i].f.f[j] + friction_therm0_nptiso(p[i].m.v[j])/PMASS(p[i]);
          }
          else
#endif
            /* Propagate velocity: v(t+dt) = v(t+0.5*dt) + 0.5*dt * f(t+dt) */
            { p[i].m.v[j] += p[i].f.f[j]; }
#ifdef EXTERNAL_FORCES
        }
#endif
      }

      ONEPART_TRACE(if(p[i].p.identity==check_id) fprintf(stderr,"%d: OPT: PV_2 v_new = (%.3e,%.3e,%.3e)\n",this_node,p[i].m.v[0],p[i].m.v[1],p[i].m.v[2]));
    }
  }
#ifdef NPT
  /* reduce the per-node kinetic pressure contributions */
  finalize_p_inst_npt();
#endif
}
/** Synchronize the resort flag across all nodes: if any node requests a
 *  particle resort, every node sets its local flag. */
void announce_resort_particles()
{
  int total;

  MPI_Allreduce(&resort_particles, &total, 1, MPI_INT, MPI_SUM, comm_cart);
  resort_particles = (total > 0) ? 1 : 0;

  INTEG_TRACE(fprintf(stderr,"%d: announce_resort_particles: resort_particles=%d\n", this_node, resort_particles));
}
/** convert the torques to the body-fixed frames before the integration loop.
 *  Rotates each particle's space-frame torque into its body frame via the
 *  rotation matrix A; with a Langevin thermostat the thermostat torque is
 *  generated first and the deterministic part added on top. */
void convert_initial_torques()
{
  Particle *p;
  Cell *cell;
  int c,i, np;
  double tx, ty, tz;

  INTEG_TRACE(fprintf(stderr,"%d: convert_initial_torques:\n",this_node));
  for (c = 0; c < local_cells.n; c++) {
    cell = local_cells.cell[c];
    p  = cell->part;
    np = cell->n;
    for(i = 0; i < np; i++) {
#ifdef ROTATION_PER_PARTICLE
      /* particle does not rotate at all: nothing to convert */
      if (!p[i].p.rotation) continue;
#endif
      double A[9];
      define_rotation_matrix(&p[i], A);

      /* body-frame torque t_body = A * t_space, with A stored as
         A[row + 3*col] */
      tx = A[0 + 3*0]*p[i].f.torque[0] + A[0 + 3*1]*p[i].f.torque[1] + A[0 + 3*2]*p[i].f.torque[2];
      ty = A[1 + 3*0]*p[i].f.torque[0] + A[1 + 3*1]*p[i].f.torque[1] + A[1 + 3*2]*p[i].f.torque[2];
      tz = A[2 + 3*0]*p[i].f.torque[0] + A[2 + 3*1]*p[i].f.torque[1] + A[2 + 3*2]*p[i].f.torque[2];

      if ( thermo_switch & THERMO_LANGEVIN ) {
        /* thermostat overwrites f.torque with its own contribution, then
           the converted deterministic torque is added */
        friction_thermo_langevin_rotation(&p[i]);
        p[i].f.torque[0]+= tx;
        p[i].f.torque[1]+= ty;
        p[i].f.torque[2]+= tz;
      } else {
        p[i].f.torque[0] = tx;
        p[i].f.torque[1] = ty;
        p[i].f.torque[2] = tz;
      }

#ifdef ROTATION_PER_PARTICLE
      /* zero the torque around axes locked by the per-axis rotation bits */
      if (!(p[i].p.rotation & 2)) p[i].f.torque[0]=0;
      if (!(p[i].p.rotation & 4)) p[i].f.torque[1]=0;
      if (!(p[i].p.rotation & 8)) p[i].f.torque[2]=0;
#endif

      ONEPART_TRACE(if(p[i].p.identity==check_id) fprintf(stderr,"%d: OPT: SCAL f = (%.3e,%.3e,%.3e) v_old = (%.3e,%.3e,%.3e)\n",this_node,p[i].f.f[0],p[i].f.f[1],p[i].f.f[2],p[i].m.v[0],p[i].m.v[1],p[i].m.v[2]));
    }
  }
}
/** propagate angular velocities and quaternions \todo implement for
 *  fixed_coord_flag.
 *  Half-step update of the angular velocity followed by a full-step
 *  quaternion update; the Lagrange multiplier lambda keeps the quaternion
 *  normalized to unit length (see define_Qdd for S[], Qd, Qdd, Wd). */
void propagate_omega_quat()
{
  Particle *p;
  Cell *cell;
  int c,i,j, np;
  double lambda, dt2, dt4, dtdt, dtdt2;

  /* precomputed fractions of the time step used below */
  dt2   = time_step*0.5;
  dt4   = time_step*0.25;
  dtdt  = time_step*time_step;
  dtdt2 = dtdt*0.5;

  INTEG_TRACE(fprintf(stderr,"%d: propagate_omega_quat:\n",this_node));
  for (c = 0; c < local_cells.n; c++) {
    cell = local_cells.cell[c];
    p  = cell->part;
    np = cell->n;
    for(i = 0; i < np; i++) {
      double Qd[4], Qdd[4], S[3], Wd[3];
#ifdef VIRTUAL_SITES
      // Virtual sites are not propagated in the integration step
      if (ifParticleIsVirtual(&p[i])) continue;
#endif
      /* first and second quaternion derivatives plus angular acceleration */
      define_Qdd(&p[i], Qd, Qdd, S, Wd);

      /* Lagrange multiplier enforcing |quat| = 1 after the update */
      lambda = 1 - S[0]*dtdt2 - sqrt(1 - dtdt*(S[0] + time_step*(S[1] + dt4*(S[2]-S[0]*S[0]))));

      /* half-step angular velocity update */
      for(j=0; j < 3; j++){
        p[i].m.omega[j]+= dt2*Wd[j];
      }
      ONEPART_TRACE(if(p[i].p.identity==check_id) fprintf(stderr,"%d: OPT: PV_1 v_new = (%.3e,%.3e,%.3e)\n",this_node,p[i].m.v[0],p[i].m.v[1],p[i].m.v[2]));

      /* full-step quaternion update with normalization correction */
      p[i].r.quat[0]+= time_step*(Qd[0] + dt2*Qdd[0]) - lambda*p[i].r.quat[0];
      p[i].r.quat[1]+= time_step*(Qd[1] + dt2*Qdd[1]) - lambda*p[i].r.quat[1];
      p[i].r.quat[2]+= time_step*(Qd[2] + dt2*Qdd[2]) - lambda*p[i].r.quat[2];
      p[i].r.quat[3]+= time_step*(Qd[3] + dt2*Qdd[3]) - lambda*p[i].r.quat[3];

      // Update the director
      convert_quat_to_quatu(p[i].r.quat, p[i].r.quatu);
#ifdef DIPOLES
      // When dipoles are enabled, update dipole moment
      convert_quatu_to_dip(p[i].r.quatu, p[i].p.dipm, p[i].r.dip);
#endif

      ONEPART_TRACE(if(p[i].p.identity==check_id) fprintf(stderr,"%d: OPT: PPOS p = (%.3f,%.3f,%.3f)\n",this_node,p[i].r.p[0],p[i].r.p[1],p[i].r.p[2]));
    }
  }
}
int tclcommand_nemd(ClientData data, Tcl_Interp *interp, int argc, char **argv) { #ifdef NEMD int status = TCL_OK; INTEG_TRACE(fprintf(stderr,"%d: nemd:\n",this_node)); Tcl_ResetResult(interp); /* print nemd status */ if(argc == 1) { status = tclcommand_nemd_print_status(interp) ; } else if (ARG1_IS_S("off")) { nemd_method = NEMD_METHOD_OFF; status = nemd_free(); } else if (ARG1_IS_S("exchange")) { status = tclcommand_nemd_parse_exchange(interp,argc,argv); } else if (ARG1_IS_S("shearrate")) { status = tclcommand_nemd_parse_shearrate(interp,argc,argv); } else if (ARG1_IS_S("profile")) { status = tclcommand_nemd_parse_and_print_profile(interp); } else if (ARG1_IS_S("viscosity")) { status = tclcommand_nemd_parse_and_print_viscosity(interp); } else { Tcl_AppendResult(interp, "Unkwnown keyword: \n", (char *)NULL); return tclcommand_nemd_print_usage(interp); } return gather_runtime_errors(interp, status); #endif INTEG_TRACE(fprintf(stderr,"%d: call to nemd but not compiled in!\n",this_node)); return tclcommand_nemd_print_usage(interp); }
/** Accumulate the current per-slab mean velocities into the stored
 *  velocity profile and reset the per-step slab accumulators. */
void nemd_store_velocity_profile()
{
  int i;

  /* NEMD not initialized: nothing to store */
  if (nemddata.n_slabs == -1) return;
  INTEG_TRACE(fprintf(stderr,"%d: nemd_store_velocity_profile:\n",this_node));

  for(i=0;i<nemddata.n_slabs;i++) {
    if(nemddata.slab[i].n_parts_in_slab > 0) {
      nemddata.velocity_profile[i] += nemddata.slab[i].v_mean/(double)nemddata.slab[i].n_parts_in_slab;
    } else {
      /* BUGFIX: previously the code warned but still divided by zero,
         poisoning the profile with NaN/inf; now the empty slab simply
         contributes nothing this step */
      fprintf(stderr,"Zero particles in slab %d!!!\n",i);
    }
    nemddata.slab[i].v_mean = 0.0;
    nemddata.slab[i].n_parts_in_slab = 0;
  }
  nemddata.profile_norm++;
}
/* momentum update step of ghmc */
void ghmc_momentum_update()
{
  INTEG_TRACE(fprintf(stderr,"%d: ghmc_momentum_update:\n",this_node));

  /* When temperature scaling is enabled, tscale_momentum_update is used.
     Although it looks like code duplication, the two code paths are kept
     separate until the scaling variant has been tested thoroughly. */
  if (ghmc_tscale == GHMC_TSCALE_ON)
    tscale_momentum_update();
  else
    partial_momentum_update();

  /* only momenta changed, so just the kinetic part of the Hamiltonian
     needs recomputation (flag = 1) */
  int ekin_update_flag = 1;
  hamiltonian_calc(ekin_update_flag);
}
/** Tcl interface: snapshot the current system state as the GHMC
 *  fallback configuration. */
int tclcommand_save_state(ClientData data, Tcl_Interp *interp, int argc, char **argv)
{
#ifdef GHMC
  Tcl_ResetResult(interp);
  save_last_state();
  return TCL_OK;
#else
  /* feature not compiled in */
  INTEG_TRACE(fprintf(stderr,"%d: call to ghmc but not compiled in!\n",this_node));
  return tclcommand_ghmc_print_usage(interp);
#endif
}
int tclcommand_integrate(ClientData data, Tcl_Interp *interp, int argc, char **argv) { int n_steps; int i; INTEG_TRACE(fprintf(stderr,"%d: integrate:\n",this_node)); if (argc < 1) { Tcl_AppendResult(interp, "wrong # args: \n\"", (char *) NULL); return tclcommand_integrate_print_usage(interp); } else if (argc < 2) { return tclcommand_integrate_print_status(interp); } if (ARG1_IS_S("set")) { if (argc < 3) return tclcommand_integrate_print_status(interp); if (ARG_IS_S(2,"nvt")) return tclcommand_integrate_set_nvt(interp, argc, argv); #ifdef NPT else if (ARG_IS_S(2,"npt_isotropic")) return tclcommand_integrate_set_npt_isotropic(interp, argc, argv); #endif else { Tcl_AppendResult(interp, "unknown integrator method:\n", (char *)NULL); return tclcommand_integrate_print_usage(interp); } } else if ( !ARG_IS_I(1,n_steps) ) return tclcommand_integrate_print_usage(interp); /* go on with integrate <n_steps> */ if(n_steps < 0) { Tcl_AppendResult(interp, "illegal number of steps (must be >0) \n", (char *) NULL); return tclcommand_integrate_print_usage(interp);; } /* perform integration */ if (!correlations_autoupdate) { if (mpi_integrate(n_steps)) return gather_runtime_errors(interp, TCL_OK); } else { for (i=0; i<n_steps; i++) { if (mpi_integrate(1)) return gather_runtime_errors(interp, TCL_OK); autoupdate_correlations(); } } return TCL_OK; }
/** Tcl interface: restore the last saved GHMC system state and rebuild
 *  the cell structure accordingly. */
int tclcommand_load_state(ClientData data, Tcl_Interp *interp, int argc, char **argv)
{
#ifdef GHMC
  Tcl_ResetResult(interp);

  load_last_state();
  /* restored positions may belong to different cells/nodes */
  cells_resort_particles(CELL_GLOBAL_EXCHANGE);
  invalidate_obs();
  return TCL_OK;
#else
  /* feature not compiled in */
  INTEG_TRACE(fprintf(stderr,"%d: call to ghmc but not compiled in!\n",this_node));
  return tclcommand_ghmc_print_usage(interp);
#endif
}
void integrate_vv_recalc_maxrange() { INTEG_TRACE(fprintf(stderr,"%d: integrate_vv_recalc_maxrange:\n",this_node)); /* maximal interaction cutoff */ calc_maximal_cutoff(); if (max_cut <= 0.0) { max_range = -1.0; max_range2 = -1.0; return; } max_range = max_cut; max_range_non_bonded = max_cut_non_bonded; /* at beginning be nice */ if (skin > 0.0) { max_range += skin; max_range_non_bonded += skin; } max_range2 = SQR(max_range); max_range_non_bonded2 = SQR(max_range_non_bonded); }
/* momentum flip for ghmc */ void momentum_flip() { int i, j, c, np; Particle *part; INTEG_TRACE(fprintf(stderr,"%d: ghmc_momentum_flip:\n",this_node)); for (c = 0; c < local_cells.n; c++) { np = local_cells.cell[c]->n; part = local_cells.cell[c]->part; for (i = 0; i < np; i++) { for (j = 0; j < 3; j++) { part[i].m.v[j] = -(part[i].m.v[j]); #ifdef ROTATION part[i].m.omega[j] = -(part[i].m.omega[j]); #endif } } } }
/** Free all associated memory and reset the NEMD bookkeeping structure to
 *  its pristine "switched off" state.  Returns ES_OK. */
int nemd_free(void)
{
  INTEG_TRACE(fprintf(stderr,"%d: nemd_free\n",this_node));

  /* fastest-particle arrays exist only when exchange was enabled */
  if (nemddata.n_exchange > 0) {
    free(nemddata.slab[nemddata.mid_slab].fastest);
    free(nemddata.slab[nemddata.top_slab].fastest);
  }
  free(nemddata.velocity_profile);
  free(nemddata.slab);

  /* reset all fields to their defaults */
  nemddata.velocity_profile = NULL;
  nemddata.slab         = NULL;
  nemddata.n_slabs      = -1;
  nemddata.top_slab     = 0;
  nemddata.mid_slab     = 0;
  nemddata.thickness    = 0.0;
  nemddata.invthickness = 1.0;
  nemddata.profile_norm = 0;
  nemddata.n_exchange   = 0;
  nemddata.shear_rate   = 0.0;
  nemddata.slab_vel     = 0.0;
  return ES_OK;
}
/** Compute the GHMC Hamiltonian and update ghmcdata (hmlt_old/hmlt_new).
 *  @param ekin_update_flag
 *    if ekin_update_flag = 0, we calculate all energies with
 *    \ref master_energy_calc().
 *    if ekin_update_flag = 1, we only updated momenta, so there we only
 *    need to recalculate kinetic energy with \ref calc_kinetic(). */
void hamiltonian_calc(int ekin_update_flag)
{
  int i;
  double result = 0.0;
  double ekt, ekr;   /* translational / rotational kinetic energy */

  INTEG_TRACE(fprintf(stderr,"%d: hamiltonian_calc:\n",this_node));

  //initialize energy struct
  if (total_energy.init_status == 0) {
    init_energies(&total_energy);
    //if we are initializing energy we have to calculate all energies anyway
    ekin_update_flag = 0;
  }

  //calculate energies
  if (ekin_update_flag == 0)
    master_energy_calc();
  else
    calc_kinetic(&ekt, &ekr);

  //sum up energies on master node, and update ghmcdata struct
  if (this_node==0) {
    ghmcdata.hmlt_old = ghmcdata.hmlt_new;
    /* starting the sum at i = ekin_update_flag skips slot 0 (the stale
       kinetic energy) when only the momenta were updated; the fresh
       kinetic energy ekt+ekr is added explicitly below instead */
    for (i = ekin_update_flag; i < total_energy.data.n; i++) {
      result += total_energy.data.e[i];
    }
    if (ekin_update_flag == 1)
      result += ekt+ekr;
    ghmcdata.hmlt_new=result;
  }
}
/** Hand over velocity profile to tcl interpreter and reset the
 *  accumulated profile afterwards. */
int tclcommand_nemd_parse_and_print_profile(Tcl_Interp *interp)
{
  char buffer[TCL_DOUBLE_SPACE];

  INTEG_TRACE(fprintf(stderr,"%d: tclcommand_nemd_parse_and_print_profile:\n",this_node));

  if (nemd_method == NEMD_METHOD_OFF) {
    Tcl_AppendResult(interp, "nemd is off", (char *)NULL);
    return (TCL_OK);
  }

  for (int slab = 0; slab < nemddata.n_slabs; slab++) {
    /* note: output velocities, as usual, have to be rescaled by 1/time_step */
    double mean_vel = nemddata.velocity_profile[slab] / (nemddata.profile_norm*time_step);
    Tcl_PrintDouble(interp, mean_vel, buffer);
    Tcl_AppendResult(interp," ", buffer, (char *)NULL);
    nemddata.velocity_profile[slab] = 0.0;
  }

  nemddata.profile_norm = 0;
  return (TCL_OK);
}
/** Sanity checks and initializations performed right before an integration
 *  run starts: validates global parameters and method combinations,
 *  (re)initializes the thermostat and ensemble, sets up Scafacos if
 *  compiled in, and invalidates cached observables. */
void on_integration_start()
{
  char *errtext;

  EVENT_TRACE(fprintf(stderr, "%d: on_integration_start\n", this_node));
  INTEG_TRACE(fprintf(stderr,"%d: on_integration_start: reinit_thermo = %d, resort_particles=%d\n",this_node,reinit_thermo,resort_particles));

  /********************************************/
  /* sanity checks                            */
  /********************************************/

  if ( time_step < 0.0 ) {
    errtext = runtime_error(128);
    ERROR_SPRINTF(errtext, "{010 time_step not set} ");
  }
  if ( skin < 0.0 ) {
    errtext = runtime_error(128);
    ERROR_SPRINTF(errtext,"{011 skin not set} ");
  }
  if ( temperature < 0.0 ) {
    errtext = runtime_error(128);
    ERROR_SPRINTF(errtext,"{012 thermostat not initialized} ");
  }

#ifdef NPT
  if (integ_switch == INTEG_METHOD_NPT_ISO) {
    if (nptiso.piston <= 0.0) {
      char *errtext = runtime_error(128);
      ERROR_SPRINTF(errtext,"{014 npt on, but piston mass not set} ");
    }
#ifdef ELECTROSTATICS
    /* NPT supports only P3M (or no electrostatics at all) */
    switch(coulomb.method) {
      case COULOMB_NONE: break;
#ifdef P3M
      case COULOMB_P3M: break;
#endif /*P3M*/
      default: {
        char *errtext = runtime_error(128);
        ERROR_SPRINTF(errtext,"{014 npt only works with P3M} ");
      }
    }
#endif /*ELECTROSTATICS*/
#ifdef DIPOLES
    /* same restriction for the dipolar method */
    switch (coulomb.Dmethod) {
      case DIPOLAR_NONE: break;
#ifdef DP3M
      case DIPOLAR_P3M: break;
#endif
      default: {
        char *errtext = runtime_error(128);
        ERROR_SPRINTF(errtext,"NpT does not work with your dipolar method, please use P3M.");
      }
    }
#endif /* ifdef DIPOLES */
  }
#endif /*NPT*/

  if (!check_obs_calc_initialized()) return;

#ifdef LB
  if(lattice_switch & LATTICE_LB) {
    if (lbpar.agrid <= 0.0) {
      errtext = runtime_error(128);
      ERROR_SPRINTF(errtext,"{098 Lattice Boltzmann agrid not set} ");
    }
    if (lbpar.tau <= 0.0) {
      errtext = runtime_error(128);
      ERROR_SPRINTF(errtext,"{099 Lattice Boltzmann time step not set} ");
    }
    if (lbpar.rho <= 0.0) {
      errtext = runtime_error(128);
      ERROR_SPRINTF(errtext,"{100 Lattice Boltzmann fluid density not set} ");
    }
    if (lbpar.viscosity <= 0.0) {
      errtext = runtime_error(128);
      ERROR_SPRINTF(errtext,"{101 Lattice Boltzmann fluid viscosity not set} ");
    }
    if (dd.use_vList && skin>=lbpar.agrid/2.0) {
      errtext = runtime_error(128);
      ERROR_SPRINTF(errtext, "{104 LB requires either no Verlet lists or that the skin of the verlet list to be less than half of lattice-Boltzmann grid spacing.} ");
    }
  }
#endif
#ifdef LB_GPU
  /* GPU LB parameters live on the master node only */
  if(this_node == 0) {
    if(lattice_switch & LATTICE_LB_GPU) {
      if (lbpar_gpu.agrid < 0.0) {
        errtext = runtime_error(128);
        ERROR_SPRINTF(errtext,"{098 Lattice Boltzmann agrid not set} ");
      }
      if (lbpar_gpu.tau < 0.0) {
        errtext = runtime_error(128);
        ERROR_SPRINTF(errtext,"{099 Lattice Boltzmann time step not set} ");
      }
      if (lbpar_gpu.rho < 0.0) {
        errtext = runtime_error(128);
        ERROR_SPRINTF(errtext,"{100 Lattice Boltzmann fluid density not set} ");
      }
      if (lbpar_gpu.viscosity < 0.0) {
        errtext = runtime_error(128);
        ERROR_SPRINTF(errtext,"{101 Lattice Boltzmann fluid viscosity not set} ");
      }
      if (lb_reinit_particles_gpu) {
        lb_realloc_particles_gpu();
        lb_reinit_particles_gpu = 0;
      }
    }
  }
#endif

#ifdef METADYNAMICS
  meta_init();
#endif

#ifdef REACTIONS
  if(reaction.rate != 0.0) {
    if(max_cut < reaction.range) {
      errtext = runtime_error(128);
      ERROR_SPRINTF(errtext,"{105 Reaction range of %f exceeds maximum cutoff of %f} ", reaction.range, max_cut);
    }
  }
#endif

  /********************************************/
  /* end sanity checks                        */
  /********************************************/

  /* Prepare the thermostat */
  if (reinit_thermo) {
    thermo_init();
    reinit_thermo = 0;
    recalc_forces = 1;
  }

  /* Ensemble preparation: NVT or NPT */
  integrate_ensemble_init();

#ifdef SCAFACOS
  /* initialize Scafacos, set up the system and solver specific parameters,
     all on each node. functions include MPI_Bcast */
  mpi_bcast_coulomb_method();
  switch(coulomb.method) {
    case COULOMB_SCAFACOS_DIRECT:
    case COULOMB_SCAFACOS_EWALD:
    case COULOMB_SCAFACOS_FMM:
    case COULOMB_SCAFACOS_MEMD:
    case COULOMB_SCAFACOS_MMM1D:
    case COULOMB_SCAFACOS_MMM2D:
    case COULOMB_SCAFACOS_P2NFFT:
    case COULOMB_SCAFACOS_P3M:
    case COULOMB_SCAFACOS_PEPC:
    case COULOMB_SCAFACOS_PP3MG:
    case COULOMB_SCAFACOS_VMG:
    {
      mpi_scafacos_bcast_common_params();
      mpi_scafacos_bcast_solver_specific();
      mpi_scafacos_init();
      mpi_scafacos_common_set();
      mpi_scafacos_solver_specific_set();
      break;
    }
    default: break;
  }
  /* tune in order to generate at least defaults for cutoff, transfer the
     cutoff back to Espresso and generate new cell system on each node */
  switch(coulomb.method) {
    case COULOMB_SCAFACOS_P2NFFT:
      if( scafacos.short_range_flag == 0 ) {
        scafacos_tune();
        recalc_maximal_cutoff();
        cells_on_geometry_change(0);
      }
      break;
    case COULOMB_SCAFACOS_P3M:
      if( scafacos.short_range_flag == 0 ) {
        scafacos_tune();
        recalc_maximal_cutoff();
        cells_on_geometry_change(0);
      }
      /* NOTE(review): no break here — control falls through into default,
         which only breaks, so behavior is unchanged; an explicit break
         would be clearer. */
    default: break;
  }
#endif

  /* Update particle and observable information for routines in statistics.c */
  invalidate_obs();
  freePartCfg();
  on_observable_calc();
}
void propagate_pos_sd() { /* Verlet list criterion */ double skin2 = SQR(0.5 * skin); INTEG_TRACE(fprintf(stderr,"%d: propagate_pos:\n",this_node)); Cell *cell; Particle *p; int c, i, np; //get total number of particles int N=sd_get_particle_num(); // gather all the data for mobility calculation real * pos=NULL; pos=(real *)Utils::malloc(DIM*N*sizeof(double)); assert(pos!=NULL); real * force=NULL; force=(real *)Utils::malloc(DIM*N*sizeof(double)); assert(force!=NULL); real * velocity=NULL; velocity=(real *)Utils::malloc(DIM*N*sizeof(real)); assert(velocity!=NULL); #ifdef EXTERNAL_FORCES const int COORD_ALL=COORD_FIXED(0)&COORD_FIXED(1)&COORD_FIXED(2); #endif int j=0; // total particle counter for (c = 0; c < local_cells.n; c++){ cell = local_cells.cell[c]; p = cell->part; np = cell->n; for (i = 0; i < np; i++) { // only count nonVirtual Particles #ifdef EXTERNAL_FORCES if (p[i].p.ext_flag & COORD_ALL) { fprintf (stderr, "Warning: Fixing particle in StokesDynamics this way with EXTERNAL_FORCES is not possible (and will be ignored). Please try to bind them e.g. 
harmonicaly.\n"); } #endif #ifdef VIRTUAL_SITES if (!ifParticleIsVirtual(&p[i])) #endif { #ifdef SD_USE_FLOAT for (int d=0;d<3;d++){ pos[3*j+d] = p[i].r.p[d]; pos[3*j+d] -=rint(pos[3*j+d]/box_l[d])*box_l[d]; force[3*j+d] = p[i].f.f[d]; } #else memmove(&pos[3*j], p[i].r.p, 3*sizeof(double)); memmove(&force[3*j], p[i].f.f, 3*sizeof(double)); for (int d=0;d<3;d++){ pos[3*j+d] -=rint(pos[3*j+d]/box_l[d])*box_l[d]; } #endif j++; } } } if (!(thermo_switch & THERMO_SD) && thermo_switch & THERMO_BD){ propagate_pos_bd(N,pos,force, velocity); } else { // cuda part #ifdef CUDA //void propagate_pos_sd_cuda(double * box_l_h, int N,double * pos_h, double * force_h, double * velo_h){ #ifdef SD_USE_FLOAT real box_size[3]; for (int d=0;d<3;d++){ box_size[d]=box_l[d]; } #else real * box_size = box_l; #endif if (!(thermo_switch & THERMO_SD)){ temperature*=-1; } propagate_pos_sd_cuda(box_size,N,pos,force, velocity); if (!(thermo_switch & THERMO_SD)){ temperature*=-1; } #else fprintf(stderr, "Warning - CUDA is currently required for SD\n"); fprintf(stderr, "So i am just sitting here and copying stupidly stuff :'(\n"); #endif } #ifdef NEMD /* change momentum of each particle in top and bottom slab */ fprintf (stderr, "Warning: NEMD is in SD not supported.\n"); #endif j=0; for (c = 0; c < local_cells.n; c++) { cell = local_cells.cell[c]; p = cell->part; np = cell->n; for (i = 0; i < np; i++) { #ifdef VIRTUAL_SITES if (ifParticleIsVirtual(&p[i])) continue; #endif // write back of position and velocity data #ifdef SD_USE_FLOAT for (int d=0;d<3;d++){ p[i].r.p[d] = pos[3*j+d]+box_l[d]*rint(p[i].r.p[d]/box_l[d]); p[i].m.v[d] = velocity[3*j+d]; //p[i].f.f[d] *= (0.5*time_step*time_step)/(*part).p.mass; } #else for (int d=0;d<3;d++){ p[i].r.p[d] = pos[3*j+d]+box_l[d]*rint(p[i].r.p[d]/box_l[d]); } memmove(p[i].m.v, &velocity[DIM*j], 3*sizeof(double)); #endif // somehow this does not effect anything, although it is called ... 
for (int d=0;d<3;d++){ p[i].f.f[d] *= (0.5*time_step*time_step)/(*p).p.mass; } for (int d=0;d<DIM;d++){ assert(!isnan(pos[DIM*i+d])); } j++; ONEPART_TRACE(if(p[i].p.identity==check_id) fprintf(stderr,"%d: OPT: PV_1 v_new = (%.3e,%.3e,%.3e)\n",this_node,p[i].m.v[0],p[i].m.v[1],p[i].m.v[2])); ONEPART_TRACE(if(p[i].p.identity==check_id) fprintf(stderr,"%d: OPT: PPOS p = (%.3f,%.3f,%.3f)\n",this_node,p[i].r.p[0],p[i].r.p[1],p[i].r.p[2])); #ifdef ROTATION propagate_omega_quat_particle(&p[i]); #endif /* Verlet criterion check */ if(distance2(p[i].r.p,p[i].l.p_old) > skin2 ) resort_particles = 1; } } free(pos); free(force); free(velocity); announce_resort_particles(); }
int tclcommand_integrate(ClientData data, Tcl_Interp *interp, int argc, char **argv) { int n_steps, reuse_forces = 0; INTEG_TRACE(fprintf(stderr, "%d: integrate:\n", this_node)); if (argc < 1) { Tcl_AppendResult(interp, "wrong # args: \n\"", (char *)NULL); return tclcommand_integrate_print_usage(interp); } else if (argc < 2) { return tclcommand_integrate_print_status(interp); } if (ARG1_IS_S("set")) { if (argc < 3) return tclcommand_integrate_print_status(interp); if (ARG_IS_S(2, "nvt")) return tclcommand_integrate_set_nvt(interp, argc, argv); #ifdef NPT else if (ARG_IS_S(2, "npt_isotropic")) return tclcommand_integrate_set_npt_isotropic(interp, argc, argv); #endif else { Tcl_AppendResult(interp, "unknown integrator method:\n", (char *)NULL); return tclcommand_integrate_print_usage(interp); } } else { if (!ARG_IS_I(1, n_steps)) return tclcommand_integrate_print_usage(interp); // actual integration if ((argc == 3) && ARG_IS_S(2, "reuse_forces")) { reuse_forces = 1; } else if ((argc == 3) && ARG_IS_S(2, "recalc_forces")) { reuse_forces = -1; } else if (argc != 2) return tclcommand_integrate_print_usage(interp); } /* go on with integrate <n_steps> */ if (n_steps < 0) { Tcl_AppendResult(interp, "illegal number of steps (must be >0) \n", (char *)NULL); return tclcommand_integrate_print_usage(interp); ; } /* if skin wasn't set, do an educated guess now */ if (!skin_set) { if (max_cut == 0.0) { Tcl_AppendResult(interp, "cannot automatically determine skin, please " "set it manually via \"setmd skin\"\n", (char *)NULL); return TCL_ERROR; } skin = 0.4 * max_cut; mpi_bcast_parameter(FIELD_SKIN); } /* perform integration */ if (!correlations_autoupdate && !observables_autoupdate) { if (mpi_integrate(n_steps, reuse_forces)) return gather_runtime_errors(interp, TCL_OK); } else { for (int i = 0; i < n_steps; i++) { if (mpi_integrate(1, reuse_forces)) return gather_runtime_errors(interp, TCL_OK); reuse_forces = 1; autoupdate_observables(); autoupdate_correlations(); } if (n_steps 
== 0) { if (mpi_integrate(0, reuse_forces)) return gather_runtime_errors(interp, TCL_OK); } } return TCL_OK; }
void integrate_vv(int n_steps) { int i; /* Prepare the Integrator */ on_integration_start(); /* if any method vetoes (P3M not initialized), immediately bail out */ if (check_runtime_errors()) return; /* Verlet list criterion */ skin2 = SQR(0.5 * skin); INTEG_TRACE(fprintf(stderr,"%d: integrate_vv: integrating %d steps (recalc_forces=%d)\n", this_node, n_steps, recalc_forces)); /* Integration Step: Preparation for first integration step: Calculate forces f(t) as function of positions p(t) ( and velocities v(t) ) */ if (recalc_forces) { thermo_heat_up(); #ifdef LB transfer_momentum = 0; #endif #ifdef LB_GPU transfer_momentum_gpu = 0; #endif //VIRTUAL_SITES pos (and vel for DPD) update for security reason !!! #ifdef VIRTUAL_SITES update_mol_vel_pos(); ghost_communicator(&cell_structure.update_ghost_pos_comm); if (check_runtime_errors()) return; #ifdef ADRESS // adress_update_weights(); if (check_runtime_errors()) return; #endif #endif force_calc(); //VIRTUAL_SITES distribute forces #ifdef VIRTUAL_SITES ghost_communicator(&cell_structure.collect_ghost_force_comm); init_forces_ghosts(); distribute_mol_force(); if (check_runtime_errors()) return; #endif ghost_communicator(&cell_structure.collect_ghost_force_comm); #ifdef ROTATION convert_initial_torques(); #endif thermo_cool_down(); /* Communication Step: ghost forces */ /*apply trap forces to trapped molecules*/ #ifdef MOLFORCES calc_and_apply_mol_constraints(); #endif rescale_forces(); recalc_forces = 0; } if (check_runtime_errors()) return; n_verlet_updates = 0; /* Integration loop */ for(i=0;i<n_steps;i++) { INTEG_TRACE(fprintf(stderr,"%d: STEP %d\n",this_node,i)); #ifdef BOND_CONSTRAINT save_old_pos(); #endif /* Integration Steps: Step 1 and 2 of Velocity Verlet scheme: v(t+0.5*dt) = v(t) + 0.5*dt * f(t) p(t + dt) = p(t) + dt * v(t+0.5*dt) NOTE 1: Prefactors do not occur in formulas since we use rescaled forces and velocities. 
NOTE 2: Depending on the integration method Step 1 and Step 2 cannot be combined for the translation. */ if(integ_switch == INTEG_METHOD_NPT_ISO || nemd_method != NEMD_METHOD_OFF) { propagate_vel(); propagate_pos(); } else propagate_vel_pos(); #ifdef ROTATION propagate_omega_quat(); #endif #ifdef BOND_CONSTRAINT /**Correct those particle positions that participate in a rigid/constrained bond */ ghost_communicator(&cell_structure.update_ghost_pos_comm); correct_pos_shake(); #endif #ifdef ELECTROSTATICS if(coulomb.method == COULOMB_MAGGS) { propagate_B_field(0.5*time_step); if(maggs.yukawa) maggs_propagate_psi_vel_pos(time_step); } #endif #ifdef NPT if (check_runtime_errors()) break; #endif cells_update_ghosts(); //VIRTUAL_SITES update pos and vel (for DPD) #ifdef VIRTUAL_SITES update_mol_vel_pos(); ghost_communicator(&cell_structure.update_ghost_pos_comm); if (check_runtime_errors()) break; #ifdef ADRESS //adress_update_weights(); if (check_runtime_errors()) break; #endif #endif /* Integration Step: Step 3 of Velocity Verlet scheme: Calculate f(t+dt) as function of positions p(t+dt) ( and velocities v(t+0.5*dt) ) */ #ifdef LB transfer_momentum = 1; #endif #ifdef LB_GPU transfer_momentum_gpu = 1; #endif force_calc(); //VIRTUAL_SITES distribute forces #ifdef VIRTUAL_SITES ghost_communicator(&cell_structure.collect_ghost_force_comm); init_forces_ghosts(); distribute_mol_force(); if (check_runtime_errors()) break; #endif /* Communication step: ghost forces */ ghost_communicator(&cell_structure.collect_ghost_force_comm); /*apply trap forces to trapped molecules*/ #ifdef MOLFORCES calc_and_apply_mol_constraints(); #endif if (check_runtime_errors()) break; /* Integration Step: Step 4 of Velocity Verlet scheme: v(t+dt) = v(t+0.5*dt) + 0.5*dt * f(t+dt) */ rescale_forces_propagate_vel(); #ifdef LB if (lattice_switch & LATTICE_LB) lattice_boltzmann_update(); if (check_runtime_errors()) break; #endif #ifdef LB_GPU if(this_node == 0){ if (lattice_switch & LATTICE_LB_GPU) 
lattice_boltzmann_update_gpu(); } #endif #ifdef BOND_CONSTRAINT ghost_communicator(&cell_structure.update_ghost_pos_comm); correct_vel_shake(); #endif //VIRTUAL_SITES update vel #ifdef VIRTUAL_SITES ghost_communicator(&cell_structure.update_ghost_pos_comm); update_mol_vel(); if (check_runtime_errors()) break; #endif #ifdef ELECTROSTATICS if(coulomb.method == COULOMB_MAGGS) { propagate_B_field(0.5*time_step); if(maggs.yukawa) maggs_propagate_psi_vel(0.5*time_step); MAGGS_TRACE(check_gauss_law();); }
/** Run all sanity checks and (re)initialize dependent subsystems right
 *  before an integration run starts.
 *
 *  Each failed check appends a runtime error via ERROR_SPRINTF instead of
 *  aborting immediately, so several configuration problems can be reported
 *  in one go.  After the checks, the thermostat is (re)initialized if
 *  needed, the ensemble is prepared, and cached particle/observable data
 *  is invalidated so statistics routines see fresh state.
 */
void on_integration_start()
{
  char *errtext;

  EVENT_TRACE(fprintf(stderr, "%d: on_integration_start\n", this_node));
  INTEG_TRACE(fprintf(stderr,"%d: on_integration_start: reinit_thermo = %d, resort_particles=%d\n",
                      this_node,reinit_thermo,resort_particles));

  /********************************************/
  /* sanity checks                            */
  /********************************************/

  /* negative values act as the "not set by the user" sentinel */
  if ( time_step < 0.0 ) {
    errtext = runtime_error(128);
    ERROR_SPRINTF(errtext, "{010 time_step not set} ");
  }
  if ( skin < 0.0 ) {
    errtext = runtime_error(128);
    ERROR_SPRINTF(errtext,"{011 skin not set} ");
  }
  if ( temperature < 0.0 ) {
    errtext = runtime_error(128);
    ERROR_SPRINTF(errtext,"{012 thermostat not initialized} ");
  }

#ifdef NPT
  /* NpT requires a piston mass and is only compatible with the (D)P3M
     long-range methods (or none at all). */
  if (integ_switch == INTEG_METHOD_NPT_ISO) {
    if (nptiso.piston <= 0.0) {
      char *errtext = runtime_error(128);
      ERROR_SPRINTF(errtext,"{014 npt on, but piston mass not set} ");
    }

#ifdef ELECTROSTATICS
    switch(coulomb.method) {
      case COULOMB_NONE:  break;
#ifdef P3M
      case COULOMB_P3M:   break;
#endif /*P3M*/
      default: {
        char *errtext = runtime_error(128);
        ERROR_SPRINTF(errtext,"{014 npt only works with P3M} ");
      }
    }
#endif /*ELECTROSTATICS*/

#ifdef DIPOLES
    switch (coulomb.Dmethod) {
      case DIPOLAR_NONE: break;
#ifdef DP3M
      case DIPOLAR_P3M: break;
#endif
      default: {
        char *errtext = runtime_error(128);
        /* NOTE(review): unlike every other message here, this one has no
           "{0xx ...} " error-code prefix -- confirm whether the error-parsing
           convention requires one. */
        ERROR_SPRINTF(errtext,"NpT does not work with your dipolar method, please use P3M.");
      }
    }
#endif /* ifdef DIPOLES */
  }
#endif /*NPT*/

  if (!check_obs_calc_initialized()) return;

#ifdef LB
  /* CPU lattice-Boltzmann fluid: all parameters must be explicitly set,
     and the Verlet skin must fit within half a grid spacing. */
  if(lattice_switch & LATTICE_LB) {
    if (lbpar.agrid <= 0.0) {
      errtext = runtime_error(128);
      ERROR_SPRINTF(errtext,"{098 Lattice Boltzmann agrid not set} ");
    }
    if (lbpar.tau <= 0.0) {
      errtext = runtime_error(128);
      ERROR_SPRINTF(errtext,"{099 Lattice Boltzmann time step not set} ");
    }
    if (lbpar.rho <= 0.0) {
      errtext = runtime_error(128);
      ERROR_SPRINTF(errtext,"{100 Lattice Boltzmann fluid density not set} ");
    }
    if (lbpar.viscosity <= 0.0) {
      errtext = runtime_error(128);
      ERROR_SPRINTF(errtext,"{101 Lattice Boltzmann fluid viscosity not set} ");
    }
    if (dd.use_vList && skin>=lbpar.agrid/2.0) {
      errtext = runtime_error(128);
      ERROR_SPRINTF(errtext, "{104 LB requires either no Verlet lists or that the skin of the verlet list to be less than half of lattice-Boltzmann grid spacing.} ");
    }
  }
#endif
#ifdef LB_GPU
  /* GPU lattice-Boltzmann fluid: checks mirror the CPU ones above but run
     on the master node only.
     NOTE(review): these use "< 0.0" while the CPU checks use "<= 0.0" --
     confirm that 0.0 is a valid "set" value for the GPU defaults. */
  if(this_node == 0){
    if(lattice_switch & LATTICE_LB_GPU) {
      if (lbpar_gpu.agrid < 0.0) {
        errtext = runtime_error(128);
        ERROR_SPRINTF(errtext,"{098 Lattice Boltzmann agrid not set} ");
      }
      if (lbpar_gpu.tau < 0.0) {
        errtext = runtime_error(128);
        ERROR_SPRINTF(errtext,"{099 Lattice Boltzmann time step not set} ");
      }
      if (lbpar_gpu.rho < 0.0) {
        errtext = runtime_error(128);
        ERROR_SPRINTF(errtext,"{100 Lattice Boltzmann fluid density not set} ");
      }
      if (lbpar_gpu.viscosity < 0.0) {
        errtext = runtime_error(128);
        ERROR_SPRINTF(errtext,"{101 Lattice Boltzmann fluid viscosity not set} ");
      }
      /* re-upload particle data to the GPU if it was flagged stale */
      if (lb_reinit_particles_gpu) {
        lb_realloc_particles_gpu();
        lb_reinit_particles_gpu = 0;
      }
    }
  }
#endif

#ifdef METADYNAMICS
  meta_init();
#endif

#ifdef CATALYTIC_REACTIONS
  /* reactions need Verlet lists + domain decomposition, and the reaction
     range must fit inside the maximum interaction cutoff */
  if(reaction.ct_rate != 0.0) {
    if( dd.use_vList == 0 || cell_structure.type != CELL_STRUCTURE_DOMDEC) {
      errtext = runtime_error(128);
      ERROR_SPRINTF(errtext,"{105 The CATALYTIC_REACTIONS feature requires verlet lists and domain decomposition} ");
      check_runtime_errors();
    }
    if(max_cut < reaction.range) {
      errtext = runtime_error(128);
      ERROR_SPRINTF(errtext,"{106 Reaction range of %f exceeds maximum cutoff of %f} ", reaction.range, max_cut);
    }
  }
#endif

  /********************************************/
  /* end sanity checks                        */
  /********************************************/

  /* Prepare the thermostat; forces must then be recalculated since the
     friction/noise prefactors may have changed. */
  if (reinit_thermo) {
    thermo_init();
    reinit_thermo = 0;
    recalc_forces = 1;
  }

  /* Ensemble preparation: NVT or NPT */
  integrate_ensemble_init();

  /* Update particle and observable information for routines in statistics.c */
  invalidate_obs();
  freePartCfg();
  on_observable_calc();
}
void integrate_vv(int n_steps) { int i; /* Prepare the Integrator */ on_integration_start(); /* if any method vetoes (P3M not initialized), immediately bail out */ if (check_runtime_errors()) return; /* Verlet list criterion */ skin2 = SQR(0.5 * skin); INTEG_TRACE(fprintf(stderr,"%d: integrate_vv: integrating %d steps (recalc_forces=%d)\n", this_node, n_steps, recalc_forces)); /* Integration Step: Preparation for first integration step: Calculate forces f(t) as function of positions p(t) ( and velocities v(t) ) */ if (recalc_forces) { thermo_heat_up(); #ifdef LB transfer_momentum = 0; #endif #ifdef LB_GPU transfer_momentum_gpu = 0; #endif //VIRTUAL_SITES pos (and vel for DPD) update for security reason !!! #ifdef VIRTUAL_SITES update_mol_vel_pos(); ghost_communicator(&cell_structure.update_ghost_pos_comm); if (check_runtime_errors()) return; #ifdef ADRESS // adress_update_weights(); if (check_runtime_errors()) return; #endif #endif force_calc(); //VIRTUAL_SITES distribute forces #ifdef VIRTUAL_SITES ghost_communicator(&cell_structure.collect_ghost_force_comm); init_forces_ghosts(); distribute_mol_force(); if (check_runtime_errors()) return; #endif ghost_communicator(&cell_structure.collect_ghost_force_comm); #ifdef ROTATION convert_initial_torques(); #endif thermo_cool_down(); /* Communication Step: ghost forces */ /*apply trap forces to trapped molecules*/ #ifdef MOLFORCES calc_and_apply_mol_constraints(); #endif rescale_forces(); recalc_forces = 0; } if (check_runtime_errors()) return; n_verlet_updates = 0; /* Integration loop */ for(i=0;i<n_steps;i++) { INTEG_TRACE(fprintf(stderr,"%d: STEP %d\n",this_node,i)); #ifdef BOND_CONSTRAINT save_old_pos(); #endif /* Integration Steps: Step 1 and 2 of Velocity Verlet scheme: v(t+0.5*dt) = v(t) + 0.5*dt * f(t) p(t + dt) = p(t) + dt * v(t+0.5*dt) NOTE 1: Prefactors do not occur in formulas since we use rescaled forces and velocities. 
NOTE 2: Depending on the integration method Step 1 and Step 2 cannot be combined for the translation. */ if(integ_switch == INTEG_METHOD_NPT_ISO || nemd_method != NEMD_METHOD_OFF) { propagate_vel(); propagate_pos(); } else propagate_vel_pos(); #ifdef ROTATION propagate_omega_quat(); #endif #ifdef BOND_CONSTRAINT /**Correct those particle positions that participate in a rigid/constrained bond */ ghost_communicator(&cell_structure.update_ghost_pos_comm); correct_pos_shake(); #endif #ifdef ELECTROSTATICS if(coulomb.method == COULOMB_MAGGS) { maggs_propagate_B_field(0.5*time_step); } #endif #ifdef NPT if (check_runtime_errors()) break; #endif cells_update_ghosts(); //VIRTUAL_SITES update pos and vel (for DPD) #ifdef VIRTUAL_SITES update_mol_vel_pos(); ghost_communicator(&cell_structure.update_ghost_pos_comm); if (check_runtime_errors()) break; #if defined(VIRTUAL_SITES_RELATIVE) && defined(LB) // This is on a workaround stage: // When using virtual sites relative and LB at the same time, it is necessary // to reassemble the cell lists after all position updates, also of virtual // particles. 
if (cell_structure.type == CELL_STRUCTURE_DOMDEC && (!dd.use_vList) ) cells_update_ghosts(); #endif #ifdef ADRESS //adress_update_weights(); if (check_runtime_errors()) break; #endif #endif /* Integration Step: Step 3 of Velocity Verlet scheme: Calculate f(t+dt) as function of positions p(t+dt) ( and velocities v(t+0.5*dt) ) */ #ifdef LB transfer_momentum = 1; #endif #ifdef LB_GPU transfer_momentum_gpu = 1; #endif force_calc(); //VIRTUAL_SITES distribute forces #ifdef VIRTUAL_SITES ghost_communicator(&cell_structure.collect_ghost_force_comm); init_forces_ghosts(); distribute_mol_force(); if (check_runtime_errors()) break; #endif /* Communication step: ghost forces */ ghost_communicator(&cell_structure.collect_ghost_force_comm); /*apply trap forces to trapped molecules*/ #ifdef MOLFORCES calc_and_apply_mol_constraints(); #endif if (check_runtime_errors()) break; /* Integration Step: Step 4 of Velocity Verlet scheme: v(t+dt) = v(t+0.5*dt) + 0.5*dt * f(t+dt) */ rescale_forces_propagate_vel(); recalc_forces = 0; #ifdef LB if (lattice_switch & LATTICE_LB) lattice_boltzmann_update(); if (check_runtime_errors()) break; #endif #ifdef LB_GPU if(this_node == 0){ if (lattice_switch & LATTICE_LB_GPU) lattice_boltzmann_update_gpu(); } #endif #ifdef BOND_CONSTRAINT ghost_communicator(&cell_structure.update_ghost_pos_comm); correct_vel_shake(); #endif #ifdef ROTATION convert_torques_propagate_omega(); #endif //VIRTUAL_SITES update vel #ifdef VIRTUAL_SITES ghost_communicator(&cell_structure.update_ghost_pos_comm); update_mol_vel(); if (check_runtime_errors()) break; #endif #ifdef ELECTROSTATICS if(coulomb.method == COULOMB_MAGGS) { maggs_propagate_B_field(0.5*time_step); } #endif #ifdef NPT if((this_node==0) && (integ_switch == INTEG_METHOD_NPT_ISO)) nptiso.p_inst_av += nptiso.p_inst; #endif /* Propagate time: t = t+dt */ sim_time += time_step; } /* verlet list statistics */ if(n_verlet_updates>0) verlet_reuse = n_steps/(double) n_verlet_updates; else verlet_reuse = 0; #ifdef NPT 
if(integ_switch == INTEG_METHOD_NPT_ISO) { nptiso.invalidate_p_vel = 0; MPI_Bcast(&nptiso.p_inst, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&nptiso.p_diff, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&nptiso.volume, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); if(this_node==0) nptiso.p_inst_av /= 1.0*n_steps; MPI_Bcast(&nptiso.p_inst_av, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); } #endif }
/** Convert the torques of all local particles to their body-fixed frames
 *  and propagate the angular velocities by half a time step.
 *
 *  For each particle: the space-frame torque is rotated into the body
 *  frame via the particle's rotation matrix; if a Langevin thermostat is
 *  active the rotational friction/noise torque is added on top, otherwise
 *  the body-frame torque replaces the stored one.  Then omega is advanced
 *  by dt/2 using the (body-frame) Euler equations, with a short fixed-point
 *  iteration to account for the gyroscopic term when the inertia tensor is
 *  anisotropic.
 */
void convert_torques_propagate_omega()
{
  Particle *p;
  Cell *cell;
  int c,i, np, times;
  double dt2, tx, ty, tz;   /* dt2 = half time step; t{x,y,z} = body-frame torque */

  dt2 = time_step*0.5;
  INTEG_TRACE(fprintf(stderr,"%d: convert_torques_propagate_omega:\n",this_node));

  /* loop over all particles in all local cells */
  for (c = 0; c < local_cells.n; c++) {
    cell = local_cells.cell[c];
    p  = cell->part;
    np = cell->n;
    for(i = 0; i < np; i++) {
      double A[9];   /* 3x3 rotation matrix, column-major A[row + 3*col] */
      define_rotation_matrix(&p[i], A);

      /* rotate the space-frame torque into the body frame: t = A * torque */
      tx = A[0 + 3*0]*p[i].f.torque[0] + A[0 + 3*1]*p[i].f.torque[1] + A[0 + 3*2]*p[i].f.torque[2];
      ty = A[1 + 3*0]*p[i].f.torque[0] + A[1 + 3*1]*p[i].f.torque[1] + A[1 + 3*2]*p[i].f.torque[2];
      tz = A[2 + 3*0]*p[i].f.torque[0] + A[2 + 3*1]*p[i].f.torque[1] + A[2 + 3*2]*p[i].f.torque[2];

      if ( thermo_switch & THERMO_LANGEVIN ) {
#ifdef THERMOSTAT_IGNORE_NON_VIRTUAL
	if (!ifParticleIsVirtual(&p[i]))
#endif
	{
	  /* thermostat overwrites p[i].f.torque with friction+noise,
	     then the deterministic body-frame torque is added back */
	  friction_thermo_langevin_rotation(&p[i]);
	  p[i].f.torque[0]+= tx;
	  p[i].f.torque[1]+= ty;
	  p[i].f.torque[2]+= tz;
	}
      } else {
	p[i].f.torque[0] = tx;
	p[i].f.torque[1] = ty;
	p[i].f.torque[2] = tz;
      }

      ONEPART_TRACE(if(p[i].p.identity==check_id) fprintf(stderr,"%d: OPT: SCAL f = (%.3e,%.3e,%.3e) v_old = (%.3e,%.3e,%.3e)\n",this_node,p[i].f.f[0],p[i].f.f[1],p[i].f.f[2],p[i].m.v[0],p[i].m.v[1],p[i].m.v[2]));

      /* first half-step of omega: omega += dt/2 * torque / inertia */
#ifdef ROTATIONAL_INERTIA
      /* NOTE(review): divides by BOTH the per-particle rinertia and the
         global I -- confirm this double division is intended rather than
         rinertia alone. */
      p[i].m.omega[0]+= dt2*p[i].f.torque[0]/p[i].p.rinertia[0]/I[0];
      p[i].m.omega[1]+= dt2*p[i].f.torque[1]/p[i].p.rinertia[1]/I[1];
      p[i].m.omega[2]+= dt2*p[i].f.torque[2]/p[i].p.rinertia[2]/I[2];
#else
      p[i].m.omega[0]+= dt2*p[i].f.torque[0]/I[0];
      p[i].m.omega[1]+= dt2*p[i].f.torque[1]/I[1];
      p[i].m.omega[2]+= dt2*p[i].f.torque[2]/I[2];
#endif

      /* If the tensor of inertia is isotropic, the following refinement is
	 not needed.  Otherwise repeat this loop 2-3 times depending on the
	 required accuracy.
	 NOTE(review): the loop actually runs 6 iterations (times<=5), more
	 than the 2-3 the comment suggests -- presumably a conservative
	 choice; confirm. */
      for(times=0;times<=5;times++) {
	double Wd[3];   /* gyroscopic (Euler) term omega x (I omega) / I */
#ifdef ROTATIONAL_INERTIA
	Wd[0] = (p[i].m.omega[1]*p[i].m.omega[2]*(I[1]-I[2]))/I[0]/p[i].p.rinertia[0];
	Wd[1] = (p[i].m.omega[2]*p[i].m.omega[0]*(I[2]-I[0]))/I[1]/p[i].p.rinertia[1];
	Wd[2] = (p[i].m.omega[0]*p[i].m.omega[1]*(I[0]-I[1]))/I[2]/p[i].p.rinertia[2];
#else
	Wd[0] = (p[i].m.omega[1]*p[i].m.omega[2]*(I[1]-I[2]))/I[0];
	Wd[1] = (p[i].m.omega[2]*p[i].m.omega[0]*(I[2]-I[0]))/I[1];
	Wd[2] = (p[i].m.omega[0]*p[i].m.omega[1]*(I[0]-I[1]))/I[2];
#endif
	p[i].m.omega[0]+= dt2*Wd[0];
	p[i].m.omega[1]+= dt2*Wd[1];
	p[i].m.omega[2]+= dt2*Wd[2];
      }

      ONEPART_TRACE(if(p[i].p.identity==check_id) fprintf(stderr,"%d: OPT: PV_2 v_new = (%.3e,%.3e,%.3e)\n",this_node,p[i].m.v[0],p[i].m.v[1],p[i].m.v[2]));
    }
  }
}
void integrate_sd(int n_steps) { /* Prepare the Integrator */ on_integration_start(); /* if any method vetoes (P3M not initialized), immediately bail out */ if (check_runtime_errors()) return; INTEG_TRACE(fprintf(stderr,"%d: integrate_vv: integrating %d steps (recalc_forces=%d)\n", this_node, n_steps, recalc_forces)); /* Integration Step: Calculate forces f(t) as function of positions p(t) ( and velocities v(t) ) */ //if (recalc_forces) { //thermo_heat_up(); ghost_communicator(&cell_structure.collect_ghost_force_comm); #ifdef ROTATION convert_initial_torques(); #endif //thermo_cool_down(); /* Communication Step: ghost forces */ /*apply trap forces to trapped molecules*/ #ifdef MOLFORCES // prob. works only with harmonic bounds calc_and_apply_mol_constraints(); #endif /* should be pretty late, since it needs to zero out the total force */ #ifdef COMFIXED calc_comfixed(); #endif //rescale_forces(); #ifdef COLLISION_DETECTION //should not be neccessery, as integrator avoids collision handle_collisions(); #endif // end of force calculation #ifdef GHMC if(thermo_switch & THERMO_GHMC) ghmc_init(); #endif if (check_runtime_errors()) return; n_verlet_updates = 0; /* Integration loop */ for(int step=0;step<n_steps;step++) { INTEG_TRACE(fprintf(stderr,"%d: STEP %d\n",this_node,step)); //sd_set_particles_apart(); #ifdef BOND_CONSTRAINT save_old_pos(); #endif #ifdef GHMC if(thermo_switch & THERMO_GHMC) { if (step % ghmc_nmd == 0) ghmc_momentum_update(); } #endif if(thermo_switch & ~(THERMO_SD|THERMO_BD) ){ static bool warned_thermo_sd_other=false; if (!warned_thermo_sd_other){ fprintf (stderr, "Warning, using another thermo than the one provided by StokesDynamics breaks (most likely) StokesDynamics.\n"); warned_thermo_sd_other=true; } } if (thermo_switch & THERMO_SD && thermo_switch &THERMO_BD) { fprintf (stderr, "Warning: cannot use BD and SD. 
Disabeling BD!\n"); thermo_switch &= ~THERMO_BD; } /* Integration Step: Step 3 of Velocity Verlet scheme: Calculate f(t) as function of positions p(t) ( and ``velocities'' v(t) ) */ #ifdef LB transfer_momentum = 1; #endif #ifdef LB_GPU transfer_momentum_gpu = 1; #endif force_calc(); #ifdef CATALYTIC_REACTIONS integrate_reaction(); #endif if (check_runtime_errors()) break; #ifdef LB if (lattice_switch & LATTICE_LB) lattice_boltzmann_update(); if (check_runtime_errors()) break; #endif #ifdef LB_GPU if(this_node == 0){ #ifdef ELECTROKINETICS if (ek_initialized) { ek_integrate(); } else { #endif if (lattice_switch & LATTICE_LB_GPU) lattice_boltzmann_update_gpu(); #ifdef ELECTROKINETICS } #endif } #endif //LB_GPU #ifdef ELECTROSTATICS if(coulomb.method == COULOMB_MAGGS) { maggs_propagate_B_field(0.5*time_step); } #endif #ifdef NPT if((this_node==0) && (integ_switch == INTEG_METHOD_NPT_ISO)) nptiso.p_inst_av += nptiso.p_inst; #endif #ifdef GHMC if(thermo_switch & THERMO_GHMC) { if (step % ghmc_nmd == ghmc_nmd-1) ghmc_mc(); } #endif /** Integration Steps: Update the Positions \[ p_i(t + dt) = p_i(t) + dt * \mu_{ij} * f_j(t) + dt * \mu_{ij} * f^B_j \] */ propagate_pos_sd(); // we dont have velocities #ifdef BOND_CONSTRAINT static bool bond_constraint_with_sd_warned=false; if (!bond_constraint_with_sd_warned){ // warn only once fprintf (stderr, "Warning, using BOND_CONSTRAINT with StokesDynamics might not work as expected!.\n"); bond_constraint_with_sd_warned=true; } /**Correct those particle positions that participate in a rigid/constrained bond */ cells_update_ghosts(); correct_pos_shake(); #endif #ifdef ELECTROSTATICS if(coulomb.method == COULOMB_MAGGS) { maggs_propagate_B_field(0.5*time_step); } #endif #ifdef NPT if (check_runtime_errors()) break; #endif /* Propagate time: t = t+dt */ sim_time += time_step; } /* verlet list statistics */ if(n_verlet_updates>0) verlet_reuse = n_steps/(double) n_verlet_updates; else verlet_reuse = 0; #ifdef NPT if(integ_switch == 
INTEG_METHOD_NPT_ISO) { nptiso.invalidate_p_vel = 0; MPI_Bcast(&nptiso.p_inst, 1, MPI_DOUBLE, 0, comm_cart); MPI_Bcast(&nptiso.p_diff, 1, MPI_DOUBLE, 0, comm_cart); MPI_Bcast(&nptiso.volume, 1, MPI_DOUBLE, 0, comm_cart); if(this_node==0) nptiso.p_inst_av /= 1.0*n_steps; MPI_Bcast(&nptiso.p_inst_av, 1, MPI_DOUBLE, 0, comm_cart); } #endif #ifdef GHMC if(thermo_switch & THERMO_GHMC) ghmc_close(); #endif }