Example #1
/* Send the contents of a t_atoms structure to node dest. */
static void mv_atoms(int dest,t_symtab *symtab,t_atoms *atoms)
{
  int nr;
  
  nr=atoms->nr;
  blocktx(dest,nr);                                        /* number of atoms */
  nblocktx(dest,nr,atoms->atom);                           /* per-atom data   */
  mv_strings(dest,symtab,atoms->nr,atoms->atomname);       /* atom names      */
  mv_strings(dest,symtab,atoms->nres,atoms->resname);      /* residue names   */
  mv_strings(dest,symtab,atoms->ngrpname,atoms->grpname);  /* group names     */
  mv_grps(dest,atoms->grps);                               /* group data      */
  mv_block(dest,&atoms->excl);                             /* exclusion lists */
}
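
blocktx and nblocktx are not shown here, but the way mv_atoms uses them suggests a
length-prefixed send: first the element count, then that many fixed-size records.
The sketch below illustrates that assumed pattern with raw MPI calls; send_int and
send_array are hypothetical names, not the actual GROMACS routines.

#include <mpi.h>
#include <stddef.h>

/* Hypothetical helpers illustrating the assumed blocktx/nblocktx idiom:
 * send a single int, or a count followed by that many fixed-size records.
 */
static void send_int(int dest,int value)
{
  MPI_Send(&value,1,MPI_INT,dest,0,MPI_COMM_WORLD);
}

static void send_array(int dest,int n,void *data,size_t elem_size)
{
  /* Length prefix first, then the payload as raw bytes. */
  MPI_Send(&n,1,MPI_INT,dest,0,MPI_COMM_WORLD);
  MPI_Send(data,n*(int)elem_size,MPI_BYTE,dest,0,MPI_COMM_WORLD);
}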
Example #2
/* Send the run parameters, topology, coordinates and velocities to the
 * right-hand neighbour node (only right is used in this routine).
 */
void mv_data(int left,int right,t_parm *parm,t_nsborder *nsb,
             t_topology *top,rvec x[],rvec v[])
{
  int i;
  
  mv_parm(right,parm);
  mv_nsb(right,nsb);
  mv_symtab(right,&top->symtab);
  mv_string(right,&top->symtab,top->name);
  mv_atoms(right,&top->symtab,&top->atoms);
  mv_idef(right,&top->idef);
  for (i=0; (i<ebNR); i++) 
    mv_block(right,&top->blocks[i]);
  mv_vectors(right,top->atoms.nr,x,v);
}
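
mv_data takes both a left and a right node id but only ever sends to the right,
which fits a ring topology where data travels in one direction. The sketch below
shows how such neighbour ranks could be derived from an MPI communicator;
ring_neighbours is a hypothetical helper, not the GROMACS t_commrec setup.

#include <mpi.h>

/* Hypothetical sketch: compute left/right neighbour ranks in a ring. */
static void ring_neighbours(MPI_Comm comm,int *left,int *right)
{
  int rank,size;

  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);
  *left  = (rank-1+size) % size;  /* neighbour we would receive from */
  *right = (rank+1) % size;       /* neighbour we send to            */
}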
Example #3
int main(int argc,char *argv[])
{
  static char *desc[] = {
    "The [TT]pmetest[tt] program tests the scaling of the PME code. When only given",
    "a [TT].tpr[tt] file it will compute PME for one frame. When given a trajectory",
    "it will do so for all the frames in the trajectory. Before the PME",
    "routine is called the coordinates are sorted along the X-axis.[PAR]",
    "As an extra service to the public the program can also compute",
    "long-range Coulomb energies for components of the system. When the",
    "[TT]-groups[tt] flag is given to the program the energy groups",
    "from the [TT].tpr[tt] file will be read, and half an energy matrix computed."
  };
  t_commrec    *cr,*mcr;
  static t_filenm fnm[] = {
    { efTPX, NULL,      NULL,       ffREAD  },
    { efTRN, "-o",      NULL,       ffWRITE },
    { efLOG, "-g",      "pme",      ffWRITE },
    { efTRX, "-f",      NULL,       ffOPTRD },
    { efXVG, "-x",      "ener-pme", ffWRITE }
  };
#define NFILE asize(fnm)

  /* Command line options */
  static gmx_bool bVerbose=FALSE;
  static gmx_bool bOptFFT=FALSE;
  static gmx_bool bSort=FALSE;
  static int  ewald_geometry=eewg3D;
  static int  nnodes=1;
  static int  pme_order=0;
  static rvec grid = { -1, -1, -1 };
  static real rc   = 0.0;
  static real dtol = 0.0;
  static gmx_bool bGroups = FALSE;
  static t_pargs pa[] = {
    { "-np",      FALSE, etINT, {&nnodes},
      "Number of nodes, must be the same as used for [TT]grompp[tt]" },
    { "-v",       FALSE, etBOOL,{&bVerbose},  
      "Be loud and noisy" },
    { "-sort",    FALSE, etBOOL,{&bSort},  
      "Sort coordinates. Crucial for domain decomposition." },
    { "-grid",    FALSE, etRVEC,{&grid},
      "Number of grid cells in X, Y, Z dimension (if -1 use from [TT].tpr[tt])" },
    { "-order",   FALSE, etINT, {&pme_order},
      "Order of the PME spreading algorithm" },
    { "-groups",  FALSE, etBOOL, {&bGroups},
      "Compute half an energy matrix based on the energy groups in your [TT].tpr[tt] file" },
    { "-rc",      FALSE, etREAL, {&rc},
      "Rcoulomb for Ewald summation" },
    { "-tol",     FALSE, etREAL, {&dtol},
      "Tolerance for Ewald summation" }
  };
  FILE        *fp;
  t_inputrec  *ir;
  t_topology  top;
  t_tpxheader tpx;
  t_nrnb      nrnb;
  t_nsborder  *nsb;
  t_forcerec  *fr;
  t_mdatoms   *mdatoms;
  char        title[STRLEN];
  int         natoms,step,status,i,ncg,root;
  real        t,lambda,ewaldcoeff,qtot;
  rvec        *x,*f,*xbuf;
  int         *index;
  gmx_bool        bCont;
  real        *charge,*qbuf,*qqbuf;
  matrix      box;
  
  /* Start the actual parallel code if necessary */
  cr   = init_par(&argc,&argv);
  root = 0;
  
  if (MASTER(cr)) 
    CopyRight(stderr,argv[0]);
  
  /* Parse the command line on all processors; the arguments have already
   * been passed on by init_par (see above).
   */
  parse_common_args(&argc,argv,
		    PCA_KEEP_ARGS | PCA_NOEXIT_ON_ARGS | PCA_BE_NICE |
		    PCA_CAN_SET_DEFFNM | (MASTER(cr) ? 0 : PCA_QUIET),
		    NFILE,fnm,asize(pa),pa,asize(desc),desc,0,NULL);
  
#ifndef GMX_MPI
  if (nnodes > 1) 
    gmx_fatal(FARGS,"GROMACS compiled without MPI support - can't do parallel runs");
#endif

  /* Open log files on all processors */
  open_log(ftp2fn(efLOG,NFILE,fnm),cr);
  snew(ir,1);
  
  if (MASTER(cr)) {
    /* Read tpr file etc. */
    read_tpxheader(ftp2fn(efTPX,NFILE,fnm),&tpx,FALSE,NULL,NULL);
    snew(x,tpx.natoms);
    read_tpx(ftp2fn(efTPX,NFILE,fnm),&step,&t,&lambda,ir,
	     box,&natoms,x,NULL,NULL,&top);
    /* Charges */
    qtot = 0;
    snew(charge,natoms);
    for(i=0; (i<natoms); i++) {
      charge[i] = top.atoms.atom[i].q;
      qtot += charge[i];
    }
  
    /* Grid stuff */
    if (opt2parg_bSet("-grid",asize(pa),pa)) {
      ir->nkx = grid[XX];
      ir->nky = grid[YY];
      ir->nkz = grid[ZZ];
    }
    /* Check command line parameters for consistency */
    if ((ir->nkx <= 0) || (ir->nky <= 0) || (ir->nkz <= 0))
      gmx_fatal(FARGS,"PME grid = %d %d %d",ir->nkx,ir->nky,ir->nkz);
    if (opt2parg_bSet("-rc",asize(pa),pa)) 
      ir->rcoulomb = rc;
    if (ir->rcoulomb <= 0)
      gmx_fatal(FARGS,"rcoulomb should be > 0 (not %f)",ir->rcoulomb);
    if (opt2parg_bSet("-order",asize(pa),pa)) 
      ir->pme_order = pme_order;
    if (ir->pme_order <= 0)
      gmx_fatal(FARGS,"pme_order should be > 0 (not %d)",ir->pme_order);
    if (opt2parg_bSet("-tol",asize(pa),pa))
      ir->ewald_rtol = dtol;
    if (ir->ewald_rtol <= 0)
      gmx_fatal(FARGS,"ewald_tol should be > 0 (not %f)",ir->ewald_rtol);
  }
  else {
    init_top(&top);
  }

  /* Parallelization: divide the charge groups over the nodes */
  snew(nsb,1);
  if (MASTER(cr)) {
    ncg = top.blocks[ebCGS].multinr[0];
    /* Split the ncg charge groups roughly evenly: multinr[i] is the
     * cumulative upper bound for node i, and all remaining entries up
     * to MAXNODES point at the end of the block.
     */
    for(i=0; (i<cr->nnodes-1); i++)
      top.blocks[ebCGS].multinr[i] = min(ncg,(ncg*(i+1))/cr->nnodes);
    for( ; (i<MAXNODES); i++)
      top.blocks[ebCGS].multinr[i] = ncg;
  }
  if (PAR(cr)) {
    /* Set some variables to zero to avoid core dumps */
    ir->opts.ngtc = ir->opts.ngacc = ir->opts.ngfrz = ir->opts.ngener = 0;
#ifdef GMX_MPI
    /* Distribute the data over processors */
    MPI_Bcast(&natoms,1,MPI_INT,root,MPI_COMM_WORLD);
    /* The inputrec goes out as raw bytes; this assumes identical struct
     * layout on all nodes (its group counts were zeroed above). */
    MPI_Bcast(ir,sizeof(*ir),MPI_BYTE,root,MPI_COMM_WORLD);
    MPI_Bcast(&qtot,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
#endif

    /* Use the dedicated communication routines: the master sends to each of the other n-1 nodes */
    if (MASTER(cr)) {
      for(i=1; (i<cr->nnodes); i++) {
	mv_block(i,&(top.blocks[ebCGS]));
	mv_block(i,&(top.atoms.excl));
      }
    }
    else {
      ld_block(root,&(top.blocks[ebCGS]));
      ld_block(root,&(top.atoms.excl));
    }
    if (!MASTER(cr)) {
      snew(charge,natoms);
      snew(x,natoms);
    }
#ifdef GMX_MPI
    MPI_Bcast(charge,natoms,GMX_MPI_REAL,root,MPI_COMM_WORLD);
#endif
  }
  ewaldcoeff = calc_ewaldcoeff(ir->rcoulomb,ir->ewald_rtol);
  
  
  if (bVerbose)
    pr_inputrec(stdlog,0,"Inputrec",ir);

  /* Allocate memory for temp arrays etc. */
  snew(xbuf,natoms);
  snew(f,natoms);
  snew(qbuf,natoms);
  snew(qqbuf,natoms);
  snew(index,natoms);

  /* Initialize the PME code */  
  init_pme(stdlog,cr,ir->nkx,ir->nky,ir->nkz,ir->pme_order,
	   natoms,FALSE,bOptFFT,ewald_geometry);
	   
  /* MFlops accounting */
  init_nrnb(&nrnb);
  
  /* Initialize the work division */
  calc_nsb(stdlog,&(top.blocks[ebCGS]),cr->nnodes,nsb,0);
  nsb->nodeid = cr->nodeid;
  print_nsb(stdlog,"pmetest",nsb);  

  /* Initialize the force record */
  mdatoms = atoms2md(stdlog,&top.atoms,ir->opts.nFreeze,ir->eI,
		     ir->delta_t,0,ir->opts.tau_t,FALSE,FALSE);
  snew(fr,1);
  init_forcerec(stdlog,fr,ir,&top,cr,mdatoms,nsb,box,FALSE,NULL,NULL,FALSE);
  
  /* First do PME based on coordinates in tpr file, send them to
   * other processors if needed.
   */
  if (MASTER(cr))
    fprintf(stdlog,"-----\n"
	    "Results based on tpr file %s\n",ftp2fn(efTPX,NFILE,fnm));
#ifdef GMX_MPI
  if (PAR(cr)) {
    MPI_Bcast(x[0],natoms*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
    MPI_Bcast(box[0],DIM*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
    MPI_Bcast(&t,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
  }
#endif
  do_my_pme(stdlog,0,bVerbose,ir,x,xbuf,f,charge,qbuf,qqbuf,box,bSort,
	    cr,nsb,&nrnb,&(top.atoms.excl),qtot,fr,index,NULL,
	    bGroups ? ir->opts.ngener : 1,mdatoms->cENER);

  /* If we have a trajectory file, read all of its frames and compute
   * the PME energy for each of them.
   */
  if (ftp2bSet(efTRX,NFILE,fnm)) {
    fprintf(stdlog,"-----\n"
	    "Results based on trx file %s\n",ftp2fn(efTRX,NFILE,fnm));
    if (MASTER(cr)) {
      sfree(x);
      natoms = read_first_x(&status,ftp2fn(efTRX,NFILE,fnm),&t,&x,box); 
      if (natoms != top.atoms.nr)
	gmx_fatal(FARGS,"natoms in trx = %d, in tpr = %d",natoms,top.atoms.nr);
      fp = xvgropen(ftp2fn(efXVG,NFILE,fnm),"PME Energy","Time (ps)","E (kJ/mol)");
    }
    else
      fp = NULL;
    do {
      /* Send coordinates, box and time to the other nodes */
#ifdef GMX_MPI
      if (PAR(cr)) {
	MPI_Bcast(x[0],natoms*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
	MPI_Bcast(box[0],DIM*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
	MPI_Bcast(&t,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
      }
#endif
      rm_pbc(&top.idef,nsb->natoms,box,x,x);
      /* Call the PME wrapper function */
      do_my_pme(stdlog,t,bVerbose,ir,x,xbuf,f,charge,qbuf,qqbuf,box,bSort,cr,
		nsb,&nrnb,&(top.atoms.excl),qtot,fr,index,fp,
		bGroups ? ir->opts.ngener : 1,mdatoms->cENER);
      /* Only the master processor reads more data */
      if (MASTER(cr))
          bCont = read_next_x(status,&t,natoms,x,box);
      /* Check whether we need to continue */
#ifdef GMX_MPI
      if (PAR(cr))
          MPI_Bcast(&bCont,1,MPI_INT,root,MPI_COMM_WORLD);
#endif
      
    } while (bCont);
    
    /* Finish I/O, close files */
    if (MASTER(cr)) {
      close_trx(status);
      ffclose(fp);
    }
  }
  
  if (bVerbose) {
    /* Print the final flop accounting; may be useful when debugging */
    fprintf(stdlog,"-----\n");
    print_nrnb(stdlog,&nrnb);
  }
  
  /* Finish the parallel stuff */  
  if (gmx_parallel_env_initialized())
    gmx_finalize(cr);

  /* Thank the audience, as usual */
  if (MASTER(cr)) 
    thanx(stderr);

  return 0;
}
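
The trajectory loop in this example follows a common master/worker pattern: only the
master rank reads the next frame, and a broadcast of the continue flag keeps every
rank in lockstep; in pmetest the same broadcasts also distribute the coordinates,
box and time before each do_my_pme call. The minimal, self-contained sketch below
illustrates only that control flow; read_next_frame is a placeholder standing in
for read_next_x, not part of GROMACS.

#include <mpi.h>
#include <stdio.h>

/* Placeholder for the trajectory reader (read_next_x in pmetest);
 * returns 0 when no frames are left. Only the master calls it.
 */
static int read_next_frame(void)
{
  static int frames_left = 3;   /* pretend the trajectory has three frames */
  return frames_left-- > 0;
}

int main(int argc,char *argv[])
{
  int rank,root=0,cont=1;

  MPI_Init(&argc,&argv);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);

  do {
    /* ... per-frame work goes here (broadcast x/box/t, call the PME wrapper) ... */
    if (rank == root)
      cont = read_next_frame();                        /* only the master reads  */
    MPI_Bcast(&cont,1,MPI_INT,root,MPI_COMM_WORLD);    /* keep all ranks in step */
  } while (cont);

  if (rank == root)
    printf("done\n");
  MPI_Finalize();
  return 0;
}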