Example #1
0
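/* Build the SHAKE constraint blocks for domain decomposition: sblock[]
 * records the iatom offset at which a new charge group's constraints
 * begin, closed by a final entry of 3*ncons.
 */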
static void make_shake_sblock_dd(struct gmx_constr *constr,
                                 t_ilist *ilcon, t_block *cgs,
                                 gmx_domdec_t *dd)
{
    int      ncons, c, cg;
    t_iatom *iatom;

    if (dd->ncg_home+1 > constr->sblock_nalloc)
    {
        constr->sblock_nalloc = over_alloc_dd(dd->ncg_home+1);
        srenew(constr->sblock, constr->sblock_nalloc);
    }

    ncons           = ilcon->nr/3;
    iatom           = ilcon->iatoms;
    constr->nblocks = 0;
    cg              = 0;
    for (c = 0; c < ncons; c++)
    {
        if (c == 0 || iatom[1] >= cgs->index[cg+1])
        {
            constr->sblock[constr->nblocks++] = 3*c;
            while (iatom[1] >= cgs->index[cg+1])
            {
                cg++;
            }
        }
        iatom += 3;
    }
    constr->sblock[constr->nblocks] = 3*ncons;
}
Example #2
0
static void make_local_pull_group(gmx_ga2la_t ga2la,
				  t_pullgrp *pg,int start,int end)
{
  int i,ii;

  pg->nat_loc = 0;
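  /* Collect the pull-group atoms that are local to this rank; with domain
   * decomposition, ga2la maps global atom indices to local ones.
   */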
  for(i=0; i<pg->nat; i++) {
    ii = pg->ind[i];
    if (ga2la) {
      if (!ga2la_get_home(ga2la,ii,&ii)) {
        ii = -1;
      }
    }
    if (ii >= start && ii < end) {
      /* This is a home atom, add it to the local pull group */
      if (pg->nat_loc >= pg->nalloc_loc) {
	pg->nalloc_loc = over_alloc_dd(pg->nat_loc+1);
	srenew(pg->ind_loc,pg->nalloc_loc);
	if (pg->epgrppbc == epgrppbcCOS || pg->weight) {
	  srenew(pg->weight_loc,pg->nalloc_loc);
	}
      }
      pg->ind_loc[pg->nat_loc] = ii;
      if (pg->weight) {
	  pg->weight_loc[pg->nat_loc] = pg->weight[i];
      }
      pg->nat_loc++;
    }
  }
}
Example #3
0
void gmx_pme_receive_f(t_commrec *cr,
		       rvec f[], matrix vir, 
		       real *energy, real *dvdlambda,
		       float *pme_cycles)
{
  int natoms,i;

#ifdef GMX_PME_DELAYED_WAIT
  /* Wait for the x request to finish */
  gmx_pme_send_q_x_wait(cr->dd);
#endif

  natoms = cr->dd->nat_home;

  if (natoms > cr->dd->pme_recv_f_alloc)
  {
      cr->dd->pme_recv_f_alloc = over_alloc_dd(natoms);
      srenew(cr->dd->pme_recv_f_buf, cr->dd->pme_recv_f_alloc);
  }

#ifdef GMX_MPI  
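  /* Blocking receive of the PME forces (natoms rvecs, sent as raw bytes)
   * from the corresponding PME rank.
   */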
  MPI_Recv(cr->dd->pme_recv_f_buf[0], 
           natoms*sizeof(rvec),MPI_BYTE,
	   cr->dd->pme_nodeid,0,cr->mpi_comm_mysim,
	   MPI_STATUS_IGNORE);
#endif

  for(i=0; i<natoms; i++)
      rvec_inc(f[i],cr->dd->pme_recv_f_buf[i]);

  
  receive_virial_energy(cr,vir,energy,dvdlambda,pme_cycles);
}
static void do_update_sd1(t_gmx_stochd *sd,
			  int start,int homenr,double dt,
			  rvec accel[],ivec nFreeze[],
			  real invmass[],unsigned short ptype[],
			  unsigned short cFREEZE[],unsigned short cACC[],
			  unsigned short cTC[],
			  rvec x[],rvec xprime[],rvec v[],rvec f[],
			  rvec sd_X[],
			  int ngtc,real tau_t[],real ref_t[])
{
  gmx_sd_const_t *sdc;
  gmx_sd_sigma_t *sig;
  gmx_rng_t gaussrand;
  real   kT;
  int    gf=0,ga=0,gt=0;
  real   ism,sd_V;
  int    n,d;

  sdc = sd->sdc;
  sig = sd->sdsig;
  if (homenr > sd->sd_V_nalloc) {
    sd->sd_V_nalloc = over_alloc_dd(homenr);
    srenew(sd->sd_V,sd->sd_V_nalloc);
  }
  gaussrand = sd->gaussrand;
  
  for(n=0; n<ngtc; n++) {
    kT = BOLTZ*ref_t[n];
    /* The mass is accounted for later, since this differs per atom */
    sig[n].V  = sqrt(2*kT*(1 - sdc[n].em));
  }

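  /* SD1 update: friction through sdc[gt].em, the deterministic
   * force/acceleration contribution and a Gaussian random kick sd_V,
   * followed by an Euler step for the positions.
   */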
  for(n=start; n<start+homenr; n++) {
    ism = sqrt(invmass[n]);
    if (cFREEZE)
      gf  = cFREEZE[n];
    if (cACC)
      ga  = cACC[n];
    if (cTC)
      gt  = cTC[n];

    for(d=0; d<DIM; d++) {
      if((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d]) {
	sd_V = ism*sig[gt].V*gmx_rng_gaussian_table(gaussrand);
	
	v[n][d] = v[n][d]*sdc[gt].em 
	  + (invmass[n]*f[n][d] + accel[ga][d])*tau_t[gt]*(1 - sdc[gt].em)
	  + sd_V;

	xprime[n][d] = x[n][d] + v[n][d]*dt;
      } else {
	v[n][d]      = 0.0;
	xprime[n][d] = x[n][d];
      }
    }
  }
}
Example #5
0
void make_local_shells(t_commrec *cr,t_mdatoms *md,
		       struct gmx_shellfc *shfc)
{
  t_shell *shell;
  int a0,a1,*ind,nshell,i;
  gmx_domdec_t *dd=NULL;

  if (PAR(cr)) {
    if (DOMAINDECOMP(cr)) {
      dd = cr->dd;
      a0 = 0;
      a1 = dd->nat_home;
    } else {
      pd_at_range(cr,&a0,&a1);
    }
  } else {
    /* Single node: we need all shells, just copy the pointer */
    shfc->nshell = shfc->nshell_gl;
    shfc->shell  = shfc->shell_gl;
    
    return;
  }

  ind = shfc->shell_index_gl;

  nshell = 0;
  shell  = shfc->shell; 
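  /* Collect the shell particles in the local atom range, growing the
   * shell array with over_alloc_dd() headroom when needed.
   */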
  for(i=a0; i<a1; i++) {
    if (md->ptype[i] == eptShell) {
      if (nshell+1 > shfc->shell_nalloc) {
	shfc->shell_nalloc = over_alloc_dd(nshell+1);
	srenew(shell,shfc->shell_nalloc);
      }
      if (dd) {
	shell[nshell] = shfc->shell_gl[ind[dd->gatindex[i]]];
      } else {
	shell[nshell] = shfc->shell_gl[ind[i]];
      }
      /* With inter-cg shells we cannot do shell prediction,
       * so we do not need the nuclei numbers.
       */
      if (!shfc->bInterCG) {
	shell[nshell].nucl1   = i + shell[nshell].nucl1 - shell[nshell].shell;
	if (shell[nshell].nnucl > 1)
	  shell[nshell].nucl2 = i + shell[nshell].nucl2 - shell[nshell].shell;
	if (shell[nshell].nnucl > 2)
	  shell[nshell].nucl3 = i + shell[nshell].nucl3 - shell[nshell].shell;
      }
      shell[nshell].shell = i;
      nshell++;
    }
  }

  shfc->nshell = nshell;
  shfc->shell  = shell;
}
Example #6
0
/* Select the indices of the group's atoms which are local and store them in 
 * anrs_loc[0..nr_loc]. The indices are saved in coll_ind[] for later reduction
 * in communicate_group_positions()
 */
extern void dd_make_local_group_indices(
        gmx_ga2la_t   ga2la,
        const int     nr,          /* IN:  Total number of atoms in the group */
        int           anrs[],      /* IN:  Global atom numbers of the group's atoms */
        int           *nr_loc,     /* OUT: Number of group atoms found locally */
        int           *anrs_loc[], /* OUT: Local atom numbers of the group  */
        int           *nalloc_loc, /* IN+OUT: Allocation size of anrs_loc */
        int           coll_ind[])  /* OUT (opt): Where is this position found in the collective array? */
{
    int  i,ii;
    int  localnr;       

    
    /* Loop over all the atom indices of the group to check
     * which ones are on the local node */
    localnr = 0;
    for(i=0; i<nr; i++)
    {
        if (ga2la_get_home(ga2la,anrs[i],&ii))
        {
            /* The atom with this index is a home atom */
            if (localnr >= *nalloc_loc) /* Check whether memory suffices */
            {
                *nalloc_loc = over_alloc_dd(localnr+1);
                /* We never need more memory than the number of atoms in the group */
                *nalloc_loc = MIN(*nalloc_loc, nr);
                srenew(*anrs_loc,*nalloc_loc);
            }
            /* Save the atom's index in the local atom numbers array */
            (*anrs_loc)[localnr] = ii;

            if (coll_ind != NULL)
            {
                /* Keep track of where this local atom belongs in the collective index array.
                 * This is needed when reducing the local arrays to a collective/global array
                 * in communicate_group_positions */
                coll_ind[localnr] = i;
            }

            /* add one to the local atom count */
            localnr++;
        }
    }
 
    /* Return the number of local atoms that were found */
    *nr_loc = localnr;
}
Example #7
0
static void set_grid_ncg(t_grid *grid, int ncg)
{
    int nr_old, i;

    grid->nr = ncg;
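    /* Grow cell_index and a with over_alloc_dd() headroom; newly added
     * cell_index entries are cleared so they are valid before being filled.
     */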
    if (grid->nr+1 > grid->nr_alloc)
    {
        nr_old         = grid->nr_alloc;
        grid->nr_alloc = over_alloc_dd(grid->nr) + 1;
        srenew(grid->cell_index, grid->nr_alloc);
        for (i = nr_old; i < grid->nr_alloc; i++)
        {
            grid->cell_index[i] = 0;
        }
        srenew(grid->a, grid->nr_alloc);
    }
}
void
do_redist_pos_coeffs(struct gmx_pme_t *pme, t_commrec *cr, int start, int homenr,
                     gmx_bool bFirst, rvec x[], real *data)
{
    int             d;
    pme_atomcomm_t *atc;
    atc = &pme->atc[0];

    for (d = pme->ndecompdim - 1; d >= 0; d--)
    {
        int             n_d;
        rvec           *x_d;
        real           *param_d;

        if (d == pme->ndecompdim - 1)
        {
            n_d     = homenr;
            x_d     = x + start;
            param_d = data;
        }
        else
        {
            n_d     = pme->atc[d + 1].n;
            x_d     = atc->x;
            param_d = atc->coefficient;
        }
        atc      = &pme->atc[d];
        atc->npd = n_d;
        if (atc->npd > atc->pd_nalloc)
        {
            atc->pd_nalloc = over_alloc_dd(atc->npd);
            srenew(atc->pd, atc->pd_nalloc);
        }
        pme_calc_pidx_wrapper(n_d, pme->recipbox, x_d, atc);
        where();
        /* Redistribute x (only once) and qA/c6A or qB/c6B */
        if (DOMAINDECOMP(cr))
        {
            dd_pmeredist_pos_coeffs(pme, n_d, bFirst, x_d, param_d, atc);
        }
    }
}
void pme_realloc_atomcomm_things(pme_atomcomm_t *atc)
{
    int nalloc_old, i, j, nalloc_tpl;

    /* We have to avoid a NULL pointer for atc->x to prevent
     * possible fatal errors in MPI routines.
     */
    if (atc->n > atc->nalloc || atc->nalloc == 0)
    {
        nalloc_old  = atc->nalloc;
        atc->nalloc = over_alloc_dd(max(atc->n, 1));

        if (atc->nslab > 1)
        {
            srenew(atc->x, atc->nalloc);
            srenew(atc->coefficient, atc->nalloc);
            srenew(atc->f, atc->nalloc);
            for (i = nalloc_old; i < atc->nalloc; i++)
            {
                clear_rvec(atc->f[i]);
            }
        }
        if (atc->bSpread)
        {
            srenew(atc->fractx, atc->nalloc);
            srenew(atc->idx, atc->nalloc);

            if (atc->nthread > 1)
            {
                srenew(atc->thread_idx, atc->nalloc);
            }

            for (i = 0; i < atc->nthread; i++)
            {
                pme_realloc_splinedata(&atc->spline[i], atc);
            }
        }
    }
}
Example #10
0
void set_constraints(struct gmx_constr *constr,
		     gmx_localtop_t *top,t_inputrec *ir,
		     t_mdatoms *md,gmx_domdec_t *dd)
{
  t_idef *idef;
  int    ncons;

  if (constr->ncon_tot > 0) {
    idef = &top->idef;
    
    ncons = idef->il[F_CONSTR].nr/3;
    
    /* With DD we might also need to call LINCS with ncons=0 for communicating
     * coordinates to other nodes that do have constraints.
     */
    if (ir->eConstrAlg == econtLINCS) {
      set_lincs(idef,md,EI_DYNAMICS(ir->eI),dd,constr->lincsd);
    }
    if (ir->eConstrAlg == econtSHAKE) {
      if (dd) {
	make_shake_sblock_dd(constr,&idef->il[F_CONSTR],&top->cgs,dd);
      } else {
	make_shake_sblock_pd(constr,idef,md);
      }
      if (ncons > constr->lagr_nalloc) {
	constr->lagr_nalloc = over_alloc_dd(ncons);
	srenew(constr->lagr,constr->lagr_nalloc);
      }
    }
  }

  /* Make a selection of the local atoms for essential dynamics */
  if (constr->ed && dd) {
    dd_make_local_ed_indices(dd,constr->ed,md);
  }
}
Example #11
0
static void init_adir(FILE *log, gmx_shellfc_t shfc,
                      gmx_constr_t constr, t_idef *idef, t_inputrec *ir,
                      t_commrec *cr, int dd_ac1,
                      gmx_int64_t step, t_mdatoms *md, int start, int end,
                      rvec *x_old, rvec *x_init, rvec *x,
                      rvec *f, rvec *acc_dir,
                      gmx_bool bMolPBC, matrix box,
                      real *lambda, real *dvdlambda, t_nrnb *nrnb)
{
    rvec           *xnold, *xnew;
    double          w_dt;
    int             gf, ga, gt;
    real            dt, scale;
    int             n, d;
    unsigned short *ptype;
    rvec            p, dx;

    if (DOMAINDECOMP(cr))
    {
        n = dd_ac1;
    }
    else
    {
        n = end - start;
    }
    if (n > shfc->adir_nalloc)
    {
        shfc->adir_nalloc = over_alloc_dd(n);
        srenew(shfc->adir_xnold, shfc->adir_nalloc);
        srenew(shfc->adir_xnew, shfc->adir_nalloc);
    }
    xnold = shfc->adir_xnold;
    xnew  = shfc->adir_xnew;

    ptype = md->ptype;

    dt = ir->delta_t;

    /* Does NOT work with freeze or acceleration groups (yet) */
    for (n = start; n < end; n++)
    {
        w_dt = md->invmass[n]*dt;

        for (d = 0; d < DIM; d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell))
            {
                xnold[n-start][d] = x[n][d] - (x_init[n][d] - x_old[n][d]);
                xnew[n-start][d]  = 2*x[n][d] - x_old[n][d] + f[n][d]*w_dt*dt;
            }
            else
            {
                xnold[n-start][d] = x[n][d];
                xnew[n-start][d]  = x[n][d];
            }
        }
    }
    constrain(log, FALSE, FALSE, constr, idef, ir, NULL, cr, step, 0, md,
              x, xnold-start, NULL, bMolPBC, box,
              lambda[efptBONDED], &(dvdlambda[efptBONDED]),
              NULL, NULL, nrnb, econqCoord, FALSE, 0, 0);
    constrain(log, FALSE, FALSE, constr, idef, ir, NULL, cr, step, 0, md,
              x, xnew-start, NULL, bMolPBC, box,
              lambda[efptBONDED], &(dvdlambda[efptBONDED]),
              NULL, NULL, nrnb, econqCoord, FALSE, 0, 0);

    for (n = start; n < end; n++)
    {
        for (d = 0; d < DIM; d++)
        {
            xnew[n-start][d] =
                -(2*x[n][d]-xnold[n-start][d]-xnew[n-start][d])/sqr(dt)
                - f[n][d]*md->invmass[n];
        }
        clear_rvec(acc_dir[n]);
    }

    /* Project the acceleration on the old bond directions */
    constrain(log, FALSE, FALSE, constr, idef, ir, NULL, cr, step, 0, md,
              x_old, xnew-start, acc_dir, bMolPBC, box,
              lambda[efptBONDED], &(dvdlambda[efptBONDED]),
              NULL, NULL, nrnb, econqDeriv_FlexCon, FALSE, 0, 0);
}
Example #12
0
int relax_shell_flexcon(FILE *fplog, t_commrec *cr, gmx_bool bVerbose,
                        gmx_int64_t mdstep, t_inputrec *inputrec,
                        gmx_bool bDoNS, int force_flags,
                        gmx_localtop_t *top,
                        gmx_constr_t constr,
                        gmx_enerdata_t *enerd, t_fcdata *fcd,
                        t_state *state, rvec f[],
                        tensor force_vir,
                        t_mdatoms *md,
                        t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                        t_graph *graph,
                        gmx_groups_t *groups,
                        struct gmx_shellfc *shfc,
                        t_forcerec *fr,
                        gmx_bool bBornRadii,
                        double t, rvec mu_tot,
                        gmx_bool *bConverged,
                        gmx_vsite_t *vsite,
                        FILE *fp_field)
{
    int        nshell;
    t_shell   *shell;
    t_idef    *idef;
    rvec      *pos[2], *force[2], *acc_dir = NULL, *x_old = NULL;
    real       Epot[2], df[2];
    rvec       dx;
    real       sf_dir, invdt;
    real       ftol, xiH, xiS, dum = 0;
    char       sbuf[22];
    gmx_bool   bCont, bInit;
    int        nat, dd_ac0, dd_ac1 = 0, i;
    int        start = 0, homenr = md->homenr, end = start+homenr, cg0, cg1;
    int        nflexcon, g, number_steps, d, Min = 0, count = 0;
#define  Try (1-Min)             /* At start Try = 1 */

    bCont        = (mdstep == inputrec->init_step) && inputrec->bContinuation;
    bInit        = (mdstep == inputrec->init_step) || shfc->bRequireInit;
    ftol         = inputrec->em_tol;
    number_steps = inputrec->niter;
    nshell       = shfc->nshell;
    shell        = shfc->shell;
    nflexcon     = shfc->nflexcon;

    idef = &top->idef;

    if (DOMAINDECOMP(cr))
    {
        nat = dd_natoms_vsite(cr->dd);
        if (nflexcon > 0)
        {
            dd_get_constraint_range(cr->dd, &dd_ac0, &dd_ac1);
            nat = max(nat, dd_ac1);
        }
    }
    else
    {
        nat = state->natoms;
    }

    if (nat > shfc->x_nalloc)
    {
        /* Allocate local arrays */
        shfc->x_nalloc = over_alloc_dd(nat);
        for (i = 0; (i < 2); i++)
        {
            srenew(shfc->x[i], shfc->x_nalloc);
            srenew(shfc->f[i], shfc->x_nalloc);
        }
    }
    for (i = 0; (i < 2); i++)
    {
        pos[i]   = shfc->x[i];
        force[i] = shfc->f[i];
    }

    /* When we had particle decomposition, this code only worked with
     * PD when all particles involved with each shell were in the same
     * charge group. Not sure if this is still relevant. */
    if (bDoNS && inputrec->ePBC != epbcNONE && !DOMAINDECOMP(cr))
    {
        /* This is the only time where the coordinates are used
         * before do_force is called, which normally puts all
         * charge groups in the box.
         */
        cg0 = 0;
        cg1 = top->cgs.nr;
        put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, state->box,
                                 &(top->cgs), state->x, fr->cg_cm);
        if (graph)
        {
            mk_mshift(fplog, graph, fr->ePBC, state->box, state->x);
        }
    }

    /* After this all coordinate arrays will contain whole molecules */
    if (graph)
    {
        shift_self(graph, state->box, state->x);
    }

    if (nflexcon)
    {
        if (nat > shfc->flex_nalloc)
        {
            shfc->flex_nalloc = over_alloc_dd(nat);
            srenew(shfc->acc_dir, shfc->flex_nalloc);
            srenew(shfc->x_old, shfc->flex_nalloc);
        }
        acc_dir = shfc->acc_dir;
        x_old   = shfc->x_old;
        for (i = 0; i < homenr; i++)
        {
            for (d = 0; d < DIM; d++)
            {
                shfc->x_old[i][d] =
                    state->x[start+i][d] - state->v[start+i][d]*inputrec->delta_t;
            }
        }
    }

    /* Do a prediction of the shell positions */
    if (shfc->bPredict && !bCont)
    {
        predict_shells(fplog, state->x, state->v, inputrec->delta_t, nshell, shell,
                       md->massT, NULL, bInit);
    }

    /* do_force expects the charge groups to be in the box */
    if (graph)
    {
        unshift_self(graph, state->box, state->x);
    }

    /* Calculate the forces first time around */
    if (gmx_debug_at)
    {
        pr_rvecs(debug, 0, "x b4 do_force", state->x + start, homenr);
    }
    do_force(fplog, cr, inputrec, mdstep, nrnb, wcycle, top, groups,
             state->box, state->x, &state->hist,
             force[Min], force_vir, md, enerd, fcd,
             state->lambda, graph,
             fr, vsite, mu_tot, t, fp_field, NULL, bBornRadii,
             (bDoNS ? GMX_FORCE_NS : 0) | force_flags);

    sf_dir = 0;
    if (nflexcon)
    {
        init_adir(fplog, shfc,
                  constr, idef, inputrec, cr, dd_ac1, mdstep, md, start, end,
                  shfc->x_old-start, state->x, state->x, force[Min],
                  shfc->acc_dir-start,
                  fr->bMolPBC, state->box, state->lambda, &dum, nrnb);

        for (i = start; i < end; i++)
        {
            sf_dir += md->massT[i]*norm2(shfc->acc_dir[i-start]);
        }
    }

    Epot[Min] = enerd->term[F_EPOT];

    df[Min] = rms_force(cr, shfc->f[Min], nshell, shell, nflexcon, &sf_dir, &Epot[Min]);
    df[Try] = 0;
    if (debug)
    {
        fprintf(debug, "df = %g  %g\n", df[Min], df[Try]);
    }

    if (gmx_debug_at)
    {
        pr_rvecs(debug, 0, "force0", force[Min], md->nr);
    }

    if (nshell+nflexcon > 0)
    {
        /* Copy x to pos[Min] & pos[Try]: during minimization only the
         * shell positions are updated, therefore the other particles must
         * be set here.
         */
        memcpy(pos[Min], state->x, nat*sizeof(state->x[0]));
        memcpy(pos[Try], state->x, nat*sizeof(state->x[0]));
    }

    if (bVerbose && MASTER(cr))
    {
        print_epot(stdout, mdstep, 0, Epot[Min], df[Min], nflexcon, sf_dir);
    }

    if (debug)
    {
        fprintf(debug, "%17s: %14.10e\n",
                interaction_function[F_EKIN].longname, enerd->term[F_EKIN]);
        fprintf(debug, "%17s: %14.10e\n",
                interaction_function[F_EPOT].longname, enerd->term[F_EPOT]);
        fprintf(debug, "%17s: %14.10e\n",
                interaction_function[F_ETOT].longname, enerd->term[F_ETOT]);
        fprintf(debug, "SHELLSTEP %s\n", gmx_step_str(mdstep, sbuf));
    }

    /* First check whether we should do shells, or whether the force is
     * low enough even without minimization.
     */
    *bConverged = (df[Min] < ftol);

    for (count = 1; (!(*bConverged) && (count < number_steps)); count++)
    {
        if (vsite)
        {
            construct_vsites(vsite, pos[Min], inputrec->delta_t, state->v,
                             idef->iparams, idef->il,
                             fr->ePBC, fr->bMolPBC, cr, state->box);
        }

        if (nflexcon)
        {
            init_adir(fplog, shfc,
                      constr, idef, inputrec, cr, dd_ac1, mdstep, md, start, end,
                      x_old-start, state->x, pos[Min], force[Min], acc_dir-start,
                      fr->bMolPBC, state->box, state->lambda, &dum, nrnb);

            directional_sd(pos[Min], pos[Try], acc_dir-start, start, end,
                           fr->fc_stepsize);
        }

        /* New positions, Steepest descent */
        shell_pos_sd(pos[Min], pos[Try], force[Min], nshell, shell, count);

        /* do_force expects the charge groups to be in the box */
        if (graph)
        {
            unshift_self(graph, state->box, pos[Try]);
        }

        if (gmx_debug_at)
        {
            pr_rvecs(debug, 0, "RELAX: pos[Min]  ", pos[Min] + start, homenr);
            pr_rvecs(debug, 0, "RELAX: pos[Try]  ", pos[Try] + start, homenr);
        }
        /* Try the new positions */
        do_force(fplog, cr, inputrec, 1, nrnb, wcycle,
                 top, groups, state->box, pos[Try], &state->hist,
                 force[Try], force_vir,
                 md, enerd, fcd, state->lambda, graph,
                 fr, vsite, mu_tot, t, fp_field, NULL, bBornRadii,
                 force_flags);

        if (gmx_debug_at)
        {
            pr_rvecs(debug, 0, "RELAX: force[Min]", force[Min] + start, homenr);
            pr_rvecs(debug, 0, "RELAX: force[Try]", force[Try] + start, homenr);
        }
        sf_dir = 0;
        if (nflexcon)
        {
            init_adir(fplog, shfc,
                      constr, idef, inputrec, cr, dd_ac1, mdstep, md, start, end,
                      x_old-start, state->x, pos[Try], force[Try], acc_dir-start,
                      fr->bMolPBC, state->box, state->lambda, &dum, nrnb);

            for (i = start; i < end; i++)
            {
                sf_dir += md->massT[i]*norm2(acc_dir[i-start]);
            }
        }

        Epot[Try] = enerd->term[F_EPOT];

        df[Try] = rms_force(cr, force[Try], nshell, shell, nflexcon, &sf_dir, &Epot[Try]);

        if (debug)
        {
            fprintf(debug, "df = %g  %g\n", df[Min], df[Try]);
        }

        if (debug)
        {
            if (gmx_debug_at)
            {
                pr_rvecs(debug, 0, "F na do_force", force[Try] + start, homenr);
            }
            if (gmx_debug_at)
            {
                fprintf(debug, "SHELL ITER %d\n", count);
                dump_shells(debug, pos[Try], force[Try], ftol, nshell, shell);
            }
        }

        if (bVerbose && MASTER(cr))
        {
            print_epot(stdout, mdstep, count, Epot[Try], df[Try], nflexcon, sf_dir);
        }

        *bConverged = (df[Try] < ftol);

        if ((df[Try] < df[Min]))
        {
            if (debug)
            {
                fprintf(debug, "Swapping Min and Try\n");
            }
            if (nflexcon)
            {
                /* Correct the velocities for the flexible constraints */
                invdt = 1/inputrec->delta_t;
                for (i = start; i < end; i++)
                {
                    for (d = 0; d < DIM; d++)
                    {
                        state->v[i][d] += (pos[Try][i][d] - pos[Min][i][d])*invdt;
                    }
                }
            }
            Min  = Try;
        }
        else
        {
            decrease_step_size(nshell, shell);
        }
    }
    if (MASTER(cr) && !(*bConverged))
    {
        /* Note that the energies and virial are incorrect when not converged */
        if (fplog)
        {
            fprintf(fplog,
                    "step %s: EM did not converge in %d iterations, RMS force %.3f\n",
                    gmx_step_str(mdstep, sbuf), number_steps, df[Min]);
        }
        fprintf(stderr,
                "step %s: EM did not converge in %d iterations, RMS force %.3f\n",
                gmx_step_str(mdstep, sbuf), number_steps, df[Min]);
    }

    /* Copy back the coordinates and the forces */
    memcpy(state->x, pos[Min], nat*sizeof(state->x[0]));
    memcpy(f, force[Min], nat*sizeof(f[0]));

    return count;
}
static void do_update_sd2(t_gmx_stochd *sd,bool bInitStep,
			  int start,int homenr,
			  rvec accel[],ivec nFreeze[],
			  real invmass[],unsigned short ptype[],
			  unsigned short cFREEZE[],unsigned short cACC[],
			  unsigned short cTC[],
			  rvec x[],rvec xprime[],rvec v[],rvec f[],
			  rvec sd_X[],
			  int ngtc,real tau_t[],real ref_t[],
			  bool bFirstHalf)
{
  gmx_sd_const_t *sdc;
  gmx_sd_sigma_t *sig;
  /* The random part of the velocity update, generated in the first
   * half of the update, needs to be remembered for the second half.
   */
  rvec *sd_V;
  gmx_rng_t gaussrand;
  real   kT;
  int    gf=0,ga=0,gt=0;
  real   vn=0,Vmh,Xmh;
  real   ism;
  int    n,d;

  sdc = sd->sdc;
  sig = sd->sdsig;
  if (homenr > sd->sd_V_nalloc) {
    sd->sd_V_nalloc = over_alloc_dd(homenr);
    srenew(sd->sd_V,sd->sd_V_nalloc);
  }
  sd_V = sd->sd_V;
  gaussrand = sd->gaussrand;

  if(bFirstHalf) {
    for(n=0; n<ngtc; n++) {
      kT = BOLTZ*ref_t[n];
      /* The mass is accounted for later, since this differs per atom */
      sig[n].V  = sqrt(kT*(1-sdc[n].em));
      sig[n].X  = sqrt(kT*sqr(tau_t[n])*sdc[n].c);
      sig[n].Yv = sqrt(kT*sdc[n].b/sdc[n].c);
      sig[n].Yx = sqrt(kT*sqr(tau_t[n])*sdc[n].b/(1-sdc[n].em));
    }
  }

  for(n=start; n<start+homenr; n++) {
    ism = sqrt(invmass[n]);
    if (cFREEZE)
      gf  = cFREEZE[n];
    if (cACC)
      ga  = cACC[n];
    if (cTC)
      gt  = cTC[n];

    for(d=0; d<DIM; d++) {
      if(bFirstHalf) {
        vn             = v[n][d];
      }
      if((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d]) {
        if (bFirstHalf) {

          if (bInitStep)
            sd_X[n][d] = ism*sig[gt].X*gmx_rng_gaussian_table(gaussrand);

          Vmh = sd_X[n][d]*sdc[gt].d/(tau_t[gt]*sdc[gt].c) 
                + ism*sig[gt].Yv*gmx_rng_gaussian_table(gaussrand);
          sd_V[n-start][d] = ism*sig[gt].V*gmx_rng_gaussian_table(gaussrand);

          v[n][d] = vn*sdc[gt].em 
                    + (invmass[n]*f[n][d] + accel[ga][d])*tau_t[gt]*(1 - sdc[gt].em)
                    + sd_V[n-start][d] - sdc[gt].em*Vmh;

          xprime[n][d] = x[n][d] + v[n][d]*tau_t[gt]*(sdc[gt].eph - sdc[gt].emh); 

        } else {

          /* Correct the velocities for the constraints.
	   * This operation introduces some inaccuracy,
	   * since the velocity is determined from differences in coordinates.
	   */
          v[n][d] = 
          (xprime[n][d] - x[n][d])/(tau_t[gt]*(sdc[gt].eph - sdc[gt].emh));  

          Xmh = sd_V[n-start][d]*tau_t[gt]*sdc[gt].d/(sdc[gt].em-1) 
                + ism*sig[gt].Yx*gmx_rng_gaussian_table(gaussrand);
          sd_X[n][d] = ism*sig[gt].X*gmx_rng_gaussian_table(gaussrand);

          xprime[n][d] += sd_X[n][d] - Xmh;

        }
      } else {
        if(bFirstHalf) {
          v[n][d]        = 0.0;
          xprime[n][d]   = x[n][d];
        }
      }
    }
  }
}
Example #14
0
void set_constraints(struct gmx_constr *constr,
                     gmx_localtop_t *top, t_inputrec *ir,
                     t_mdatoms *md, t_commrec *cr)
{
    t_idef  *idef;
    int      ncons;
    t_ilist *settle;
    int      iO, iH;

    idef = &top->idef;

    if (constr->ncon_tot > 0)
    {
        /* We are using the local topology,
         * so there are only F_CONSTR constraints.
         */
        ncons = idef->il[F_CONSTR].nr/3;

        /* With DD we might also need to call LINCS with ncons=0 for
         * communicating coordinates to other nodes that do have constraints.
         */
        if (ir->eConstrAlg == econtLINCS)
        {
            set_lincs(idef, md, EI_DYNAMICS(ir->eI), cr, constr->lincsd);
        }
        if (ir->eConstrAlg == econtSHAKE)
        {
            if (cr->dd)
            {
                make_shake_sblock_dd(constr, &idef->il[F_CONSTR], &top->cgs, cr->dd);
            }
            else
            {
                make_shake_sblock_serial(constr, idef, md);
            }
            if (ncons > constr->lagr_nalloc)
            {
                constr->lagr_nalloc = over_alloc_dd(ncons);
                srenew(constr->lagr, constr->lagr_nalloc);
            }
        }
    }

    if (idef->il[F_SETTLE].nr > 0 && constr->settled == NULL)
    {
        settle          = &idef->il[F_SETTLE];
        iO              = settle->iatoms[1];
        iH              = settle->iatoms[2];
        constr->settled =
            settle_init(md->massT[iO], md->massT[iH],
                        md->invmass[iO], md->invmass[iH],
                        idef->iparams[settle->iatoms[0]].settle.doh,
                        idef->iparams[settle->iatoms[0]].settle.dhh);
    }

    /* Make a selection of the local atoms for essential dynamics */
    if (constr->ed && cr->dd)
    {
        dd_make_local_ed_indices(cr->dd, constr->ed);
    }
}
Example #15
0
int vec_shakef(FILE *fplog, gmx_shakedata_t shaked,
               real invmass[], int ncon,
               t_iparams ip[], t_iatom *iatom,
               real tol, rvec x[], rvec prime[], real omega,
               gmx_bool bFEP, real lambda, real scaled_lagrange_multiplier[],
               real invdt, rvec *v,
               gmx_bool bCalcVir, tensor vir_r_m_dr, int econq)
{
    rvec    *rij;
    real    *half_of_reduced_mass, *distance_squared_tolerance, *constraint_distance_squared;
    int      maxnit = 1000;
    int      nit    = 0, ll, i, j, d, d2, type;
    t_iatom *ia;
    real     L1;
    real     mm    = 0., tmp;
    int      error = 0;
    real     constraint_distance;

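    /* (Re)allocate the per-constraint work arrays with over_alloc_dd()
     * headroom.
     */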
    if (ncon > shaked->nalloc)
    {
        shaked->nalloc = over_alloc_dd(ncon);
        srenew(shaked->rij, shaked->nalloc);
        srenew(shaked->half_of_reduced_mass, shaked->nalloc);
        srenew(shaked->distance_squared_tolerance, shaked->nalloc);
        srenew(shaked->constraint_distance_squared, shaked->nalloc);
    }
    rij                          = shaked->rij;
    half_of_reduced_mass         = shaked->half_of_reduced_mass;
    distance_squared_tolerance   = shaked->distance_squared_tolerance;
    constraint_distance_squared  = shaked->constraint_distance_squared;

    L1   = 1.0-lambda;
    ia   = iatom;
    for (ll = 0; (ll < ncon); ll++, ia += 3)
    {
        type  = ia[0];
        i     = ia[1];
        j     = ia[2];

        mm                       = 2.0*(invmass[i]+invmass[j]);
        rij[ll][XX]              = x[i][XX]-x[j][XX];
        rij[ll][YY]              = x[i][YY]-x[j][YY];
        rij[ll][ZZ]              = x[i][ZZ]-x[j][ZZ];
        half_of_reduced_mass[ll] = 1.0/mm;
        if (bFEP)
        {
            constraint_distance = L1*ip[type].constr.dA + lambda*ip[type].constr.dB;
        }
        else
        {
            constraint_distance = ip[type].constr.dA;
        }
        constraint_distance_squared[ll]  = gmx::square(constraint_distance);
        distance_squared_tolerance[ll]   = 0.5/(constraint_distance_squared[ll]*tol);
    }

    switch (econq)
    {
        case econqCoord:
            cshake(iatom, ncon, &nit, maxnit, constraint_distance_squared, prime[0], rij[0], half_of_reduced_mass, omega, invmass, distance_squared_tolerance, scaled_lagrange_multiplier, &error);
            break;
        case econqVeloc:
            crattle(iatom, ncon, &nit, maxnit, constraint_distance_squared, prime[0], rij[0], half_of_reduced_mass, omega, invmass, distance_squared_tolerance, scaled_lagrange_multiplier, &error, invdt);
            break;
    }

    if (nit >= maxnit)
    {
        if (fplog)
        {
            fprintf(fplog, "Shake did not converge in %d steps\n", maxnit);
        }
        fprintf(stderr, "Shake did not converge in %d steps\n", maxnit);
        nit = 0;
    }
    else if (error != 0)
    {
        if (fplog)
        {
            fprintf(fplog, "Inner product between old and new vector <= 0.0!\n"
                    "constraint #%d atoms %d and %d\n",
                    error-1, iatom[3*(error-1)+1]+1, iatom[3*(error-1)+2]+1);
        }
        fprintf(stderr, "Inner product between old and new vector <= 0.0!\n"
                "constraint #%d atoms %d and %d\n",
                error-1, iatom[3*(error-1)+1]+1, iatom[3*(error-1)+2]+1);
        nit = 0;
    }

    /* Constraint virial and correct the Lagrange multipliers for the length */

    ia = iatom;

    for (ll = 0; (ll < ncon); ll++, ia += 3)
    {
        type  = ia[0];
        i     = ia[1];
        j     = ia[2];

        if ((econq == econqCoord) && v != NULL)
        {
            /* Correct the velocities */
            mm = scaled_lagrange_multiplier[ll]*invmass[i]*invdt;
            for (d = 0; d < DIM; d++)
            {
                v[ia[1]][d] += mm*rij[ll][d];
            }
            mm = scaled_lagrange_multiplier[ll]*invmass[j]*invdt;
            for (d = 0; d < DIM; d++)
            {
                v[ia[2]][d] -= mm*rij[ll][d];
            }
            /* 16 flops */
        }

        /* constraint virial */
        if (bCalcVir)
        {
            mm = scaled_lagrange_multiplier[ll];
            for (d = 0; d < DIM; d++)
            {
                tmp = mm*rij[ll][d];
                for (d2 = 0; d2 < DIM; d2++)
                {
                    vir_r_m_dr[d][d2] -= tmp*rij[ll][d2];
                }
            }
            /* 21 flops */
        }

        /* cshake and crattle produce Lagrange multipliers scaled by
           the reciprocal of the constraint length, so fix that */
        if (bFEP)
        {
            constraint_distance = L1*ip[type].constr.dA + lambda*ip[type].constr.dB;
        }
        else
        {
            constraint_distance = ip[type].constr.dA;
        }
        scaled_lagrange_multiplier[ll] *= constraint_distance;
    }

    return nit;
}
int setup_specat_communication(gmx_domdec_t               *dd,
                               ind_req_t                  *ireq,
                               gmx_domdec_specat_comm_t   *spac,
                               gmx_hash_t                 *ga2la_specat,
                               int                         at_start,
                               int                         vbuf_fac,
                               const char                 *specat_type,
                               const char                 *add_err)
{
    int               nsend[2], nlast, nsend_zero[2] = {0, 0}, *nsend_ptr;
    int               d, dim, ndir, dir, nr, ns, i, nrecv_local, n0, start, indr, ind, buf[2];
    int               nat_tot_specat, nat_tot_prev, nalloc_old;
    gmx_bool          bPBC;
    gmx_specatsend_t *spas;

    if (debug)
    {
        fprintf(debug, "Begin setup_specat_communication for %s\n", specat_type);
    }

    /* nsend[0]: the number of atoms requested by this node only,
     *           we communicate this for more efficient checks
     * nsend[1]: the total number of requested atoms
     */
    nsend[0] = ireq->n;
    nsend[1] = nsend[0];
    nlast    = nsend[1];
    for (d = dd->ndim-1; d >= 0; d--)
    {
        /* Pulse the grid forward and backward */
        dim  = dd->dim[d];
        bPBC = (dim < dd->npbcdim);
        if (dd->nc[dim] == 2)
        {
            /* Only 2 cells, so we only need to communicate once */
            ndir = 1;
        }
        else
        {
            ndir = 2;
        }
        for (dir = 0; dir < ndir; dir++)
        {
            if (!bPBC &&
                dd->nc[dim] > 2 &&
                ((dir == 0 && dd->ci[dim] == dd->nc[dim] - 1) ||
                 (dir == 1 && dd->ci[dim] == 0)))
            {
                /* No pbc: the first/last cell should not request atoms */
                nsend_ptr = nsend_zero;
            }
            else
            {
                nsend_ptr = nsend;
            }
            /* Communicate the number of indices */
            dd_sendrecv_int(dd, d, dir == 0 ? dddirForward : dddirBackward,
                            nsend_ptr, 2, spac->nreq[d][dir], 2);
            nr = spac->nreq[d][dir][1];
            if (nlast+nr > ireq->nalloc)
            {
                ireq->nalloc = over_alloc_dd(nlast+nr);
                srenew(ireq->ind, ireq->nalloc);
            }
            /* Communicate the indices */
            dd_sendrecv_int(dd, d, dir == 0 ? dddirForward : dddirBackward,
                            ireq->ind, nsend_ptr[1], ireq->ind+nlast, nr);
            nlast += nr;
        }
        nsend[1] = nlast;
    }
    if (debug)
    {
        fprintf(debug, "Communicated the counts\n");
    }

    /* Search for the requested atoms and communicate the indices we have */
    nat_tot_specat = at_start;
    nrecv_local    = 0;
    for (d = 0; d < dd->ndim; d++)
    {
        /* Pulse the grid forward and backward */
        if (dd->dim[d] >= dd->npbcdim || dd->nc[dd->dim[d]] > 2)
        {
            ndir = 2;
        }
        else
        {
            ndir = 1;
        }
        nat_tot_prev = nat_tot_specat;
        for (dir = ndir-1; dir >= 0; dir--)
        {
            if (nat_tot_specat > spac->bSendAtom_nalloc)
            {
                nalloc_old             = spac->bSendAtom_nalloc;
                spac->bSendAtom_nalloc = over_alloc_dd(nat_tot_specat);
                srenew(spac->bSendAtom, spac->bSendAtom_nalloc);
                for (i = nalloc_old; i < spac->bSendAtom_nalloc; i++)
                {
                    spac->bSendAtom[i] = FALSE;
                }
            }
            spas = &spac->spas[d][dir];
            n0   = spac->nreq[d][dir][0];
            nr   = spac->nreq[d][dir][1];
            if (debug)
            {
                fprintf(debug, "dim=%d, dir=%d, searching for %d atoms\n",
                        d, dir, nr);
            }
            start       = nlast - nr;
            spas->nsend = 0;
            nsend[0]    = 0;
            for (i = 0; i < nr; i++)
            {
                indr = ireq->ind[start+i];
                ind  = -1;
                /* Check if this is a home atom; if so, ind will be set */
                if (!ga2la_get_home(dd->ga2la, indr, &ind))
                {
                    /* Search in the communicated atoms */
                    ind = gmx_hash_get_minone(ga2la_specat, indr);
                }
                if (ind >= 0)
                {
                    if (i < n0 || !spac->bSendAtom[ind])
                    {
                        if (spas->nsend+1 > spas->a_nalloc)
                        {
                            spas->a_nalloc = over_alloc_large(spas->nsend+1);
                            srenew(spas->a, spas->a_nalloc);
                        }
                        /* Store the local index so we know which coordinates
                         * to send out later.
                         */
                        spas->a[spas->nsend] = ind;
                        spac->bSendAtom[ind] = TRUE;
                        if (spas->nsend+1 > spac->ibuf_nalloc)
                        {
                            spac->ibuf_nalloc = over_alloc_large(spas->nsend+1);
                            srenew(spac->ibuf, spac->ibuf_nalloc);
                        }
                        /* Store the global index so we can send it now */
                        spac->ibuf[spas->nsend] = indr;
                        if (i < n0)
                        {
                            nsend[0]++;
                        }
                        spas->nsend++;
                    }
                }
            }
            nlast = start;
            /* Clear the local flags */
            for (i = 0; i < spas->nsend; i++)
            {
                spac->bSendAtom[spas->a[i]] = FALSE;
            }
            /* Send and receive the number of indices to communicate */
            nsend[1] = spas->nsend;
            dd_sendrecv_int(dd, d, dir == 0 ? dddirBackward : dddirForward,
                            nsend, 2, buf, 2);
            if (debug)
            {
                fprintf(debug, "Send to rank %d, %d (%d) indices, "
                        "receive from rank %d, %d (%d) indices\n",
                        dd->neighbor[d][1-dir], nsend[1], nsend[0],
                        dd->neighbor[d][dir], buf[1], buf[0]);
                if (gmx_debug_at)
                {
                    for (i = 0; i < spas->nsend; i++)
                    {
                        fprintf(debug, " %d", spac->ibuf[i]+1);
                    }
                    fprintf(debug, "\n");
                }
            }
            nrecv_local += buf[0];
            spas->nrecv  = buf[1];
            if (nat_tot_specat + spas->nrecv > dd->gatindex_nalloc)
            {
                dd->gatindex_nalloc =
                    over_alloc_dd(nat_tot_specat + spas->nrecv);
                srenew(dd->gatindex, dd->gatindex_nalloc);
            }
            /* Send and receive the indices */
            dd_sendrecv_int(dd, d, dir == 0 ? dddirBackward : dddirForward,
                            spac->ibuf, spas->nsend,
                            dd->gatindex+nat_tot_specat, spas->nrecv);
            nat_tot_specat += spas->nrecv;
        }

        /* Allocate the x/f communication buffers */
        ns = spac->spas[d][0].nsend;
        nr = spac->spas[d][0].nrecv;
        if (ndir == 2)
        {
            ns += spac->spas[d][1].nsend;
            nr += spac->spas[d][1].nrecv;
        }
        if (vbuf_fac*ns > spac->vbuf_nalloc)
        {
            spac->vbuf_nalloc = over_alloc_dd(vbuf_fac*ns);
            srenew(spac->vbuf, spac->vbuf_nalloc);
        }
        if (vbuf_fac == 2 && vbuf_fac*nr > spac->vbuf2_nalloc)
        {
            spac->vbuf2_nalloc = over_alloc_dd(vbuf_fac*nr);
            srenew(spac->vbuf2, spac->vbuf2_nalloc);
        }

        /* Make a global to local index for the communication atoms */
        for (i = nat_tot_prev; i < nat_tot_specat; i++)
        {
            gmx_hash_change_or_set(ga2la_specat, dd->gatindex[i], i);
        }
    }

    /* Check that in the end we got the number of atoms we asked for */
    if (nrecv_local != ireq->n)
    {
        if (debug)
        {
            fprintf(debug, "Requested %d, received %d (tot recv %d)\n",
                    ireq->n, nrecv_local, nat_tot_specat-at_start);
            if (gmx_debug_at)
            {
                for (i = 0; i < ireq->n; i++)
                {
                    ind = gmx_hash_get_minone(ga2la_specat, ireq->ind[i]);
                    fprintf(debug, " %s%d",
                            (ind >= 0) ? "" : "!",
                            ireq->ind[i]+1);
                }
                fprintf(debug, "\n");
            }
        }
        fprintf(stderr, "\nDD cell %d %d %d: Neighboring cells do not have atoms:",
                dd->ci[XX], dd->ci[YY], dd->ci[ZZ]);
        for (i = 0; i < ireq->n; i++)
        {
            if (gmx_hash_get_minone(ga2la_specat, ireq->ind[i]) < 0)
            {
                fprintf(stderr, " %d", ireq->ind[i]+1);
            }
        }
        fprintf(stderr, "\n");
        gmx_fatal(FARGS, "DD cell %d %d %d could only obtain %d of the %d atoms that are connected via %ss from the neighboring cells. This probably means your %s lengths are too long compared to the domain decomposition cell size. Decrease the number of domain decomposition grid cells%s%s.",
                  dd->ci[XX], dd->ci[YY], dd->ci[ZZ],
                  nrecv_local, ireq->n, specat_type,
                  specat_type, add_err,
                  dd_dlb_is_on(dd) ? " or use the -rcon option of mdrun" : "");
    }

    spac->at_start = at_start;
    spac->at_end   = nat_tot_specat;

    if (debug)
    {
        fprintf(debug, "Done setup_specat_communication\n");
    }

    return nat_tot_specat;
}
Example #17
0
void atoms2md(const gmx_mtop_t *mtop, const t_inputrec *ir,
              int nindex, const int *index,
              int homenr,
              t_mdatoms *md)
{
    gmx_bool              bLJPME;
    gmx_mtop_atomlookup_t alook;
    int                   i;
    const t_grpopts      *opts;
    const gmx_groups_t   *groups;
    int                   nthreads gmx_unused;
    const real            oneOverSix = 1.0 / 6.0;

    bLJPME = EVDW_PME(ir->vdwtype);

    opts = &ir->opts;

    groups = &mtop->groups;

    /* Index==NULL indicates no DD (unless we have a DD node with no
     * atoms), so also check for homenr. This should be
     * signaled properly with an extra parameter or nindex==-1.
     */
    if (index == NULL && (homenr > 0))
    {
        md->nr = mtop->natoms;
    }
    else
    {
        md->nr = nindex;
    }

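    /* Reallocate all per-atom arrays when the local atom count grows;
     * optional arrays are only resized when the corresponding feature
     * (mass/charge perturbation, LJ-PME, QM/MM, AdResS, ...) is in use.
     */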
    if (md->nr > md->nalloc)
    {
        md->nalloc = over_alloc_dd(md->nr);

        if (md->nMassPerturbed)
        {
            srenew(md->massA, md->nalloc);
            srenew(md->massB, md->nalloc);
        }
        srenew(md->massT, md->nalloc);
        srenew(md->invmass, md->nalloc);
        srenew(md->chargeA, md->nalloc);
        srenew(md->typeA, md->nalloc);
        if (md->nPerturbed)
        {
            srenew(md->chargeB, md->nalloc);
            srenew(md->typeB, md->nalloc);
        }
        if (bLJPME)
        {
            srenew(md->sqrt_c6A, md->nalloc);
            srenew(md->sigmaA, md->nalloc);
            srenew(md->sigma3A, md->nalloc);
            if (md->nPerturbed)
            {
                srenew(md->sqrt_c6B, md->nalloc);
                srenew(md->sigmaB, md->nalloc);
                srenew(md->sigma3B, md->nalloc);
            }
        }
        srenew(md->ptype, md->nalloc);
        if (opts->ngtc > 1)
        {
            srenew(md->cTC, md->nalloc);
            /* We always copy cTC with domain decomposition */
        }
        srenew(md->cENER, md->nalloc);
        if (opts->ngacc > 1)
        {
            srenew(md->cACC, md->nalloc);
        }
        if (opts->nFreeze &&
            (opts->ngfrz > 1 ||
             opts->nFreeze[0][XX] || opts->nFreeze[0][YY] || opts->nFreeze[0][ZZ]))
        {
            srenew(md->cFREEZE, md->nalloc);
        }
        if (md->bVCMgrps)
        {
            srenew(md->cVCM, md->nalloc);
        }
        if (md->bOrires)
        {
            srenew(md->cORF, md->nalloc);
        }
        if (md->nPerturbed)
        {
            srenew(md->bPerturbed, md->nalloc);
        }

        /* Note that these user t_mdatoms array pointers are NULL
         * when there is only one group present.
         * Therefore, when adding code, the user should use something like:
         * gprnrU1 = (md->cU1==NULL ? 0 : md->cU1[localatindex])
         */
        if (mtop->groups.grpnr[egcUser1] != NULL)
        {
            srenew(md->cU1, md->nalloc);
        }
        if (mtop->groups.grpnr[egcUser2] != NULL)
        {
            srenew(md->cU2, md->nalloc);
        }

        if (ir->bQMMM)
        {
            srenew(md->bQM, md->nalloc);
        }
        if (ir->bAdress)
        {
            srenew(md->wf, md->nalloc);
            srenew(md->tf_table_index, md->nalloc);
        }
    }

    alook = gmx_mtop_atomlookup_init(mtop);

    // cppcheck-suppress unreadVariable
    nthreads = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (i = 0; i < md->nr; i++)
    {
        try
        {
            int      g, ag;
            real     mA, mB, fac;
            real     c6, c12;
            t_atom  *atom;

            if (index == NULL)
            {
                ag = i;
            }
            else
            {
                ag   = index[i];
            }
            gmx_mtop_atomnr_to_atom(alook, ag, &atom);

            if (md->cFREEZE)
            {
                md->cFREEZE[i] = ggrpnr(groups, egcFREEZE, ag);
            }
            if (EI_ENERGY_MINIMIZATION(ir->eI))
            {
                /* Displacement is proportional to F, masses used for constraints */
                mA = 1.0;
                mB = 1.0;
            }
            else if (ir->eI == eiBD)
            {
                /* With BD the physical masses are irrelevant.
                 * To keep the code simple we use most of the normal MD code path
                 * for BD. Thus for constraining the masses should be proportional
                 * to the friction coefficient. We set the absolute value such that
                 * m/2<(dx/dt)^2> = m/2*2kT/fric*dt = kT/2 => m=fric*dt/2
                 * Then if we set the (meaningless) velocity to v=dx/dt, we get the
                 * correct kinetic energy and temperature using the usual code path.
                 * Thus with BD v*dt will give the displacement and the reported
                 * temperature can signal bad integration (too large time step).
                 */
                if (ir->bd_fric > 0)
                {
                    mA = 0.5*ir->bd_fric*ir->delta_t;
                    mB = 0.5*ir->bd_fric*ir->delta_t;
                }
                else
                {
                    /* The friction coefficient is mass/tau_t */
                    fac = ir->delta_t/opts->tau_t[md->cTC ? groups->grpnr[egcTC][ag] : 0];
                    mA  = 0.5*atom->m*fac;
                    mB  = 0.5*atom->mB*fac;
                }
            }
            else
            {
                mA = atom->m;
                mB = atom->mB;
            }
            if (md->nMassPerturbed)
            {
                md->massA[i]  = mA;
                md->massB[i]  = mB;
            }
            md->massT[i]    = mA;
            if (mA == 0.0)
            {
                md->invmass[i]    = 0;
            }
            else if (md->cFREEZE)
            {
                g = md->cFREEZE[i];
                if (opts->nFreeze[g][XX] && opts->nFreeze[g][YY] && opts->nFreeze[g][ZZ])
                {
                    /* Set the mass of completely frozen particles to ALMOST_ZERO instead of 0
                     * to avoid div by zero in lincs or shake.
                     * Note that constraints can still move a partially frozen particle.
                     */
                    md->invmass[i]  = ALMOST_ZERO;
                }
                else
                {
                    md->invmass[i]  = 1.0/mA;
                }
            }
            else
            {
                md->invmass[i]    = 1.0/mA;
            }
            md->chargeA[i]      = atom->q;
            md->typeA[i]        = atom->type;
            if (bLJPME)
            {
                c6                = mtop->ffparams.iparams[atom->type*(mtop->ffparams.atnr+1)].lj.c6;
                c12               = mtop->ffparams.iparams[atom->type*(mtop->ffparams.atnr+1)].lj.c12;
                md->sqrt_c6A[i]   = sqrt(c6);
                if (c6 == 0.0 || c12 == 0)
                {
                    md->sigmaA[i] = 1.0;
                }
                else
                {
                    md->sigmaA[i] = pow(c12/c6, oneOverSix);
                }
                md->sigma3A[i]    = 1/(md->sigmaA[i]*md->sigmaA[i]*md->sigmaA[i]);
            }
            if (md->nPerturbed)
            {
                md->bPerturbed[i] = PERTURBED(*atom);
                md->chargeB[i]    = atom->qB;
                md->typeB[i]      = atom->typeB;
                if (bLJPME)
                {
                    c6                = mtop->ffparams.iparams[atom->typeB*(mtop->ffparams.atnr+1)].lj.c6;
                    c12               = mtop->ffparams.iparams[atom->typeB*(mtop->ffparams.atnr+1)].lj.c12;
                    md->sqrt_c6B[i]   = sqrt(c6);
                    if (c6 == 0.0 || c12 == 0)
                    {
                        md->sigmaB[i] = 1.0;
                    }
                    else
                    {
                        md->sigmaB[i] = pow(c12/c6, oneOverSix);
                    }
                    md->sigma3B[i]    = 1/(md->sigmaB[i]*md->sigmaB[i]*md->sigmaB[i]);
                }
            }
            md->ptype[i]    = atom->ptype;
            if (md->cTC)
            {
                md->cTC[i]    = groups->grpnr[egcTC][ag];
            }
            md->cENER[i]    =
                (groups->grpnr[egcENER] ? groups->grpnr[egcENER][ag] : 0);
            if (md->cACC)
            {
                md->cACC[i]   = groups->grpnr[egcACC][ag];
            }
            if (md->cVCM)
            {
                md->cVCM[i]       = groups->grpnr[egcVCM][ag];
            }
            if (md->cORF)
            {
                md->cORF[i]       = groups->grpnr[egcORFIT][ag];
            }

            if (md->cU1)
            {
                md->cU1[i]        = groups->grpnr[egcUser1][ag];
            }
            if (md->cU2)
            {
                md->cU2[i]        = groups->grpnr[egcUser2][ag];
            }

            if (ir->bQMMM)
            {
                if (groups->grpnr[egcQMMM] == 0 ||
                    groups->grpnr[egcQMMM][ag] < groups->grps[egcQMMM].nr-1)
                {
                    md->bQM[i]      = TRUE;
                }
                else
                {
                    md->bQM[i]      = FALSE;
                }
            }
            /* Initialize AdResS weighting functions to adressw */
            if (ir->bAdress)
            {
                md->wf[i]           = 1.0;
                /* if no tf table groups specified, use default table */
                md->tf_table_index[i] = DEFAULT_TF_TABLE;
                if (ir->adress->n_tf_grps > 0)
                {
                    /* if tf table groups are specified, tf is only applied to those energy groups */
                    md->tf_table_index[i] = NO_TF_TABLE;
                    /* check whether the atom is in one of the relevant energy groups and assign a table index */
                    for (g = 0; g < ir->adress->n_tf_grps; g++)
                    {
                        if (md->cENER[i] == ir->adress->tf_table_index[g])
                        {
                            md->tf_table_index[i] = g;
                        }
                    }
                }
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    gmx_mtop_atomlookup_destroy(alook);

    md->homenr = homenr;
    md->lambda = 0;
}
Example #18
0
int gmx_pme_recv_coeffs_coords(struct gmx_pme_pp *pme_pp,
                               int               *natoms,
                               real             **chargeA,
                               real             **chargeB,
                               real             **sqrt_c6A,
                               real             **sqrt_c6B,
                               real             **sigmaA,
                               real             **sigmaB,
                               matrix             box,
                               rvec             **x,
                               rvec             **f,
                               int               *maxshift_x,
                               int               *maxshift_y,
                               gmx_bool          *bFreeEnergy_q,
                               gmx_bool          *bFreeEnergy_lj,
                               real              *lambda_q,
                               real              *lambda_lj,
                               gmx_bool          *bEnerVir,
                               int               *pme_flags,
                               gmx_int64_t       *step,
                               ivec               grid_size,
                               real              *ewaldcoeff_q,
                               real              *ewaldcoeff_lj)
{
    int                  nat = 0, status;

    *pme_flags = 0;
#ifdef GMX_MPI
    gmx_pme_comm_n_box_t cnb;
    int                  messages;

    cnb.flags  = 0;
    messages   = 0;
    do
    {

        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb, sizeof(cnb), MPI_BYTE,
                 pme_pp->node_peer, eCommType_CNB,
                 pme_pp->mpi_comm_mysim, MPI_STATUS_IGNORE);

        if (debug)
        {
            fprintf(debug, "PME only rank receiving:%s%s%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE)        ? " charges" : "",
                    (cnb.flags & PP_PME_COORD )        ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH)        ? " finish" : "",
                    (cnb.flags & PP_PME_SWITCHGRID)    ? " switch grid" : "",
                    (cnb.flags & PP_PME_RESETCOUNTERS) ? " reset counters" : "");
        }

        if (cnb.flags & PP_PME_SWITCHGRID)
        {
            /* Special case, receive the new parameters and return */
            copy_ivec(cnb.grid_size, grid_size);
            *ewaldcoeff_q  = cnb.ewaldcoeff_q;
            *ewaldcoeff_lj = cnb.ewaldcoeff_lj;
            return pmerecvqxSWITCHGRID;
        }

        if (cnb.flags & PP_PME_RESETCOUNTERS)
        {
            /* Special case, receive the step and return */
            *step = cnb.step;

            return pmerecvqxRESETCOUNTERS;
        }

        if (cnb.flags & (PP_PME_CHARGE | PP_PME_SQRTC6 | PP_PME_SIGMA))
        {
            /* Receive the send counts from the other PP nodes */
            for (int sender = 0; sender < pme_pp->nnode; sender++)
            {
                if (pme_pp->node[sender] == pme_pp->node_peer)
                {
                    pme_pp->nat[sender] = cnb.natoms;
                }
                else
                {
                    MPI_Irecv(&(pme_pp->nat[sender]), sizeof(pme_pp->nat[0]),
                              MPI_BYTE,
                              pme_pp->node[sender], eCommType_CNB,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
            messages = 0;

            nat = 0;
            for (int sender = 0; sender < pme_pp->nnode; sender++)
            {
                nat += pme_pp->nat[sender];
            }

            if (nat > pme_pp->nalloc)
            {
                pme_pp->nalloc = over_alloc_dd(nat);
                if (cnb.flags & PP_PME_CHARGE)
                {
                    srenew(pme_pp->chargeA, pme_pp->nalloc);
                }
                if (cnb.flags & PP_PME_CHARGEB)
                {
                    srenew(pme_pp->chargeB, pme_pp->nalloc);
                }
                if (cnb.flags & PP_PME_SQRTC6)
                {
                    srenew(pme_pp->sqrt_c6A, pme_pp->nalloc);
                }
                if (cnb.flags & PP_PME_SQRTC6B)
                {
                    srenew(pme_pp->sqrt_c6B, pme_pp->nalloc);
                }
                if (cnb.flags & PP_PME_SIGMA)
                {
                    srenew(pme_pp->sigmaA, pme_pp->nalloc);
                }
                if (cnb.flags & PP_PME_SIGMAB)
                {
                    srenew(pme_pp->sigmaB, pme_pp->nalloc);
                }
                srenew(pme_pp->x, pme_pp->nalloc);
                srenew(pme_pp->f, pme_pp->nalloc);
            }

            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;

            /* Receive the charges in place */
            for (int q = 0; q < eCommType_NR; q++)
            {
                real *charge_pp;

                if (!(cnb.flags & (PP_PME_CHARGE<<q)))
                {
                    continue;
                }
                switch (q)
                {
                    case eCommType_ChargeA: charge_pp = pme_pp->chargeA;  break;
                    case eCommType_ChargeB: charge_pp = pme_pp->chargeB;  break;
                    case eCommType_SQRTC6A: charge_pp = pme_pp->sqrt_c6A; break;
                    case eCommType_SQRTC6B: charge_pp = pme_pp->sqrt_c6B; break;
                    case eCommType_SigmaA:  charge_pp = pme_pp->sigmaA;   break;
                    case eCommType_SigmaB:  charge_pp = pme_pp->sigmaB;   break;
                    default: gmx_incons("Wrong eCommType");
                }
                nat = 0;
                for (int sender = 0; sender < pme_pp->nnode; sender++)
                {
                    if (pme_pp->nat[sender] > 0)
                    {
                        MPI_Irecv(charge_pp+nat,
                                  pme_pp->nat[sender]*sizeof(real),
                                  MPI_BYTE,
                                  pme_pp->node[sender], q,
                                  pme_pp->mpi_comm_mysim,
                                  &pme_pp->req[messages++]);
                        nat += pme_pp->nat[sender];
                        if (debug)
                        {
                            fprintf(debug, "Received from PP rank %d: %d %s\n",
                                    pme_pp->node[sender], pme_pp->nat[sender],
                                    (q == eCommType_ChargeA ||
                                     q == eCommType_ChargeB) ? "charges" : "params");
                        }
                    }
                }
            }

            pme_pp->flags_charge = cnb.flags;
        }

        if (cnb.flags & PP_PME_COORD)
        {
            if (!(pme_pp->flags_charge & (PP_PME_CHARGE | PP_PME_SQRTC6)))
            {
                gmx_incons("PME-only rank received coordinates before charges and/or C6-values"
                           );
            }

            /* The box, FE flag and lambda are sent along with the coordinates */
            copy_mat(cnb.box, box);
            *bFreeEnergy_q  = ((cnb.flags & GMX_PME_DO_COULOMB) &&
                               (cnb.flags & PP_PME_FEP_Q));
            *bFreeEnergy_lj = ((cnb.flags & GMX_PME_DO_LJ) &&
                               (cnb.flags & PP_PME_FEP_LJ));
            *lambda_q       = cnb.lambda_q;
            *lambda_lj      = cnb.lambda_lj;
            *bEnerVir       = (cnb.flags & PP_PME_ENER_VIR);
            *pme_flags      = cnb.flags;

            if (*bFreeEnergy_q && !(pme_pp->flags_charge & PP_PME_CHARGEB))
            {
                gmx_incons("PME-only rank received free energy request, but "
                           "did not receive B-state charges");
            }

            if (*bFreeEnergy_lj && !(pme_pp->flags_charge & PP_PME_SQRTC6B))
            {
                gmx_incons("PME-only rank received free energy request, but "
                           "did not receive B-state C6-values");
            }

            /* Receive the coordinates in place */
            nat = 0;
            for (int sender = 0; sender < pme_pp->nnode; sender++)
            {
                if (pme_pp->nat[sender] > 0)
                {
                    MPI_Irecv(pme_pp->x[nat], pme_pp->nat[sender]*sizeof(rvec),
                              MPI_BYTE,
                              pme_pp->node[sender], eCommType_COORD,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                    nat += pme_pp->nat[sender];
                    if (debug)
                    {
                        fprintf(debug, "Received from PP rank %d: %d "
                                "coordinates\n",
                                pme_pp->node[sender], pme_pp->nat[sender]);
                    }
                }
            }
        }

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
        messages = 0;
    }
    while (!(cnb.flags & (PP_PME_COORD | PP_PME_FINISH)));
    status = ((cnb.flags & PP_PME_FINISH) ? pmerecvqxFINISH : pmerecvqxX);

    *step = cnb.step;
#else
    GMX_UNUSED_VALUE(box);
    GMX_UNUSED_VALUE(maxshift_x);
    GMX_UNUSED_VALUE(maxshift_y);
    GMX_UNUSED_VALUE(bFreeEnergy_q);
    GMX_UNUSED_VALUE(bFreeEnergy_lj);
    GMX_UNUSED_VALUE(lambda_q);
    GMX_UNUSED_VALUE(lambda_lj);
    GMX_UNUSED_VALUE(bEnerVir);
    GMX_UNUSED_VALUE(step);
    GMX_UNUSED_VALUE(grid_size);
    GMX_UNUSED_VALUE(ewaldcoeff_q);
    GMX_UNUSED_VALUE(ewaldcoeff_lj);

    status = pmerecvqxX;
#endif

    *natoms   = nat;
    *chargeA  = pme_pp->chargeA;
    *chargeB  = pme_pp->chargeB;
    *sqrt_c6A = pme_pp->sqrt_c6A;
    *sqrt_c6B = pme_pp->sqrt_c6B;
    *sigmaA   = pme_pp->sigmaA;
    *sigmaB   = pme_pp->sigmaB;
    *x        = pme_pp->x;
    *f        = pme_pp->f;

    return status;
}
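All of these examples share the same grow-on-demand reallocation guard: only reallocate when the required count exceeds the current capacity, and then over-allocate to amortise repeated growth. A self-contained sketch using plain realloc(); the head-room factor here is an assumption, not the actual over_alloc_dd() policy:

#include <stdlib.h>

/* Illustrative sketch only; error handling for a failed realloc() is omitted. */
static void sketch_ensure_capacity(double **buf, int *nalloc, int n_needed)
{
    if (n_needed > *nalloc)
    {
        *nalloc = (int)(1.2*n_needed) + 100;   /* assumed over-allocation head-room */
        *buf    = (double *)realloc(*buf, *nalloc*sizeof(**buf));
    }
}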
Exemple #19
0
void set_lincs(t_idef *idef,t_mdatoms *md,
               gmx_bool bDynamics,t_commrec *cr,
               struct gmx_lincsdata *li)
{
    int      start,natoms,nflexcon;
    t_blocka at2con;
    t_iatom  *iatom;
    int      i,k,ncc_alloc,ni,con,nconnect,concon;
    int      type,a1,a2;
    real     lenA=0,lenB;
    gmx_bool     bLocal;

    li->nc = 0;
    li->ncc = 0;
		
    /* This is the local topology, so there are only F_CONSTR constraints */
    if (idef->il[F_CONSTR].nr == 0)
    {
        /* There are no constraints,
         * we do not need to fill any data structures.
         */
        return;
    }
    
    if (debug)
    {
        fprintf(debug,"Building the LINCS connectivity\n");
    }
    
    if (DOMAINDECOMP(cr))
    {
        if (cr->dd->constraints)
        {
            dd_get_constraint_range(cr->dd,&start,&natoms);
        }
        else
        {
            natoms = cr->dd->nat_home;
        }
        start = 0;
    }
    else if (PARTDECOMP(cr))
    {
        pd_get_constraint_range(cr->pd,&start,&natoms);
    }
    else
    {
        start  = md->start;
        natoms = md->homenr;
    }
    at2con = make_at2con(start,natoms,idef->il,idef->iparams,bDynamics,
                         &nflexcon);

	
    if (idef->il[F_CONSTR].nr/3 > li->nc_alloc || li->nc_alloc == 0)
    {
        li->nc_alloc = over_alloc_dd(idef->il[F_CONSTR].nr/3);
        srenew(li->bllen0,li->nc_alloc);
        srenew(li->ddist,li->nc_alloc);
        srenew(li->bla,2*li->nc_alloc);
        srenew(li->blc,li->nc_alloc);
        srenew(li->blc1,li->nc_alloc);
        srenew(li->blnr,li->nc_alloc+1);
        srenew(li->bllen,li->nc_alloc);
        srenew(li->tmpv,li->nc_alloc);
        srenew(li->tmp1,li->nc_alloc);
        srenew(li->tmp2,li->nc_alloc);
        srenew(li->tmp3,li->nc_alloc);
        srenew(li->lambda,li->nc_alloc);
        if (li->ncg_triangle > 0)
        {
            /* This is allocating too much, but it is difficult to improve */
            srenew(li->triangle,li->nc_alloc);
            srenew(li->tri_bits,li->nc_alloc);
        }
    }
    
    iatom = idef->il[F_CONSTR].iatoms;
    
    ncc_alloc = li->ncc_alloc;
    li->blnr[0] = 0;
    
    ni = idef->il[F_CONSTR].nr/3;

    con = 0;
    nconnect = 0;
    li->blnr[con] = nconnect;
    for(i=0; i<ni; i++)
    {
        bLocal = TRUE;
        type = iatom[3*i];
        a1   = iatom[3*i+1];
        a2   = iatom[3*i+2];
        lenA = idef->iparams[type].constr.dA;
        lenB = idef->iparams[type].constr.dB;
        /* Skip the flexible constraints when not doing dynamics */
        if (bDynamics || lenA!=0 || lenB!=0)
        {
            li->bllen0[con]  = lenA;
            li->ddist[con]   = lenB - lenA;
            /* Set the length to the topology A length */
            li->bllen[con]   = li->bllen0[con];
            li->bla[2*con]   = a1;
            li->bla[2*con+1] = a2;
            /* Construct the constraint connection matrix blbnb */
            for(k=at2con.index[a1-start]; k<at2con.index[a1-start+1]; k++)
            {
                concon = at2con.a[k];
                if (concon != i)
                {
                    if (nconnect >= ncc_alloc)
                    {
                        ncc_alloc = over_alloc_small(nconnect+1);
                        srenew(li->blbnb,ncc_alloc);
                    }
                    li->blbnb[nconnect++] = concon;
                }
            }
            for(k=at2con.index[a2-start]; k<at2con.index[a2-start+1]; k++)
            {
                concon = at2con.a[k];
                if (concon != i)
                {
                    if (nconnect+1 > ncc_alloc)
                    {
                        ncc_alloc = over_alloc_small(nconnect+1);
                        srenew(li->blbnb,ncc_alloc);
                    }
                    li->blbnb[nconnect++] = concon;
                }
            }
            li->blnr[con+1] = nconnect;
            
            if (cr->dd == NULL)
            {
                /* Order the blbnb matrix to optimize memory access */
                qsort(&(li->blbnb[li->blnr[con]]),li->blnr[con+1]-li->blnr[con],
                      sizeof(li->blbnb[0]),int_comp);
            }
            /* Increase the constraint count */
            con++;
        }
    }
    
    done_blocka(&at2con);

    /* This is the real number of constraints,
     * without dynamics the flexible constraints are not present.
     */
    li->nc = con;
    
    li->ncc = li->blnr[con];
    if (cr->dd == NULL)
    {
        /* Since the matrix is static, we can free some memory */
        ncc_alloc = li->ncc;
        srenew(li->blbnb,ncc_alloc);
    }
    
    if (ncc_alloc > li->ncc_alloc)
    {
        li->ncc_alloc = ncc_alloc;
        srenew(li->blmf,li->ncc_alloc);
        srenew(li->blmf1,li->ncc_alloc);
        srenew(li->tmpncc,li->ncc_alloc);
    }
    
    if (debug)
    {
        fprintf(debug,"Number of constraints is %d, couplings %d\n",
                li->nc,li->ncc);
    }

    set_lincs_matrix(li,md->invmass,md->lambda);
}
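The qsort() call in set_lincs() relies on an integer comparator, int_comp, which is not shown in this listing; a typical overflow-safe definition would look like the following sketch (the GROMACS definition may differ):

static int int_comp(const void *a, const void *b)
{
    int ia = *(const int *)a;
    int ib = *(const int *)b;

    /* Return <0, 0 or >0 without the overflow risk of a plain subtraction */
    return (ia > ib) - (ia < ib);
}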
void atoms2md(gmx_mtop_t *mtop,t_inputrec *ir,
	      int nindex,int *index,
	      int start,int homenr,
	      t_mdatoms *md)
{
  t_atoms   *atoms_mol;
  int       i,g,ag,as,ae,molb;
  real      mA,mB,fac;
  t_atom    *atom;
  t_grpopts *opts;
  gmx_groups_t *groups;
  gmx_molblock_t *molblock;

  opts = &ir->opts;

  groups = &mtop->groups;

  molblock = mtop->molblock;

  if (index == NULL) {
    md->nr = mtop->natoms;
  } else {
    md->nr = nindex;
  }

  if (md->nr > md->nalloc) {
    md->nalloc = over_alloc_dd(md->nr);

    if (md->nMassPerturbed) {
      srenew(md->massA,md->nalloc);
      srenew(md->massB,md->nalloc);
    }
    srenew(md->massT,md->nalloc);
    srenew(md->invmass,md->nalloc);
    srenew(md->chargeA,md->nalloc);
    if (md->nPerturbed) {
      srenew(md->chargeB,md->nalloc);
    }
    srenew(md->typeA,md->nalloc);
    if (md->nPerturbed) {
      srenew(md->typeB,md->nalloc);
    }
    srenew(md->ptype,md->nalloc);
    if (opts->ngtc > 1) {
      srenew(md->cTC,md->nalloc);
      /* We always copy cTC with domain decomposition */
    }
    srenew(md->cENER,md->nalloc);
    if (opts->ngacc > 1)
      srenew(md->cACC,md->nalloc);
    if (opts->nFreeze &&
	(opts->ngfrz > 1 ||
	 opts->nFreeze[0][XX] || opts->nFreeze[0][YY] || opts->nFreeze[0][ZZ]))
      srenew(md->cFREEZE,md->nalloc);
    if (md->bVCMgrps)
      srenew(md->cVCM,md->nalloc);
    if (md->bOrires)
      srenew(md->cORF,md->nalloc);
    if (md->nPerturbed)
      srenew(md->bPerturbed,md->nalloc);
    
    /* Note that these user t_mdatoms array pointers are NULL
     * when there is only one group present.
     * Therefore, when adding code, the user should use something like:
     * grpnrU1 = (md->cU1==NULL ? 0 : md->cU1[localatindex])
     */
    if (mtop->groups.grpnr[egcUser1] != NULL)
      srenew(md->cU1,md->nalloc);
    if (mtop->groups.grpnr[egcUser2] != NULL)
      srenew(md->cU2,md->nalloc);
    
    if (ir->bQMMM)
      srenew(md->bQM,md->nalloc);
  }

  for(i=0; (i<md->nr); i++) {
    if (index == NULL) {
      ag = i;
      gmx_mtop_atomnr_to_atom(mtop,ag,&atom);
    } else {
      ag   = index[i];
      molb = -1;
      ae   = 0;
      do {
	molb++;
	as = ae;
	ae = as + molblock[molb].nmol*molblock[molb].natoms_mol;
      } while (ag >= ae);
      atoms_mol = &mtop->moltype[molblock[molb].type].atoms;
      atom = &atoms_mol->atom[(ag - as) % atoms_mol->nr];
    }

    if (md->cFREEZE) {
      md->cFREEZE[i] = ggrpnr(groups,egcFREEZE,ag);
    }
    if (EI_ENERGY_MINIMIZATION(ir->eI)) {
      mA = 1.0;
      mB = 1.0;
    } else if (ir->eI == eiBD) {
      /* Make the mass proportional to the friction coefficient for BD.
       * This is necessary for the constraint algorithms.
       */
      if (ir->bd_fric) {
	mA = ir->bd_fric*ir->delta_t;
	mB = ir->bd_fric*ir->delta_t;
      } else {
	fac = ir->delta_t/opts->tau_t[md->cTC ? groups->grpnr[egcTC][ag] : 0];
	mA = atom->m*fac;
	mB = atom->mB*fac;
      }
    } else {
      mA = atom->m;
      mB = atom->mB;
    }
    if (md->nMassPerturbed) {
      md->massA[i]	= mA;
      md->massB[i]	= mB;
    }
    md->massT[i]	= mA;
    if (mA == 0.0) {
      md->invmass[i]    = 0;
    } else if (md->cFREEZE) {
      g = md->cFREEZE[i];
      if (opts->nFreeze[g][XX] && opts->nFreeze[g][YY] && opts->nFreeze[g][ZZ])
	/* Set the mass of completely frozen particles to ALMOST_ZERO instead of 0
	 * to avoid div by zero in lincs or shake.
	 * Note that constraints can still move a partially frozen particle.
	 */
	md->invmass[i]	= ALMOST_ZERO;
      else
	md->invmass[i]	= 1.0/mA;
    } else {
      md->invmass[i]	= 1.0/mA;
    }
    md->chargeA[i]	= atom->q;
    md->typeA[i]	= atom->type;
    if (md->nPerturbed) {
      md->chargeB[i]	= atom->qB;
      md->typeB[i]	= atom->typeB;
      md->bPerturbed[i] = PERTURBED(*atom);
    }
    md->ptype[i]	= atom->ptype;
    if (md->cTC)
      md->cTC[i]	= groups->grpnr[egcTC][ag];
    md->cENER[i]	=
      (groups->grpnr[egcENER] ? groups->grpnr[egcENER][ag] : 0);
    if (md->cACC)
      md->cACC[i]	= groups->grpnr[egcACC][ag];
    if (md->cVCM)
      md->cVCM[i]     	= groups->grpnr[egcVCM][ag];
    if (md->cORF)
      md->cORF[i]     	= groups->grpnr[egcORFIT][ag];

    if (md->cU1)
      md->cU1[i]     	= groups->grpnr[egcUser1][ag];
    if (md->cU2)
      md->cU2[i]     	= groups->grpnr[egcUser2][ag];

    if (ir->bQMMM) {
      if (groups->grpnr[egcQMMM] == 0 || 
	  groups->grpnr[egcQMMM][ag] < groups->grps[egcQMMM].nr-1) {
	md->bQM[i]      = TRUE;
      } else {
	md->bQM[i]      = FALSE;
      }
    }
  }

  md->start  = start;
  md->homenr = homenr;
  md->lambda = 0;
}
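The inverse-mass assignment in atoms2md() above condenses to one rule per atom; a stand-alone sketch (ALMOST_ZERO is a GROMACS constant, so a tiny placeholder value is used here):

/* Illustrative sketch only. */
static double sketch_invmass(double m, int fully_frozen)
{
    if (m == 0.0)
    {
        return 0.0;            /* massless particles: zero inverse mass */
    }
    if (fully_frozen)
    {
        return 1.0e-30;        /* placeholder for ALMOST_ZERO; avoids division by zero
                                * in LINCS/SHAKE while keeping the particle essentially fixed */
    }
    return 1.0/m;
}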
Exemple #21
0
int gmx_pme_recv_q_x(struct gmx_pme_pp *pme_pp,
                     real **chargeA, real **chargeB,
                     matrix box, rvec **x,rvec **f,
                     int *maxshift_x, int *maxshift_y,
                     gmx_bool *bFreeEnergy,real *lambda,
                     gmx_bool *bEnerVir,
                     gmx_large_int_t *step,
                     ivec grid_size, real *ewaldcoeff)
{
    gmx_pme_comm_n_box_t cnb;
    int  nat=0,q,messages,sender;
    real *charge_pp;

    messages = 0;

    /* avoid compiler warning about unused variable without MPI support */
    cnb.flags = 0;	
#ifdef GMX_MPI
    do {
        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb,sizeof(cnb),MPI_BYTE,
                 pme_pp->node_peer,0,
                 pme_pp->mpi_comm_mysim,MPI_STATUS_IGNORE);

        if (debug)
        {
            fprintf(debug,"PME only node receiving:%s%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE) ? " charges" : "",
                    (cnb.flags & PP_PME_COORD ) ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH) ? " finish" : "",
                    (cnb.flags & PP_PME_SWITCH) ? " switch" : "");
        }

        if (cnb.flags & PP_PME_SWITCH)
        {
            /* Special case, receive the new parameters and return */
            copy_ivec(cnb.grid_size,grid_size);
            *ewaldcoeff = cnb.ewaldcoeff;

            return -2;
        }

        if (cnb.flags & PP_PME_CHARGE) {
            /* Receive the send counts from the other PP nodes */
            for(sender=0; sender<pme_pp->nnode; sender++) {
                if (pme_pp->node[sender] == pme_pp->node_peer) {
                    pme_pp->nat[sender] = cnb.natoms;
                } else {
                    MPI_Irecv(&(pme_pp->nat[sender]),sizeof(pme_pp->nat[0]),
                              MPI_BYTE,
                              pme_pp->node[sender],0,
                              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
            messages = 0;

            nat = 0;
            for(sender=0; sender<pme_pp->nnode; sender++)
                nat += pme_pp->nat[sender];

            if (nat > pme_pp->nalloc) {
                pme_pp->nalloc = over_alloc_dd(nat);
                srenew(pme_pp->chargeA,pme_pp->nalloc);
                if (cnb.flags & PP_PME_CHARGEB)
                    srenew(pme_pp->chargeB,pme_pp->nalloc);
                srenew(pme_pp->x,pme_pp->nalloc);
                srenew(pme_pp->f,pme_pp->nalloc);
            }

            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;

            /* Receive the charges in place */
            for(q=0; q<((cnb.flags & PP_PME_CHARGEB) ? 2 : 1); q++) {
                if (q == 0)
                    charge_pp = pme_pp->chargeA;
                else
                    charge_pp = pme_pp->chargeB;
                nat = 0;
                for(sender=0; sender<pme_pp->nnode; sender++) {
                    if (pme_pp->nat[sender] > 0) {
                        MPI_Irecv(charge_pp+nat,
                                  pme_pp->nat[sender]*sizeof(real),
                                  MPI_BYTE,
                                  pme_pp->node[sender],1+q,
                                  pme_pp->mpi_comm_mysim,
                                  &pme_pp->req[messages++]);
                        nat += pme_pp->nat[sender];
                        if (debug)
                            fprintf(debug,"Received from PP node %d: %d "
                                "charges\n",
                                    pme_pp->node[sender],pme_pp->nat[sender]);
                    }
                }
            }

            pme_pp->flags_charge = cnb.flags;
        }

        if (cnb.flags & PP_PME_COORD) {
            if (!(pme_pp->flags_charge & PP_PME_CHARGE))
                gmx_incons("PME-only node received coordinates before charges"
                    );

            /* The box, FE flag and lambda are sent along with the coordinates */
            copy_mat(cnb.box,box);
            *bFreeEnergy = (cnb.flags & PP_PME_FEP);
            *lambda      = cnb.lambda;
            *bEnerVir    = (cnb.flags & PP_PME_ENER_VIR);

            if (*bFreeEnergy && !(pme_pp->flags_charge & PP_PME_CHARGEB))
                gmx_incons("PME-only node received free energy request, but "
                    "did not receive B-state charges");

            /* Receive the coordinates in place */
            nat = 0;
            for(sender=0; sender<pme_pp->nnode; sender++) {
                if (pme_pp->nat[sender] > 0) {
                    MPI_Irecv(pme_pp->x[nat],pme_pp->nat[sender]*sizeof(rvec),
                              MPI_BYTE,
                              pme_pp->node[sender],3,
                              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
                    nat += pme_pp->nat[sender];
                    if (debug)
                        fprintf(debug,"Received from PP node %d: %d "
                            "coordinates\n",
                                pme_pp->node[sender],pme_pp->nat[sender]);
                }
            }
        }

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
        messages = 0;
    } while (!(cnb.flags & (PP_PME_COORD | PP_PME_FINISH)));

    *step = cnb.step;
#endif

    *chargeA = pme_pp->chargeA;
    *chargeB = pme_pp->chargeB;
    *x       = pme_pp->x;
    *f       = pme_pp->f;


    return ((cnb.flags & PP_PME_FINISH) ? -1 : nat);
}
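Both PME receive routines above use the same gather pattern: post one non-blocking receive per sending PP rank into consecutive slices of a single buffer, then wait for all of them. A minimal sketch with made-up names (only standard MPI calls are used):

#include <mpi.h>

/* Illustrative sketch; assumes at most 64 sending ranks. */
static void sketch_gather_reals(double *buf, const int *recv_count,
                                const int *peer_rank, int npeers,
                                MPI_Comm comm, int tag)
{
    MPI_Request req[64];
    int         nreq = 0, offset = 0, s;

    for (s = 0; s < npeers; s++)
    {
        if (recv_count[s] > 0)
        {
            MPI_Irecv(buf + offset, recv_count[s], MPI_DOUBLE,
                      peer_rank[s], tag, comm, &req[nreq++]);
            offset += recv_count[s];
        }
    }
    MPI_Waitall(nreq, req, MPI_STATUSES_IGNORE);
}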
/*! \brief Walks over the constraints out from the local atoms into the non-local atoms and adds them to a list */
static void walk_out(int con, int con_offset, int a, int offset, int nrec,
                     int ncon1, const t_iatom *ia1, const t_iatom *ia2,
                     const t_blocka *at2con,
                     const gmx_ga2la_t *ga2la, gmx_bool bHomeConnect,
                     gmx_domdec_constraints_t *dc,
                     gmx_domdec_specat_comm_t *dcc,
                     t_ilist *il_local,
                     ind_req_t *ireq)
{
    int            a1_gl, a2_gl, a_loc, i, coni, b;
    const t_iatom *iap;

    if (dc->gc_req[con_offset+con] == 0)
    {
        /* Add this non-home constraint to the list */
        if (dc->ncon+1 > dc->con_nalloc)
        {
            dc->con_nalloc = over_alloc_large(dc->ncon+1);
            srenew(dc->con_gl, dc->con_nalloc);
            srenew(dc->con_nlocat, dc->con_nalloc);
        }
        dc->con_gl[dc->ncon]       = con_offset + con;
        dc->con_nlocat[dc->ncon]   = (bHomeConnect ? 1 : 0);
        dc->gc_req[con_offset+con] = 1;
        if (il_local->nr + 3 > il_local->nalloc)
        {
            il_local->nalloc = over_alloc_dd(il_local->nr+3);
            srenew(il_local->iatoms, il_local->nalloc);
        }
        iap = constr_iatomptr(ncon1, ia1, ia2, con);
        il_local->iatoms[il_local->nr++] = iap[0];
        a1_gl = offset + iap[1];
        a2_gl = offset + iap[2];
        /* The following indexing code can probably be optimized */
        if (ga2la_get_home(ga2la, a1_gl, &a_loc))
        {
            il_local->iatoms[il_local->nr++] = a_loc;
        }
        else
        {
            /* We set this index later */
            il_local->iatoms[il_local->nr++] = -a1_gl - 1;
        }
        if (ga2la_get_home(ga2la, a2_gl, &a_loc))
        {
            il_local->iatoms[il_local->nr++] = a_loc;
        }
        else
        {
            /* We set this index later */
            il_local->iatoms[il_local->nr++] = -a2_gl - 1;
        }
        dc->ncon++;
    }
    /* Check to not ask for the same atom more than once */
    if (gmx_hash_get_minone(dc->ga2la, offset+a) == -1)
    {
        assert(dcc);
        /* Add this non-home atom to the list */
        if (ireq->n+1 > ireq->nalloc)
        {
            ireq->nalloc = over_alloc_large(ireq->n+1);
            srenew(ireq->ind, ireq->nalloc);
        }
        ireq->ind[ireq->n++] = offset + a;
        /* Temporarily mark with -2, we get the index later */
        gmx_hash_set(dc->ga2la, offset+a, -2);
    }

    if (nrec > 0)
    {
        for (i = at2con->index[a]; i < at2con->index[a+1]; i++)
        {
            coni = at2con->a[i];
            if (coni != con)
            {
                /* Walk further */
                iap = constr_iatomptr(ncon1, ia1, ia2, coni);
                if (a == iap[1])
                {
                    b = iap[2];
                }
                else
                {
                    b = iap[1];
                }
                if (!ga2la_get_home(ga2la, offset+b, &a_loc))
                {
                    walk_out(coni, con_offset, b, offset, nrec-1,
                             ncon1, ia1, ia2, at2con,
                             ga2la, FALSE, dc, dcc, il_local, ireq);
                }
            }
        }
    }
}
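walk_out() stores atoms that are not yet local as -a_gl - 1, so any negative entry in iatoms can later be decoded back to its global index; a two-line sketch of that encoding:

static int encode_pending_global(int a_gl) { return -a_gl - 1; }   /* always negative */
static int decode_pending_global(int code) { return -code - 1; }   /* exact inverse   */
/* e.g. encode_pending_global(5) == -6 and decode_pending_global(-6) == 5,
 * while genuine local indices (>= 0) are stored as-is. */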
/*! \brief Looks up SETTLE constraints for a range of charge-groups */
static void atoms_to_settles(gmx_domdec_t *dd,
                             const gmx_mtop_t *mtop,
                             const int *cginfo,
                             const int **at2settle_mt,
                             int cg_start, int cg_end,
                             t_ilist *ils_local,
                             ind_req_t *ireq)
{
    gmx_ga2la_t            *ga2la;
    gmx_mtop_atomlookup_t   alook;
    int                     settle;
    int                     nral, sa;
    int                     cg, a, a_gl, a_glsa, a_gls[3], a_locs[3];
    int                     mb, molnr, a_mol, offset;
    const gmx_molblock_t   *molb;
    const t_iatom          *ia1;
    gmx_bool                a_home[3];
    int                     nlocal;
    gmx_bool                bAssign;

    ga2la  = dd->ga2la;

    alook = gmx_mtop_atomlookup_settle_init(mtop);

    nral = NRAL(F_SETTLE);

    for (cg = cg_start; cg < cg_end; cg++)
    {
        if (GET_CGINFO_SETTLE(cginfo[cg]))
        {
            for (a = dd->cgindex[cg]; a < dd->cgindex[cg+1]; a++)
            {
                a_gl = dd->gatindex[a];

                gmx_mtop_atomnr_to_molblock_ind(alook, a_gl, &mb, &molnr, &a_mol);
                molb = &mtop->molblock[mb];

                settle = at2settle_mt[molb->type][a_mol];

                if (settle >= 0)
                {
                    offset = a_gl - a_mol;

                    ia1 = mtop->moltype[molb->type].ilist[F_SETTLE].iatoms;

                    bAssign = FALSE;
                    nlocal  = 0;
                    for (sa = 0; sa < nral; sa++)
                    {
                        a_glsa     = offset + ia1[settle*(1+nral)+1+sa];
                        a_gls[sa]  = a_glsa;
                        a_home[sa] = ga2la_get_home(ga2la, a_glsa, &a_locs[sa]);
                        if (a_home[sa])
                        {
                            if (nlocal == 0 && a_gl == a_glsa)
                            {
                                bAssign = TRUE;
                            }
                            nlocal++;
                        }
                    }

                    if (bAssign)
                    {
                        if (ils_local->nr+1+nral > ils_local->nalloc)
                        {
                            ils_local->nalloc = over_alloc_dd(ils_local->nr+1+nral);
                            srenew(ils_local->iatoms, ils_local->nalloc);
                        }

                        ils_local->iatoms[ils_local->nr++] = ia1[settle*4];

                        for (sa = 0; sa < nral; sa++)
                        {
                            if (ga2la_get_home(ga2la, a_gls[sa], &a_locs[sa]))
                            {
                                ils_local->iatoms[ils_local->nr++] = a_locs[sa];
                            }
                            else
                            {
                                ils_local->iatoms[ils_local->nr++] = -a_gls[sa] - 1;
                                /* Add this non-home atom to the list */
                                if (ireq->n+1 > ireq->nalloc)
                                {
                                    ireq->nalloc = over_alloc_large(ireq->n+1);
                                    srenew(ireq->ind, ireq->nalloc);
                                }
                                ireq->ind[ireq->n++] = a_gls[sa];
                                /* A check on double atom requests is
                                 * not required for settle.
                                 */
                            }
                        }
                    }
                }
            }
        }
    }

    gmx_mtop_atomlookup_destroy(alook);
}
static void dd_pmeredist_pos_coeffs(struct gmx_pme_t *pme,
                                    int n, gmx_bool bX, rvec *x, real *data,
                                    pme_atomcomm_t *atc)
{
    int *commnode, *buf_index;
    int  nnodes_comm, i, nsend, local_pos, buf_pos, node, scount, rcount;

    commnode  = atc->node_dest;
    buf_index = atc->buf_index;

    nnodes_comm = min(2*atc->maxshift, atc->nslab-1);

    nsend = 0;
    for (i = 0; i < nnodes_comm; i++)
    {
        buf_index[commnode[i]] = nsend;
        nsend                 += atc->count[commnode[i]];
    }
    if (bX)
    {
        if (atc->count[atc->nodeid] + nsend != n)
        {
            gmx_fatal(FARGS, "%d particles communicated to PME rank %d are more than 2/3 times the cut-off out of the domain decomposition cell of their charge group in dimension %c.\n"
                      "This usually means that your system is not well equilibrated.",
                      n - (atc->count[atc->nodeid] + nsend),
                      pme->nodeid, 'x'+atc->dimind);
        }

        if (nsend > pme->buf_nalloc)
        {
            pme->buf_nalloc = over_alloc_dd(nsend);
            srenew(pme->bufv, pme->buf_nalloc);
            srenew(pme->bufr, pme->buf_nalloc);
        }

        atc->n = atc->count[atc->nodeid];
        for (i = 0; i < nnodes_comm; i++)
        {
            scount = atc->count[commnode[i]];
            /* Communicate the count */
            if (debug)
            {
                fprintf(debug, "dimind %d PME rank %d send to rank %d: %d\n",
                        atc->dimind, atc->nodeid, commnode[i], scount);
            }
            pme_dd_sendrecv(atc, FALSE, i,
                            &scount, sizeof(int),
                            &atc->rcount[i], sizeof(int));
            atc->n += atc->rcount[i];
        }

        pme_realloc_atomcomm_things(atc);
    }

    local_pos = 0;
    for (i = 0; i < n; i++)
    {
        node = atc->pd[i];
        if (node == atc->nodeid)
        {
            /* Copy direct to the receive buffer */
            if (bX)
            {
                copy_rvec(x[i], atc->x[local_pos]);
            }
            atc->coefficient[local_pos] = data[i];
            local_pos++;
        }
        else
        {
            /* Copy to the send buffer */
            if (bX)
            {
                copy_rvec(x[i], pme->bufv[buf_index[node]]);
            }
            pme->bufr[buf_index[node]] = data[i];
            buf_index[node]++;
        }
    }

    buf_pos = 0;
    for (i = 0; i < nnodes_comm; i++)
    {
        scount = atc->count[commnode[i]];
        rcount = atc->rcount[i];
        if (scount > 0 || rcount > 0)
        {
            if (bX)
            {
                /* Communicate the coordinates */
                pme_dd_sendrecv(atc, FALSE, i,
                                pme->bufv[buf_pos], scount*sizeof(rvec),
                                atc->x[local_pos], rcount*sizeof(rvec));
            }
            /* Communicate the coefficients */
            pme_dd_sendrecv(atc, FALSE, i,
                            pme->bufr+buf_pos, scount*sizeof(real),
                            atc->coefficient+local_pos, rcount*sizeof(real));
            buf_pos   += scount;
            local_pos += atc->rcount[i];
        }
    }
}
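dd_pmeredist_pos_coeffs() above first turns the per-destination counts into buffer offsets and then scatters every element into its destination's slice of one contiguous send buffer; a self-contained sketch of that bucketing step (all names illustrative):

/* Illustrative sketch only; offset[] is caller-provided scratch of size ndest. */
static void sketch_bucket_by_dest(const double *data, const int *dest, int n,
                                  const int *count, int ndest,
                                  double *sendbuf, int *offset)
{
    int pos = 0, d, i;

    /* Prefix sums: slice d of sendbuf starts at offset[d] */
    for (d = 0; d < ndest; d++)
    {
        offset[d] = pos;
        pos      += count[d];
    }
    /* Scatter each element into its destination's slice */
    for (i = 0; i < n; i++)
    {
        sendbuf[offset[dest[i]]++] = data[i];
    }
}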
/*! \brief Looks up constraint for the local atoms */
static void atoms_to_constraints(gmx_domdec_t *dd,
                                 const gmx_mtop_t *mtop,
                                 const int *cginfo,
                                 const t_blocka *at2con_mt, int nrec,
                                 t_ilist *ilc_local,
                                 ind_req_t *ireq)
{
    const t_blocka             *at2con;
    gmx_ga2la_t                *ga2la;
    gmx_mtop_atomlookup_t       alook;
    int                         ncon1;
    gmx_molblock_t             *molb;
    t_iatom                    *ia1, *ia2, *iap;
    int                         nhome, cg, a, a_gl, a_mol, a_loc, b_lo, offset, mb, molnr, b_mol, i, con, con_offset;
    gmx_domdec_constraints_t   *dc;
    gmx_domdec_specat_comm_t   *dcc;

    dc  = dd->constraints;
    dcc = dd->constraint_comm;

    ga2la  = dd->ga2la;

    alook = gmx_mtop_atomlookup_init(mtop);

    nhome = 0;
    for (cg = 0; cg < dd->ncg_home; cg++)
    {
        if (GET_CGINFO_CONSTR(cginfo[cg]))
        {
            for (a = dd->cgindex[cg]; a < dd->cgindex[cg+1]; a++)
            {
                a_gl = dd->gatindex[a];

                gmx_mtop_atomnr_to_molblock_ind(alook, a_gl, &mb, &molnr, &a_mol);
                molb = &mtop->molblock[mb];

                ncon1 = mtop->moltype[molb->type].ilist[F_CONSTR].nr/NRAL(F_SETTLE);

                ia1 = mtop->moltype[molb->type].ilist[F_CONSTR].iatoms;
                ia2 = mtop->moltype[molb->type].ilist[F_CONSTRNC].iatoms;

                /* Calculate the global constraint number offset for the molecule.
                 * This is only required for the global index to make sure
                 * that we use each constraint only once.
                 */
                con_offset =
                    dc->molb_con_offset[mb] + molnr*dc->molb_ncon_mol[mb];

                /* The global atom number offset for this molecule */
                offset = a_gl - a_mol;
                at2con = &at2con_mt[molb->type];
                for (i = at2con->index[a_mol]; i < at2con->index[a_mol+1]; i++)
                {
                    con = at2con->a[i];
                    iap = constr_iatomptr(ncon1, ia1, ia2, con);
                    if (a_mol == iap[1])
                    {
                        b_mol = iap[2];
                    }
                    else
                    {
                        b_mol = iap[1];
                    }
                    if (ga2la_get_home(ga2la, offset+b_mol, &a_loc))
                    {
                        /* Add this fully home constraint at the first atom */
                        if (a_mol < b_mol)
                        {
                            if (dc->ncon+1 > dc->con_nalloc)
                            {
                                dc->con_nalloc = over_alloc_large(dc->ncon+1);
                                srenew(dc->con_gl, dc->con_nalloc);
                                srenew(dc->con_nlocat, dc->con_nalloc);
                            }
                            dc->con_gl[dc->ncon]     = con_offset + con;
                            dc->con_nlocat[dc->ncon] = 2;
                            if (ilc_local->nr + 3 > ilc_local->nalloc)
                            {
                                ilc_local->nalloc = over_alloc_dd(ilc_local->nr + 3);
                                srenew(ilc_local->iatoms, ilc_local->nalloc);
                            }
                            b_lo = a_loc;
                            ilc_local->iatoms[ilc_local->nr++] = iap[0];
                            ilc_local->iatoms[ilc_local->nr++] = (a_gl == iap[1] ? a    : b_lo);
                            ilc_local->iatoms[ilc_local->nr++] = (a_gl == iap[1] ? b_lo : a   );
                            dc->ncon++;
                            nhome++;
                        }
                    }
                    else
                    {
                        /* We need the nrec constraints coupled to this constraint,
                         * so we need to walk out of the home cell by nrec+1 atoms,
                         * since atom b_mol is already not locally present.
                         * Therefore we call walk_out with nrec recursions to go
                         * after this first call.
                         */
                        walk_out(con, con_offset, b_mol, offset, nrec,
                                 ncon1, ia1, ia2, at2con,
                                 dd->ga2la, TRUE, dc, dcc, ilc_local, ireq);
                    }
                }
            }
        }
    }

    gmx_mtop_atomlookup_destroy(alook);

    if (debug)
    {
        fprintf(debug,
                "Constraints: home %3d border %3d atoms: %3d\n",
                nhome, dc->ncon-nhome,
                dd->constraint_comm ? ireq->n : 0);
    }
}
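Finally, the global constraint numbering that atoms_to_constraints() relies on: a per-block running offset plus a fixed per-molecule stride makes every constraint index unique across the whole system. A short sketch using the array names from the listing:

/* Illustrative sketch only. */
static int sketch_global_con_index(const int *molb_con_offset,
                                   const int *molb_ncon_mol,
                                   int mb, int molnr, int con_in_mol)
{
    /* constraints in earlier blocks + whole molecules before this one + index within the molecule */
    return molb_con_offset[mb] + molnr*molb_ncon_mol[mb] + con_in_mol;
}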