void DihedralCharmmIntel::pack_force_const(ForceConst<flt_t> &fc,
                                           IntelBuffers<flt_t,acc_t> *buffers)
{
  const int tp1 = atom->ntypes + 1;
  const int bp1 = atom->ndihedraltypes + 1;
  fc.set_ntypes(tp1,bp1,memory);
  buffers->set_ntypes(tp1);

  for (int i = 0; i < tp1; i++) {
    for (int j = 0; j < tp1; j++) {
      fc.ljp[i][j].lj1 = lj14_1[i][j];
      fc.ljp[i][j].lj2 = lj14_2[i][j];
      fc.ljp[i][j].lj3 = lj14_3[i][j];
      fc.ljp[i][j].lj4 = lj14_4[i][j];
    }
  }

  for (int i = 0; i < bp1; i++) {
    fc.bp[i].multiplicity = multiplicity[i];
    fc.bp[i].cos_shift = cos_shift[i];
    fc.bp[i].sin_shift = sin_shift[i];
    fc.bp[i].k = k[i];
    fc.weight[i] = weight[i];
  }
}
void PairBuckCoulCutIntel::pack_force_const(ForceConst<flt_t> &fc,
                                            IntelBuffers<flt_t,acc_t> *buffers)
{
  int tp1 = atom->ntypes + 1;
  int ntable = 1;
  if (ncoultablebits)
    for (int i = 0; i < ncoultablebits; i++) ntable *= 2;

  fc.set_ntypes(tp1, ntable, memory, _cop);

  // Repeat cutsq calculation because done after call to init_style
  for (int i = 1; i <= atom->ntypes; i++) {
    for (int j = i; j <= atom->ntypes; j++) {
      double cut;
      if (setflag[i][j] != 0 || (setflag[i][i] != 0 && setflag[j][j] != 0))
        cut = init_one(i, j);
      else
        cut = 0.0;
      cutsq[i][j] = cutsq[j][i] = cut*cut;
    }
  }

  for (int i = 0; i < 4; i++) {
    fc.special_lj[i] = force->special_lj[i];
    fc.special_coul[i] = force->special_coul[i];
    fc.special_coul[0] = 1.0;
    fc.special_lj[0] = 1.0;
  }

  for (int i = 1; i < tp1; i++) {
    for (int j = 1; j < tp1; j++) {
      fc.c_cut[i][j].cutsq = cutsq[i][j];
      fc.c_cut[i][j].cut_ljsq = cut_ljsq[i][j];
      fc.c_cut[i][j].cut_coulsq = cut_coulsq[i][j];
      fc.c_force[i][j].buck1 = buck1[i][j];
      fc.c_force[i][j].buck2 = buck2[i][j];
      fc.c_force[i][j].rhoinv = rhoinv[i][j];
      fc.c_energy[i][j].a = a[i][j];
      fc.c_energy[i][j].c = c[i][j];
      fc.c_energy[i][j].offset = offset[i][j];
    }
  }

#ifdef _LMP_INTEL_OFFLOAD
  if (_cop < 0) return;
  flt_t * special_lj = fc.special_lj;
  flt_t * special_coul = fc.special_coul;
  C_FORCE_T * c_force = fc.c_force[0];
  C_ENERGY_T * c_energy = fc.c_energy[0];
  C_CUT_T * c_cut = fc.c_cut[0];
  int tp1sq = tp1 * tp1;
  #pragma offload_transfer target(mic:_cop) \
    in(special_lj, special_coul: length(4) alloc_if(0) free_if(0)) \
    in(c_force, c_energy, c_cut: length(tp1sq) alloc_if(0) free_if(0))
#endif
}
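The `ntable` loop above (repeated in the coul/long variants below) simply computes an integer power of two, 2^ncoultablebits, for sizing the Coulomb interpolation table. A minimal standalone sketch of the same computation, assuming only that `ncoultablebits` is a small non-negative count (the LAMMPS default set via `pair_modify table` is 12):

#include <cassert>

// Sketch: the loop in pack_force_const computes ntable = 2^ncoultablebits,
// equivalent to a left shift for non-negative bit counts.
static int table_size(int ncoultablebits) {
  int ntable = 1;
  for (int i = 0; i < ncoultablebits; i++) ntable *= 2;
  return ntable;                   // same value as 1 << ncoultablebits
}

int main() {
  assert(table_size(0) == 1);      // ncoultablebits == 0: table code path unused
  assert(table_size(12) == 4096);  // 12 bits -> 4096 table entries
  return 0;
}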
void ImproperHarmonicIntel::pack_force_const(ForceConst<flt_t> &fc,
                                             IntelBuffers<flt_t,acc_t> *buffers)
{
  const int bp1 = atom->nimpropertypes + 1;
  fc.set_ntypes(bp1,memory);

  for (int i = 0; i < bp1; i++) {
    fc.fc[i].k = k[i];
    fc.fc[i].chi = chi[i];
  }
}
void AngleHarmonicIntel::pack_force_const(ForceConst<flt_t> &fc,
                                          IntelBuffers<flt_t,acc_t> *buffers)
{
  const int bp1 = atom->nangletypes + 1;
  fc.set_ntypes(bp1,memory);

  for (int i = 0; i < bp1; i++) {
    fc.fc[i].k = k[i];
    fc.fc[i].theta0 = theta0[i];
  }
}
void BondHarmonicIntel::pack_force_const(ForceConst<flt_t> &fc,
                                         IntelBuffers<flt_t,acc_t> *buffers)
{
  const int bp1 = atom->nbondtypes + 1;
  fc.set_ntypes(bp1,memory);

  for (int i = 1; i < bp1; i++) {
    fc.fc[i].k = k[i];
    fc.fc[i].r0 = r0[i];
  }
}
void PairBuckIntel::pack_force_const(ForceConst<flt_t> &fc,
                                     IntelBuffers<flt_t,acc_t> *buffers)
{
  int tp1 = atom->ntypes + 1;

  fc.set_ntypes(tp1, memory, _cop);
  buffers->set_ntypes(tp1);
  flt_t **cutneighsq = buffers->get_cutneighsq();

  // Repeat cutsq calculation because done after call to init_style
  double cut, cutneigh;
  for (int i = 1; i <= atom->ntypes; i++) {
    for (int j = i; j <= atom->ntypes; j++) {
      if (setflag[i][j] != 0 || (setflag[i][i] != 0 && setflag[j][j] != 0)) {
        cut = init_one(i, j);
        cutneigh = cut + neighbor->skin;
        cutsq[i][j] = cutsq[j][i] = cut*cut;
        cutneighsq[i][j] = cutneighsq[j][i] = cutneigh * cutneigh;
      }
    }
  }

  for (int i = 0; i < 4; i++) {
    fc.special_lj[i] = force->special_lj[i];
    fc.special_lj[0] = 1.0;
  }

  for (int i = 0; i < tp1; i++) {
    for (int j = 0; j < tp1; j++) {
      fc.c_force[i][j].buck1 = buck1[i][j];
      fc.c_force[i][j].buck2 = buck2[i][j];
      fc.c_force[i][j].rhoinv = rhoinv[i][j];
      fc.c_force[i][j].cutsq = cutsq[i][j];
      fc.c_energy[i][j].a = a[i][j];
      fc.c_energy[i][j].c = c[i][j];
      fc.c_energy[i][j].offset = offset[i][j];
    }
  }

#ifdef _LMP_INTEL_OFFLOAD
  if (_cop < 0) return;
  flt_t * special_lj = fc.special_lj;
  C_FORCE_T * c_force = fc.c_force[0];
  C_ENERGY_T * c_energy = fc.c_energy[0];
  flt_t * ocutneighsq = cutneighsq[0];
  int tp1sq = tp1 * tp1;
  #pragma offload_transfer target(mic:_cop) \
    in(special_lj: length(4) alloc_if(0) free_if(0)) \
    in(c_force, c_energy: length(tp1sq) alloc_if(0) free_if(0)) \
    in(ocutneighsq: length(tp1sq) alloc_if(0) free_if(0))
#endif
}
void DihedralOPLSIntel::pack_force_const(ForceConst<flt_t> &fc,
                                         IntelBuffers<flt_t,acc_t> *buffers)
{
  const int bp1 = atom->ndihedraltypes + 1;
  fc.set_ntypes(bp1,memory);

  for (int i = 0; i < bp1; i++) {
    fc.bp[i].k1 = k1[i];
    fc.bp[i].k2 = k2[i];
    fc.bp[i].k3 = k3[i];
    fc.bp[i].k4 = k4[i];
  }
}
void DihedralHarmonicIntel::pack_force_const(ForceConst<flt_t> &fc,
                                             IntelBuffers<flt_t,acc_t> * /*buffers*/)
{
  const int bp1 = atom->ndihedraltypes + 1;
  fc.set_ntypes(bp1,memory);

  for (int i = 1; i < bp1; i++) {
    fc.bp[i].multiplicity = multiplicity[i];
    fc.bp[i].cos_shift = cos_shift[i];
    fc.bp[i].sin_shift = sin_shift[i];
    fc.bp[i].k = k[i];
  }
}
void AngleCharmmIntel::pack_force_const(ForceConst<flt_t> &fc,
                                        IntelBuffers<flt_t,acc_t> *buffers)
{
  // Size by the number of angle types (the original listing used
  // ndihedraltypes here, which is wrong for an angle style)
  const int bp1 = atom->nangletypes + 1;
  fc.set_ntypes(bp1,memory);

  for (int i = 0; i < bp1; i++) {
    fc.fc[i].k = k[i];
    fc.fc[i].theta0 = theta0[i];
    fc.fc[i].k_ub = k_ub[i];
    fc.fc[i].r_ub = r_ub[i];
  }
}
void BondFENEIntel::pack_force_const(ForceConst<flt_t> &fc,
                                     IntelBuffers<flt_t,acc_t> * /*buffers*/)
{
  const int bp1 = atom->nbondtypes + 1;
  fc.set_ntypes(bp1,memory);

  for (int i = 1; i < bp1; i++) {
    fc.fc[i].k = k[i];
    fc.fc[i].ir0sq = 1.0 / (r0[i] * r0[i]);
    fc.fc[i].sigma = sigma[i];
    fc.fc[i].epsilon = epsilon[i];
  }
}
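Packing `1.0 / (r0[i] * r0[i])` instead of `r0` lets the bond kernel form the FENE logarithm argument with a multiply rather than a divide per bond. A minimal sketch of that use, with hypothetical coefficients and bond length (the values below are made up for illustration, not taken from the listing):

#include <cstdio>

int main() {
  // Hypothetical FENE coefficients; ir0sq is what pack_force_const stores.
  const double k = 30.0, r0 = 1.5;
  const double ir0sq = 1.0 / (r0 * r0);
  const double rsq = 1.2 * 1.2;              // squared bond length
  const double rlogarg = 1.0 - rsq * ir0sq;  // multiply instead of divide per bond
  const double fbond = -k / rlogarg;         // attractive FENE term, force divided by r
  printf("rlogarg = %g, fbond/r = %g\n", rlogarg, fbond);
  return 0;
}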
void PairLJCutCoulLongIntel::pack_force_const(ForceConst<flt_t> &fc,
                                              IntelBuffers<flt_t,acc_t> *buffers)
{
  int tp1 = atom->ntypes + 1;
  int ntable = 1;
  if (ncoultablebits)
    for (int i = 0; i < ncoultablebits; i++) ntable *= 2;

  fc.set_ntypes(tp1, ntable, memory, _cop);
  buffers->set_ntypes(tp1);
  flt_t **cutneighsq = buffers->get_cutneighsq();

  // Repeat cutsq calculation because done after call to init_style
  double cut, cutneigh;
  for (int i = 1; i <= atom->ntypes; i++) {
    for (int j = i; j <= atom->ntypes; j++) {
      if (setflag[i][j] != 0 || (setflag[i][i] != 0 && setflag[j][j] != 0)) {
        cut = init_one(i, j);
        cutneigh = cut + neighbor->skin;
        cutsq[i][j] = cutsq[j][i] = cut*cut;
        cutneighsq[i][j] = cutneighsq[j][i] = cutneigh * cutneigh;
      }
    }
  }

  fc.g_ewald = force->kspace->g_ewald;
  fc.tabinnersq = tabinnersq;

  for (int i = 0; i < 4; i++) {
    fc.special_lj[i] = force->special_lj[i];
    fc.special_coul[i] = force->special_coul[i];
    fc.special_coul[0] = 1.0;
    fc.special_lj[0] = 1.0;
  }

  for (int i = 0; i < tp1; i++) {
    for (int j = 0; j < tp1; j++) {
      fc.c_force[i][j].cutsq = cutsq[i][j];
      fc.c_force[i][j].cut_ljsq = cut_ljsq[i][j];
      fc.c_force[i][j].lj1 = lj1[i][j];
      fc.c_force[i][j].lj2 = lj2[i][j];
      fc.c_energy[i][j].lj3 = lj3[i][j];
      fc.c_energy[i][j].lj4 = lj4[i][j];
      fc.c_energy[i][j].offset = offset[i][j];
    }
  }

  if (ncoultablebits) {
    for (int i = 0; i < ntable; i++) {
      fc.table[i].r = rtable[i];
      fc.table[i].dr = drtable[i];
      fc.table[i].f = ftable[i];
      fc.table[i].df = dftable[i];
      fc.etable[i] = etable[i];
      fc.detable[i] = detable[i];
      fc.ctable[i] = ctable[i];
      fc.dctable[i] = dctable[i];
    }
  }

#ifdef _LMP_INTEL_OFFLOAD
  if (_cop < 0) return;
  flt_t * special_lj = fc.special_lj;
  flt_t * special_coul = fc.special_coul;
  C_FORCE_T * c_force = fc.c_force[0];
  C_ENERGY_T * c_energy = fc.c_energy[0];
  TABLE_T * table = fc.table;
  flt_t * etable = fc.etable;
  flt_t * detable = fc.detable;
  flt_t * ctable = fc.ctable;
  flt_t * dctable = fc.dctable;
  flt_t * ocutneighsq = cutneighsq[0];
  int tp1sq = tp1 * tp1;
  #pragma offload_transfer target(mic:_cop) \
    in(special_lj, special_coul: length(4) alloc_if(0) free_if(0)) \
    in(c_force, c_energy: length(tp1sq) alloc_if(0) free_if(0)) \
    in(table: length(ntable) alloc_if(0) free_if(0)) \
    in(etable,detable,ctable,dctable: length(ntable) alloc_if(0) free_if(0)) \
    in(ocutneighsq: length(tp1sq) alloc_if(0) free_if(0))
#endif
}
void PairSWIntel::pack_force_const(ForceConst<flt_t> &fc,
                                   IntelBuffers<flt_t,acc_t> *buffers)
{
  int off_ccache = 0;
#ifdef _LMP_INTEL_OFFLOAD
  if (_cop >= 0) off_ccache = 1;
#endif
  buffers->grow_ccache(off_ccache, comm->nthreads);
  _ccache_stride = buffers->ccache_stride();

  int tp1 = atom->ntypes + 1;
  fc.set_ntypes(tp1,memory,_cop);
  buffers->set_ntypes(tp1);
  flt_t **cutneighsq = buffers->get_cutneighsq();

  // Repeat cutsq calculation because done after call to init_style
  double cut, cutneigh;
  for (int i = 1; i <= atom->ntypes; i++) {
    for (int j = i; j <= atom->ntypes; j++) {
      if (setflag[i][j] != 0 || (setflag[i][i] != 0 && setflag[j][j] != 0)) {
        cut = init_one(i,j);
        cutneigh = cut + neighbor->skin;
        cutsq[i][j] = cutsq[j][i] = cut*cut;
        cutneighsq[i][j] = cutneighsq[j][i] = cutneigh * cutneigh;
      }
    }
  }

  _spq = 1;
  for (int ii = 0; ii < tp1; ii++) {
    int i = map[ii];
    for (int jj = 0; jj < tp1; jj++) {
      int j = map[jj];
      if (i < 0 || j < 0 || ii == 0 || jj == 0) {
        fc.p2[ii][jj].cutsq = 0;
        fc.p2[ii][jj].cut = 0;
        fc.p2[ii][jj].sigma_gamma = 0;
        fc.p2f[ii][jj].cut = 0;
        fc.p2f[ii][jj].powerp = 0;
        fc.p2f[ii][jj].powerq = 0;
        fc.p2f[ii][jj].sigma = 0;
        fc.p2f[ii][jj].c1 = 0;
        fc.p2f[ii][jj].c2 = 0;
        fc.p2f[ii][jj].c3 = 0;
        fc.p2f[ii][jj].c4 = 0;
        fc.p2e[ii][jj].c5 = 0;
        fc.p2e[ii][jj].c6 = 0;
      } else {
        int ijparam = elem2param[i][j][j];
        fc.p2[ii][jj].cutsq = params[ijparam].cutsq;
        fc.p2[ii][jj].cut = params[ijparam].cut;
        fc.p2[ii][jj].sigma_gamma = params[ijparam].sigma_gamma;
        fc.p2f[ii][jj].cut = params[ijparam].cut;
        fc.p2f[ii][jj].powerp = params[ijparam].powerp;
        fc.p2f[ii][jj].powerq = params[ijparam].powerq;
        fc.p2f[ii][jj].sigma = params[ijparam].sigma;
        fc.p2f[ii][jj].c1 = params[ijparam].c1;
        fc.p2f[ii][jj].c2 = params[ijparam].c2;
        fc.p2f[ii][jj].c3 = params[ijparam].c3;
        fc.p2f[ii][jj].c4 = params[ijparam].c4;
        fc.p2e[ii][jj].c5 = params[ijparam].c5;
        fc.p2e[ii][jj].c6 = params[ijparam].c6;

        double cutcut = params[ijparam].cut * params[ijparam].cut;
        if (params[ijparam].cutsq >= cutcut)
          fc.p2[ii][jj].cutsq *= 0.98;

        if (params[ijparam].powerp != 4.0 || params[ijparam].powerq != 0.0)
          _spq = 0;
      }
      for (int kk = 0; kk < tp1; kk++) {
        int k = map[kk];
        if (i < 0 || j < 0 || k < 0 || ii == 0 || jj == 0 || kk == 0) {
          fc.p3[ii][jj][kk].costheta = 0;
          fc.p3[ii][jj][kk].lambda_epsilon = 0;
          fc.p3[ii][jj][kk].lambda_epsilon2 = 0;
        } else {
          int ijkparam = elem2param[i][j][k];
          fc.p3[ii][jj][kk].costheta = params[ijkparam].costheta;
          fc.p3[ii][jj][kk].lambda_epsilon = params[ijkparam].lambda_epsilon;
          fc.p3[ii][jj][kk].lambda_epsilon2 = params[ijkparam].lambda_epsilon2;
        }
      }
    }
  }

  _host_pad = 1;
  _offload_pad = 1;

  if (INTEL_NBOR_PAD > 1)
    _host_pad = INTEL_NBOR_PAD * sizeof(float) / sizeof(flt_t);

#ifdef _LMP_INTEL_OFFLOAD
  if (_cop < 0) return;
  FC_PACKED0_T *op2 = fc.p2[0];
  FC_PACKED1_T *op2f = fc.p2f[0];
  FC_PACKED2_T *op2e = fc.p2e[0];
  FC_PACKED3_T *op3 = fc.p3[0][0];
  flt_t * ocutneighsq = cutneighsq[0];
  int tp1sq = tp1 * tp1;
  int tp1cu = tp1sq * tp1;
  if (op2 != NULL && op2f != NULL && op2e != NULL && op3 != NULL &&
      ocutneighsq != NULL) {
    #pragma offload_transfer target(mic:_cop) \
      in(op2,op2f,op2e: length(tp1sq) alloc_if(0) free_if(0)) \
      in(op3: length(tp1cu) alloc_if(0) free_if(0)) \
      in(ocutneighsq: length(tp1sq))
  }
#endif
}
void PairLJCharmmCoulLongIntel::pack_force_const(ForceConst<flt_t> &fc,
                                                 IntelBuffers<flt_t,acc_t> *buffers)
{
  int tp1 = atom->ntypes + 1;
  int ntable = 1;
  if (ncoultablebits)
    for (int i = 0; i < ncoultablebits; i++) ntable *= 2;

  fc.set_ntypes(tp1, ntable, memory, _cop);
  buffers->set_ntypes(tp1);
  flt_t **cutneighsq = buffers->get_cutneighsq();

  // Repeat cutsq calculation because done after call to init_style
  double cut, cutneigh;
  if (cut_lj > cut_coul)
    error->all(FLERR,
      "Intel variant of lj/charmm/coul/long expects lj cutoff<=coulombic");
  for (int i = 1; i <= atom->ntypes; i++) {
    for (int j = i; j <= atom->ntypes; j++) {
      if (setflag[i][j] != 0 || (setflag[i][i] != 0 && setflag[j][j] != 0)) {
        cut = init_one(i, j);
        cutneigh = cut + neighbor->skin;
        cutsq[i][j] = cutsq[j][i] = cut*cut;
        cutneighsq[i][j] = cutneighsq[j][i] = cutneigh * cutneigh;
      }
    }
  }

  cut_lj_innersq = cut_lj_inner * cut_lj_inner;
  cut_ljsq = cut_lj * cut_lj;
  cut_coulsq = cut_coul * cut_coul;
  cut_bothsq = MAX(cut_ljsq, cut_coulsq);

  fc.g_ewald = force->kspace->g_ewald;
  fc.tabinnersq = tabinnersq;
  fc.cut_coulsq = cut_coulsq;
  fc.cut_ljsq = cut_ljsq;
  fc.cut_lj_innersq = cut_lj_innersq;

  for (int i = 0; i < 4; i++) {
    fc.special_lj[i] = force->special_lj[i];
    fc.special_coul[i] = force->special_coul[i];
    fc.special_coul[0] = 1.0;
    fc.special_lj[0] = 1.0;
  }

  for (int i = 0; i < tp1; i++) {
    for (int j = 0; j < tp1; j++) {
      fc.lj[i][j].x = lj1[i][j];
      fc.lj[i][j].y = lj2[i][j];
      fc.lj[i][j].z = lj3[i][j];
      fc.lj[i][j].w = lj4[i][j];
      fc.cutsq[i][j] = cutsq[i][j];
    }
  }

  if (ncoultablebits) {
    for (int i = 0; i < ntable; i++) {
      fc.table[i].r = rtable[i];
      fc.table[i].dr = drtable[i];
      fc.table[i].f = ftable[i];
      fc.table[i].df = dftable[i];
      fc.etable[i] = etable[i];
      fc.detable[i] = detable[i];
      fc.ctable[i] = ctable[i];
      fc.dctable[i] = dctable[i];
    }
  }

#ifdef _LMP_INTEL_OFFLOAD
  if (_cop < 0) return;
  flt_t * special_lj = fc.special_lj;
  flt_t * special_coul = fc.special_coul;
  flt_t * cutsq = fc.cutsq[0];
  LJ_T * lj = fc.lj[0];
  TABLE_T * table = fc.table;
  flt_t * etable = fc.etable;
  flt_t * detable = fc.detable;
  flt_t * ctable = fc.ctable;
  flt_t * dctable = fc.dctable;
  flt_t * ocutneighsq = cutneighsq[0];
  int tp1sq = tp1 * tp1;
  #pragma offload_transfer target(mic:_cop) \
    in(special_lj, special_coul: length(4) alloc_if(0) free_if(0)) \
    in(cutsq,lj: length(tp1sq) alloc_if(0) free_if(0)) \
    in(table: length(ntable) alloc_if(0) free_if(0)) \
    in(etable,detable,ctable,dctable: length(ntable) alloc_if(0) free_if(0)) \
    in(ocutneighsq: length(tp1sq) alloc_if(0) free_if(0))
#endif
}
void PairBuckCoulLongIntel::pack_force_const(ForceConst<flt_t> &fc,
                                             IntelBuffers<flt_t,acc_t> *buffers)
{
  int off_ccache = 0;
#ifdef _LMP_INTEL_OFFLOAD
  if (_cop >= 0) off_ccache = 1;
#endif
  buffers->grow_ccache(off_ccache, comm->nthreads, 1);
  _ccache_stride = buffers->ccache_stride();

  int tp1 = atom->ntypes + 1;
  int ntable = 1;
  if (ncoultablebits)
    for (int i = 0; i < ncoultablebits; i++) ntable *= 2;

  fc.set_ntypes(tp1, ntable, memory, _cop);
  buffers->set_ntypes(tp1);
  flt_t **cutneighsq = buffers->get_cutneighsq();

  // Repeat cutsq calculation because done after call to init_style
  double cut, cutneigh;
  for (int i = 1; i <= atom->ntypes; i++) {
    for (int j = i; j <= atom->ntypes; j++) {
      if (setflag[i][j] != 0 || (setflag[i][i] != 0 && setflag[j][j] != 0)) {
        cut = init_one(i, j);
        cutneigh = cut + neighbor->skin;
        cutsq[i][j] = cutsq[j][i] = cut*cut;
        cutneighsq[i][j] = cutneighsq[j][i] = cutneigh * cutneigh;
      }
    }
  }

  fc.g_ewald = force->kspace->g_ewald;
  fc.tabinnersq = tabinnersq;

  for (int i = 0; i < 4; i++) {
    fc.special_lj[i] = force->special_lj[i];
    fc.special_coul[i] = force->special_coul[i];
    fc.special_coul[0] = 1.0;
    fc.special_lj[0] = 1.0;
  }

  for (int i = 0; i < tp1; i++) {
    for (int j = 0; j < tp1; j++) {
      if (cutsq[i][j] < cut_ljsq[i][j])
        error->all(FLERR,
          "Intel variant of buck/coul/long expects lj cutoff<=coulombic");
      fc.c_force[i][j].cutsq = cutsq[i][j];
      fc.c_force[i][j].cut_ljsq = cut_ljsq[i][j];
      fc.c_force[i][j].buck1 = buck1[i][j];
      fc.c_force[i][j].buck2 = buck2[i][j];
      fc.rho_inv[i][j] = rhoinv[i][j];
      fc.c_energy[i][j].a = a[i][j];
      fc.c_energy[i][j].c = c[i][j];
      fc.c_energy[i][j].offset = offset[i][j];
      fc.c_energy[i][j].pad = rhoinv[i][j];
    }
  }

  if (ncoultablebits) {
    for (int i = 0; i < ntable; i++) {
      fc.table[i].r = rtable[i];
      fc.table[i].dr = drtable[i];
      fc.table[i].f = ftable[i];
      fc.table[i].df = dftable[i];
      fc.etable[i] = etable[i];
      fc.detable[i] = detable[i];
      fc.ctable[i] = ctable[i];
      fc.dctable[i] = dctable[i];
    }
  }

#ifdef _LMP_INTEL_OFFLOAD
  if (_cop < 0) return;
  flt_t * special_lj = fc.special_lj;
  flt_t * special_coul = fc.special_coul;
  C_FORCE_T * c_force = fc.c_force[0];
  C_ENERGY_T * c_energy = fc.c_energy[0];
  TABLE_T * table = fc.table;
  flt_t * rho_inv = fc.rho_inv[0];
  flt_t * etable = fc.etable;
  flt_t * detable = fc.detable;
  flt_t * ctable = fc.ctable;
  flt_t * dctable = fc.dctable;
  flt_t * ocutneighsq = cutneighsq[0];
  int tp1sq = tp1 * tp1;
  #pragma offload_transfer target(mic:_cop) \
    in(special_lj, special_coul: length(4) alloc_if(0) free_if(0)) \
    in(c_force, c_energy: length(tp1sq) alloc_if(0) free_if(0)) \
    in(rho_inv: length(tp1sq) alloc_if(0) free_if(0)) \
    in(table: length(ntable) alloc_if(0) free_if(0)) \
    in(etable,detable,ctable,dctable: length(ntable) alloc_if(0) free_if(0)) \
    in(ocutneighsq: length(tp1sq) alloc_if(0) free_if(0))
#endif
}
void PairLJCharmmCoulCharmmIntel::pack_force_const(ForceConst<flt_t> &fc,
                                                   IntelBuffers<flt_t,acc_t> *buffers)
{
  int off_ccache = 0;
#ifdef _LMP_INTEL_OFFLOAD
  if (_cop >= 0) off_ccache = 1;
#endif
  buffers->grow_ccache(off_ccache, comm->nthreads, 1);
  _ccache_stride = buffers->ccache_stride();

  int tp1 = atom->ntypes + 1;
  fc.set_ntypes(tp1, memory, _cop);
  buffers->set_ntypes(tp1);
  flt_t **cutneighsq = buffers->get_cutneighsq();

  // Repeat cutsq calculation because done after call to init_style
  double cut, cutneigh;
  if (cut_lj > cut_coul)
    error->all(FLERR,
      "Intel variant of lj/charmm/coul/charmm expects lj cutoff<=coulombic");
  for (int i = 1; i <= atom->ntypes; i++) {
    for (int j = i; j <= atom->ntypes; j++) {
      if (setflag[i][j] != 0 || (setflag[i][i] != 0 && setflag[j][j] != 0)) {
        cut = init_one(i, j);
        cutneigh = cut + neighbor->skin;
        cutsq[i][j] = cutsq[j][i] = cut*cut;
        cutneighsq[i][j] = cutneighsq[j][i] = cutneigh * cutneigh;
      }
    }
  }

  cut_coul_innersq = cut_coul_inner * cut_coul_inner;
  cut_lj_innersq = cut_lj_inner * cut_lj_inner;
  cut_ljsq = cut_lj * cut_lj;
  cut_coulsq = cut_coul * cut_coul;
  cut_bothsq = MAX(cut_ljsq, cut_coulsq);

  fc.cut_coulsq = cut_coulsq;
  fc.cut_ljsq = cut_ljsq;
  fc.cut_coul_innersq = cut_coul_innersq;
  fc.cut_lj_innersq = cut_lj_innersq;

  for (int i = 0; i < 4; i++) {
    fc.special_lj[i] = force->special_lj[i];
    fc.special_coul[i] = force->special_coul[i];
    fc.special_coul[0] = 1.0;
    fc.special_lj[0] = 1.0;
  }

  for (int i = 1; i < tp1; i++) {
    for (int j = 1; j < tp1; j++) {
      fc.lj[i][j].x = lj1[i][j];
      fc.lj[i][j].y = lj2[i][j];
      fc.lj[i][j].z = lj3[i][j];
      fc.lj[i][j].w = lj4[i][j];
      fc.cutsq[i][j] = cutsq[i][j];
    }
  }

#ifdef _LMP_INTEL_OFFLOAD
  if (_cop < 0) return;
  flt_t * special_lj = fc.special_lj;
  flt_t * special_coul = fc.special_coul;
  flt_t * cutsq = fc.cutsq[0];
  LJ_T * lj = fc.lj[0];
  flt_t * ocutneighsq = cutneighsq[0];
  int tp1sq = tp1 * tp1;
  #pragma offload_transfer target(mic:_cop) \
    in(special_lj, special_coul: length(4) alloc_if(0) free_if(0)) \
    in(cutsq,lj: length(tp1sq) alloc_if(0) free_if(0)) \
    in(ocutneighsq: length(tp1sq) alloc_if(0) free_if(0))
#endif
}