// Example #1
// 0
int LJGROMACST::init(const int ntypes, double **host_cutsq,
                     double **host_lj1, double **host_lj2, double **host_lj3,
                     double **host_lj4, double *host_special_lj,
                     const int nlocal, const int nall, const int max_nbors,
                     const int maxspecial, const double cell_size,
                     const double gpu_split, FILE *_screen,
                     double **host_ljsw1, double **host_ljsw2, double **host_ljsw3,
                     double **host_ljsw4, double **host_ljsw5,
                     double **cut_inner, double **cut_inner_sq) {
  int success;
  success=this->init_atomic(nlocal,nall,max_nbors,maxspecial,cell_size,gpu_split,
                            _screen,lj_gromacs,"k_lj_gromacs");
  if (success!=0)
    return success;

  // If atom type constants fit in shared memory use fast kernel
  int lj_types=ntypes;
  shared_types=false;
  int max_shared_types=this->device->max_shared_types();
  if (lj_types<=max_shared_types && this->_block_size>=max_shared_types) {
    lj_types=max_shared_types;
    shared_types=true;
  }
  _lj_types=lj_types;

  // Allocate a host write buffer for data initialization
  UCL_H_Vec<numtyp> host_write(lj_types*lj_types*32,*(this->ucl_device),
                               UCL_WRITE_ONLY);

  for (int i=0; i<lj_types*lj_types; i++)
    host_write[i]=0.0;

  lj1.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
  this->atom->type_pack4(ntypes,lj_types,lj1,host_write,host_lj1,host_lj2,
                         host_cutsq,cut_inner_sq);

  lj3.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
  this->atom->type_pack4(ntypes,lj_types,lj3,host_write,host_lj3,host_lj4,
                         cut_inner,host_ljsw5);

  ljsw.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
  this->atom->type_pack4(ntypes,lj_types,ljsw,host_write,host_ljsw1,host_ljsw2,
                         host_ljsw3,host_ljsw4);

  UCL_H_Vec<double> dview;
  sp_lj.alloc(4,*(this->ucl_device),UCL_READ_ONLY);
  dview.view(host_special_lj,4,*(this->ucl_device));
  ucl_copy(sp_lj,dview,false);

  _allocated=true;
  this->_max_bytes=lj1.row_bytes()+lj3.row_bytes()
    +ljsw.row_bytes()+sp_lj.row_bytes();
  return 0;
}
// Example #2
// 0
int BornT::init(const int ntypes, double **host_cutsq,
                double **host_rhoinv, double **host_born1, double **host_born2, 
                double **host_born3, double **host_a, double **host_c,
                double **host_d, double **host_sigma,
                double **host_offset, double *host_special_lj,
                const int nlocal, const int nall, const int max_nbors, 
                const int maxspecial, const double cell_size, 
                const double gpu_split, FILE *_screen) {
  int success;
  success=this->init_atomic(nlocal,nall,max_nbors,maxspecial,cell_size,gpu_split,
                            _screen,born,"k_born");
  if (success!=0)
    return success;

  // If atom type constants fit in shared memory use fast kernel
  int lj_types=ntypes;
  shared_types=false;
  int max_shared_types=this->device->max_shared_types();
  if (lj_types<=max_shared_types && this->_block_size>=max_shared_types) {
    lj_types=max_shared_types;
    shared_types=true;
  }
  _lj_types=lj_types;

  // Allocate a host write buffer for data initialization
  UCL_H_Vec<numtyp> host_write(lj_types*lj_types*32,*(this->ucl_device),
                               UCL_WRITE_OPTIMIZED);

  for (int i=0; i<lj_types*lj_types; i++)
    host_write[i]=0.0;

  coeff1.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
  this->atom->type_pack4(ntypes,lj_types,coeff1,host_write,host_rhoinv,
                         host_born1,host_born2,host_born3);

  coeff2.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
  this->atom->type_pack4(ntypes,lj_types,coeff2,host_write,host_a,host_c,
		                     host_d,host_offset);

  cutsq_sigma.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
  this->atom->type_pack2(ntypes,lj_types,cutsq_sigma,host_write,host_cutsq,
                         host_sigma);

  UCL_H_Vec<double> dview;
  sp_lj.alloc(4,*(this->ucl_device),UCL_READ_ONLY);
  dview.view(host_special_lj,4,*(this->ucl_device));
  ucl_copy(sp_lj,dview,false);

  _allocated=true;
  this->_max_bytes=coeff1.row_bytes()+coeff2.row_bytes()
   +cutsq_sigma.row_bytes()+sp_lj.row_bytes();
  return 0;
}
// Example #3
// 0
int DPDT::init(const int ntypes,
               double **host_cutsq, double **host_a0,
               double **host_gamma, double **host_sigma,
               double **host_cut, double *host_special_lj,
               const bool tstat_only,
               const int nlocal, const int nall,
               const int max_nbors, const int maxspecial,
               const double cell_size,
               const double gpu_split, FILE *_screen) {
  int success;
  success=this->init_atomic(nlocal,nall,max_nbors,maxspecial,cell_size,gpu_split,_screen,dpd,"k_dpd");
  if (success!=0)
    return success;

  // If atom type constants fit in shared memory use fast kernel
  int lj_types=ntypes;
  shared_types=false;
  int max_shared_types=this->device->max_shared_types();
  if (lj_types<=max_shared_types && this->_block_size>=max_shared_types) {
    lj_types=max_shared_types;
    shared_types=true;
  }
  _lj_types=lj_types;

  // Allocate a host write buffer for data initialization
  UCL_H_Vec<numtyp> host_write(lj_types*lj_types*32,*(this->ucl_device),
                               UCL_WRITE_ONLY);

  for (int i=0; i<lj_types*lj_types; i++)
    host_write[i]=0.0;

  coeff.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
  this->atom->type_pack4(ntypes,lj_types,coeff,host_write,host_a0,host_gamma,
                         host_sigma,host_cut);

  UCL_H_Vec<numtyp> host_rsq(lj_types*lj_types,*(this->ucl_device),
                             UCL_WRITE_ONLY);
  cutsq.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
  this->atom->type_pack1(ntypes,lj_types,cutsq,host_rsq,host_cutsq);

  UCL_H_Vec<double> dview;
  sp_lj.alloc(4,*(this->ucl_device),UCL_READ_ONLY);
  dview.view(host_special_lj,4,*(this->ucl_device));
  ucl_copy(sp_lj,dview,false);

  _tstat_only = 0;
  if (tstat_only) _tstat_only=1;

  _allocated=true;
  this->_max_bytes=coeff.row_bytes()+cutsq.row_bytes()+sp_lj.row_bytes();
  return 0;
}
int CGCMMT::init(const int ntypes, double **host_cutsq, 
                          int **host_cg_type, double **host_lj1, 
                          double **host_lj2, double **host_lj3, 
                          double **host_lj4, double **host_offset, 
                          double *host_special_lj, const int nlocal,
                          const int nall, const int max_nbors,
                          const int maxspecial, const double cell_size, 
                          const double gpu_split, FILE *_screen) {
  int success;
  success=this->init_atomic(nlocal,nall,max_nbors,maxspecial,cell_size,gpu_split,
                            _screen,cg_cmm,"k_cg_cmm");
  if (success!=0)
    return success;

  // If atom type constants fit in shared memory use fast kernel
  int cmm_types=ntypes;
  shared_types=false;
  int max_shared_types=this->device->max_shared_types();
  if (cmm_types<=max_shared_types && this->_block_size>=max_shared_types) {
    cmm_types=max_shared_types;
    shared_types=true;
  }
  _cmm_types=cmm_types;

  // Allocate a host write buffer for data initialization
  UCL_H_Vec<numtyp> host_write(cmm_types*cmm_types*32,*(this->ucl_device),
                               UCL_WRITE_ONLY);

  for (int i=0; i<cmm_types*cmm_types; i++)
    host_write[i]=0.0;

  lj1.alloc(cmm_types*cmm_types,*(this->ucl_device),UCL_READ_ONLY);
  this->atom->type_pack4(ntypes,cmm_types,lj1,host_write,host_cutsq, 
                         host_cg_type,host_lj1,host_lj2);

  lj3.alloc(cmm_types*cmm_types,*(this->ucl_device),UCL_READ_ONLY);
  this->atom->type_pack4(ntypes,cmm_types,lj3,host_write,host_lj3,host_lj4,
		         host_offset);

  UCL_H_Vec<double> dview;
  sp_lj.alloc(4,*(this->ucl_device),UCL_READ_ONLY);
  dview.view(host_special_lj,4,*(this->ucl_device));
  ucl_copy(sp_lj,dview,false);

  _allocated=true;
  this->_max_bytes=lj1.row_bytes()+lj3.row_bytes()+sp_lj.row_bytes();
  return 0;
}
int MorseT::init(const int ntypes, 
                          double **host_cutsq, double **host_morse1, 
                          double **host_r0, double **host_alpha, 
                          double **host_d0, double **host_offset, 
                          double *host_special_lj, const int nlocal,
                          const int nall, const int max_nbors,
                          const int maxspecial, const double cell_size,
                          const double gpu_split, FILE *_screen) {
  int success;
  success=this->init_atomic(nlocal,nall,max_nbors,maxspecial,cell_size,gpu_split,
                            _screen,morse,"k_morse");
  if (success!=0)
    return success;

  // If atom type constants fit in shared memory use fast kernel
  int types=ntypes;
  shared_types=false;
  int max_shared_types=this->device->max_shared_types();
  if (types<=max_shared_types && this->_block_size>=max_shared_types) {
    types=max_shared_types;
    shared_types=true;
  }
  _types=types;

  // Allocate a host write buffer for data initialization
  UCL_H_Vec<numtyp> host_write(types*types*32,*(this->ucl_device),
                               UCL_WRITE_ONLY);

  for (int i=0; i<types*types; i++)
    host_write[i]=0.0;

  mor1.alloc(types*types,*(this->ucl_device),UCL_READ_ONLY);
  this->atom->type_pack4(ntypes,types,mor1,host_write,host_cutsq,host_morse1,
                         host_r0,host_alpha);

  mor2.alloc(types*types,*(this->ucl_device),UCL_READ_ONLY);
  this->atom->type_pack2(ntypes,types,mor2,host_write,host_d0,host_offset);

  UCL_H_Vec<double> dview;
  sp_lj.alloc(4,*(this->ucl_device),UCL_READ_ONLY);
  dview.view(host_special_lj,4,*(this->ucl_device));
  ucl_copy(sp_lj,dview,false);

  _allocated=true;
  this->_max_bytes=mor1.row_bytes()+mor2.row_bytes()+sp_lj.row_bytes();
  return 0;
}
// Example #6
// 0
// Initialize device-side data for the tabulated pair style: compiles the
// interpolation-style kernels and uploads per-type-pair table metadata
// (tabindex/nshiftbits/nmask/coeff2) plus per-table energy/force data
// (coeff3/coeff4).  Returns 0 on success or the init_atomic() error code.
int TableT::init(const int ntypes,
                double **host_cutsq, double ***host_table_coeffs,
                double **host_table_data,
                double *host_special_lj, const int nlocal,
                const int nall, const int max_nbors,
                const int maxspecial, const double cell_size,
                const double gpu_split, FILE *_screen,
                int tabstyle, int ntables, int tablength) {
  int success;
  // Base initialization (neighbor storage, host/device split, base kernel)
  success=this->init_atomic(nlocal,nall,max_nbors,maxspecial,cell_size,
                            gpu_split,_screen,table,"k_table");
  if (success!=0)
    return success;

  // One general and one shared-memory "fast" kernel per interpolation style
  k_pair_linear.set_function(*(this->pair_program),"k_table_linear");
  k_pair_linear_fast.set_function(*(this->pair_program),"k_table_linear_fast");
  k_pair_spline.set_function(*(this->pair_program),"k_table_spline");
  k_pair_spline_fast.set_function(*(this->pair_program),"k_table_spline_fast");
  k_pair_bitmap.set_function(*(this->pair_program),"k_table_bitmap");
  k_pair_bitmap_fast.set_function(*(this->pair_program),"k_table_bitmap_fast");
  _compiled_styles = true;

  // If atom type constants fit in shared memory use fast kernel
  int lj_types=ntypes;
  shared_types=false;
  int max_shared_types=this->device->max_shared_types();
  if (lj_types<=max_shared_types && this->_block_size>=max_shared_types) {
    lj_types=max_shared_types;
    shared_types=true;
  }
  _lj_types=lj_types;

  _tabstyle = tabstyle;
  _ntables = ntables;
  // For BITMAP, 'tablength' is a bit count, so the real table length is
  // 2^tablength; for the other styles it is the length itself.
  if (tabstyle != BITMAP) _tablength = tablength;
  else _tablength = 1 << tablength;

  // Allocate a host write buffer for data initialization; reused (fully
  // overwritten) for each of the three integer per-type-pair tables below
  UCL_H_Vec<int> host_write_int(lj_types*lj_types,*(this->ucl_device),
                               UCL_WRITE_ONLY);

  for (int i=0; i<lj_types*lj_types; i++)
    host_write_int[i] = 0;

  tabindex.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
  nshiftbits.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
  nmask.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);

  // NOTE(review): the ix/iy loops cover types 1..ntypes-1; this assumes the
  // caller passes a type count already padded by one (LAMMPS types are
  // 1-based) -- confirm against the GPU pair-style wrapper.
  for (int ix=1; ix<ntypes; ix++)
    for (int iy=1; iy<ntypes; iy++)
      host_write_int[ix*lj_types+iy] = (int)host_table_coeffs[ix][iy][0]; // tabindex
  ucl_copy(tabindex,host_write_int,false);

  for (int ix=1; ix<ntypes; ix++)
    for (int iy=1; iy<ntypes; iy++)
      host_write_int[ix*lj_types+iy] = (int)host_table_coeffs[ix][iy][1]; // nshiftbits
  ucl_copy(nshiftbits,host_write_int,false);

  for (int ix=1; ix<ntypes; ix++)
    for (int iy=1; iy<ntypes; iy++)
      host_write_int[ix*lj_types+iy] = (int)host_table_coeffs[ix][iy][2]; // nmask
  ucl_copy(nmask,host_write_int,false);

  // Floating-point per-type-pair metadata (slots 3-5 of host_table_coeffs)
  UCL_H_Vec<numtyp4> host_write(lj_types*lj_types,*(this->ucl_device),
                               UCL_WRITE_ONLY);

  coeff2.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
  for (int ix=1; ix<ntypes; ix++)
    for (int iy=1; iy<ntypes; iy++) {
      host_write[ix*lj_types+iy].x = host_table_coeffs[ix][iy][3]; // innersq
      host_write[ix*lj_types+iy].y = host_table_coeffs[ix][iy][4]; // invdelta
      host_write[ix*lj_types+iy].z = host_table_coeffs[ix][iy][5]; // deltasq6
      host_write[ix*lj_types+iy].w = (numtyp)0.0;
  }
  ucl_copy(coeff2,host_write,false);

  // Allocate tablength arrays; host_write2 is the staging buffer for both
  // coeff3 and coeff4 (zeroed before each use, since not every style writes
  // every field of every entry)
  UCL_H_Vec<numtyp4> host_write2(_ntables*_tablength,*(this->ucl_device),
                                 UCL_WRITE_ONLY);
  for (int i=0; i<_ntables*_tablength; i++) {
    host_write2[i].x = 0.0;
    host_write2[i].y = 0.0;
    host_write2[i].z = 0.0;
    host_write2[i].w = 0.0;
  }

  // coeff3: per-entry table values; host_table_data[n] holds 6 doubles per
  // entry.  LOOKUP fills only the first _tablength-1 entries and skips rsq.
  coeff3.alloc(_ntables*_tablength,*(this->ucl_device),UCL_READ_ONLY);
  for (int n=0; n<_ntables; n++) {
    if (tabstyle == LOOKUP) {
      for (int k=0; k<_tablength-1; k++) {
          host_write2[n*_tablength+k].x = (numtyp)0;
          host_write2[n*_tablength+k].y = host_table_data[n][6*k+1]; // e
          host_write2[n*_tablength+k].z = host_table_data[n][6*k+2]; // f
          host_write2[n*_tablength+k].w = (numtyp)0;
      }
    } else if (tabstyle == LINEAR || tabstyle == SPLINE || tabstyle == BITMAP) {
      for (int k=0; k<_tablength; k++) {
          host_write2[n*_tablength+k].x = host_table_data[n][6*k+0]; // rsq
          host_write2[n*_tablength+k].y = host_table_data[n][6*k+1]; // e
          host_write2[n*_tablength+k].z = host_table_data[n][6*k+2]; // f
      }
    }
  }
  ucl_copy(coeff3,host_write2,false);

  // coeff4: per-entry derivative/spline data (slots 3-5); the staging buffer
  // is re-zeroed first because LOOKUP writes nothing here
  coeff4.alloc(_ntables*_tablength,*(this->ucl_device),UCL_READ_ONLY);
  for (int i=0; i<_ntables*_tablength; i++) {
    host_write2[i].x = 0.0;
    host_write2[i].y = 0.0;
    host_write2[i].z = 0.0;
    host_write2[i].w = 0.0;
  }

  for (int n=0; n<_ntables; n++) {
    if (tabstyle == LINEAR) {
      for (int k=0; k<_tablength-1; k++) {
        host_write2[n*_tablength+k].x = (numtyp)0;
        host_write2[n*_tablength+k].y = host_table_data[n][6*k+3]; // de
        host_write2[n*_tablength+k].z = host_table_data[n][6*k+4]; // df
        host_write2[n*_tablength+k].w = (numtyp)0;
      }
    } else if (tabstyle == SPLINE) {
      for (int k=0; k<_tablength; k++) {
        host_write2[n*_tablength+k].x = (numtyp)0;
        host_write2[n*_tablength+k].y = host_table_data[n][6*k+3]; // e2
        host_write2[n*_tablength+k].z = host_table_data[n][6*k+4]; // f2
        host_write2[n*_tablength+k].w = (numtyp)0;
      }
    } else if (tabstyle == BITMAP) {
      for (int k=0; k<_tablength; k++) {
        host_write2[n*_tablength+k].x = (numtyp)0;
        host_write2[n*_tablength+k].y = host_table_data[n][6*k+3]; // de
        host_write2[n*_tablength+k].z = host_table_data[n][6*k+4]; // df
        host_write2[n*_tablength+k].w = host_table_data[n][6*k+5]; // drsq
      }
    }
  }
  ucl_copy(coeff4,host_write2,false);

  // Squared cutoffs in their own device table
  UCL_H_Vec<numtyp> host_rsq(lj_types*lj_types,*(this->ucl_device),
                             UCL_WRITE_ONLY);
  cutsq.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
  this->atom->type_pack1(ntypes,lj_types,cutsq,host_rsq,host_cutsq);

  // Special LJ scaling factors (host view, then copy to device)
  UCL_H_Vec<double> dview;
  sp_lj.alloc(4,*(this->ucl_device),UCL_READ_ONLY);
  dview.view(host_special_lj,4,*(this->ucl_device));
  ucl_copy(sp_lj,dview,false);

  _allocated=true;
  this->_max_bytes=tabindex.row_bytes()+nshiftbits.row_bytes()
    +nmask.row_bytes()+coeff2.row_bytes()
    +coeff3.row_bytes()+coeff4.row_bytes()+cutsq.row_bytes()
    +sp_lj.row_bytes();
  return 0;
}