Example #1
int colvarproxy_namd::smp_biases_loop()
{
  colvarmodule *cv = this->colvars;
  CkLoop_Parallelize(calc_cv_biases_smp, 1, this,
                     cv->biases_active()->size(), 0, cv->biases_active()->size()-1);
  return cvm::get_error();
}
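For context, CkLoop_Parallelize splits the index range [0, biases_active()->size()-1] into chunks and hands each chunk to the work function, which must follow CkLoop's HelperFn signature. A minimal sketch of the shape calc_cv_biases_smp could take, assuming that signature and using colvarbias::update() as a stand-in for the real per-bias work:

// Illustrative sketch only; NAMD's actual calc_cv_biases_smp may differ.
// CkLoop work units receive the bounds of their chunk and must process
// exactly the indices in [first, last].
void calc_cv_biases_smp(int first, int last, void * /*result*/,
                        int /*paramNum*/, void *param)
{
  colvarproxy_namd *proxy = static_cast<colvarproxy_namd *>(param);
  colvarmodule *cv = proxy->colvars;
  for (int i = first; i <= last; i++) {
    (*(cv->biases_active()))[i]->update();  // stand-in for the per-bias work
  }
}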
Example #2
int colvarproxy_namd::smp_biases_script_loop()
{
  colvarmodule *cv = this->colvars;
  CkLoop_Parallelize(calc_cv_biases_smp, 1, this,
                     cv->biases_active()->size(), 0, cv->biases_active()->size()-1,
                     1, NULL, CKLOOP_NONE,
                     calc_cv_scripted_forces, 1, this);
  return cvm::get_error();
}
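The longer overload used here spells out the optional CkLoop arguments: synchronous execution (1), no reduction buffer (NULL), no built-in reduction (CKLOOP_NONE), and a caller-side callback, calc_cv_scripted_forces, which CkLoop invokes with paramNum 1 and this as its parameter. A hedged sketch of that callback, assuming CkLoop's CallerFn signature void fn(int paramNum, void *param) and a hypothetical scripted-force entry point:

// Illustrative sketch only; NAMD's actual calc_cv_scripted_forces may differ.
void calc_cv_scripted_forces(int /*paramNum*/, void *param)
{
  colvarproxy_namd *proxy = static_cast<colvarproxy_namd *>(param);
  // Hypothetical entry point: evaluate forces defined by scripted biases
  // while the helper threads run through the bias loop.
  proxy->colvars->calc_scripted_forces();
}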
Example #3
// Each node computes its own f-tilde vectors.
void FVectorCache::computeFTilde(complex *fs_in){

  // Create the FComputePacket for this set of f vectors and start CkLoop
  f_packet.size = ndata;
  f_packet.fs = fs_in;

#ifdef USE_CKLOOP
  CkLoop_Parallelize(fTildeWorkUnit, 1, &f_packet, n_list_size, 0, n_list_size - 1);
#else
  // Serial fallback: drive the same work unit over every index.
  for (int i = 0; i < n_list_size; i++) {
    fTildeWorkUnit(i, i, NULL, 1, &f_packet);
  }
#endif
}
Example #4
double PmeKSpace::compute_energy_orthogonal_helper(float *q_arr, const Lattice &lattice, double ewald, double *virial) {
  double energy = 0.0;
  double v0 = 0.;
  double v1 = 0.;
  double v2 = 0.;
  double v3 = 0.;
  double v4 = 0.;
  double v5 = 0.;

  int K1, K2, K3;

  K1=myGrid.K1; K2=myGrid.K2; K3=myGrid.K3;

  i_pi_volume = 1.0/(M_PI * lattice.volume());
  piob = M_PI/ewald;
  piob *= piob;


  double recipx = lattice.a_r().x;
  double recipy = lattice.b_r().y;
  double recipz = lattice.c_r().z;

  init_exp(exp1, K1, 0, K1, recipx);
  init_exp(exp2, K2, k2_start, k2_end, recipy);
  init_exp(exp3, K3, k3_start, k3_end, recipz);

  double recips[] = {recipx, recipy, recipz};
  const int NPARTS = CmiMyNodeSize();  // this controls the granularity of loop parallelism
  ALLOCA(double, partialEnergy, NPARTS);
  ALLOCA(double, partialVirial, 6*NPARTS);
  int unitDist[] = {K1/NPARTS, K1%NPARTS};

  // Parallelize the following loop using CkLoop
  void *params[] = {this, q_arr, recips, partialEnergy, partialVirial, unitDist};

#if USE_CKLOOP
  CProxy_FuncCkLoop ckLoop = CkpvAccess(BOCclass_group).ckLoop;
  CkLoop_Parallelize(ckLoop, compute_energy_orthogonal_ckloop, 6, (void *)params, NPARTS, 0, NPARTS-1);
#else
  // Serial fallback: run the same chunked loop on the calling PE.
  int unit = K1/NPARTS;
  int remains = K1%NPARTS;
  for (int i = 0; i < NPARTS; i++) {
    int k1from, k1to;
    if (i < remains) {
      k1from = i*(unit+1);
      k1to = k1from + unit;
    } else {
      k1from = remains*(unit+1) + (i-remains)*unit;
      k1to = k1from + unit - 1;
    }
    double *pEnergy = partialEnergy + i;
    double *pVirial = partialVirial + i*6;
    compute_energy_orthogonal_subset(q_arr, recips, pVirial, pEnergy, k1from, k1to);
  }
#endif
    
  for (int i = 0; i < NPARTS; i++) {
    v0 += partialVirial[i*6+0];
    v1 += partialVirial[i*6+1];
    v2 += partialVirial[i*6+2];
    v3 += partialVirial[i*6+3];
    v4 += partialVirial[i*6+4];
    v5 += partialVirial[i*6+5];
    energy += partialEnergy[i];
  }
    
    virial[0] = v0;
    virial[1] = v1;
    virial[2] = v2;
    virial[3] = v3;
    virial[4] = v4;
    virial[5] = v5;
    return energy;
}
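The CkLoop helper itself is not shown in this example. A plausible sketch, reconstructed from the serial fallback loop and the params array packed above (the exact member layout and the accessibility of compute_energy_orthogonal_subset are assumptions; NAMD's real compute_energy_orthogonal_ckloop may differ):

// Reconstruction for illustration; not necessarily NAMD's exact code.
static void compute_energy_orthogonal_ckloop(int first, int last, void * /*result*/,
                                             int /*paramNum*/, void *param)
{
  void **params = (void **)param;
  PmeKSpace *kspace     = (PmeKSpace *)params[0];
  float *q_arr          = (float *)params[1];
  double *recips        = (double *)params[2];
  double *partialEnergy = (double *)params[3];
  double *partialVirial = (double *)params[4];
  int *unitDist         = (int *)params[5];

  const int unit = unitDist[0], remains = unitDist[1];
  for (int i = first; i <= last; i++) {
    // Map chunk index i back onto its contiguous range of k1 planes.
    int k1from, k1to;
    if (i < remains) {
      k1from = i*(unit+1);
      k1to   = k1from + unit;
    } else {
      k1from = remains*(unit+1) + (i-remains)*unit;
      k1to   = k1from + unit - 1;
    }
    kspace->compute_energy_orthogonal_subset(q_arr, recips,
                                             partialVirial + 6*i,
                                             partialEnergy + i,
                                             k1from, k1to);
  }
}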
Example #5
// Receive an unoccupied psi and distribute the computation of all associated
// f vectors across the node using CkLoop.
void PsiCache::computeFs(PsiMessage* msg) {
  double start = CmiWallTimer();

  if (msg->spin_index != 0) {
    CkAbort("Error: We don't support multiple spins yet!\n");
  }
  CkAssert(msg->size == psi_size);

  // Compute ikq index and the associated umklapp factor
  // TODO: This should just be a table lookup
  unsigned ikq;
  int umklapp[3];
  kqIndex(msg->k_index, ikq, umklapp);

  bool uproc = false;
  if (umklapp[0] != 0 || umklapp[1] != 0 || umklapp[2] != 0) {
    uproc = true;
    computeUmklappFactor(umklapp);
  }

  GWBSE* gwbse = GWBSE::get();
  double*** e_occ = gwbse->gw_epsilon.Eocc;
  double*** e_occ_shifted = gwbse->gw_epsilon.Eocc_shifted;
  double*** e_unocc = gwbse->gw_epsilon.Eunocc;

  // Create the FComputePacket for this set of f vectors and start CkLoop
  f_packet.size = psi_size;
  f_packet.unocc_psi = msg->psi;

  if (qindex == 0) {
    f_packet.occ_psis = psis_shifted[ikq];
    f_packet.e_occ = e_occ_shifted[msg->spin_index][ikq];
  } else {
    f_packet.occ_psis = psis[ikq];
    f_packet.e_occ = e_occ[msg->spin_index][ikq];
  }
  f_packet.e_unocc = e_unocc[msg->spin_index][msg->k_index][msg->state_index-L];
  f_packet.fs = fs + (L*psi_size*(received_chunks%pipeline_stages));

  f_packet.umklapp_factor = uproc ? umklapp_factor : NULL;

#ifdef USE_CKLOOP
  CkLoop_Parallelize(computeF, 1, &f_packet, L, 0, L - 1);
#else
  for (int l = 0; l < L; l++) {
    computeF(l,l,NULL,1,&f_packet);
  }
#endif
  received_chunks++;


#ifdef TESTING
  {
    // Compute f-tilde first (a CkLoop over all L states, similar to the one
    // above), then store the resulting f vectors.
    FVectorCache *fvec_cache = fvector_cache_proxy.ckLocalBranch();
    fvec_cache->computeFTilde(fs);
    // fvec_cache->applyCutoff(msg->accept_size, msg->accept);
    // fvec_cache->init(140);
    fvec_cache->putFVec(msg->state_index-L, fs);
  }
#endif

  // Let the matrix chares know that the f vectors are ready
  CkCallback cb(CkReductionTarget(PMatrix, applyFs), pmatrix2D_proxy);
  contribute(cb);

  // Cleanup
  delete msg;
  total_time += CmiWallTimer() - start;
}
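For reference, a work unit such as computeF must honor the [first, last] chunk bounds CkLoop passes in; with one occupied state per index, the serial fallback above (one call per l) and the CkLoop call cover exactly the same work. Below is a bare skeleton consuming the FComputePacket fields set above, with the real f-vector arithmetic replaced by an illustrative product:

// Skeleton only; the actual computeF in the GW/BSE code computes the real
// f-vector contents. Field names mirror the FComputePacket set up above.
static void computeF(int first, int last, void * /*result*/,
                     int /*paramNum*/, void *param)
{
  FComputePacket *p = static_cast<FComputePacket *>(param);
  for (int l = first; l <= last; l++) {            // one occupied state per index
    complex *f = p->fs + l * p->size;
    for (int i = 0; i < (int)p->size; i++) {
      f[i] = p->occ_psis[l][i] * p->unocc_psi[i];  // illustrative product only
      if (p->umklapp_factor != NULL) {
        f[i] *= p->umklapp_factor[i];
      }
    }
  }
}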