int main(int argc, char **argv)
{
  const long int ITER_CNT = 100;
  const long int MAX_MSG_SIZE = 1048576;
  int* source_addr;
  int peer;
  long int i=0,j=0, buff_size; 
  long long int start_time, stop_time, res;
  double time;

  shmem_init();

  int pe_id = shmem_my_pe();
  source_addr = (int*) malloc(MAX_MSG_SIZE);

  if(pe_id == 1) {
      if(shmem_n_pes()!=4)
          fprintf(stderr,"Num PEs should be == 4\n");
      printf("#Message Size(B);Time(s);BW(B/s)\n");
  }

  if (pe_id==1)
	  peer = 3;
  else if(pe_id==3)
	  peer = 1;
  get_rtc_res_(&res);

  for (i = 0; i < SHMEM_BARRIER_SYNC_SIZE; i += 1){
          pSync[i] = SHMEM_SYNC_VALUE;
  }

  /* Collective operation: Implicit barrier on return from attach */
  shmemx_am_attach(HANDLER_ID_REQ, &sample_req_handler);
  shmem_barrier_all();
  if(pe_id == 1 || pe_id == 3) {

    for(buff_size=1; buff_size<=MAX_MSG_SIZE; buff_size*=2) {
        shmem_barrier(1,1,2,pSync);
        get_rtc_(&start_time);
        for(j=1;j<=ITER_CNT;j++) {
            if(pe_id == 1) {
                shmemx_am_request(peer, HANDLER_ID_REQ, source_addr, buff_size);
                shmemx_am_quiet();
            }
        }
        shmem_barrier(1,1,2,pSync);
        get_rtc_(&stop_time);
        time = (stop_time - start_time)*1.0/(double)res/ITER_CNT;
        if(pe_id == 1) {
            printf("%20ld;%20.12f;%20.12f\n",
                   buff_size, time, (double)buff_size/time);
        }
        fflush(stdout);
    }
  }

  shmem_barrier_all();
  shmem_finalize();

  return 0;
}
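The barrier calls above use the active-set triple (PE_start=1, logPE_stride=1, PE_size=2), which selects PEs 1 and 3: the members are PE_start + k*2^logPE_stride for k = 0..PE_size-1. A minimal stand-alone sketch of the same idiom (assuming a 4-PE run; pSync_pair is a name local to this sketch):

#include <shmem.h>

/* Sketch: synchronize only PEs 1 and 3. pSync_pair lives in the
 * symmetric data segment because it is a global. */
long pSync_pair[SHMEM_BARRIER_SYNC_SIZE];

void barrier_odd_pair(void)
{
    int i, me = shmem_my_pe();

    for (i = 0; i < SHMEM_BARRIER_SYNC_SIZE; i++)
        pSync_pair[i] = SHMEM_SYNC_VALUE;
    shmem_barrier_all();               /* everyone sees the initialized pSync */

    if (me == 1 || me == 3)            /* only active-set members may call it */
        shmem_barrier(1, 1, 2, pSync_pair); /* start=1, stride=2^1, size=2 */
}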
Example #2
static void
test_one_way(void)
{
    int i, k;
    int pe_size  = world_size;

    tmp = 0;
    total = 0;

    shmem_barrier_all();

    if (world_size % 2 == 1) {
        pe_size = world_size - 1;
    }

    if (!(world_size % 2 == 1 && rank == (world_size - 1))) {
        if (rank < world_size / 2) {
            for (i = 0 ; i < niters ; ++i) {
                cache_invalidate();

                shmem_barrier(0, 0, pe_size, barrier_pSync);

                tmp = timer();
                for (k = 0 ; k < nmsgs ; ++k) {
                    shmem_putmem(recv_buf + (nbytes * k), 
                                 send_buf + (nbytes * k), 
                                 nbytes, rank + (world_size / 2));
                }
                shmem_quiet();
                total += (timer() - tmp);
            }
        } else {
            for (i = 0 ; i < niters ; ++i) {
                cache_invalidate();

                shmem_barrier(0, 0, pe_size, barrier_pSync);

                tmp = timer();
                shmem_short_wait((short*) (recv_buf + (nbytes * (nmsgs - 1))), 0);
                total += (timer() - tmp);
                memset(recv_buf, 0, npeers * nmsgs * nbytes);
            }
        }

        shmem_double_sum_to_all(&tmp, &total, 1, 0, 0, pe_size, reduce_pWrk, reduce_pSync);
        display_result("single direction", (niters * nmsgs) / (tmp / world_size));
    }

    shmem_barrier_all();
}
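The receiving half above blocks in shmem_short_wait until the last word of the final message arrives, then clears the buffer for the next iteration. A stripped-down sketch of that put-then-wait handshake (mailbox is a made-up symmetric variable, not from the benchmark):

#include <shmem.h>

static long mailbox = 0;   /* symmetric: static storage exists on every PE */

void handshake(void)
{
    int me = shmem_my_pe();

    if (me == 0) {
        shmem_long_p(&mailbox, 1, 1);  /* deposit a nonzero flag on PE 1 */
        shmem_quiet();                 /* force remote completion */
    } else if (me == 1) {
        shmem_long_wait(&mailbox, 0);  /* spin while mailbox still equals 0 */
        mailbox = 0;                   /* reset for the next round */
    }
}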
Example #3
/*inline*/ void globalSum_double(LSMSCommunication &comm,double &a, int n)
{
  shmem_barrier(comm.comm.start_pe, comm.comm.logPE_stride, comm.comm.size,pSync1);
  static double r_d;
  r_d=a;
  // 'a' is a single double, so only one element can be reduced here; the
  // 'n' parameter is ignored (use globalSum_real below for arrays).
  shmem_double_sum_to_all(&a, &r_d, 1,comm.comm.start_pe, comm.comm.logPE_stride, comm.comm.size, pWrk_d, pSync2);
}
Example #4
/*inline*/ void globalSum_int(LSMSCommunication &comm,int &a)
{
  shmem_barrier(comm.comm.start_pe, comm.comm.logPE_stride, comm.comm.size,pSync1);
  static int r_i;  
  r_i=a;
  shmem_int_sum_to_all(&a, &r_i, 1,comm.comm.start_pe, comm.comm.logPE_stride, comm.comm.size, pWrk_i, pSync2);
}
Example #5
/*inline*/ void globalMax_double(LSMSCommunication &comm,double &a)
{
  shmem_barrier(comm.comm.start_pe, comm.comm.logPE_stride, comm.comm.size,pSync1);
  static double r_d;  
  r_d=a;
  // Use pSync2 for the reduction so the barrier's pSync1 is not reused
  // before all PEs have left the barrier (matching the other wrappers).
  shmem_double_max_to_all(&(a), &r_d, 1,comm.comm.start_pe, comm.comm.logPE_stride, comm.comm.size, pWrk_d, pSync2);
}
Example #6
/*inline*/ void globalSum_real(LSMSCommunication &comm,double *a, int n)
{
  shmem_barrier(comm.comm.start_pe, comm.comm.logPE_stride, comm.comm.size,pSync1);
  double* r_d = (double*)shmalloc(n*sizeof(double));  
  memcpy(r_d,a,n*sizeof(double));
  shmem_double_sum_to_all(a, r_d, n,comm.comm.start_pe, comm.comm.logPE_stride, comm.comm.size, pWrk_d, pSync2);
  shfree(r_d);
}
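Examples 3 through 6 rely on file-scope symmetric arrays (pSync1, pSync2, pWrk_d, pWrk_i) that are not shown. A plausible set of declarations matching those names, as a sketch (the actual LSMS sources may size or initialize them differently):

#include <shmem.h>

long   pSync1[SHMEM_BARRIER_SYNC_SIZE];
long   pSync2[SHMEM_REDUCE_SYNC_SIZE];
/* for an n-element reduction, pWrk needs max(n/2+1,
 * SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements */
double pWrk_d[SHMEM_REDUCE_MIN_WRKDATA_SIZE];
int    pWrk_i[SHMEM_REDUCE_MIN_WRKDATA_SIZE];

void init_sync_arrays(void)
{
    int i;
    for (i = 0; i < SHMEM_BARRIER_SYNC_SIZE; i++) pSync1[i] = SHMEM_SYNC_VALUE;
    for (i = 0; i < SHMEM_REDUCE_SYNC_SIZE; i++)  pSync2[i] = SHMEM_SYNC_VALUE;
    shmem_barrier_all();  /* all PEs must see the initialized arrays first */
}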
Example #7
int
main ()
{
    int me, npes, src;
    int i;
    struct timeval start, end;
    long time_taken, start_time, end_time;

    for (i = 0; i < _SHMEM_BCAST_SYNC_SIZE; i += 1) {
        pSync[i] = _SHMEM_SYNC_VALUE;
    }

    shmem_init ();
    me = shmem_my_pe ();
    npes = shmem_n_pes ();
    src = me - 1;
    time_taken = 0;

    for (i = 0; i < 10000; i++) {
        if (me != 0) {
            shmem_int_p (&x, src * (i + 1), me - 1);
        }
        else {
            shmem_int_p (&x, src * (i + 1), npes - 1);
        }
        shmem_barrier_all ();

        gettimeofday (&start, NULL);
        start_time = (start.tv_sec * 1000000.0) + start.tv_usec;

        shmem_barrier (0, 0, npes, pSync);

        gettimeofday (&end, NULL);
        end_time = (end.tv_sec * 1000000.0) + end.tv_usec;
        time_taken = time_taken + (end_time - start_time);

    }
    /* printf("%d: x = %d\n", me, x); */
    if (me == 0) {
        printf
            ("Time required for a barrier, with %d PEs is %ld microseconds\n",
             npes, time_taken / 10000);
    }

    shmem_finalize ();

    return 0;
}
Example #8
void recurse(int i, int ilog, int pes, int j, int nelems) {
    int next_i,next_ilog,next_pes,k;
    if(ilog <= npes) {
        if(me % ilog ==0) {

            if(me == 4) printf("\n");
            //		printf("\nDEBUG%d --> %d", me+i, me);
            shmem_int_get(&A[nelems], A, nelems, me+i);

            /*	printf("\nDEBUG(%d)imported list: ",me);
            	for( k = 0; k < nelems; k++){
            		printf("%d ,",Aux[k]);
            	}
            	printf("\n");

            	for(k=0; k < nelems; k++){
            		A[nelems+k] = Aux[k];
            	}	*/

            merge (0,nelems-1, nelems*2-1);

            nelems = nelems *2;

            /*	printf("\nDEBUG(%d)new A: ",me);
                            for( k = 0; k < nelems; k++){
                                    printf("A[%d]= %d \n",k,A[k]);
                            }
                            printf("\n");*/



            next_pes = pes/2;
            next_i = 2*i;
            next_ilog = 2*ilog;
            shmem_barrier(0,j , next_pes, pSync);
            j = j+1;
            recurse(next_i, next_ilog, next_pes, j, nelems);
        }
    }
}
int main(void)
{
   int i, me, npes;

   for (i = 0; i < _SHMEM_BARRIER_SYNC_SIZE; i += 1){
      pSync[i] = _SHMEM_SYNC_VALUE;
   }

   start_pes(0);
   me = _my_pe();
   npes = _num_pes();

   if(me % 2 == 0){
      x = 1000 + me;
      /*put to next even PE in a circular fashion*/
      shmem_int_p(&x, 4, (me+2)%npes);
      /*synchronize all even pes*/
      shmem_barrier(0, 1, (npes/2 + npes%2), pSync);
   }
   printf("%d: x = %d\n", me, x);
   return 0;
}
Example #10
int
main()
{
    int me;
    int i;

    for (i = 0; i < SHMEM_BARRIER_SYNC_SIZE; i += 1) {
        pSync[i] = SHMEM_SYNC_VALUE;
    }

    shmem_init();
    me = shmem_my_pe();

    shmem_barrier_all();

    if (me == 0) {
        shmem_int_p(&x, 4, 1);
    }

    if (me == 2) {
        printf("Process %d going to sleep\n", me);
        sleep(3);
        printf("Process %d out from sleep\n", me);
    }

    printf("Process %d before barrier\n", me);
    if (me == 2 || me == 3) {
        shmem_barrier(2, 0, 2, pSync);
    }
    printf("Process %d after barrier\n", me);

    printf("%d: x = %d\n", me, x);

    shmem_finalize();

    return 0;
}
Example #11
int main(int argc, char *argv[])
{
  int size, rank, world_rank, my_group;
  int num_lsms; // number of parallel LSMS instances
  int size_lsms; // number of atoms in a lsms instance
  int num_steps; // number of energy calculations
  int initial_steps; // number of steps before sampling starts
  int stepCount=0; // count the Monte Carlo steps executed
  double max_time; // maximum walltime for this run in seconds
  bool restrict_time = false;       // was the maximum time specified?
  bool restrict_steps = false; // or the max. number of steps?
  int align; // alignment of lsms_instances
  
  double magnetization;
  double energy_accumulator; // accumulates the energy to calculate the mean
  int energies_accumulated;


  int new_peid,new_root;
  static int op,flag;
  double *evec,*r_values;
  // evec and r_values are allocated after startup, once size_lsms has been
  // parsed from the command line; allocating here would use size_lsms
  // before it is initialized.




  energy_accumulator=0.0;
  energies_accumulated=0;

  double walltime_0,walltime;

  double restartWriteFrequency=30.0*60.0;
  double nextWriteTime=restartWriteFrequency;

  MPI_Comm local_comm;
  int *lsms_rank0;
  MPI_Status status;

  char prefix[40];
  char i_lsms_name[64];
  char gWL_in_name[64], gWL_out_name[64];
  char mode_name[64];
  char energy_calculation_name[64];
  char stupid[37];

  char step_out_name[64];
  char wl_step_out_name[128];
  char *wl_stepf=NULL;
  bool step_out_flag=false;
  std::ofstream step_out_file;
  typedef enum {Constant, Random, WangLandau_1d, ExhaustiveIsing, WangLandau_2d} EvecGenerationMode;
  typedef enum {MagneticMoment, MagneticMomentZ, MagneticMomentX, MagneticMomentY} SecondDimension;

  EvecGenerationMode evec_generation_mode = Constant;
  SecondDimension second_dimension = MagneticMoment;
  double ev0[3];

  bool return_moments_flag=true; // true-> return all magnetic moments from lsms run at each step.
  bool generator_needs_moment=false;

  typedef enum {OneStepEnergy, MultiStepEnergy, ScfEnergy} EnergyCalculationMode;
  EnergyCalculationMode energyCalculationMode = OneStepEnergy;
  int energyIndex=1; // index for the return value to use for the MC step (0: total energy, 1: band energy)

  ev0[0]=ev0[1]=0.0; ev0[2]=1.0;
  // size has to be align + size_lsms*num_lsms
  align=1;
  num_lsms=1;
  size_lsms=-1;
  my_group=-1;
  num_steps=1;
  initial_steps=0;

  sprintf(i_lsms_name,"i_lsms");
  gWL_in_name[0]=gWL_out_name[0]=0;
  mode_name[0]=0;
  energy_calculation_name[0]=0;

  // check command line arguments
  for(int i=0; i<argc; i++)
  {
    if(!strcmp("-num_lsms",argv[i])) num_lsms=atoi(argv[++i]);
    if(!strcmp("-size_lsms",argv[i])) size_lsms=atoi(argv[++i]);
    if(!strcmp("-align",argv[i])) align=atoi(argv[++i]);
    if(!strcmp("-num_steps",argv[i])) {num_steps=atoi(argv[++i]); restrict_steps=true;}
    if(!strcmp("-initial_steps",argv[i])) initial_steps=atoi(argv[++i]); 
    if(!strcmp("-walltime",argv[i])) {max_time=60.0*atof(argv[++i]); restrict_time=true;}
    if(!strcmp("-i",argv[i])) strncpy(i_lsms_name,argv[++i],64);
    if(!strcmp("-random_dir",argv[i])) {evec_generation_mode = Random;}
    if(!strcmp("-step_out",argv[i]))
    {strncpy(step_out_name,argv[++i],64); step_out_flag=true;
      return_moments_flag=true;}
    if(!strcmp("-wl_out", argv[i])) strncpy(gWL_out_name,argv[++i],64);
    if(!strcmp("-wl_in", argv[i])) strncpy(gWL_in_name,argv[++i],64);
    if(!strcmp("-mode", argv[i])) strncpy(mode_name,argv[++i],64);
    if(!strcmp("-energy_calculation",argv[i])) strncpy(energy_calculation_name,argv[++i],64);
  }

  if(!(restrict_steps || restrict_time)) restrict_steps=true;

  if(mode_name[0]!=0)
  {
    if(!strcmp("constant",mode_name)) evec_generation_mode = Constant;
    if(!strcmp("random",mode_name)) evec_generation_mode = Random;
    if(!strcmp("1d",mode_name)) evec_generation_mode = WangLandau_1d;
    if(!strcmp("ising",mode_name)) evec_generation_mode = ExhaustiveIsing;
    if(!strcmp("2d",mode_name)) evec_generation_mode = WangLandau_2d;
    if(!strcmp("2d-m",mode_name)) {evec_generation_mode = WangLandau_2d; second_dimension=MagneticMoment;}
    if(!strcmp("2d-x",mode_name)) {evec_generation_mode = WangLandau_2d; second_dimension=MagneticMomentX;}
    if(!strcmp("2d-y",mode_name)) {evec_generation_mode = WangLandau_2d; second_dimension=MagneticMomentY;}
    if(!strcmp("2d-z",mode_name)) {evec_generation_mode = WangLandau_2d; second_dimension=MagneticMomentZ;}
  }

  if(energy_calculation_name[0]!=0)
  {
    if(energy_calculation_name[0]=='o') { energyCalculationMode = OneStepEnergy; energyIndex=1; }
    if(energy_calculation_name[0]=='m') { energyCalculationMode = MultiStepEnergy; energyIndex=1; }
    if(energy_calculation_name[0]=='s') { energyCalculationMode = ScfEnergy; energyIndex=0; }
  }

#ifdef USE_PAPI
#define NUM_PAPI_EVENTS 4
  int hw_counters = PAPI_num_counters();
  if(hw_counters>NUM_PAPI_EVENTS) hw_counters=NUM_PAPI_EVENTS;
  int papi_events[NUM_PAPI_EVENTS]; // = {PAPI_TOT_INS,PAPI_TOT_CYC,PAPI_FP_OPS,PAPI_VEC_INS};
  char *papi_event_name[] = {"PAPI_TOT_INS","PAPI_FP_OPS",
                             "RETIRED_SSE_OPERATIONS:DOUBLE_ADD_SUB_OPS:DOUBLE_MUL_OPS:DOUBLE_DIV_OPS:OP_TYPE",
                             "RETIRED_SSE_OPERATIONS:SINGLE_ADD_SUB_OPS:SINGLE_MUL_OPS:SINGLE_DIV_OPS:OP_TYPE"};
  // "RETIRED_INSTRUCTIONS",
  // "RETIRED_MMX_AND_FP_INSTRUCTIONS:PACKED_SSE_AND_SSE2",
  // "RETIRED_SSE_OPERATIONS:DOUBLE_ADD_SUB_OPS:DOUBLE_MUL_OPS:DOUBLE_DIV_OPS:1",
  // "RETIRED_SSE_OPERATIONS:SINGLE_ADD_SUB_OPS:SINGLE_MUL_OPS:SINGLE_DIV_OPS:1"
  // get events from names:
  for(int i=0; i<NUM_PAPI_EVENTS; i++)
  {
    if(PAPI_event_name_to_code(papi_event_name[i],&papi_events[i]) != PAPI_OK)
    {
      // printline("Error in obtaining PAPI event code for: "+ttos(papi_event_name[i]),
      //           std::cerr,parameters.myrankWorld);
      // printline("Skipping all following events",
      //           std::cerr,parameters.myrankWorld);
      if(hw_counters>i) hw_counters=i;
    }
  }
  long long papi_values[NUM_PAPI_EVENTS+4];
  // printline("PAPI: "+ttos(hw_counters)+" counters available",std::cout,parameters.myrankWorld);
  if(hw_counters>NUM_PAPI_EVENTS) hw_counters=NUM_PAPI_EVENTS;
  long long papi_real_cyc_0 = PAPI_get_real_cyc();
  long long papi_real_usec_0 = PAPI_get_real_usec();
  long long papi_virt_cyc_0 = PAPI_get_virt_cyc();
  long long papi_virt_usec_0 = PAPI_get_virt_usec();
  PAPI_start_counters(papi_events,hw_counters);
#endif


  lsms_rank0=(int *)malloc(sizeof(int)*(num_lsms+1));

  // initialize MPI:
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  world_rank=rank;
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  // Allocate the symmetric result buffers now that size_lsms is known
  // (moved from the top of main; assumes the SHMEM library has been
  // initialized by this point).
  evec=(double *)shmalloc(sizeof(double)*3*size_lsms);
  r_values=(double *)shmalloc(sizeof(double)*(R_VALUE_OFFSET+3*(size_lsms+1)));

  walltime_0 = get_rtc();

#ifndef SVN_REV
#define SVN_REV "unknown"
#endif

// make sure 'return_moments_flag' is set correctly
  switch(evec_generation_mode)
  {
  case Constant : break;
  case Random : break;
  case WangLandau_1d :
    return_moments_flag = true;
    generator_needs_moment = true;
    break;
  case ExhaustiveIsing : break;
  case WangLandau_2d :
    return_moments_flag = true;
    generator_needs_moment = true;
    break;
  default: std::cout<<" ERROR: UNKNOWN EVEC GENERATION MODE\n"; exit(1);
  }

  if(rank==0)
  {
    std::cout<<"LSMS_3"<<std::endl;
    std::cout<<" SVN revision "<<SVN_REV<<std::endl<<std::endl;
#ifdef USE_PAPI
    std::cout<<" Using Papi counters"<<std::endl<<std::endl; 
#endif
    std::cout<<" Size of LSMS instances = "<<size_lsms<<" atoms\n";
    std::cout<<" Number of LSMS instances = "<<num_lsms<<std::endl;
    std::cout<<" LSMS Energy calculated using ";
    switch(energyCalculationMode)
    {
    case OneStepEnergy: std::cout<<"oneStepEnergy [frozen potential band energy]"<<std::endl; break;
    case MultiStepEnergy: std::cout<<"multiStepEnergy [frozen potential band energy with converged Fermi energy]"<<std::endl; break;
    case ScfEnergy: std::cout<<"scfEnergy [self-consistent total energy]"<<std::endl; break;
    default: std::cout<<"UNKNOWN ENERGY CALCULATION METHOD"<<std::endl; exit(1);
    }
    if(restrict_steps) std::cout<<" Number of gWL steps = "<<num_steps<<std::endl;
    if(restrict_time) std::cout<<" Maximum walltime = "<<max_time<<"s\n";
    std::cout<<" Processor alignment (process allocation quantization) = "<<align<<std::endl;
    switch(evec_generation_mode)
    {
    case Constant : std::cout<<" Constant moments direction along "
                             <<ev0[0]<<" "<<ev0[1]<<" "<<ev0[2]<<std::endl;
      break;
    case Random : std::cout<<" Random distribution of moments (no Wang-Landau)"<<std::endl;
      break;
    case WangLandau_1d : std::cout<<" Wang-Landau for one continuous variable (energy)"<<std::endl;
//      return_moments_flag = true;
//      generator_needs_moment = true;
      break;
    case ExhaustiveIsing : std::cout<<" Exhaustive Ising sampling"<<std::endl; break;
    case WangLandau_2d : std::cout<<" Wang-Landau for two continuous variables (energy, ";
      switch(second_dimension)
      {
      case MagneticMoment  : std::cout<<"magnitude of magnetization)"; break;
      case MagneticMomentX : std::cout<<"x component of magnetization)"; break;
      case MagneticMomentY : std::cout<<"y component of magnetization)"; break;
      case MagneticMomentZ : std::cout<<"z component of magnetization)"; break;
      }
      std::cout<<std::endl;
//      return_moments_flag = true;
//      generator_needs_moment = true;
      break;
    default: std::cout<<" ERROR: UNKNOWN EVEC GENERATION MODE\n"; exit(1);
    }
    if(step_out_flag) std::cout<<" Step output written to: "<<step_out_name<<std::endl;
    std::cout<<std::endl;

    if(step_out_flag && (evec_generation_mode==WangLandau_1d))
    {
      // step_out_flag=false;
      snprintf(wl_step_out_name,127,"wl1d_%s",step_out_name);
      wl_stepf=wl_step_out_name;
    }

    if(step_out_flag)
    {
      step_out_file.open(step_out_name);
      step_out_file<<"#";
      for(int i=0; i<argc; i++) step_out_file<<" "<<argv[i];
      step_out_file<<std::endl<<size_lsms<<std::endl;
    }
  }

  if(generator_needs_moment) return_moments_flag=true;

  if(num_lsms==1)
  {
    SHMEM_activeset local_comm;
    local_comm.rank=shmem_my_pe();
    local_comm.size=shmem_n_pes();
    local_comm.start_pe=0;
    local_comm.logPE_stride=0;
    LSMS lsms_calc(local_comm,i_lsms_name,"1_");
      
    if(rank==0)
    {
      std::cout<<"executing LSMS(C++) for "<<lsms_calc.numSpins()<<" atoms\n";
      std::cout<<"  LSMS version = "<<lsms_calc.version()<<std::endl;
    }

    if(energyCalculationMode==OneStepEnergy)
      std::cout<<"one step Energy = "<<lsms_calc.oneStepEnergy()<<std::endl;
    else if(energyCalculationMode==MultiStepEnergy)
      std::cout<<"multi-step Energy = "<<lsms_calc.multiStepEnergy()<<std::endl;
    else if(energyCalculationMode==ScfEnergy)
      std::cout<<"self-consistent Energy = "<<lsms_calc.scfEnergy()<<std::endl;
    else
    {
      printf("ERROR: Unknown energy calculation mode for lsms_calc in wl-lsms main!\n");
     // MPI_Abort(MPI_COMM_WORLD,5);
      exit(5);
    }
  }
  else
  {
    // build the communicators
    //int color=MPI_UNDEFINED;
    //Assuming user passes a power of two while using "-align"
    int s = align;
    int comm_size=(size-align)/num_lsms;
    for(int i=0; i<num_lsms; i++)
    {
      if((world_rank>=s) && (world_rank<s+comm_size)) 
      { 
        my_group=i; 
        //color=i; 
        new_peid=world_rank-s;
        new_root=s;
      }
      lsms_rank0[i]=s;
      s+=comm_size;
    }
    if(world_rank==0){ 
      //color=num_lsms;
      new_peid=0;
      comm_size=1;
      new_root=0;
    }

    //MPI_Comm_split(MPI_COMM_WORLD, color, 0, &local_comm);
    SHMEM_activeset local_comm;
    local_comm.rank=new_peid;
    local_comm.size=comm_size;
    local_comm.start_pe=new_root;
    local_comm.logPE_stride=0;

    std::cout<<"world_rank="<<world_rank<<" -> group="<<my_group<<std::endl;

      
    snprintf(prefix,38,"Group %4d: ",my_group);

    // now we get ready to do some calculations...

    if(my_group>=0)
    {
      double energy;
      double band_energy;
      int static i_values[10];
      // op and r_values intentionally refer to the symmetric objects declared
      // at the top of main; block-local static copies here would break the
      // address matching for the remote put/swap/broadcast calls.


      //MPI_Comm_rank(local_comm, &rank);
      rank = local_comm.rank;
      snprintf(prefix,38,"%d_",my_group);
      // to use the ramdisk on jaguarpf:
      // snprintf(prefix,38,"/tmp/ompi/%d_",my_group);
      LSMS lsms_calc(local_comm,i_lsms_name,prefix);
      snprintf(prefix,38,"Group %4d: ",my_group);

      if(rank==0 && my_group==0)
      {
        std::cout<<prefix<<"executing LSMS(C++) for "<<lsms_calc.numSpins()<<" atoms\n";
        std::cout<<prefix<<"  LSMS version = "<<lsms_calc.version()<<std::endl;
      }

      // wait for commands from master
      bool finished=false;
      while(!finished)
      {
        if(rank==0)
        {
          //MPI_Recv(evec,3*size_lsms,MPI_DOUBLE,0,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
          //op =status.MPI_TAG;
          if (lsms_rank0[0]==world_rank)
                shmem_barrier(0, lsms_rank0[0], 2, pSync1);

        }
        //MPI_Bcast(&op,1,MPI_INT,0,local_comm);
        // PE_root is an index relative to the active set, hence 0 (= the group's start_pe)
        shmem_broadcast32(&op, &op, 1, 0, local_comm.start_pe, local_comm.logPE_stride, local_comm.size, pSync2);

/* recognized opcodes:
   5: calculate energy

   recognized energy calculation modes:
   OneStepEnergy : calculate frozen potential band energy in one step (don't converge Ef)
   use only if the Fermi energy will not change due to MC steps!
   The only method available in LSMS_1.9
   MultiStepEnergy : calculate frozen potential band energy after converging Fermi energy
   This should be the new default method. If the Fermi energy doesn't change,
   multiStepEnergy performs only one step and should be equivalent to oneStepEnergy.
   The tolerance for Ef convergence can be set with LSMS::setEfTol(Real).
   The default tolerance is set in the LSMS::LSMS constructor (currently 1.0e-6).
   The maximum number of steps is read from the LSMS input file 'nscf' parameter.
   ScfEnergy : this will calculate the self-consistent total energy.
   The maximum number of steps is read from the LSMS input file 'nscf' parameter.
   NOT IMPLEMENTED YET!!!

   10: get number of sites
*/

        if(op==5)
        {
          lsms_calc.setEvec(evec);
          if(energyCalculationMode==OneStepEnergy)
            energy=lsms_calc.oneStepEnergy(&band_energy);
          else if(energyCalculationMode==MultiStepEnergy)
            band_energy=energy=lsms_calc.multiStepEnergy();
          else if(energyCalculationMode==ScfEnergy)
            energy=lsms_calc.scfEnergy(&band_energy);
          else
          {
            printf("ERROR: Unknown energy calculation mode for lsms_calc in wl-lsms main!\n");
            //MPI_Abort(MPI_COMM_WORLD,5);
            exit(5);
          }
          r_values[0]=energy;
          r_values[1]=band_energy;
          if(return_moments_flag)
          {
            lsms_calc.getMag(&r_values[R_VALUE_OFFSET]);
          }
          if(rank==0)
          {
            if(return_moments_flag)
            {
              //MPI_Send(r_values,R_VALUE_OFFSET+3*size_lsms,MPI_DOUBLE,0,1005,MPI_COMM_WORLD);
              shmem_double_put(r_values, r_values, R_VALUE_OFFSET+3*size_lsms, 0);

            } else {
              //MPI_Send(r_values,R_VALUE_OFFSET,MPI_DOUBLE,0,1005,MPI_COMM_WORLD);
              shmem_double_put(r_values, r_values, R_VALUE_OFFSET, 0);
            }
            shmem_fence();
            shmem_int_swap(&flag, world_rank, 0);

          }
              
        } else if(op==10) {
          i_values[0]=lsms_calc.numSpins();
          //MPI_Send(i_values,10,MPI_INT,0,1010,MPI_COMM_WORLD);
          shmem_int_put(i_values, i_values, 10, 0);
        } else {
          // printf("world rank %d: recieved exit\n",world_rank); 
          finished=true;
        }
      }

      shfree(evec);
      //shfree(r_values);
    }
    else if(world_rank==0)
    {
      int running;
      double **evecs;
      //double *r_values;
      //int i_values[10];
      int *init_steps;
      int total_init_steps;
      bool accepted;
        
      char *wl_inf=NULL;
      char *wl_outf=NULL;
      // an array name always converts to a non-null pointer, so test the
      // first character instead:
      if(gWL_in_name[0]!=0) wl_inf=gWL_in_name;
      if(gWL_out_name[0]!=0) wl_outf=gWL_out_name;
        
      EvecGenerator *generator;

/*
      // get number of spins from first LSMS instance
      // temp r_values:
      r_values=(double *)malloc(sizeof(double)*10);
      MPI_Send(r_values,1,MPI_DOUBLE, lsms_rank0[0], 10, MPI_COMM_WORLD);
      free(r_values);
      MPI_Recv(i_values,10,MPI_INT,lsms_rank0[0],1010,MPI_COMM_WORLD,&status);
      if(i_values[0]!=size_lsms)
      {
        printf("Size specified for Wang-Landau and in LSMS input file don't match!\n");
        size_lsms=i_values[0];
      }
*/

      evecs=(double **)shmalloc(sizeof(double *)*num_lsms);
      init_steps=(int *)shmalloc(sizeof(int)*num_lsms);
      for(int i=0; i<num_lsms; i++)
      {
        evecs[i]=(double *)shmalloc(sizeof(double)*3*size_lsms);
        init_steps[i]=initial_steps;
      }
      total_init_steps=num_lsms*initial_steps;
        

      // Initialize the correct evec generator
      switch(evec_generation_mode)
      {
      case Random :  generator = new RandomEvecGenerator(size_lsms);
        break;
      case Constant: generator = new ConstantEvecGenerator(size_lsms, ev0, num_lsms);
        break;
     //case WangLandau_1d : generator = new WL1dEvecGenerator<std::mt19937>(size_lsms, num_lsms,
     //                                                                      evecs, wl_inf, wl_outf, wl_stepf);
     case WangLandau_1d : generator = new WL1dEvecGenerator<boost::mt19937>(size_lsms, num_lsms,
                                                                           evecs, wl_inf, wl_outf, wl_stepf);
        break;
      case ExhaustiveIsing : generator = new ExhaustiveIsing1dEvecGenerator(size_lsms, num_lsms,
                                                                            evecs, wl_inf, wl_outf);
        break;
      //case WangLandau_2d : generator = new WL2dEvecGenerator<std::mt19937>(size_lsms, num_lsms,
      //                                                                     evecs, wl_inf, wl_outf, wl_stepf);
      case WangLandau_2d : generator = new WL2dEvecGenerator<boost::mt19937>(size_lsms, num_lsms,
                                                                           evecs, wl_inf, wl_outf, wl_stepf);
        break;
      default: std::cerr<<"The code should never arrive here: UNKNOWN EVEC GENERATION MODE\n";
        exit(1);
      }

      for(int i=0; i<num_lsms; i++)
      {
        generator->initializeEvec(i,evecs[i]);
      }
      std::cout<<"This is the master node\n";
      // issue initial commands to all LSMS instances
      running=0;
      flag=-1;  // arm the completion flag; workers swap their world rank into it
      bool more_work=true;
      if(total_init_steps>0)
      {
        for(int i=0; i<num_lsms; i++)
        {
          std::cout<<"starting initial calculation in group "<<i<<std::endl;
          //MPI_Send(evecs[i], 3*size_lsms, MPI_DOUBLE, lsms_rank0[i], 5, MPI_COMM_WORLD);
          shmem_double_put(evec, evecs[i], 3*size_lsms, lsms_rank0[i]);
          shmem_int_p(&op, 5, lsms_rank0[i]);
          shmem_fence();


          num_steps--; running++; stepCount++;
          if(restrict_steps) std::cout<<"      "<<num_steps<<" steps remaining\n";
        }
        shmem_barrier(0, lsms_rank0[0], 2, pSync1);
        // first deal with the initial steps:
        while(running>0)
        {
          //if(return_moments_flag)
          //  MPI_Recv(r_values,R_VALUE_OFFSET+3*size_lsms,MPI_DOUBLE,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
          //else
          //  MPI_Recv(r_values,R_VALUE_OFFSET,MPI_DOUBLE,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
          
          shmem_int_wait(&flag,-1);
          int source_rank = flag; // world rank of the group that just reported
          flag = -1;              // re-arm (assumes completions arrive one at a time)

          running--;
          // std::cout<<"received energy E_tot ="<<r_values[0]<<std::endl;
          // std::cout<<"    band energy E_band="<<r_values[1]<<std::endl;
          if(total_init_steps>0)
          {
            //int r_group=(status.MPI_SOURCE-align)/comm_size;
            int r_group=(source_rank-align)/comm_size;
            std::cout<<"starting additional calculation in group "<<r_group<<std::endl;

            if(init_steps[r_group]>0)
            {
              more_work = !(generator->generateUnsampledEvec(r_group,evecs[r_group],r_values[energyIndex]));
              init_steps[r_group]--; total_init_steps--;
            }
                
            //MPI_Send(evecs[r_group], 3*size_lsms, MPI_DOUBLE, lsms_rank0[r_group], 5, MPI_COMM_WORLD);
            shmem_double_put(evec, evecs[r_group],  3*size_lsms, lsms_rank0[r_group]); // target the symmetric evec buffer the workers read
            shmem_fence();
                
            num_steps--; running++; stepCount++;
            if(restrict_steps && num_steps<=0) more_work=false;
            if(restrict_steps) std::cout<<"      "<<num_steps<<" steps remaining\n";
            walltime = get_rtc() - walltime_0;
            if(restrict_time && walltime>=max_time) more_work=false;
            if(restrict_time) std::cout<<"      "<<max_time-walltime<<" seconds remaining\n";
          }
              
        }
      }
      more_work=true;
      running=0;
      for(int i=0; i<num_lsms; i++)
      {
        std::cout<<"starting main calculation in group "<<i<<std::endl;
        //MPI_Send(evecs[i], 3*size_lsms, MPI_DOUBLE, lsms_rank0[i], 5, MPI_COMM_WORLD);
        shmem_double_put(evec, evecs[i], 3*size_lsms, lsms_rank0[i]);
        shmem_int_p(&op, 5, lsms_rank0[i]);
        shmem_fence();
        num_steps--; running++; stepCount++;
        if(restrict_steps) std::cout<<"      "<<num_steps<<" steps remaining\n";
      }
      shmem_barrier(0, lsms_rank0[0], 2, pSync1);
        
      generator->startSampling();
      // wait for results and issue new commands or wind down
      while(running>0)
      {
        //MPI_Recv(r_values,R_VALUE_OFFSET+3*size_lsms,MPI_DOUBLE,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
        shmem_int_wait(&flag,-1);
        int source_rank = flag; // world rank of the group that just reported
        flag = -1;              // re-arm (assumes completions arrive one at a time)

        running--;
        std::cout<<"received energy E_tot ="<<r_values[0]<<std::endl;
        std::cout<<"    band energy E_band="<<r_values[1]<<std::endl;
        // printf("from status.MPI_SOURCE=%d\n",status.MPI_SOURCE);
        energy_accumulator+=r_values[0]; energies_accumulated++;
        if(more_work)
        {
          int r_group=(source_rank-align)/comm_size;
          std::cout<<"starting additional calculation in group "<<r_group<<std::endl;
              
          if(generator_needs_moment)
          {
            double m0,m1,m2;
            m0=0.0; m1=0.0; m2=0.0;
            for(int i=0; i<3*size_lsms; i+=3)
            {
              m0+=r_values[R_VALUE_OFFSET+i];
              m1+=r_values[R_VALUE_OFFSET+i+1];
              m2+=r_values[R_VALUE_OFFSET+i+2];
            }
            switch(second_dimension)
            {
            case  MagneticMoment : magnetization=std::sqrt(m0*m0+m1*m1+m2*m2); break;
            case  MagneticMomentX : magnetization=m0; break;
            case  MagneticMomentY : magnetization=m1; break;
            case  MagneticMomentZ : magnetization=m2; break;
            }
            if(generator->generateEvec(r_group,evecs[r_group],r_values[energyIndex],magnetization, &accepted))
              more_work=false;
          } else {
            if(generator->generateEvec(r_group,evecs[r_group],r_values[energyIndex], &accepted)) more_work=false;
          }

          //MPI_Send(evecs[r_group], 3*size_lsms, MPI_DOUBLE, lsms_rank0[r_group], 5, MPI_COMM_WORLD);
          shmem_double_put(evec, evecs[r_group],  3*size_lsms, lsms_rank0[r_group]); // target the symmetric evec buffer the workers read
          shmem_fence();

          num_steps--; running++; stepCount++;
          if(restrict_steps && num_steps<=0) more_work=false;
          if(restrict_steps) std::cout<<"      "<<num_steps<<" steps remaining\n";
          walltime = get_rtc() - walltime_0;
          if(restrict_time && walltime>=max_time) more_work=false;
          if(restrict_time) std::cout<<"      "<<max_time-walltime<<" seconds remaining\n";
        }
        else
        {
          // send an exit message to this instance of LSMS: any op other
          // than 5 or 10 makes the worker loop above finish (replacing the
          // stray MPI_Send with the shmem_int_p pattern used earlier)
          int r_group=(source_rank-align)/comm_size;

          //MPI_Send(evecs[r_group], 3*size_lsms, MPI_DOUBLE, lsms_rank0[r_group], 2, MPI_COMM_WORLD);
          shmem_int_p(&op, 2, lsms_rank0[r_group]);
          shmem_fence();
        }

        if(step_out_flag && accepted)
        {
          step_out_file<<"# iteration "<<energies_accumulated<<std::endl;
          step_out_file.precision(15);
          step_out_file<<energies_accumulated<<std::endl;
          step_out_file<<r_values[0]<<"  "<<r_values[1]<<std::endl;
          for(int j=0; j<3*size_lsms; j+=3)
          {
            step_out_file<<r_values[j+R_VALUE_OFFSET]<<"  "<<r_values[j+R_VALUE_OFFSET+1]
                         <<"  "<<r_values[j+R_VALUE_OFFSET+2]<<std::endl;
          }
        }
        // write restart file every restartWriteFrequency seconds
        if(walltime>nextWriteTime)
        {
          generator->writeState("WLrestart.jsn");
          nextWriteTime+=restartWriteFrequency;
        }

      }
      generator->writeState("WLrestart.jsn");
/*
  if(evec_generation_mode==WangLandau_1d)
  (static_cast<WL1dEvecGenerator<std::mt19937> *>(generator))->writeState("WLrestart.state");
  if(evec_generation_mode==ExhaustiveIsing)
  (static_cast<ExhaustiveIsing1dEvecGenerator *>(generator))->writeState("WLrestart.state");
*/
      for(int i=0; i<num_lsms; i++) shfree(evecs[i]); // allocated with shmalloc above
      shfree(evecs);
      //shfree(r_values);
    }
  }

  if(world_rank==0)
  {
    if(step_out_flag)
    {
      step_out_file<<"# end\n-1\n"
                   <<energy_accumulator/double(energies_accumulated)<<std::endl;
      step_out_file.close();
    }
    std::cout<<"Finished all scheduled calculations. Freeing resources.\n";
    std::cout<<"Energy mean = "<<energy_accumulator/double(energies_accumulated)<<"Ry\n";
  }


  if(num_lsms>1)
  {
    // make sure everyone arrives here:
    MPI_Bcast(stupid,37,MPI_CHAR,0,MPI_COMM_WORLD);

    // local_comm is never created with MPI_Comm_split in this SHMEM port
    // (the split is commented out above), so there is no communicator to
    // free here; calling MPI_Comm_free on it would be an error.
    //if(world_rank==0 || my_group>=0)
    //  MPI_Comm_free(&local_comm);
  }



  if(world_rank==0)
  {
    double walltime = get_rtc() - walltime_0;
    std::cout<<" WL-LSMS finished in "<<walltime<<" seconds.\n";
    std::cout<<" Monte-Carlo steps / walltime = "
             <<double(stepCount)/walltime<<"/sec\n";
  }

#ifdef USE_PAPI
  PAPI_stop_counters(papi_values,hw_counters);
  papi_values[hw_counters  ] = PAPI_get_real_cyc()-papi_real_cyc_0;
  papi_values[hw_counters+1] = PAPI_get_real_usec()-papi_real_usec_0;
  papi_values[hw_counters+2] = PAPI_get_virt_cyc()-papi_virt_cyc_0;
  papi_values[hw_counters+3] = PAPI_get_virt_usec()-papi_virt_usec_0;
  long long accumulated_counters[NUM_PAPI_EVENTS+4];
/*
  for(int i=0; i<hw_counters; i++)
  {
  printline(ttos(papi_event_name[i])+" = "+ttos(papi_values[i]),
  std::cout,parameters.myrankWorld);
  }
  printline("PAPI real cycles : "+ttos(papi_values[hw_counters]),
  std::cout,parameters.myrankWorld);
  printline("PAPI real usecs : "+ttos(papi_values[hw_counters+1]),
  std::cout,parameters.myrankWorld);
  printline("PAPI user cycles : "+ttos(papi_values[hw_counters+2]),
  std::cout,parameters.myrankWorld);
  printline("PAPI user usecs : "+ttos(papi_values[hw_counters+3]),
  std::cout,parameters.myrankWorld);
*/
  
  //MPI_Reduce(papi_values,accumulated_counters,hw_counters+4,
  //           MPI_LONG,MPI_SUM,0,MPI_COMM_WORLD);

  // Sum the counters across all PEs (active set 0..npes-1). pWrk_l is
  // assumed to be a symmetric long work array of at least
  // SHMEM_REDUCE_MIN_WRKDATA_SIZE elements; the int array pWrk_i cannot
  // be used for a long reduction.
  shmem_long_sum_to_all(accumulated_counters, papi_values, hw_counters+4,
      0, 0, shmem_n_pes(), pWrk_l, pSync2);



  if(world_rank==0)
  {
    for(int i=0; i<hw_counters; i++)
    {
      std::cout<<"Accumulated: "<<(papi_event_name[i])<<" = "<<(accumulated_counters[i])<<"\n";
    }
    std::cout<<"PAPI accumulated real cycles : "<<(accumulated_counters[hw_counters])<<"\n";
    std::cout<<"PAPI accumulated user cycles : "<<(accumulated_counters[hw_counters+2])<<"\n";
    double gflops_papi = ((double)accumulated_counters[1])/
      (1000.0*(double)papi_values[hw_counters+1]);
    double gflops_hw_double = ((double)accumulated_counters[2])/
      (1000.0*(double)papi_values[hw_counters+1]);
    double gflops_hw_single = ((double)accumulated_counters[3])/
      (1000.0*(double)papi_values[hw_counters+1]);
    double gips = ((double)accumulated_counters[0])/(1000.0*(double)papi_values[hw_counters+1]);
    std::cout<<"PAPI_FP_OPS real GFLOP/s : "<<(gflops_papi)<<"\n";
    std::cout<<"PAPI hw double real GFLOP/s : "<<(gflops_hw_double)<<"\n";
    std::cout<<"PAPI hw single real GFLOP/s : "<<(gflops_hw_single)<<"\n";
    std::cout<<"PAPI real GINST/s : "<<(gips)<<"\n";
  }
#endif


  //MPI_Finalize();
  return 0;
}
Example #12
int
main (void)
{
  int i;
  int me, npes;
  long *pSync;
  double *A;

  /*
   * Kick the OpenSHMEM program off.  Discover program layout, and
   * prep. the sync work array for barrier
   */
  start_pes (0);
  me = shmem_my_pe ();
  npes = shmem_n_pes ();

  pSync = shmalloc (sizeof (*pSync) * _SHMEM_BARRIER_SYNC_SIZE);
  for (i = 0; i < _SHMEM_BARRIER_SYNC_SIZE; i += 1)
    {
      pSync[i] = _SHMEM_SYNC_VALUE;
    }
  shmem_barrier_all ();

  /*
   * Allocate array everywhere and initialize
   */
  A = shmalloc (sizeof (*A) * X * Y);
  init_array (A, X, Y, 0.0);

  /*
   * Use PEs 0 and 1 as sender and receiver
   */
  if (me == 0)
    {
      double *target = & A[OFFSET (0, 0, Y)];
      shmem_double_p (target, 3.142, 1);
    }

  /*
   * Pair-wise sync sender & receiver.  Other PEs do not enter
   * barrier
   */
  if ( (me == 0) || (me == 1) )
    {
      shmem_barrier (0, 0, 2, pSync);
    }
  else
    {
      printf ("PE %d / %d not in barrier\n", me, npes);
    }

  /*
   * Receiver shows us updated array
   */
  if (me == 1)
    {
      dump_array (stdout, A, X, Y);
    }

  return 0;
}
Example #13
File: pingpong.c Project: caomw/SOS
int
main(int argc, char* argv[])
{
	int c, j, loops, k, l;
	int my_pe, nProcs, nWorkers;
	int  nWords=1;
	int  failures=0;
	char *prog_name;
	long *wp,work_sz;

    for(j=0; j < SHMEM_BARRIER_SYNC_SIZE; j++) {
        pSync0[j] = pSync1[j] = pSync2[j] = pSync3[j] =
            pSync4[j] = SHMEM_SYNC_VALUE;
    }

	shmem_init();
	my_pe = shmem_my_pe();
	nProcs = shmem_n_pes();
	nWorkers = nProcs - 1;

	if (nProcs == 1) {
   		Rfprintf(stderr,
			"ERR - Requires > 1 PEs\n");
		shmem_finalize();
		return 0;
	}

	for(j=0; j < nProcs; j++)
		if ( shmem_pe_accessible(j) != 1 ) {
			fprintf(stderr,
				"ERR - pe %d not accessible from pe %d\n",
				j, my_pe);
		}

	prog_name = strrchr(argv[0],'/');
	if ( prog_name )
		prog_name++;
	else
		prog_name = argv[0];

	while((c=getopt(argc,argv,"hvM:s")) != -1) {
		switch(c) {
		  case 's':
			Slow++;
			break;
		  case 'v':
			Verbose++;
			break;
		  case 'M':
			output_mod = atoi(optarg);
			if (output_mod <= 0) {
    				Rfprintf(stderr, "ERR - output modulo arg out of "
						"bounds '%d'?\n", output_mod);
				shmem_finalize();
				return 1;
			}
   			Rfprintf(stderr,"%s: output modulo %d\n",
					prog_name,output_mod);
			break;
		  case 'h':
			Rfprintf(stderr,
				"usage: %s {nWords-2-put(%d)K/M} {Loop-count(%d)K/M}\n",
				prog_name, DFLT_NWORDS, DFLT_LOOPS);
			shmem_finalize();
			return 1;
		  default:
			shmem_finalize();
			return 1;
		}
	}

	if (optind == argc)
		nWords = DFLT_NWORDS;
	else {
		nWords = atoi_scaled(argv[optind++]);
		if (nWords <= 0) {
    			Rfprintf(stderr, "ERR - Bad nWords arg '%d'?\n", nWords);
			shmem_finalize();
			return 1;
		}
	}

	if (optind == argc)
		loops = DFLT_LOOPS;
	else {
		loops = atoi_scaled(argv[optind++]);
		if (loops <= 0 || loops > 1000000) {
    			Rfprintf(stderr,
				"ERR - loops arg out of bounds '%d'?\n", loops);
			shmem_finalize();
			return 1;
		}
	}

    work_sz = (nProcs*nWords) * sizeof(long);
	work = shmem_malloc( work_sz );
	if ( !work ) {
   		fprintf(stderr,"[%d] ERR - work = shmem_malloc(%ld) ?\n",my_pe,work_sz);
		shmem_global_exit(1);
	}

	Target = shmem_malloc( 2 * nWords * sizeof(long) );
	if ( !Target ) {
   		fprintf(stderr,"[%d] ERR - Target = shmem_malloc(%ld) ?\n",
                my_pe, (nWords * sizeof(long)));
		shmem_global_exit(1);
	}
    src = &Target[nWords];

#if _DEBUG
	Rprintf("%s: %d loops of %d longs per put\n",prog_name,loops,nWords);
#endif

	for(j=0; j < nWords; j++)
		src[j] = VAL;

	for(j=0; j < loops; j++) {

#if _DEBUG
		if ( Verbose && (j==0 || (j % output_mod) == 0) )
    			fprintf(stderr,"[%d] +(%d)\n", my_pe,j);
#endif
        shmem_barrier(0, 0, nProcs, pSync0);
		if ( my_pe == 0 ) {
			int p;
			for(p=1; p < nProcs; p++)
				shmem_long_put(Target, src, nWords, p);
		}
		else {
			if (Slow) {
				/* wait for each put to complete */
				for(k=0; k < nWords; k++)
					shmem_wait(&Target[k],my_pe);
			} else {
				/* wait for last word to be written */
				shmem_wait(&Target[nWords-1],my_pe);
			}
		}
#if _DEBUG
		if ( Verbose && (j==0 || (j % output_mod) == 0) )
    			fprintf(stderr,"[%d] -(%d)\n", shmem_my_pe(),j);
#endif
        shmem_barrier(0, 0, nProcs, pSync1);

		RDprintf("Workers[1 ... %d] verify Target data put by proc0\n",
			nWorkers);

		/* workers verify put data is expected */
		if ( my_pe != 0 ) {
			for(k=0; k < nWords; k++) {
				if (Target[k] != VAL) {
					fprintf(stderr, "[%d] Target[%d] %#lx "
							"!= %#x?\n",
							my_pe,k,Target[k],VAL);
					failures++;
				}
				assert(Target[k] == VAL);
				Target[k] = my_pe;
			}
		}
		else	/* clear results buffer, workers will put here */
			memset(work, 0, work_sz);

        shmem_barrier(0, 0, nProcs, pSync2);

		RDprintf("Workers[1 ... %d] put Target data to PE0 work "
			"vector\n",nWorkers);

		if ( my_pe != 0 ) {
			/* push nWords of val my_pe back to PE zero */
			shmem_long_put(&work[my_pe * nWords], Target, nWords, 0);
		}
		else {
			/* wait for procs 1 ... nProcs to complete put()s */
			for(l=1; l < nProcs; l++) {
				wp = &work[ l*nWords ]; // procs nWords chunk
#if 1
				/* wait for last long to be written from each PE */
				shmem_wait(&wp[nWords-1],0);
#else
				for(k=0; k < nWords; k++)
					shmem_wait(&wp[k],0);
#endif
			}
		}

        shmem_barrier(0, 0, nProcs, pSync3);

		if ( my_pe == 0 ) {
			RDprintf("Loop(%d) PE0 verifing work data.\n",j);
			for(l=1; l < nProcs; l++) {
				wp = &work[ l*nWords ]; // procs nWords chunk
				for(k=0; k < nWords; k++) {
					if (wp[k] != l) {
						fprintf(stderr,
						"[0] PE(%d)_work[%d] %ld "
							"!= %d?\n",
							l,k,work[k],l);
						failures++;
					}
					assert(wp[k] == l);
					break;
				}
				if (failures)
					break;
			}
		}
        shmem_barrier(0, 0, nProcs, pSync4);
#if _DEBUG
		if (loops > 1) {
			Rfprintf(stderr,".");
			RDprintf("Loop(%d) Pass.\n",j);
		}
#endif
	}

    shmem_free( work );
    shmem_free( Target );

#if _DEBUG
	Rfprintf(stderr,"\n");fflush(stderr);
	shmem_barrier_all();
	RDprintf("%d(%d) Exit(%d)\n", my_pe, nProcs, failures);
#endif

	shmem_finalize();

	return failures;
}
Example #14
JNIEXPORT void JNICALL Java_shmem_ShMem_barrier(
        JNIEnv *env, jclass clazz, jint PE_start,
        jint logPE_stride, jint PE_size, jlong pSync)
{
    shmem_barrier(PE_start, logPE_stride, PE_size, (long*)pSync);
}
Example #15
void
FORTRANIFY (shmem_barrier) (int *PE_start, int *logPE_stride, int *PE_size,
                            int *pSync)
{
    shmem_barrier (*PE_start, *logPE_stride, *PE_size, (long *) pSync);
}
Example #16
int main(void)
{
    int i, me, npes;
    int errors = 0;

    shmem_init();

    me = shmem_my_pe();
    npes = shmem_n_pes();

    for (i = 0; i < NELEM; i++) {
        src[i] = me;
        dst[i] = -1;
    }

    for (i = 0; i < SHMEM_BCAST_SYNC_SIZE; i++)
        bcast_psync[i] = SHMEM_SYNC_VALUE;

    for (i = 0; i < SHMEM_BARRIER_SYNC_SIZE; i++) {
        barrier_psync0[i] = SHMEM_SYNC_VALUE;
        barrier_psync1[i] = SHMEM_SYNC_VALUE;
    }

    if (me == 0)
        printf("Shrinking active set test\n");

    shmem_barrier_all();

    /* A total of npes tests are performed, where the active set in each test
     * includes PEs i..npes-1 */
    for (i = 0; i <= me; i++) {
        int j;

        if (me == i)
            printf(" + active set size %d\n", npes-i);

        shmem_broadcast64(dst, src, NELEM, 0, i, 0, npes-i, bcast_psync);

        /* Validate broadcasted data */
        for (j = 0; j < NELEM; j++) {
            int64_t expected = (me == i) ? i-1 : i;
            if (dst[j] != expected) {
                printf("%d: Expected dst[%d] = %"PRId64", got dst[%d] = %"PRId64", iteration %d\n",
                       me, j, expected, j, dst[j], i);
                errors++;
            }
        }

        shmem_barrier(i, 0, npes-i, (i % 2) ? barrier_psync0 : barrier_psync1);
    }

    shmem_barrier_all();

    for (i = 0; i < NELEM; i++)
        dst[i] = -1;

    if (me == 0)
        printf("Changing root test\n");

    shmem_barrier_all();

    /* A total of npes tests are performed, where the root changes each time */
    for (i = 0; i < npes; i++) {
        int j;

        if (me == i)
            printf(" + root %d\n", i);

        shmem_broadcast64(dst, src, NELEM, i, 0, 0, npes, bcast_psync);

        /* Validate broadcasted data */
        for (j = 0; j < NELEM; j++) {
            int64_t expected = (me == i) ? i-1 : i;
            if (dst[j] != expected) {
                printf("%d: Expected dst[%d] = %"PRId64", got dst[%d] = %"PRId64", iteration %d\n",
                       me, j, expected, j, dst[j], i);
                errors++;
            }
        }

        shmem_barrier(0, 0, npes, barrier_psync0);
    }
    shmem_finalize();

    return errors != 0;
}
Example #17
void communicateParameters(LSMSCommunication &comm, LSMSSystemParameters &lsms, 
                           CrystalParameters &crystal, MixingParameters &mix)
{
  int const s=sizeof(LSMSSystemParameters)+9*sizeof(Real)+sizeof(int)+10
    +sizeof(MixingParameters)+5*sizeof(int);
  int rem=0,ele=0;
  int tot_bufsize=s;
  rem=s%32;
  ele=s/32;
  if  (rem!=0)
  {
    tot_bufsize=s-rem+32;
    ele++;
  }
  // TODO fine-tune this size
  tot_bufsize=65536;
  char* buf=(char*)shmalloc(tot_bufsize);
  int pos=0;
  int sec_id;

  if(comm.comm.rank==0)
  {
    
    //MPI_Pack(lsms.systemid,80,MPI_CHAR,buf,s,&pos,comm.comm);
    //MPI_Pack(lsms.title,80,MPI_CHAR,buf,s,&pos,comm.comm);
    //MPI_Pack(lsms.potential_file_in,128,MPI_CHAR,buf,s,&pos,comm.comm);
    //MPI_Pack(lsms.potential_file_out,128,MPI_CHAR,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.pot_in_type,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.pot_out_type,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.num_atoms,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.nspin,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.nrel_rel,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.nrelc,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.nrelv,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.n_spin_cant,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.n_spin_pola,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.mtasa,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.fixRMT,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.nscf,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.writeSteps,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.clight,1,MPI_DOUBLE,buf,s,&pos,comm.comm);

    //MPI_Pack(&lsms.energyContour.grid,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.energyContour.npts,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.energyContour.ebot,1,MPI_DOUBLE,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.energyContour.etop,1,MPI_DOUBLE,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.energyContour.eibot,1,MPI_DOUBLE,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.energyContour.eitop,1,MPI_DOUBLE,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.energyContour.maxGroupSize,1,MPI_INT,buf,s,&pos,comm.comm);

    //MPI_Pack(&lsms.mixing,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.alphaDV,1,MPI_DOUBLE,buf,s,&pos,comm.comm);

    //MPI_Pack(&lsms.global.iprint,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.global.print_node,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.global.default_iprint,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.global.istop,32,MPI_CHAR,buf,s,&pos,comm.comm);
    //MPI_Pack(&lsms.global.GPUThreads,32,MPI_INT,buf,s,&pos,comm.comm);

    //MPI_Pack(&crystal.num_types,1,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&crystal.bravais(0,0),9,MPI_DOUBLE,buf,s,&pos,comm.comm);

    //************  MemCpying  ***************
    memcpy(&buf[pos],&lsms.systemid,80*char_size); pos = pos+80*char_size;
    memcpy(&buf[pos],&lsms.title,80*char_size); pos = pos+80*char_size;
    memcpy(&buf[pos],&lsms.potential_file_in,128*char_size); pos = pos+128*char_size;
    memcpy(&buf[pos],&lsms.potential_file_out,128*char_size); pos = pos+128*char_size;
    memcpy(&buf[pos],&lsms.pot_in_type,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.pot_out_type ,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.num_atoms,int_size); pos = pos+int_size;

    memcpy(&buf[pos],&lsms.nspin,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.nrel_rel,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.nrelc,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.nrelv,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.n_spin_cant,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.n_spin_pola,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.mtasa,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.fixRMT,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.nscf,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.writeSteps,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.clight,double_size); pos = pos+double_size;

    memcpy(&buf[pos],&lsms.energyContour.grid,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.energyContour.npts,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.energyContour.ebot,double_size); pos = pos+double_size;
    memcpy(&buf[pos],&lsms.energyContour.etop,double_size); pos = pos+double_size;
    memcpy(&buf[pos],&lsms.energyContour.eibot,double_size); pos = pos+double_size;
    memcpy(&buf[pos],&lsms.energyContour.eitop,double_size); pos = pos+double_size;
    memcpy(&buf[pos],&lsms.energyContour.maxGroupSize,int_size); pos = pos+int_size;

    memcpy(&buf[pos],&lsms.mixing,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.alphaDV,double_size); pos = pos+double_size;

    memcpy(&buf[pos],&lsms.global.iprint,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.global.print_node,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.global.default_iprint,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&lsms.global.istop,32*char_size); pos = pos+32*char_size;
    memcpy(&buf[pos],&lsms.global.GPUThreads,32*int_size); pos = pos+32*int_size;

    memcpy(&buf[pos],&crystal.num_types,int_size); pos = pos+int_size;
    memcpy(&buf[pos],&crystal.bravais(0,0),9*double_size); pos = pos+9*double_size;


// MixingParameters
    // MPI_CXX_BOOL is not always available
    // MPI_Pack(&mix.quantity[0],mix.numQuantities,MPI_CXX_BOOL,buf,s,&pos,comm.comm);
    // copy to temporary int array and send this
    int tmpQuantity[mix.numQuantities];
    for(int i=0; i<mix.numQuantities; i++)
      if(mix.quantity[i])
        tmpQuantity[i] = 1;
      else
        tmpQuantity[i] = 0; 
    //MPI_Pack(&tmpQuantity[0],mix.numQuantities,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&mix.algorithm[0],mix.numQuantities,MPI_INT,buf,s,&pos,comm.comm);
    //MPI_Pack(&mix.mixingParameter[0],mix.numQuantities,MPI_DOUBLE,buf,s,&pos,comm.comm);
    memcpy(&buf[pos],&tmpQuantity[0],mix.numQuantities*int_size); pos = pos+mix.numQuantities*int_size;
    memcpy(&buf[pos],&mix.algorithm[0],mix.numQuantities*int_size); pos = pos+mix.numQuantities*int_size;
    memcpy(&buf[pos],&mix.mixingParameter[0],mix.numQuantities*double_size); pos = pos+mix.numQuantities*double_size;

  }
  //MPI_Bcast(buf,s,MPI_PACKED,0,comm.comm);
  shmem_barrier(0, 0, comm.comm.size,pSync2);
  shmem_broadcast32(&buf[0], &buf[0], tot_bufsize/4, 0, 0, 0, comm.comm.size,pSync1); // nelems counts 32-bit words, not bytes
  shmem_barrier(0, 0, comm.comm.size,pSync2);
  if(comm.comm.rank!=0)
  {
    int pos=0;
    //MPI_Unpack(buf,s,&pos,lsms.systemid,80,MPI_CHAR,comm.comm);
    //MPI_Unpack(buf,s,&pos,lsms.title,80,MPI_CHAR,comm.comm);
    //MPI_Unpack(buf,s,&pos,lsms.potential_file_in,128,MPI_CHAR,comm.comm);
    //MPI_Unpack(buf,s,&pos,lsms.potential_file_out,128,MPI_CHAR,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.pot_in_type,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.pot_out_type,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.num_atoms,1,MPI_INT,comm.comm);
    memcpy(&lsms.systemid,&buf[pos],80*char_size); pos = pos+80*char_size;
    memcpy(&lsms.title,&buf[pos],80*char_size); pos = pos+80*char_size;
    memcpy(&lsms.potential_file_in,&buf[pos],128*char_size); pos = pos+128*char_size;
    memcpy(&lsms.potential_file_out,&buf[pos],128*char_size); pos = pos+128*char_size;
    memcpy(&lsms.pot_in_type,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.pot_out_type,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.num_atoms,&buf[pos],int_size); pos = pos+int_size;
    crystal.num_atoms=lsms.num_atoms;
    //MPI_Unpack(buf,s,&pos,&lsms.nspin,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.nrel_rel,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.nrelc,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.nrelv,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.n_spin_cant,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.n_spin_pola,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.mtasa,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.fixRMT,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.nscf,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.writeSteps,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.clight,1,MPI_DOUBLE,comm.comm);

    //MPI_Unpack(buf,s,&pos,&lsms.energyContour.grid,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.energyContour.npts,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.energyContour.ebot,1,MPI_DOUBLE,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.energyContour.etop,1,MPI_DOUBLE,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.energyContour.eibot,1,MPI_DOUBLE,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.energyContour.eitop,1,MPI_DOUBLE,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.energyContour.maxGroupSize,1,MPI_INT,comm.comm);

    //MPI_Unpack(buf,s,&pos,&lsms.mixing,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.alphaDV,1,MPI_DOUBLE,comm.comm);

    //MPI_Unpack(buf,s,&pos,&lsms.global.iprint,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.global.print_node,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.global.default_iprint,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.global.istop,32,MPI_CHAR,comm.comm);
    //MPI_Unpack(buf,s,&pos,&lsms.global.GPUThreads,32,MPI_INT,comm.comm);

    //MPI_Unpack(buf,s,&pos,&crystal.num_types,1,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&crystal.bravais(0,0),9,MPI_DOUBLE,comm.comm);

    memcpy(&lsms.nspin,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.nrel_rel,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.nrelc,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.nrelv,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.n_spin_cant,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.n_spin_pola,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.mtasa,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.fixRMT,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.nscf,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.writeSteps,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.clight,&buf[pos],double_size); pos = pos+double_size;

    memcpy(&lsms.energyContour.grid,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.energyContour.npts,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.energyContour.ebot,&buf[pos],double_size); pos = pos+double_size;
    memcpy(&lsms.energyContour.etop,&buf[pos],double_size); pos = pos+double_size;
    memcpy(&lsms.energyContour.eibot,&buf[pos],double_size); pos = pos+double_size;
    memcpy(&lsms.energyContour.eitop,&buf[pos],double_size); pos = pos+double_size;
    memcpy(&lsms.energyContour.maxGroupSize,&buf[pos],int_size); pos = pos+int_size;

    memcpy(&lsms.mixing,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.alphaDV,&buf[pos],double_size); pos = pos+double_size;

    memcpy(&lsms.global.iprint,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.global.print_node,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.global.default_iprint,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&lsms.global.istop,&buf[pos],32*char_size); pos = pos+32*char_size;
    memcpy(&lsms.global.GPUThreads,&buf[pos],32*int_size); pos = pos+32*int_size;

    memcpy(&crystal.num_types,&buf[pos],int_size); pos = pos+int_size;
    memcpy(&crystal.bravais(0,0),&buf[pos],9*double_size); pos = pos+9*double_size;

    crystal.resize(crystal.num_atoms);
    crystal.resizeTypes(crystal.num_types);


// MixingParameters
    // MPI_CXX_BOOL is not always available
    // MPI_Unpack(buf,s,&pos,&mix.quantity[0],mix.numQuantities,MPI_CXX_BOOL,comm.comm);
    // receive temporary int array and copy
    int tmpQuantity[mix.numQuantities];
    //MPI_Unpack(buf,s,&pos,&tmpQuantity[0],mix.numQuantities,MPI_INT,comm.comm);
    memcpy(&tmpQuantity[0],&buf[pos],mix.numQuantities*int_size); pos = pos+mix.numQuantities*int_size;

    for(int i=0; i<mix.numQuantities; i++)
      if(tmpQuantity[i]==1)
        mix.quantity[i] = true;
      else
        mix.quantity[i] = false; 
    //MPI_Unpack(buf,s,&pos,&mix.algorithm[0],mix.numQuantities,MPI_INT,comm.comm);
    //MPI_Unpack(buf,s,&pos,&mix.mixingParameter[0],mix.numQuantities,MPI_DOUBLE,comm.comm);
    memcpy(&mix.algorithm[0],&buf[pos],mix.numQuantities*int_size); pos = pos+mix.numQuantities*int_size;
    memcpy(&mix.mixingParameter[0],&buf[pos],mix.numQuantities*double_size); pos = pos+mix.numQuantities*double_size;
  }

  for(int i=0; i<mix.numQuantities; i++)
    printf("mix.quantity[%d]=%d\n", i, mix.quantity[i]);

  // Allocate buffer for transmitting Crystal params
  int buff_size;

  if((crystal.num_types*sizeof(AtomType)) > (3*crystal.num_atoms*double_size))
     buff_size = crystal.num_types*sizeof(AtomType);
  else 
     buff_size = 3*crystal.num_atoms*double_size;  
 
  shfree(buf);
  // TODO finetune buff-size
  buff_size=1048576; //sizeof(LSMSSystemParameters)+9*sizeof(Real);
  rem=buff_size%64;
  ele=buff_size/64;
  if(rem != 0)
  {
     buff_size=buff_size-rem+64;
     ele++;
  }

  double* temp_buff=(double*) shmalloc(buff_size);
  int*    temp_intbuff=(int*) shmalloc(buff_size);

  //MPI_Bcast(&crystal.position(0,0),3*crystal.num_atoms,MPI_DOUBLE,0,comm.comm);
//TODO check if a barrier is needed after broadcast ... data not updated otherwise
  if(comm.comm.rank == 0)
      memcpy(temp_buff,&crystal.position(0,0),3*crystal.num_atoms*double_size);
  shmem_barrier(0, 0, comm.comm.size,pSync2);
  shmem_broadcast64(temp_buff, temp_buff,3*crystal.num_atoms, 0, 0, 0, comm.comm.size,pSync1);
  shmem_barrier(0, 0, comm.comm.size,pSync2);
  if(comm.comm.rank != 0)
      memcpy(&crystal.position(0,0),temp_buff,3*crystal.num_atoms*double_size);

  //MPI_Bcast(&crystal.evecs(0,0),3*crystal.num_atoms,MPI_DOUBLE,0,comm.comm);
  if(comm.comm.rank == 0){
      memcpy(temp_buff,&crystal.evecs(0,0),3*crystal.num_atoms*double_size);
}
  shmem_barrier(0, 0, comm.comm.size,pSync2);
  shmem_broadcast64(temp_buff, temp_buff, 3*crystal.num_atoms, 0, 0, 0, comm.comm.size,pSync1);
  shmem_barrier(0, 0, comm.comm.size,pSync2);
  if(comm.comm.rank != 0){
      memcpy(&crystal.evecs(0,0),temp_buff,3*crystal.num_atoms*double_size);
}

  //MPI_Bcast(&crystal.type[0],crystal.num_atoms,MPI_INT,0,comm.comm);
  if(comm.comm.rank == 0){
      memcpy(temp_intbuff,&crystal.type[0],crystal.num_atoms*int_size);
  }
  shmem_barrier(0, 0, comm.comm.size,pSync2);
  shmem_broadcast32(temp_intbuff, temp_intbuff, crystal.num_atoms, 0, 0, 0, comm.comm.size,pSync1);
  shmem_barrier(0, 0, comm.comm.size,pSync2);
  if(comm.comm.rank != 0){
      memcpy(&crystal.type[0],temp_intbuff,crystal.num_atoms*int_size);
  }

// This is dangerous and assumes homogeneous nodes:
  //MPI_Bcast(&crystal.types[0],crystal.num_types*sizeof(AtomType),MPI_BYTE,0,comm.comm);
  if(comm.comm.rank == 0)
      memcpy(temp_buff,&crystal.types[0],crystal.num_types*sizeof(AtomType));
  // fall back to the smallest broadcast granularity (the 32-bit variant)
  shmem_barrier(0, 0, comm.comm.size,pSync2);
  shmem_broadcast32(temp_buff,temp_buff,crystal.num_types*sizeof(AtomType)/4,0,0,0,comm.comm.size,pSync1);
  shmem_barrier(0, 0, comm.comm.size,pSync2);
  if(comm.comm.rank != 0)
      memcpy(&crystal.types[0],temp_buff,crystal.num_types*sizeof(AtomType));

  shmem_barrier(0, 0, comm.comm.size,pSync1);
  shfree(temp_buff);
  shfree(temp_intbuff);

// get maximum lmax
  crystal.maxlmax=0;
  for(int i=0; i<crystal.num_types; i++)
    if(crystal.types[i].lmax>crystal.maxlmax) crystal.maxlmax=crystal.types[i].lmax; 
  lsms.maxlmax=crystal.maxlmax;
}
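communicateParameters brackets every broadcast with barriers and alternates two pSync arrays (pSync2 for the barriers, pSync1 for the broadcast). The point is that a pSync array may only be reused once every PE has finished the previous collective that used it; the surrounding barriers on a different array guarantee exactly that. A condensed sketch of the pattern (buffer and size names are placeholders, and both arrays are assumed pre-initialized to SHMEM_SYNC_VALUE as in the earlier examples):

#include <shmem.h>

#define NELEMS 16
long pSyncA[SHMEM_BARRIER_SYNC_SIZE];   /* both initialized to             */
long pSyncB[SHMEM_BCAST_SYNC_SIZE];     /* SHMEM_SYNC_VALUE before use     */
long bcast_buf[NELEMS];                 /* symmetric payload (placeholder) */

void safe_bcast(int npes)
{
    shmem_barrier(0, 0, npes, pSyncA);  /* all PEs ready, buf filled on root */
    shmem_broadcast64(bcast_buf, bcast_buf, NELEMS, 0, 0, 0, npes, pSyncB);
    shmem_barrier(0, 0, npes, pSyncA);  /* bcast done everywhere; pSyncB and
                                           bcast_buf are now safe to reuse */
}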