Example #1: gradient of a distributed 3-D scalar field in single precision on the CPU. accfft_gradf computes all three derivative components, and each one is error-checked afterwards.
void grad(int *n, int nthreads) {
  int nprocs, procid;
  MPI_Comm_rank(MPI_COMM_WORLD, &procid);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  /* Create Cartesian Communicator */
  int c_dims[2]={0};
  MPI_Comm c_comm;
  accfft_create_comm(MPI_COMM_WORLD,c_dims,&c_comm);

  float *data;
  Complexf *data_hat;
  double f_time=0*MPI_Wtime(),i_time=0, setup_time=0;
  int alloc_max=0;

  int isize[3],osize[3],istart[3],ostart[3];
  /* Get the local pencil size and the allocation size */
  alloc_max=accfft_local_size_dft_r2cf(n,isize,istart,osize,ostart,c_comm);

  //data=(float*)accfft_alloc(isize[0]*isize[1]*isize[2]*sizeof(float));
  data=(float*)accfft_alloc(alloc_max);
  data_hat=(Complexf*)accfft_alloc(alloc_max);

  accfft_init(nthreads);

  /* Create FFT plan */
  setup_time=-MPI_Wtime();
  accfft_planf * plan=accfft_plan_dft_3d_r2cf(n,data,(float*)data_hat,c_comm,ACCFFT_MEASURE);
  setup_time+=MPI_Wtime();


  /*  Initialize data */
  initialize(data,n,c_comm);
  MPI_Barrier(c_comm);

  float * gradx=(float*)accfft_alloc(isize[0]*isize[1]*isize[2]*sizeof(float));
  float * grady=(float*)accfft_alloc(isize[0]*isize[1]*isize[2]*sizeof(float));
  float * gradz=(float*)accfft_alloc(isize[0]*isize[1]*isize[2]*sizeof(float));
  double timings[5]={0};

  std::bitset<3> XYZ=0;
  XYZ[0]=1;
  XYZ[1]=1;
  XYZ[2]=1;
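  // All three bits are set so accfft_gradf computes the x, y, and z derivative components.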
  double exec_time=-MPI_Wtime();
  accfft_gradf(gradx,grady,gradz,data,plan,&XYZ,timings);
  exec_time+=MPI_Wtime();
  /* Check err*/
  PCOUT<<">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"<<std::endl;
  PCOUT<<">>>>>>>>Checking Gradx>>>>>>>>"<<std::endl;
  check_err_grad(gradx,n,c_comm,0);
  PCOUT<<"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"<<std::endl;
  PCOUT<<"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"<<std::endl;

  PCOUT<<">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"<<std::endl;
  PCOUT<<">>>>>>>>Checking Grady>>>>>>>>"<<std::endl;
  check_err_grad(grady,n,c_comm,1);
  PCOUT<<"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"<<std::endl;
  PCOUT<<"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"<<std::endl;

  PCOUT<<">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"<<std::endl;
  PCOUT<<">>>>>>>>Checking Gradz>>>>>>>>"<<std::endl;
  check_err_grad(gradz,n,c_comm,2);
  PCOUT<<"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"<<std::endl;
  PCOUT<<"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"<<std::endl;

  /* Compute some timings statistics */
  double g_setup_time,g_timings[5],g_exec_time;

  MPI_Reduce(timings,g_timings,5, MPI_DOUBLE, MPI_MAX,0, c_comm);
  MPI_Reduce(&setup_time,&g_setup_time,1, MPI_DOUBLE, MPI_MAX,0, c_comm);
  MPI_Reduce(&exec_time,&g_exec_time,1, MPI_DOUBLE, MPI_MAX,0, c_comm);

  PCOUT<<"Timing for Grad Computation for size "<<n[0]<<"*"<<n[1]<<"*"<<n[2]<<std::endl;
  PCOUT<<"Setup \t\t"<<g_setup_time<<std::endl;
  PCOUT<<"Evaluation \t"<<g_exec_time<<std::endl;

  accfft_free(data);
  accfft_free(data_hat);
  MPI_Barrier(c_comm);
  accfft_free(gradx);
  accfft_free(grady);
  accfft_free(gradz);
  accfft_destroy_plan(plan);
  accfft_cleanup();
  MPI_Comm_free(&c_comm);
  PCOUT<<"-------------------------------------------------------"<<std::endl;
  PCOUT<<"-------------------------------------------------------"<<std::endl;
  PCOUT<<"-------------------------------------------------------\n"<<std::endl;
  return ;

} // end grad
Example #2: divergence on the GPU. The gradient is computed with accfft_grad_gpu, its divergence with accfft_divergence_gpu, and the result is validated against the Laplacian.
void divergence(int *n) {
  int nprocs, procid;
  MPI_Comm_rank(MPI_COMM_WORLD, &procid);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  /* Create Cartesian Communicator */
  int c_dims[2]={0};
  MPI_Comm c_comm;
  accfft_create_comm(MPI_COMM_WORLD,c_dims,&c_comm);

  double f_time=0*MPI_Wtime(),i_time=0, setup_time=0;
  int alloc_max=0;

  int isize[3],osize[3],istart[3],ostart[3];
  /* Get the local pencil size and the allocation size */
  alloc_max=accfft_local_size_dft_r2c_gpu(n,isize,istart,osize,ostart,c_comm);

  //data=(double*)accfft_alloc(isize[0]*isize[1]*isize[2]*sizeof(double));
  double * data_cpu=(double*)accfft_alloc(alloc_max);
  double* data;
  Complex* data_hat;
  cudaMalloc((void**) &data    , alloc_max);
  cudaMalloc((void**) &data_hat, alloc_max);


  accfft_init();

  /* Create FFT plan */
  setup_time=-MPI_Wtime();
  accfft_plan_gpu * plan=accfft_plan_dft_3d_r2c_gpu(n,data,(double*)data_hat,c_comm,ACCFFT_MEASURE);
  setup_time+=MPI_Wtime();


  /*  Initialize data */
  initialize(data_cpu,n,c_comm);
  cudaMemcpy(data, data_cpu,alloc_max, cudaMemcpyHostToDevice);

  MPI_Barrier(c_comm);

  double * gradx,*grady, *gradz, *divergence;
  cudaMalloc((void**) &gradx     , alloc_max);
  cudaMalloc((void**) &grady     , alloc_max);
  cudaMalloc((void**) &gradz     , alloc_max);
  cudaMalloc((void**) &divergence, alloc_max);
  double timings[5]={0};


  std::bitset<3> XYZ=0;
  XYZ[0]=1;
  XYZ[1]=1;
  XYZ[2]=1;
  double exec_time=-MPI_Wtime();
  accfft_grad_gpu(gradx,grady,gradz,data,plan,&XYZ,timings);
  accfft_divergence_gpu(divergence,gradx,grady,gradz,plan,timings);
  exec_time+=MPI_Wtime();
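  // The divergence of the gradient equals the Laplacian, so the result is
  // validated with check_err_laplace below.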

  PCOUT<<">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"<<std::endl;
  PCOUT<<">>>>>Checking Divergence>>>>>>"<<std::endl;
  cudaMemcpy(data_cpu, divergence, alloc_max, cudaMemcpyDeviceToHost);
  check_err_laplace(data_cpu,n,c_comm);
  PCOUT<<"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"<<std::endl;
  PCOUT<<"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"<<std::endl;


  /* Compute some timings statistics */
  double g_setup_time,g_timings[5],g_exec_time;

  MPI_Reduce(timings,g_timings,5, MPI_DOUBLE, MPI_MAX,0, c_comm);
  MPI_Reduce(&setup_time,&g_setup_time,1, MPI_DOUBLE, MPI_MAX,0, c_comm);
  MPI_Reduce(&exec_time,&g_exec_time,1, MPI_DOUBLE, MPI_MAX,0, c_comm);

  PCOUT<<"Timing for Grad Computation for size "<<n[0]<<"*"<<n[1]<<"*"<<n[2]<<std::endl;
  PCOUT<<"Setup \t\t"<<g_setup_time<<std::endl;
  PCOUT<<"Evaluation \t"<<g_exec_time<<std::endl;

  accfft_free(data_cpu);
  cudaFree(data);
  cudaFree(data_hat);
  MPI_Barrier(c_comm);
  cudaFree(gradx);
  cudaFree(grady);
  cudaFree(gradz);
  cudaFree(divergence);
  accfft_destroy_plan(plan);
  accfft_cleanup_gpu();
  MPI_Comm_free(&c_comm);
  PCOUT<<"-------------------------------------------------------"<<std::endl;
  PCOUT<<"-------------------------------------------------------"<<std::endl;
  PCOUT<<"-------------------------------------------------------\n"<<std::endl;
  return ;

} // end divergence
Example #3: single-precision complex-to-complex forward and backward FFT. Compiling with -DINPLACE runs the transform in place; otherwise a separate output buffer is used.
void step3(int *n, int nthreads) {
  int nprocs, procid;
  MPI_Comm_rank(MPI_COMM_WORLD, &procid);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  /* Create Cartesian Communicator */
  int c_dims[2]={0};
  MPI_Comm c_comm;
  accfft_create_comm(MPI_COMM_WORLD,c_dims,&c_comm);

  Complexf *data;
#ifndef INPLACE
  Complexf *data_hat; // separate output buffer for the out-of-place transform
#endif
  double f_time=0*MPI_Wtime(),i_time=0, setup_time=0;
  int alloc_max=0;

  int isize[3],osize[3],istart[3],ostart[3];
  /* Get the local pencil size and the allocation size */
  alloc_max=accfft_local_size_dft_c2cf(n,isize,istart,osize,ostart,c_comm);

#ifdef INPLACE
  data=(Complexf*)accfft_alloc(alloc_max);
#else
  data=(Complexf*)accfft_alloc(isize[0]*isize[1]*isize[2]*2*sizeof(float));
  data_hat=(Complexf*)accfft_alloc(alloc_max);
#endif

  accfft_init(nthreads);

  /* Create FFT plan */
  setup_time=-MPI_Wtime();
#ifdef INPLACE
  accfft_planf * plan=accfft_plan_dft_3d_c2cf(n,data,data,c_comm,ACCFFT_MEASURE);
#else
  accfft_planf * plan=accfft_plan_dft_3d_c2cf(n,data,data_hat,c_comm,ACCFFT_MEASURE);
#endif
  setup_time+=MPI_Wtime();

  /*  Initialize data */
  initialize(data,n,c_comm);
  MPI_Barrier(c_comm);

  /* Perform forward FFT */
  f_time-=MPI_Wtime();
#ifdef INPLACE
  accfft_execute_c2cf(plan,ACCFFT_FORWARD,data,data);
#else
  accfft_execute_c2cf(plan,ACCFFT_FORWARD,data,data_hat);
#endif
  f_time+=MPI_Wtime();

  MPI_Barrier(c_comm);


  /* Perform backward FFT */
#ifdef INPLACE
  i_time-=MPI_Wtime();
  accfft_execute_c2cf(plan,ACCFFT_BACKWARD,data,data);
  i_time+=MPI_Wtime();
#else
  Complexf * data2=(Complexf*)accfft_alloc(isize[0]*isize[1]*isize[2]*2*sizeof(float));
  i_time-=MPI_Wtime();
  accfft_execute_c2cf(plan,ACCFFT_BACKWARD,data_hat,data2);
  i_time+=MPI_Wtime();
#endif

  /* Check Error */
#ifdef INPLACE
  check_err(data,n,c_comm);
#else
  check_err(data2,n,c_comm);
#endif

  /* Compute some timings statistics */
  double g_f_time, g_i_time, g_setup_time;
  MPI_Reduce(&f_time,&g_f_time,1, MPI_DOUBLE, MPI_MAX,0, MPI_COMM_WORLD);
  MPI_Reduce(&i_time,&g_i_time,1, MPI_DOUBLE, MPI_MAX,0, MPI_COMM_WORLD);
  MPI_Reduce(&setup_time,&g_setup_time,1, MPI_DOUBLE, MPI_MAX,0, MPI_COMM_WORLD);

#ifdef INPLACE
  PCOUT<<"Timing for Inplace FFT of size "<<n[0]<<"*"<<n[1]<<"*"<<n[2]<<std::endl;
#else
  PCOUT<<"Timing for Outplace FFT of size "<<n[0]<<"*"<<n[1]<<"*"<<n[2]<<std::endl;
#endif
  PCOUT<<"Setup \t"<<g_setup_time<<std::endl;
  PCOUT<<"FFT \t"<<g_f_time<<std::endl;
  PCOUT<<"IFFT \t"<<g_i_time<<std::endl;

  accfft_free(data);
#ifndef INPLACE
  accfft_free(data_hat);
  accfft_free(data2);
#endif
  accfft_destroy_plan(plan);
  accfft_cleanup();
  MPI_Comm_free(&c_comm);
  return ;

} // end step3
Example #4: in-place single-precision real-to-complex forward and complex-to-real backward FFT.
void step2(int *n, int nthreads) {
    int nprocs, procid;
    MPI_Comm_rank(MPI_COMM_WORLD, &procid);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* Create Cartesian Communicator */
    int c_dims[2]={0};
    MPI_Comm c_comm;
    accfft_create_comm(MPI_COMM_WORLD,c_dims,&c_comm);

    float *data;
    double f_time=0*MPI_Wtime(),i_time=0, setup_time=0;
    int alloc_max=0;

    int isize[3],osize[3],istart[3],ostart[3];
    /* Get the local pencil size and the allocation size */
    alloc_max=accfft_local_size_dft_r2cf(n,isize,istart,osize,ostart,c_comm);

    data=(float*)accfft_alloc(alloc_max);

    accfft_init(nthreads);

    setup_time=-MPI_Wtime();
    /* Create FFT plan */
    accfft_planf * plan=accfft_plan_dft_3d_r2cf(n,data,data,c_comm,ACCFFT_MEASURE); // note that in and out are both data -> inplace plan
    setup_time+=MPI_Wtime();

    /* Warm Up */
    accfft_execute_r2cf(plan,data,(Complexf*)data);
    accfft_execute_r2cf(plan,data,(Complexf*)data);
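    // These two untimed executions only warm up the plan; data is initialized
    // below before the timed forward/backward transforms.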

    /*  Initialize data */
    initialize(data,n,c_comm); // special initialize plan for inplace transform -> difference in padding
    MPI_Barrier(c_comm);

    /* Perform forward FFT */
    f_time-=MPI_Wtime();
    accfft_execute_r2cf(plan,data,(Complexf*)data);
    f_time+=MPI_Wtime();

    MPI_Barrier(c_comm);

    /* Perform backward FFT */
    i_time-=MPI_Wtime();
    accfft_execute_c2rf(plan,(Complexf*)data,data);
    i_time+=MPI_Wtime();

    /* Check Error */
    check_err(data,n,c_comm);

    /* Compute some timings statistics */
    double g_f_time, g_i_time, g_setup_time;
    MPI_Reduce(&f_time,&g_f_time,1, MPI_DOUBLE, MPI_MAX,0, MPI_COMM_WORLD);
    MPI_Reduce(&i_time,&g_i_time,1, MPI_DOUBLE, MPI_MAX,0, MPI_COMM_WORLD);
    MPI_Reduce(&setup_time,&g_setup_time,1, MPI_DOUBLE, MPI_MAX,0, MPI_COMM_WORLD);

    PCOUT<<"Timing for FFT of size "<<n[0]<<"*"<<n[1]<<"*"<<n[2]<<std::endl;
    PCOUT<<"Setup \t"<<g_setup_time<<std::endl;
    PCOUT<<"FFT \t"<<g_f_time<<std::endl;
    PCOUT<<"IFFT \t"<<g_i_time<<std::endl;

    accfft_free(data);
    accfft_destroy_plan(plan);
    accfft_cleanup();
    MPI_Comm_free(&c_comm);
    return ;

} // end step2
Example #5: Laplacian of a distributed 3-D scalar field in double precision, using accfft_laplace.
void laplace(int *n, int nthreads) {
  int nprocs, procid;
  MPI_Comm_rank(MPI_COMM_WORLD, &procid);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  /* Create Cartesian Communicator */
  int c_dims[2]={0};
  MPI_Comm c_comm;
  accfft_create_comm(MPI_COMM_WORLD,c_dims,&c_comm);

  double *data;
  Complex *data_hat;
  double f_time=0*MPI_Wtime(),i_time=0, setup_time=0;
  int alloc_max=0;

  int isize[3],osize[3],istart[3],ostart[3];
  /* Get the local pencil size and the allocation size */
  alloc_max=accfft_local_size_dft_r2c(n,isize,istart,osize,ostart,c_comm);

  //data=(double*)accfft_alloc(isize[0]*isize[1]*isize[2]*sizeof(double));
  data=(double*)accfft_alloc(alloc_max);
  data_hat=(Complex*)accfft_alloc(alloc_max);

  accfft_init(nthreads);

  /* Create FFT plan */
  setup_time=-MPI_Wtime();
  accfft_plan * plan=accfft_plan_dft_3d_r2c(n,data,(double*)data_hat,c_comm,ACCFFT_MEASURE);
  setup_time+=MPI_Wtime();


  /*  Initialize data */
  initialize(data,n,c_comm);
  MPI_Barrier(c_comm);

  double * laplace=(double*)accfft_alloc(isize[0]*isize[1]*isize[2]*sizeof(double));
  double timings[5]={0};

  double exec_time=-MPI_Wtime();
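  // Apply the Laplacian to data using the FFT plan created above.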
  accfft_laplace(laplace,data,plan,timings);
  exec_time+=MPI_Wtime();
  /* Check err*/
  PCOUT<<">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"<<std::endl;
  PCOUT<<">>>>>>>Checking Laplace>>>>>>>"<<std::endl;
  check_err_laplace(laplace,n,c_comm);
  PCOUT<<"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"<<std::endl;
  PCOUT<<"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"<<std::endl;

  /* Compute some timings statistics */
  double g_setup_time,g_timings[5],g_exec_time;

  MPI_Reduce(timings,g_timings,5, MPI_DOUBLE, MPI_MAX,0, c_comm);
  MPI_Reduce(&setup_time,&g_setup_time,1, MPI_DOUBLE, MPI_MAX,0, c_comm);
  MPI_Reduce(&exec_time,&g_exec_time,1, MPI_DOUBLE, MPI_MAX,0, c_comm);

  PCOUT<<"Timing for Laplace Computation for size "<<n[0]<<"*"<<n[1]<<"*"<<n[2]<<std::endl;
  PCOUT<<"Setup \t\t"<<g_setup_time<<std::endl;
  PCOUT<<"Evaluation \t"<<g_exec_time<<std::endl;

  accfft_free(data);
  accfft_free(data_hat);
  MPI_Barrier(c_comm);
  accfft_free(laplace);
  accfft_destroy_plan(plan);
  accfft_cleanup();
  MPI_Comm_free(&c_comm);
  return ;

} // end laplace
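
For reference, here is a minimal driver sketch. It is not part of the original examples: it assumes the functions above are compiled together with AccFFT's sample helpers (initialize, check_err, PCOUT, and the error-check routines), and the grid size and thread count shown are placeholder values.

#include <mpi.h>
#include <cstdlib>

void laplace(int *n, int nthreads); // Example #5 above

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int n[3] = {128, 128, 128}; // placeholder grid size
  int nthreads = 1;           // placeholder thread count
  if (argc > 3) {             // optionally read NX NY NZ from the command line
    n[0] = atoi(argv[1]);
    n[1] = atoi(argv[2]);
    n[2] = atoi(argv[3]);
  }

  laplace(n, nthreads); // any of the example functions can be driven the same way

  MPI_Finalize();
  return 0;
}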