void initialise_chunk_kernel_zz_c_wrapper(
  int *p_a0,
  int *p_a1,
  int arg_idx0, int arg_idx1, int arg_idx2,
  int x_size, int y_size, int z_size) {
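  // OpenACC wrapper: walks the local z/y/x range and calls the elemental kernel at
  // each point; p_a0 advances only in z because the stencil strides are (0,0,1).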
  #ifdef OPS_GPU
  #pragma acc parallel deviceptr(p_a0)
  #pragma acc loop
  #endif
  for ( int n_z=0; n_z<z_size; n_z++ ){
    #ifdef OPS_GPU
    #pragma acc loop
    #endif
    for ( int n_y=0; n_y<y_size; n_y++ ){
      #ifdef OPS_GPU
      #pragma acc loop
      #endif
      for ( int n_x=0; n_x<x_size; n_x++ ){
        int arg_idx[] = {arg_idx0+n_x, arg_idx1+n_y, arg_idx2+n_z};
        initialise_chunk_kernel_zz( p_a0 + n_x*0 + n_y*xdim0_initialise_chunk_kernel_zz*0 +
          n_z*xdim0_initialise_chunk_kernel_zz*ydim0_initialise_chunk_kernel_zz*1,
          arg_idx );

      }
    }
  }
}
// host stub function (OpenMP variant)
void ops_par_loop_initialise_chunk_kernel_zz(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1) {

  //Timing
  double t1,t2,c1,c2;
  ops_timers_core(&c1,&t1);


  int  offs[2][3];
  ops_arg args[2] = { arg0, arg1};



  ops_timing_realloc(132,"initialise_chunk_kernel_zz");
  OPS_kernels[132].count++;

  //compute locally allocated range for the sub-block

  int start[3];
  int end[3];

  #ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<3; n++ ){
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
  #else //OPS_MPI
  for ( int n=0; n<3; n++ ){
    start[n] = range[2*n];
    end[n] = range[2*n+1];
  }
  #endif //OPS_MPI
  #ifdef OPS_DEBUG
  ops_register_args(args, "initialise_chunk_kernel_zz");
  #endif

  offs[0][0] = args[0].stencil->stride[0]*1;  //unit step in x dimension
  offs[0][1] = off3D(1, &start[0],
      &end[0],args[0].dat->size, args[0].stencil->stride) - offs[0][0];
  offs[0][2] = off3D(2, &start[0],
      &end[0],args[0].dat->size, args[0].stencil->stride) - offs[0][1] - offs[0][0];



  int off0_0 = offs[0][0];
  int off0_1 = offs[0][1];
  int off0_2 = offs[0][2];
  int dat0 = args[0].dat->elem_size;


  #ifdef _OPENMP
  int nthreads = omp_get_max_threads( );
  #else
  int nthreads = 1;
  #endif
  xdim0 = args[0].dat->size[0]*args[0].dat->dim;
  ydim0 = args[0].dat->size[1];

  ops_H_D_exchanges_host(args, 2);

  //Halo Exchanges
  ops_halo_exchanges(args,2,range);


  ops_timers_core(&c2,&t2);
  OPS_kernels[132].mpi_time += t2-t1;


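  // Parallelise over the z dimension: each OpenMP thread processes a contiguous
  // slab of z planes, [start_i, finish_i), of roughly z_size/nthreads planes each.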
  #pragma omp parallel for
  for ( int thr=0; thr<nthreads; thr++ ){

    int z_size = end[2]-start[2];
    char *p_a[2];

    int start_i = start[2] + ((z_size-1)/nthreads+1)*thr;
    int finish_i = start[2] + MIN(((z_size-1)/nthreads+1)*(thr+1),z_size);

    //get address per thread
    int start0 = start[0];
    int start1 = start[1];
    int start2 = start_i;

    int arg_idx[3];
    #ifdef OPS_MPI
    arg_idx[0] = sb->decomp_disp[0]+start0;
    arg_idx[1] = sb->decomp_disp[1]+start1;
    arg_idx[2] = sb->decomp_disp[2]+start2;
    #else //OPS_MPI
    arg_idx[0] = start0;
    arg_idx[1] = start1;
    arg_idx[2] = start2;
    #endif //OPS_MPI
    //set up initial pointers 
    int d_m[OPS_MAX_DIM];
    #ifdef OPS_MPI
    for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
    #else //OPS_MPI
    for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
    #endif //OPS_MPI
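    // base0 is the byte offset of this thread's first grid point within args[0].data,
    // accounting for the start indices, the dat's base index and the halo depths d_m.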
    int base0 = dat0 * 1 * 
    (start0 * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
    base0 = base0+ dat0 *
      args[0].dat->size[0] *
      (start1 * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
    base0 = base0+ dat0 *
      args[0].dat->size[0] *
      args[0].dat->size[1] *
      (start2 * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
    p_a[0] = (char *)args[0].data + base0;

    p_a[1] = (char *)arg_idx;


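    // Main compute loop over this thread's z slab: the x range is processed in
    // SIMD_VEC-wide chunks, with a scalar remainder loop for the leftover points.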
    for ( int n_z=start_i; n_z<finish_i; n_z++ ){
      for ( int n_y=start[1]; n_y<end[1]; n_y++ ){
        for ( int n_x=start[0]; n_x<start[0]+(end[0]-start[0])/SIMD_VEC; n_x++ ){
          //call kernel function, passing in pointers to data - vectorised
          for ( int i=0; i<SIMD_VEC; i++ ){
            initialise_chunk_kernel_zz( (int *)p_a[0] + i*0, arg_idx );

            arg_idx[0]++;
          }

          //shift pointers to data x direction
          p_a[0]= p_a[0] + (dat0 * off0_0)*SIMD_VEC;
        }

        for ( int n_x=start[0]+((end[0]-start[0])/SIMD_VEC)*SIMD_VEC; n_x<end[0]; n_x++ ){
          //call kernel function, passing in pointers to data - remainder
          initialise_chunk_kernel_zz( (int *)p_a[0], arg_idx );


          //shift pointers to data x direction
          p_a[0]= p_a[0] + (dat0 * off0_0);
          arg_idx[0]++;
        }

        //shift pointers to data y direction
        p_a[0]= p_a[0] + (dat0 * off0_1);
        #ifdef OPS_MPI
        arg_idx[0] = sb->decomp_disp[0]+start0;
        #else //OPS_MPI
        arg_idx[0] = start0;
        #endif //OPS_MPI
        arg_idx[1]++;
      }
      //shift pointers to data z direction
      p_a[0]= p_a[0] + (dat0 * off0_2);
      #ifdef OPS_MPI
      arg_idx[0] = sb->decomp_disp[0]+start0;
      arg_idx[1] = sb->decomp_disp[1]+start1;
      #else //OPS_MPI
      arg_idx[0] = start0;
      arg_idx[1] = start1;
      #endif //OPS_MPI
      arg_idx[2]++;
    }
  }

  ops_timers_core(&c1,&t1);
  OPS_kernels[132].time += t1-t2;

  ops_set_dirtybit_host(args, 2);

  ops_set_halo_dirtybit3(&args[0],range);

  //Update kernel record
  ops_timers_core(&c2,&t2);
  OPS_kernels[132].mpi_time += t2-t1;
  OPS_kernels[132].transfer += ops_compute_transfer(dim, range, &arg0);
}
// host stub function (sequential variant)
void ops_par_loop_initialise_chunk_kernel_zz(char const *name, ops_block block,
                                             int dim, int *range, ops_arg arg0,
                                             ops_arg arg1) {

  // Timing
  double t1, t2, c1, c2;

  char *p_a[2];
  int offs[2][3];
  ops_arg args[2] = {arg0, arg1};

#ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args, 2, range, 2))
    return;
#endif

  if (OPS_diags > 1) {
    ops_timing_realloc(2, "initialise_chunk_kernel_zz");
    OPS_kernels[2].count++;
    ops_timers_core(&c2, &t2);
  }

  // compute locally allocated range for the sub-block
  int start[3];
  int end[3];

#ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
#endif
#ifdef OPS_DEBUG
  ops_register_args(args, "initialise_chunk_kernel_zz");
#endif

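  // arg_idx holds the global index of the current grid point (passed to the kernel
  // as its second argument); arg_idx_base keeps the starting values so the x and y
  // components can be reset at the end of each row and plane.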
  int arg_idx[3];
  int arg_idx_base[3];
#ifdef OPS_MPI
  if (compute_ranges(args, 2, block, range, start, end, arg_idx) < 0)
    return;
#else  // OPS_MPI
  for (int n = 0; n < 3; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
    arg_idx[n] = start[n];
  }
#endif // OPS_MPI
  for (int n = 0; n < 3; n++) {
    arg_idx_base[n] = arg_idx[n];
  }
  offs[0][0] = args[0].stencil->stride[0] * 1; // unit step in x dimension
  offs[0][1] =
      off3D(1, &start[0], &end[0], args[0].dat->size, args[0].stencil->stride) -
      offs[0][0];
  offs[0][2] =
      off3D(2, &start[0], &end[0], args[0].dat->size, args[0].stencil->stride) -
      offs[0][1] - offs[0][0];

  int off0_0 = offs[0][0];
  int off0_1 = offs[0][1];
  int off0_2 = offs[0][2];
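  // Per-point stride in bytes: one component (type_size) for an SoA layout,
  // the whole element (elem_size) for AoS.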
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);

  // set up initial pointers and exchange halos if necessary
  int base0 = args[0].dat->base_offset +
              (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
                  start[0] * args[0].stencil->stride[0];
  base0 = base0 +
          (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
              args[0].dat->size[0] * start[1] * args[0].stencil->stride[1];
  base0 = base0 +
          (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
              args[0].dat->size[0] * args[0].dat->size[1] * start[2] *
              args[0].stencil->stride[2];
  p_a[0] = (char *)args[0].data + base0;

  p_a[1] = (char *)arg_idx;

  // initialize global variable with the dimension of dats
  xdim0 = args[0].dat->size[0];
  ydim0 = args[0].dat->size[1];

  // Halo Exchanges
  ops_H_D_exchanges_host(args, 2);
  ops_halo_exchanges(args, 2, range);
  ops_H_D_exchanges_host(args, 2);

  if (OPS_diags > 1) {
    ops_timers_core(&c1, &t1);
    OPS_kernels[2].mpi_time += t1 - t2;
  }

  int n_x;
  for (int n_z = start[2]; n_z < end[2]; n_z++) {
    for (int n_y = start[1]; n_y < end[1]; n_y++) {
#pragma novector
      for (n_x = start[0];
           n_x < start[0] + ((end[0] - start[0]) / SIMD_VEC) * SIMD_VEC;
           n_x += SIMD_VEC) {
        // call kernel function, passing in pointers to data - vectorised
        for (int i = 0; i < SIMD_VEC; i++) {
          initialise_chunk_kernel_zz((int *)p_a[0] + i * 0 * 1, (int *)p_a[1]);

          arg_idx[0]++;
        }

        // shift pointers to data x direction
        p_a[0] = p_a[0] + (dat0 * off0_0) * SIMD_VEC;
      }

      for (int n_x = start[0] + ((end[0] - start[0]) / SIMD_VEC) * SIMD_VEC;
           n_x < end[0]; n_x++) {
        // call kernel function, passing in pointers to data - remainder
        initialise_chunk_kernel_zz((int *)p_a[0], (int *)p_a[1]);

        // shift pointers to data x direction
        p_a[0] = p_a[0] + (dat0 * off0_0);
        arg_idx[0]++;
      }

      // shift pointers to data y direction
      p_a[0] = p_a[0] + (dat0 * off0_1);
      arg_idx[0] = arg_idx_base[0];
      arg_idx[1]++;
    }
    // shift pointers to data z direction
    p_a[0] = p_a[0] + (dat0 * off0_2);
    arg_idx[0] = arg_idx_base[0];
    arg_idx[1] = arg_idx_base[1];
    arg_idx[2]++;
  }
  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[2].time += t2 - t1;
  }
  ops_set_dirtybit_host(args, 2);
  ops_set_halo_dirtybit3(&args[0], range);

  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c1, &t1);
    OPS_kernels[2].mpi_time += t1 - t2;
    OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0);
  }
}