int main(int argc, char **argv) { double t_start, t_end; int *ptr_gen_array, elem_per_node; float *local_array; int i, num_procs, local_rank, name_len; char proc_name[MPI_MAX_PROCESSOR_NAME]; MPI_Comm comm_new; gen_time = 0.0; proc_time = 0.0; comm_time = 0.0; total_time = 0.0; // Parse the arguments if( parse_arguments(argc, argv) ) return 1; // Initialize MPI MPI_Init(&argc, &argv); MPI_Get_processor_name(proc_name, &name_len); // Initially create the topology if( type == ring ) { create_ring_topology(&comm_new, &local_rank, &num_procs); } else { create_2dmesh_topology(&comm_new, &local_rank, &num_procs); } t_start = MPI_Wtime(); if( type == ring ) { local_array = generate_2d(&comm_new, local_rank, num_procs, proc_name, &elem_per_node); } else { local_array = generate_mesh(&comm_new, local_rank, num_procs, proc_name, &elem_per_node); } if( type == ring ) { one_d_partitioning(&comm_new, local_array, local_rank, num_procs); } else { two_d_partitioning(&comm_new, local_array, local_rank, num_procs); } t_end = MPI_Wtime(); total_time = t_end - t_start; if( computerStats ) { printf("%d\tg\t%s\t%d\t%f\n", n, s_local_coords, num_procs, gen_time); printf("%d\tp\t%s\t%d\t%f\n", n, s_local_coords, num_procs, proc_time); printf("%d\tc\t%s\t%d\t%f\n", n, s_local_coords, num_procs, comm_time); printf("%d\tt\t%s\t%d\t%f\n", n, s_local_coords, num_procs, total_time); } free(local_array); MPI_Comm_free(&comm_new); MPI_Finalize(); // Exit MPI return 0; }
/// Returns the accumulated parameter sets for the requested FFT
/// configuration.  Calls supported_length_data() to prime the length
/// tables, then dispatches to the per-dimension generator before
/// handing back the member container by reference.
///
/// @param dimension transform dimensionality (1-D, 2-D or 3-D)
/// @param direction transform direction passed through to the generator
/// @param precision transform precision passed through to the generator
/// @return reference to the member container holding the generated packs
virtual std::vector<ParametersPackedRealInplaceInterleaved> &
parameter_sets(clfftDim dimension, clfftDirection direction,
               clfftPrecision precision)
{
    supported_length_data();

    // Equivalent to the original switch: unrecognized dimensions
    // generate nothing and the container is returned as-is.
    if (dimension == CLFFT_1D) {
        generate_1d(direction, precision);
    } else if (dimension == CLFFT_2D) {
        generate_2d(direction, precision);
    } else if (dimension == CLFFT_3D) {
        generate_3d(direction, precision);
    }

    return data_sets;
}