int main(int argc, char *argv[])
{
    Parameters *params; // user-defined parameters
    double ***phi;      // flux array
    FILE *fp = NULL;    // output file

    // Get inputs
    params = set_default_params();
    parse_params("parameters", params);
    read_CLI(argc, argv, params);
    print_params(params);

    // Initial guess of flux
    phi = init_flux(params);

    solve(phi, params);

    printf("keff = %f\n", params->k);

    // Write solution
    if( params->write_flux == TRUE )
    {
        write_flux(phi, params, fp);
    }

    // Free memory
    free_flux(phi);
    free(params);

    return 0;
}
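The flux is carried as a triple pointer (double ***phi) created by init_flux and released by free_flux. A minimal sketch of one common way such helpers back the array with a single contiguous block, so phi[i][j][k] indexing works while the data stays cache-friendly; the helper names and the nx/ny/nz extents are illustrative assumptions, not the program's actual implementation:

/* Illustrative sketch only -- alloc_flux/dealloc_flux are hypothetical
 * stand-ins for init_flux/free_flux; extents would come from Parameters. */
#include <stdlib.h>

double ***alloc_flux(size_t nx, size_t ny, size_t nz)
{
    double *data     = malloc(nx * ny * nz * sizeof(double)); // contiguous slab
    double **rows    = malloc(nx * ny * sizeof(double *));    // row pointers
    double ***planes = malloc(nx * sizeof(double **));        // plane pointers
    if( !data || !rows || !planes )
    {
        free(data); free(rows); free(planes);
        return NULL;
    }
    for( size_t i = 0; i < nx; i++ )
    {
        planes[i] = &rows[i * ny];
        for( size_t j = 0; j < ny; j++ )
            rows[i * ny + j] = &data[(i * ny + j) * nz];
    }
    return planes;
}

void dealloc_flux(double ***phi)
{
    if( !phi ) return;
    free(phi[0][0]); // contiguous data slab
    free(phi[0]);    // row pointers
    free(phi);       // plane pointers
}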
int main( int argc, char * argv[] )
{
    int version = 4;
    int mype = 0;

    #ifdef MPI
    int nranks;
    MPI_Status stat;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);
    MPI_Comm_rank(MPI_COMM_WORLD, &mype);
    #endif

    #ifdef PAPI
    papi_serial_init();
    #endif

    srand(time(NULL) * (mype+1));

    Input input = set_default_input();
    read_CLI( argc, argv, &input );
    calculate_derived_inputs( &input );

    if( mype == 0 )
        logo(version);

    #ifdef OPENMP
    omp_set_num_threads(input.nthreads);
    #endif

    Params params = build_tracks( &input );
    CommGrid grid = init_mpi_grid( input );

    if( mype == 0 )
        print_input_summary(input);

    float res;
    float keff = 1.0;
    int num_iters = 1;

    double time_transport = 0;
    double time_flux_exchange = 0;
    double time_renormalize_flux = 0;
    double time_update_sources = 0;
    double time_compute_keff = 0;
    double start, stop;

    if( mype == 0 )
    {
        center_print("SIMULATION", 79);
        border_print();
    }

    for( int i = 0; i < num_iters; i++ )
    {
        // Transport Sweep
        start = get_time();
        transport_sweep(&params, &input);
        stop = get_time();
        time_transport += stop - start;

        // Domain Boundary Flux Exchange (MPI)
        #ifdef MPI
        start = get_time();
        fast_transfer_boundary_fluxes(params, input, grid);
        stop = get_time();
        time_flux_exchange += stop - start;
        #endif

        // Flux Renormalization
        start = get_time();
        renormalize_flux(params, input, grid);
        stop = get_time();
        time_renormalize_flux += stop - start;

        // Update Source Regions
        start = get_time();
        res = update_sources(params, input, keff);
        stop = get_time();
        time_update_sources += stop - start;

        // Calculate K-Effective
        start = get_time();
        keff = compute_keff(params, input, grid);
        stop = get_time();
        time_compute_keff += stop - start;

        if( mype == 0 )
            printf("keff = %f\n", keff);
    }

    double time_total = time_transport + time_flux_exchange
        + time_renormalize_flux + time_update_sources + time_compute_keff;

    if( mype == 0 )
    {
        border_print();
        center_print("RESULTS SUMMARY", 79);
        border_print();
        printf("Transport Sweep Time: %6.2lf sec (%4.1lf%%)\n",
                time_transport, 100*time_transport/time_total);
        printf("Domain Flux Exchange Time: %6.2lf sec (%4.1lf%%)\n",
                time_flux_exchange, 100*time_flux_exchange/time_total);
        printf("Flux Renormalization Time: %6.2lf sec (%4.1lf%%)\n",
                time_renormalize_flux, 100*time_renormalize_flux/time_total);
        printf("Update Source Time: %6.2lf sec (%4.1lf%%)\n",
                time_update_sources, 100*time_update_sources/time_total);
        printf("K-Effective Calc Time: %6.2lf sec (%4.1lf%%)\n",
                time_compute_keff, 100*time_compute_keff/time_total);
        printf("Total Time: %6.2lf sec\n", time_total);
    }

    long tracks_per_second = 2 * input.ntracks / time_transport;

    #ifdef MPI
    MPI_Barrier(grid.cart_comm_3d);
    long global_tps = 0;
    MPI_Reduce( &tracks_per_second,  // Send Buffer
            &global_tps,             // Receive Buffer
            1,                       // Element Count
            MPI_LONG,                // Element Type
            MPI_SUM,                 // Reduction Operation Type
            0,                       // Master Rank
            grid.cart_comm_3d );     // MPI Communicator
    MPI_Barrier(grid.cart_comm_3d);
    tracks_per_second = global_tps;
    #endif

    if( mype == 0 )
    {
        printf("Time per Intersection: ");
        printf("%.2lf ns\n", time_per_intersection( input, time_transport ));
        border_print();
    }

    free_2D_tracks( params.tracks_2D );
    free_tracks( params.tracks );

    #ifdef MPI
    MPI_Finalize();
    #endif

    return 0;
}
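Every phase of the iteration above is bracketed by get_time() calls and accumulated into a per-phase total. A hedged sketch of what such a wall-clock helper can look like under this file's own MPI/OPENMP guards (an assumption for illustration, not SimpleMOC's actual helper):

/* Hypothetical get_time() sketch: prefer the MPI or OpenMP wall clocks
 * when available, fall back to coarse 1-second time() otherwise. */
#include <time.h>
#ifdef MPI
#include <mpi.h>
#endif
#ifdef OPENMP
#include <omp.h>
#endif

double get_time(void)
{
    #ifdef MPI
    return MPI_Wtime();         // wall clock, consistent across ranks
    #elif defined(OPENMP)
    return omp_get_wtime();     // wall clock from the OpenMP runtime
    #else
    return (double) time(NULL); // coarse fallback, 1-second resolution
    #endif
}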
int main( int argc, char* argv[] )
{
    // =====================================================================
    // Initialization & Command Line Read-In
    // =====================================================================

    int version = 13;
    int mype = 0;
    int max_procs = omp_get_num_procs();
    int i, thread, mat;
    unsigned long seed;
    double omp_start, omp_end, p_energy;
    unsigned long long vhash = 0;
    int nprocs;

    #ifdef MPI
    MPI_Status stat;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &mype);
    #endif

    // rand() is only used in the serial initialization stages.
    // A custom RNG is used in parallel portions.
    #ifdef VERIFICATION
    srand(26);
    #else
    srand(time(NULL));
    #endif

    // Process CLI Fields -- store in "Inputs" structure
    Inputs in = read_CLI( argc, argv );

    // Set number of OpenMP Threads
    omp_set_num_threads(in.nthreads);

    // Print-out of Input Summary
    if( mype == 0 )
        print_inputs( in, nprocs, version );

    // =====================================================================
    // Prepare Nuclide Energy Grids, Unionized Energy Grid, & Material Data
    // =====================================================================

    // Allocate & fill energy grids
    #ifndef BINARY_READ
    if( mype == 0 ) printf("Generating Nuclide Energy Grids...\n");
    #endif

    NuclideGridPoint ** nuclide_grids = gpmatrix( in.n_isotopes, in.n_gridpoints );

    #ifdef VERIFICATION
    generate_grids_v( nuclide_grids, in.n_isotopes, in.n_gridpoints );
    #else
    generate_grids( nuclide_grids, in.n_isotopes, in.n_gridpoints );
    #endif

    // Sort grids by energy
    #ifndef BINARY_READ
    if( mype == 0 ) printf("Sorting Nuclide Energy Grids...\n");
    sort_nuclide_grids( nuclide_grids, in.n_isotopes, in.n_gridpoints );
    #endif

    // Prepare Unionized Energy Grid Framework
    #ifndef BINARY_READ
    GridPoint * energy_grid = generate_energy_grid( in.n_isotopes,
            in.n_gridpoints, nuclide_grids );
    #else
    GridPoint * energy_grid = (GridPoint *) malloc( in.n_isotopes *
            in.n_gridpoints * sizeof( GridPoint ) );
    int * index_data = (int *) malloc( in.n_isotopes * in.n_gridpoints
            * in.n_isotopes * sizeof(int) );
    for( i = 0; i < in.n_isotopes * in.n_gridpoints; i++ )
        energy_grid[i].xs_ptrs = &index_data[i * in.n_isotopes];
    #endif

    // Double Indexing. Filling in energy_grid with pointers to the
    // nuclide_energy_grids.
    #ifndef BINARY_READ
    set_grid_ptrs( energy_grid, nuclide_grids, in.n_isotopes, in.n_gridpoints );
    #endif

    #ifdef BINARY_READ
    if( mype == 0 ) printf("Reading data from \"XS_data.dat\" file...\n");
    binary_read(in.n_isotopes, in.n_gridpoints, nuclide_grids, energy_grid);
    #endif

    // Get material data
    if( mype == 0 ) printf("Loading Mats...\n");
    int *num_nucs = load_num_nucs(in.n_isotopes);
    int **mats = load_mats(num_nucs, in.n_isotopes);

    #ifdef VERIFICATION
    double **concs = load_concs_v(num_nucs);
    #else
    double **concs = load_concs(num_nucs);
    #endif

    #ifdef BINARY_DUMP
    if( mype == 0 ) printf("Dumping data to binary file...\n");
    binary_dump(in.n_isotopes, in.n_gridpoints, nuclide_grids, energy_grid);
    if( mype == 0 ) printf("Binary file \"XS_data.dat\" written! Exiting...\n");
    return 0;
    #endif

    // =====================================================================
    // Cross Section (XS) Parallel Lookup Simulation Begins
    // =====================================================================

    // Outer benchmark loop can loop through all possible # of threads
    #ifdef BENCHMARK
    for( int bench_n = 1; bench_n <= omp_get_num_procs(); bench_n++ )
    {
        in.nthreads = bench_n;
        omp_set_num_threads(in.nthreads);
    #endif

    if( mype == 0 )
    {
        printf("\n");
        border_print();
        center_print("SIMULATION", 79);
        border_print();
    }

    omp_start = omp_get_wtime();

    // Initialize PAPI with one thread (master) here
    #ifdef PAPI
    if( PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT )
    {
        fprintf(stderr, "PAPI library init error!\n");
        exit(1);
    }
    #endif

    // OpenMP compiler directives - declaring variables as shared or private
    #pragma omp parallel default(none) \
        private(i, thread, p_energy, mat, seed) \
        shared( max_procs, in, energy_grid, nuclide_grids, \
                mats, concs, num_nucs, mype, vhash )
    {
        // Initialize parallel PAPI counters
        #ifdef PAPI
        int eventset = PAPI_NULL;
        int num_papi_events;
        #pragma omp critical
        {
            counter_init(&eventset, &num_papi_events);
        }
        #endif

        double macro_xs_vector[5];
        double * xs = (double *) calloc(5, sizeof(double));

        // Initialize RNG seeds for threads
        thread = omp_get_thread_num();
        seed = (thread+1)*19+17;

        // XS Lookup Loop
        #pragma omp for schedule(dynamic)
        for( i = 0; i < in.lookups; i++ )
        {
            // Status text
            if( INFO && mype == 0 && thread == 0 && i % 1000 == 0 )
                printf("\rCalculating XS's... (%.0lf%% completed)",
                        (i / ( (double)in.lookups / (double) in.nthreads ))
                        / (double) in.nthreads * 100.0);

            // Randomly pick an energy and material for the particle
            #ifdef VERIFICATION
            #pragma omp critical
            {
                p_energy = rn_v();
                mat = pick_mat(&seed);
            }
            #else
            p_energy = rn(&seed);
            mat = pick_mat(&seed);
            #endif

            // debugging
            //printf("E = %lf mat = %d\n", p_energy, mat);

            // This returns the macro_xs_vector, but we're not going to do
            // anything with it in this program, so the return value is
            // written over.
            calculate_macro_xs( p_energy, mat, in.n_isotopes,
                    in.n_gridpoints, num_nucs, concs,
                    energy_grid, nuclide_grids, mats,
                    macro_xs_vector );

            // Copy results from above function call onto heap so that
            // the compiler cannot optimize the function out
            // (only occurs if -flto flag is used)
            memcpy(xs, macro_xs_vector, 5*sizeof(double));

            // Verification hash calculation
            // This method provides a consistent hash across
            // architectures and compilers.
            #ifdef VERIFICATION
            char line[256];
            sprintf(line, "%.5lf %d %.5lf %.5lf %.5lf %.5lf %.5lf",
                    p_energy, mat,
                    macro_xs_vector[0],
                    macro_xs_vector[1],
                    macro_xs_vector[2],
                    macro_xs_vector[3],
                    macro_xs_vector[4]);
            unsigned long long vhash_local = hash(line, 10000);
            #pragma omp atomic
            vhash += vhash_local;
            #endif
        }

        // Prints out thread-local PAPI counters
        #ifdef PAPI
        if( mype == 0 && thread == 0 )
        {
            printf("\n");
            border_print();
            center_print("PAPI COUNTER RESULTS", 79);
            border_print();
            printf("Count \tSymbol \tDescription\n");
        }
        {
            #pragma omp barrier
        }
        counter_stop(&eventset, num_papi_events);
        #endif

        free(xs); // release the per-thread scratch buffer
    }

    #ifndef PAPI
    if( mype == 0 )
    {
        printf("\n");
        printf("Simulation complete.\n");
    }
    #endif

    omp_end = omp_get_wtime();

    // Print / Save Results and Exit
    print_results( in, mype, omp_end - omp_start, nprocs, vhash );

    #ifdef BENCHMARK
    }
    #endif

    #ifdef MPI
    MPI_Finalize();
    #endif

    return 0;
}
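The lookup loop above gives each thread its own seed word (seed = (thread+1)*19+17) and advances it through rn(&seed), so the hot path needs no synchronization outside the VERIFICATION build. A sketch of a linear congruential generator in that style, using the classic Park-Miller constants (an assumption for illustration, not necessarily the constants inside XSBench's rn):

/* Illustrative per-thread LCG: x' = 16807 * x mod (2^31 - 1).
 * Each thread owns *seed, so no locking is required. */
double rn_sketch(unsigned long *seed)
{
    const unsigned long a = 16807UL;      // Park-Miller multiplier
    const unsigned long m = 2147483647UL; // Mersenne prime modulus 2^31 - 1
    *seed = (a * (*seed)) % m;
    return (double) *seed / (double) m;   // uniform in (0, 1)
}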
int main(int argc, char *argv[])
{
    Parameters *parameters; // user-defined parameters
    Geometry *geometry;     // homogeneous cube geometry
    Material *material;     // problem material
    Bank *source_bank;      // array for particle source sites
    Tally *tally;           // scalar flux tally
    double *keff;           // effective multiplication factor
    double t1 = 0, t2 = 0;  // timers (only set in the OpenMP build)

    #ifdef _OPENMP
    unsigned long counter = 0; // offset at which each sub bank is copied into the master bank
    Bank *g_fission_bank;      // global fission bank
    #endif

    // Get inputs: set parameters to default values, parse parameter file,
    // override with any command line inputs, and print parameters
    parameters = init_parameters();
    parse_parameters(parameters);
    read_CLI(argc, argv, parameters);
    print_parameters(parameters);

    // Set initial RNG seed
    set_initial_seed(parameters->seed);
    set_stream(STREAM_INIT);

    // Create files for writing results to
    init_output(parameters);

    // Set up geometry
    geometry = init_geometry(parameters);

    // Set up material
    material = init_material(parameters);

    // Set up tallies
    tally = init_tally(parameters);

    // Create source bank and initial source distribution
    source_bank = init_source_bank(parameters, geometry);

    // Create fission bank
    #ifdef _OPENMP
    omp_set_num_threads(parameters->n_threads); // set number of OpenMP threads
    printf("Number of threads: %d\n", parameters->n_threads);

    // Allocate one master fission bank
    g_fission_bank = init_bank(2*parameters->n_particles);
    #endif

    // Set up array for k effective
    keff = calloc(parameters->n_active, sizeof(double));

    center_print("SIMULATION", 79);
    border_print();
    printf("%-15s %-15s %-15s\n", "BATCH", "KEFF", "MEAN KEFF");

    #ifdef _OPENMP
    // Start time
    t1 = omp_get_wtime();

    // The master fission bank replaces the serial version's per-run
    // fission bank; counter marks where each thread's sites are copied in.
    run_eigenvalue(counter, g_fission_bank, parameters, geometry, material,
            source_bank, tally, keff);

    // Stop time
    t2 = omp_get_wtime();
    #endif

    printf("Simulation time: %f secs\n", t2-t1);

    // Free memory
    #ifdef _OPENMP
    free_bank(g_fission_bank);
    #endif
    free(keff);
    free_tally(tally);
    free_bank(source_bank);
    free_material(material);
    free(geometry);
    free(parameters);

    return 0;
}
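The counter passed to run_eigenvalue marks where each thread's sub bank is copied into the master fission bank. A hedged sketch of that merge pattern (the types and the merge helper are hypothetical; run_eigenvalue's real internals are not shown here):

/* Hypothetical merge: each thread atomically reserves a contiguous slot
 * range in the shared master bank, then copies its local sites in. */
#include <string.h>
#include <omp.h>

typedef struct { double x, y, z; } Site;                     // assumed shape
typedef struct { Site *p; unsigned long n, sz; } MasterBank; // assumed shape

void merge_sub_bank(MasterBank *master, unsigned long *counter,
                    const Site *sub, unsigned long n_sub)
{
    unsigned long start;

    // Reserve [start, start + n_sub) without a critical section
    #pragma omp atomic capture
    { start = *counter; *counter += n_sub; }

    if( start + n_sub <= master->sz )
        memcpy(&master->p[start], sub, n_sub * sizeof(Site));
}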
int main(int argc, char *argv[])
{
    Parameters *parameters; // user-defined parameters
    Geometry *geometry;     // homogeneous cube geometry
    Material *material;     // problem material
    Bank *source_bank;      // array for particle source sites
    Bank *fission_bank;     // array for particle fission sites
    Tally *tally;           // scalar flux tally
    double *keff;           // effective multiplication factor
    double t1, t2;          // timers

    // Get inputs: set parameters to default values, parse parameter file,
    // override with any command line inputs, and print parameters
    parameters = init_parameters();
    parse_parameters(parameters);
    read_CLI(argc, argv, parameters);
    print_parameters(parameters);

    // Set initial RNG seed
    set_initial_seed(parameters->seed);
    set_stream(STREAM_OTHER);

    // Create files for writing results to
    init_output(parameters);

    // Set up geometry
    geometry = init_geometry(parameters);

    // Set up material
    material = init_material(parameters);

    // Set up tallies
    tally = init_tally(parameters);

    // Create source bank and initial source distribution
    source_bank = init_source_bank(parameters, geometry);

    // Create fission bank
    fission_bank = init_fission_bank(parameters);

    // Set up array for k effective
    keff = calloc(parameters->n_active, sizeof(double));

    center_print("SIMULATION", 79);
    border_print();
    printf("%-15s %-15s %-15s %-15s\n", "BATCH", "ENTROPY", "KEFF", "MEAN KEFF");

    // Start time
    t1 = timer();

    run_eigenvalue(parameters, geometry, material, source_bank, fission_bank,
            tally, keff);

    // Stop time
    t2 = timer();

    printf("Simulation time: %f secs\n", t2-t1);

    // Free memory
    free(keff);
    free_tally(tally);
    free_bank(fission_bank);
    free_bank(source_bank);
    free_material(material);
    free(geometry);
    free(parameters);

    return 0;
}
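This serial version also reports a per-batch ENTROPY column. The conventional diagnostic is the Shannon entropy of the fission source distribution over a spatial mesh, H = -sum_i p_i log2(p_i), which stabilizes as the source converges. A sketch of that calculation (hypothetical helper, not the program's own function):

/* Illustrative Shannon entropy of binned source sites. */
#include <math.h>

double shannon_entropy_sketch(const unsigned long *bin_counts,
                              unsigned long n_bins, unsigned long n_sites)
{
    double H = 0.0;
    for( unsigned long i = 0; i < n_bins; i++ )
    {
        if( bin_counts[i] == 0 )
            continue; // empty bins contribute nothing
        double p = (double) bin_counts[i] / (double) n_sites;
        H -= p * log2(p);
    }
    return H;
}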