/** This will set up the MPE logging event numbers.
 *
 * @param my_rank the rank of the processor running the code.
 * @param event_num array of MPE event numbers.
 *
 * @return 0 for success, non-zero for failure.
 */
int init_logging(int my_rank, int event_num[][NUM_EVENTS])
{
#ifdef HAVE_MPE
    /* Table of logged states: the event_num column index plus the label
     * and color shown in the Jumpshot timeline. */
    static const int state_idx[] = {
        INIT, CREATE_PNETCDF, CREATE_CLASSIC, CREATE_SERIAL4,
        CREATE_PARALLEL4, CALCULATE, WRITE, CLOSE, FREE, READ
    };
    static const char *state_name[] = {
        "init", "create pnetcdf", "create classic",
        "create netcdf-4 serial", "create netcdf-4 parallel",
        "calculate", "write", "close", "free", "read"
    };
    static const char *state_color[] = {
        "yellow", "red", "red", "red", "red",
        "orange", "green", "purple", "blue", "pink"
    };
    const int nstate = (int)(sizeof(state_idx) / sizeof(state_idx[0]));
    int s;

    /* Get a start/end event number from MPE for every logged state, in
     * the same order as before (START then END, state by state). */
    for (s = 0; s < nstate; s++)
    {
        event_num[START][state_idx[s]] = MPE_Log_get_event_number();
        event_num[END][state_idx[s]] = MPE_Log_get_event_number();
    }

    /* You should track at least initialization and partitioning, data
     * ingest, update computation, all communications, any memory
     * copies (if you do that), any output rendering, and any global
     * communications.  Only rank 0 registers the descriptions. */
    if (!my_rank)
    {
        for (s = 0; s < nstate; s++)
            MPE_Describe_state(event_num[START][state_idx[s]],
                               event_num[END][state_idx[s]],
                               (char *)state_name[s],
                               (char *)state_color[s]);
    }
#endif /* HAVE_MPE */
    return 0;
}
/* Create a new MPE "state" (a start/end event pair) named `name`, drawn
 * in `color`.  On MPE version 2 the state is attached to the communicator
 * identified by `commID` and can carry printf-style info described by
 * `format`; on older MPE the communicator and format are ignored.
 * The two event IDs are returned through stateID[0] (start) and
 * stateID[1] (end).  Returns 0 on success, or an MPE error code. */
static int PyMPELog_NewState(int commID,
                             const char name[],
                             const char color[],
                             const char format[],
                             int stateID[2])
{
  int ierr = 0;
#if HAVE_MPE
  MPI_Comm comm = PyMPELog_GetComm(commID);
  /* This process is not a member of the communicator: nothing to log. */
  if (comm == MPI_COMM_NULL) return 0;
#if MPE_VERSION==2
  ierr = MPE_Log_get_state_eventIDs(&stateID[0], &stateID[1]);
  /* -99999 signals that logging is unavailable; treat it as success but
   * hand back sentinel IDs so callers can detect the situation. */
  if (ierr == -99999) {
    ierr = 0;
    stateID[0] = stateID[1] = -99999;
  }
  if (ierr != 0) return ierr;
  ierr = MPE_Describe_comm_state(comm, stateID[0], stateID[1],
                                 name, color, format);
#else
  /* MPE 1: no per-communicator states; `format` is not used. */
  stateID[0] = MPE_Log_get_event_number();
  stateID[1] = MPE_Log_get_event_number();
  /* The old API takes non-const strings; the casts only drop const. */
  ierr = MPE_Describe_state(stateID[0], stateID[1],
                            (char *)name, (char *)color);
#endif
#endif /* HAVE_MPE */
  return ierr;
}
/* Profiling wrapper for MPI_Init: performs the real initialization via
 * PMPI_Init, initializes MPE logging, registers (on rank 0 only) the
 * broadcast/send/recv state descriptions, and starts the log.
 *
 * Returns the result of PMPI_Init.
 *
 * Fix: removed the stray `;` after the if-block and after the function
 * body — an empty declaration at file scope is invalid in strict C. */
int MPI_Init(int *argc, char ***argv)
{
    int wynik = PMPI_Init(argc, argv);

    MPE_Init_log();

    int proc;
    MPI_Comm_rank(MPI_COMM_WORLD, &proc);
    if (proc == 0)
    {
        /* State descriptions only need to be registered once. */
        MPE_Describe_state(START_BCAST, END_BCAST, "broadcast", "red");
        MPE_Describe_state(START_SEND, END_SEND, "send", "blue");
        MPE_Describe_state(START_RECV, END_RECV, "recv", "green");
    }
    MPE_Start_log();

    return wynik;
}
/* Create a new MPE logging state (start/end event pair) with the given
 * display name and color.
 *
 * @param name  label shown in the log viewer.
 * @param color color used for the state in the log viewer.
 * @return      the event-ID pair; both IDs are -1 when MPE is disabled.
 *
 * Fix: when ENABLE_MPE is not defined, the returned event previously
 * contained uninitialized start/end IDs (undefined behavior to read);
 * they are now always initialized, using -1 as a sentinel. */
MPELogger::MPEEvent MPELogger::newEvent(const std::string& name,
                                        const std::string& color)
{
    MPELogger::MPEEvent evnt;
    evnt.start = -1;
    evnt.end = -1;
#ifdef ENABLE_MPE
    evnt.start = MPE_Log_get_event_number();
    evnt.end = MPE_Log_get_event_number();
    MPE_Describe_state(evnt.start, evnt.end, name.c_str(), color.c_str());
#endif
    return evnt;
}
/* Prolog predicate mpe_create_state(+Event,+Event,+Text,+Colour):
 * registers an MPE state from a pair of previously obtained event IDs
 * plus a description and a colour.  Each argument is type-checked;
 * unbound or wrongly typed arguments raise a Prolog error and fail.
 * Succeeds (TRUE) iff MPE_Describe_state returns 0. */
static Int /* mpe_create_state(+Event,+Event,+Text,+Colour) */
p_create_state()
{
  Term t_start = Deref(ARG1), t_end = Deref(ARG2),
       t_descr = Deref(ARG3), t_colour = Deref(ARG4);
  Int start_id, end_id;
  char *descr, *colour;
  int retv;

  /* The first and second args must be bound to integer event IDs. */
  if (IsVarTerm(t_start)) {
    Yap_Error(INSTANTIATION_ERROR, t_start, "mpe_create_state");
    return (FALSE);
  } else if( !IsIntegerTerm(t_start) ) {
    Yap_Error(TYPE_ERROR_INTEGER, t_start, "mpe_create_state");
    return (FALSE);
  } else {
    start_id = IntOfTerm(t_start);
  }

  if (IsVarTerm(t_end)) {
    Yap_Error(INSTANTIATION_ERROR, t_end, "mpe_create_state");
    return (FALSE);
  } else if( !IsIntegerTerm(t_end) ) {
    Yap_Error(TYPE_ERROR_INTEGER, t_end, "mpe_create_state");
    return (FALSE);
  } else {
    end_id = IntOfTerm(t_end);
  }

  /* The third and fourth args must be bound to atoms. */
  if (IsVarTerm(t_descr)) {
    Yap_Error(INSTANTIATION_ERROR, t_descr, "mpe_create_state");
    return (FALSE);
  } else if( !IsAtomTerm(t_descr) ) {
    Yap_Error(TYPE_ERROR_ATOM, t_descr, "mpe_create_state");
    return (FALSE);
  } else {
    /* Borrow the atom's internal string; MPE copies what it needs. */
    descr = RepAtom(AtomOfTerm(t_descr))->StrOfAE;
  }

  if (IsVarTerm(t_colour)) {
    Yap_Error(INSTANTIATION_ERROR, t_colour, "mpe_create_state");
    return (FALSE);
  } else if( !IsAtomTerm(t_colour) ) {
    Yap_Error(TYPE_ERROR_ATOM, t_colour, "mpe_create_state");
    return (FALSE);
  } else {
    colour = RepAtom(AtomOfTerm(t_colour))->StrOfAE;
  }

  retv = MPE_Describe_state( (int)start_id, (int)end_id, descr, colour );

  return (retv == 0);
}
/*@C
  EventRegLogRegister - Registers an event for logging operations in an application code.

  Not Collective

  Input Parameters:
+ eventLog - The EventLog
. ename    - The name associated with the event
- classid  - The classid associated with the class for this event

  Output Parameter:
. event - The event

  Example of Usage:
.vb
      int USER_EVENT;
      PetscLogDouble user_event_flops;
      PetscLogEventRegister("User event name",0,&USER_EVENT);
      PetscLogEventBegin(USER_EVENT,0,0,0,0);
         [code segment to monitor]
         PetscLogFlops(user_event_flops);
      PetscLogEventEnd(USER_EVENT,0,0,0,0);
.ve

  Notes:
  PETSc automatically logs library events if the code has been compiled with -DPETSC_USE_LOG (which is the default) and
  -log, -log_summary, or -log_all are specified.  PetscLogEventRegister() is intended for logging user events to supplement
  this PETSc information.

  PETSc can gather data for use with the utilities Jumpshot (part of the MPICH distribution).  If PETSc has been compiled
  with flag -DPETSC_HAVE_MPE (MPE is an additional utility within MPICH), the user can employ another command line option,
  -log_mpe, to create a logfile, "mpe.log", which can be visualized with Jumpshot.
  Level: developer

.keywords: log, event, register
.seealso: PetscLogEventBegin(), PetscLogEventEnd(), PetscLogFlops(),
          PetscLogEventMPEActivate(), PetscLogEventMPEDeactivate(),
          EventLogActivate(), EventLogDeactivate()
@*/
PetscErrorCode EventRegLogRegister(PetscEventRegLog eventLog, const char ename[], PetscClassId classid, PetscLogEvent *event)
{
  PetscEventRegInfo *eventInfo;
  char              *str;
  int               e;
  PetscErrorCode    ierr;

  PetscFunctionBegin;
  PetscValidCharPointer(ename,2);
  PetscValidIntPointer(event,4);
  /* Should check classid I think */
  /* Claim the next slot; numEvents is bumped before the growth check. */
  e = eventLog->numEvents++;
  if (eventLog->numEvents > eventLog->maxEvents) {
    /* Grow the registry by doubling: allocate, copy, free the old array. */
    ierr = PetscMalloc1(eventLog->maxEvents*2, &eventInfo); CHKERRQ(ierr);
    ierr = PetscMemcpy(eventInfo, eventLog->eventInfo, eventLog->maxEvents * sizeof(PetscEventRegInfo)); CHKERRQ(ierr);
    ierr = PetscFree(eventLog->eventInfo); CHKERRQ(ierr);
    eventLog->eventInfo = eventInfo;
    eventLog->maxEvents *= 2;
  }
  /* The registry owns a private copy of the event name. */
  ierr = PetscStrallocpy(ename, &str); CHKERRQ(ierr);
  eventLog->eventInfo[e].name    = str;
  eventLog->eventInfo[e].classid = classid;
#if defined(PETSC_HAVE_MPE)
  /* When MPE logging is active, pair the PETSc event with an MPE
   * begin/end event and (on rank 0 only) register its description. */
  if (PetscLogPLB == PetscLogEventBeginMPE) {
    const char *color;
    PetscMPIInt rank;
    int         beginID, endID;

    beginID = MPE_Log_get_event_number();
    endID   = MPE_Log_get_event_number();
    eventLog->eventInfo[e].mpe_id_begin = beginID;
    eventLog->eventInfo[e].mpe_id_end   = endID;
    ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank); CHKERRQ(ierr);
    if (!rank) {
      ierr = PetscLogMPEGetRGBColor(&color); CHKERRQ(ierr);
      MPE_Describe_state(beginID, endID, str, (char*)color);
    }
  }
#endif
  *event = e;
  PetscFunctionReturn(0);
}
/** Sets up user_state events.  This does nothing if MPE is not enabled.
 *
 * @param num_types number of user state types to register.
 * @param types     array of num_types type identifiers used in the
 *                  state names.
 *
 * Fix: the results of the two malloc() calls were used unchecked;
 * allocation failure is now detected and the function bails out with
 * both globals left NULL. */
static void setup_mpe_events(int num_types, int* types)
{
#ifdef ENABLE_MPE
    PMPI_Comm_rank(MPI_COMM_WORLD, &my_log_rank);

    user_state_start = malloc(num_types * sizeof(int));
    user_state_end = malloc(num_types * sizeof(int));
    if (!user_state_start || !user_state_end) {
        /* free(NULL) is a no-op, so this is safe on partial failure. */
        free(user_state_start);
        free(user_state_end);
        user_state_start = user_state_end = NULL;
        return;
    }

    for (int i = 0; i < num_types; i++) {
        MPE_Log_get_state_eventIDs(&user_state_start[i], &user_state_end[i]);
        if (my_log_rank == 0) {
            /* NOTE(review): user_state_description is a global buffer
             * declared elsewhere; assumes it is large enough for
             * "user_state_" plus a formatted int -- confirm its size. */
            sprintf(user_state_description, "user_state_%d", types[i]);
            /* "MPE_CHOOSE_COLOR" asks MPE to pick a color itself. */
            MPE_Describe_state(user_state_start[i], user_state_end[i],
                               user_state_description, "MPE_CHOOSE_COLOR");
        }
    }
#endif
}
int main(int argc, char** argv) { int my_rank; /* My process rank */ int p; /* The number of processes */ float a = 0.0; /* Left endpoint */ float b = 30.0; /* Right endpoint */ long int n = 10000000; /* Number of trapezoids */ double h; /* Trapezoid base length */ float local_a; /* Left endpoint my process */ float local_b; /* Right endpoint my process */ long int local_n; /* Number of trapezoids for */ /* my calculation */ long double integral; /* Integral over my interval */ long double total_integral; /* Total integral */ int source; /* Process sending integral */ int dest = 0; /* All messages go to 0 */ int tag = 0; MPI_Status status; double startTime, endTime, timeDifference; /* Let the system do what it needs to start up MPI */ MPI_Init(&argc, &argv); /* Get my process rank */ MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); /* Find out how many processes are being used */ MPI_Comm_size(MPI_COMM_WORLD, &p); // Initializing the log, after communication MPE_Init_log(); int event1a = MPE_Log_get_event_number(); int event1b = MPE_Log_get_event_number(); int event2a = MPE_Log_get_event_number(); int event2b = MPE_Log_get_event_number(); int event3a = MPE_Log_get_event_number(); int event3b = MPE_Log_get_event_number(); MPE_Describe_state(event1a, event1b, "Receive", "blue"); MPE_Describe_state(event2a, event2b, "Send", "yellow"); MPE_Describe_state(event3a, event3b, "Compute", "red"); // Starting to log MPE_Start_log(); if (my_rank==0) startTime=MPI_Wtime(); local_n = n / p; /* So is the number of trapezoids */ /* Length of each process' interval of integration = local_n * h. 
* So my interval starts at: */ MPE_Log_event(event3a, 0, "start compute"); integral = throwNeedles(local_n); MPE_Log_event(event3b, 0, "end compute"); /* Add up the integrals calculated by each process */ if (my_rank == 0) { total_integral = integral; for (source=1; source<p; source++) { MPE_Log_event(event1a, 0, "Start to receive"); MPI_Recv(&integral, 1, MPI_DOUBLE, source, tag, MPI_COMM_WORLD, &status); MPE_Log_event(event1b, 0, "Recieved"); total_integral = total_integral + integral; } } else { MPE_Log_event(event2a, 0, "Start to Send"); MPI_Send(&integral, 1, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD); MPE_Log_event(event2b, 0, "Sent"); } if (my_rank==0) { endTime = MPI_Wtime(); timeDifference = endTime - startTime; } /* Print the result */ if(my_rank == 0) { printf("The real value of PI is 3.141592653589793238462643\n"); printf("our estimate of the value of PI is %.25Lf\n", total_integral/p); printf("Time taken for whole computation = %f seconds\n", timeDifference); } // Before finalize MPE_Finish_log(argv[1]); /* Shut down MPI */ MPI_Finalize(); return 0; } /* main */
/* Compute pi by numerically integrating 4/(1+x^2) over [0,1] with the
 * midpoint rule, repeated ITER_COUNT times, logging each phase
 * (broadcast, barrier sync, compute, reduce) with MPE.  The Sync,
 * Compute and Reduce states carry extra data packed into `bytebuf`
 * that Jumpshot can display, matching the format strings given to
 * MPE_Describe_info_state below. */
int main( int argc, char *argv[] )
{
    int    n, myid, numprocs, ii, jj;
    double PI25DT = 3.141592653589793238462643;
    double mypi, pi, h, sum, x;
    double startwtime = 0.0, endwtime;
    int    namelen;
    int    event1a, event1b, event2a, event2b,
           event3a, event3b, event4a, event4b;
    char   processor_name[ MPI_MAX_PROCESSOR_NAME ];
    MPE_LOG_BYTES bytebuf;     /* scratch buffer for MPE_Log_pack data */
    int    bytebuf_pos;        /* current write offset into bytebuf */

    MPI_Init( &argc, &argv );
    MPI_Pcontrol( 0 );         /* logging off until the timed region */
    MPI_Comm_size( MPI_COMM_WORLD, &numprocs );
    MPI_Comm_rank( MPI_COMM_WORLD, &myid );
    MPI_Get_processor_name( processor_name, &namelen );
    fprintf( stderr, "Process %d running on %s\n", myid, processor_name );

    /*
        MPE_Init_log() & MPE_Finish_log() are NOT needed when
        liblmpe.a is linked with this program.  In that case,
        MPI_Init() would have called MPE_Init_log() already.
    */
#if defined( NO_MPI_LOGGING )
    MPE_Init_log();
#endif

    /* Get event ID from MPE, user should NOT assign event ID directly */
    event1a = MPE_Log_get_event_number();
    event1b = MPE_Log_get_event_number();
    event2a = MPE_Log_get_event_number();
    event2b = MPE_Log_get_event_number();
    event3a = MPE_Log_get_event_number();
    event3b = MPE_Log_get_event_number();
    event4a = MPE_Log_get_event_number();
    event4b = MPE_Log_get_event_number();

    /* Only rank 0 needs to register the state descriptions. */
    if ( myid == 0 ) {
        MPE_Describe_state( event1a, event1b, "Broadcast", "red" );
        MPE_Describe_info_state( event2a, event2b, "Sync", "orange",
                                 "source = %s()'s line %d." );
        MPE_Describe_info_state( event3a, event3b, "Compute", "blue",
                                 "mypi = %E computed at iteration %d." );
        MPE_Describe_info_state( event4a, event4b, "Reduce", "green",
                                 "final pi = %E at iteration %d." );
    }

    if ( myid == 0 ) {
        n = 1000000;
        startwtime = MPI_Wtime();
    }
    MPI_Barrier( MPI_COMM_WORLD );

    MPI_Pcontrol( 1 );         /* start logging the timed region */
    /* MPE_Start_log(); */

    for ( jj = 0; jj < ITER_COUNT; jj++ ) {
        MPE_Log_event( event1a, 0, NULL );
        MPI_Bcast( &n, 1, MPI_INT, 0, MPI_COMM_WORLD );
        MPE_Log_event( event1b, 0, NULL );

        MPE_Log_event( event2a, 0, NULL );
        MPI_Barrier( MPI_COMM_WORLD );
        /* Attach the calling function's name and line number to the
         * Sync end event (matches the "%s ... %d" format above). */
        int line_num;
        bytebuf_pos = 0;
        MPE_Log_pack( bytebuf, &bytebuf_pos, 's',
                      sizeof(__func__)-1, __func__ );
        line_num = __LINE__;
        MPE_Log_pack( bytebuf, &bytebuf_pos, 'd', 1, &line_num );
        MPE_Log_event( event2b, 0, bytebuf );

        MPE_Log_event( event3a, 0, NULL );
        /* Midpoint-rule integration; each rank takes every
         * numprocs-th interval. */
        h = 1.0 / (double) n;
        sum = 0.0;
        for ( ii = myid + 1; ii <= n; ii += numprocs ) {
            x = h * ((double)ii - 0.5);
            sum += f(x);
        }
        mypi = h * sum;
        /* Attach this rank's partial result and the iteration count. */
        bytebuf_pos = 0;
        MPE_Log_pack( bytebuf, &bytebuf_pos, 'E', 1, &mypi );
        MPE_Log_pack( bytebuf, &bytebuf_pos, 'd', 1, &jj );
        MPE_Log_event( event3b, 0, bytebuf );

        pi = 0.0;
        MPE_Log_event( event4a, 0, NULL );
        MPI_Reduce( &mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0,
                    MPI_COMM_WORLD );
        /* Attach the reduced result and the iteration count. */
        bytebuf_pos = 0;
        MPE_Log_pack( bytebuf, &bytebuf_pos, 'E', 1, &pi );
        MPE_Log_pack( bytebuf, &bytebuf_pos, 'd', 1, &jj );
        MPE_Log_event( event4b, 0, bytebuf );
    }
#if defined( NO_MPI_LOGGING )
    if ( argv != NULL )
        MPE_Finish_log( argv[0] );
    else
        MPE_Finish_log( "cpilog" );
#endif

    if ( myid == 0 ) {
        endwtime = MPI_Wtime();
        printf( "pi is approximately %.16f, Error is %.16f\n",
                pi, fabs(pi - PI25DT) );
        printf( "wall clock time = %f\n", endwtime-startwtime );
    }
    MPI_Finalize();
    return( 0 );
}
/* Driver for a parallel particle-tracing run: initializes MPI (and,
 * when compiled with -DMPE, a single MPE "Compute" state), runs the
 * flow computation, gathers the resulting traces on the root process,
 * and dumps them to "field_lines.out" order-independently. */
int main(int argc, char *argv[])
{
  int nproc; // mpi groupsize

  // init
  MPI_Init(&argc, &argv);
#ifdef MPE
  // compute_begin/compute_end are globals declared elsewhere.
  MPE_Log_get_state_eventIDs(&compute_begin, &compute_end);
  MPE_Describe_state(compute_begin, compute_end, "Compute", "red");
#endif
  GetArgs(argc, argv);
  // deleted TP 10/12/12
  // #ifdef USE_BIL
  //   BIL_Init(MPI_COMM_WORLD);
  // #endif
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nproc);
  Init();

  // run; the barriers bracket the timed region so TotTime measures
  // only the Run() phase.
  MPI_Barrier(MPI_COMM_WORLD);
  TotTime = MPI_Wtime();
  Run(MPI_COMM_WORLD);
  MPI_Barrier(MPI_COMM_WORLD);
  TotTime = MPI_Wtime() - TotTime;

  // print the performance stats
  //parflow->PrintPerf(TotTime, TotInTime, TotOutTime,
  //                   TotCompCommTime, TotParticles, size);

  //#ifdef GRAPHICS
#if 0
  if (rank == 0)
  {
    VECTOR3 min, max;
    min = VECTOR3(0.0f, 0.0f, 0.0f);
    max = VECTOR3((float)(size[0] - 1), (float)(size[1] - 1),
                  (float)(size[2] - 1));
    DrawInit(pt, npt, tot_ntrace, argc, argv, min, max, 0);
  }
#endif

  // Gather traces
  {
    // synchronize prior to gathering
    MPI_Barrier(MPI_COMM_WORLD);
    int *ntrace = NULL; // number of traces for each proc
    int n; // total number of my points
    // gather number of points in each trace at the root => ntrace
    int all_gather = 0; // 0: only root collects the data
    n = parflow->GatherNumPts(ntrace, all_gather, nblocks);
    // gather the actual points in each trace at the root
    parflow->GatherPts(ntrace, n, nblocks);
    MPI_Barrier(MPI_COMM_WORLD);
    if (ntrace) delete[] ntrace;
  }

#if 1
  // Root writes the gathered traces in a deterministic order.
  if (rank==0)
  {
    PathlineLoader trace("field_lines.out");
    trace.connectTraces();
    trace.dump();
  }
#else
  // The direct printout does not have fixed order
  if (rank==0) {
    printf("Traces=%d\n", tot_ntrace);
    int i,j, c=0;
    for (i=0; i<tot_ntrace; i++) {
      for (j=0; j<npt[i]; j++) {
        VECTOR4 &v = pt[c++];
        printf("%f %f %f %f, ", v[0], v[1], v[2], v[3]);
      }
      printf("\n");
    }
  }
#endif

  //printf("cleaning up\n");
  Cleanup();
  MPI_Barrier(MPI_COMM_WORLD);

  // edited TP 10/12/12
  // #ifdef USE_BIL
  //   BIL_Finalize();
  // #endif
  //printf("DIY_Finalize\n");
  DIY_Finalize();
  // end TP

  MPI_Finalize();
}
/* Initialize the MPI library (and, when compiled with USE_MPE, the MPE
 * logging states used throughout the code), report the node layout on
 * stderr, and return this process's rank.
 *
 * @param argc    pointer to main's argc (passed through to MPI_Init).
 * @param argv    main's argv (passed through to MPI_Init).
 * @param nnodes  out: total number of MPI processes.
 * @return        the MPI rank of the calling process (0 without MPI).
 *
 * Fixes relative to the previous version:
 *  - ev_sum_qgrid_start/finish were requested from MPE twice; the
 *    second request overwrote the first pair (leaking two event IDs),
 *    and the surviving pair was then described twice with conflicting
 *    names ("sum qgrid" and "sum charge grid").  The duplicate
 *    acquisition and duplicate description were removed.
 *  - Removed unused locals buf, i and flag.
 */
int gmx_setup(int *argc, char **argv, int *nnodes)
{
#ifndef GMX_MPI
  gmx_call("gmx_setup");
  return 0;
#else
  int  resultlen;  /* actual length of node name */
  int  mpi_num_nodes;
  int  mpi_my_rank;
  char mpi_hostname[MPI_MAX_PROCESSOR_NAME];

  /* Call the MPI routines */
  (void) MPI_Init(argc, &argv);
  (void) MPI_Comm_size( MPI_COMM_WORLD, &mpi_num_nodes );
  (void) MPI_Comm_rank( MPI_COMM_WORLD, &mpi_my_rank );
  (void) MPI_Get_processor_name( mpi_hostname, &resultlen );

#ifdef USE_MPE
  /* MPE logging routines. Get event IDs from MPE: */
  /* General events */
  ev_timestep1               = MPE_Log_get_event_number( );
  ev_timestep2               = MPE_Log_get_event_number( );
  ev_force_start             = MPE_Log_get_event_number( );
  ev_force_finish            = MPE_Log_get_event_number( );
  ev_do_fnbf_start           = MPE_Log_get_event_number( );
  ev_do_fnbf_finish          = MPE_Log_get_event_number( );
  ev_ns_start                = MPE_Log_get_event_number( );
  ev_ns_finish               = MPE_Log_get_event_number( );
  ev_calc_bonds_start        = MPE_Log_get_event_number( );
  ev_calc_bonds_finish       = MPE_Log_get_event_number( );
  ev_global_stat_start       = MPE_Log_get_event_number( );
  ev_global_stat_finish      = MPE_Log_get_event_number( );
  ev_virial_start            = MPE_Log_get_event_number( );
  ev_virial_finish           = MPE_Log_get_event_number( );

  /* Shift related events */
  ev_shift_start             = MPE_Log_get_event_number( );
  ev_shift_finish            = MPE_Log_get_event_number( );
  ev_unshift_start           = MPE_Log_get_event_number( );
  ev_unshift_finish          = MPE_Log_get_event_number( );
  ev_mk_mshift_start         = MPE_Log_get_event_number( );
  ev_mk_mshift_finish        = MPE_Log_get_event_number( );

  /* PME related events */
  ev_pme_start               = MPE_Log_get_event_number( );
  ev_pme_finish              = MPE_Log_get_event_number( );
  ev_spread_on_grid_start    = MPE_Log_get_event_number( );
  ev_spread_on_grid_finish   = MPE_Log_get_event_number( );
  ev_sum_qgrid_start         = MPE_Log_get_event_number( );
  ev_sum_qgrid_finish        = MPE_Log_get_event_number( );
  ev_gmxfft3d_start          = MPE_Log_get_event_number( );
  ev_gmxfft3d_finish         = MPE_Log_get_event_number( );
  ev_solve_pme_start         = MPE_Log_get_event_number( );
  ev_solve_pme_finish        = MPE_Log_get_event_number( );
  ev_gather_f_bsplines_start = MPE_Log_get_event_number( );
  ev_gather_f_bsplines_finish= MPE_Log_get_event_number( );
  ev_reduce_start            = MPE_Log_get_event_number( );
  ev_reduce_finish           = MPE_Log_get_event_number( );
  ev_rscatter_start          = MPE_Log_get_event_number( );
  ev_rscatter_finish         = MPE_Log_get_event_number( );
  ev_alltoall_start          = MPE_Log_get_event_number( );
  ev_alltoall_finish         = MPE_Log_get_event_number( );
  ev_pmeredist_start         = MPE_Log_get_event_number( );
  ev_pmeredist_finish        = MPE_Log_get_event_number( );
  ev_init_pme_start          = MPE_Log_get_event_number( );
  ev_init_pme_finish         = MPE_Log_get_event_number( );
  ev_send_coordinates_start  = MPE_Log_get_event_number( );
  ev_send_coordinates_finish = MPE_Log_get_event_number( );
  ev_update_fr_start         = MPE_Log_get_event_number( );
  ev_update_fr_finish        = MPE_Log_get_event_number( );
  ev_clear_rvecs_start       = MPE_Log_get_event_number( );
  ev_clear_rvecs_finish      = MPE_Log_get_event_number( );
  ev_update_start            = MPE_Log_get_event_number( );
  ev_update_finish           = MPE_Log_get_event_number( );
  ev_output_start            = MPE_Log_get_event_number( );
  ev_output_finish           = MPE_Log_get_event_number( );
  ev_sum_lrforces_start      = MPE_Log_get_event_number( );
  ev_sum_lrforces_finish     = MPE_Log_get_event_number( );
  ev_sort_start              = MPE_Log_get_event_number( );
  ev_sort_finish             = MPE_Log_get_event_number( );

  /* Essential dynamics related events */
  ev_edsam_start             = MPE_Log_get_event_number( );
  ev_edsam_finish            = MPE_Log_get_event_number( );
  ev_get_coords_start        = MPE_Log_get_event_number( );
  ev_get_coords_finish       = MPE_Log_get_event_number( );
  ev_ed_apply_cons_start     = MPE_Log_get_event_number( );
  ev_ed_apply_cons_finish    = MPE_Log_get_event_number( );
  ev_fit_to_reference_start  = MPE_Log_get_event_number( );
  ev_fit_to_reference_finish = MPE_Log_get_event_number( );

  /* describe events: only rank 0 registers the descriptions. */
  if ( mpi_my_rank == 0 )
  {
    /* General events */
    MPE_Describe_state(ev_timestep1,   ev_timestep2,   "timestep START", "magenta" );
    MPE_Describe_state(ev_force_start, ev_force_finish, "force", "cornflower blue" );
    MPE_Describe_state(ev_do_fnbf_start, ev_do_fnbf_finish, "do_fnbf", "navy" );
    MPE_Describe_state(ev_ns_start, ev_ns_finish, "neighbor search", "tomato" );
    MPE_Describe_state(ev_calc_bonds_start, ev_calc_bonds_finish, "bonded forces", "slate blue" );
    MPE_Describe_state(ev_global_stat_start, ev_global_stat_finish, "global stat", "firebrick3");
    MPE_Describe_state(ev_update_fr_start, ev_update_fr_finish, "update forcerec", "goldenrod");
    MPE_Describe_state(ev_clear_rvecs_start, ev_clear_rvecs_finish, "clear rvecs", "bisque");
    MPE_Describe_state(ev_update_start, ev_update_finish, "update", "cornsilk");
    MPE_Describe_state(ev_output_start, ev_output_finish, "output", "black");
    MPE_Describe_state(ev_virial_start, ev_virial_finish, "calc_virial", "thistle4");

    /* PME related events */
    MPE_Describe_state(ev_pme_start, ev_pme_finish, "doing PME", "grey" );
    MPE_Describe_state(ev_spread_on_grid_start, ev_spread_on_grid_finish, "spread", "dark orange" );
    MPE_Describe_state(ev_sum_qgrid_start, ev_sum_qgrid_finish, "sum qgrid", "slate blue");
    MPE_Describe_state(ev_gmxfft3d_start, ev_gmxfft3d_finish, "fft3d", "snow2" );
    MPE_Describe_state(ev_solve_pme_start, ev_solve_pme_finish, "solve PME", "indian red" );
    MPE_Describe_state(ev_gather_f_bsplines_start, ev_gather_f_bsplines_finish, "bsplines", "light sea green" );
    MPE_Describe_state(ev_reduce_start, ev_reduce_finish, "reduce", "cyan1" );
    MPE_Describe_state(ev_rscatter_start, ev_rscatter_finish, "rscatter", "cyan3" );
    MPE_Describe_state(ev_alltoall_start, ev_alltoall_finish, "alltoall", "LightCyan4" );
    MPE_Describe_state(ev_pmeredist_start, ev_pmeredist_finish, "pmeredist", "thistle" );
    MPE_Describe_state(ev_init_pme_start, ev_init_pme_finish, "init PME", "snow4");
    MPE_Describe_state(ev_send_coordinates_start, ev_send_coordinates_finish, "send_coordinates", "blue");
    MPE_Describe_state(ev_sum_lrforces_start, ev_sum_lrforces_finish, "sum_LRforces", "lime green");
    MPE_Describe_state(ev_sort_start, ev_sort_finish, "sort pme atoms", "brown");

    /* Shift related events */
    MPE_Describe_state(ev_shift_start, ev_shift_finish, "shift", "orange");
    MPE_Describe_state(ev_unshift_start, ev_unshift_finish, "unshift", "dark orange");
    MPE_Describe_state(ev_mk_mshift_start, ev_mk_mshift_finish, "mk_mshift", "maroon");

    /* Essential dynamics related events */
    MPE_Describe_state(ev_edsam_start, ev_edsam_finish, "EDSAM", "deep sky blue");
    MPE_Describe_state(ev_get_coords_start, ev_get_coords_finish, "ED get coords", "steel blue");
    MPE_Describe_state(ev_ed_apply_cons_start, ev_ed_apply_cons_finish, "ED apply constr", "forest green");
    MPE_Describe_state(ev_fit_to_reference_start, ev_fit_to_reference_finish, "ED fit to ref", "lavender");
  }
  /* NOTE(review): MPE_Init_log() is called after the event IDs were
   * obtained; with liblmpe linked, MPI_Init() has already initialized
   * logging, so this is a no-op in that configuration -- confirm for
   * standalone-MPE builds. */
  MPE_Init_log();
#endif

  fprintf(stderr, "NNODES=%d, MYRANK=%d, HOSTNAME=%s\n",
          mpi_num_nodes, mpi_my_rank, mpi_hostname);
  *nnodes = mpi_num_nodes;

  return mpi_my_rank;
#endif
}
/* One-time ADIO initialization: sets up the flattened-datatype list,
 * the direct-I/O flags (XFS/Lustre builds), system-wide hints, the MPE
 * logging states (when ADIOI_MPE_LOGGING is enabled), and the
 * shared-access-mode consensus reduction op.  Sets *error_code to
 * MPI_SUCCESS unconditionally. */
void ADIO_Init(int *argc, char ***argv, int *error_code)
{
#if defined(ROMIO_XFS) || defined(ROMIO_LUSTRE)
    char *c;
#endif

    ADIOI_UNREFERENCED_ARG(argc);
    ADIOI_UNREFERENCED_ARG(argv);

    /* initialize the linked list containing flattened datatypes */
    ADIOI_Flatlist = (ADIOI_Flatlist_node *) ADIOI_Malloc(sizeof(ADIOI_Flatlist_node));
    ADIOI_Flatlist->type = MPI_DATATYPE_NULL;
    ADIOI_Flatlist->next = NULL;
    ADIOI_Flatlist->blocklens = NULL;
    ADIOI_Flatlist->indices = NULL;

#if defined(ROMIO_XFS) || defined(ROMIO_LUSTRE)
    /* Direct I/O is opt-in via environment variables. */
    c = getenv("MPIO_DIRECT_READ");
    if (c && (!strcmp(c, "true") || !strcmp(c, "TRUE")))
        ADIOI_Direct_read = 1;
    else
        ADIOI_Direct_read = 0;
    c = getenv("MPIO_DIRECT_WRITE");
    if (c && (!strcmp(c, "true") || !strcmp(c, "TRUE")))
        ADIOI_Direct_write = 1;
    else
        ADIOI_Direct_write = 0;
#endif

    /* Assume system-wide hints won't change between runs: move hint
     * processing from ADIO_Open to here */
    /* FIXME should be checking error code from MPI_Info_create here */
    MPI_Info_create(&ADIOI_syshints);
    ADIOI_process_system_hints(ADIOI_syshints);

#ifdef ADIOI_MPE_LOGGING
    {
        /* One MPE state (start/end event pair) per I/O primitive. */
        MPE_Log_get_state_eventIDs( &ADIOI_MPE_open_a, &ADIOI_MPE_open_b );
        MPE_Log_get_state_eventIDs( &ADIOI_MPE_read_a, &ADIOI_MPE_read_b );
        MPE_Log_get_state_eventIDs( &ADIOI_MPE_write_a, &ADIOI_MPE_write_b );
        MPE_Log_get_state_eventIDs( &ADIOI_MPE_lseek_a, &ADIOI_MPE_lseek_b );
        MPE_Log_get_state_eventIDs( &ADIOI_MPE_close_a, &ADIOI_MPE_close_b );
        MPE_Log_get_state_eventIDs( &ADIOI_MPE_writelock_a, &ADIOI_MPE_writelock_b );
        MPE_Log_get_state_eventIDs( &ADIOI_MPE_readlock_a, &ADIOI_MPE_readlock_b );
        MPE_Log_get_state_eventIDs( &ADIOI_MPE_unlock_a, &ADIOI_MPE_unlock_b );
        MPE_Log_get_state_eventIDs( &ADIOI_MPE_postwrite_a, &ADIOI_MPE_postwrite_b );
        MPE_Log_get_state_eventIDs( &ADIOI_MPE_openinternal_a, &ADIOI_MPE_openinternal_b);
        MPE_Log_get_state_eventIDs( &ADIOI_MPE_stat_a, &ADIOI_MPE_stat_b);
        MPE_Log_get_state_eventIDs( &ADIOI_MPE_iread_a, &ADIOI_MPE_iread_b);
        MPE_Log_get_state_eventIDs( &ADIOI_MPE_iwrite_a, &ADIOI_MPE_iwrite_b);

        int comm_world_rank;
        MPI_Comm_rank( MPI_COMM_WORLD, &comm_world_rank );

        /* Only rank 0 registers the state names and colors. */
        if ( comm_world_rank == 0 ) {
            MPE_Describe_state( ADIOI_MPE_open_a, ADIOI_MPE_open_b, "open", "orange" );
            MPE_Describe_state( ADIOI_MPE_read_a, ADIOI_MPE_read_b, "read", "green" );
            MPE_Describe_state( ADIOI_MPE_write_a, ADIOI_MPE_write_b, "write", "blue" );
            MPE_Describe_state( ADIOI_MPE_lseek_a, ADIOI_MPE_lseek_b, "lseek", "red" );
            MPE_Describe_state( ADIOI_MPE_close_a, ADIOI_MPE_close_b, "close", "grey" );
            MPE_Describe_state( ADIOI_MPE_writelock_a, ADIOI_MPE_writelock_b, "writelock", "plum" );
            MPE_Describe_state( ADIOI_MPE_readlock_a, ADIOI_MPE_readlock_b, "readlock", "magenta" );
            MPE_Describe_state( ADIOI_MPE_unlock_a, ADIOI_MPE_unlock_b, "unlock", "purple" );
            MPE_Describe_state( ADIOI_MPE_postwrite_a, ADIOI_MPE_postwrite_b, "postwrite", "ivory" );
            MPE_Describe_state( ADIOI_MPE_openinternal_a, ADIOI_MPE_openinternal_b, "open system", "blue");
            MPE_Describe_state( ADIOI_MPE_stat_a, ADIOI_MPE_stat_b, "stat", "purple");
            MPE_Describe_state( ADIOI_MPE_iread_a, ADIOI_MPE_iread_b, "iread", "purple");
            MPE_Describe_state( ADIOI_MPE_iwrite_a, ADIOI_MPE_iwrite_b, "iwrite", "purple");
        }
    }
#endif

    *error_code = MPI_SUCCESS;

    /* Reduction op used to agree on file access modes across ranks. */
    MPI_Op_create(my_consensus, 1, &ADIO_same_amode);
}
/* Compute pi by numerically integrating 4/(1+x^2) over [0,1] with the
 * midpoint rule, repeated 5 times, logging the broadcast / sync /
 * compute / reduce phases with MPE event pairs.  MPI_Pcontrol is used
 * to restrict logging to the timed region. */
int main( int argc, char *argv[])
{
    int    n, myid, numprocs, i, j;
    double PI25DT = 3.141592653589793238462643;
    double mypi, pi, h, sum, x;
    double startwtime = 0.0, endwtime;
    int    namelen;
    int    event1a, event1b, event2a, event2b,
           event3a, event3b, event4a, event4b;
    char   processor_name[MPI_MAX_PROCESSOR_NAME];

    MPI_Init(&argc,&argv);
    MPI_Pcontrol( 0 );      /* logging off until the timed region */
    MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD,&myid);
    MPI_Get_processor_name(processor_name,&namelen);
    fprintf(stderr,"Process %d running on %s\n", myid, processor_name);

    /*
        MPE_Init_log() & MPE_Finish_log() are NOT needed when
        liblmpe.a is linked with this program.  In that case,
        MPI_Init() would have called MPE_Init_log() already.
    */
    /* MPE_Init_log(); */

    /* Get event ID from MPE, user should NOT assign event ID */
    event1a = MPE_Log_get_event_number();
    event1b = MPE_Log_get_event_number();
    event2a = MPE_Log_get_event_number();
    event2b = MPE_Log_get_event_number();
    event3a = MPE_Log_get_event_number();
    event3b = MPE_Log_get_event_number();
    event4a = MPE_Log_get_event_number();
    event4b = MPE_Log_get_event_number();

    /* Only rank 0 needs to register the state descriptions. */
    if (myid == 0) {
        MPE_Describe_state(event1a, event1b, "Broadcast", "red");
        MPE_Describe_state(event2a, event2b, "Compute",   "blue");
        MPE_Describe_state(event3a, event3b, "Reduce",    "green");
        MPE_Describe_state(event4a, event4b, "Sync",      "orange");
    }

    if (myid == 0) {
        n = 1000000;
        startwtime = MPI_Wtime();
    }
    MPI_Barrier(MPI_COMM_WORLD);

    MPI_Pcontrol( 1 );      /* start logging the timed region */
    /* MPE_Start_log(); */

    for (j = 0; j < 5; j++) {
        MPE_Log_event(event1a, 0, NULL);
        MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPE_Log_event(event1b, 0, NULL);

        MPE_Log_event(event4a, 0, NULL);
        MPI_Barrier(MPI_COMM_WORLD);
        MPE_Log_event(event4b, 0, NULL);

        MPE_Log_event(event2a, 0, NULL);
        /* Midpoint-rule integration; each rank takes every
         * numprocs-th interval. */
        h = 1.0 / (double) n;
        sum = 0.0;
        for (i = myid + 1; i <= n; i += numprocs) {
            x = h * ((double)i - 0.5);
            sum += f(x);
        }
        mypi = h * sum;
        MPE_Log_event(event2b, 0, NULL);

        MPE_Log_event(event3a, 0, NULL);
        MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0,
                   MPI_COMM_WORLD);
        MPE_Log_event(event3b, 0, NULL);
    }
    /* MPE_Finish_log("cpilog"); */

    if (myid == 0) {
        endwtime = MPI_Wtime();
        printf("pi is approximately %.16f, Error is %.16f\n",
               pi, fabs(pi - PI25DT));
        printf("wall clock time = %f\n", endwtime-startwtime);
    }
    MPI_Finalize();
    return(0);
}
int main(int argc, char **argv) { /* MPI stuff. */ int mpi_namelen; char mpi_name[MPI_MAX_PROCESSOR_NAME]; int mpi_size, mpi_rank; MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; /* Netcdf-4 stuff. */ int ncid, v1id, dimids[NDIMS]; size_t start[NDIMS], count[NDIMS]; int data[DIMSIZE * DIMSIZE], i, res; int slab_data[DIMSIZE * DIMSIZE / 4]; /* one slab */ char file_name[NC_MAX_NAME + 1]; #ifdef USE_MPE int s_init, e_init, s_define, e_define, s_write, e_write, s_close, e_close; #endif /* USE_MPE */ /* Initialize MPI. */ MPI_Init(&argc,&argv); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Get_processor_name(mpi_name, &mpi_namelen); /*printf("mpi_name: %s size: %d rank: %d\n", mpi_name, mpi_size, mpi_rank);*/ #ifdef USE_MPE MPE_Init_log(); s_init = MPE_Log_get_event_number(); e_init = MPE_Log_get_event_number(); s_define = MPE_Log_get_event_number(); e_define = MPE_Log_get_event_number(); s_write = MPE_Log_get_event_number(); e_write = MPE_Log_get_event_number(); s_close = MPE_Log_get_event_number(); e_close = MPE_Log_get_event_number(); MPE_Describe_state(s_init, e_init, "Init", "red"); MPE_Describe_state(s_define, e_define, "Define", "yellow"); MPE_Describe_state(s_write, e_write, "Write", "green"); MPE_Describe_state(s_close, e_close, "Close", "purple"); MPE_Start_log(); MPE_Log_event(s_init, 0, "start init"); #endif /* USE_MPE */ if (mpi_rank == 1) { printf("\n*** tst_parallel testing very basic parallel access.\n"); printf("*** tst_parallel testing whether we can create file for parallel access and write to it..."); } /* Create phony data. We're going to write a 24x24 array of ints, in 4 sets of 144. 
*/ /*printf("mpi_rank*QTR_DATA=%d (mpi_rank+1)*QTR_DATA-1=%d\n", mpi_rank*QTR_DATA, (mpi_rank+1)*QTR_DATA);*/ for (i = mpi_rank * QTR_DATA; i < (mpi_rank + 1) * QTR_DATA; i++) data[i] = mpi_rank; for (i = 0; i < DIMSIZE * DIMSIZE / 4; i++) slab_data[i] = mpi_rank; #ifdef USE_MPE MPE_Log_event(e_init, 0, "end init"); MPE_Log_event(s_define, 0, "start define file"); #endif /* USE_MPE */ /* Create a parallel netcdf-4 file. */ /*nc_set_log_level(3);*/ sprintf(file_name, "%s/%s", TEMP_LARGE, FILE); if ((res = nc_create_par(file_name, NC_NETCDF4|NC_MPIIO, comm, info, &ncid))) ERR; /* Create three dimensions. */ if (nc_def_dim(ncid, "d1", DIMSIZE, dimids)) ERR; if (nc_def_dim(ncid, "d2", DIMSIZE, &dimids[1])) ERR; if (nc_def_dim(ncid, "d3", NUM_SLABS, &dimids[2])) ERR; /* Create one var. */ if ((res = nc_def_var(ncid, "v1", NC_INT, NDIMS, dimids, &v1id))) ERR; /* Write metadata to file. */ if ((res = nc_enddef(ncid))) ERR; #ifdef USE_MPE MPE_Log_event(e_define, 0, "end define file"); if (mpi_rank) sleep(mpi_rank); #endif /* USE_MPE */ /* Set up slab for this process. */ start[0] = mpi_rank * DIMSIZE/mpi_size; start[1] = 0; count[0] = DIMSIZE/mpi_size; count[1] = DIMSIZE; count[2] = 1; /*printf("mpi_rank=%d start[0]=%d start[1]=%d count[0]=%d count[1]=%d\n", mpi_rank, start[0], start[1], count[0], count[1]);*/ if (nc_var_par_access(ncid, v1id, NC_COLLECTIVE)) ERR; /* if (nc_var_par_access(ncid, v1id, NC_INDEPENDENT)) ERR;*/ for (start[2] = 0; start[2] < NUM_SLABS; start[2]++) { #ifdef USE_MPE MPE_Log_event(s_write, 0, "start write slab"); #endif /* USE_MPE */ /* Write slabs of phoney data. */ if (nc_put_vara_int(ncid, v1id, start, count, slab_data)) ERR; #ifdef USE_MPE MPE_Log_event(e_write, 0, "end write file"); #endif /* USE_MPE */ } #ifdef USE_MPE MPE_Log_event(s_close, 0, "start close file"); #endif /* USE_MPE */ /* Close the netcdf file. 
*/ if ((res = nc_close(ncid))) ERR; #ifdef USE_MPE MPE_Log_event(e_close, 0, "end close file"); #endif /* USE_MPE */ /* Delete this large file. */ remove(file_name); /* Shut down MPI. */ MPI_Finalize(); if (mpi_rank == 1) { SUMMARIZE_ERR; FINAL_RESULTS; } return 0; }
int main( int argc, char *argv[] ) { int n, myid, numprocs, ii, jj; double PI25DT = 3.141592653589793238462643; double mypi, pi, h, sum, x; double startwtime = 0.0, endwtime; int namelen; int event1a, event1b, event2a, event2b, event3a, event3b, event4a, event4b; int event1, event2, event3; char processor_name[ MPI_MAX_PROCESSOR_NAME ]; MPI_Init( &argc, &argv ); MPI_Pcontrol( 0 ); MPI_Comm_size( MPI_COMM_WORLD, &numprocs ); MPI_Comm_rank( MPI_COMM_WORLD, &myid ); MPI_Get_processor_name( processor_name, &namelen ); fprintf( stderr, "Process %d running on %s\n", myid, processor_name ); /* MPE_Init_log() & MPE_Finish_log() are NOT needed when liblmpe.a is linked with this program. In that case, MPI_Init() would have called MPE_Init_log() already. */ #if defined( NO_MPI_LOGGING ) MPE_Init_log(); #endif /* user should NOT assign eventIDs directly in MPE_Describe_state() Get the eventIDs for user-defined STATES(rectangles) from MPE_Log_get_state_eventIDs() instead of the deprecated function MPE_Log_get_event_number(). 
*/ MPE_Log_get_state_eventIDs( &event1a, &event1b ); MPE_Log_get_state_eventIDs( &event2a, &event2b ); MPE_Log_get_state_eventIDs( &event3a, &event3b ); MPE_Log_get_state_eventIDs( &event4a, &event4b ); if ( myid == 0 ) { MPE_Describe_state( event1a, event1b, "Broadcast", "red" ); MPE_Describe_state( event2a, event2b, "Sync", "orange" ); MPE_Describe_state( event3a, event3b, "Compute", "blue" ); MPE_Describe_state( event4a, event4b, "Reduce", "green" ); } /* Get event ID for Solo-Event(single timestamp object) from MPE */ MPE_Log_get_solo_eventID( &event1 ); MPE_Log_get_solo_eventID( &event2 ); MPE_Log_get_solo_eventID( &event3 ); if ( myid == 0 ) { MPE_Describe_event( event1, "Broadcast Post", "white" ); MPE_Describe_event( event2, "Compute Start", "purple" ); MPE_Describe_event( event3, "Compute End", "navy" ); } if ( myid == 0 ) { n = 1000000; startwtime = MPI_Wtime(); } MPI_Barrier( MPI_COMM_WORLD ); MPI_Pcontrol( 1 ); /* MPE_Start_log(); */ for ( jj = 0; jj < 5; jj++ ) { MPE_Log_event( event1a, 0, NULL ); MPI_Bcast( &n, 1, MPI_INT, 0, MPI_COMM_WORLD ); MPE_Log_event( event1b, 0, NULL ); MPE_Log_event( event1, 0, NULL ); MPE_Log_event( event2a, 0, NULL ); MPI_Barrier( MPI_COMM_WORLD ); MPE_Log_event( event2b, 0, NULL ); MPE_Log_event( event2, 0, NULL ); MPE_Log_event( event3a, 0, NULL ); h = 1.0 / (double) n; sum = 0.0; for ( ii = myid + 1; ii <= n; ii += numprocs ) { x = h * ((double)ii - 0.5); sum += f(x); } mypi = h * sum; MPE_Log_event( event3b, 0, NULL ); MPE_Log_event( event3, 0, NULL ); pi = 0.0; MPE_Log_event( event4a, 0, NULL ); MPI_Reduce( &mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); MPE_Log_event( event4b, 0, NULL ); MPE_Log_sync_clocks(); } #if defined( NO_MPI_LOGGING ) if ( argv != NULL ) MPE_Finish_log( argv[0] ); else MPE_Finish_log( "cpilog" ); #endif if ( myid == 0 ) { endwtime = MPI_Wtime(); printf( "pi is approximately %.16f, Error is %.16f\n", pi, fabs(pi - PI25DT) ); printf( "wall clock time = %f\n", endwtime-startwtime ); } 
MPI_Finalize(); return( 0 ); }
int main(int argc, char **argv) { int p, my_rank; #ifdef USE_MPE int s_init, e_init, s_define, e_define, s_write, e_write, s_close, e_close; #endif /* USE_MPE */ MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &p); #ifdef USE_MPE MPE_Init_log(); s_init = MPE_Log_get_event_number(); e_init = MPE_Log_get_event_number(); s_define = MPE_Log_get_event_number(); e_define = MPE_Log_get_event_number(); s_write = MPE_Log_get_event_number(); e_write = MPE_Log_get_event_number(); s_close = MPE_Log_get_event_number(); e_close = MPE_Log_get_event_number(); MPE_Describe_state(s_init, e_init, "Init", "red"); MPE_Describe_state(s_define, e_define, "Define", "yellow"); MPE_Describe_state(s_write, e_write, "Write", "green"); MPE_Describe_state(s_close, e_close, "Close", "purple"); MPE_Start_log(); MPE_Log_event(s_init, 0, "start init"); #endif /* USE_MPE */ if (!my_rank) printf("*** Creating file for parallel I/O read, and rereading it..."); { hid_t fapl_id, fileid, whole_spaceid, dsid, slice_spaceid, whole_spaceid1, xferid; hsize_t start[NDIMS], count[NDIMS]; hsize_t dims[1]; int data[SC1], data_in[SC1]; int num_steps; double ftime; int write_us, read_us; int max_write_us, max_read_us; float write_rate, read_rate; int i, s; /* We will write the same slice of random data over and over to * fill the file. */ for (i = 0; i < SC1; i++) data[i] = rand(); #ifdef USE_MPE MPE_Log_event(e_init, 0, "end init"); MPE_Log_event(s_define, 0, "start define file"); #endif /* USE_MPE */ /* Create file. */ if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) ERR; if (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL) < 0) ERR; if ((fileid = H5Fcreate(FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) ERR; /* Create a space to deal with one slice in memory. */ dims[0] = SC1; if ((slice_spaceid = H5Screate_simple(NDIMS, dims, NULL)) < 0) ERR; /* Create a space to write all slices. 
*/ dims[0] = DIM2_LEN; if ((whole_spaceid = H5Screate_simple(NDIMS, dims, NULL)) < 0) ERR; /* Create dataset. */ if ((dsid = H5Dcreate1(fileid, VAR_NAME, H5T_NATIVE_INT, whole_spaceid, H5P_DEFAULT)) < 0) ERR; /* Use collective write operations. */ if ((xferid = H5Pcreate(H5P_DATASET_XFER)) < 0) ERR; if (H5Pset_dxpl_mpio(xferid, H5FD_MPIO_COLLECTIVE) < 0) ERR; #ifdef USE_MPE MPE_Log_event(e_define, 0, "end define file"); if (my_rank) sleep(my_rank); #endif /* USE_MPE */ /* Write the data in num_step steps. */ ftime = MPI_Wtime(); num_steps = (DIM2_LEN/SC1) / p; for (s = 0; s < num_steps; s++) { #ifdef USE_MPE MPE_Log_event(s_write, 0, "start write slab"); #endif /* USE_MPE */ /* Select hyperslab for write of one slice. */ start[0] = s * SC1 * p + my_rank * SC1; count[0] = SC1; if (H5Sselect_hyperslab(whole_spaceid, H5S_SELECT_SET, start, NULL, count, NULL) < 0) ERR; if (H5Dwrite(dsid, H5T_NATIVE_INT, slice_spaceid, whole_spaceid, xferid, data) < 0) ERR; #ifdef USE_MPE MPE_Log_event(e_write, 0, "end write file"); #endif /* USE_MPE */ } write_us = (MPI_Wtime() - ftime) * MILLION; MPI_Reduce(&write_us, &max_write_us, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD); if (!my_rank) { write_rate = (float)(DIM2_LEN * sizeof(int))/(float)max_write_us; printf("\np=%d, write_rate=%g", p, write_rate); } #ifdef USE_MPE MPE_Log_event(s_close, 0, "start close file"); #endif /* USE_MPE */ /* Close. These collective operations will allow every process * to catch up. */ if (H5Dclose(dsid) < 0 || H5Sclose(whole_spaceid) < 0 || H5Sclose(slice_spaceid) < 0 || H5Pclose(fapl_id) < 0 || H5Fclose(fileid) < 0) ERR; #ifdef USE_MPE MPE_Log_event(e_close, 0, "end close file"); #endif /* USE_MPE */ /* Open the file. 
*/ if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) ERR; if (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL) < 0) ERR; if (H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) ERR; if ((fileid = H5Fopen(FILE_NAME, H5F_ACC_RDONLY, fapl_id)) < 0) ERR; /* Create a space to deal with one slice in memory. */ dims[0] = SC1; if ((slice_spaceid = H5Screate_simple(NDIMS, dims, NULL)) < 0) ERR; /* Open the dataset. */ if ((dsid = H5Dopen(fileid, VAR_NAME)) < 0) ERR; if ((whole_spaceid1 = H5Dget_space(dsid)) < 0) ERR; ftime = MPI_Wtime(); /* Read the data, a slice at a time. */ for (s = 0; s < num_steps; s++) { /* Select hyperslab for read of one slice. */ start[0] = s * SC1 * p + my_rank * SC1; count[0] = SC1; if (H5Sselect_hyperslab(whole_spaceid1, H5S_SELECT_SET, start, NULL, count, NULL) < 0) { ERR; return 2; } if (H5Dread(dsid, H5T_NATIVE_INT, slice_spaceid, whole_spaceid1, H5P_DEFAULT, data_in) < 0) { ERR; return 2; } /* /\* Check the slice of data. *\/ */ /* for (i = 0; i < SC1; i++) */ /* if (data[i] != data_in[i]) */ /* { */ /* ERR; */ /* return 2; */ /* } */ } read_us = (MPI_Wtime() - ftime) * MILLION; MPI_Reduce(&read_us, &max_read_us, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD); if (!my_rank) { read_rate = (float)(DIM2_LEN * sizeof(int))/(float)max_read_us; printf(", read_rate=%g\n", read_rate); } /* Close down. */ if (H5Dclose(dsid) < 0 || H5Sclose(slice_spaceid) < 0 || H5Sclose(whole_spaceid1) < 0 || H5Pclose(fapl_id) < 0 || H5Fclose(fileid) < 0) ERR; } if (!my_rank) SUMMARIZE_ERR; MPI_Finalize(); if (!my_rank) FINAL_RESULTS; return 0; }
int main(int argc, char **argv) { /* MPI stuff. */ int mpi_namelen; char mpi_name[MPI_MAX_PROCESSOR_NAME]; int mpi_size, mpi_rank; MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; double start_time = 0, total_time; /* Netcdf-4 stuff. */ int ncid, varid, dimids[NDIMS]; size_t start[NDIMS] = {0, 0, 0}; size_t count[NDIMS] = {1, DIMSIZE, DIMSIZE}; int data[DIMSIZE * DIMSIZE], data_in[DIMSIZE * DIMSIZE]; int j, i; char file_name[NC_MAX_NAME + 1]; int ndims_in, nvars_in, natts_in, unlimdimid_in; #ifdef USE_MPE int s_init, e_init, s_define, e_define, s_write, e_write, s_close, e_close; #endif /* USE_MPE */ /* Initialize MPI. */ MPI_Init(&argc,&argv); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Get_processor_name(mpi_name, &mpi_namelen); /*printf("mpi_name: %s size: %d rank: %d\n", mpi_name, mpi_size, mpi_rank);*/ /* Must be able to evenly divide my slabs between processors. */ if (NUM_SLABS % mpi_size != 0) { if (!mpi_rank) printf("NUM_SLABS (%d) is not evenly divisible by mpi_size(%d)\n", NUM_SLABS, mpi_size); ERR; } #ifdef USE_MPE MPE_Init_log(); s_init = MPE_Log_get_event_number(); e_init = MPE_Log_get_event_number(); s_define = MPE_Log_get_event_number(); e_define = MPE_Log_get_event_number(); s_write = MPE_Log_get_event_number(); e_write = MPE_Log_get_event_number(); s_close = MPE_Log_get_event_number(); e_close = MPE_Log_get_event_number(); s_open = MPE_Log_get_event_number(); e_open = MPE_Log_get_event_number(); MPE_Describe_state(s_init, e_init, "Init", "red"); MPE_Describe_state(s_define, e_define, "Define", "yellow"); MPE_Describe_state(s_write, e_write, "Write", "green"); MPE_Describe_state(s_close, e_close, "Close", "purple"); MPE_Describe_state(s_open, e_open, "Open", "blue"); MPE_Start_log(); MPE_Log_event(s_init, 0, "start init"); #endif /* USE_MPE */ /* if (!mpi_rank) */ /* { */ /* printf("\n*** Testing parallel I/O some more.\n"); */ /* printf("*** writing a %d x %d x %d file from %d 
processors...\n", */ /* NUM_SLABS, DIMSIZE, DIMSIZE, mpi_size); */ /* } */ /* We will write the same slab over and over. */ for (i = 0; i < DIMSIZE * DIMSIZE; i++) data[i] = mpi_rank; #ifdef USE_MPE MPE_Log_event(e_init, 0, "end init"); MPE_Log_event(s_define, 0, "start define file"); #endif /* USE_MPE */ /* Create a parallel netcdf-4 file. */ sprintf(file_name, "%s/%s", TEMP_LARGE, FILE_NAME); if (nc_create_par(file_name, NC_PNETCDF, comm, info, &ncid)) ERR; /* A global attribute holds the number of processors that created * the file. */ if (nc_put_att_int(ncid, NC_GLOBAL, "num_processors", NC_INT, 1, &mpi_size)) ERR; /* Create three dimensions. */ if (nc_def_dim(ncid, DIM1_NAME, NUM_SLABS, dimids)) ERR; if (nc_def_dim(ncid, DIM2_NAME, DIMSIZE, &dimids[1])) ERR; if (nc_def_dim(ncid, DIM3_NAME, DIMSIZE, &dimids[2])) ERR; /* Create one var. */ if (nc_def_var(ncid, VAR_NAME, NC_INT, NDIMS, dimids, &varid)) ERR; /* Write metadata to file. */ if (nc_enddef(ncid)) ERR; #ifdef USE_MPE MPE_Log_event(e_define, 0, "end define file"); if (mpi_rank) sleep(mpi_rank); #endif /* USE_MPE */ /* if (nc_var_par_access(ncid, varid, NC_COLLECTIVE)) ERR;*/ if (nc_var_par_access(ncid, varid, NC_INDEPENDENT)) ERR; if (!mpi_rank) start_time = MPI_Wtime(); /* Write all the slabs this process is responsible for. */ for (i = 0; i < NUM_SLABS / mpi_size; i++) { start[0] = NUM_SLABS / mpi_size * mpi_rank + i; #ifdef USE_MPE MPE_Log_event(s_write, 0, "start write slab"); #endif /* USE_MPE */ /* Write one slab of data. */ if (nc_put_vara_int(ncid, varid, start, count, data)) ERR; #ifdef USE_MPE MPE_Log_event(e_write, 0, "end write file"); #endif /* USE_MPE */ } if (!mpi_rank) { total_time = MPI_Wtime() - start_time; /* printf("num_proc\ttime(s)\n");*/ printf("%d\t%g\t%g\n", mpi_size, total_time, DIMSIZE * DIMSIZE * NUM_SLABS * sizeof(int) / total_time); } #ifdef USE_MPE MPE_Log_event(s_close, 0, "start close file"); #endif /* USE_MPE */ /* Close the netcdf file. 
*/ if (nc_close(ncid)) ERR; #ifdef USE_MPE MPE_Log_event(e_close, 0, "end close file"); #endif /* USE_MPE */ /* Reopen the file and check it. */ if (nc_open_par(file_name, NC_NOWRITE, comm, info, &ncid)) ERR; if (nc_inq(ncid, &ndims_in, &nvars_in, &natts_in, &unlimdimid_in)) ERR; if (ndims_in != NDIMS || nvars_in != 1 || natts_in != 1 || unlimdimid_in != -1) ERR; /* Read all the slabs this process is responsible for. */ for (i = 0; i < NUM_SLABS / mpi_size; i++) { start[0] = NUM_SLABS / mpi_size * mpi_rank + i; #ifdef USE_MPE MPE_Log_event(s_read, 0, "start read slab"); #endif /* USE_MPE */ /* Read one slab of data. */ if (nc_get_vara_int(ncid, varid, start, count, data_in)) ERR; /* Check data. */ for (j = 0; j < DIMSIZE * DIMSIZE; j++) if (data_in[j] != mpi_rank) { ERR; break; } #ifdef USE_MPE MPE_Log_event(e_read, 0, "end read file"); #endif /* USE_MPE */ } #ifdef USE_MPE MPE_Log_event(s_close, 0, "start close file"); #endif /* USE_MPE */ /* Close the netcdf file. */ if (nc_close(ncid)) ERR; #ifdef USE_MPE MPE_Log_event(e_close, 0, "end close file"); #endif /* USE_MPE */ /* Delete this large file. */ remove(file_name); /* Shut down MPI. */ MPI_Finalize(); /* if (!mpi_rank) */ /* { */ /* SUMMARIZE_ERR; */ /* FINAL_RESULTS; */ /* } */ return total_err; }
void ADIO_Init(int *argc, char ***argv, int *error_code) { #if defined(ROMIO_XFS) || defined(ROMIO_LUSTRE) char *c; #endif MPL_UNREFERENCED_ARG(argc); MPL_UNREFERENCED_ARG(argv); #ifdef ROMIO_INSIDE_MPICH MPIR_Ext_init(); #endif #if defined(ROMIO_XFS) || defined(ROMIO_LUSTRE) c = getenv("MPIO_DIRECT_READ"); if (c && (!strcmp(c, "true") || !strcmp(c, "TRUE"))) ADIOI_Direct_read = 1; else ADIOI_Direct_read = 0; c = getenv("MPIO_DIRECT_WRITE"); if (c && (!strcmp(c, "true") || !strcmp(c, "TRUE"))) ADIOI_Direct_write = 1; else ADIOI_Direct_write = 0; #endif #ifdef ADIOI_MPE_LOGGING { MPE_Log_get_state_eventIDs(&ADIOI_MPE_open_a, &ADIOI_MPE_open_b); MPE_Log_get_state_eventIDs(&ADIOI_MPE_read_a, &ADIOI_MPE_read_b); MPE_Log_get_state_eventIDs(&ADIOI_MPE_write_a, &ADIOI_MPE_write_b); MPE_Log_get_state_eventIDs(&ADIOI_MPE_lseek_a, &ADIOI_MPE_lseek_b); MPE_Log_get_state_eventIDs(&ADIOI_MPE_close_a, &ADIOI_MPE_close_b); MPE_Log_get_state_eventIDs(&ADIOI_MPE_writelock_a, &ADIOI_MPE_writelock_b); MPE_Log_get_state_eventIDs(&ADIOI_MPE_readlock_a, &ADIOI_MPE_readlock_b); MPE_Log_get_state_eventIDs(&ADIOI_MPE_unlock_a, &ADIOI_MPE_unlock_b); MPE_Log_get_state_eventIDs(&ADIOI_MPE_postwrite_a, &ADIOI_MPE_postwrite_b); MPE_Log_get_state_eventIDs(&ADIOI_MPE_openinternal_a, &ADIOI_MPE_openinternal_b); MPE_Log_get_state_eventIDs(&ADIOI_MPE_stat_a, &ADIOI_MPE_stat_b); MPE_Log_get_state_eventIDs(&ADIOI_MPE_iread_a, &ADIOI_MPE_iread_b); MPE_Log_get_state_eventIDs(&ADIOI_MPE_iwrite_a, &ADIOI_MPE_iwrite_b); int comm_world_rank; MPI_Comm_rank(MPI_COMM_WORLD, &comm_world_rank); if (comm_world_rank == 0) { MPE_Describe_state(ADIOI_MPE_open_a, ADIOI_MPE_open_b, "open", "orange"); MPE_Describe_state(ADIOI_MPE_read_a, ADIOI_MPE_read_b, "read", "green"); MPE_Describe_state(ADIOI_MPE_write_a, ADIOI_MPE_write_b, "write", "blue"); MPE_Describe_state(ADIOI_MPE_lseek_a, ADIOI_MPE_lseek_b, "lseek", "red"); MPE_Describe_state(ADIOI_MPE_close_a, ADIOI_MPE_close_b, "close", "grey"); 
MPE_Describe_state(ADIOI_MPE_writelock_a, ADIOI_MPE_writelock_b, "writelock", "plum"); MPE_Describe_state(ADIOI_MPE_readlock_a, ADIOI_MPE_readlock_b, "readlock", "magenta"); MPE_Describe_state(ADIOI_MPE_unlock_a, ADIOI_MPE_unlock_b, "unlock", "purple"); MPE_Describe_state(ADIOI_MPE_postwrite_a, ADIOI_MPE_postwrite_b, "postwrite", "ivory"); MPE_Describe_state(ADIOI_MPE_openinternal_a, ADIOI_MPE_openinternal_b, "open system", "blue"); MPE_Describe_state(ADIOI_MPE_stat_a, ADIOI_MPE_stat_b, "stat", "purple"); MPE_Describe_state(ADIOI_MPE_iread_a, ADIOI_MPE_iread_b, "iread", "purple"); MPE_Describe_state(ADIOI_MPE_iwrite_a, ADIOI_MPE_iwrite_b, "iwrite", "purple"); } } #endif *error_code = MPI_SUCCESS; MPI_Op_create(my_consensus, 1, &ADIO_same_amode); }