/* Start the cycle counter for region ewc.
 *
 * No-op when wc is NULL (cycle counting disabled). With MPI, optionally
 * barriers the group first so all ranks start timing together. When the
 * all-counters matrix (wcc_all) is active, also tracks nesting depth and
 * records sub-counter transitions.
 */
void wallcycle_start(gmx_wallcycle_t wc, int ewc)
{
    gmx_cycles_t now;

    if (wc == NULL)
    {
        return;
    }

#ifdef GMX_MPI
    if (wc->wc_barrier)
    {
        /* Synchronize the group so the counters measure comparable spans */
        MPI_Barrier(wc->mpi_comm_mygroup);
    }
#endif

    now                = gmx_cycles_read();
    wc->wcc[ewc].start = now;

    if (wc->wcc_all != NULL)
    {
        wc->wc_depth++;
        if (ewc == ewcRUN)
        {
            wallcycle_all_start(wc, ewc, now);
        }
        else if (wc->wc_depth == 3)
        {
            wallcycle_all_stop(wc, ewc, now);
        }
    }
}
/* Stop the cycle counter for region ewc and accumulate the elapsed cycles.
 *
 * Returns the number of cycles elapsed since the matching wallcycle_start,
 * or 0 when wc is NULL (cycle counting disabled) or when the reading is
 * invalid. With MPI, optionally barriers the group first. When the
 * all-counters matrix (wcc_all) is active, also tracks nesting depth and
 * records sub-counter transitions.
 */
double wallcycle_stop(gmx_wallcycle_t wc, int ewc)
{
    gmx_cycles_t cycle, last;

    if (wc == NULL)
    {
        return 0;
    }

#ifdef GMX_MPI
    if (wc->wc_barrier)
    {
        MPI_Barrier(wc->mpi_comm_mygroup);
    }
#endif

    cycle = gmx_cycles_read();
    /* When a process or thread migrates between cores whose cycle counters
     * are not synchronized, the stop reading can be smaller than the start
     * reading; the unguarded subtraction would then underflow to a huge
     * bogus count. Detect the negative difference, record zero instead and
     * flag the accounting as invalid (same guard as the companion
     * wallcycle_stop variant in this file). */
    if (cycle >= wc->wcc[ewc].start)
    {
        last = cycle - wc->wcc[ewc].start;
    }
    else
    {
        last                 = 0;
        wc->haveInvalidCount = TRUE;
    }
    wc->wcc[ewc].c += last;
    wc->wcc[ewc].n++;

    if (wc->wcc_all)
    {
        wc->wc_depth--;
        if (ewc == ewcRUN)
        {
            wallcycle_all_stop(wc, ewc, cycle);
        }
        else if (wc->wc_depth == 2)
        {
            wallcycle_all_start(wc, ewc, cycle);
        }
    }

    return last;
}
/* Stop the cycle counter for region ewc and accumulate the elapsed cycles.
 *
 * Returns the number of cycles elapsed since the matching wallcycle_start,
 * or 0 when wc is NULL (cycle counting disabled) or when the reading is
 * invalid. With MPI, optionally barriers the group first. When the
 * all-counters matrix (wcc_all) is active, also tracks nesting depth and
 * records sub-counter transitions.
 */
double wallcycle_stop(gmx_wallcycle_t wc, int ewc)
{
    gmx_cycles_t now, elapsed;

    if (wc == NULL)
    {
        return 0;
    }

#if GMX_MPI
    if (wc->wc_barrier)
    {
        MPI_Barrier(wc->mpi_comm_mygroup);
    }
#endif

#ifdef DEBUG_WCYCLE
    debug_stop_check(wc, ewc);
#endif

    /* When processes or threads migrate between cores, the cycle counting
     * can get messed up if the cycle counter on different cores are not
     * synchronized. When this happens we expect both large negative and
     * positive cycle differences. We can detect negative cycle differences.
     * Detecting too large positive counts is difficult, since the count can
     * be large, especially for ewcRUN. If we detect a negative count,
     * we will not print the cycle accounting table. */
    now = gmx_cycles_read();
    if (now < wc->wcc[ewc].start)
    {
        elapsed              = 0;
        wc->haveInvalidCount = TRUE;
    }
    else
    {
        elapsed = now - wc->wcc[ewc].start;
    }
    wc->wcc[ewc].c += elapsed;
    wc->wcc[ewc].n++;

    if (wc->wcc_all)
    {
        wc->wc_depth--;
        if (ewc == ewcRUN)
        {
            wallcycle_all_stop(wc, ewc, now);
        }
        else if (wc->wc_depth == 2)
        {
            wallcycle_all_start(wc, ewc, now);
        }
    }

    return elapsed;
}