void stat_exit (void) { generation *gen; Time gc_cpu = 0; Time gc_elapsed = 0; Time init_cpu = 0; Time init_elapsed = 0; Time mut_cpu = 0; Time mut_elapsed = 0; Time exit_cpu = 0; Time exit_elapsed = 0; W_ tot_alloc; W_ alloc; if (RtsFlags.GcFlags.giveStats != NO_GC_STATS) { char temp[BIG_STRING_LEN]; Time tot_cpu; Time tot_elapsed; nat i, g, total_collections = 0; getProcessTimes( &tot_cpu, &tot_elapsed ); tot_elapsed -= start_init_elapsed; tot_alloc = calcTotalAllocated(); // allocated since the last GC alloc = tot_alloc - GC_tot_alloc; GC_tot_alloc = tot_alloc; /* Count total garbage collections */ for (g = 0; g < RtsFlags.GcFlags.generations; g++) total_collections += generations[g].collections; /* avoid divide by zero if tot_cpu is measured as 0.00 seconds -- SDM */ if (tot_cpu == 0.0) tot_cpu = 1; if (tot_elapsed == 0.0) tot_elapsed = 1; if (RtsFlags.GcFlags.giveStats >= VERBOSE_GC_STATS) { statsPrintf("%9" FMT_SizeT " %9.9s %9.9s", (W_)alloc*sizeof(W_), "", ""); statsPrintf(" %6.3f %6.3f\n\n", 0.0, 0.0); } for (i = 0; i < RtsFlags.GcFlags.generations; i++) { gc_cpu += GC_coll_cpu[i]; gc_elapsed += GC_coll_elapsed[i]; } // heapCensus() is called by the GC, so RP and HC time are // included in the GC stats. We therefore subtract them to // obtain the actual GC cpu time. gc_cpu -= PROF_VAL(RP_tot_time + HC_tot_time); gc_elapsed -= PROF_VAL(RPe_tot_time + HCe_tot_time); init_cpu = get_init_cpu(); init_elapsed = get_init_elapsed(); exit_cpu = end_exit_cpu - start_exit_cpu; exit_elapsed = end_exit_elapsed - start_exit_elapsed; mut_elapsed = start_exit_elapsed - end_init_elapsed - gc_elapsed; mut_cpu = start_exit_cpu - end_init_cpu - gc_cpu - PROF_VAL(RP_tot_time + HC_tot_time); if (mut_cpu < 0) { mut_cpu = 0; } if (RtsFlags.GcFlags.giveStats >= SUMMARY_GC_STATS) { showStgWord64(GC_tot_alloc*sizeof(W_), temp, rtsTrue/*commas*/); statsPrintf("%16s bytes allocated in the heap\n", temp); showStgWord64(GC_tot_copied*sizeof(W_), temp, rtsTrue/*commas*/); statsPrintf("%16s bytes copied during GC\n", temp); if ( residency_samples > 0 ) { showStgWord64(max_residency*sizeof(W_), temp, rtsTrue/*commas*/); statsPrintf("%16s bytes maximum residency (%" FMT_Word " sample(s))\n", temp, residency_samples); } showStgWord64(max_slop*sizeof(W_), temp, rtsTrue/*commas*/); statsPrintf("%16s bytes maximum slop\n", temp); statsPrintf("%16" FMT_SizeT " MB total memory in use (%" FMT_SizeT " MB lost due to fragmentation)\n\n", (size_t)(peak_mblocks_allocated * MBLOCK_SIZE_W) / (1024 * 1024 / sizeof(W_)), (size_t)(peak_mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W - hw_alloc_blocks * BLOCK_SIZE_W) / (1024 * 1024 / sizeof(W_))); /* Print garbage collections in each gen */ statsPrintf(" Tot time (elapsed) Avg pause Max pause\n"); for (g = 0; g < RtsFlags.GcFlags.generations; g++) { gen = &generations[g]; statsPrintf(" Gen %2d %5d colls, %5d par %6.3fs %6.3fs %3.4fs %3.4fs\n", gen->no, gen->collections, gen->par_collections, TimeToSecondsDbl(GC_coll_cpu[g]), TimeToSecondsDbl(GC_coll_elapsed[g]), gen->collections == 0 ? 0 : TimeToSecondsDbl(GC_coll_elapsed[g] / gen->collections), TimeToSecondsDbl(GC_coll_max_pause[g])); } #if defined(THREADED_RTS) if (RtsFlags.ParFlags.parGcEnabled && n_capabilities > 1) { statsPrintf("\n Parallel GC work balance: %.2f%% (serial 0%%, perfect 100%%)\n", 100 * (((double)GC_par_tot_copied / (double)GC_par_max_copied) - 1) / (n_capabilities - 1) ); } #endif statsPrintf("\n"); #if defined(THREADED_RTS) statsPrintf(" TASKS: %d (%d bound, %d peak workers (%d total), using -N%d)\n", taskCount, taskCount - workerCount, peakWorkerCount, workerCount, n_capabilities); statsPrintf("\n"); { nat i; SparkCounters sparks = { 0, 0, 0, 0, 0, 0}; for (i = 0; i < n_capabilities; i++) { sparks.created += capabilities[i]->spark_stats.created; sparks.dud += capabilities[i]->spark_stats.dud; sparks.overflowed+= capabilities[i]->spark_stats.overflowed; sparks.converted += capabilities[i]->spark_stats.converted; sparks.gcd += capabilities[i]->spark_stats.gcd; sparks.fizzled += capabilities[i]->spark_stats.fizzled; } statsPrintf(" SPARKS: %" FMT_Word " (%" FMT_Word " converted, %" FMT_Word " overflowed, %" FMT_Word " dud, %" FMT_Word " GC'd, %" FMT_Word " fizzled)\n\n", sparks.created + sparks.dud + sparks.overflowed, sparks.converted, sparks.overflowed, sparks.dud, sparks.gcd, sparks.fizzled); } #endif statsPrintf(" INIT time %7.3fs (%7.3fs elapsed)\n", TimeToSecondsDbl(init_cpu), TimeToSecondsDbl(init_elapsed)); statsPrintf(" MUT time %7.3fs (%7.3fs elapsed)\n", TimeToSecondsDbl(mut_cpu), TimeToSecondsDbl(mut_elapsed)); statsPrintf(" GC time %7.3fs (%7.3fs elapsed)\n", TimeToSecondsDbl(gc_cpu), TimeToSecondsDbl(gc_elapsed)); #ifdef PROFILING statsPrintf(" RP time %7.3fs (%7.3fs elapsed)\n", TimeToSecondsDbl(RP_tot_time), TimeToSecondsDbl(RPe_tot_time)); statsPrintf(" PROF time %7.3fs (%7.3fs elapsed)\n", TimeToSecondsDbl(HC_tot_time), TimeToSecondsDbl(HCe_tot_time)); #endif statsPrintf(" EXIT time %7.3fs (%7.3fs elapsed)\n", TimeToSecondsDbl(exit_cpu), TimeToSecondsDbl(exit_elapsed)); statsPrintf(" Total time %7.3fs (%7.3fs elapsed)\n\n", TimeToSecondsDbl(tot_cpu), TimeToSecondsDbl(tot_elapsed)); #ifndef THREADED_RTS statsPrintf(" %%GC time %5.1f%% (%.1f%% elapsed)\n\n", TimeToSecondsDbl(gc_cpu)*100/TimeToSecondsDbl(tot_cpu), TimeToSecondsDbl(gc_elapsed)*100/TimeToSecondsDbl(tot_elapsed)); #endif if (mut_cpu == 0) { showStgWord64(0, temp, rtsTrue/*commas*/); } else { showStgWord64( (StgWord64)((GC_tot_alloc*sizeof(W_)) / TimeToSecondsDbl(mut_cpu)), temp, rtsTrue/*commas*/); } statsPrintf(" Alloc rate %s bytes per MUT second\n\n", temp); statsPrintf(" Productivity %5.1f%% of total user, %.1f%% of total elapsed\n\n", TimeToSecondsDbl(tot_cpu - gc_cpu - PROF_VAL(RP_tot_time + HC_tot_time) - init_cpu) * 100 / TimeToSecondsDbl(tot_cpu), TimeToSecondsDbl(tot_cpu - gc_cpu - PROF_VAL(RP_tot_time + HC_tot_time) - init_cpu) * 100 / TimeToSecondsDbl(tot_elapsed)); /* TICK_PRINT(1); TICK_PRINT(2); REPORT(TOTAL_CALLS); TICK_PRINT_TOT(1); TICK_PRINT_TOT(2); */ #if USE_PAPI papi_stats_report(); #endif #if defined(THREADED_RTS) && defined(PROF_SPIN) { nat g; statsPrintf("gc_alloc_block_sync: %"FMT_Word64"\n", gc_alloc_block_sync.spin); statsPrintf("whitehole_spin: %"FMT_Word64"\n", whitehole_spin); for (g = 0; g < RtsFlags.GcFlags.generations; g++) { statsPrintf("gen[%d].sync: %"FMT_Word64"\n", g, generations[g].sync.spin); } } #endif } if (RtsFlags.GcFlags.giveStats == ONELINE_GC_STATS) { char *fmt1, *fmt2; if (RtsFlags.MiscFlags.machineReadable) { fmt1 = " [(\"bytes allocated\", \"%llu\")\n"; fmt2 = " ,(\"num_GCs\", \"%d\")\n" " ,(\"average_bytes_used\", \"%ld\")\n" " ,(\"max_bytes_used\", \"%ld\")\n" " ,(\"num_byte_usage_samples\", \"%ld\")\n" " ,(\"peak_megabytes_allocated\", \"%lu\")\n" " ,(\"init_cpu_seconds\", \"%.3f\")\n" " ,(\"init_wall_seconds\", \"%.3f\")\n" " ,(\"mutator_cpu_seconds\", \"%.3f\")\n" " ,(\"mutator_wall_seconds\", \"%.3f\")\n" " ,(\"GC_cpu_seconds\", \"%.3f\")\n" " ,(\"GC_wall_seconds\", \"%.3f\")\n" " ]\n"; } else { fmt1 = "<<ghc: %llu bytes, "; fmt2 = "%d GCs, %ld/%ld avg/max bytes residency (%ld samples), %luM in use, %.3f INIT (%.3f elapsed), %.3f MUT (%.3f elapsed), %.3f GC (%.3f elapsed) :ghc>>\n"; } /* print the long long separately to avoid bugginess on mingwin (2001-07-02, mingw-0.5) */ statsPrintf(fmt1, GC_tot_alloc*(StgWord64)sizeof(W_)); statsPrintf(fmt2, total_collections, residency_samples == 0 ? 0 : cumulative_residency*sizeof(W_)/residency_samples, max_residency*sizeof(W_), residency_samples, (unsigned long)(peak_mblocks_allocated * MBLOCK_SIZE / (1024L * 1024L)), TimeToSecondsDbl(init_cpu), TimeToSecondsDbl(init_elapsed), TimeToSecondsDbl(mut_cpu), TimeToSecondsDbl(mut_elapsed), TimeToSecondsDbl(gc_cpu), TimeToSecondsDbl(gc_elapsed)); } statsFlush(); statsClose(); } if (GC_coll_cpu) { stgFree(GC_coll_cpu); GC_coll_cpu = NULL; } if (GC_coll_elapsed) { stgFree(GC_coll_elapsed); GC_coll_elapsed = NULL; } if (GC_coll_max_pause) { stgFree(GC_coll_max_pause); GC_coll_max_pause = NULL; } }
void stat_exit(int alloc) { generation *gen; Ticks gc_cpu = 0; Ticks gc_elapsed = 0; Ticks init_cpu = 0; Ticks init_elapsed = 0; Ticks mut_cpu = 0; Ticks mut_elapsed = 0; Ticks exit_cpu = 0; Ticks exit_elapsed = 0; if (RtsFlags.GcFlags.giveStats != NO_GC_STATS) { char temp[BIG_STRING_LEN]; Ticks tot_cpu; Ticks tot_elapsed; nat i, g, total_collections = 0; getProcessTimes( &tot_cpu, &tot_elapsed ); tot_elapsed -= start_init_elapsed; GC_tot_alloc += alloc; /* Count total garbage collections */ for (g = 0; g < RtsFlags.GcFlags.generations; g++) total_collections += generations[g].collections; /* avoid divide by zero if tot_cpu is measured as 0.00 seconds -- SDM */ if (tot_cpu == 0.0) tot_cpu = 1; if (tot_elapsed == 0.0) tot_elapsed = 1; if (RtsFlags.GcFlags.giveStats >= VERBOSE_GC_STATS) { statsPrintf("%9ld %9.9s %9.9s", (lnat)alloc*sizeof(W_), "", ""); statsPrintf(" %5.2f %5.2f\n\n", 0.0, 0.0); } for (i = 0; i < RtsFlags.GcFlags.generations; i++) { gc_cpu += GC_coll_cpu[i]; gc_elapsed += GC_coll_elapsed[i]; } init_cpu = get_init_cpu(); init_elapsed = get_init_elapsed(); exit_cpu = end_exit_cpu - start_exit_cpu; exit_elapsed = end_exit_elapsed - start_exit_elapsed; mut_elapsed = start_exit_elapsed - end_init_elapsed - gc_elapsed; mut_cpu = start_exit_cpu - end_init_cpu - gc_cpu - PROF_VAL(RP_tot_time + HC_tot_time); if (mut_cpu < 0) { mut_cpu = 0; } if (RtsFlags.GcFlags.giveStats >= SUMMARY_GC_STATS) { showStgWord64(GC_tot_alloc*sizeof(W_), temp, rtsTrue/*commas*/); statsPrintf("%16s bytes allocated in the heap\n", temp); showStgWord64(GC_tot_copied*sizeof(W_), temp, rtsTrue/*commas*/); statsPrintf("%16s bytes copied during GC\n", temp); if ( residency_samples > 0 ) { showStgWord64(max_residency*sizeof(W_), temp, rtsTrue/*commas*/); statsPrintf("%16s bytes maximum residency (%ld sample(s))\n", temp, residency_samples); } showStgWord64(max_slop*sizeof(W_), temp, rtsTrue/*commas*/); statsPrintf("%16s bytes maximum slop\n", temp); statsPrintf("%16ld MB total memory in use (%ld MB lost due to fragmentation)\n\n", peak_mblocks_allocated * MBLOCK_SIZE_W / (1024 * 1024 / sizeof(W_)), (peak_mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W - hw_alloc_blocks * BLOCK_SIZE_W) / (1024 * 1024 / sizeof(W_))); /* Print garbage collections in each gen */ statsPrintf(" Tot time (elapsed) Avg pause Max pause\n"); for (g = 0; g < RtsFlags.GcFlags.generations; g++) { gen = &generations[g]; statsPrintf(" Gen %2d %5d colls, %5d par %5.2fs %5.2fs %3.4fs %3.4fs\n", gen->no, gen->collections, gen->par_collections, TICK_TO_DBL(GC_coll_cpu[g]), TICK_TO_DBL(GC_coll_elapsed[g]), gen->collections == 0 ? 0 : TICK_TO_DBL(GC_coll_elapsed[g] / gen->collections), TICK_TO_DBL(GC_coll_max_pause[g])); } #if defined(THREADED_RTS) if (RtsFlags.ParFlags.parGcEnabled) { statsPrintf("\n Parallel GC work balance: %.2f (%ld / %ld, ideal %d)\n", (double)GC_par_avg_copied / (double)GC_par_max_copied, (lnat)GC_par_avg_copied, (lnat)GC_par_max_copied, RtsFlags.ParFlags.nNodes ); } #endif statsPrintf("\n"); #if defined(THREADED_RTS) { nat i; Task *task; statsPrintf(" MUT time (elapsed) GC time (elapsed)\n"); for (i = 0, task = all_tasks; task != NULL; i++, task = task->all_link) { statsPrintf(" Task %2d %-8s : %6.2fs (%6.2fs) %6.2fs (%6.2fs)\n", i, (task->worker) ? "(worker)" : "(bound)", TICK_TO_DBL(task->mut_time), TICK_TO_DBL(task->mut_etime), TICK_TO_DBL(task->gc_time), TICK_TO_DBL(task->gc_etime)); } } statsPrintf("\n"); { nat i; SparkCounters sparks = { 0, 0, 0, 0, 0, 0}; for (i = 0; i < n_capabilities; i++) { sparks.created += capabilities[i].spark_stats.created; sparks.dud += capabilities[i].spark_stats.dud; sparks.overflowed+= capabilities[i].spark_stats.overflowed; sparks.converted += capabilities[i].spark_stats.converted; sparks.gcd += capabilities[i].spark_stats.gcd; sparks.fizzled += capabilities[i].spark_stats.fizzled; } statsPrintf(" SPARKS: %ld (%ld converted, %ld overflowed, %ld dud, %ld GC'd, %ld fizzled)\n\n", sparks.created + sparks.dud + sparks.overflowed, sparks.converted, sparks.overflowed, sparks.dud, sparks.gcd, sparks.fizzled); } #endif statsPrintf(" INIT time %6.2fs (%6.2fs elapsed)\n", TICK_TO_DBL(init_cpu), TICK_TO_DBL(init_elapsed)); statsPrintf(" MUT time %6.2fs (%6.2fs elapsed)\n", TICK_TO_DBL(mut_cpu), TICK_TO_DBL(mut_elapsed)); statsPrintf(" GC time %6.2fs (%6.2fs elapsed)\n", TICK_TO_DBL(gc_cpu), TICK_TO_DBL(gc_elapsed)); #ifdef PROFILING statsPrintf(" RP time %6.2fs (%6.2fs elapsed)\n", TICK_TO_DBL(RP_tot_time), TICK_TO_DBL(RPe_tot_time)); statsPrintf(" PROF time %6.2fs (%6.2fs elapsed)\n", TICK_TO_DBL(HC_tot_time), TICK_TO_DBL(HCe_tot_time)); #endif statsPrintf(" EXIT time %6.2fs (%6.2fs elapsed)\n", TICK_TO_DBL(exit_cpu), TICK_TO_DBL(exit_elapsed)); statsPrintf(" Total time %6.2fs (%6.2fs elapsed)\n\n", TICK_TO_DBL(tot_cpu), TICK_TO_DBL(tot_elapsed)); #ifndef THREADED_RTS statsPrintf(" %%GC time %5.1f%% (%.1f%% elapsed)\n\n", TICK_TO_DBL(gc_cpu)*100/TICK_TO_DBL(tot_cpu), TICK_TO_DBL(gc_elapsed)*100/TICK_TO_DBL(tot_elapsed)); #endif if (tot_cpu - GC_tot_cpu - PROF_VAL(RP_tot_time + HC_tot_time) == 0) showStgWord64(0, temp, rtsTrue/*commas*/); else showStgWord64( (StgWord64)((GC_tot_alloc*sizeof(W_))/ TICK_TO_DBL(tot_cpu - GC_tot_cpu - PROF_VAL(RP_tot_time + HC_tot_time))), temp, rtsTrue/*commas*/); statsPrintf(" Alloc rate %s bytes per MUT second\n\n", temp); statsPrintf(" Productivity %5.1f%% of total user, %.1f%% of total elapsed\n\n", TICK_TO_DBL(tot_cpu - GC_tot_cpu - PROF_VAL(RP_tot_time + HC_tot_time) - init_cpu) * 100 / TICK_TO_DBL(tot_cpu), TICK_TO_DBL(tot_cpu - GC_tot_cpu - PROF_VAL(RP_tot_time + HC_tot_time) - init_cpu) * 100 / TICK_TO_DBL(tot_elapsed)); /* TICK_PRINT(1); TICK_PRINT(2); REPORT(TOTAL_CALLS); TICK_PRINT_TOT(1); TICK_PRINT_TOT(2); */ #if USE_PAPI papi_stats_report(); #endif #if defined(THREADED_RTS) && defined(PROF_SPIN) { nat g; statsPrintf("gc_alloc_block_sync: %"FMT_Word64"\n", gc_alloc_block_sync.spin); statsPrintf("whitehole_spin: %"FMT_Word64"\n", whitehole_spin); for (g = 0; g < RtsFlags.GcFlags.generations; g++) { statsPrintf("gen[%d].sync: %"FMT_Word64"\n", g, generations[g].sync.spin); } } #endif } if (RtsFlags.GcFlags.giveStats == ONELINE_GC_STATS) { char *fmt1, *fmt2; if (RtsFlags.MiscFlags.machineReadable) { fmt1 = " [(\"bytes allocated\", \"%llu\")\n"; fmt2 = " ,(\"num_GCs\", \"%d\")\n" " ,(\"average_bytes_used\", \"%ld\")\n" " ,(\"max_bytes_used\", \"%ld\")\n" " ,(\"num_byte_usage_samples\", \"%ld\")\n" " ,(\"peak_megabytes_allocated\", \"%lu\")\n" " ,(\"init_cpu_seconds\", \"%.2f\")\n" " ,(\"init_wall_seconds\", \"%.2f\")\n" " ,(\"mutator_cpu_seconds\", \"%.2f\")\n" " ,(\"mutator_wall_seconds\", \"%.2f\")\n" " ,(\"GC_cpu_seconds\", \"%.2f\")\n" " ,(\"GC_wall_seconds\", \"%.2f\")\n" " ]\n"; } else { fmt1 = "<<ghc: %llu bytes, "; fmt2 = "%d GCs, %ld/%ld avg/max bytes residency (%ld samples), %luM in use, %.2f INIT (%.2f elapsed), %.2f MUT (%.2f elapsed), %.2f GC (%.2f elapsed) :ghc>>\n"; } /* print the long long separately to avoid bugginess on mingwin (2001-07-02, mingw-0.5) */ statsPrintf(fmt1, GC_tot_alloc*(StgWord64)sizeof(W_)); statsPrintf(fmt2, total_collections, residency_samples == 0 ? 0 : cumulative_residency*sizeof(W_)/residency_samples, max_residency*sizeof(W_), residency_samples, (unsigned long)(peak_mblocks_allocated * MBLOCK_SIZE / (1024L * 1024L)), TICK_TO_DBL(init_cpu), TICK_TO_DBL(init_elapsed), TICK_TO_DBL(mut_cpu), TICK_TO_DBL(mut_elapsed), TICK_TO_DBL(gc_cpu), TICK_TO_DBL(gc_elapsed)); } statsFlush(); statsClose(); } if (GC_coll_cpu) { stgFree(GC_coll_cpu); GC_coll_cpu = NULL; } if (GC_coll_elapsed) { stgFree(GC_coll_elapsed); GC_coll_elapsed = NULL; } if (GC_coll_max_pause) { stgFree(GC_coll_max_pause); GC_coll_max_pause = NULL; } }
void stat_endGC (Capability *cap, gc_thread *gct, W_ live, W_ copied, W_ slop, nat gen, nat par_n_threads, W_ par_max_copied, W_ par_tot_copied) { W_ tot_alloc; W_ alloc; if (RtsFlags.GcFlags.giveStats != NO_GC_STATS || RtsFlags.ProfFlags.doHeapProfile) // heap profiling needs GC_tot_time { Time cpu, elapsed, gc_cpu, gc_elapsed; // Has to be emitted while all caps stopped for GC, but before GC_END. // See trac.haskell.org/ThreadScope/wiki/RTSsummaryEvents // for a detailed design rationale of the current setup // of GC eventlog events. traceEventGcGlobalSync(cap); // Emitted before GC_END on all caps, which simplifies tools code. traceEventGcStats(cap, CAPSET_HEAP_DEFAULT, gen, copied * sizeof(W_), slop * sizeof(W_), /* current loss due to fragmentation */ (mblocks_allocated * BLOCKS_PER_MBLOCK - n_alloc_blocks) * BLOCK_SIZE_W * sizeof(W_), par_n_threads, par_max_copied * sizeof(W_), par_tot_copied * sizeof(W_)); getProcessTimes(&cpu, &elapsed); // Post EVENT_GC_END with the same timestamp as used for stats // (though converted from Time=StgInt64 to EventTimestamp=StgWord64). // Here, as opposed to other places, the event is emitted on the cap // that initiates the GC and external tools expect it to have the same // timestamp as used in +RTS -s calculcations. traceEventGcEndAtT(cap, TimeToNS(elapsed - start_init_elapsed)); gc_elapsed = elapsed - gct->gc_start_elapsed; gc_cpu = cpu - gct->gc_start_cpu; /* For the moment we calculate both per-HEC and total allocation. * There is thus redundancy here, but for the moment we will calculate * it both the old and new way and assert they're the same. * When we're sure it's working OK then we can simplify things. */ tot_alloc = calcTotalAllocated(); // allocated since the last GC alloc = tot_alloc - GC_tot_alloc; GC_tot_alloc = tot_alloc; if (RtsFlags.GcFlags.giveStats == VERBOSE_GC_STATS) { W_ faults = getPageFaults(); statsPrintf("%9" FMT_SizeT " %9" FMT_SizeT " %9" FMT_SizeT, alloc*sizeof(W_), copied*sizeof(W_), live*sizeof(W_)); statsPrintf(" %6.3f %6.3f %8.3f %8.3f %4" FMT_Word " %4" FMT_Word " (Gen: %2d)\n", TimeToSecondsDbl(gc_cpu), TimeToSecondsDbl(gc_elapsed), TimeToSecondsDbl(cpu), TimeToSecondsDbl(elapsed - start_init_elapsed), faults - gct->gc_start_faults, gct->gc_start_faults - GC_end_faults, gen); GC_end_faults = faults; statsFlush(); } GC_coll_cpu[gen] += gc_cpu; GC_coll_elapsed[gen] += gc_elapsed; if (GC_coll_max_pause[gen] < gc_elapsed) { GC_coll_max_pause[gen] = gc_elapsed; } GC_tot_copied += (StgWord64) copied; GC_par_max_copied += (StgWord64) par_max_copied; GC_par_tot_copied += (StgWord64) par_tot_copied; GC_tot_cpu += gc_cpu; traceEventHeapSize(cap, CAPSET_HEAP_DEFAULT, mblocks_allocated * MBLOCK_SIZE_W * sizeof(W_)); if (gen == RtsFlags.GcFlags.generations-1) { /* major GC? */ if (live > max_residency) { max_residency = live; } current_residency = live; residency_samples++; cumulative_residency += live; traceEventHeapLive(cap, CAPSET_HEAP_DEFAULT, live * sizeof(W_)); } if (slop > max_slop) max_slop = slop; } if (rub_bell) { debugBelch("\b\b\b \b\b\b"); rub_bell = 0; } #if USE_PAPI if(papi_is_reporting) { /* Switch to counting mutator events */ if (gen == 0) { papi_stop_gc0_count(); } else { papi_stop_gc1_count(); } papi_start_mutator_count(); } #endif }
void stat_endGC (gc_thread *gct, lnat alloc, lnat live, lnat copied, nat gen, lnat max_copied, lnat avg_copied, lnat slop) { if (RtsFlags.GcFlags.giveStats != NO_GC_STATS || RtsFlags.ProfFlags.doHeapProfile) // heap profiling needs GC_tot_time { Ticks cpu, elapsed, thread_gc_cpu, gc_cpu, gc_elapsed; getProcessTimes(&cpu, &elapsed); gc_elapsed = elapsed - gct->gc_start_elapsed; thread_gc_cpu = getThreadCPUTime() - gct->gc_start_thread_cpu; gc_cpu = cpu - gct->gc_start_cpu; taskDoneGC(gct->cap->running_task, thread_gc_cpu, gc_elapsed); if (RtsFlags.GcFlags.giveStats == VERBOSE_GC_STATS) { nat faults = getPageFaults(); statsPrintf("%9ld %9ld %9ld", alloc*sizeof(W_), copied*sizeof(W_), live*sizeof(W_)); statsPrintf(" %5.2f %5.2f %7.2f %7.2f %4ld %4ld (Gen: %2d)\n", TICK_TO_DBL(gc_cpu), TICK_TO_DBL(gc_elapsed), TICK_TO_DBL(cpu), TICK_TO_DBL(elapsed - start_init_elapsed), faults - gct->gc_start_faults, gct->gc_start_faults - GC_end_faults, gen); GC_end_faults = faults; statsFlush(); } GC_coll_cpu[gen] += gc_cpu; GC_coll_elapsed[gen] += gc_elapsed; if (GC_coll_max_pause[gen] < gc_elapsed) { GC_coll_max_pause[gen] = gc_elapsed; } GC_tot_copied += (StgWord64) copied; GC_tot_alloc += (StgWord64) alloc; GC_par_max_copied += (StgWord64) max_copied; GC_par_avg_copied += (StgWord64) avg_copied; GC_tot_cpu += gc_cpu; if (gen == RtsFlags.GcFlags.generations-1) { /* major GC? */ if (live > max_residency) { max_residency = live; } current_residency = live; residency_samples++; cumulative_residency += live; } if (slop > max_slop) max_slop = slop; } if (rub_bell) { debugBelch("\b\b\b \b\b\b"); rub_bell = 0; } #if USE_PAPI if(papi_is_reporting) { /* Switch to counting mutator events */ if (gen == 0) { papi_stop_gc0_count(); } else { papi_stop_gc1_count(); } papi_start_mutator_count(); } #endif }