// Number of live threads, excluding threads that have already begun
// exiting but are still present in the live count.
static jlong get_live_thread_count() {
  jlong live = _live_threads_count->get_value();
  return live - _exiting_threads_count;
}
// Mirror the policy's change_young_gen_for_maj_pauses flag into its counter.
inline void update_change_young_gen_for_maj_pauses() {
  _change_young_gen_for_maj_pauses_counter->set_value(
      ps_size_policy()->change_young_gen_for_maj_pauses());
}
// Publish the average major-collection interval, scaled by 1000.0 before
// truncation to jlong (presumably seconds -> milliseconds; confirm units).
inline void update_avg_major_interval() {
  double scaled = ps_size_policy()->_avg_major_interval->average() * 1000.0;
  _avg_major_interval->set_value((jlong) scaled);
}
// Mirror the policy's calculated promotion size (bytes) into its counter.
inline void update_promo_size() {
  _promo_size_counter->set_value(
      size_policy()->calculated_promo_size_in_bytes());
}
// Publish the padded average of promoted bytes, truncated to jlong.
inline void update_avg_promoted_padded_avg() {
  double padded = ps_size_policy()->avg_promoted()->padded_average();
  _avg_promoted_padded_avg_counter->set_value((jlong) padded);
}
// Mirror the policy's decrement_tenuring_threshold_for_gc_cost decision
// into its counter.
inline void update_decrement_tenuring_threshold_for_gc_cost() {
  _decrement_tenuring_threshold_for_gc_cost_counter->set_value(
      size_policy()->decrement_tenuring_threshold_for_gc_cost());
}
// Publish the major-collection slope, scaled by 1000.0 before truncation
// to jlong (the x1000 presumably preserves three decimal places in the
// integral counter — confirm against the counter's consumers).
// Uses the 1000.0 double literal for consistency with the other scaling
// updaters in this file (e.g. the pause/interval counters); the result
// is identical since the product is computed in floating point.
inline void update_major_collection_slope_counter() {
  _major_collection_slope_counter->set_value(
      (jlong)(size_policy()->major_collection_slope() * 1000.0));
}
// Publish the average live size in the young generation, truncated to jlong.
inline void update_avg_young_live_counter() {
  double avg_live = size_policy()->avg_young_live()->average();
  _avg_young_live_counter->set_value((jlong) avg_live);
}
// Publish the deviation of the survived-size estimator, truncated to jlong.
inline void update_avg_survived_dev_counters() {
  double dev = size_policy()->_avg_survived->deviation();
  _avg_survived_dev_counter->set_value((jlong) dev);
}
// Publish the most recent minor-pause sample, scaled by 1000.0 before
// truncation to jlong (presumably seconds -> milliseconds; confirm units).
inline void update_minor_pause_counter() {
  double last = size_policy()->avg_minor_pause()->last_sample();
  _minor_pause_counter->set_value((jlong)(last * 1000.0));
}
// Publish the minor-GC cost scaled by 100.0 (presumably a fraction
// converted to a percentage; confirm against the counter's consumers).
inline void update_minor_gc_cost_counter() {
  double cost = size_policy()->minor_gc_cost();
  _minor_gc_cost_counter->set_value((jlong)(cost * 100.0));
}
// Size recorded in the class-methods-size counter, or -1 when perf data
// collection is disabled.
static jlong class_method_data_size() {
  if (!UsePerfData) {
    return -1;
  }
  return _class_methods_size->get_value();
}
// Accumulate `size` into the class-methods-size counter; no-op when perf
// data collection is disabled.
static void add_class_method_size(int size) {
  if (!UsePerfData) {
    return;
  }
  _class_methods_size->inc(size);
}
// Number of daemon threads, excluding daemon threads that have already
// begun exiting but are still present in the daemon count.
static jlong get_daemon_thread_count() {
  jlong daemons = _daemon_threads_count->get_value();
  return daemons - _exiting_daemon_threads_count;
}
// Record the young generation's capacity (bytes) in its counter.
inline void update_young_capacity(size_t size_in_bytes) {
  _young_capacity_counter->set_value(size_in_bytes);
}
// Publish the padded average of the survived-size estimator, truncated
// to jlong.
inline void update_avg_survived_padded_avg_counters() {
  double padded = size_policy()->_avg_survived->padded_average();
  _avg_survived_padded_avg_counter->set_value((jlong) padded);
}
// Record whether the survivor space overflowed during the last scavenge.
inline void update_survivor_overflowed(bool survivor_overflowed) {
  _survivor_overflowed_counter->set_value(survivor_overflowed);
}
// Mirror the policy's change_young_gen_for_throughput decision into its
// counter.
inline void update_change_young_gen_for_throughput() {
  _change_young_gen_for_throughput_counter->set_value(
      size_policy()->change_young_gen_for_throughput());
}
// Mirror the policy's decrement_tenuring_threshold_for_survivor_limit
// decision into its counter.
inline void update_decrement_tenuring_threshold_for_survivor_limit() {
  _decrement_tenuring_threshold_for_survivor_limit_counter->set_value(
      size_policy()->decrement_tenuring_threshold_for_survivor_limit());
}
// Mirror the policy's decrease_for_footprint decision into its counter.
inline void update_decrease_for_footprint() {
  _decrease_for_footprint_counter->set_value(
      size_policy()->decrease_for_footprint());
}
inline void update_eden_size() { size_t eden_size_in_bytes = size_policy()->calculated_eden_size_in_bytes(); _eden_size_counter->set_value(eden_size_in_bytes); }
// Mirror the policy's decide_at_full_gc value into its counter.
inline void update_decide_at_full_gc_counter() {
  _decide_at_full_gc_counter->set_value(
      size_policy()->decide_at_full_gc());
}
// Publish the average minor pause, scaled by 1000.0 before truncation to
// jlong (presumably seconds -> milliseconds; confirm units).
inline void update_avg_minor_pause_counter() {
  double avg = size_policy()->avg_minor_pause()->average();
  _avg_minor_pause_counter->set_value((jlong)(avg * 1000.0));
}
// Publish the minor-pause-vs-young-size slope, scaled by 1000.0 before
// truncation to jlong (the x1000 presumably preserves three decimal
// places in the integral counter — confirm against the counter's
// consumers).
// Uses the 1000.0 double literal for consistency with the other scaling
// updaters in this file (e.g. the pause/interval counters); the result
// is identical since the product is computed in floating point.
inline void update_minor_pause_young_slope_counter() {
  _minor_pause_young_slope_counter->set_value(
      (jlong)(size_policy()->minor_pause_young_slope() * 1000.0));
}
// Publish the padded average of pretenured bytes, truncated to jlong.
inline void update_avg_pretenured_padded_avg() {
  double padded = ps_size_policy()->_avg_pretenured->padded_average();
  _avg_pretenured_padded_avg->set_value((jlong) padded);
}
// Record the survived size (bytes) from the last scavenge in its counter.
inline void update_survived(size_t survived) {
  _survived_counter->set_value(survived);
}
// Mirror the policy's change_old_gen_for_min_pauses decision into its
// counter.
inline void update_change_old_gen_for_min_pauses() {
  _change_old_gen_for_min_pauses->set_value(
      ps_size_policy()->change_old_gen_for_min_pauses());
}
// Record the promoted size (bytes) from the last scavenge in its counter.
inline void update_promoted(size_t promoted) {
  _promoted_counter->set_value(promoted);
}
// Mirror the policy's live-space estimate into its counter.
inline void update_live_space() {
  _live_space->set_value(ps_size_policy()->live_space());
}
// Peak thread count recorded since the counter was last reset.
static jlong get_peak_thread_count() {
  return _peak_threads_count->get_value();
}