// Tag an already-reserved virtual memory region with the given memory type.
// No-op when NMT is below summary level or addr is NULL.
static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) {
  if (tracking_level() < NMT_summary) return;
  if (addr == NULL) return;
  ThreadCritical guard;
  // Recheck under the lock: NMT may have been shut down concurrently.
  if (tracking_level() < NMT_summary) return;
  VirtualMemoryTracker::set_reserved_region_type((address)addr, flag);
}
// Record that [addr, addr + size) has been committed, with the committing
// call stack. No-op when NMT is below summary level or addr is NULL.
static inline void record_virtual_memory_commit(void* addr, size_t size,
                                                const NativeCallStack& stack) {
  if (tracking_level() < NMT_summary) return;
  if (addr == NULL) return;
  ThreadCritical guard;
  // Recheck under the lock: NMT may have been shut down concurrently.
  if (tracking_level() < NMT_summary) return;
  VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
}
static inline void release_thread_stack(void* addr, size_t size) { if (tracking_level() < NMT_summary) return; if (addr != NULL) { // uses thread stack malloc slot for book keeping number of threads MallocMemorySummary::record_free(0, mtThreadStack); ThreadCritical tc; if (tracking_level() < NMT_summary) return; VirtualMemoryTracker::remove_released_region((address)addr, size); } }
// Record a region that is reserved and committed in one step (the trailing
// 'true' marks the reserved region as fully committed).
static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
                                                            const NativeCallStack& stack,
                                                            MEMFLAGS flag = mtNone) {
  if (tracking_level() < NMT_summary) return;
  if (addr == NULL) return;
  ThreadCritical guard;
  // Recheck under the lock: NMT may have been shut down concurrently.
  if (tracking_level() < NMT_summary) return;
  VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag, true);
}
static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack, MEMFLAGS flag = mtNone) { if (tracking_level() < NMT_summary) return; if (addr != NULL) { ThreadCritical tc; // Recheck to avoid potential racing during NMT shutdown if (tracking_level() < NMT_summary) return; VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag); } }
static inline void record_thread_stack(void* addr, size_t size) { if (tracking_level() < NMT_summary) return; if (addr != NULL) { // uses thread stack malloc slot for book keeping number of threads MallocMemorySummary::record_malloc(0, mtThreadStack); record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack); } }
// One-time NMT initialization during VM startup. Finishes setting up the
// virtual-memory tracker and allocates the query lock; any failure degrades
// NMT by shutting it down rather than aborting the VM.
void MemTracker::init() {
  NMT_TrackingLevel level = tracking_level();
  if (level >= NMT_summary) {
    // Late initialization may fail (e.g. out of memory); NMT is then disabled.
    if (!VirtualMemoryTracker::late_initialize(level)) {
      shutdown();
      return;
    }
    _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
    // Already OOM. It is unlikely, but still have to handle it.
    if (_query_lock == NULL) {
      shutdown();
    }
  }
}
// Transition NMT to the given (lower) tracking level. Only downgrades are
// supported; the new level is published with a fence before the trackers
// tear down their data so racing threads see the lowered level first.
bool MemTracker::transition_to(NMT_TrackingLevel level) {
  NMT_TrackingLevel current_level = tracking_level();

  assert(level != NMT_off || current_level == NMT_off, "Cannot transition NMT to off");

  if (current_level == level) {
    return true;
  } else if (current_level > level) {
    // Downgrade tracking level, we want to lower the tracking level first
    _tracking_level = level;
    // Make _tracking_level visible immediately.
    OrderAccess::fence();
    VirtualMemoryTracker::transition(current_level, level);
    MallocTracker::transition(current_level, level);
  } else {
    // Upgrading tracking level is not supported and has never been supported.
    // Allocating and deallocating malloc tracking structures is not thread safe and
    // leads to inconsistencies unless a lot coarser locks are added.
  }
  return true;
}
// Emit the final NMT report at VM exit: a summary report at summary level,
// a detail report at detail level, nothing when NMT is off/minimal.
static void final_report(outputStream* output) {
  const NMT_TrackingLevel lvl = tracking_level();
  if (lvl < NMT_summary) return;
  report(lvl == NMT_summary, output);
}
// Make a final report or report for hs_err file. static void error_report(outputStream* output) { if (tracking_level() >= NMT_summary) { report(true, output); // just print summary for error case. } }
// Create the RAII tracker used to record a virtual-memory release.
// Caller must already have checked that NMT is at least at summary level.
static inline Tracker get_virtual_memory_release_tracker() {
  assert(tracking_level() >= NMT_summary, "Check by caller");
  return Tracker(Tracker::release);
}
// Create the RAII tracker used to record a virtual-memory uncommit.
// Caller must already have checked that NMT is at least at summary level.
static inline Tracker get_virtual_memory_uncommit_tracker() {
  assert(tracking_level() >= NMT_summary, "Check by caller");
  return Tracker(Tracker::uncommit);
}
// Record arena size change. Arena size is the size of all arena // chuncks that backing up the arena. static inline void record_arena_size_change(int diff, MEMFLAGS flag) { if (tracking_level() < NMT_summary) return; MallocTracker::record_arena_size_change(diff, flag); }
// Record destruction of an arena static inline void record_arena_free(MEMFLAGS flag) { if (tracking_level() < NMT_summary) return; MallocTracker::record_arena_free(flag); }
// Size of the malloc tracking header attached to 'memblock';
// zero when NMT is off (no header is prepended in that case).
static size_t malloc_header_size(void* memblock) {
  return (tracking_level() == NMT_off) ? 0 : MallocTracker::get_header_size(memblock);
}
// Shutdown can only be issued via JCmd, and NMT JCmd is serialized by lock void MemTracker::shutdown() { // We can only shutdown NMT to minimal tracking level if it is ever on. if (tracking_level () > NMT_minimal) { transition_to(NMT_minimal); } }