// Snapshot the heap's collection count so the destructor can detect any
// GC that runs while this verifier is in scope.  When verifygc is false
// the verifier is a no-op.
No_GC_Verifier::No_GC_Verifier(bool verifygc) {
  _verifygc = verifygc;
  if (!_verifygc) return;  // verification disabled: nothing to record
  CollectedHeap* heap = Universe::heap();
  assert(!heap->is_gc_active(), "GC active during No_GC_Verifier");
  _old_invocations = heap->total_collections();
}
// End of the pause: re-arm the underlying verifier by taking a fresh
// collection-count snapshot, so collections that happened while paused
// are not later reported as violations.
Pause_No_GC_Verifier::~Pause_No_GC_Verifier() {
  if (!_ngcv->_verifygc) return;  // verifier was never active
  // if we were verifying before, then reenable verification
  CollectedHeap* heap = Universe::heap();
  assert(!heap->is_gc_active(), "GC active during No_GC_Verifier");
  _ngcv->_old_invocations = heap->total_collections();
}
// On scope exit, check that the heap's collection count is unchanged
// since construction; a mismatch means a GC ran inside the guarded
// region, which is reported via fatal().
No_GC_Verifier::~No_GC_Verifier() {
  if (!_verifygc) return;  // verification disabled: nothing to check
  CollectedHeap* heap = Universe::heap();
  assert(!heap->is_gc_active(), "GC active during No_GC_Verifier");
  if (heap->total_collections() != _old_invocations) {
    fatal("collection in a No_GC_Verifier secured function");
  }
}
// Begin a pause of an active No_GC_Verifier.  Before suspending
// verification, confirm the verifier has not already been violated
// (i.e. no collection happened since its snapshot was taken).
Pause_No_GC_Verifier::Pause_No_GC_Verifier(No_GC_Verifier * ngcv) {
  _ngcv = ngcv;
  if (!_ngcv->_verifygc) return;  // verifier inactive: nothing to pause
  // if we were verifying, then make sure that nothing is
  // wrong before we "pause" verification
  CollectedHeap* heap = Universe::heap();
  assert(!heap->is_gc_active(), "GC active during No_GC_Verifier");
  if (heap->total_collections() != _ngcv->_old_invocations) {
    fatal("collection in a No_GC_Verifier secured function");
  }
}
// Periodic task: samples JVM heap statistics and memory-accounting data,
// folds them into process-lifetime peak values, and publishes both the
// heap record and I/O statistics through the shared-user-data interface.
// The whole body is compiled out unless AZ_PROXIED is defined.
//
// NOTE(review): peak_jvm_heap is 'volatile' but updated non-atomically;
// presumably this task runs on a single thread — confirm with the caller.
void SharedUserData::task() {
#ifdef AZ_PROXIED
  // Static variables store peak values seen during the life of the run.
  static volatile sud_jvm_heap_rev1_t peak_jvm_heap;
  static sud_io_rev1_t io_stats;
  static volatile bool initialized = false;
  if (!initialized) {
    memset((void*)(&peak_jvm_heap), 0, sizeof(peak_jvm_heap));
    initialized = true;
  }

  // Skip sampling during a safepoint, and before the heap exists.
  if (SafepointSynchronize::is_at_safepoint()) return;
  CollectedHeap* heap = Universe::heap();
  if (!heap) return;

  size_t live          = heap->last_gc_live_bytes();
  size_t used          = heap->used();
  size_t capacity      = heap->capacity();
  size_t max_capacity  = heap->max_capacity();
  size_t perm_used     = heap->permanent_used();
  size_t perm_capacity = heap->permanent_capacity();

  // Make sure that the numbers make sense when graphing:
  // enforce used <= capacity <= max_capacity and perm_used <= perm_capacity.
  if (capacity < used)         capacity = used;
  if (max_capacity < capacity) max_capacity = capacity;
  if (perm_capacity < perm_used) perm_capacity = perm_used;

  sud_jvm_heap_rev1_t jvm_heap;
  memset(&jvm_heap, 0, sizeof(jvm_heap));
  jvm_heap.revision = SUD_JVM_HEAP_REVISION;

  // Report the collector kind by name; unknown kinds get an empty name.
  switch (heap->kind()) {
  case CollectedHeap::GenCollectedHeap:
    strcpy(jvm_heap.name, "GenCollectedHeap");
    break;
  case CollectedHeap::ParallelScavengeHeap:
    strcpy(jvm_heap.name, "ParallelScavengeHeap");
    break;
  case CollectedHeap::PauselessHeap:
    strcpy(jvm_heap.name, "PauselessHeap");
    break;
  default:
    strcpy(jvm_heap.name, "");
  }
  if (heap->supports_tlab_allocation())
    jvm_heap.flags |= SUD_JVM_HEAP_FLAG_TLAB_ALLOCATION;
  if (heap->supports_inline_contig_alloc())
    jvm_heap.flags |= SUD_JVM_HEAP_FLAG_INLINE_CONTIG_ALLOC;

  uint64_t now = (uint64_t) os::javaTimeMillis();
  jvm_heap.timestamp_ms             = now;
  jvm_heap.live_bytes               = live;
  jvm_heap.used_bytes               = used;
  jvm_heap.capacity_bytes           = capacity;
  jvm_heap.max_capacity_bytes       = max_capacity;
  jvm_heap.permanent_used_bytes     = perm_used;
  jvm_heap.permanent_capacity_bytes = perm_capacity;
  jvm_heap.total_collections        = heap->total_collections();

  // Pull per-account memory accounting for this process from the proxy.
  libos::AccountInfo ai;
  az_allocid_t allocid = process_get_allocationid();
  sys_return_t ret = ai.inspectProcess(allocid);
  if (ret == SYSERR_NONE) {
    // Copy memory_accounting information into the sud structure.
    // Take care not to overflow the accounts past the maximum storable.
    const account_info_t *account_info = ai.getAccountInfo();
    uint64_t count = account_info->ac_count;
    if (count > SUD_MAX_ACCOUNTS) count = SUD_MAX_ACCOUNTS;
    jvm_heap.account_info.ac_count = count;
    for (uint64_t i = 0; i < count; i++) {
      jvm_heap.account_info.ac_array[i] = account_info->ac_array[i];
    }
  } else {
    warning("Failed to inspect memory accounting info (%d)", ret);
  }

  // For each statistic: raise the stored peak (recording when the new peak
  // was first seen), then copy peak value + timestamp into the outgoing
  // record.  'now' and the locals above are referenced by the expansion.
#define UPDATE_PEAK(struct_member,value) \
  if (peak_jvm_heap.peak_ ## struct_member ## _bytes < value) { \
    peak_jvm_heap.peak_ ## struct_member ## _bytes = value; \
    peak_jvm_heap.peak_ ## struct_member ## _timestamp_ms = now; \
  } \
  jvm_heap.peak_ ## struct_member ## _bytes = peak_jvm_heap.peak_ ## struct_member ## _bytes; \
  jvm_heap.peak_ ## struct_member ## _timestamp_ms = peak_jvm_heap.peak_ ## struct_member ## _timestamp_ms;

  UPDATE_PEAK(live, live);
  UPDATE_PEAK(used, used);
  UPDATE_PEAK(capacity, capacity);
  UPDATE_PEAK(max_capacity, max_capacity);
  UPDATE_PEAK(permanent_used, perm_used);
  UPDATE_PEAK(permanent_capacity, perm_capacity);
  UPDATE_PEAK(allocated, ai.getAllocatedBytes());
  UPDATE_PEAK(funded, ai.getFundedBytes());
  UPDATE_PEAK(overdraft, ai.getOverdraftBytes());
  UPDATE_PEAK(footprint, ai.getFootprintBytes());
  UPDATE_PEAK(committed, ai.getCommittedBytes());
  UPDATE_PEAK(grant, ai.getGrantBytes());
  UPDATE_PEAK(allocated_from_committed, ai.getAllocatedFromCommittedBytes());
  UPDATE_PEAK(default_allocated, ai.getDefaultAllocatedBytes());
  UPDATE_PEAK(default_committed, ai.getDefaultCommittedBytes());
  UPDATE_PEAK(default_footprint, ai.getDefaultFootprintBytes());
  UPDATE_PEAK(default_grant, ai.getDefaultGrantBytes());
  UPDATE_PEAK(heap_allocated, ai.getHeapAllocatedBytes());
  UPDATE_PEAK(heap_committed, ai.getHeapCommittedBytes());
  UPDATE_PEAK(heap_footprint, ai.getHeapFootprintBytes());
  UPDATE_PEAK(heap_grant, ai.getHeapGrantBytes());

  // Publish the heap record; failures are logged but non-fatal.
  ret = shared_user_data_set_jvm_heap_rev1(allocid, &jvm_heap);
  if (ret != SYSERR_NONE)
    warning("Failed to set jvm_heap shared user data (%d)", ret);

  // Refresh and publish I/O statistics.
  memset((void*)(&io_stats), 0, sizeof(io_stats));
  io_stats.revision = SUD_IO_REVISION;
  atcpn_stats_get_io_rev1(&io_stats);
  ret = shared_user_data_set_io_rev1(allocid, &io_stats);
  if (ret != SYSERR_NONE)
    warning("Failed to set io_stats shared user data (%d)", ret);
#endif // AZ_PROXIED
}