void VM_GC_HeapInspection::doit() { HandleMark hm; CollectedHeap* ch = Universe::heap(); ch->ensure_parsability(false); // must happen, even if collection does // not happen (e.g. due to GC_locker) if (_full_gc) { // The collection attempt below would be skipped anyway if // the gc locker is held. The following dump may then be a tad // misleading to someone expecting only live objects to show // up in the dump (see CR 6944195). Just issue a suitable warning // in that case and do not attempt to do a collection. // The latter is a subtle point, because even a failed attempt // to GC will, in fact, induce one in the future, which we // probably want to avoid in this case because the GC that we may // be about to attempt holds value for us only // if it happens now and not if it happens in the eventual // future. if (GC_locker::is_active()) { warning("GC locker is held; pre-dump GC was skipped"); } else { ch->collect_as_vm_thread(GCCause::_heap_inspection); } } HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */); }
// VM-thread operation that retries a failed Metaspace allocation,
// escalating through: re-allocate, (CMS-only) expand, GC + allocate,
// expand, last-ditch GC + allocate.  The outcome is left in _result
// (NULL on total failure); if the GC locker blocked the needed
// collection, the operation is marked gc-locked so the caller can retry.
void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);
  CollectedHeap* heap = Universe::heap();
  // GCCauseSetter records _gc_cause on the heap for the duration of this
  // operation (restored by its destructor).
  GCCauseSetter gccs(heap, _gc_cause);
  // Check again if the space is available. Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  // (Skipped under MetadataAllocationFailALot so the fail-a-lot
  // stress mode still exercises the GC paths below.)
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  }
  if (_result == NULL) {
    if (UseConcMarkSweepGC) {
      if (CMSClassUnloadingEnabled) {
        // Ask the concurrent collector to also collect the Metaspace.
        MetaspaceGC::set_should_concurrent_collect(true);
      }
      // For CMS expand since the collection is going to be concurrent.
      _result =
        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    }
    if (_result == NULL) {
      // Don't clear the soft refs yet.
      if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
        gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
      }
      heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
      // After a GC try to allocate without expanding.  Could fail
      // and expansion will be tried below.
      _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    }
    if (_result == NULL) {
      // If still failing, allow the Metaspace to expand.
      // See delta_capacity_until_GC() for explanation of the
      // amount of the expansion.
      // This should work unless there really is no more space
      // or a MaxMetaspaceSize has been specified on the command line.
      _result =
        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
      if (_result == NULL) {
        // If expansion failed, do a last-ditch collection and try allocating
        // again.  A last-ditch collection will clear softrefs.  This
        // behavior is similar to the last-ditch collection done for perm
        // gen when it was full and a collection for failed allocation
        // did not free perm gen space.
        heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
        _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
      }
    }
    if (Verbose && PrintGCDetails && _result == NULL) {
      gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
                             SIZE_FORMAT, _size);
    }
  }
  // If we still failed and a GC is pending behind the GC locker, flag it so
  // the requesting thread knows to stall and retry after the locker-induced
  // GC rather than throwing OOM immediately.
  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}