// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or perform any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}
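For context, PSScavenge::invoke() is only reached through a VM operation running on the VM thread at a safepoint, which is what the asserts at the top of the method check. The sketch below is a minimal, illustrative VM operation that routes a GC request through this policy method; the class name VM_IllustrativeParallelGC and its wiring are assumptions for illustration, not the actual HotSpot callers.

// Illustrative only: a hypothetical VM operation that triggers a
// policy-driven scavenge on the parallel collector. The real callers in
// HotSpot are the existing GC VM operations; this sketch just shows the
// safepoint/VM-thread context that PSScavenge::invoke() expects.
class VM_IllustrativeParallelGC : public VM_GC_Operation {
 public:
  VM_IllustrativeParallelGC(uint gc_count_before, GCCause::Cause cause)
    : VM_GC_Operation(gc_count_before, cause) { }

  // Reuse an existing VMOp_Type value purely for the sketch.
  virtual VMOp_Type type() const { return VMOp_ParallelGCSystemGC; }

  virtual void doit() {
    // Runs on the VM thread at a safepoint, satisfying the asserts in
    // PSScavenge::invoke().
    GCCauseSetter gccs(Universe::heap(), _gc_cause);
    // The policy method decides whether a full GC must follow the scavenge.
    PSScavenge::invoke();
  }
};

A Java thread would hand such an operation to the VM thread with VMThread::execute(&op), which blocks until doit() has run inside a safepoint.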
void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available. Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  }

  if (_result == NULL) {
    if (UseConcMarkSweepGC) {
      if (CMSClassUnloadingEnabled) {
        MetaspaceGC::set_should_concurrent_collect(true);
      }
      // For CMS, expand since the collection is going to be concurrent.
      _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    }
    if (_result == NULL) {
      // Don't clear the soft refs yet.
      if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
        gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
      }
      heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
      // After a GC try to allocate without expanding. Could fail
      // and expansion will be tried below.
      _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    }
    if (_result == NULL) {
      // If still failing, allow the Metaspace to expand.
      // See delta_capacity_until_GC() for explanation of the
      // amount of the expansion.
      // This should work unless there really is no more space
      // or a MaxMetaspaceSize has been specified on the command line.
      _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
      if (_result == NULL) {
        // If expansion failed, do a last-ditch collection and try allocating
        // again. A last-ditch collection will clear softrefs. This
        // behavior is similar to the last-ditch collection done for perm
        // gen when it was full and a collection for failed allocation
        // did not free perm gen space.
        heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
        _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
      }
    }
    if (Verbose && PrintGCDetails && _result == NULL) {
      gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
                             SIZE_FORMAT, _size);
    }
  }

  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}
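To show where this operation fits, the sketch below is a simplified picture of how a Java thread that failed a metaspace allocation could hand the retry to the VM thread. The helper name retry_metadata_allocation is hypothetical, the retry loop, GC-locker handling, and prologue/epilogue details of the real slow path are omitted, and the constructor arguments follow the JDK 8-era signature (an assumption for other versions).

// Illustrative sketch only: enqueue a VM_CollectForMetadataAllocation from a
// Java thread after a failed metaspace allocation. Not the actual HotSpot
// slow path, which loops and handles the GC locker.
MetaWord* retry_metadata_allocation(ClassLoaderData* loader_data,
                                    size_t word_size,
                                    Metaspace::MetadataType mdtype) {
  uint gc_count;
  uint full_gc_count;
  {
    // Read consistent collection counts under the Heap_lock so the VM
    // operation can tell whether a GC already ran in the meantime.
    MutexLocker ml(Heap_lock);
    gc_count      = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_CollectForMetadataAllocation op(loader_data, word_size, mdtype,
                                     gc_count, full_gc_count,
                                     GCCause::_metadata_GC_threshold);
  VMThread::execute(&op);  // blocks until doit() above has run at a safepoint
  return op.result();      // NULL if even the last-ditch collection failed
}

The point of passing the collection counts is that the operation's prologue can skip the GC entirely when another thread's collection has already freed space, which is also why doit() re-tries the plain allocate() before doing anything else.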