void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("3");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_root_pointer_closure,
                           NULL,  // do not touch code cache here
                           &GenMarkSweep::adjust_pointer_closure);

  g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                             &GenMarkSweep::adjust_pointer_closure);

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
  pg->adjust_pointers();
}
void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer());
  GenMarkSweep::trace("3");

  SharedHeap* sh = SharedHeap::heap();

  // Need cleared claim bits for the strong roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           false, // not scavenging.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_pointer_closure,
                           NULL,  // do not touch code cache here
                           &GenMarkSweep::adjust_klass_closure);

  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure);

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
}
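// Both phase-3 variants above do the same job: every root and every
// reference field in the heap is rewritten to the forwarding address
// computed in phase 2, so that when objects actually move in phase 4
// all references already point at the new locations. Below is a
// minimal standalone sketch of that adjustment step; ToyObject and
// adjust_all_pointers are hypothetical stand-ins for HotSpot's
// oop/closure machinery, not real VM code.

#include <cstddef>
#include <vector>

struct ToyObject {
  ToyObject* forwardee;            // new location, set in phase 2
  std::vector<ToyObject*> fields;  // outgoing references
  ToyObject() : forwardee(NULL) {}
};

// Analogue of applying adjust_pointer_closure to the strong roots,
// the weak roots, and then every live object's reference fields.
static void adjust_all_pointers(std::vector<ToyObject*>& roots,
                                std::vector<ToyObject*>& objects) {
  for (size_t i = 0; i < roots.size(); ++i) {
    if (roots[i] != NULL && roots[i]->forwardee != NULL) {
      roots[i] = roots[i]->forwardee;   // rewrite root to new location
    }
  }
  for (size_t i = 0; i < objects.size(); ++i) {
    std::vector<ToyObject*>& f = objects[i]->fields;
    for (size_t j = 0; j < f.size(); ++j) {
      if (f[j] != NULL && f[j]->forwardee != NULL) {
        f[j] = f[j]->forwardee;         // rewrite field to new location
      }
    }
  }
}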
void VM_GenCollectForPermanentAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  SharedHeap* heap = (SharedHeap*)Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);
  switch (heap->kind()) {
    case (CollectedHeap::GenCollectedHeap): {
      GenCollectedHeap* gch = (GenCollectedHeap*)heap;
      gch->do_full_collection(gch->must_clear_all_soft_refs(),
                              gch->n_gens() - 1);
      break;
    }
#ifndef SERIALGC
    case (CollectedHeap::G1CollectedHeap): {
      G1CollectedHeap* g1h = (G1CollectedHeap*)heap;
      g1h->do_full_collection(_gc_cause == GCCause::_last_ditch_collection);
      break;
    }
#endif // SERIALGC
    default:
      ShouldNotReachHere();
  }
  _res = heap->perm_gen()->allocate(_size, false);
  assert(heap->is_in_reserved_or_null(_res), "result not in heap");
  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}
void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
  if (sh->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");

  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  // Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
}
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace(" 1");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_code_root_closure,
                           &GenMarkSweep::follow_root_closure);

  // Process reference objects found during marking
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
  rp->setup_policy(clear_all_softrefs);
  rp->process_discovered_references(&GenMarkSweep::is_alive,
                                    &GenMarkSweep::keep_alive,
                                    &GenMarkSweep::follow_stack_closure,
                                    NULL);

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack.is_empty(), "stack should be empty by now");

  // Follow code cache roots (has to be done after system dictionary,
  // assumes all live klasses are marked)
  CodeCache::do_unloading(&GenMarkSweep::is_alive, &GenMarkSweep::keep_alive,
                          purged_class);
  GenMarkSweep::follow_stack();

  // Update subklass/sibling/implementor links of live klasses
  GenMarkSweep::follow_weak_klass_links();
  assert(GenMarkSweep::_marking_stack.is_empty(), "stack should be empty by now");

  // Visit memoized MDO's and clear any unmarked weak refs
  GenMarkSweep::follow_mdo_weak_refs();
  assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(&GenMarkSweep::is_alive);
  StringTable::unlink(&GenMarkSweep::is_alive);

  assert(GenMarkSweep::_marking_stack.is_empty(), "stack should be empty by now");
}
HeapWord* HeapInspection::start_of_perm_gen() {
  if (is_shared_heap()) {
    SharedHeap* sh = SharedHeap::heap();
    return sh->perm_gen()->used_region().start();
  }
#ifndef SERIALGC
  ParallelScavengeHeap* psh = (ParallelScavengeHeap*)Universe::heap();
  return psh->perm_gen()->object_space()->used_region().start();
#else
  ShouldNotReachHere();
  return NULL;
#endif // SERIALGC
}
int ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
  size_t desired_survivor_size =
    (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
  size_t total = 0;
  int age = 1;
  assert(sizes[0] == 0, "no objects with age zero should be recorded");
  while (age < table_size) {
    total += sizes[age];
    // check if including objects of age 'age' made us pass the desired
    // size, if so 'age' is the new threshold
    if (total > desired_survivor_size) break;
    age++;
  }
  int result = age < MaxTenuringThreshold ? age : MaxTenuringThreshold;

  if (PrintTenuringDistribution || UsePerfData) {
    if (PrintTenuringDistribution) {
      gclog_or_tty->cr();
      gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
                             desired_survivor_size*oopSize, result, MaxTenuringThreshold);
    }

    total = 0;
    age = 1;
    while (age < table_size) {
      total += sizes[age];
      if (sizes[age] > 0) {
        if (PrintTenuringDistribution) {
          gclog_or_tty->print_cr("- age %3d: %10ld bytes, %10ld total",
                                 age, sizes[age]*oopSize, total*oopSize);
        }
      }
      if (UsePerfData) {
        _perf_sizes[age]->set_value(sizes[age]*oopSize);
      }
      age++;
    }
    if (UsePerfData) {
      SharedHeap* sh = SharedHeap::heap();
      CollectorPolicy* policy = sh->collector_policy();
      GCPolicyCounters* gc_counters = policy->counters();
      gc_counters->tenuring_threshold()->set_value(result);
      gc_counters->desired_survivor_size()->set_value(desired_survivor_size*oopSize);
    }
  }

  return result;
}
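// With TargetSurvivorRatio at its default of 50, the desired survivor
// occupancy is half the survivor capacity, and the threshold is the
// lowest age whose cumulative volume pushes past that target. Below is
// a standalone sketch with made-up numbers; compute_threshold and the
// sample sizes are illustrative only, and it tracks plain bytes rather
// than HotSpot's word counts.

#include <cstddef>
#include <cstdio>

static const int    kTableSize            = 16;
static const int    kMaxTenuringThreshold = 15;
static const double kTargetSurvivorRatio  = 50.0;  // HotSpot default

// Simplified model of ageTable::compute_tenuring_threshold:
// sizes[age] holds the bytes occupied by objects of that age.
static int compute_threshold(const size_t sizes[], size_t survivor_capacity) {
  size_t desired = (size_t)(survivor_capacity * kTargetSurvivorRatio / 100.0);
  size_t total = 0;
  int age = 1;
  while (age < kTableSize) {
    total += sizes[age];
    if (total > desired) break;  // including this age overflows the target
    age++;
  }
  return age < kMaxTenuringThreshold ? age : kMaxTenuringThreshold;
}

int main() {
  // 1 MB survivor space; ages 1-3 hold 200 KB, 250 KB, 150 KB.
  size_t sizes[kTableSize] = {0, 200*1024, 250*1024, 150*1024};
  // desired = 512 KB; ages 1+2 total 450 KB <= 512 KB, but adding
  // age 3 gives 600 KB > 512 KB, so the threshold becomes 3.
  printf("threshold = %d\n", compute_threshold(sizes, 1024*1024));
  return 0;
}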
void HeapInspection::heap_inspection(outputStream* st, bool need_prologue) {
  ResourceMark rm;
  HeapWord* ref;

  CollectedHeap* heap = Universe::heap();
  bool is_shared_heap = false;
  switch (heap->kind()) {
    case CollectedHeap::G1CollectedHeap:
    case CollectedHeap::GenCollectedHeap: {
      is_shared_heap = true;
      SharedHeap* sh = (SharedHeap*)heap;
      if (need_prologue) {
        sh->gc_prologue(false /* !full */); // get any necessary locks, etc.
      }
      ref = sh->perm_gen()->used_region().start();
      break;
    }
#ifndef SERIALGC
    case CollectedHeap::ParallelScavengeHeap: {
      ParallelScavengeHeap* psh = (ParallelScavengeHeap*)heap;
      ref = psh->perm_gen()->object_space()->used_region().start();
      break;
    }
#endif // SERIALGC
    default:
      ShouldNotReachHere(); // Unexpected heap kind for this op
  }
  // Collect klass instance info
  KlassInfoTable cit(KlassInfoTable::cit_size, ref);
  if (!cit.allocation_failed()) {
    // Iterate over objects in the heap
    RecordInstanceClosure ric(&cit);
    // If this operation encounters a bad object when using CMS,
    // consider using safe_object_iterate() which avoids perm gen
    // objects that may contain bad references.
    Universe::heap()->object_iterate(&ric);

    // Report if certain classes are not counted because of
    // running out of C-heap for the histogram.
    size_t missed_count = ric.missed_count();
    if (missed_count != 0) {
      st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
                   " total instances in data below",
                   missed_count);
    }
    // Sort and print klass instance info
    KlassInfoHisto histo("\n"
                         " num     #instances         #bytes  class name\n"
                         "----------------------------------------------",
                         KlassInfoHisto::histo_initial_size);
    HistoClosure hc(&histo);
    cit.iterate(&hc);
    histo.sort();
    histo.print_on(st);
  } else {
    st->print_cr("WARNING: Ran out of C-heap; histogram not generated");
  }
  st->flush();

  if (need_prologue && is_shared_heap) {
    SharedHeap* sh = (SharedHeap*)heap;
    sh->gc_epilogue(false /* !full */); // release all acquired locks, etc.
  }
}
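// The histogram logic above amounts to a per-class tally followed by a
// sort: walk every object, accumulate an instance count and byte total
// keyed by class, then print the classes in descending size order. A
// standalone sketch of that pattern follows; ClassStat and the toy
// object list are illustrative stand-ins for HotSpot's KlassInfoTable
// and KlassInfoHisto.

#include <algorithm>
#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct ClassStat {
  long instances;
  long bytes;
};

int main() {
  // Toy stand-in for iterating the heap: (class name, object size) pairs.
  const std::pair<std::string, long> objects[] = {
    {"java.lang.String", 24}, {"java.lang.String", 24},
    {"[B", 4096}, {"java.lang.Object", 16},
  };

  // Tally instances and bytes per class, as RecordInstanceClosure
  // does into the KlassInfoTable.
  std::map<std::string, ClassStat> table;
  for (const auto& obj : objects) {
    ClassStat& s = table[obj.first];
    s.instances += 1;
    s.bytes += obj.second;
  }

  // Sort descending by total bytes and print in jmap -histo style.
  std::vector<std::pair<std::string, ClassStat> > rows(table.begin(), table.end());
  std::sort(rows.begin(), rows.end(),
            [](const std::pair<std::string, ClassStat>& a,
               const std::pair<std::string, ClassStat>& b) {
              return a.second.bytes > b.second.bytes;
            });

  printf(" num     #instances         #bytes  class name\n");
  printf("----------------------------------------------\n");
  int num = 1;
  for (const auto& row : rows) {
    printf("%4d: %14ld %14ld  %s\n",
           num++, row.second.instances, row.second.bytes, row.first.c_str());
  }
  return 0;
}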
void HeapInspection::epilogue() {
  if (is_shared_heap()) {
    SharedHeap* sh = SharedHeap::heap();
    sh->gc_epilogue(false /* !full */); // release all acquired locks, etc.
  }
}
void HeapInspection::prologue() {
  if (is_shared_heap()) {
    SharedHeap* sh = SharedHeap::heap();
    sh->gc_prologue(false /* !full */); // get any necessary locks, etc.
  }
}
void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
  if (sh->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation methodOops may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  sh->perm_gen()->stat_record()->invocations++;

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  if (VerifyDuringGC) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->checkConcurrentMark();
  }

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  // We must invalidate the perm-gen rs, so that it gets rebuilt.
  GenRemSet* rs = sh->rem_set();
  rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/);

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  // Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
}
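// Both invoke_at_safepoint versions drive the four classic mark-compact
// phases in order: phase 1 marks live objects, phase 2 computes each
// survivor's forwarding address, phase 3 rewrites all references
// (sketched after mark_sweep_phase3 above), and phase 4 slides objects
// to their new locations. Below is a toy sliding compaction covering
// phases 2 and 4 over an array of fixed-size cells; Cell and
// mark_compact are illustrative only, not HotSpot code.

#include <cstddef>
#include <vector>

struct Cell {
  bool live;       // phase 1 result (assumed already set here)
  int  forwardee;  // phase 2 result: new index after compaction
  int  payload;
};

static void mark_compact(std::vector<Cell>& heap) {
  // Phase 2: compute forwarding addresses by sliding live cells left.
  int next_free = 0;
  for (size_t i = 0; i < heap.size(); ++i) {
    if (heap[i].live) heap[i].forwardee = next_free++;
  }
  // Phase 3 would rewrite every reference via forwardee; omitted here
  // because Cell holds no references (see the phase-3 sketch above).
  // Phase 4: move each live cell to its forwarding address. Ascending
  // order is safe because forwardee <= i for a sliding compaction.
  for (size_t i = 0; i < heap.size(); ++i) {
    if (heap[i].live) heap[heap[i].forwardee] = heap[i];
  }
  heap.resize((size_t)next_free);  // everything past next_free is free space
}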
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer());
  GenMarkSweep::trace(" 1");

  SharedHeap* sh = SharedHeap::heap();

  // Need cleared claim bits for the strong roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           false, // not scavenging.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_code_root_closure,
                           &GenMarkSweep::follow_klass_closure);

  // Process reference objects found during marking
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity");

  rp->setup_policy(clear_all_softrefs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&GenMarkSweep::is_alive,
                                      &GenMarkSweep::keep_alive,
                                      &GenMarkSweep::follow_stack_closure,
                                      NULL,
                                      gc_timer());
  gc_tracer()->report_gc_reference_stats(stats);

  // This is the point where the entire marking should have completed.
  assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);

  // Unload nmethods.
  CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);

  // Delete entries for dead interned strings.
  StringTable::unlink(&GenMarkSweep::is_alive);

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
    Universe::heap()->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including
    // identity hash values, ages, etc) is preserved, and the mark
    // word is set to markOop::marked_value - effectively removing
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
    if (!VerifySilently) {
      gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
    }
    Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord);
    if (!VerifySilently) {
      gclog_or_tty->print_cr("]");
    }
  }

  gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
}
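// Both phase-1 variants drain an explicit marking stack
// (_marking_stack) rather than recursing, so arbitrarily deep object
// graphs cannot overflow the native stack. A minimal sketch of that
// traversal over a toy object graph follows; Node and mark_from_roots
// are illustrative, not HotSpot code.

#include <cstddef>
#include <vector>

// Toy object: a mark bit plus outgoing references.
struct Node {
  bool marked;
  std::vector<Node*> refs;
  Node() : marked(false) {}
};

// Iterative transitive marking, analogous to pushing the strong roots
// with follow_root_closure and then draining the marking stack.
static void mark_from_roots(const std::vector<Node*>& roots) {
  std::vector<Node*> stack;
  for (size_t i = 0; i < roots.size(); ++i) {
    Node* r = roots[i];
    if (r != NULL && !r->marked) {
      r->marked = true;
      stack.push_back(r);
    }
  }
  while (!stack.empty()) {
    Node* n = stack.back();
    stack.pop_back();
    for (size_t i = 0; i < n->refs.size(); ++i) {
      Node* child = n->refs[i];
      if (child != NULL && !child->marked) {
        child->marked = true;    // mark before pushing: each node
        stack.push_back(child);  // is visited exactly once
      }
    }
  }
}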