void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
  if (sh->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");

  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  // Phase 1: mark all live objects transitively from the roots.
  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  // Phase 2: compute new (forwarding) addresses for the live objects.
  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  // Phase 3: adjust all pointers to refer to the forwarding addresses.
  mark_sweep_phase3();

  // Phase 4: move the objects to their new locations.
  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();

  GenMarkSweep::deallocate_stacks();

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  // Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
}
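// The following is a minimal, self-contained sketch (not HotSpot code) of the
// four-phase mark-compact scheme that the mark_sweep_phase1..4 calls above
// drive. The Obj layout, the single 'ref' field, and the toy heap and root
// set are hypothetical simplifications chosen only to make the phase ordering
// concrete; real objects hold arbitrary reference fields and live in a
// contiguous heap rather than a std::vector.
#include <stdio.h>
#include <stddef.h>
#include <vector>

struct Obj {
  bool marked;   // set by phase 1
  int  forward;  // index after compaction (phase 2), -1 if unset
  int  ref;      // single outgoing reference (index into heap), -1 if none
  int  payload;
};

int main() {
  std::vector<Obj> heap = { { false, -1,  2, 10 },   // 0: live root
                            { false, -1, -1, 20 },   // 1: garbage
                            { false, -1, -1, 30 } }; // 2: reachable from 0
  std::vector<int> roots = { 0 };

  // Phase 1: mark transitively from the roots.
  std::vector<int> stack(roots);
  while (!stack.empty()) {
    int i = stack.back(); stack.pop_back();
    if (heap[i].marked) continue;
    heap[i].marked = true;
    if (heap[i].ref >= 0) stack.push_back(heap[i].ref);
  }

  // Phase 2: compute forwarding addresses by sliding live objects left.
  int next = 0;
  for (size_t i = 0; i < heap.size(); i++) {
    if (heap[i].marked) heap[i].forward = next++;
  }

  // Phase 3: rewrite all references (including roots) to forwarding addresses.
  for (size_t i = 0; i < heap.size(); i++) {
    if (heap[i].marked && heap[i].ref >= 0) heap[i].ref = heap[heap[i].ref].forward;
  }
  for (size_t i = 0; i < roots.size(); i++) roots[i] = heap[roots[i]].forward;

  // Phase 4: move the objects themselves.
  std::vector<Obj> compacted(next);
  for (size_t i = 0; i < heap.size(); i++) {
    if (heap[i].marked) compacted[heap[i].forward] = heap[i];
  }

  printf("live objects after compaction: %d\n", next);  // prints 2
  printf("root payload: %d -> %d\n",
         compacted[roots[0]].payload,
         compacted[compacted[roots[0]].ref].payload);    // prints 10 -> 30
  return 0;
}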
int ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
  size_t desired_survivor_size =
    (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
  size_t total = 0;
  int age = 1;
  assert(sizes[0] == 0, "no objects with age zero should be recorded");
  while (age < table_size) {
    total += sizes[age];
    // check if including objects of age 'age' made us pass the desired
    // size, if so 'age' is the new threshold
    if (total > desired_survivor_size) break;
    age++;
  }
  int result = age < MaxTenuringThreshold ? age : MaxTenuringThreshold;

  if (PrintTenuringDistribution || UsePerfData) {
    if (PrintTenuringDistribution) {
      gclog_or_tty->cr();
      gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
                             desired_survivor_size * oopSize, result, MaxTenuringThreshold);
    }

    total = 0;
    age = 1;
    while (age < table_size) {
      total += sizes[age];
      if (sizes[age] > 0) {
        if (PrintTenuringDistribution) {
          gclog_or_tty->print_cr("- age %3d: %10ld bytes, %10ld total",
                                 age, sizes[age] * oopSize, total * oopSize);
        }
      }
      if (UsePerfData) {
        _perf_sizes[age]->set_value(sizes[age] * oopSize);
      }
      age++;
    }
    if (UsePerfData) {
      SharedHeap* sh = SharedHeap::heap();
      CollectorPolicy* policy = sh->collector_policy();
      GCPolicyCounters* gc_counters = policy->counters();
      gc_counters->tenuring_threshold()->set_value(result);
      gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
    }
  }
  return result;
}
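// A minimal standalone sketch (not HotSpot code) of the threshold computation
// above: accumulate per-age survivor sizes until the running total exceeds
// the desired fraction of survivor space, then clamp at the maximum. The
// sketch_compute_threshold name, the table of per-age word counts, and the
// capacity/ratio values are hypothetical; the loop mirrors the accumulation
// logic of compute_tenuring_threshold().
#include <stdio.h>
#include <stddef.h>

static int sketch_compute_threshold(const size_t* sizes, int table_size,
                                    size_t survivor_capacity,      // in words
                                    int target_survivor_ratio,     // percent
                                    int max_tenuring_threshold) {
  // Objects of age <= threshold together should not exceed the desired
  // fraction of the survivor space.
  size_t desired =
    (size_t)(((double)survivor_capacity * target_survivor_ratio) / 100);
  size_t total = 0;
  int age = 1;                       // index 0 is unused (no age-zero objects)
  while (age < table_size) {
    total += sizes[age];
    if (total > desired) break;      // including this age overflows the target
    age++;
  }
  return age < max_tenuring_threshold ? age : max_tenuring_threshold;
}

int main() {
  // Hypothetical distribution: word counts per age (index 0 unused).
  const size_t sizes[] = { 0, 4000, 2500, 1200, 300, 50 };
  int t = sketch_compute_threshold(sizes, 6,
                                   /* survivor_capacity     */ 8192,
                                   /* TargetSurvivorRatio   */ 50,
                                   /* MaxTenuringThreshold  */ 15);
  // desired = 4096 words; age 1 totals 4000, age 2 totals 6500 > 4096,
  // so the new threshold is 2.
  printf("tenuring threshold = %d\n", t);
  return 0;
}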
void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
  if (sh->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation methodOops may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  sh->perm_gen()->stat_record()->invocations++;

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  if (VerifyDuringGC) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->checkConcurrentMark();
  }

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();

  GenMarkSweep::deallocate_stacks();

  // We must invalidate the perm-gen rs, so that it gets rebuilt.
  GenRemSet* rs = sh->rem_set();
  rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/);

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  // Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
}