// Clears the given card; returns true if the corresponding card
// should be processed.
bool clear_card(jbyte* entry) {
  if (_is_par) {
    while (true) {
      // In the parallel case, we may have to do this several times.
      jbyte entry_val = *entry;
      assert(entry_val != CardTableRS::clean_card_val(),
             "We shouldn't be looking at clean cards, and this should "
             "be the only place they get cleaned.");
      if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
          || _ct->is_prev_youngergen_card_val(entry_val)) {
        jbyte res =
          Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
        if (res == entry_val) {
          break;
        } else {
          assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
                 "The CAS above should only fail if another thread did "
                 "a GC write barrier.");
        }
      } else if (entry_val == CardTableRS::cur_youngergen_and_prev_nonclean_card) {
        // Parallelism shouldn't matter in this case.  Only the thread
        // assigned to scan the card should change this value.
        *entry = _ct->cur_youngergen_card_val();
        break;
      } else {
        assert(entry_val == _ct->cur_youngergen_card_val(),
               "Should be the only possibility.");
        // In this case, the card was clean before, and became
        // cur_youngergen only because of the processing of a promoted object.
        // We don't have to look at the card.
        return false;
      }
    }
    return true;
  } else {
    jbyte entry_val = *entry;
    assert(entry_val != CardTableRS::clean_card_val(),
           "We shouldn't be looking at clean cards, and this should "
           "be the only place they get cleaned.");
    assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
           "This should not be possible in the sequential case.");
    *entry = CardTableRS::clean_card_val();
    return true;
  }
}
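The parallel branch above is an instance of a claim-by-CAS protocol: a scanning thread swings the card from the dirty value it observed to the clean value, and a failed exchange is tolerated only when it was caused by a concurrent GC write barrier re-dirtying the card. A minimal standalone sketch of that pattern, using std::atomic and hypothetical card values rather than the CardTableRS constants, might look like this:

#include <atomic>
#include <cstdint>

// Illustrative sketch only: the constants and names below are hypothetical
// stand-ins, not the CardTableRS values or HotSpot APIs.
namespace card_claim_sketch {
  constexpr int8_t kClean      = -1;  // hypothetical "clean" card value
  constexpr int8_t kDirtyAgain = -2;  // hypothetical "write barrier hit us" value

  // Returns true if the calling thread cleaned the card and therefore owns
  // the job of scanning it; false if the card turned out to be clean already.
  inline bool claim_card(std::atomic<int8_t>* card) {
    int8_t observed = card->load(std::memory_order_relaxed);
    while (observed != kClean) {
      // Try to swing the card from the value we saw to "clean".  On failure,
      // 'observed' is refreshed with whatever another thread installed
      // (expected to be kDirtyAgain from a concurrent write barrier).
      if (card->compare_exchange_strong(observed, kClean,
                                        std::memory_order_acq_rel)) {
        return true;
      }
      // Otherwise loop and retry with the refreshed value.
    }
    return false;
  }
}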
void do_MemRegion(MemRegion mr) {
  // We start at the high end of "mr", walking backwards
  // while accumulating a contiguous dirty range of cards in
  // [start_of_non_clean, end_of_non_clean) which we then
  // process en masse.
  HeapWord* end_of_non_clean = mr.end();
  HeapWord* start_of_non_clean = end_of_non_clean;
  jbyte* entry = _ct->byte_for(mr.last());
  const jbyte* first_entry = _ct->byte_for(mr.start());
  while (entry >= first_entry) {
    HeapWord* cur = _ct->addr_for(entry);
    if (!clear_card(entry)) {
      // We hit a clean card; process any non-empty
      // dirty range accumulated so far.
      if (start_of_non_clean < end_of_non_clean) {
        MemRegion mr2(start_of_non_clean, end_of_non_clean);
        _dirty_card_closure->do_MemRegion(mr2);
      }
      // Reset the dirty window while continuing to
      // look for the next dirty window to process.
      end_of_non_clean = cur;
      start_of_non_clean = end_of_non_clean;
    }
    // Open the left end of the window one card to the left.
    start_of_non_clean = cur;
    // Note that "entry" leads "start_of_non_clean" in
    // its leftward excursion after this point
    // in the loop and, when we hit the left end of "mr",
    // will point off of the left end of the card-table
    // for "mr".
    entry--;
  }
  // If the first card of "mr" was dirty, we will have
  // been left with a dirty window, co-initial with "mr",
  // which we now process.
  if (start_of_non_clean < end_of_non_clean) {
    MemRegion mr2(start_of_non_clean, end_of_non_clean);
    _dirty_card_closure->do_MemRegion(mr2);
  }
}
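The walk above is a right-to-left sweep that grows a half-open dirty window and flushes it whenever a clean card is hit, so each maximal run of dirty cards is handed to the closure in a single call. The following is a simplified, self-contained sketch of the same window-accumulation idea over a plain boolean card array, with a hypothetical callback standing in for the dirty-card closure:

#include <cstddef>
#include <functional>

// Illustrative sketch only: the array and callback types are hypothetical
// stand-ins for the card table and the dirty-card closure.
namespace dirty_window_sketch {
  // Visits every maximal run of 'true' (dirty) entries in cards[0, n),
  // scanning from the right, reporting each run as a half-open index range.
  inline void for_each_dirty_window(const bool* cards, size_t n,
                                    const std::function<void(size_t, size_t)>& visit) {
    size_t end_of_window   = n;   // exclusive right end of the current window
    size_t start_of_window = n;   // inclusive left end; window empty when == end
    for (size_t i = n; i-- > 0; ) {
      if (cards[i]) {
        start_of_window = i;                      // open the window one card to the left
      } else {
        if (start_of_window < end_of_window) {
          visit(start_of_window, end_of_window);  // flush the accumulated window
        }
        end_of_window = i;                        // the next window must end here
        start_of_window = end_of_window;
      }
    }
    if (start_of_window < end_of_window) {
      visit(start_of_window, end_of_window);      // window co-initial with the array
    }
  }
}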
// Walks "mr" from its high end to its low end, accumulating the current
// contiguous dirty range in [start_of_non_clean, end_of_non_clean) and
// handing each completed range to _dirty_card_closure.
void do_MemRegion(MemRegion mr) {
  HeapWord* end_of_non_clean = mr.end();
  HeapWord* start_of_non_clean = end_of_non_clean;
  jbyte* entry = _ct->byte_for(mr.last());
  HeapWord* cur = _ct->addr_for(entry);
  while (mr.contains(cur)) {
    jbyte entry_val = *entry;
    if (!clear_card(entry)) {
      if (start_of_non_clean < end_of_non_clean) {
        MemRegion mr2(start_of_non_clean, end_of_non_clean);
        _dirty_card_closure->do_MemRegion(mr2);
      }
      end_of_non_clean = cur;
      start_of_non_clean = end_of_non_clean;
    }
    entry--;
    start_of_non_clean = cur;
    cur = _ct->addr_for(entry);
  }
  if (start_of_non_clean < end_of_non_clean) {
    MemRegion mr2(start_of_non_clean, end_of_non_clean);
    _dirty_card_closure->do_MemRegion(mr2);
  }
}
void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
#ifdef ASSERT
  if (gch->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif

  // hook up weak ref data so it can be used during Mark-Sweep
  assert(ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  set_ref_processor(rp);
  rp->setup_policy(clear_all_softrefs);

  gch->trace_heap_before_gc(_gc_tracer);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();

  // Increment the invocation count
  _total_invocations++;

  // Capture used regions for each generation that will be
  // subject to collection, so that card table adjustments can
  // be made intelligently (see clear / invalidate further below).
  gch->save_used_regions();

  allocate_stacks();

  mark_sweep_phase1(clear_all_softrefs);

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
#if defined(COMPILER2) || INCLUDE_JVMCI
  assert(DerivedPointerTable::is_active(), "Sanity");
  DerivedPointerTable::set_active(false);
#endif

  mark_sweep_phase3();

  mark_sweep_phase4();

  restore_marks();

  // Set saved marks for allocation profiler (and other things? -- dld)
  // (Should this be in general part?)
  gch->save_marks();

  deallocate_stacks();

  // If compaction completely evacuated the young generation then we
  // can clear the card table.  Otherwise, we must invalidate it
  // (consider all cards dirty).  In the future, we might consider doing
  // compaction within generations only, and doing card-table sliding.
  CardTableRS* rs = gch->rem_set();
  Generation* old_gen = gch->old_gen();

  // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
  if (gch->young_gen()->used() == 0) {
    // We've evacuated the young generation.
    rs->clear_into_younger(old_gen);
  } else {
    // Invalidate the cards corresponding to the currently used
    // region and clear those corresponding to the evacuated region.
    rs->invalidate_or_clear(old_gen);
  }

  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  set_ref_processor(NULL);

  // Update heap occupancy information which is used as
  // input to the soft ref clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  // Update time of last gc for all generations we collected
  // (which currently is all the generations in the heap).
  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  gch->update_time_of_last_gc(now);

  gch->trace_heap_after_gc(_gc_tracer);
}
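The final timestamp computation avoids os::javaTimeMillis() because wall-clock time can be adjusted backwards; instead it derives a millisecond value from a monotonic nanosecond clock. A small sketch of the same idea in portable C++, with std::chrono::steady_clock standing in for os::javaTimeNanos() and a hypothetical helper name, is shown below:

#include <chrono>
#include <cstdint>

// Illustrative sketch only: the namespace and function name are hypothetical.
namespace gc_time_sketch {
  // Milliseconds taken from a monotonic clock: the value never decreases,
  // unlike a wall-clock source that can be stepped (the "time-warp" problem
  // noted in the comment above).
  inline int64_t monotonic_millis() {
    using namespace std::chrono;
    return duration_cast<milliseconds>(
             steady_clock::now().time_since_epoch()).count();
  }
}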
// Delegate to the card table's per-space verification, using the saved boundary.
virtual void do_space(Space* s) {
  _ct->verify_space(s, _boundary);
}