void do_MemRegion(MemRegion mr) {
  // We start at the high end of "mr", walking backwards
  // while accumulating a contiguous dirty range of cards in
  // [start_of_non_clean, end_of_non_clean) which we then
  // process en masse.
  HeapWord* end_of_non_clean = mr.end();
  HeapWord* start_of_non_clean = end_of_non_clean;
  jbyte* entry = _ct->byte_for(mr.last());
  const jbyte* first_entry = _ct->byte_for(mr.start());
  while (entry >= first_entry) {
    HeapWord* cur = _ct->addr_for(entry);
    if (!clear_card(entry)) {
      // We hit a clean card; process any non-empty
      // dirty range accumulated so far.
      if (start_of_non_clean < end_of_non_clean) {
        MemRegion mr2(start_of_non_clean, end_of_non_clean);
        _dirty_card_closure->do_MemRegion(mr2);
      }
      // Reset the dirty window while continuing to
      // look for the next dirty window to process.
      end_of_non_clean = cur;
      start_of_non_clean = end_of_non_clean;
    }
    // Open the left end of the window one card to the left.
    start_of_non_clean = cur;
    // Note that "entry" leads "start_of_non_clean" in
    // its leftward excursion after this point
    // in the loop and, when we hit the left end of "mr",
    // will point off of the left end of the card-table
    // for "mr".
    entry--;
  }
  // If the first card of "mr" was dirty, we will have
  // been left with a dirty window, co-initial with "mr",
  // which we now process.
  if (start_of_non_clean < end_of_non_clean) {
    MemRegion mr2(start_of_non_clean, end_of_non_clean);
    _dirty_card_closure->do_MemRegion(mr2);
  }
}
void do_MemRegion(MemRegion mr) {
  HeapWord* end_of_non_clean = mr.end();
  HeapWord* start_of_non_clean = end_of_non_clean;
  jbyte* entry = _ct->byte_for(mr.last());
  HeapWord* cur = _ct->addr_for(entry);
  while (mr.contains(cur)) {
    jbyte entry_val = *entry;  // read but not otherwise used here
    if (!clear_card(entry)) {
      // Clean card: process any non-empty dirty window
      // accumulated so far.
      if (start_of_non_clean < end_of_non_clean) {
        MemRegion mr2(start_of_non_clean, end_of_non_clean);
        _dirty_card_closure->do_MemRegion(mr2);
      }
      // Reset the (now empty) window at the clean card.
      end_of_non_clean = cur;
      start_of_non_clean = end_of_non_clean;
    }
    // Open the left end of the window at the card just examined,
    // then step "entry"/"cur" one card to the left.
    entry--;
    start_of_non_clean = cur;
    cur = _ct->addr_for(entry);
  }
  // Process a dirty window that reaches the start of "mr", if any.
  if (start_of_non_clean < end_of_non_clean) {
    MemRegion mr2(start_of_non_clean, end_of_non_clean);
    _dirty_card_closure->do_MemRegion(mr2);
  }
}
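Both variants above implement the same backward dirty-window walk. The following standalone program is a minimal sketch of that pattern, not HotSpot code: card entries are modeled as plain bytes over a flat index space, and clear_card, process_range, kClean and kDirty are stand-ins introduced only for this illustration.

#include <cstdio>
#include <vector>

const unsigned char kClean = 0;
const unsigned char kDirty = 1;

// Hypothetical stand-in for clear_card(): returns true and clears the
// entry if it was dirty, false if it was already clean.
static bool clear_card(unsigned char* entry) {
  if (*entry == kDirty) {
    *entry = kClean;
    return true;
  }
  return false;
}

// Hypothetical stand-in for _dirty_card_closure->do_MemRegion(): just
// report the half-open card-index range [start, end).
static void process_range(int start, int end) {
  std::printf("process dirty cards [%d, %d)\n", start, end);
}

// Walk the card indices [0, n) from right to left, accumulating a
// contiguous dirty window [start_of_non_clean, end_of_non_clean) and
// flushing it whenever a clean card is hit, then once more at the end.
static void walk_backwards(std::vector<unsigned char>& cards) {
  int end_of_non_clean   = (int)cards.size();
  int start_of_non_clean = end_of_non_clean;
  for (int cur = (int)cards.size() - 1; cur >= 0; cur--) {
    if (clear_card(&cards[cur])) {
      // Dirty: open the left end of the window one card to the left.
      start_of_non_clean = cur;
    } else {
      // Clean: flush any non-empty window accumulated so far...
      if (start_of_non_clean < end_of_non_clean) {
        process_range(start_of_non_clean, end_of_non_clean);
      }
      // ...and reset the window so it ends at the clean card (exclusive).
      end_of_non_clean   = cur;
      start_of_non_clean = cur;
    }
  }
  // A dirty window co-initial with the region may remain.
  if (start_of_non_clean < end_of_non_clean) {
    process_range(start_of_non_clean, end_of_non_clean);
  }
}

int main() {
  // Cards: D D C D D D C  -> expect [3, 6) and then [0, 2).
  std::vector<unsigned char> cards = {kDirty, kDirty, kClean,
                                      kDirty, kDirty, kDirty, kClean};
  walk_backwards(cards);
  return 0;
}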
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last   = mr.last();
  HeapWord* top    = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  mr     = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!mr.is_empty()) {
    walk_mem_region(mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}
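The region trimming in the middle of this function (clipping top to _min_done and then clamping bottom with MIN2) can be illustrated in isolation. The sketch below is a deliberately simplified model, not the HotSpot implementation: it ignores the precision modes, block_start and get_actual_top, models addresses as plain integers, and the names trim_region and TrimResult are hypothetical, introduced only for this example.

#include <cstdio>
#include <algorithm>

typedef unsigned long Addr;  // stand-in for HeapWord*

struct TrimResult {
  Addr bottom;
  Addr top;
  bool empty;
};

// min_done == 0 models _min_done == NULL (no previous work recorded).
static TrimResult trim_region(Addr bottom, Addr top, Addr min_done) {
  // If a previous call already covered [min_done, ...), don't redo it.
  if (min_done != 0 && min_done < top) {
    top = min_done;
  }
  // Top may now be below bottom; clamp so the region is well-formed.
  bottom = std::min(bottom, top);
  return TrimResult{bottom, top, bottom == top};
}

int main() {
  // Region [100, 200) with previous work starting at 150:
  // only [100, 150) remains to be walked.
  TrimResult r1 = trim_region(100, 200, 150);
  std::printf("[%lu, %lu) empty=%d\n", r1.bottom, r1.top, (int)r1.empty);

  // Region [100, 200) entirely covered by previous work at 80:
  // clamping leaves an empty region, so nothing is walked.
  TrimResult r2 = trim_region(100, 200, 80);
  std::printf("[%lu, %lu) empty=%d\n", r2.bottom, r2.top, (int)r2.empty);
  return 0;
}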