void HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i) {
  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  assert(r != NULL, "unexpected null");

  HeapWord* end   = _ct_bs->addr_for(card_ptr + 1);
  MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
  init_ct_freq_table(_g1->g1_reserved_obj_bytes());
  ct_freq_note_card(_ct_bs->index_for(start));
#endif

  UpdateRSOopClosure update_rs_oop_cl(this, worker_i);
  update_rs_oop_cl.set_from(r);
  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r, &update_rs_oop_cl);

  // Undirty the card.
  *card_ptr = CardTableModRefBS::clean_card_val();
  // We must complete this write before we do any of the reads below.
  OrderAccess::storeload();
  // And process it, being careful of unallocated portions of TLAB's.
  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl);

  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.)  For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in.  Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked.  (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
    _conc_refine_cards++;
  }
}
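// ---------------------------------------------------------------------------
// A minimal standalone sketch (not HotSpot code) of the clean-then-scan
// protocol used above, with std::atomic standing in for OrderAccess. The
// names sketch::Card, refiner_scan() and mutator_store() are illustrative
// assumptions; only the ordering mirrors the real code. The refiner
// undirties the card and issues a StoreLoad fence (the analogue of
// OrderAccess::storeload()) before reading the memory the card covers, so
// a racing mutator either has its reference seen by the scan or finds a
// clean card, re-dirties it, and causes the card to be re-enqueued.
// ---------------------------------------------------------------------------
#include <atomic>
#include <cstdint>

namespace sketch {

enum : uint8_t { clean_card = 0, dirty_card = 1 };

struct Card {
  std::atomic<uint8_t> value{dirty_card};
};

// Refinement thread: undirty first, fence, then scan the covered memory.
uintptr_t refiner_scan(Card& card, const std::atomic<uintptr_t>& slot) {
  card.value.store(clean_card, std::memory_order_relaxed);  // undirty the card
  std::atomic_thread_fence(std::memory_order_seq_cst);      // ~ storeload()
  return slot.load(std::memory_order_relaxed);              // read covered word
}

// Mutator post-write barrier: publish the reference, then dirty the card.
void mutator_store(Card& card, std::atomic<uintptr_t>& slot, uintptr_t ref) {
  slot.store(ref, std::memory_order_release);
  card.value.store(dirty_card, std::memory_order_release);
}

}  // namespace sketch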
bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                            bool check_for_refs_into_cset) {
  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  assert(r != NULL, "unexpected null");

  HeapWord* end   = _ct_bs->addr_for(card_ptr + 1);
  MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
  init_ct_freq_table(_g1->max_capacity());
  ct_freq_note_card(_ct_bs->index_for(start));
#endif

  assert(!check_for_refs_into_cset || _cset_rs_update_cl[worker_i] != NULL,
         "sanity");
  UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                               _g1->g1_rem_set(),
                                               _cset_rs_update_cl[worker_i],
                                               check_for_refs_into_cset,
                                               worker_i);
  update_rs_oop_cl.set_from(r);

  TriggerClosure trigger_cl;
  FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
  InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
  Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);

  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
                        (check_for_refs_into_cset ?
                                (OopClosure*)&mux :
                                (OopClosure*)&update_rs_oop_cl));

  // Undirty the card.
  *card_ptr = CardTableModRefBS::clean_card_val();
  // We must complete this write before we do any of the reads below.
  OrderAccess::storeload();

  // And process it, being careful of unallocated portions of TLAB's.

  // The region for the current card may be a young region. The
  // current card may have been a card that was evicted from the
  // card cache. When the card was inserted into the cache, we had
  // determined that its region was non-young. While in the cache,
  // the region may have been freed during a cleanup pause, reallocated
  // and tagged as young.
  //
  // We wish to filter out cards for such a region but the current
  // thread, if we're running concurrently, may "see" the young type
  // change at any time (so an earlier "is_young" check may pass or
  // fail arbitrarily). We tell the iteration code to perform this
  // filtering when it has been determined that there has been an actual
  // allocation in this region, making it safe to check the young type.
  bool filter_young = true;

  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl,
                                        filter_young);

  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.)  For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in.  Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked.  (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
    _conc_refine_cards++;
  }

  return trigger_cl.value();
}
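// ---------------------------------------------------------------------------
// A minimal standalone sketch (not HotSpot code) of the closure plumbing
// built above when check_for_refs_into_cset is true. The class names and
// the _in_cset predicate below are illustrative stand-ins; only the shape
// mirrors the real chain: a trigger latches to true the first time a
// reference into the collection set is seen, an invoke-if-not-triggered
// wrapper skips the collection-set test once the trigger has fired, and a
// two-way mux feeds every reference on the card to both the cset-detection
// chain and the remembered-set update closure.
// ---------------------------------------------------------------------------
#include <cstddef>

namespace sketch {

struct OopClosure {
  virtual void do_oop(void** p) = 0;
  virtual ~OopClosure() {}
};

struct TriggerClosure : OopClosure {
  bool _triggered;
  TriggerClosure() : _triggered(false) {}
  void do_oop(void**) { _triggered = true; }  // latch on first invocation
  bool value() const { return _triggered; }
};

// Stand-in for FilterIntoCSClosure: applies the wrapped closure only to
// references that point into the collection set (predicate is a placeholder).
struct FilterIntoCSClosure : OopClosure {
  OopClosure* _inner;
  bool (*_in_cset)(void*);
  FilterIntoCSClosure(OopClosure* inner, bool (*in_cset)(void*))
      : _inner(inner), _in_cset(in_cset) {}
  void do_oop(void** p) { if (*p != NULL && _in_cset(*p)) _inner->do_oop(p); }
};

struct InvokeIfNotTriggeredClosure : OopClosure {
  TriggerClosure* _t;
  OopClosure* _inner;
  InvokeIfNotTriggeredClosure(TriggerClosure* t, OopClosure* inner)
      : _t(t), _inner(inner) {}
  void do_oop(void** p) { if (!_t->value()) _inner->do_oop(p); }
};

struct Mux2Closure : OopClosure {
  OopClosure* _a; OopClosure* _b;
  Mux2Closure(OopClosure* a, OopClosure* b) : _a(a), _b(b) {}
  void do_oop(void** p) { _a->do_oop(p); _b->do_oop(p); }
};

// Wiring, as in the function above:
//   TriggerClosure trigger;
//   FilterIntoCSClosure into_cs(&trigger, &is_in_cset);
//   InvokeIfNotTriggeredClosure invoke(&trigger, &into_cs);
//   Mux2Closure mux(&invoke, &update_rs_closure);

}  // namespace sketch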
bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
                           bool check_for_refs_into_cset) {
  assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
         err_msg("Card at "PTR_FORMAT" index "SIZE_FORMAT" representing heap at "PTR_FORMAT" (%u) must be in committed heap",
                 p2i(card_ptr),
                 _ct_bs->index_for(_ct_bs->addr_for(card_ptr)),
                 _ct_bs->addr_for(card_ptr),
                 _g1->addr_to_region(_ct_bs->addr_for(card_ptr))));

  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
    // No need to return that this card contains refs that point
    // into the collection set.
    return false;
  }

  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);

  // Why do we have to check here whether a card is on a young region,
  // given that we dirty young regions and, as a result, the
  // post-barrier is supposed to filter them out and never to enqueue
  // them? When we allocate a new region as the "allocation region" we
  // actually dirty its cards after we release the lock, since card
  // dirtying while holding the lock was a performance bottleneck. So,
  // as a result, it is possible for other threads to actually
  // allocate objects in the region (after they acquire the lock)
  // before all the cards on the region are dirtied. This is unlikely,
  // but it can happen. So, the extra check below filters out those cards.
  if (r->is_young()) {
    return false;
  }

  // While we are processing RSet buffers during the collection, we
  // actually don't want to scan any cards on the collection set,
  // since we don't want to update remembered sets with entries that
  // point into the collection set, given that live objects from the
  // collection set are about to move and such entries will be stale
  // very soon. This change also deals with a reliability issue which
  // involves scanning a card in the collection set and coming across
  // an array that was being chunked and looking malformed. Note,
  // however, that if evacuation fails, we have to scan any objects
  // that were not moved and create any missing entries.
  if (r->in_collection_set()) {
    return false;
  }

  // The result from the hot card cache insert call is either:
  //   * pointer to the current card
  //     (implying that the current card is not 'hot'),
  //   * null
  //     (meaning we had inserted the card ptr into the "hot" card cache,
  //     which had some headroom),
  //   * a pointer to a "hot" card that was evicted from the "hot" cache.
  //

  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  if (hot_card_cache->use_cache()) {
    assert(!check_for_refs_into_cset, "sanity");
    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");

    card_ptr = hot_card_cache->insert(card_ptr);
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return false;
    }

    start = _ct_bs->addr_for(card_ptr);
    r = _g1->heap_region_containing(start);

    // Checking whether the region we got back from the cache
    // is young here is inappropriate. The region could have been
    // freed, reallocated and tagged as young while in the cache.
    // Hence we could see its young type change at any time.
  }

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.  This is not safe without a perm
  // gen at the upper end of the heap.
  HeapWord* end   = start + CardTableModRefBS::card_size_in_words;
  MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
  init_ct_freq_table(_g1->max_capacity());
  ct_freq_note_card(_ct_bs->index_for(start));
#endif

  G1ParPushHeapRSClosure* oops_in_heap_closure = NULL;
  if (check_for_refs_into_cset) {
    // ConcurrentG1RefineThreads have worker numbers larger than what
    // _cset_rs_update_cl[] is set up to handle. But those threads should
    // only be active outside of a collection which means that when they
    // reach here they should have check_for_refs_into_cset == false.
    assert((size_t)worker_i < n_workers(), "index of worker larger than _cset_rs_update_cl[].length");
    oops_in_heap_closure = _cset_rs_update_cl[worker_i];
  }
  G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                                 _g1->g1_rem_set(),
                                                 oops_in_heap_closure,
                                                 check_for_refs_into_cset,
                                                 worker_i);
  update_rs_oop_cl.set_from(r);

  G1TriggerClosure trigger_cl;
  FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
  G1InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
  G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);

  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
                        (check_for_refs_into_cset ?
                                (OopClosure*)&mux :
                                (OopClosure*)&update_rs_oop_cl));

  // The region for the current card may be a young region. The
  // current card may have been a card that was evicted from the
  // card cache. When the card was inserted into the cache, we had
  // determined that its region was non-young. While in the cache,
  // the region may have been freed during a cleanup pause, reallocated
  // and tagged as young.
  //
  // We wish to filter out cards for such a region but the current
  // thread, if we're running concurrently, may "see" the young type
  // change at any time (so an earlier "is_young" check may pass or
  // fail arbitrarily). We tell the iteration code to perform this
  // filtering when it has been determined that there has been an actual
  // allocation in this region, making it safe to check the young type.
  bool filter_young = true;

  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl,
                                        filter_young,
                                        card_ptr);

  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.)  For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in.  Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked.  (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    _conc_refine_cards++;
  }

  // This gets set to true if the card being refined has
  // references that point into the collection set.
  bool has_refs_into_cset = trigger_cl.triggered();

  // We should only be detecting that the card contains references
  // that point into the collection set if the current thread is
  // a GC worker thread.
  assert(!has_refs_into_cset || SafepointSynchronize::is_at_safepoint(),
         "invalid result at non safepoint");

  return has_refs_into_cset;
}
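// ---------------------------------------------------------------------------
// A minimal standalone sketch (not HotSpot's G1HotCardCache) of the
// insert() contract relied on in refine_card() above. The counting scheme,
// fixed sizes, and std::map lookup are illustrative assumptions (the real
// cache keeps counts in a card-counts table and swaps slots with an atomic
// exchange; this sketch is single-threaded). Only the three-way result
// matches the description in the source: the card itself when it is not yet
// "hot", NULL when the card was parked in the cache with headroom, or an
// older "hot" card that was evicted and must be refined now.
// ---------------------------------------------------------------------------
#include <cstddef>
#include <cstring>
#include <map>

namespace sketch {

typedef signed char jbyte;

class HotCardCache {
  static const size_t cache_size = 1024;
  static const int hot_threshold = 4;

  jbyte* _cache[cache_size];
  size_t _next;                    // next slot to overwrite (FIFO eviction)
  std::map<jbyte*, int> _counts;   // placeholder for the card-counts table

  int& count_for(jbyte* card_ptr) { return _counts[card_ptr]; }

 public:
  HotCardCache() : _next(0) { memset(_cache, 0, sizeof(_cache)); }

  jbyte* insert(jbyte* card_ptr) {
    if (++count_for(card_ptr) < hot_threshold) {
      return card_ptr;             // not hot: caller refines it now
    }
    // Hot: park it in the cache, handing back whatever it displaces.
    jbyte* evicted = _cache[_next];
    _cache[_next] = card_ptr;
    _next = (_next + 1) % cache_size;
    return evicted;                // NULL if the slot was still free
  }
};

}  // namespace sketch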