G1HotCardCache::~G1HotCardCache() {
  if (default_use_cache()) {
    assert(_hot_cache != NULL, "Logic");
    ArrayAllocator<jbyte*, mtGC>::free(_hot_cache, _hot_cache_size);
    _hot_cache = NULL;
  }
}
void G1HotCardCache::drain(CardTableEntryClosure* cl, uint worker_i) {
  assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");
  assert(_hot_cache != NULL, "Logic");
  assert(!use_cache(), "cache should be disabled");

  while (_hot_cache_par_claimed_idx < _hot_cache_size) {
    size_t end_idx = Atomic::add(_hot_cache_par_chunk_size,
                                 &_hot_cache_par_claimed_idx);
    size_t start_idx = end_idx - _hot_cache_par_chunk_size;
    // The current worker has successfully claimed the chunk [start_idx..end_idx)
    end_idx = MIN2(end_idx, _hot_cache_size);
    for (size_t i = start_idx; i < end_idx; i++) {
      jbyte* card_ptr = _hot_cache[i];
      if (card_ptr != NULL) {
        bool result = cl->do_card_ptr(card_ptr, worker_i);
        assert(result, "Closure should always return true");
      } else {
        break;
      }
    }
  }

  // The existing entries in the hot card cache, which were just refined
  // above, are discarded prior to re-enabling the cache near the end of the GC.
}
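// A minimal sketch (not part of the original source) of a closure that could be
// passed to the drain() overload above. It assumes CardTableEntryClosure declares
// "virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i)", which is the shape
// implied by the call cl->do_card_ptr(card_ptr, worker_i) in drain(); the class
// name CountingCardClosure is hypothetical and used only for illustration.
class CountingCardClosure : public CardTableEntryClosure {
  size_t _processed;
public:
  CountingCardClosure() : _processed(0) {}
  virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    // A real closure would refine the card here; counting is a placeholder.
    _processed++;
    // drain() asserts that the closure returns true, so never terminate early.
    return true;
  }
  size_t processed() const { return _processed; }
};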
// Resets the hot card cache and discards the entries.
void reset_hot_cache() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Current thread should be the VMThread");
  if (default_use_cache()) {
    reset_hot_cache_internal();
  }
}
G1HotCardCache::~G1HotCardCache() {
  if (default_use_cache()) {
    assert(_hot_cache != NULL, "Logic");
    _hot_cache_memory.free();
    _hot_cache = NULL;
  }
}
void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
  if (default_use_cache()) {
    _use_cache = true;

    _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
    _hot_cache = ArrayAllocator<jbyte*, mtGC>::allocate(_hot_cache_size);

    reset_hot_cache_internal();

    // For refining the cards in the hot cache in parallel
    _hot_cache_par_chunk_size = ClaimChunkSize;
    _hot_cache_par_claimed_idx = 0;

    _card_counts.initialize(card_counts_storage);
  }
}
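// Sizing note (an illustrative aside, not part of the original source):
// assuming the default G1ConcRSLogCacheSize of 10, _hot_cache_size above is
// 1 << 10 = 1024 entries, i.e. 1024 jbyte* slots, or 8 KiB of pointers on a
// 64-bit VM.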
void G1HotCardCache::drain(int worker_i,
                           G1RemSet* g1rs,
                           DirtyCardQueue* into_cset_dcq) {
  if (!default_use_cache()) {
    assert(_hot_cache == NULL, "Logic");
    return;
  }

  assert(_hot_cache != NULL, "Logic");
  assert(!use_cache(), "cache should be disabled");
  int start_idx;

  while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
    int end_idx = start_idx + _hot_cache_par_chunk_size;

    if (start_idx ==
        Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
      // The current worker has successfully claimed the chunk [start_idx..end_idx)
      end_idx = MIN2(end_idx, _n_hot);
      for (int i = start_idx; i < end_idx; i++) {
        jbyte* card_ptr = _hot_cache[i];
        if (card_ptr != NULL) {
          if (g1rs->refine_card(card_ptr, worker_i, true)) {
            // The part of the heap spanned by the card contains references
            // that point into the current collection set.
            // We need to record the card pointer in the DirtyCardQueueSet
            // that we use for such cards.
            //
            // The only time we care about recording cards that contain
            // references that point into the collection set is during
            // RSet updating while within an evacuation pause.
            // In this case worker_i should be the id of a GC worker thread
            assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
            assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
                   err_msg("incorrect worker id: " INT32_FORMAT, worker_i));

            into_cset_dcq->enqueue(card_ptr);
          }
        }
      }
    }
  }
  // The existing entries in the hot card cache, which were just refined
  // above, are discarded prior to re-enabling the cache near the end of the GC.
}
// Enable the cache only when it is enabled by default; passing false always disables it.
void set_use_cache(bool b) {
  _use_cache = (b ? default_use_cache() : false);
}
G1HotCardCache::~G1HotCardCache() {
  if (default_use_cache()) {
    assert(_hot_cache != NULL, "Logic");
    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
  }
}