void G1RemSet::cleanup_after_oops_into_collection_set_do() { guarantee( _cards_scanned != NULL, "invariant" ); _total_cards_scanned = 0; for (uint i = 0; i < n_workers(); ++i) { _total_cards_scanned += _cards_scanned[i]; } FREE_C_HEAP_ARRAY(size_t, _cards_scanned); _cards_scanned = NULL; // Cleanup after copy _g1->set_refine_cte_cl_concurrency(true); // Set all cards back to clean. _g1->cleanUpCardTable(); DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set(); int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num(); if (_g1->evacuation_failed()) { double restore_remembered_set_start = os::elapsedTime(); // Restore remembered sets for the regions pointing into the collection set. // We just need to transfer the completed buffers from the DirtyCardQueueSet // used to hold cards that contain references that point into the collection set // to the DCQS used to hold the deferred RS updates. _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs); _g1->g1_policy()->phase_times()->record_evac_fail_restore_remsets((os::elapsedTime() - restore_remembered_set_start) * 1000.0); } // Free any completed buffers in the DirtyCardQueueSet used to hold cards // which contain references that point into the collection. _g1->into_cset_dirty_card_queue_set().clear(); assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0, "all buffers should be freed"); _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers(); }
// Frees the heap-allocated bit mask, if any. Small masks live inline in
// _bit_mask, so only masks larger than small_mask_limit were allocated on
// the C heap and need releasing. (Fix: the original was missing the
// function's closing brace.)
void OopMapCacheEntry::deallocate_bit_mask() {
  if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
    // The mask must be a C-heap allocation, never resource-area memory.
    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "This bit mask should not be in the resource area");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
    debug_only(_bit_mask[0] = 0;)
  }
}
// Empties every bucket and releases the bucket array.
// Guard against a table whose bucket array was never allocated (the sibling
// variant of this destructor performs the same NULL check).
KlassInfoTable::~KlassInfoTable() {
  if (_buckets != NULL) {
    for (int index = 0; index < _size; index++) {
      _buckets[index].empty();
    }
    FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets);
    _size = 0;
  }
}
// Releases the card-table barrier set and the per-generation
// current-value array owned by this remembered set.
CardTableRS::~CardTableRS() {
  if (_ct_bs != NULL) {
    delete _ct_bs;
    _ct_bs = NULL;
  }
  if (_last_cur_val_in_gen != NULL) {
    FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen, mtInternal);
  }
}
// Frees the parallel "lowest non-clean" bookkeeping arrays, clearing
// each pointer after its storage is released.
CardTableModRefBSForCTRS::~CardTableModRefBSForCTRS() {
  if (_lowest_non_clean != NULL) {
    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean);
    _lowest_non_clean = NULL;
  }
  if (_lowest_non_clean_chunk_size != NULL) {
    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size);
    _lowest_non_clean_chunk_size = NULL;
  }
  if (_lowest_non_clean_base_chunk_index != NULL) {
    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index);
    _lowest_non_clean_base_chunk_index = NULL;
  }
  if (_last_LNC_resizing_collection != NULL) {
    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection);
    _last_LNC_resizing_collection = NULL;
  }
}
// Empties every bucket and releases the bucket array.
KlassInfoTable::~KlassInfoTable() {
  // Nothing to tear down if the bucket array was never allocated.
  if (_buckets == NULL) {
    return;
  }
  for (int i = 0; i < _size; i++) {
    _buckets[i].empty();
  }
  FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets, mtInternal);
  _size = 0;
}
// Merges constraint entry *pp2 into *pp1 (swapping so the higher-capacity
// entry absorbs the other), verifies that any recorded klasses agree with
// the given klass, then unlinks and frees the absorbed entry.
void LoaderConstraintTable::merge_loader_constraints(
                                                  LoaderConstraintEntry** pp1,
                                                  LoaderConstraintEntry** pp2,
                                                  klassOop klass) {
  // make sure *pp1 has higher capacity
  if ((*pp1)->max_loaders() < (*pp2)->max_loaders()) {
    LoaderConstraintEntry** tmp = pp2;
    pp2 = pp1;
    pp1 = tmp;
  }

  LoaderConstraintEntry* p1 = *pp1;
  LoaderConstraintEntry* p2 = *pp2;

  // Append every loader recorded on p2 to p1's loader array.
  ensure_loader_constraint_capacity(p1, p2->num_loaders());

  for (int i = 0; i < p2->num_loaders(); i++) {
    int num = p1->num_loaders();
    p1->set_loader(num, p2->loader(i));
    p1->set_num_loaders(num + 1);
  }

  if (TraceLoaderConstraints) {
    ResourceMark rm;
    tty->print_cr("[Merged constraints for name %s, new loader list:",
                  p1->name()->as_C_string()
                  );
    for (int i = 0; i < p1->num_loaders(); i++) {
      tty->print_cr("[ [%d]: %s", i,
                    SystemDictionary::loader_name(p1->loader(i)));
    }
    if (p1->klass() == NULL) {
      tty->print_cr("[... and setting class object]");
    }
  }

  // p1->klass() will hold NULL if klass, p2->klass(), and old
  // p1->klass() are all NULL. In addition, all three must have
  // matching non-NULL values, otherwise either the constraints would
  // have been violated, or the constraints had been corrupted (and an
  // assertion would fail).
  if (p2->klass() != NULL) {
    assert(p2->klass() == klass, "constraints corrupted");
  }
  if (p1->klass() == NULL) {
    p1->set_klass(klass);
  } else {
    assert(p1->klass() == klass, "constraints corrupted");
  }

  // Unlink p2 from its bucket chain and free its loader array and entry.
  *pp2 = p2->next();
  FREE_C_HEAP_ARRAY(oop, p2->loaders());
  free_entry(p2);
  return;
}
// Returns (via the three reference out-parameters) the "lowest non-clean"
// chunk bookkeeping for the covered region containing sp, (re)allocating
// the per-region array under ParGCRareEvent_lock when the chunk count has
// changed since the last collection.
void CardTableModRefBS::
get_LNC_array_for_space(Space* sp,
                        jbyte**& lowest_non_clean,
                        uintptr_t& lowest_non_clean_base_chunk_index,
                        size_t& lowest_non_clean_chunk_size) {

  int i = find_covering_region_containing(sp->bottom());
  MemRegion covered = _covered[i];
  size_t n_chunks = chunks_to_cover(covered);

  // Only the first thread to obtain the lock will resize the
  // LNC array for the covered region. Any later expansion can't affect
  // the used_at_save_marks region.
  // (I observed a bug in which the first thread to execute this would
  // resize, and then it would cause "expand_and_allocates" that would
  // Increase the number of chunks in the covered region. Then a second
  // thread would come and execute this, see that the size didn't match,
  // and free and allocate again. So the first thread would be using a
  // freed "_lowest_non_clean" array.)

  // Do a dirty read here. If we pass the conditional then take the rare
  // event lock and do the read again in case some other thread had already
  // succeeded and done the resize.
  int cur_collection = Universe::heap()->total_collections();
  if (_last_LNC_resizing_collection[i] != cur_collection) {
    MutexLocker x(ParGCRareEvent_lock);
    // Re-check under the lock (double-checked resize).
    if (_last_LNC_resizing_collection[i] != cur_collection) {
      if (_lowest_non_clean[i] == NULL ||
          n_chunks != _lowest_non_clean_chunk_size[i]) {

        // Should we delete the old?
        if (_lowest_non_clean[i] != NULL) {
          assert(n_chunks != _lowest_non_clean_chunk_size[i],
                 "logical consequence");
          FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
          _lowest_non_clean[i] = NULL;
        }
        // Now allocate a new one if necessary.
        if (_lowest_non_clean[i] == NULL) {
          _lowest_non_clean[i] = NEW_C_HEAP_ARRAY(CardPtr, n_chunks);
          _lowest_non_clean_chunk_size[i] = n_chunks;
          _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
          for (int j = 0; j < (int)n_chunks; j++)
            _lowest_non_clean[i][j] = NULL;
        }
      }
      _last_LNC_resizing_collection[i] = cur_collection;
    }
  }
  // In any case, now do the initialization.
  lowest_non_clean = _lowest_non_clean[i];
  lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
  lowest_non_clean_chunk_size = _lowest_non_clean_chunk_size[i];
}
// Releases the address-location map owned by each collected nmethod
// descriptor, then deletes the descriptor list itself.
~nmethodCollector() {
  if (_nmethods == NULL) {
    return;
  }
  for (int idx = 0; idx < _nmethods->length(); idx++) {
    nmethodDesc* desc = _nmethods->at(idx);
    if (desc->map() != NULL) {
      FREE_C_HEAP_ARRAY(jvmtiAddrLocationMap, desc->map());
    }
  }
  delete _nmethods;
}
// Ensures entry p has room for at least nfree additional loader slots,
// growing and copying its loader array on the C heap when necessary.
void LoaderConstraintTable::ensure_loader_constraint_capacity(
                                                    LoaderConstraintEntry *p,
                                                    int nfree) {
  // Grow only when fewer than nfree free slots remain.
  if (p->max_loaders() - p->num_loaders() < nfree) {
    int new_capacity = p->num_loaders() + nfree;
    oop* grown = NEW_C_HEAP_ARRAY(oop, new_capacity);
    memcpy(grown, p->loaders(), sizeof(oop) * p->num_loaders());
    p->set_max_loaders(new_capacity);
    FREE_C_HEAP_ARRAY(oop, p->loaders());
    p->set_loaders(grown);
  }
}
// Deletes every chained entry in every bucket, then releases the two
// parallel heap arrays holding bucket sizes and bucket heads.
CompactHashtableWriter::~CompactHashtableWriter() {
  for (int b = 0; b < _num_buckets; b++) {
    Entry* cur = _buckets[b];
    while (cur != NULL) {
      Entry* follower = cur->next();
      delete cur;
      cur = follower;
    }
  }
  FREE_C_HEAP_ARRAY(juint, _bucket_sizes);
  FREE_C_HEAP_ARRAY(Entry*, _buckets);
}
// @requires UseG1GC TEST_VM(FreeRegionList, length) { if (!UseG1GC) { return; } FreeRegionList l("test"); const uint num_regions_in_test = 5; // Create a fake heap. It does not need to be valid, as the HeapRegion constructor // does not access it. MemRegion heap(NULL, num_regions_in_test * HeapRegion::GrainWords); // Allocate a fake BOT because the HeapRegion constructor initializes // the BOT. size_t bot_size = G1BlockOffsetTable::compute_size(heap.word_size()); HeapWord* bot_data = NEW_C_HEAP_ARRAY(HeapWord, bot_size, mtGC); ReservedSpace bot_rs(G1BlockOffsetTable::compute_size(heap.word_size())); G1RegionToSpaceMapper* bot_storage = G1RegionToSpaceMapper::create_mapper(bot_rs, bot_rs.size(), os::vm_page_size(), HeapRegion::GrainBytes, BOTConstants::N_bytes, mtGC); G1BlockOffsetTable bot(heap, bot_storage); bot_storage->commit_regions(0, num_regions_in_test); // Set up memory regions for the heap regions. MemRegion mr0(heap.start(), HeapRegion::GrainWords); MemRegion mr1(mr0.end(), HeapRegion::GrainWords); MemRegion mr2(mr1.end(), HeapRegion::GrainWords); MemRegion mr3(mr2.end(), HeapRegion::GrainWords); MemRegion mr4(mr3.end(), HeapRegion::GrainWords); HeapRegion hr0(0, &bot, mr0); HeapRegion hr1(1, &bot, mr1); HeapRegion hr2(2, &bot, mr2); HeapRegion hr3(3, &bot, mr3); HeapRegion hr4(4, &bot, mr4); l.add_ordered(&hr1); l.add_ordered(&hr0); l.add_ordered(&hr3); l.add_ordered(&hr4); l.add_ordered(&hr2); EXPECT_EQ(l.length(), num_regions_in_test) << "Wrong free region list length"; l.verify_list(); bot_storage->uncommit_regions(0, num_regions_in_test); delete bot_storage; FREE_C_HEAP_ARRAY(HeapWord, bot_data); }
// Post-evacuation cleanup: totals per-worker scanned-card counts, resets
// iterators and the card table, and restores remembered sets for regions
// pointing into the collection set when evacuation failed — either by
// transferring buffers to the deferred-update DCQS or by applying the
// card-table updates immediately.
void G1RemSet::cleanup_after_oops_into_collection_set_do() {
  guarantee( _cards_scanned != NULL, "invariant" );
  _total_cards_scanned = 0;
  for (uint i = 0; i < n_workers(); ++i)
    _total_cards_scanned += _cards_scanned[i];
  FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
  _cards_scanned = NULL;
  // Cleanup after copy
#if G1_REM_SET_LOGGING
  PrintRSClosure cl;
  _g1->heap_region_iterate(&cl);
#endif
  _g1->set_refine_cte_cl_concurrency(true);
  cleanUpIteratorsClosure iterClosure;
  _g1->collection_set_iterate(&iterClosure);
  // Set all cards back to clean.
  _g1->cleanUpCardTable();

  // Snapshot the buffer count before processing so the immediate-update
  // path can cross-check that every buffer was consumed.
  DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set();
  int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();

  if (_g1->evacuation_failed()) {
    // Restore remembered sets for the regions pointing into the collection set.
    if (G1DeferredRSUpdate) {
      // If deferred RS updates are enabled then we just need to transfer
      // the completed buffers from (a) the DirtyCardQueueSet used to hold
      // cards that contain references that point into the collection set
      // to (b) the DCQS used to hold the deferred RS updates
      _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
    } else {
      // Apply the card-table update closure to each completed buffer now.
      CardTableModRefBS* bs = (CardTableModRefBS*)_g1->barrier_set();
      UpdateRSetCardTableEntryIntoCSetClosure update_rs_cset_immediate(_g1, bs);

      int n_completed_buffers = 0;
      while (into_cset_dcqs.apply_closure_to_completed_buffer(&update_rs_cset_immediate,
                                                              0, 0, true)) {
        n_completed_buffers++;
      }
      assert(n_completed_buffers == into_cset_n_buffers, "missed some buffers");
    }
  }

  // Free any completed buffers in the DirtyCardQueueSet used to hold cards
  // which contain references that point into the collection.
  _g1->into_cset_dirty_card_queue_set().clear();
  assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0,
         "all buffers should be freed");
  _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers();
}
// Post-evacuation cleanup (HRInto variant): totals per-worker scanned-card
// counts, resets iterators and the card table, exits parallel-traversal
// mode, restores remembered sets on evacuation failure via the new-refs
// lists (deferred or immediate), and clears the per-worker new-refs lists.
void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
  guarantee( _cards_scanned != NULL, "invariant" );
  _total_cards_scanned = 0;
  for (uint i = 0; i < n_workers(); ++i)
    _total_cards_scanned += _cards_scanned[i];
  FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
  _cards_scanned = NULL;
  // Cleanup after copy
#if G1_REM_SET_LOGGING
  PrintRSClosure cl;
  _g1->heap_region_iterate(&cl);
#endif
  _g1->set_refine_cte_cl_concurrency(true);
  cleanUpIteratorsClosure iterClosure;
  _g1->collection_set_iterate(&iterClosure);
  // Set all cards back to clean.
  _g1->cleanUpCardTable();

  if (ParallelGCThreads > 0) {
    set_par_traversal(false);
  }

  if (_g1->evacuation_failed()) {
    // Restore remembered sets for the regions pointing into
    // the collection set.
    if (G1DeferredRSUpdate) {
      // Enqueue the updates on the deferred dirty-card queue rather than
      // applying them right away.
      DirtyCardQueue dcq(&_g1->dirty_card_queue_set());
      UpdateRSetOopsIntoCSDeferred deferred_update(_g1, &dcq);
      new_refs_iterate(&deferred_update);
    } else {
      UpdateRSetOopsIntoCSImmediate immediate_update(_g1);
      new_refs_iterate(&immediate_update);
    }
  }
  // Reset the per-worker new-reference lists for the next collection.
  for (uint i = 0; i < n_workers(); i++) {
    _new_refs[i]->clear();
  }
  assert(!_par_traversal_in_progress, "Invariant between iterations.");
}
// Rebuilds the NULL-terminated cache array from the current element list
// and hands the fresh cache pointer to the registered listener. If an
// element yields a NULL cache value, the element is removed (which
// rebuilds the cache again) and this pass is abandoned.
void GrowableCache::recache() {
  int len = _elements->length();

  // Drop the old cache and allocate one slot per element plus a
  // NULL terminator.
  FREE_C_HEAP_ARRAY(address, _cache, mtInternal);
  _cache = NEW_C_HEAP_ARRAY(address,len+1, mtInternal);

  for (int i=0; i<len; i++) {
    _cache[i] = _elements->at(i)->getCacheValue();
    //
    // The cache entry has gone bad. Without a valid frame pointer
    // value, the entry is useless so we simply delete it in product
    // mode. The call to remove() will rebuild the cache again
    // without the bad entry.
    //
    if (_cache[i] == NULL) {
      assert(false, "cannot recache NULL elements");
      remove(i);
      return;
    }
  }
  _cache[len] = NULL;

  // Publish the new cache to the listener callback.
  _listener_fun(_this_obj,_cache);
}
G1ParScanThreadState::~G1ParScanThreadState() {
  // Delete the owned PLAB allocator and closure set, then free the C-heap
  // array holding the per-region surviving-young-words counts.
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
}
// Releases the per-region claim array, if it was ever allocated.
HeapRegionClaimer::~HeapRegionClaimer() {
  if (_claims == NULL) {
    return;
  }
  FREE_C_HEAP_ARRAY(uint, _claims);
}
// Releases the time-stamp array, if it was ever allocated.
GCTaskThread::~GCTaskThread() {
  if (_time_stamps == NULL) {
    return;
  }
  FREE_C_HEAP_ARRAY(GCTaskTimeStamp, _time_stamps);
}
// Frees the C-heap array backing the pool's object map.
ArtaObjectPool::~ArtaObjectPool() {
  FREE_C_HEAP_ARRAY(objectRef, _object_map);
}
// Frees the C-heap element array backing the queue.
// NOTE(review): the template parameter list for this member definition is
// not visible in this chunk; it presumably precedes this line in the file.
inline GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
  FREE_C_HEAP_ARRAY(E, _elems);
}
// Walks every bucket of the constraint table at a safepoint and, using
// is_alive: clears klass references that are no longer live, compacts dead
// loaders out of each entry's loader array, and unlinks/frees entries left
// with fewer than two live loaders.
void LoaderConstraintTable::purge_loader_constraints(BoolObjectClosure* is_alive) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  // Remove unloaded entries from constraint table
  for (int index = 0; index < table_size(); index++) {
    LoaderConstraintEntry** p = bucket_addr(index);
    while(*p) {
      LoaderConstraintEntry* probe = *p;
      klassOop klass = probe->klass();
      // Remove klass that is no longer alive
      if (klass != NULL && !is_alive->do_object_b(klass)) {
        probe->set_klass(NULL);
        if (TraceLoaderConstraints) {
          ResourceMark rm;
          tty->print_cr("[Purging class object from constraint for name %s,"
                        " loader list:",
                        probe->name()->as_C_string());
          for (int i = 0; i < probe->num_loaders(); i++) {
            tty->print_cr("[ [%d]: %s", i,
                          SystemDictionary::loader_name(probe->loader(i)));
          }
        }
      }
      // Remove entries no longer alive from loader array
      int n = 0;
      while (n < probe->num_loaders()) {
        if (probe->loader(n) != NULL) {
          if (!is_alive->do_object_b(probe->loader(n))) {
            if (TraceLoaderConstraints) {
              ResourceMark rm;
              tty->print_cr("[Purging loader %s from constraint for name %s",
                            SystemDictionary::loader_name(probe->loader(n)),
                            probe->name()->as_C_string()
                            );
            }

            // Compact array
            // (move the last loader into slot n; n is deliberately not
            // incremented so the moved element gets re-examined)
            int num = probe->num_loaders() - 1;
            probe->set_num_loaders(num);
            probe->set_loader(n, probe->loader(num));
            probe->set_loader(num, NULL);

            if (TraceLoaderConstraints) {
              ResourceMark rm;
              tty->print_cr("[New loader list:");
              for (int i = 0; i < probe->num_loaders(); i++) {
                tty->print_cr("[ [%d]: %s", i,
                              SystemDictionary::loader_name(probe->loader(i)));
              }
            }

            continue; // current element replaced, so restart without
                      // incrementing n
          }
        }
        n++;
      }

      // Check whether entry should be purged
      if (probe->num_loaders() < 2) {
        if (TraceLoaderConstraints) {
          ResourceMark rm;
          tty->print("[Purging complete constraint for name %s\n",
                     probe->name()->as_C_string());
        }

        // Purge entry
        *p = probe->next();
        FREE_C_HEAP_ARRAY(oop, probe->loaders());
        free_entry(probe);
      } else {
#ifdef ASSERT
        // Everything surviving the purge must still be live.
        assert(is_alive->do_object_b(probe->name()), "name should be live");
        if (probe->klass() != NULL) {
          assert(is_alive->do_object_b(probe->klass()),
                 "klass should be live");
        }
        for (n = 0; n < probe->num_loaders(); n++) {
          if (probe->loader(n) != NULL) {
            assert(is_alive->do_object_b(probe->loader(n)),
                   "loader should be live");
          }
        }
#endif
        // Go to next entry
        p = probe->next_addr();
      }
    }
  }
}
// Frees the C-heap array holding the per-worker data values.
// NOTE(review): the template parameter list for this member definition is
// not visible in this chunk; it presumably precedes this line in the file.
WorkerDataArray<T>::~WorkerDataArray() {
  FREE_C_HEAP_ARRAY(T, _data);
}
G1ParScanThreadState::~G1ParScanThreadState() {
  // Retire any outstanding allocation buffers before deleting the
  // allocator, then free the surviving-young-words count array.
  _plab_allocator->retire_alloc_buffers();
  delete _plab_allocator;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
}
// Releases the task-claim array, if it was successfully allocated.
SubTasksDone::~SubTasksDone() {
  if (_tasks != NULL) {
    FREE_C_HEAP_ARRAY(jint, _tasks, mtInternal);
  }
}
// Releases the task-claim array, if it was successfully allocated.
SubTasksDone::~SubTasksDone() {
  if (_tasks != NULL) {
    FREE_C_HEAP_ARRAY(jint, _tasks);
  }
}
// Frees the C-heap array of thread-cookie entries.
ThreadCookieManager::~ThreadCookieManager() {
  FREE_C_HEAP_ARRAY(TCEntry, _entries);
}
// Frees the C-heap array backing the id set.
FreeIdSet::~FreeIdSet() {
  FREE_C_HEAP_ARRAY(uint, _ids);
}
GrowableCache::~GrowableCache() {
  // Clear the cached entries first, then delete the element list and free
  // the cache array itself.
  clear();
  delete _elements;
  FREE_C_HEAP_ARRAY(address, _cache, mtInternal);
}