bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what grow_by(0) would return
  }
  size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap. An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not. A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee. Align down to give a best effort. This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
  }
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    success = grow_to_reserved();
  }
  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active_and_needs_gc()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }
  return success;
}
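// Hedged sketch (not from the HotSpot sources): why the align-up in expand() can
// wrap to zero and why aligning down is a safe fallback. The 4K page size and the
// helper names are illustrative assumptions; the real alignment comes from
// ReservedSpace.
static size_t demo_page_align_up(size_t bytes, size_t page = 4096) {
  // Adding (page - 1) overflows size_t when bytes is within a page of SIZE_MAX,
  // so the masked result wraps to 0 -- exactly the case expand() guards against.
  return (bytes + page - 1) & ~(page - 1);
}

static size_t demo_page_align_down(size_t bytes, size_t page = 4096) {
  // Aligning down never overflows, so it still yields a usable best-effort size.
  return bytes & ~(page - 1);
}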
void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}
// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root");
      tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
      cur->fix_oop_relocations();
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}
CodeBlob* CodeCache::allocate(int size) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
                    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
                    (address)_heap->end() - (address)_heap->begin());
    }
  }
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}
void OneContigSpaceCardGeneration::shrink(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size > 0) {
    shrink_by(size);
  }
}
void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL)
        last->set_scavenge_root_link(next);
      else
        set_scavenge_root_nmethods(next);
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}
void CodeBlobCollector::collect() {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(_global_code_blobs == NULL, "checking");

  // create the global list
  _global_code_blobs = new (ResourceObj::C_HEAP) GrowableArray<JvmtiCodeBlobDesc*>(50, true);

  // iterate over the stub code descriptors and put them in the list first.
  int index = 0;
  StubCodeDesc* desc;
  while ((desc = StubCodeDesc::desc_for_index(++index)) != NULL) {
    _global_code_blobs->append(new JvmtiCodeBlobDesc(desc->name(), desc->begin(), desc->end()));
  }

  // next iterate over all the non-nmethod code blobs and add them to
  // the list - as noted above this will filter out duplicates and
  // enclosing blobs.
  Unimplemented();  //CodeCache::blobs_do(do_blob);

  // make the global list the instance list so that it can be used
  // for other iterations.
  _code_blobs = _global_code_blobs;
  _global_code_blobs = NULL;
}
//
// Reclaim all unused buckets.
//
void DependencyContext::expunge_stale_entries() {
  assert_locked_or_safepoint(CodeCache_lock);
  if (!has_stale_entries()) {
    assert(!find_stale_entries(), "inconsistent info");
    return;
  }
  nmethodBucket* first = dependencies();
  nmethodBucket* last = NULL;
  int removed = 0;
  for (nmethodBucket* b = first; b != NULL;) {
    assert(b->count() >= 0, "bucket count: %d", b->count());
    nmethodBucket* next = b->next();
    if (b->count() == 0) {
      if (last == NULL) {
        first = next;
      } else {
        last->set_next(next);
      }
      removed++;
      delete b;
      // last stays the same.
    } else {
      last = b;
    }
    b = next;
  }
  set_dependencies(first);
  set_has_stale_entries(false);
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
    _perf_total_buckets_stale_count->dec(removed);
  }
}
void CodeBlobCollector::collect() {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(_global_code_blobs == NULL, "checking");

  // create the global list
  _global_code_blobs = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiCodeBlobDesc*>(50, true);

  // iterate over the stub code descriptors and put them in the list first.
  for (StubCodeDesc* desc = StubCodeDesc::first(); desc != NULL; desc = StubCodeDesc::next(desc)) {
    _global_code_blobs->append(new JvmtiCodeBlobDesc(desc->name(), desc->begin(), desc->end()));
  }

  // Vtable stubs are not described with StubCodeDesc,
  // process them separately
  VtableStubs::vtable_stub_do(do_vtable_stub);

  // next iterate over all the non-nmethod code blobs and add them to
  // the list - as noted above this will filter out duplicates and
  // enclosing blobs.
  CodeCache::blobs_do(do_blob);

  // make the global list the instance list so that it can be used
  // for other iterations.
  _code_blobs = _global_code_blobs;
  _global_code_blobs = NULL;
}
void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded() && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL)
        last->set_scavenge_root_link(next);
      else
        set_scavenge_root_nmethods(next);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}
void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  bool needs_cache_clean = false;
  FOR_ALL_ALIVE_BLOBS(cb) {
#ifndef CORE
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      if (nm->is_marked_for_unloading()) {
        if (nm->is_in_use()) {
          // transitioning directly from live to unloaded so we need
          // to force a cache cleanup.
          needs_cache_clean = true;
        }
        // the GC may have discovered nmethods which should be unloaded, so
        // make them unloaded, so the nmethod sweeper will eventually flush them
        nm->make_unloaded();
        // no need to fix oop relocations in unloaded nmethods, so continue;
        continue;
      } else if (nm->is_patched_for_deopt()) {
        // no need to fix oop relocations in deopted nmethods, so continue;
        continue;
      } else {
        debug_only(nm->verify();)
      }
    }
#endif // CORE
    cb->fix_oop_relocations();
  }
  // needs_cache_clean is presumably consumed after this loop; the remainder of
  // the method is not included in this excerpt.
}
Klass* Dictionary::find_class(int index, unsigned int hash,
                              Symbol* name, ClassLoaderData* loader_data) {
  assert_locked_or_safepoint(SystemDictionary_lock);
  assert(index == index_for(name, loader_data), "incorrect index?");

  DictionaryEntry* entry = get_entry(index, hash, name, loader_data);
  return (entry != NULL) ? entry->klass() : (Klass*)NULL;
}
bool OneContigSpaceCardGeneration::grow_to_reserved() {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
  return success;
}
// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive,
                             OopClosure* keep_alive,
                             bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, keep_alive, unloading_occurred);
  }
}
nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}
ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, unsigned int hash,
                                                                  oop protection_domain) {
  assert_locked_or_safepoint(SystemDictionary_lock);
  assert(index == index_for(protection_domain), "incorrect index?");
  assert(find_entry(index, protection_domain) == NULL, "no double entry");

  ProtectionDomainCacheEntry* p = new_entry(hash, protection_domain);
  Hashtable<oop, mtClass>::add_entry(index, p);
  return p;
}
void PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    bool success = expand_by(remaining_bytes);
    assert(success, "grow to reserved failed");
  }
}
SymbolPropertyEntry* SymbolPropertyTable::add_entry(int index, unsigned int hash,
                                                    Symbol* sym, intptr_t sym_mode) {
  assert_locked_or_safepoint(SystemDictionary_lock);
  assert(index == index_for(sym, sym_mode), "incorrect index?");
  assert(find_entry(index, hash, sym, sym_mode) == NULL, "no double entry");

  SymbolPropertyEntry* p = new_entry(hash, sym, sym_mode);
  Hashtable<Symbol*, mtSymbol>::add_entry(index, p);
  return p;
}
HeapWord* allocate_noexpand(size_t word_size) {
  // We assume the heap lock is held here.
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = object_space()->allocate(word_size);
  if (res != NULL) {
    DEBUG_ONLY(assert_block_in_covered_region(MemRegion(res, word_size)));
    _start_array.allocate_block(res);
  }
  return res;
}
HeapWord* PSPermGen::allocate_permanent(size_t size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* obj = allocate_noexpand(size, false);

  if (obj == NULL) {
    obj = expand_and_allocate(size, false);
  }

  return obj;
}
HeapWord* allocate_noexpand(size_t word_size, bool is_tlab) {
  // We assume the heap lock is held here.
  assert(!is_tlab, "Does not support TLAB allocation");
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = object_space()->allocate(word_size);
  if (res != NULL) {
    _start_array.allocate_block(res);
  }
  return res;
}
// Make checks on the current sizes of the generations and
// the constraints on the sizes of the generations. Push
// up the boundary within the constraints. A partial
// push can occur.
void AdjoiningGenerations::request_old_gen_expansion(size_t expand_in_bytes) {
  assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  // These sizes limit the amount the boundaries can move. Effectively,
  // the generation says how much it is willing to yield to the other
  // generation.
  const size_t young_gen_available = young_gen()->available_for_contraction();
  const size_t old_gen_available = old_gen()->available_for_expansion();
  const size_t alignment = virtual_spaces()->alignment();
  size_t change_in_bytes = MIN3(young_gen_available,
                                old_gen_available,
                                align_size_up_(expand_in_bytes, alignment));

  if (change_in_bytes == 0) {
    return;
  }

  if (TraceAdaptiveGCBoundary) {
    gclog_or_tty->print_cr("Before expansion of old gen with boundary move");
    gclog_or_tty->print_cr("  Requested change: " SIZE_FORMAT_HEX
                           "  Attempted change: " SIZE_FORMAT_HEX,
                           expand_in_bytes, change_in_bytes);
    if (!PrintHeapAtGC) {
      Universe::print_on(gclog_or_tty);
    }
    gclog_or_tty->print_cr("  PSOldGen max size: " SIZE_FORMAT "K",
                           old_gen()->max_gen_size()/K);
  }

  // Move the boundary between the generations up (smaller young gen).
  if (virtual_spaces()->adjust_boundary_up(change_in_bytes)) {
    young_gen()->reset_after_change();
    old_gen()->reset_after_change();
  }

  // The total reserved for the generations should match the sum
  // of the two even if the boundary is moving.
  assert(reserved_byte_size() ==
         old_gen()->max_gen_size() + young_gen()->max_size(),
         "Space is missing");
  young_gen()->space_invariants();
  old_gen()->space_invariants();

  if (TraceAdaptiveGCBoundary) {
    gclog_or_tty->print_cr("After expansion of old gen with boundary move");
    if (!PrintHeapAtGC) {
      Universe::print_on(gclog_or_tty);
    }
    gclog_or_tty->print_cr("  PSOldGen max size: " SIZE_FORMAT "K",
                           old_gen()->max_gen_size()/K);
  }
}
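// Hedged worked example (illustrative numbers, not from the sources) of the
// change_in_bytes computation above: with young_gen_available = 6M,
// old_gen_available = 10M, alignment = 1M and expand_in_bytes = 3.5M,
// align_size_up_(3.5M, 1M) = 4M, so change_in_bytes = MIN3(6M, 10M, 4M) = 4M
// and the boundary moves by the full aligned request. If the young gen could
// only yield 2M, change_in_bytes would be 2M -- the "partial push" mentioned
// in the comment above.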
// This method currently does not expect to expand into eden (i.e.,
// the virtual space boundaries are expected to be consistent
// with the eden boundaries).
void PSYoungGen::post_resize() {
  assert_locked_or_safepoint(Heap_lock);
  assert((eden_space()->bottom() < to_space()->bottom()) &&
         (eden_space()->bottom() < from_space()->bottom()),
         "Eden is assumed to be below the survivor spaces");

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);
  space_invariants();
}
void DependencyContext::wipe() {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* b = dependencies();
  set_dependencies(NULL);
  set_has_stale_entries(false);
  while (b != NULL) {
    nmethodBucket* next = b->next();
    delete b;
    b = next;
  }
}
// Placeholder objects represent classes currently being loaded.
// All threads examining the placeholder table must hold the
// SystemDictionary_lock, so we don't need special precautions
// on store ordering here.
void PlaceholderTable::add_entry(int index, unsigned int hash,
                                 symbolHandle class_name, Handle class_loader,
                                 bool havesupername, symbolHandle supername) {
  assert_locked_or_safepoint(SystemDictionary_lock);
  assert(!class_name.is_null(), "adding NULL obj");

  // Both readers and writers are locked so it's safe to just
  // create the placeholder and insert it in the list without a membar.
  PlaceholderEntry* entry = new_entry(hash, class_name(), class_loader(), havesupername, supername());
  add_entry(index, entry);
}
// Placeholder objects represent classes currently being loaded.
// All threads examining the placeholder table must hold the
// SystemDictionary_lock, so we don't need special precautions
// on store ordering here.
void PlaceholderTable::add_entry(int index, unsigned int hash,
                                 Symbol* class_name, ClassLoaderData* loader_data,
                                 bool havesupername, Symbol* supername) {
  assert_locked_or_safepoint(SystemDictionary_lock);
  assert(class_name != NULL, "adding NULL obj");

  // Both readers and writers are locked so it's safe to just
  // create the placeholder and insert it in the list without a membar.
  PlaceholderEntry* entry = new_entry(hash, class_name, loader_data, havesupername, supername);
  add_entry(index, entry);
}
void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
#ifndef CORE
  if (cb->is_nmethod() && ((nmethod *)cb)->number_of_dependents() > 0) {
    _number_of_nmethods_with_dependencies++;
  }
#endif
  // flush the hardware I-cache
  ICache::invalidate_range(cb->instructions_begin(), cb->instructions_size());
}
void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}
void DictionaryEntry::add_protection_domain(oop protection_domain) {
  assert_locked_or_safepoint(SystemDictionary_lock);
  if (!contains_protection_domain(protection_domain)) {
    ProtectionDomainEntry* new_head =
      new ProtectionDomainEntry(protection_domain, _pd_set);
    // Warning: Preserve store ordering. The SystemDictionary is read
    //          without locks. The new ProtectionDomainEntry must be
    //          complete before other threads can be allowed to see it
    //          via a store to _pd_set.
    OrderAccess::release_store_ptr(&_pd_set, new_head);
  }
}
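// Standalone sketch (modern C++, not HotSpot code) of the publish-with-release
// idiom the comment above relies on: fully construct the new node, then publish
// its pointer with release semantics so lock-free readers never observe a
// partially initialized entry. The names and types here are illustrative
// assumptions, not part of the sources.
#include <atomic>

struct PDNode {
  int     payload;
  PDNode* next;
};

std::atomic<PDNode*> pd_head{nullptr};

void publish(int payload) {
  // Build the node completely before it becomes reachable from the list head.
  PDNode* n = new PDNode{payload, pd_head.load(std::memory_order_relaxed)};
  // The release store plays the role of OrderAccess::release_store_ptr above.
  pd_head.store(n, std::memory_order_release);
}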
// Follow all roots in the compiled code, unless they are the only
// ones keeping a class alive. In that case, we NULL out the roots,
// mark the CodeBlob for unloading and set the boolean flag
// marked_for_unloading to true.
void CodeCache::do_unloading(BoolObjectClosure* is_alive,
                             OopClosure* keep_alive,
                             bool unloading_occurred,
                             bool& marked_for_unloading) {
  assert_locked_or_safepoint(CodeCache_lock);
  marked_for_unloading = false;
  FOR_ALL_ALIVE_BLOBS(cb) {
    cb->follow_roots_or_mark_for_unloading(is_alive, keep_alive,
                                           unloading_occurred, marked_for_unloading);
  }
}