// Revised lookup semantics introduced 1.3 (Kestrel beta)
void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) {
  // Note: Arrays can have intermediate array supers. Use java_super to skip them.
  KlassHandle super (THREAD, klass()->java_super());
  int nofNewEntries = 0;

  if (PrintVtables && !klass()->oop_is_array()) {
    ResourceMark rm(THREAD);
    tty->print_cr("Initializing: %s", _klass->name()->as_C_string());
  }

#ifdef ASSERT
  oop* end_of_obj = (oop*)_klass() + _klass()->size();
  oop* end_of_vtable = (oop*)&table()[_length];
  assert(end_of_vtable <= end_of_obj, "vtable extends beyond end");
#endif

  if (Universe::is_bootstrapping()) {
    // just clear everything
    for (int i = 0; i < _length; i++) table()[i].clear();
    return;
  }

  int super_vtable_len = initialize_from_super(super);
  if (klass()->oop_is_array()) {
    assert(super_vtable_len == _length, "arrays shouldn't introduce new methods");
  } else {
    assert(_klass->oop_is_instance(), "must be instanceKlass");

    objArrayHandle methods(THREAD, ik()->methods());
    int len = methods()->length();
    int initialized = super_vtable_len;

    // update_inherited_vtable can stop for gc - ensure using handles
    for (int i = 0; i < len; i++) {
      HandleMark hm(THREAD);
      assert(methods()->obj_at(i)->is_method(), "must be a methodOop");
      methodHandle mh(THREAD, (methodOop)methods()->obj_at(i));

      bool needs_new_entry = update_inherited_vtable(ik(), mh, super_vtable_len, checkconstraints, CHECK);

      if (needs_new_entry) {
        put_method_at(mh(), initialized);
        mh()->set_vtable_index(initialized); // set primary vtable index
        initialized++;
      }
    }

    // add miranda methods; it will also update the value of initialized
    fill_in_mirandas(initialized);

    // In class hierarchies where the accessibility is not increasing (i.e., going from
    // private -> package_private -> public/protected), the vtable might actually be
    // smaller than our initial calculation.
    assert(initialized <= _length, "vtable initialization failed");
    for (; initialized < _length; initialized++) {
      put_method_at(NULL, initialized);
    }
    NOT_PRODUCT(verify(tty, true));
  }
}
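// The flow above is: copy the superclass vtable (initialize_from_super), then for each of the
// class's own methods either override an inherited slot or append a new one, then fill in
// miranda slots and pad the unused tail. The following is a minimal standalone sketch of that
// copy-then-override shape (illustrative only, not HotSpot code; the ToyMethod type and
// name-only matching are invented simplifications):

#include <string>
#include <vector>

struct ToyMethod { std::string name; };

// Build a child's dispatch table from the parent's: copy inherited slots,
// then either override a matching slot or append a new entry at a fresh index.
std::vector<const ToyMethod*> build_toy_vtable(const std::vector<const ToyMethod*>& super_vtable,
                                               const std::vector<ToyMethod>& own_methods) {
  std::vector<const ToyMethod*> vtable(super_vtable);      // "initialize_from_super" analogue
  for (const ToyMethod& m : own_methods) {
    bool overrode = false;
    for (size_t i = 0; i < super_vtable.size(); ++i) {
      if (super_vtable[i]->name == m.name) {                // matching slot => override in place
        vtable[i] = &m;
        overrode = true;
        break;
      }
    }
    if (!overrode) vtable.push_back(&m);                    // otherwise a new vtable entry
  }
  return vtable;
}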
void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
  ParCompactionManager::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);

  switch (_root_type) {
    case universe:
      Universe::oops_do(&mark_and_push_closure);
      break;

    case jni_handles:
      JNIHandles::oops_do(&mark_and_push_closure);
      break;

    case threads:
    {
      ResourceMark rm;
      MarkingCodeBlobClosure each_active_code_blob(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
      CLDToOopClosure mark_and_push_from_cld(&mark_and_push_closure);
      Threads::oops_do(&mark_and_push_closure, &mark_and_push_from_cld, &each_active_code_blob);
    }
    break;

    case object_synchronizer:
      ObjectSynchronizer::oops_do(&mark_and_push_closure);
      break;

    case flat_profiler:
      FlatProfiler::oops_do(&mark_and_push_closure);
      break;

    case management:
      Management::oops_do(&mark_and_push_closure);
      break;

    case jvmti:
      JvmtiExport::oops_do(&mark_and_push_closure);
      break;

    case system_dictionary:
      SystemDictionary::always_strong_oops_do(&mark_and_push_closure);
      break;

    case class_loader_data:
      ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, &follow_klass_closure, true);
      break;

    case code_cache:
      // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
      //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
      break;

    default:
      fatal("Unknown root type");
  }

  // Do the real work
  cm->follow_marking_stacks();
}
// This method contains no policy. You should probably // be calling invoke() instead. bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) { assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); assert(ref_processor() != NULL, "Sanity"); if (GC_locker::check_active_before_gc()) { return false; } ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); GCCause::Cause gc_cause = heap->gc_cause(); GCIdMark gc_id_mark; _gc_timer->register_gc_start(); _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start()); PSAdaptiveSizePolicy* size_policy = heap->size_policy(); // The scope of casr should end after code that can change // CollectorPolicy::_should_clear_all_soft_refs. ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy()); PSYoungGen* young_gen = heap->young_gen(); PSOldGen* old_gen = heap->old_gen(); // Increment the invocation count heap->increment_total_collections(true /* full */); // Save information needed to minimize mangling heap->record_gen_tops_before_GC(); // We need to track unique mark sweep invocations as well. _total_invocations++; AdaptiveSizePolicyOutput(size_policy, heap->total_collections()); heap->print_heap_before_gc(); heap->trace_heap_before_gc(_gc_tracer); // Fill in TLABs heap->accumulate_statistics_all_tlabs(); heap->ensure_parsability(true); // retire TLABs if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification Universe::verify(" VerifyBeforeGC:"); } // Verify object start arrays if (VerifyObjectStartArray && VerifyBeforeGC) { old_gen->verify_object_start_array(); } heap->pre_full_gc_dump(_gc_timer); // Filled in below to track the state of the young gen after the collection. bool eden_empty; bool survivors_empty; bool young_gen_empty; { HandleMark hm; TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); if (TraceOldGenTime) accumulated_time()->start(); // Let the size policy know we're starting size_policy->major_collection_begin(); CodeCache::gc_prologue(); BiasedLocking::preserve_marks(); // Capture heap size before collection for printing. size_t prev_used = heap->used(); // Capture metadata size before collection for sizing. size_t metadata_prev_used = MetaspaceAux::used_bytes(); // For PrintGCDetails size_t old_gen_prev_used = old_gen->used_in_bytes(); size_t young_gen_prev_used = young_gen->used_in_bytes(); allocate_stacks(); #if defined(COMPILER2) || INCLUDE_JVMCI DerivedPointerTable::clear(); #endif ref_processor()->enable_discovery(); ref_processor()->setup_policy(clear_all_softrefs); mark_sweep_phase1(clear_all_softrefs); mark_sweep_phase2(); #if defined(COMPILER2) || INCLUDE_JVMCI // Don't add any more derived pointers during phase3 assert(DerivedPointerTable::is_active(), "Sanity"); DerivedPointerTable::set_active(false); #endif mark_sweep_phase3(); mark_sweep_phase4(); restore_marks(); deallocate_stacks(); if (ZapUnusedHeapArea) { // Do a complete mangle (top to end) because the usage for // scratch does not maintain a top pointer. young_gen->to_space()->mangle_unused_area_complete(); } eden_empty = young_gen->eden_space()->is_empty(); if (!eden_empty) { eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen); } // Update heap occupancy information which is used as // input to soft ref clearing policy at the next gc. 
Universe::update_heap_info_at_gc(); survivors_empty = young_gen->from_space()->is_empty() && young_gen->to_space()->is_empty(); young_gen_empty = eden_empty && survivors_empty; ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set()); MemRegion old_mr = heap->old_gen()->reserved(); if (young_gen_empty) { modBS->clear(MemRegion(old_mr.start(), old_mr.end())); } else { modBS->invalidate(MemRegion(old_mr.start(), old_mr.end())); } // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); MetaspaceAux::verify_metrics(); BiasedLocking::restore_marks(); CodeCache::gc_epilogue(); JvmtiExport::gc_epilogue(); #if defined(COMPILER2) || INCLUDE_JVMCI DerivedPointerTable::update_pointers(); #endif ref_processor()->enqueue_discovered_references(NULL); // Update time of last GC reset_millis_since_last_gc(); // Let the size policy know we're done size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause); if (UseAdaptiveSizePolicy) { if (PrintAdaptiveSizePolicy) { gclog_or_tty->print("AdaptiveSizeStart: "); gclog_or_tty->stamp(); gclog_or_tty->print_cr(" collection: %d ", heap->total_collections()); if (Verbose) { gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT, old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes()); } } // Don't check if the size_policy is ready here. Let // the size_policy check that internally. if (UseAdaptiveGenerationSizePolicyAtMajorCollection && AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) { // Swap the survivor spaces if from_space is empty. The // resize_young_gen() called below is normally used after // a successful young GC and swapping of survivor spaces; // otherwise, it will fail to resize the young gen with // the current implementation. 
if (young_gen->from_space()->is_empty()) { young_gen->from_space()->clear(SpaceDecorator::Mangle); young_gen->swap_spaces(); } // Calculate optimal free space amounts assert(young_gen->max_size() > young_gen->from_space()->capacity_in_bytes() + young_gen->to_space()->capacity_in_bytes(), "Sizes of space in young gen are out-of-bounds"); size_t young_live = young_gen->used_in_bytes(); size_t eden_live = young_gen->eden_space()->used_in_bytes(); size_t old_live = old_gen->used_in_bytes(); size_t cur_eden = young_gen->eden_space()->capacity_in_bytes(); size_t max_old_gen_size = old_gen->max_gen_size(); size_t max_eden_size = young_gen->max_size() - young_gen->from_space()->capacity_in_bytes() - young_gen->to_space()->capacity_in_bytes(); // Used for diagnostics size_policy->clear_generation_free_space_flags(); size_policy->compute_generations_free_space(young_live, eden_live, old_live, cur_eden, max_old_gen_size, max_eden_size, true /* full gc*/); size_policy->check_gc_overhead_limit(young_live, eden_live, max_old_gen_size, max_eden_size, true /* full gc*/, gc_cause, heap->collector_policy()); size_policy->decay_supplemental_growth(true /* full gc*/); heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes()); heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(), size_policy->calculated_survivor_size_in_bytes()); } if (PrintAdaptiveSizePolicy) { gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ", heap->total_collections()); } } if (UsePerfData) { heap->gc_policy_counters()->update_counters(); heap->gc_policy_counters()->update_old_capacity( old_gen->capacity_in_bytes()); heap->gc_policy_counters()->update_young_capacity( young_gen->capacity_in_bytes()); } heap->resize_all_tlabs(); // We collected the heap, recalculate the metaspace capacity MetaspaceGC::compute_new_size(); if (TraceOldGenTime) accumulated_time()->stop(); if (PrintGC) { if (PrintGCDetails) { // Don't print a GC timestamp here. This is after the GC so // would be confusing. young_gen->print_used_change(young_gen_prev_used); old_gen->print_used_change(old_gen_prev_used); } heap->print_heap_change(prev_used); if (PrintGCDetails) { MetaspaceAux::print_metaspace_change(metadata_prev_used); } } // Track memory usage and detect low memory MemoryService::track_memory_usage(); heap->update_counters(); } if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification Universe::verify(" VerifyAfterGC:"); } // Re-verify object start arrays if (VerifyObjectStartArray && VerifyAfterGC) { old_gen->verify_object_start_array(); } if (ZapUnusedHeapArea) { old_gen->object_space()->check_mangled_unused_area_complete(); } NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); heap->print_heap_after_gc(); heap->trace_heap_after_gc(_gc_tracer); heap->post_full_gc_dump(_gc_timer); #ifdef TRACESPINNING ParallelTaskTerminator::print_termination_counts(); #endif _gc_timer->register_gc_end(); _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions()); return true; }
KNIEXPORT void KNI_FatalError(const char* message) {
  NOT_PRODUCT(tty->print("JVM_FATAL ERROR in native method: "));
  tty->print_cr("%s", message);
  JVM_FATAL(native_method_error);
}
// Should only be called when we actually have the start of an object
// Otherwise it is an internal error
bool JSON::parse_json_object() {
  NOT_PRODUCT(const char* prev_pos);
  int c;

  mark_pos();
  // Check that we are not called in error
  if (expect_any("{", "object start", INTERNAL_ERROR) <= 0) {
    return false;
  }

  if (!callback(JSON_OBJECT_BEGIN, NULL, level++)) {
    return false;
  }

  for (;;) {
    mark_pos();
    c = skip_to_token();
    if (c == 0) {
      error(SYNTAX_ERROR, "EOS when expecting an object key or object end");
      return false;
    } else if (c < 0) {
      return false;
    } else if (c == '}') {
      // We got here from either empty object "{}" or ending comma "{a:1,}"
      next();
      break;
    }

    NOT_PRODUCT(prev_pos = pos);
    if (parse_json_key() == false) {
      return false;
    }
    assert(pos > prev_pos, "parsing stalled");

    skip_to_token();
    mark_pos();
    if (expect_any(":", "object key-value separator") <= 0) {
      return false;
    }

    skip_to_token();
    mark_pos();
    NOT_PRODUCT(prev_pos = pos);
    if (parse_json_value() == false) {
      return false;
    }
    assert(pos > prev_pos, "parsing stalled");

    c = skip_to_token();
    mark_pos();
    if (expect_any(",}", "value separator or object end") <= 0) {
      return false;
    }
    if (c == '}') {
      break;
    }
  }

  assert(c == '}', "object parsing ended without object end token ('}')");
  return callback(JSON_OBJECT_END, NULL, --level);
}
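// Note that the loop above accepts both an empty object "{}" and a trailing comma before '}'
// (e.g. "{a:1,}"), and bails out on the first structural error. Below is a minimal standalone
// sketch of the same accept/reject behavior over a much-simplified "key:value" grammar
// (illustrative only, not HotSpot's JSON class; tokenizing and callbacks are omitted):

#include <cctype>

// Accepts a flat object of the form { key : value , ... } where key and value are single
// identifier/digit runs; allows "{}" and a trailing comma, like the loop above.
static bool parse_toy_object(const char*& p) {
  auto skip_ws   = [&] { while (*p && isspace((unsigned char)*p)) ++p; };
  auto skip_word = [&] { bool any = false; while (isalnum((unsigned char)*p)) { ++p; any = true; } return any; };

  skip_ws();
  if (*p != '{') return false;                  // caller must hand us an object start
  ++p;
  for (;;) {
    skip_ws();
    if (*p == '}') { ++p; return true; }        // "{}" or trailing-comma case
    if (!skip_word()) return false;             // key
    skip_ws();
    if (*p != ':') return false;                // key-value separator
    ++p; skip_ws();
    if (!skip_word()) return false;             // value
    skip_ws();
    if (*p == ',') { ++p; continue; }           // value separator, keep going
    if (*p == '}') { ++p; return true; }        // object end
    return false;
  }
}

// e.g. parse_toy_object accepts "{}", "{a:1,}" and "{a:1, b:2}" but rejects "{a 1}".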
void ConstantPoolCacheEntry::set_method_handle_common(const constantPoolHandle& cpool,
                                                      Bytecodes::Code invoke_code,
                                                      const CallInfo &call_info) {
  // NOTE: This CPCE can be the subject of data races.
  // There are three words to update: flags, refs[f2], f1 (in that order).
  // Writers must store all other values before f1.
  // Readers must test f1 first for non-null before reading other fields.
  // Competing writers must acquire exclusive access via a lock.
  // A losing writer waits on the lock until the winner writes f1 and leaves
  // the lock, so that when the losing writer returns, he can use the linked
  // cache entry.

  objArrayHandle resolved_references = cpool->resolved_references();
  // Use the resolved_references() lock for this cpCache entry.
  // resolved_references are created for all classes with Invokedynamic, MethodHandle
  // or MethodType constant pool cache entries.
  assert(resolved_references() != NULL,
         "a resolved_references array should have been created for this class");
  ObjectLocker ol(resolved_references, Thread::current());

  if (!is_f1_null()) {
    return;
  }

  const methodHandle adapter = call_info.resolved_method();
  const Handle appendix      = call_info.resolved_appendix();
  const Handle method_type   = call_info.resolved_method_type();
  const bool has_appendix    = appendix.not_null();
  const bool has_method_type = method_type.not_null();

  // Write the flags.
  set_method_flags(as_TosState(adapter->result_type()),
                   ((has_appendix    ? 1 : 0) << has_appendix_shift   ) |
                   ((has_method_type ? 1 : 0) << has_method_type_shift) |
                   (                   1      << is_final_shift       ),
                   adapter->size_of_parameters());

  if (TraceInvokeDynamic) {
    ttyLocker ttyl;
    tty->print_cr("set_method_handle bc=%d appendix=" PTR_FORMAT "%s method_type=" PTR_FORMAT "%s method=" PTR_FORMAT " ",
                  invoke_code,
                  p2i(appendix()),    (has_appendix    ? "" : " (unused)"),
                  p2i(method_type()), (has_method_type ? "" : " (unused)"),
                  p2i(adapter()));
    adapter->print();
    if (has_appendix) appendix()->print();
  }

  // Method handle invokes and invokedynamic sites use both cp cache words.
  // refs[f2], if not null, contains a value passed as a trailing argument to the adapter.
  // In the general case, this could be the call site's MethodType,
  // for use with java.lang.Invokers.checkExactType, or else a CallSite object.
  // f1 contains the adapter method which manages the actual call.
  // In the general case, this is a compiled LambdaForm.
  // (The Java code is free to optimize these calls by binding other
  //  sorts of methods and appendices to call sites.)
  // JVM-level linking is via f1, as if for invokespecial, and signatures are erased.
  // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
  // Even with the appendix, the method will never take more than 255 parameter slots.
  //
  // This means that given a call site like (List)mh.invoke("foo"),
  // the f1 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
  // not '(Ljava/lang/String;)Ljava/util/List;'.
  // The fact that String and List are involved is encoded in the MethodType in refs[f2].
  // This allows us to create fewer Methods, while keeping type safety.
  //

  // Store appendix, if any.
  if (has_appendix) {
    const int appendix_index = f2_as_index() + _indy_resolved_references_appendix_offset;
    assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(appendix_index) == NULL, "init just once");
    resolved_references->obj_at_put(appendix_index, appendix());
  }

  // Store MethodType, if any.
  if (has_method_type) {
    const int method_type_index = f2_as_index() + _indy_resolved_references_method_type_offset;
    assert(method_type_index >= 0 && method_type_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(method_type_index) == NULL, "init just once");
    resolved_references->obj_at_put(method_type_index, method_type());
  }

  release_set_f1(adapter());  // This must be the last one to set (see NOTE above)!

  // The interpreter assembly code does not check byte_2,
  // but it is used by is_resolved, method_if_resolved, etc.
  set_bytecode_1(invoke_code);
  NOT_PRODUCT(verify(tty));
  if (TraceInvokeDynamic) {
    ttyLocker ttyl;
    this->print(tty, 0);
  }
}
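// The "store everything else, then release-store f1; readers test f1 first" protocol described
// in the NOTE above is a standard publish-with-release/consume-with-acquire pattern. Below is a
// minimal standalone sketch of that pattern (illustrative only, not HotSpot code; the Entry type
// and field names are invented):

#include <atomic>
#include <mutex>

struct ToyCacheEntry {
  int flags = 0;
  void* ref = nullptr;                 // analogous to refs[f2]
  std::atomic<void*> f1{nullptr};      // published last
  std::mutex lock;                     // competing writers serialize here

  void publish(int new_flags, void* new_ref, void* adapter) {
    std::lock_guard<std::mutex> g(lock);
    if (f1.load(std::memory_order_acquire) != nullptr) return;  // lost the race; reuse winner's entry
    flags = new_flags;                 // ordinary stores first...
    ref   = new_ref;
    f1.store(adapter, std::memory_order_release);               // ...then release-store f1
  }

  void* read_if_resolved(int* out_flags, void** out_ref) {
    void* a = f1.load(std::memory_order_acquire);               // readers test f1 first
    if (a == nullptr) return nullptr;
    *out_flags = flags;                // safe to read only after seeing a non-null f1
    *out_ref   = ref;
    return a;
  }
};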
klassOop instanceKlassKlass::allocate_instance_klass(int vtable_len, int itable_len,
                                                     int static_field_size,
                                                     int nonstatic_oop_map_size,
                                                     ReferenceType rt, TRAPS) {
  int size = instanceKlass::object_size(align_object_offset(vtable_len) +
                                        align_object_offset(itable_len) +
                                        static_field_size + nonstatic_oop_map_size);

  // Allocation
  KlassHandle h_this_klass(THREAD, as_klassOop());
  KlassHandle k;
  if (rt == REF_NONE) {
    // regular klass
    instanceKlass o;
    k = base_create_klass(h_this_klass, size, o.vtbl_value(), CHECK_0);
  } else {
    // reference klass
    instanceRefKlass o;
    k = base_create_klass(h_this_klass, size, o.vtbl_value(), CHECK_0);
  }
  instanceKlass* ik = (instanceKlass*) k()->klass_part();
  assert(!k()->is_parsable(), "not expecting parsability yet.");

  // The sizes of these fields are used for determining the size of the
  // instanceKlassOop. It is critical that these are set to the right
  // sizes before the first GC, i.e., when we allocate the mirror.
  ik->set_vtable_length(vtable_len);
  ik->set_itable_length(itable_len);
  ik->set_static_field_size(static_field_size);
  ik->set_nonstatic_oop_map_size(nonstatic_oop_map_size);
  assert(k()->size() == size, "wrong size for object");

  ik->set_array_klasses(NULL);
  ik->set_methods(NULL);
  ik->set_method_ordering(NULL);
  ik->set_local_interfaces(NULL);
  ik->set_transitive_interfaces(NULL);
  ik->init_implementor();
  ik->set_fields(NULL);
  ik->set_constants(NULL);
  ik->set_class_loader(NULL);
  ik->set_protection_domain(NULL);
  ik->set_signers(NULL);
  ik->set_source_file_name(NULL);
  ik->set_source_debug_extension(NULL);
  ik->set_inner_classes(NULL);
  ik->set_static_oop_field_size(0);
  ik->set_nonstatic_field_size(0);
  ik->set_is_marked_dependent(false);
  ik->set_init_state(instanceKlass::allocated);
  ik->set_init_thread(NULL);
  ik->set_reference_type(rt);
  ik->set_oop_map_cache(NULL);
  ik->set_jni_ids(NULL);
  ik->set_osr_nmethods_head(NULL);
  ik->set_breakpoints(NULL);
  ik->init_previous_version();

  // initialize the non-header words to zero
  intptr_t* p = (intptr_t*)k();
  for (int index = instanceKlass::header_size(); index < size; index++) {
    p[index] = NULL;
  }

  // To get verify to work - must be set to partially loaded before first GC point.
  NOT_PRODUCT(k()->set_partially_loaded());

  assert(k()->is_parsable(), "should be parsable here.");

  // GC can happen here
  java_lang_Class::create_mirror(k, CHECK_0); // Allocate mirror
  return k();
}
inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::large_range);
}
inline void CMSBitMap::par_clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::small_range);
}
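// Both helpers clear a run of bits in a mark bitmap, where heapWordToOffset() turns a heap
// address into a bit index. Below is a minimal standalone sketch of that address-to-bit mapping
// and range clearing (illustrative only; it assumes a simple one-bit-per-8-byte-word mapping and
// invented names, not CMS's actual shifter or parallel clearing):

#include <cstdint>
#include <vector>

// Toy mark bitmap: one bit per 8-byte "heap word" of a region starting at 'base'.
struct ToyMarkBitMap {
  uintptr_t base;                // start of the covered region
  std::vector<uint8_t> bits;     // one bit per word

  ToyMarkBitMap(uintptr_t base_addr, size_t words)
    : base(base_addr), bits((words + 7) / 8, 0) {}

  // Analogue of heapWordToOffset(): address -> bit index.
  size_t word_to_offset(uintptr_t addr) const { return (addr - base) >> 3; }

  void set(size_t i)   { bits[i >> 3] |=  (uint8_t)(1u << (i & 7)); }
  void clear(size_t i) { bits[i >> 3] &= (uint8_t)~(1u << (i & 7)); }

  // Analogue of clear_range(): clear every bit in [begin_addr, end_addr).
  void clear_range(uintptr_t begin_addr, uintptr_t end_addr) {
    for (size_t i = word_to_offset(begin_addr); i < word_to_offset(end_addr); ++i) {
      clear(i);
    }
  }
};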
// This method contains no policy. You should probably // be calling invoke() instead. void PSMarkSweep::invoke_no_policy(bool& notify_ref_lock, bool clear_all_softrefs) { assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); assert(ref_processor() != NULL, "Sanity"); if (GC_locker::is_active()) return; ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); PSYoungGen* young_gen = heap->young_gen(); PSOldGen* old_gen = heap->old_gen(); PSPermGen* perm_gen = heap->perm_gen(); // Increment the invocation count heap->increment_total_collections(); // We need to track unique mark sweep invocations as well. _total_invocations++; if (PrintHeapAtGC) { gclog_or_tty->print_cr(" {Heap before GC invocations=%d:", heap->total_collections()); Universe::print(); } // Fill in TLABs heap->ensure_parseability(); if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification tty->print(" VerifyBeforeGC:"); Universe::verify(true); } { HandleMark hm; TraceTime t1("Full GC", PrintGC, true, gclog_or_tty); TraceCollectorStats tcs(counters()); if (TraceGen1Time) accumulated_time()->start(); // Let the size policy know we're starting AdaptiveSizePolicy* size_policy = heap->size_policy(); size_policy->major_collection_begin(); // When collecting the permanent generation methodOops may be moving, // so we either have to flush all bcp data or convert it into bci. NOT_CORE(CodeCache::gc_prologue()); Threads::gc_prologue(); // Capture heap size before collection for printing. size_t prev_used = heap->used(); // Capture perm gen size before collection for sizing. size_t perm_gen_prev_used = perm_gen->used_in_bytes(); bool marked_for_unloading = false; allocate_stacks(); NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); COMPILER2_ONLY(DerivedPointerTable::clear()); ref_processor()->enable_discovery(); mark_sweep_phase1(marked_for_unloading, clear_all_softrefs); mark_sweep_phase2(); // Don't add any more derived pointers during phase3 COMPILER2_ONLY(assert(DerivedPointerTable::is_active(), "Sanity")); COMPILER2_ONLY(DerivedPointerTable::set_active(false)); mark_sweep_phase3(); mark_sweep_phase4(); restore_marks(); deallocate_stacks(); // "free at last gc" is calculated from these. 
Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity()); Universe::set_heap_used_at_last_gc(Universe::heap()->used()); bool all_empty = young_gen->eden_space()->is_empty() && young_gen->from_space()->is_empty() && young_gen->to_space()->is_empty(); BarrierSet* bs = heap->barrier_set(); if (bs->is_a(BarrierSet::ModRef)) { ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs; MemRegion old_mr = heap->old_gen()->reserved(); MemRegion perm_mr = heap->perm_gen()->reserved(); assert(old_mr.end() <= perm_mr.start(), "Generations out of order"); if (all_empty) { modBS->clear(MemRegion(old_mr.start(), perm_mr.end())); } else { modBS->invalidate(MemRegion(old_mr.start(), perm_mr.end())); } } Threads::gc_epilogue(); NOT_CORE(CodeCache::gc_epilogue()); COMPILER2_ONLY(DerivedPointerTable::update_pointers()); notify_ref_lock |= ref_processor()->enqueue_discovered_references(); // Update time of last GC reset_millis_since_last_gc(); // Let the size policy know we're done size_policy->major_collection_end(old_gen->used_in_bytes()); if (UseAdaptiveSizePolicy) { if (PrintAdaptiveSizePolicy) { tty->print_cr("AdaptiveSizeStart: collection: %d ", heap->total_collections()); } // Calculate optimal free space amounts size_policy->compute_generation_free_space(young_gen->used_in_bytes(), old_gen->used_in_bytes(), perm_gen->used_in_bytes(), true /* full gc*/); // Resize old and young generations old_gen->resize(size_policy->calculated_old_free_size_in_bytes()); young_gen->resize(size_policy->calculated_eden_size_in_bytes(), size_policy->calculated_survivor_size_in_bytes()); if (PrintAdaptiveSizePolicy) { tty->print_cr("AdaptiveSizeStop: collection: %d ", heap->total_collections()); } } // We collected the perm gen, so we'll resize it here. perm_gen->compute_new_size(perm_gen_prev_used); if (TraceGen1Time) accumulated_time()->stop(); if (PrintGC) { heap->print_heap_change(prev_used); } heap->update_counters(); } if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification tty->print(" VerifyAfterGC:"); Universe::verify(false); } NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); if (PrintHeapAtGC) { gclog_or_tty->print_cr(" Heap after GC invocations=%d:", heap->total_collections()); Universe::print(); gclog_or_tty->print("} "); } }
// This method contains no policy. You should probably // be calling invoke() instead. bool PSScavenge::invoke_no_policy() { assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); assert(_preserved_mark_stack.is_empty(), "should be empty"); assert(_preserved_oop_stack.is_empty(), "should be empty"); _gc_timer.register_gc_start(); TimeStamp scavenge_entry; TimeStamp scavenge_midpoint; TimeStamp scavenge_exit; scavenge_entry.update(); if (GC_locker::check_active_before_gc()) { return false; } ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); GCCause::Cause gc_cause = heap->gc_cause(); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); // Check for potential problems. if (!should_attempt_scavenge()) { return false; } _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start()); bool promotion_failure_occurred = false; PSYoungGen* young_gen = heap->young_gen(); PSOldGen* old_gen = heap->old_gen(); PSAdaptiveSizePolicy* size_policy = heap->size_policy(); heap->increment_total_collections(); AdaptiveSizePolicyOutput(size_policy, heap->total_collections()); if ((gc_cause != GCCause::_java_lang_system_gc) || UseAdaptiveSizePolicyWithSystemGC) { // Gather the feedback data for eden occupancy. young_gen->eden_space()->accumulate_statistics(); } if (ZapUnusedHeapArea) { // Save information needed to minimize mangling heap->record_gen_tops_before_GC(); } heap->print_heap_before_gc(); heap->trace_heap_before_gc(&_gc_tracer); assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity"); assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity"); size_t prev_used = heap->used(); // Fill in TLABs heap->accumulate_statistics_all_tlabs(); heap->ensure_parsability(true); // retire TLABs if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification Universe::verify(" VerifyBeforeGC:"); } { ResourceMark rm; HandleMark hm; gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(false /* not full GC */,gc_cause); if (TraceGen0Time) accumulated_time()->start(); // Let the size policy know we're starting size_policy->minor_collection_begin(); // Verify the object start arrays. if (VerifyObjectStartArray && VerifyBeforeGC) { old_gen->verify_object_start_array(); } // Verify no unmarked old->young roots if (VerifyRememberedSets) { CardTableExtension::verify_all_young_refs_imprecise(); } if (!ScavengeWithObjectsInToSpace) { assert(young_gen->to_space()->is_empty(), "Attempt to scavenge with live objects in to_space"); young_gen->to_space()->clear(SpaceDecorator::Mangle); } else if (ZapUnusedHeapArea) { young_gen->to_space()->mangle_unused_area(); } save_to_space_top_before_gc(); COMPILER2_PRESENT(DerivedPointerTable::clear()); reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); reference_processor()->setup_policy(false); // We track how much was promoted to the next generation for // the AdaptiveSizePolicy. size_t old_gen_used_before = old_gen->used_in_bytes(); // For PrintGCDetails size_t young_gen_used_before = young_gen->used_in_bytes(); // Reset our survivor overflow. 
set_survivor_overflow(false); // We need to save the old top values before // creating the promotion_manager. We pass the top // values to the card_table, to prevent it from // straying into the promotion labs. HeapWord* old_top = old_gen->object_space()->top(); // Release all previously held resources gc_task_manager()->release_all_resources(); // Set the number of GC threads to be used in this collection gc_task_manager()->set_active_gang(); gc_task_manager()->task_idle_workers(); // Get the active number of workers here and use that value // throughout the methods. uint active_workers = gc_task_manager()->active_workers(); heap->set_par_threads(active_workers); PSPromotionManager::pre_scavenge(); // We'll use the promotion manager again later. PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager(); { GCTraceTime tm("Scavenge", false, false, &_gc_timer); ParallelScavengeHeap::ParStrongRootsScope psrs; GCTaskQueue* q = GCTaskQueue::create(); if (!old_gen->object_space()->is_empty()) { // There are only old-to-young pointers if there are objects // in the old gen. uint stripe_total = active_workers; for(uint i=0; i < stripe_total; i++) { q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total)); } } q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles)); // We scan the thread roots in parallel Threads::create_thread_roots_tasks(q); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache)); ParallelTaskTerminator terminator( active_workers, (TaskQueueSetSuper*) promotion_manager->stack_array_depth()); if (active_workers > 1) { for (uint j = 0; j < active_workers; j++) { q->enqueue(new StealTask(&terminator)); } } gc_task_manager()->execute_and_wait(q); } scavenge_midpoint.update(); // Process reference objects discovered during scavenge { GCTraceTime tm("References", false, false, &_gc_timer); reference_processor()->setup_policy(false); // not always_clear reference_processor()->set_active_mt_degree(active_workers); PSKeepAliveClosure keep_alive(promotion_manager); PSEvacuateFollowersClosure evac_followers(promotion_manager); ReferenceProcessorStats stats; if (reference_processor()->processing_is_mt()) { PSRefProcTaskExecutor task_executor; stats = reference_processor()->process_discovered_references( &_is_alive_closure, &keep_alive, &evac_followers, &task_executor, &_gc_timer); } else { stats = reference_processor()->process_discovered_references( &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer); } _gc_tracer.report_gc_reference_stats(stats); // Enqueue reference objects discovered during scavenge. if (reference_processor()->processing_is_mt()) { PSRefProcTaskExecutor task_executor; reference_processor()->enqueue_discovered_references(&task_executor); } else { reference_processor()->enqueue_discovered_references(NULL); } } { GCTraceTime tm("StringTable", false, false, &_gc_timer); // Unlink any dead interned Strings and process the remaining live ones. 
PSScavengeRootsClosure root_closure(promotion_manager); StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure); } // Finally, flush the promotion_manager's labs, and deallocate its stacks. promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer); if (promotion_failure_occurred) { clean_up_failed_promotion(); if (PrintGC) { gclog_or_tty->print("--"); } } // Let the size policy know we're done. Note that we count promotion // failure cleanup time as part of the collection (otherwise, we're // implicitly saying it's mutator time). size_policy->minor_collection_end(gc_cause); if (!promotion_failure_occurred) { // Swap the survivor spaces. young_gen->eden_space()->clear(SpaceDecorator::Mangle); young_gen->from_space()->clear(SpaceDecorator::Mangle); young_gen->swap_spaces(); size_t survived = young_gen->from_space()->used_in_bytes(); size_t promoted = old_gen->used_in_bytes() - old_gen_used_before; size_policy->update_averages(_survivor_overflow, survived, promoted); // A successful scavenge should restart the GC time limit count which is // for full GC's. size_policy->reset_gc_overhead_limit_count(); if (UseAdaptiveSizePolicy) { // Calculate the new survivor size and tenuring threshold if (PrintAdaptiveSizePolicy) { gclog_or_tty->print("AdaptiveSizeStart: "); gclog_or_tty->stamp(); gclog_or_tty->print_cr(" collection: %d ", heap->total_collections()); if (Verbose) { gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d", old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes()); } } if (UsePerfData) { PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters(); counters->update_old_eden_size( size_policy->calculated_eden_size_in_bytes()); counters->update_old_promo_size( size_policy->calculated_promo_size_in_bytes()); counters->update_old_capacity(old_gen->capacity_in_bytes()); counters->update_young_capacity(young_gen->capacity_in_bytes()); counters->update_survived(survived); counters->update_promoted(promoted); counters->update_survivor_overflowed(_survivor_overflow); } size_t max_young_size = young_gen->max_size(); // Deciding a free ratio in the young generation is tricky, so if // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implicating // that the old generation size may have been limited because of them) we // should then limit our young generation size using NewRatio to have it // follow the old generation size. if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) { max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size()); } size_t survivor_limit = size_policy->max_survivor_size(max_young_size); _tenuring_threshold = size_policy->compute_survivor_space_size_and_threshold( _survivor_overflow, _tenuring_threshold, survivor_limit); if (PrintTenuringDistribution) { gclog_or_tty->cr(); gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)", size_policy->calculated_survivor_size_in_bytes(), _tenuring_threshold, MaxTenuringThreshold); } if (UsePerfData) { PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters(); counters->update_tenuring_threshold(_tenuring_threshold); counters->update_survivor_size_counters(); } // Do call at minor collections? // Don't check if the size_policy is ready at this // level. Let the size_policy check that internally. 
if (UseAdaptiveGenerationSizePolicyAtMinorCollection && ((gc_cause != GCCause::_java_lang_system_gc) || UseAdaptiveSizePolicyWithSystemGC)) { // Calculate optimal free space amounts assert(young_gen->max_size() > young_gen->from_space()->capacity_in_bytes() + young_gen->to_space()->capacity_in_bytes(), "Sizes of space in young gen are out-of-bounds"); size_t young_live = young_gen->used_in_bytes(); size_t eden_live = young_gen->eden_space()->used_in_bytes(); size_t cur_eden = young_gen->eden_space()->capacity_in_bytes(); size_t max_old_gen_size = old_gen->max_gen_size(); size_t max_eden_size = max_young_size - young_gen->from_space()->capacity_in_bytes() - young_gen->to_space()->capacity_in_bytes(); // Used for diagnostics size_policy->clear_generation_free_space_flags(); size_policy->compute_eden_space_size(young_live, eden_live, cur_eden, max_eden_size, false /* not full gc*/); size_policy->check_gc_overhead_limit(young_live, eden_live, max_old_gen_size, max_eden_size, false /* not full gc*/, gc_cause, heap->collector_policy()); size_policy->decay_supplemental_growth(false /* not full gc*/); } // Resize the young generation at every collection // even if new sizes have not been calculated. This is // to allow resizes that may have been inhibited by the // relative location of the "to" and "from" spaces. // Resizing the old gen at minor collections can cause increases // that don't feed back to the generation sizing policy until // a major collection. Don't resize the old gen here. heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(), size_policy->calculated_survivor_size_in_bytes()); if (PrintAdaptiveSizePolicy) { gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ", heap->total_collections()); } } // Update the structure of the eden. With NUMA-eden, CPU hotplugging or offlining can // change the heap layout. Make sure eden is reshaped if that's the case. // Also, update() will cause adaptive NUMA chunk resizing. assert(young_gen->eden_space()->is_empty(), "eden space should be empty now"); young_gen->eden_space()->update(); heap->gc_policy_counters()->update_counters(); heap->resize_all_tlabs(); assert(young_gen->to_space()->is_empty(), "to space should be empty now"); } COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); NOT_PRODUCT(reference_processor()->verify_no_references_recorded()); { GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer); CodeCache::prune_scavenge_root_nmethods(); } // Re-verify object start arrays if (VerifyObjectStartArray && VerifyAfterGC) { old_gen->verify_object_start_array(); } // Verify all old -> young cards are now precise if (VerifyRememberedSets) { // Precise verification will give false positives. Until this is fixed, // use imprecise verification. // CardTableExtension::verify_all_young_refs_precise(); CardTableExtension::verify_all_young_refs_imprecise(); } if (TraceGen0Time) accumulated_time()->stop(); if (PrintGC) { if (PrintGCDetails) { // Don't print a GC timestamp here. This is after the GC so // would be confusing.
young_gen->print_used_change(young_gen_used_before); } heap->print_heap_change(prev_used); } // Track memory usage and detect low memory MemoryService::track_memory_usage(); heap->update_counters(); gc_task_manager()->release_idle_workers(); } if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification Universe::verify(" VerifyAfterGC:"); } heap->print_heap_after_gc(); heap->trace_heap_after_gc(&_gc_tracer); _gc_tracer.report_tenuring_threshold(tenuring_threshold()); if (ZapUnusedHeapArea) { young_gen->eden_space()->check_mangled_unused_area_complete(); young_gen->from_space()->check_mangled_unused_area_complete(); young_gen->to_space()->check_mangled_unused_area_complete(); } scavenge_exit.update(); if (PrintGCTaskTimeStamps) { tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT, scavenge_entry.ticks(), scavenge_midpoint.ticks(), scavenge_exit.ticks()); gc_task_manager()->print_task_time_stamps(); } #ifdef TRACESPINNING ParallelTaskTerminator::print_termination_counts(); #endif _gc_timer.register_gc_end(); _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions()); return !promotion_failure_occurred; }
// This method contains no policy. You should probably // be calling invoke() instead. void PSScavenge::invoke_no_policy(bool& notify_ref_lock) { assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); TimeStamp scavenge_entry; TimeStamp scavenge_midpoint; TimeStamp scavenge_exit; scavenge_entry.update(); if (GC_locker::is_active()) return; ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); // Check for potential problems. if (!should_attempt_scavenge()) { return; } PSYoungGen* young_gen = heap->young_gen(); PSOldGen* old_gen = heap->old_gen(); PSPermGen* perm_gen = heap->perm_gen(); AdaptiveSizePolicy* size_policy = heap->size_policy(); heap->increment_total_collections(); if (PrintHeapAtGC){ gclog_or_tty->print_cr(" {Heap before GC invocations=%d:", heap->total_collections()); Universe::print(); } assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity"); assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity"); size_t prev_used = heap->used(); assert(promotion_failed() == false, "Sanity"); // Fill in TLABs heap->ensure_parseability(); if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification tty->print(" VerifyBeforeGC:"); Universe::verify(true); } { ResourceMark rm; HandleMark hm; TraceTime t1("GC", PrintGC, true, gclog_or_tty); TraceCollectorStats tcs(counters()); if (TraceGen0Time) accumulated_time()->start(); // Let the size policy know we're starting size_policy->minor_collection_begin(); // Verify no unmarked old->young roots if (VerifyRememberedSets) { old_gen->verify_object_start_array(); perm_gen->verify_object_start_array(); CardTableExtension::verify_all_young_refs_imprecise(); } assert(young_gen->to_space()->is_empty(), "Attempt to scavenge with live objects in to_space"); young_gen->to_space()->clear(); NOT_PRODUCT(reference_processor()->verify_no_references_recorded()); COMPILER2_ONLY(DerivedPointerTable::clear();); reference_processor()->enable_discovery(); // We track how much was promoted to the next generation for // the AdaptiveSizePolicy. size_t old_gen_used_before = old_gen->object_space()->used_in_bytes(); // Reset our survivor overflow. set_survivor_overflow(false); // We need to save the old/perm top values before // creating the promotion_manager. We pass the top // values to the card_table, to prevent it from // straying into the promotion labs. HeapWord* old_top = old_gen->object_space()->top(); HeapWord* perm_top = perm_gen->object_space()->top(); // Release all previously held resources gc_task_manager()->release_all_resources(); PSPromotionManager::pre_scavenge(); // We'll use the promotion manager again later. 
PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager(); { // TraceTime("Roots"); GCTaskQueue* q = GCTaskQueue::create(); for(uint i=0; i<ParallelGCThreads; i++) { q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i)); } q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles)); // We scan the thread roots in parallel Threads::create_thread_roots_tasks(q); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary)); if (ParallelGCThreads>1) { for (uint j=0; j<ParallelGCThreads-1; j++) { q->enqueue(new StealTask(false)); } q->enqueue(new StealTask(true)); } WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create(); q->enqueue(fin); gc_task_manager()->add_list(q); fin->wait_for(); // We have to release the barrier tasks! WaitForBarrierGCTask::destroy(fin); } scavenge_midpoint.update(); NOT_COMPILER2(ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy()); COMPILER2_ONLY(ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy()); PSIsAliveClosure is_alive; PSKeepAliveClosure keep_alive(promotion_manager); PSEvacuateFollowersClosure evac_followers(promotion_manager); // Process reference objects discovered during scavenge reference_processor()->process_discovered_references(soft_ref_policy, &is_alive, &keep_alive, &evac_followers); // Enqueue reference objects discovered during scavenge. notify_ref_lock = reference_processor()->enqueue_discovered_references(); // Finally, flush the promotion_manager's labs, and deallocate its stacks. assert(promotion_manager->claimed_stack()->size() == 0, "Sanity"); PSPromotionManager::post_scavenge(); bool scavenge_promotion_failure = promotion_failed(); if (scavenge_promotion_failure) { clean_up_failed_promotion(); if (PrintGC) { gclog_or_tty->print("--"); } } // Let the size policy know we're done. Note that we count promotion // failure cleanup time as part of the collection (otherwise, we're implicitly // saying it's mutator time). size_policy->minor_collection_end(); if (!scavenge_promotion_failure) { // Swap the survivor spaces. 
young_gen->eden_space()->clear(); young_gen->from_space()->clear(); young_gen->swap_spaces(); if (UseAdaptiveSizePolicy) { // Calculate the new survivor size and tenuring threshold size_t survived = young_gen->from_space()->used_in_bytes(); size_t promoted = old_gen->used_in_bytes() - old_gen_used_before; if (PrintAdaptiveSizePolicy) { tty->print_cr("AdaptiveSizeStart: collection: %d ", heap->total_collections()); } size_t survivor_limit = size_policy->max_survivor_size(young_gen->max_size()); _tenuring_threshold = size_policy->compute_survivor_space_size_and_threshold(survived, promoted, _survivor_overflow, _tenuring_threshold, survivor_limit); // Calculate optimal free space amounts size_policy->compute_generation_free_space(young_gen->used_in_bytes(), old_gen->used_in_bytes(), perm_gen->used_in_bytes(), false /* full gc*/); // Resize the old and young generations old_gen->resize(size_policy->calculated_old_free_size_in_bytes()); young_gen->resize(size_policy->calculated_eden_size_in_bytes(), size_policy->calculated_survivor_size_in_bytes()); if (PrintAdaptiveSizePolicy) { tty->print_cr("AdaptiveSizeStop: collection: %d ", heap->total_collections()); } } assert(young_gen->to_space()->is_empty(), "to space should be empty now"); } COMPILER2_ONLY(DerivedPointerTable::update_pointers()); NOT_PRODUCT(reference_processor()->verify_no_references_recorded()); // Verify all old -> young cards are now precise if (VerifyRememberedSets) { // Precise verification will give false positives. Until this is fixed, // use imprecise verification. // CardTableExtension::verify_all_young_refs_precise(); CardTableExtension::verify_all_young_refs_imprecise(); } if (TraceGen0Time) accumulated_time()->stop(); if (PrintGC) { heap->print_heap_change(prev_used); } heap->update_counters(); }
void SharedHeap::process_strong_roots(bool activate_scope,
                                      bool collecting_perm_gen,
                                      ScanningOption so,
                                      OopClosure* roots,
                                      CodeBlobClosure* code_roots,
                                      OopsInGenClosure* perm_blk) {
  StrongRootsScope srs(this, activate_scope);

  // General strong roots.
  assert(_strong_roots_parity != 0, "must have called prologue code");
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
    Universe::oops_do(roots);
    ReferenceProcessor::oops_do(roots);
    // Consider perm-gen discovered lists to be strong.
    perm_gen()->ref_processor()->weak_oops_do(roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
    JNIHandles::oops_do(roots);
  // All threads execute this; the individual threads are task groups.
  if (ParallelGCThreads > 0) {
    Threads::possibly_parallel_oops_do(roots, code_roots);
  } else {
    Threads::oops_do(roots, code_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
    ObjectSynchronizer::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
    FlatProfiler::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
    Management::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
    JvmtiExport::oops_do(roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
    if (so & SO_AllClasses) {
      SystemDictionary::oops_do(roots);
    } else if (so & SO_SystemClasses) {
      SystemDictionary::always_strong_oops_do(roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
    if (so & SO_Strings || (!collecting_perm_gen && !JavaObjectsInPerm)) {
      StringTable::oops_do(roots);
    }
    if (JavaObjectsInPerm) {
      // Verify the string table contents are in the perm gen
      NOT_PRODUCT(StringTable::oops_do(&assert_is_perm_closure));
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
    if (so & SO_CodeCache) {
      // (Currently, CMSCollector uses this to do intermediate-strength collections.)
      assert(collecting_perm_gen, "scanning all of code cache");
      assert(code_roots != NULL, "must supply closure for code cache");
      if (code_roots != NULL) {
        CodeCache::blobs_do(code_roots);
      }
    } else if (so & (SO_SystemClasses|SO_AllClasses)) {
      if (!collecting_perm_gen) {
        // If we are collecting from class statics, but we are not going to
        // visit all of the CodeCache, collect from the non-perm roots if any.
        // This makes the code cache function temporarily as a source of strong
        // roots for oops, until the next major collection.
        //
        // If collecting_perm_gen is true, we require that this phase will call
        // CodeCache::do_unloading. This will kill off nmethods with expired
        // weak references, such as stale invokedynamic targets.
        CodeCache::scavenge_root_nmethods_do(code_roots);
      }
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }

  if (!collecting_perm_gen) {
    // All threads perform this; coordination is handled internally.
    rem_set()->younger_refs_iterate(perm_gen(), perm_blk);
  }
  _process_strong_tasks->all_tasks_completed();
}
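// The is_task_claimed() pattern above lets several GC worker threads race over a fixed set of
// root-scanning subtasks so that each subtask is executed exactly once. Below is a minimal
// standalone sketch of that claiming idiom using one atomic flag per subtask (illustrative only,
// not HotSpot's SubTasksDone; names and task ids are invented):

#include <atomic>
#include <cstddef>

// Toy "sub-tasks done" tracker: worker threads call try_claim(i) and only the
// first caller for a given subtask index gets to execute that subtask.
template <size_t N>
class ToySubTasksDone {
  std::atomic<bool> _claimed[N];
public:
  ToySubTasksDone() { for (size_t i = 0; i < N; ++i) _claimed[i].store(false); }

  bool try_claim(size_t i) {
    bool expected = false;
    // Exactly one thread wins the compare-exchange for each subtask.
    return _claimed[i].compare_exchange_strong(expected, true);
  }
};

// Usage from each worker, mirroring the structure above (task ids illustrative):
//   ToySubTasksDone<2> tasks;              // shared between workers
//   if (tasks.try_claim(0)) { /* scan Universe roots */ }
//   if (tasks.try_claim(1)) { /* scan JNI handles    */ }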
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify();
  }

  // Verify object start arrays
  if (VerifyObjectStartArray && VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump();

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

      if (young_gen_empty) {
        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
      }
    }

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ", heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
            " perm_gen_capacity: %d ",
            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
            perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                 young_gen->eden_space()->used_in_bytes(),
                                 old_gen->used_in_bytes(),
                                 perm_gen->used_in_bytes(),
                                 young_gen->eden_space()->capacity_in_bytes(),
                                 old_gen->max_gen_size(),
                                 max_eden_size,
                                 true /* full gc*/,
                                 gc_cause,
                                 heap->collector_policy());

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      // Do perm gen after heap because prev_used does
      // not include the perm gen (done this way in the other
      // collectors).
      if (PrintGCDetails) {
        perm_gen->print_used_change(perm_gen_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify();
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray && VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
    perm_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();

  heap->post_full_gc_dump();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  return true;
}
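// The "casr" comment above describes an RAII scope guard: whether all soft
// references ended up cleared is recorded on the collector policy only when
// the guard leaves scope, after everything that could still change that
// decision has run. A minimal standalone sketch of that pattern follows; the
// names (Policy, ClearedAllSoftRefsGuard, set_all_soft_refs_clear) are
// illustrative stand-ins, not the actual collector-policy API.

#include <cstdio>

// Hypothetical stand-in for the collector policy object.
struct Policy {
  bool all_soft_refs_clear = false;
  void set_all_soft_refs_clear(bool v) { all_soft_refs_clear = v; }
};

// RAII guard: records the "cleared all soft refs" outcome in its destructor,
// i.e. only after the whole GC cycle in its scope has finished.
class ClearedAllSoftRefsGuard {
  bool    _clear_all;
  Policy* _policy;
 public:
  ClearedAllSoftRefsGuard(bool clear_all, Policy* policy)
    : _clear_all(clear_all), _policy(policy) {}
  ~ClearedAllSoftRefsGuard() {
    if (_clear_all) _policy->set_all_soft_refs_clear(true);
  }
};

int main() {
  Policy policy;
  {
    ClearedAllSoftRefsGuard guard(/*clear_all=*/true, &policy);
    // ... collection work that may still decide how soft refs are handled ...
  } // outcome recorded here, once nothing can change it anymore
  std::printf("all_soft_refs_clear = %d\n", policy.all_soft_refs_clear);
  return 0;
}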
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {
  assert(!is_secondary_entry(), "");
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");

  int byte_no = -1;
  bool change_to_virtual = false;

  switch (invoke_code) {
    case Bytecodes::_invokeinterface:
      // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface
      // instruction somehow links to a non-interface method (in Object).
      // In that case, the method has no itable index and must be invoked as a virtual.
      // Set a flag to keep track of this corner case.
      change_to_virtual = true;

      // ...and fall through as if we were handling invokevirtual:
    case Bytecodes::_invokevirtual:
      {
        if (method->can_be_statically_bound()) {
          // set_f2_as_vfinal_method checks if is_vfinal flag is true.
          set_method_flags(as_TosState(method->result_type()),
                           (                             1      << is_vfinal_shift) |
                           ((method->is_final_method() ? 1 : 0) << is_final_shift)  |
                           ((change_to_virtual         ? 1 : 0) << is_forced_virtual_shift),
                           method()->size_of_parameters());
          set_f2_as_vfinal_method(method());
        } else {
          assert(vtable_index >= 0, "valid index");
          assert(!method->is_final_method(), "sanity");
          set_method_flags(as_TosState(method->result_type()),
                           ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
                           method()->size_of_parameters());
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
      }

    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
      // Note:  Read and preserve the value of the is_vfinal flag on any
      // invokevirtual bytecode shared with this constant pool cache entry.
      // It is cheap and safe to consult is_vfinal() at all times.
      // Once is_vfinal is set, it must stay that way, lest we get a dangling oop.
      set_method_flags(as_TosState(method->result_type()),
                       ((is_vfinal()               ? 1 : 0) << is_vfinal_shift) |
                       ((method->is_final_method() ? 1 : 0) << is_final_shift),
                       method()->size_of_parameters());
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  // Note:  byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    assert(invoke_code != Bytecodes::_invokevirtual &&
           invoke_code != Bytecodes::_invokeinterface, "");
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2) {
    if (change_to_virtual) {
      assert(invoke_code == Bytecodes::_invokeinterface, "");
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
    } else {
      assert(invoke_code == Bytecodes::_invokevirtual, "");
    }
    // set up for invokevirtual, even if linking for invokeinterface also:
    set_bytecode_2(Bytecodes::_invokevirtual);
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}
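// set_method_flags above packs the dispatch properties (vfinal, final,
// forced-virtual) and the parameter count into a single flags word using
// shift constants. The following self-contained sketch shows that packing
// scheme in isolation; the shift positions and field widths chosen here are
// illustrative only and do not reflect the actual cpCache layout.

#include <cassert>
#include <cstdint>

// Illustrative shift positions; the real cpCache layout differs.
enum {
  demo_is_vfinal_shift         = 21,
  demo_is_final_shift          = 22,
  demo_is_forced_virtual_shift = 23
};

// Pack boolean dispatch properties plus the parameter count (low 8 bits)
// into one 32-bit flags word.
static uint32_t make_flags(bool vfinal, bool is_final, bool forced_virtual,
                           int parameter_size) {
  assert(parameter_size >= 0 && parameter_size < 256);
  return ((vfinal         ? 1u : 0u) << demo_is_vfinal_shift)         |
         ((is_final       ? 1u : 0u) << demo_is_final_shift)          |
         ((forced_virtual ? 1u : 0u) << demo_is_forced_virtual_shift) |
         (uint32_t)parameter_size;
}

static bool flags_is_vfinal(uint32_t flags) {
  return ((flags >> demo_is_vfinal_shift) & 1u) != 0;
}

int main() {
  uint32_t f = make_flags(/*vfinal=*/true, /*is_final=*/true,
                          /*forced_virtual=*/false, /*parameter_size=*/3);
  assert(flags_is_vfinal(f));
  assert((f & 0xffu) == 3);   // parameter count recovered from the low bits
  return 0;
}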
// We get called with "mr" representing the dirty region // that we want to process. Because of imprecise marking, // we may need to extend the incoming "mr" to the right, // and scan more. However, because we may already have // scanned some of that extended region, we may need to // trim its right-end back some so we do not scan what // we (or another worker thread) may already have scanned // or planning to scan. void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) { // Some collectors need to do special things whenever their dirty // cards are processed. For instance, CMS must remember mutator updates // (i.e. dirty cards) so as to re-scan mutated objects. // Such work can be piggy-backed here on dirty card scanning, so as to make // it slightly more efficient than doing a complete non-detructive pre-scan // of the card table. MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure(); if (pCl != NULL) { pCl->do_MemRegion(mr); } HeapWord* bottom = mr.start(); HeapWord* last = mr.last(); HeapWord* top = mr.end(); HeapWord* bottom_obj; HeapWord* top_obj; assert(_precision == CardTableModRefBS::ObjHeadPreciseArray || _precision == CardTableModRefBS::Precise, "Only ones we deal with for now."); assert(_precision != CardTableModRefBS::ObjHeadPreciseArray || _cl->idempotent() || _last_bottom == NULL || top <= _last_bottom, "Not decreasing"); NOT_PRODUCT(_last_bottom = mr.start()); bottom_obj = _sp->block_start(bottom); top_obj = _sp->block_start(last); assert(bottom_obj <= bottom, "just checking"); assert(top_obj <= top, "just checking"); // Given what we think is the top of the memory region and // the start of the object at the top, get the actual // value of the top. top = get_actual_top(top, top_obj); // If the previous call did some part of this region, don't redo. if (_precision == CardTableModRefBS::ObjHeadPreciseArray && _min_done != NULL && _min_done < top) { top = _min_done; } // Top may have been reset, and in fact may be below bottom, // e.g. the dirty card region is entirely in a now free object // -- something that could happen with a concurrent sweeper. bottom = MIN2(bottom, top); MemRegion extended_mr = MemRegion(bottom, top); assert(bottom <= top && (_precision != CardTableModRefBS::ObjHeadPreciseArray || _min_done == NULL || top <= _min_done), "overlap!"); // Walk the region if it is not empty; otherwise there is nothing to do. if (!extended_mr.is_empty()) { walk_mem_region(extended_mr, bottom_obj, top); } // An idempotent closure might be applied in any order, so we don't // record a _min_done for it. if (!_cl->idempotent()) { _min_done = bottom; } else { assert(_min_done == _last_explicit_min_done, "Don't update _min_done for idempotent cl"); } }
void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool,
                                                      Bytecodes::Code invoke_code,
                                                      methodHandle adapter,
                                                      Handle appendix,
                                                      Handle method_type) {
  // NOTE: This CPCE can be the subject of data races.
  // There are three words to update: flags, f2, f1 (in that order).
  // Writers must store all other values before f1.
  // Readers must test f1 first for non-null before reading other fields.
  // Competing writers must acquire exclusive access via a lock.
  // A losing writer waits on the lock until the winner writes f1 and leaves
  // the lock, so that when the losing writer returns, it can use the linked
  // cache entry.

  Thread* THREAD = Thread::current();
  ObjectLocker ol(cpool, THREAD);
  if (!is_f1_null()) {
    return;
  }

  const bool has_appendix    = appendix.not_null();
  const bool has_method_type = method_type.not_null();

  if (!has_appendix) {
    // The extra argument is not used, but we need a non-null value to signify linkage state.
    // Set it to something benign that will never leak memory.
    appendix = Universe::void_mirror();
  }

  // Write the flags.
  set_method_flags(as_TosState(adapter->result_type()),
                   ((has_appendix    ? 1 : 0) << has_appendix_shift   ) |
                   ((has_method_type ? 1 : 0) << has_method_type_shift) |
                   (                   1      << is_vfinal_shift      ) |
                   (                   1      << is_final_shift       ),
                   adapter->size_of_parameters());

  if (TraceInvokeDynamic) {
    tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ",
                  invoke_code,
                  (intptr_t)appendix(),    (has_appendix    ? "" : " (unused)"),
                  (intptr_t)method_type(), (has_method_type ? "" : " (unused)"),
                  (intptr_t)adapter());
    adapter->print();
    if (has_appendix)  appendix()->print();
  }

  // Method handle invokes and invokedynamic sites use both cp cache words.
  // f1, if not null, contains a value passed as a trailing argument to the adapter.
  // In the general case, this could be the call site's MethodType,
  // for use with java.lang.invoke.Invokers.checkExactType, or else a CallSite object.
  // f2 contains the adapter method which manages the actual call.
  // In the general case, this is a compiled LambdaForm.
  // (The Java code is free to optimize these calls by binding other
  // sorts of methods and appendices to call sites.)
  // JVM-level linking is via f2, as if for invokevfinal, and signatures are erased.
  // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
  // In principle this means that the method (with appendix) could take up to 256 parameter slots.
  //
  // This means that given a call site like (List)mh.invoke("foo"),
  // the f2 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
  // not '(Ljava/lang/String;)Ljava/util/List;'.
  // The fact that String and List are involved is encoded in the MethodType in f1.
  // This allows us to create fewer method oops, while keeping type safety.
  //
  set_f2_as_vfinal_method(adapter());

  // Store MethodType, if any.
  if (has_method_type) {
    ConstantPoolCacheEntry* e2 = cpool->cache()->find_secondary_entry_for(this);

    // Write the flags.
    e2->set_method_flags(as_TosState(adapter->result_type()),
                         ((has_method_type ? 1 : 0) << has_method_type_shift) |
                         (                   1      << is_vfinal_shift      ) |
                         (                   1      << is_final_shift       ),
                         adapter->size_of_parameters());
    e2->release_set_f1(method_type());
  }

  assert(appendix.not_null(), "needed for linkage state");
  release_set_f1(appendix());  // This must be the last one to set (see NOTE above)!

  if (!is_secondary_entry()) {
    // The interpreter assembly code does not check byte_2,
    // but it is used by is_resolved, method_if_resolved, etc.
    set_bytecode_2(invoke_code);
  }

  NOT_PRODUCT(verify(tty));

  if (TraceInvokeDynamic) {
    this->print(tty, 0);
  }
}
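// The ordering contract in the NOTE above (write all other fields first,
// publish f1 last with a releasing store; readers test f1 first) is the
// standard release/acquire publication pattern. The minimal standalone
// illustration below uses std::atomic rather than the VM's own ordering
// primitives, and all names in it are illustrative.

#include <atomic>
#include <cassert>

// Simplified cache-entry shape: two payload words plus a publication word.
struct DemoEntry {
  int                flags = 0;
  int                f2    = 0;
  std::atomic<void*> f1{nullptr};   // readers test this first
};

static int g_payload = 42;          // dummy object to publish through f1

// Writer: fill in everything else, then publish by storing f1 with release
// semantics so the earlier stores become visible before f1 does.
static void link_entry(DemoEntry& e, int flags, int f2, void* payload) {
  e.flags = flags;
  e.f2    = f2;
  e.f1.store(payload, std::memory_order_release);
}

// Reader: if the acquiring load of f1 sees non-null, the fields written
// before the release are guaranteed to be visible as well.
static bool try_use_entry(const DemoEntry& e, int& flags_out, int& f2_out) {
  if (e.f1.load(std::memory_order_acquire) == nullptr) return false;
  flags_out = e.flags;
  f2_out    = e.f2;
  return true;
}

int main() {
  DemoEntry e;
  int flags = 0, f2 = 0;
  assert(!try_use_entry(e, flags, f2));        // not linked yet
  link_entry(e, /*flags=*/7, /*f2=*/11, &g_payload);
  assert(try_use_entry(e, flags, f2));         // linked: payload fields visible
  assert(flags == 7 && f2 == 11);
  return 0;
}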
Canonicalizer(Compilation* c, Value x, int bci)
    : _compilation(c), _canonical(x), _bci(bci) {
  NOT_PRODUCT(x->set_printable_bci(bci));
  if (CanonicalizeNodes) x->visit(this);
}