// Report a scope whose context object could not be eliminated, listing why.
// No-op unless DebugPerformance is on.
void PerformanceDebugger::report_context(InlinedScope* s) {
  if (!DebugPerformance) return;
  Reporter reporter(this);
  GrowableArray<Expr*>* temps = s->contextTemporaries();
  const int len = temps->length();
  // Count context temps that are still live: accessed uplevel, or holding a
  // block that was not inlined away.
  // NOTE: local renamed from 'r', which shadowed the Reporter above.
  int nused = 0;
  for (int i = 0; i < len; i++) {
    PReg* preg = temps->at(i)->preg();
    if (preg->uplevelR() || preg->uplevelW() || (preg->isBlockPReg() && !preg->isUnused())) nused++;
  }
  if (nused == 0) {
    // Nothing actually needs the context -- it survives only because of a
    // compiler restriction.
    str->print(" could not eliminate context of scope %s (fixable compiler restriction; should be eliminated)\n", s->key()->print_string());
  } else {
    str->print(" could not eliminate context of scope %s; temp(s) still used: ", s->key()->print_string());
    for (int j = 0; j < len; j++) {
      PReg* preg = temps->at(j)->preg();
      if (preg->uplevelR() || preg->uplevelW()) {
        str->print("%d ", j);
      } else if (preg->isBlockPReg() && !preg->isUnused()) {
        // FIX: added trailing space; without it consecutive entries ran
        // together, e.g. "2 (non-inlined block)3 ".
        str->print("%d (non-inlined block) ", j);
      }
    }
    str->print("\n");
  }
}
// Write the compact table's buckets void CompactHashtableWriter::dump_table(NumberSeq* summary) { u4 offset = 0; for (int index = 0; index < _num_buckets; index++) { GrowableArray<Entry>* bucket = _buckets[index]; int bucket_size = bucket->length(); if (bucket_size == 1) { // bucket with one entry is compacted and only has the symbol offset _compact_buckets->at_put(index, BUCKET_INFO(offset, VALUE_ONLY_BUCKET_TYPE)); Entry ent = bucket->at(0); _compact_entries->at_put(offset++, ent.value()); _num_value_only_buckets++; } else { // regular bucket, each entry is a symbol (hash, offset) pair _compact_buckets->at_put(index, BUCKET_INFO(offset, REGULAR_BUCKET_TYPE)); for (int i=0; i<bucket_size; i++) { Entry ent = bucket->at(i); _compact_entries->at_put(offset++, u4(ent.hash())); // write entry hash _compact_entries->at_put(offset++, ent.value()); } if (bucket_size == 0) { _num_empty_buckets++; } else { _num_other_buckets++; } } summary->add(bucket_size); } // Mark the end of the buckets _compact_buckets->at_put(_num_buckets, BUCKET_INFO(offset, TABLEEND_BUCKET_TYPE)); assert(offset == (u4)_compact_entries->length(), "sanity"); }
// Print this scope (and transitively its senders) onto the given stream.
void ScopeDesc::print_on(outputStream* st) const {
  // header
  st->print("ScopeDesc[%d]@0x%lx ", _decode_offset, _code->instructions_begin());
  print_value_on(st);
  // decode offsets (debugging aid only)
  if (WizardMode) {
    st->print_cr("offset: %d", _decode_offset);
    st->print_cr("bci: %d", bci());
    st->print_cr("locals: %d", _locals_decode_offset);
    st->print_cr("stack: %d", _expressions_decode_offset);
    st->print_cr("monitor: %d", _monitors_decode_offset);
    st->print_cr("sender: %d", _sender_decode_offset);
  }
  // locals
  {
    GrowableArray<ScopeValue*>* l = ((ScopeDesc*) this)->locals();
    if (l != NULL) {
      // FIX: was tty->print_cr, bypassing the 'st' parameter used everywhere
      // else in this function.
      st->print_cr("Locals");
      for (int index = 0; index < l->length(); index++) {
        st->print(" - l%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
  // expressions
  {
    GrowableArray<ScopeValue*>* l = ((ScopeDesc*) this)->expressions();
    if (l != NULL) {
      st->print_cr("Expression stack");
      for (int index = 0; index < l->length(); index++) {
        st->print(" - @%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
  // monitors
  {
    GrowableArray<MonitorValue*>* l = ((ScopeDesc*) this)->monitors();
    if (l != NULL) {
      st->print_cr("Monitor stack");
      for (int index = 0; index < l->length(); index++) {
        st->print(" - @%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
  // recurse into the sender scope, if any
  if (!is_top()) {
    st->print_cr("Sender:");
    sender()->print_on(st);
  }
}
// iteration support - return next code blob nmethodDesc* next() { assert(_pos >= 0, "iteration not started"); if (_pos+1 >= _nmethods->length()) { return NULL; } return _nmethods->at(++_pos); }
void javaVFrame::print() { ResourceMark rm; vframe::print(); tty->print("\t"); method()->print_value(); tty->cr(); tty->print_cr("\tbci: %d", bci()); print_stack_values("locals", locals()); print_stack_values("expressions", expressions()); GrowableArray<MonitorInfo*>* list = monitors(); if (list->is_empty()) return; tty->print_cr("\tmonitor list:"); for (int index = (list->length()-1); index >= 0; index--) { MonitorInfo* monitor = list->at(index); tty->print("\t obj\t"); if (monitor->owner_is_scalar_replaced()) { Klass* k = java_lang_Class::as_Klass(monitor->owner_klass()); tty->print("( is scalar replaced %s)", k->external_name()); } else if (monitor->owner() == NULL) { tty->print("( null )"); } else { monitor->owner()->print_value(); tty->print("(" INTPTR_FORMAT ")", (address)monitor->owner()); } if (monitor->eliminated() && is_compiled_frame()) tty->print(" ( lock is eliminated )"); tty->cr(); tty->print("\t "); monitor->lock()->print_on(tty); tty->cr(); } }
// iteration support - return next code blob JvmtiCodeBlobDesc* next() { assert(_pos >= 0, "iteration not started"); if (_pos+1 >= _code_blobs->length()) { return NULL; } return _code_blobs->at(++_pos); }
void PCRecorder::print() { if (counters == NULL) return; tty->cr(); tty->print_cr("Printing compiled methods with PC buckets having more than %d ticks", ProfilerPCTickThreshold); tty->print_cr("==================================================================="); tty->cr(); GrowableArray<CodeBlob*>* candidates = new GrowableArray<CodeBlob*>(20); int s; { MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag); s = size(); } for (int index = 0; index < s; index++) { int count = counters[index]; if (count > ProfilerPCTickThreshold) { address pc = pc_for(index); CodeBlob* cb = CodeCache::find_blob_unsafe(pc); if (cb != NULL && candidates->find(cb) < 0) { candidates->push(cb); } } } for (int i = 0; i < candidates->length(); i++) { print_blobs(candidates->at(i)); } }
// This scope has at least one send -- mark its params, locals, context temps,
// and the current expression stack as debug-visible.
void InlinedScope::markLocalsDebugVisible(GrowableArray<PReg*>* exprStack) {
  if (_nofSends <= 1) {
    // First time we're called for this scope: mark receiver, arguments,
    // and temporaries.
    self()->preg()->debug = true;
    for (int a = nofArguments() - 1; a >= 0; a--) {
      argument(a)->preg()->debug = true;
    }
    for (int t = nofTemporaries() - 1; t >= 0; t--) {
      temporary(t)->preg()->debug = true;
    }
    // If there's a context, mark all context variables as debug-visible too.
    GrowableArray<Expr*>* ct = contextTemporaries();
    if (ct != NULL) {
      for (int c = 0; c < ct->length(); c++) {
        ct->at(c)->preg()->debug = true;
      }
    }
  }
  // Also mark the expression stack as debug-visible (excluding arguments to
  // the current send); the args are already excluded from the CallNode's
  // expression stack, so we are handed that one instead of this->exprStack.
  for (int e = 0; e < exprStack->length(); e++) {
    exprStack->at(e)->debug = true;
  }
}
// Sum the send counts of all sub-scopes recorded at the given bci.
void SendInfo::computeNSends(RScope* rscope, int bci) {
  GrowableArray<RScope*>* subs = rscope->subScopes(bci);
  nsends = 0;
  for (int i = 0; i < subs->length(); i++) {
    nsends += subs->at(i)->nsends;
  }
}
// Record one CodeBlob into the global list, skipping nmethods, the vtable
// stub buffer, and blobs already seen.
void CodeBlobCollector::do_blob(CodeBlob* cb) {
  // nmethods are collected elsewhere
  if (cb->is_nmethod()) {
    return;
  }
  // exclude VtableStubs, which are processed separately
  if (cb->is_buffer_blob() && strcmp(cb->name(), "vtable chunks") == 0) {
    return;
  }
  // Duplicate check on the start address -- the assumption is that stubs are
  // inserted into the list before their enclosing BufferBlobs.
  address start = cb->code_begin();
  for (int i = 0; i < _global_code_blobs->length(); i++) {
    JvmtiCodeBlobDesc* seen = _global_code_blobs->at(i);
    if (start == seen->code_begin()) {
      return;
    }
  }
  // record the CodeBlob details as a JvmtiCodeBlobDesc
  JvmtiCodeBlobDesc* desc = new JvmtiCodeBlobDesc(cb->name(), cb->code_begin(), cb->code_end());
  _global_code_blobs->append(desc);
}
// Emit the XML <method> element: name attributes, bytecodes, and any
// inlined subtrees.
void IdealGraphPrinter::print_method(ciMethod *method, int bci, InlineTree *tree) {
  begin_head(METHOD_ELEMENT);
  stringStream full_name;
  method->print_name(&full_name);
  stringStream short_name;
  method->print_short_name(&short_name);
  print_attr(METHOD_NAME_PROPERTY, full_name.as_string());
  print_attr(METHOD_SHORT_NAME_PROPERTY, short_name.as_string());
  print_attr(METHOD_BCI_PROPERTY, bci);
  end_head();
  // Bytecodes go in a CDATA section so arbitrary text survives the XML.
  head(BYTECODES_ELEMENT);
  _xml->print_cr("<![CDATA[");
  method->print_codes_on(_xml);
  _xml->print_cr("]]>");
  tail(BYTECODES_ELEMENT);
  if (tree != NULL && tree->subtrees().length() > 0) {
    head(INLINE_ELEMENT);
    GrowableArray<InlineTree *> subtrees = tree->subtrees();
    for (int i = 0; i < subtrees.length(); i++) {
      print_inline_tree(subtrees.at(i));
    }
    tail(INLINE_ELEMENT);
  }
  tail(METHOD_ELEMENT);
  _xml->flush();
}
void BytecodeHistogram::print(float cutoff) { ResourceMark rm; GrowableArray<HistoEntry*>* profile = sorted_array(_counters, Bytecodes::number_of_codes); // print profile int tot = total_count(profile); int abs_sum = 0; tty->cr(); //0123456789012345678901234567890123456789012345678901234567890123456789 tty->print_cr("Histogram of %d executed bytecodes:", tot); tty->cr(); tty->print_cr(" absolute relative code name"); tty->print_cr("----------------------------------------------------------------------"); int i = profile->length(); while (i-- > 0) { HistoEntry* e = profile->at(i); int abs = e->count(); float rel = abs * 100.0F / tot; if (cutoff <= rel) { tty->print_cr("%10d %7.2f%% %02x %s", abs, rel, e->index(), name_for(e->index())); abs_sum += abs; } } tty->print_cr("----------------------------------------------------------------------"); float rel_sum = abs_sum * 100.0F / tot; tty->print_cr("%10d %7.2f%% (cutoff = %.2f%%)", abs_sum, rel_sum, cutoff); tty->cr(); }
// Return the monitors actually locked in this frame, excluding eliminated and
// unowned monitors and the one monitor the thread is currently blocked on.
GrowableArray<MonitorInfo*>* javaVFrame::locked_monitors() {
  assert(SafepointSynchronize::is_at_safepoint() || JavaThread::current() == thread(),
         "must be at safepoint or it's a java frame of the current thread");
  GrowableArray<MonitorInfo*>* mons = monitors();
  GrowableArray<MonitorInfo*>* result = new GrowableArray<MonitorInfo*>(mons->length());
  if (mons->is_empty()) return result;
  // The objects this thread is blocked entering or waiting on; the first such
  // monitor encountered is not really "locked" and must be skipped.
  ObjectMonitor* pending_monitor = thread()->current_pending_monitor();
  ObjectMonitor* waiting_monitor = thread()->current_waiting_monitor();
  oop pending_obj = (pending_monitor != NULL ? (oop) pending_monitor->object() : (oop) NULL);
  oop waiting_obj = (waiting_monitor != NULL ? (oop) waiting_monitor->object() : (oop) NULL);
  bool found_first_monitor = false;
  for (int i = mons->length() - 1; i >= 0; i--) {
    MonitorInfo* mon = mons->at(i);
    if (mon->eliminated() && is_compiled_frame()) continue; // skip eliminated monitor
    oop obj = mon->owner();
    if (obj == NULL) continue;                              // skip unowned monitor
    if (!found_first_monitor && (obj == pending_obj || obj == waiting_obj)) {
      // Skip the monitor that the thread is blocked to enter or waiting on
      continue;
    }
    found_first_monitor = true;
    result->append(mon);
  }
  return result;
}
// // Count the number of objects for a lightweight monitor. The hobj // parameter is object that owns the monitor so this routine will // count the number of times the same object was locked by frames // in java_thread. // jint JvmtiEnvBase::count_locked_objects(JavaThread *java_thread, Handle hobj) { jint ret = 0; if (!java_thread->has_last_Java_frame()) { return ret; // no Java frames so no monitors } ResourceMark rm; HandleMark hm; RegisterMap reg_map(java_thread); for(javaVFrame *jvf=java_thread->last_java_vframe(®_map); jvf != NULL; jvf = jvf->java_sender()) { GrowableArray<MonitorInfo*>* mons = jvf->monitors(); if (!mons->is_empty()) { for (int i = 0; i < mons->length(); i++) { MonitorInfo *mi = mons->at(i); if (mi->owner_is_scalar_replaced()) continue; // see if owner of the monitor is our object if (mi->owner() != NULL && mi->owner() == hobj()) { ret++; } } } } return ret; }
void BytecodePairHistogram::print(float cutoff) { ResourceMark rm; GrowableArray<HistoEntry*>* profile = sorted_array(_counters, number_of_pairs); // print profile int tot = total_count(profile); int abs_sum = 0; tty->cr(); //0123456789012345678901234567890123456789012345678901234567890123456789 tty->print_cr("Histogram of %d executed bytecode pairs:", tot); tty->cr(); tty->print_cr(" absolute relative codes 1st bytecode 2nd bytecode"); tty->print_cr("----------------------------------------------------------------------"); int i = profile->length(); while (i-- > 0) { HistoEntry* e = profile->at(i); int abs = e->count(); float rel = abs * 100.0F / tot; if (cutoff <= rel) { int c1 = e->index() % number_of_codes; int c2 = e->index() / number_of_codes; tty->print_cr("%10d %6.3f%% %02x %02x %-19s %s", abs, rel, c1, c2, name_for(c1), name_for(c2)); abs_sum += abs; } } tty->print_cr("----------------------------------------------------------------------"); float rel_sum = abs_sum * 100.0F / tot; tty->print_cr("%10d %6.3f%% (cutoff = %.3f%%)", abs_sum, rel_sum, cutoff); tty->cr(); }
// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) { GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info(); if (info != NULL) { return info; } info = new GrowableArray<MonitorInfo*>(); // It's possible for the thread to not have any Java frames on it, // i.e., if it's the main thread and it's already returned from main() if (thread->has_last_Java_frame()) { RegisterMap rm(thread); for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) { GrowableArray<MonitorInfo*> *monitors = vf->monitors(); if (monitors != NULL) { int len = monitors->length(); // Walk monitors youngest to oldest for (int i = len - 1; i >= 0; i--) { MonitorInfo* mon_info = monitors->at(i); if (mon_info->eliminated()) continue; oop owner = mon_info->owner(); if (owner != NULL) { info->append(mon_info); } } } } } thread->set_cached_monitor_info(info); return info; }
// Record a deferred write to local variable 'index' of this frame, to be
// applied when the frame is deoptimized. The value is stashed in the
// thread's deferred-locals list rather than written to the frame directly.
void vframe::update_local(JavaThread* thread, BasicType type, int index, jvalue value) {
  frame fr = this->get_frame();
  // AZUL - We use extra slots to accomodate tags for longs and doubles
  // in the compiler as well.
  if(type==T_LONG||type==T_DOUBLE){ index=index+1; }
#ifdef ASSERT
  // NOTE(review): the original deopt-scheduling check is commented out and
  // replaced by Unimplemented() -- debug builds will abort here; confirm
  // whether that is intentional.
  Unimplemented();
  //CodeBlob* b = CodeCache::find_blob(fr.pc());
  //assert(b->is_patched_for_deopt(), "frame must be scheduled for deoptimization");
#endif /* ASSERT */
  GrowableArray<jvmtiDeferredLocalVariableSet*>*deferred=thread->deferred_locals();
  if (deferred != NULL ) {
    // See if this vframe has already had locals with deferred writes
    int f;
    for ( f = 0 ; f < deferred->length() ; f++ ) {
      if (deferred->at(f)->matches(this)) {
        // Matching, vframe now see if the local already had deferred write
        GrowableArray<jvmtiDeferredLocalVariable*>* locals = deferred->at(f)->locals();
        int l;
        for (l = 0 ; l < locals->length() ; l++ ) {
          if (locals->at(l)->index() == index) {
            // Overwrite the previously deferred value for this slot.
            locals->at(l)->set_value(value);
            return;
          }
        }
        // No matching local already present. Push a new value onto the deferred collection
        locals->push(new jvmtiDeferredLocalVariable(index, type, value));
        return;
      }
    }
    // No matching vframe must push a new vframe
  } else {
    // No deferred updates pending for this thread.
    // allocate in C heap
    deferred = new(ResourceObj::C_HEAP) GrowableArray<jvmtiDeferredLocalVariableSet*> (1, true);
    thread->set_deferred_locals(deferred);
  }
  // Because the frame is patched for deopt and we will push in
  // registers in uncommon_trap, we will use the sender's sp to compare
  deferred->push(new jvmtiDeferredLocalVariableSet(method(), bci(), fr.pd_sender().sp()));
  assert(deferred->top()->id() == fr.pd_sender().sp(), "Huh? Must match");
  deferred->top()->set_local_at(index, type, value);
}
// Release the collected descriptors and the list that holds them.
~CodeBlobCollector() {
  if (_code_blobs == NULL) return;
  // Each descriptor was heap-allocated; free them before the list itself.
  for (int i = 0; i < _code_blobs->length(); i++) {
    FreeHeap(_code_blobs->at(i));
  }
  delete _code_blobs;
}
// Deregister each ConstantPool that was kept alive, removing it from the
// thread's metadata handles (in reverse registration order).
~KeepAliveRegistrar() {
  for (int i = _keep_alive.length() - 1; i >= 0; --i) {
    ConstantPool* cp = _keep_alive.at(i);
    int idx = _thread->metadata_handles()->find_from_end(cp);
    // FIX: find_from_end signals "absent" with a negative index, so slot 0 is
    // a valid hit; the previous "idx > 0" would falsely fire on an entry
    // stored at position 0.
    assert(idx >= 0, "Must be in the list");
    _thread->metadata_handles()->remove_at(idx);
  }
}
// iteration support - return first code blob nmethodDesc* first() { assert(_nmethods != NULL, "not collected"); if (_nmethods->length() == 0) { return NULL; } _pos = 0; return _nmethods->at(0); }
// True if any recorded member method has exactly the queried signature
// (Symbols are interned, so pointer comparison suffices).
bool contains_signature(Symbol* query) {
  const int count = _members.length();
  for (int i = 0; i < count; ++i) {
    Symbol* sig = _members.at(i).first->signature();
    if (sig == query) {
      return true;
    }
  }
  return false;
}
// iteration support - return first code blob JvmtiCodeBlobDesc* first() { assert(_code_blobs != NULL, "not collected"); if (_code_blobs->length() == 0) { return NULL; } _pos = 0; return _code_blobs->at(0); }
// Linear search of the global thread list by id; NULL if not present.
static Thread* findThread(int thread_id) {
  const int count = threads->length();
  for (int i = 0; i < count; i++) {
    Thread* t = threads->at(i);
    // The list may contain NULL slots; skip them.
    if (t != NULL && t->thread_id == thread_id) {
      return t;
    }
  }
  return NULL;
}
// Apply the given breakpoint action (set or clear) to the current method and
// to every still-EMCP previous version of it created by RedefineClasses.
void JvmtiBreakpoint::each_method_version_do(method_action meth_act) {
  // Apply to the current version first.
  ((methodOopDesc*)_method->*meth_act)(_bci);
  // add/remove breakpoint to/from versions of the method that
  // are EMCP. Directly or transitively obsolete methods are
  // not saved in the PreviousVersionInfo.
  Thread *thread = Thread::current();
  instanceKlassHandle ikh = instanceKlassHandle(thread, _method->method_holder());
  Symbol* m_name = _method->name();
  Symbol* m_signature = _method->signature();
  {
    ResourceMark rm(thread);
    // PreviousVersionInfo objects returned via PreviousVersionWalker
    // contain a GrowableArray of handles. We have to clean up the
    // GrowableArray _after_ the PreviousVersionWalker destructor
    // has destroyed the handles.
    {
      // search previous versions if they exist
      PreviousVersionWalker pvw((instanceKlass *)ikh()->klass_part());
      for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
           pv_info != NULL; pv_info = pvw.next_previous_version()) {
        GrowableArray<methodHandle>* methods = pv_info->prev_EMCP_method_handles();
        if (methods == NULL) {
          // We have run into a PreviousVersion generation where
          // all methods were made obsolete during that generation's
          // RedefineClasses() operation. At the time of that
          // operation, all EMCP methods were flushed so we don't
          // have to go back any further.
          //
          // A NULL methods array is different than an empty methods
          // array. We cannot infer any optimizations about older
          // generations from an empty methods array for the current
          // generation.
          break;
        }
        // Find this breakpoint's method (by name+signature) in this
        // generation and apply the action to it.
        for (int i = methods->length() - 1; i >= 0; i--) {
          methodHandle method = methods->at(i);
          if (method->name() == m_name && method->signature() == m_signature) {
            RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
              meth_act == &methodOopDesc::set_breakpoint ? "sett" : "clear",
              method->name()->as_C_string(),
              method->signature()->as_C_string()));
            assert(!method->is_obsolete(), "only EMCP methods here");
            ((methodOopDesc*)method()->*meth_act)(_bci);
            // At most one match per generation.
            break;
          }
        }
      }
    } // pvw is cleaned up
  } // rm is cleaned up
}
void CompiledLoop::hoistTypeTests() { // collect all type tests that can be hoisted out of the loop _loopHeader->_tests = _hoistableTests = new GrowableArray<HoistedTypeTest*>(10); TTHoister tth(this, _hoistableTests); _scope->subScopesDo(&tth); // add type tests to loop header for testing (avoid duplicates) // (currently quadratic algorithm but there should be very few) GrowableArray<HoistedTypeTest*>* headerTests = new GrowableArray<HoistedTypeTest*>(_hoistableTests->length()); for (int i = _hoistableTests->length() - 1; i >= 0; i--) { HoistedTypeTest* t = _hoistableTests->at(i); PReg* tested = t->testedPR; for (int j = headerTests->length() - 1; j >= 0; j--) { if (headerTests->at(j)->testedPR == tested) { // already testing this PReg if (isEquivalentType(headerTests->at(j)->klasses, t->klasses)) { // nothing to do } else { // Whoa! The same PReg is tested for different types in different places. // Possible but rare. headerTests->at(j)->invalid = t->invalid = true; if (WizardMode) { compiler_warning("CompiledLoop::hoistTypeTests: PReg tested for different types\n"); t->print(); headerTests->at(j)->print(); } } tested = NULL; // don't add it to list break; } } if (tested) headerTests->append(t); } // now delete all hoisted type tests from loop body for (i = _hoistableTests->length() - 1; i >= 0; i--) { HoistedTypeTest* t = _hoistableTests->at(i); if (!t->invalid) { t->node->assert_preg_type(t->testedPR, t->klasses, _loopHeader); } } if (!_loopHeader->isActivated()) _loopHeader->activate(); }
// Print this logical method (signature, members with qualification status,
// and the selected target if any) at the given indent level.
void print_sig_on(outputStream* str, Symbol* signature, int indent) const {
  streamIndentor outer_indent(str, indent * 2);
  str->indent().print_cr("Logical Method %s:", signature->as_C_string());
  streamIndentor member_indent(str);
  const int count = _members.length();
  for (int i = 0; i < count; ++i) {
    str->indent();
    print_method(str, _members.at(i).first);
    // Flag members that were disqualified from selection.
    if (_members.at(i).second == DISQUALIFIED) {
      str->print(" (disqualified)");
    }
    str->cr();
  }
  if (_selected_target != NULL) {
    print_selected(str, 1);
  }
}
// Release the address-location maps attached to the collected nmethod
// descriptors, then the list itself.
// NOTE(review): unlike ~CodeBlobCollector, the nmethodDesc entries themselves
// are not freed here -- presumably they are owned elsewhere or
// resource-allocated; verify against the allocation site.
~nmethodCollector() {
  if (_nmethods != NULL) {
    for (int i=0; i<_nmethods->length(); i++) {
      nmethodDesc* blob = _nmethods->at(i);
      // Only the map array is heap-allocated per descriptor.
      if (blob->map()!= NULL) {
        FREE_C_HEAP_ARRAY(jvmtiAddrLocationMap, blob->map());
      }
    }
    delete _nmethods;
  }
}
// Total display width: optional open symbol, each element followed by a
// space, and optional close symbol.
int width(prettyPrintStream* output) {
  int total = 0;
  if (begin_sym) {
    total += output->width_of_string(begin_sym) + output->width_of_space();
  }
  const int count = elements->length();
  for (int i = 0; i < count; i++) {
    total += elements->at(i)->width(output) + output->width_of_space();
  }
  if (end_sym) {
    total += output->width_of_string(end_sym);
  }
  return total;
}
// Record a deferred write to local variable 'index' of this compiled frame;
// the value is applied during deoptimization rather than written directly.
void compiledVFrame::update_local(BasicType type, int index, jvalue value) {
#ifdef ASSERT
  assert(fr().is_deoptimized_frame(), "frame must be scheduled for deoptimization");
#endif /* ASSERT */
  GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred = thread()->deferred_locals();
  if (deferred != NULL ) {
    // See if this vframe has already had locals with deferred writes
    int f;
    for ( f = 0 ; f < deferred->length() ; f++ ) {
      if (deferred->at(f)->matches(this)) {
        // Matching, vframe now see if the local already had deferred write
        GrowableArray<jvmtiDeferredLocalVariable*>* locals = deferred->at(f)->locals();
        int l;
        for (l = 0 ; l < locals->length() ; l++ ) {
          if (locals->at(l)->index() == index) {
            // Overwrite the previously deferred value for this slot.
            locals->at(l)->set_value(value);
            return;
          }
        }
        // No matching local already present. Push a new value onto the deferred collection
        locals->push(new jvmtiDeferredLocalVariable(index, type, value));
        return;
      }
    }
    // No matching vframe must push a new vframe
  } else {
    // No deferred updates pending for this thread.
    // allocate in C heap
    deferred = new(ResourceObj::C_HEAP, mtCompiler) GrowableArray<jvmtiDeferredLocalVariableSet*> (1, true);
    thread()->set_deferred_locals(deferred);
  }
  // New set for this frame, identified by the frame id.
  deferred->push(new jvmtiDeferredLocalVariableSet(method(), bci(), fr().id()));
  assert(deferred->top()->id() == fr().id(), "Huh? Must match");
  deferred->top()->set_local_at(index, type, value);
}
// Either sets the target or the exception error message void determine_target(InstanceKlass* root, TRAPS) { if (has_target() || throws_exception()) { return; } // Qualified methods are maximally-specific methods // These include public, instance concrete (=default) and abstract methods GrowableArray<Method*> qualified_methods; int num_defaults = 0; int default_index = -1; int qualified_index = -1; for (int i = 0; i < _members.length(); ++i) { Pair<Method*,QualifiedState> entry = _members.at(i); if (entry.second == QUALIFIED) { qualified_methods.append(entry.first); qualified_index++; if (entry.first->is_default_method()) { num_defaults++; default_index = qualified_index; } } } if (num_defaults == 0) { // If the root klass has a static method with matching name and signature // then do not generate an overpass method because it will hide the // static method during resolution. if (qualified_methods.length() == 0) { _exception_message = generate_no_defaults_message(CHECK); } else { assert(root != NULL, "Null root class"); _exception_message = generate_method_message(root->name(), qualified_methods.at(0), CHECK); } _exception_name = vmSymbols::java_lang_AbstractMethodError(); // If only one qualified method is default, select that } else if (num_defaults == 1) { _selected_target = qualified_methods.at(default_index); } else if (num_defaults > 1) { _exception_message = generate_conflicts_message(&qualified_methods,CHECK); _exception_name = vmSymbols::java_lang_IncompatibleClassChangeError(); if (TraceDefaultMethods) { _exception_message->print_value_on(tty); tty->cr(); } } }