// Trace one bytecode: print a header line when we switch methods, an
// optional raw-bytes dump (Verbose), then the bytecode counter, bci,
// optional tos/tos2 cache values, the bytecode name and its attributes.
// Remembers the bytecode so a following _wide-modified instruction can
// be decoded correctly.  Compiled out in PRODUCT builds.
void trace(methodOop method, address bcp, uintptr_t tos, uintptr_t tos2) {
#ifndef PRODUCT
  MutexLocker ml(BytecodeTrace_lock);
  if (_current_method != method) {
    // Note 1: This code will not work as expected with true MT/MP.
    //         Need an explicit lock or a different solution.
    ResourceMark rm;
    tty->cr();
    tty->print("[%d] ", (int) Thread::current()->osthread()->thread_id());
    method->print_name(tty);
    tty->cr();
    _current_method = method;
  }
  if (Verbose) {
    const char* format;
    // bcp is a pointer: print it with "%p" — the previous "%x" expected an
    // unsigned int, truncating the address on LP64 (undefined behavior).
    switch (Bytecodes::length_at(bcp)) {
    case 1:  format = "%p %02x "            ; break;
    case 2:  format = "%p %02x %02x "       ; break;
    case 3:  format = "%p %02x %02x %02x "  ; break;
    default: format = "%p %02x %02x %02x .."; break;
    }
    // Extra varargs beyond what the chosen format consumes are harmless.
    tty->print(format, bcp, *bcp, *(bcp+1), *(bcp+2));
  }
  // If the previous bytecode was _wide, the real opcode is the byte after it.
  Bytecodes::Code code;
  if (_previous_bytecode == Bytecodes::_wide) {
    code = Bytecodes::cast(*(bcp+1));
  } else {
    code = Bytecodes::cast(*bcp);
  }
  int bci = bcp - method->code_base();
  // Select the operand format string: wide variants have their own layout.
  const char* format = _previous_bytecode == Bytecodes::_wide
                     ? Bytecodes::wide_format(code)
                     : Bytecodes::format(code);
  tty->print("[%d] ", (int) Thread::current()->osthread()->thread_id());
  if (Verbose) {
    tty->print("%8d %4d 0x%016lx 0x%016lx %s",
               BytecodeCounter::counter_value(), bci, tos, tos2,
               Bytecodes::name(code));
  } else {
    tty->print("%8d %4d %s",
               BytecodeCounter::counter_value(), bci, Bytecodes::name(code));
  }
  print_attributes(bcp, bci, format);
  tty->cr();
  // Remember this bytecode so the next call can decode a _wide prefix.
  _previous_bytecode = code;
#endif
}
// Given that a new (potential) event has come in, // maintain the current JVMTI location on a per-thread per-env basis // and use it to filter out duplicate events: // - instruction rewrites // - breakpoint followed by single step // - single step at a breakpoint void JvmtiEnvThreadState::compare_and_set_current_location(methodOop new_method, address new_location, jvmtiEvent event) { int new_bci = new_location - new_method->code_base(); // The method is identified and stored as a jmethodID which is safe in this // case because the class cannot be unloaded while a method is executing. jmethodID new_method_id = new_method->jmethod_id(); // the last breakpoint or single step was at this same location if (_current_bci == new_bci && _current_method_id == new_method_id) { switch (event) { case JVMTI_EVENT_BREAKPOINT: // Repeat breakpoint is complicated. If we previously posted a breakpoint // event at this location and if we also single stepped at this location // then we skip the duplicate breakpoint. _breakpoint_posted = _breakpoint_posted && _single_stepping_posted; break; case JVMTI_EVENT_SINGLE_STEP: // Repeat single step is easy: just don't post it again. // If step is pending for popframe then it may not be // a repeat step. The new_bci and method_id is same as current_bci // and current method_id after pop and step for recursive calls. // This has been handled by clearing the location _single_stepping_posted = true; break; default: assert(false, "invalid event value passed"); break; } return; } set_current_location(new_method_id, new_bci); _breakpoint_posted = false; _single_stepping_posted = false; }
// Fast-path entry for trivial accessor methods of the exact shape
//   aload_0; getfield #index; ireturn/areturn
// Reads the field straight out of the receiver without building an
// interpreter frame, overwriting the receiver slot with the result.
// Falls back to normal_entry() whenever the fast path cannot be taken:
// a safepoint is pending, the receiver is null, or the constant pool
// cache entry is unresolved.  Always returns 0 (no deoptimized frames).
int CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
  JavaThread *thread = (JavaThread *) THREAD;
  ZeroStack *stack = thread->zero_stack();
  // The incoming arguments (just the receiver here) sit on the Zero stack.
  intptr_t *locals = stack->sp();

  // Drop into the slow path if we need a safepoint check
  if (SafepointSynchronize::do_call_back()) {
    return normal_entry(method, 0, THREAD);
  }

  // Load the object pointer and drop into the slow path
  // if we have a NullPointerException
  oop object = LOCALS_OBJECT(0);
  if (object == NULL) {
    return normal_entry(method, 0, THREAD);
  }

  // Read the field index from the bytecode, which looks like this:
  //  0: aload_0
  //  1: getfield
  //  2: index
  //  3: index
  //  4: ireturn/areturn
  // NB this is not raw bytecode: index is in machine order
  u1 *code = method->code_base();
  assert(code[0] == Bytecodes::_aload_0 &&
         code[1] == Bytecodes::_getfield &&
         (code[4] == Bytecodes::_ireturn ||
          code[4] == Bytecodes::_areturn), "should do");
  u2 index = Bytes::get_native_u2(&code[2]);

  // Get the entry from the constant pool cache, and drop into
  // the slow path if it has not been resolved
  constantPoolCacheOop cache = method->constants()->cache();
  ConstantPoolCacheEntry* entry = cache->entry_at(index);
  if (!entry->is_resolved(Bytecodes::_getfield)) {
    return normal_entry(method, 0, THREAD);
  }

  // Get the result and push it onto the stack
  switch (entry->flag_state()) {
  case ltos:
  case dtos:
    // Two-word results need one slot beyond the receiver's; make room
    // (CHECK_0 returns 0 if the overflow check throws).
    stack->overflow_check(1, CHECK_0);
    stack->alloc(wordSize);
    break;
  }
  if (entry->is_volatile()) {
    // Volatile field: use the *_acquire loads for the required ordering.
    switch (entry->flag_state()) {
    case ctos:
      SET_LOCALS_INT(object->char_field_acquire(entry->f2()), 0);
      break;

    case btos:
      SET_LOCALS_INT(object->byte_field_acquire(entry->f2()), 0);
      break;

    case stos:
      SET_LOCALS_INT(object->short_field_acquire(entry->f2()), 0);
      break;

    case itos:
      SET_LOCALS_INT(object->int_field_acquire(entry->f2()), 0);
      break;

    case ltos:
      SET_LOCALS_LONG(object->long_field_acquire(entry->f2()), 0);
      break;

    case ftos:
      SET_LOCALS_FLOAT(object->float_field_acquire(entry->f2()), 0);
      break;

    case dtos:
      SET_LOCALS_DOUBLE(object->double_field_acquire(entry->f2()), 0);
      break;

    case atos:
      SET_LOCALS_OBJECT(object->obj_field_acquire(entry->f2()), 0);
      break;

    default:
      ShouldNotReachHere();
    }
  }
  else {
    // Non-volatile field: plain loads are sufficient.
    switch (entry->flag_state()) {
    case ctos:
      SET_LOCALS_INT(object->char_field(entry->f2()), 0);
      break;

    case btos:
      SET_LOCALS_INT(object->byte_field(entry->f2()), 0);
      break;

    case stos:
      SET_LOCALS_INT(object->short_field(entry->f2()), 0);
      break;

    case itos:
      SET_LOCALS_INT(object->int_field(entry->f2()), 0);
      break;

    case ltos:
      SET_LOCALS_LONG(object->long_field(entry->f2()), 0);
      break;

    case ftos:
      SET_LOCALS_FLOAT(object->float_field(entry->f2()), 0);
      break;

    case dtos:
      SET_LOCALS_DOUBLE(object->double_field(entry->f2()), 0);
      break;

    case atos:
      SET_LOCALS_OBJECT(object->obj_field(entry->f2()), 0);
      break;

    default:
      ShouldNotReachHere();
    }
  }

  // No deoptimized frames on the stack
  return 0;
}
// Rewrites a method given the index_map information.
// Walks every bytecode of 'method', rewriting constant-pool-indexed
// bytecodes to their cache-indexed fast forms (or back, when
// 'reverse' is true), and records whether the method contains
// monitorenter/monitorexit or jsr/jsr_w bytecodes.
void Rewriter::scan_method(methodOop method, bool reverse) {
  int nof_jsrs = 0;
  bool has_monitor_bytecodes = false;

  {
    // We cannot tolerate a GC in this block, because we've
    // cached the bytecodes in 'code_base'. If the methodOop
    // moves, the bytecodes will also move.
    No_Safepoint_Verifier nsv;
    Bytecodes::Code c;

    // Bytecodes and their length
    const address code_base = method->code_base();
    const int code_length = method->code_size();

    int bc_length;
    for (int bci = 0; bci < code_length; bci += bc_length) {
      address bcp = code_base + bci;
      int prefix_length = 0;
      c = (Bytecodes::Code)(*bcp);

      // Since we have the code, see if we can get the length
      // directly. Some more complicated bytecodes will report
      // a length of zero, meaning we need to make another method
      // call to calculate the length.
      bc_length = Bytecodes::length_for(c);
      if (bc_length == 0) {
        bc_length = Bytecodes::length_at(method, bcp);

        // length_at will put us at the bytecode after the one modified
        // by 'wide'. We don't currently examine any of the bytecodes
        // modified by wide, but in case we do in the future...
        if (c == Bytecodes::_wide) {
          prefix_length = 1;
          c = (Bytecodes::Code)bcp[1];
        }
      }
      assert(bc_length != 0, "impossible bytecode length");

      switch (c) {
        case Bytecodes::_lookupswitch   : {
#ifndef CC_INTERP
          // Pick the fast switch form based on the number of pairs:
          // small tables get a linear scan, larger ones a binary search.
          Bytecode_lookupswitch bc(method, bcp);
          (*bcp) = (
            bc.number_of_pairs() < BinarySwitchThreshold
            ? Bytecodes::_fast_linearswitch
            : Bytecodes::_fast_binaryswitch
          );
#endif
          break;
        }
        case Bytecodes::_fast_linearswitch:
        case Bytecodes::_fast_binaryswitch: {
#ifndef CC_INTERP
          // Reverse direction: restore the original lookupswitch.
          (*bcp) = Bytecodes::_lookupswitch;
#endif
          break;
        }
        case Bytecodes::_getstatic      : // fall through
        case Bytecodes::_putstatic      : // fall through
        case Bytecodes::_getfield       : // fall through
        case Bytecodes::_putfield       : // fall through
        case Bytecodes::_invokevirtual  : // fall through
        case Bytecodes::_invokespecial  : // fall through
        case Bytecodes::_invokestatic   :
        case Bytecodes::_invokeinterface:
          // Operand starts after the opcode plus any 'wide' prefix.
          rewrite_member_reference(bcp, prefix_length+1, reverse);
          break;
        case Bytecodes::_invokedynamic:
          rewrite_invokedynamic(bcp, prefix_length+1, reverse);
          break;
        case Bytecodes::_ldc:
        case Bytecodes::_fast_aldc:
          maybe_rewrite_ldc(bcp, prefix_length+1, false, reverse);
          break;
        case Bytecodes::_ldc_w:
        case Bytecodes::_fast_aldc_w:
          maybe_rewrite_ldc(bcp, prefix_length+1, true, reverse);
          break;
        case Bytecodes::_jsr            : // fall through
        case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
        case Bytecodes::_monitorenter   : // fall through
        case Bytecodes::_monitorexit    : has_monitor_bytecodes = true; break;
      }
    }
  }

  // Update access flags
  if (has_monitor_bytecodes) {
    method->set_has_monitor_bytecodes();
  }

  // The presence of a jsr bytecode implies that the method might potentially
  // have to be rewritten, so we run the oopMapGenerator on the method
  if (nof_jsrs > 0) {
    method->set_has_jsrs();
    // Second pass will revisit this method.
    assert(method->has_jsrs(), "didn't we just set this?");
  }
}