/*
 * Get Method Modifiers
 *
 * For the method indicated by method, return the access flags
 * via modifiers_ptr.
 *
 * REQUIRED Functionality.
 *
 * @param env            the JVMTI environment (validated by CHECK_EVERYTHING)
 * @param method         the method whose modifiers are requested
 * @param modifiers_ptr  receives the ACC_* flag mask
 * @return JVMTI_ERROR_NONE on success, or the appropriate JVMTI error code
 */
jvmtiError JNICALL
jvmtiGetMethodModifiers(jvmtiEnv* env,
                        jmethodID method,
                        jint* modifiers_ptr)
{
    TRACE("GetMethodModifiers called");
    SuspendEnabledChecker sec;
    /*
     * Check given env & current phase.
     */
    jvmtiPhase phases[] = {JVMTI_PHASE_START, JVMTI_PHASE_LIVE};
    CHECK_EVERYTHING();

    // A bogus method id is reported as INVALID_METHODID per the JVMTI spec
    // (and consistently with jvmtiGetBytecodes in this file), not as
    // NULL_POINTER, which is reserved for output-pointer arguments.
    if( !method )
        return JVMTI_ERROR_INVALID_METHODID;

    if( !modifiers_ptr )
        return JVMTI_ERROR_NULL_POINTER;

    *modifiers_ptr = 0;
    Method* mtd = reinterpret_cast<Method*>(method);

    // Translate the VM-internal predicates into JVM access-flag bits.
    if( mtd->is_public() )       *modifiers_ptr |= ACC_PUBLIC;
    if( mtd->is_private() )      *modifiers_ptr |= ACC_PRIVATE;
    if( mtd->is_protected() )    *modifiers_ptr |= ACC_PROTECTED;
    if( mtd->is_static() )       *modifiers_ptr |= ACC_STATIC;
    if( mtd->is_final() )        *modifiers_ptr |= ACC_FINAL;
    if( mtd->is_synchronized() ) *modifiers_ptr |= ACC_SYNCHRONIZED;
    if( mtd->is_native() )       *modifiers_ptr |= ACC_NATIVE;
    if( mtd->is_abstract() )     *modifiers_ptr |= ACC_ABSTRACT;

    return JVMTI_ERROR_NONE;
}
// Print a one-line debugging description of this Java vframe: the raw
// machine-frame coordinates, then "Class.method", suffixed with either
// "(SourceFile:line)" or "(Native Method)".  Output only; mutates nothing.
void javaVFrame::print_value() const {
  Method*        m = method();
  InstanceKlass* k = m->method_holder();
  // Raw frame pointers first so the entry can be matched against a stack dump.
  tty->print_cr("frame( sp=" INTPTR_FORMAT ", unextended_sp=" INTPTR_FORMAT ", fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT ")",
                _fr.sp(), _fr.unextended_sp(), _fr.fp(), _fr.pc());
  tty->print("%s.%s", k->internal_name(), m->name()->as_C_string());
  if (!m->is_native()) {
    // Java method: append "(SourceFile:line)" only when both the source file
    // name and a valid line number for the current bci are available.
    Symbol* source_name = k->source_file_name();
    int line_number = m->line_number_from_bci(bci());
    if (source_name != NULL && (line_number != -1)) {
      tty->print("(%s:%d)", source_name->as_C_string(), line_number);
    }
  } else {
    tty->print("(Native Method)");
  }
  // Check frame size and print warning if it looks suspiciously large
  if (fr().sp() != NULL) {
    RegisterMap map = *register_map();
    uint size = fr().frame_size(&map);
    // Threshold doubles on 64-bit since stack slots are twice as wide.
#ifdef _LP64
    if (size > 8*K) warning("SUSPICIOUSLY LARGE FRAME (%d)", size);
#else
    if (size > 4*K) warning("SUSPICIOUSLY LARGE FRAME (%d)", size);
#endif
  }
}
/*
 * Get Bytecodes
 *
 * For the method indicated by method, return the byte codes that
 * implement the method. The number of bytecodes is returned via
 * bytecode_count_ptr. The byte codes themselves are returned via
 * bytecodes_ptr.
 *
 * OPTIONAL Functionality.
 */
jvmtiError JNICALL
jvmtiGetBytecodes(jvmtiEnv* env,
                  jmethodID method,
                  jint* bytecode_count_ptr,
                  unsigned char** bytecodes_ptr)
{
    TRACE("GetBytecodes called");
    SuspendEnabledChecker sec;
    /*
     * Check given env & current phase.
     */
    jvmtiPhase phases[] = {JVMTI_PHASE_START, JVMTI_PHASE_LIVE};
    CHECK_EVERYTHING();
    CHECK_CAPABILITY(can_get_bytecodes);

    /**
     * Check the output pointers.
     */
    if( !bytecode_count_ptr || !bytecodes_ptr )
    {
        return JVMTI_ERROR_NULL_POINTER;
    }

    /**
     * Check method
     */
    if( !method )
    {
        return JVMTI_ERROR_INVALID_METHODID;
    }

    Method* mtd = (Method*)method;
    if( mtd->is_native() ) return JVMTI_ERROR_OUT_OF_MEMORY,
        // unreachable; see below
        JVMTI_ERROR_NATIVE_METHOD;
    // No bytecode array present.
    // NOTE(review): OUT_OF_MEMORY presumably reflects an earlier allocation
    // failure for the bytecode array -- confirm this is the intended code.
    if( mtd->get_byte_code_addr() == NULL ) return JVMTI_ERROR_OUT_OF_MEMORY;

    *bytecode_count_ptr = mtd->get_byte_code_size();
    jvmtiError err = _allocate( *bytecode_count_ptr, bytecodes_ptr );
    if( err != JVMTI_ERROR_NONE ) return err;
    memcpy( *bytecodes_ptr, mtd->get_byte_code_addr(), *bytecode_count_ptr );

    // If the interpreter planted breakpoint opcodes into the method's
    // bytecode, restore the original opcodes in the returned copy so the
    // caller never sees breakpoint instructions.
    if (interpreter_enabled())
    {
        TIEnv *p_env = (TIEnv *)env;
        VMBreakPoints* vm_brpt = p_env->vm->vm_env->TI->vm_brpt;
        // Hold the breakpoint-table lock while walking it.
        LMAutoUnlock lock(vm_brpt->get_lock());

        for (VMBreakPoint* bpt = vm_brpt->find_method_breakpoint(method); bpt;
             bpt = vm_brpt->find_next_method_breakpoint(bpt, method))
        {
            (*bytecodes_ptr)[bpt->location] = (unsigned char)bpt->saved_byte;
        }
    }

    return JVMTI_ERROR_NONE;
}
// Fetch the pending return value of the interpreted method executing in
// this frame (used during method-exit reporting).  Reference results are
// stored through oop_result; primitive results fill the matching member
// of value_result.  Returns the method's result BasicType.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* callee = interpreter_frame_method();
  BasicType result_type = callee->result_type();
  intptr_t* tos = (intptr_t *) interpreter_frame_tos_address();

  if (result_type == T_OBJECT || result_type == T_ARRAY) {
    oop value;
    if (callee->is_native()) {
      // Native methods park the reference result in the interpreter state.
      value = get_interpreterState()->oop_temp();
    } else {
      oop* slot = (oop *) tos;
      value = (slot == NULL) ? (oop) NULL : *slot;
    }
    assert(value == NULL || Universe::heap()->is_in(value), "sanity check");
    *oop_result = value;
  } else {
    // Primitive (or void) result lives at the top of the expression stack.
    switch (result_type) {
      case T_VOID:    break;
      case T_BOOLEAN: value_result->z = *(jboolean *) tos; break;
      case T_BYTE:    value_result->b = *(jbyte *) tos;    break;
      case T_CHAR:    value_result->c = *(jchar *) tos;    break;
      case T_SHORT:   value_result->s = *(jshort *) tos;   break;
      case T_INT:     value_result->i = *(jint *) tos;     break;
      case T_LONG:    value_result->j = *(jlong *) tos;    break;
      case T_FLOAT:   value_result->f = *(jfloat *) tos;   break;
      case T_DOUBLE:  value_result->d = *(jdouble *) tos;  break;
      default:        ShouldNotReachHere();
    }
  }
  return result_type;
}
// Extract the return value of the interpreted method executing in this
// frame (used by method-exit notification).  Reference results are stored
// through oop_result; primitive results fill the matching jvalue member.
// Returns the method's result BasicType.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  if (method->is_native()) {
    // Prior to calling into the runtime to notify the method exit the possible
    // result value is saved into the interpreter frame.
    address lresult = (address)&(get_ijava_state()->lresult);
    address fresult = (address)&(get_ijava_state()->fresult);

    switch (method->result_type()) {
      case T_OBJECT:
      case T_ARRAY: {
        *oop_result = JNIHandles::resolve(*(jobject*)lresult);
        break;
      }
      // We use std/stfd to store the values.
      case T_BOOLEAN : value_result->z = (jboolean) *(unsigned long*)lresult; break;
      case T_INT     : value_result->i = (jint)     *(long*)lresult;          break;
      case T_CHAR    : value_result->c = (jchar)    *(unsigned long*)lresult; break;
      case T_SHORT   : value_result->s = (jshort)   *(long*)lresult;          break;
      // BUGFIX: byte results belong in the 'b' union member; the original
      // wrote value_result->z (the boolean slot).
      case T_BYTE    : value_result->b = (jbyte)    *(long*)lresult;          break;
      case T_LONG    : value_result->j = (jlong)    *(long*)lresult;          break;
      case T_FLOAT   : value_result->f = (jfloat)   *(double*)fresult;        break;
      case T_DOUBLE  : value_result->d = (jdouble)  *(double*)fresult;        break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  } else {
    // Non-native: the result is at the top of the interpreter expression stack.
    intptr_t* tos_addr = interpreter_frame_tos_address();
    switch (method->result_type()) {
      case T_OBJECT:
      case T_ARRAY: {
        oop obj = *(oop*)tos_addr;
        assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
        *oop_result = obj;
        // BUGFIX: the missing break here fell through into T_BOOLEAN and
        // clobbered value_result->z after the oop had been stored.
        break;
      }
      case T_BOOLEAN : value_result->z = (jboolean) *(jint*)tos_addr; break;
      case T_BYTE    : value_result->b = (jbyte)    *(jint*)tos_addr; break;
      case T_CHAR    : value_result->c = (jchar)    *(jint*)tos_addr; break;
      case T_SHORT   : value_result->s = (jshort)   *(jint*)tos_addr; break;
      case T_INT     : value_result->i = *(jint*)tos_addr;            break;
      case T_LONG    : value_result->j = *(jlong*)tos_addr;           break;
      case T_FLOAT   : value_result->f = *(jfloat*)tos_addr;          break;
      case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr;         break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  }
  return type;
}
// Notify the NCAI subsystem that a method is exiting.  Only native-method
// exits matter here: they may terminate an active native single-step
// session.  exc_popped and ret_val are currently unused by this hook.
void ncai_report_method_exit(jmethodID method, jboolean exc_popped, jvalue ret_val)
{
    GlobalNCAI* ncai = VM_Global_State::loader_env->NCAI;

    // Nothing to report when NCAI is switched off.
    if (!GlobalNCAI::isEnabled())
        return;

    assert(method);
    Method* meth = reinterpret_cast<Method*>(method);

    if (ncai->step_enabled && meth->is_native())
        ncai_step_native_method_exit(meth);
}
// Return the monitors (locks) associated with this compiled frame as a
// freshly allocated GrowableArray of MonitorInfo (never NULL, possibly
// empty).  Handles both scope-less native wrappers and compiled Java
// frames with debug-info-recorded monitors.
GrowableArray<MonitorInfo*>* compiledVFrame::monitors() const {
  // Natives has no scope
  if (scope() == NULL) {
    nmethod* nm = code();
    Method* method = nm->method();
    assert(method->is_native(), "");
    if (!method->is_synchronized()) {
      // Unsynchronized native wrapper: no monitor to report.
      return new GrowableArray<MonitorInfo*>(0);
    }
    // This monitor is really only needed for UseBiasedLocking, but
    // return it in all cases for now as it might be useful for stack
    // traces and tools as well
    GrowableArray<MonitorInfo*> *monitors = new GrowableArray<MonitorInfo*>(1);
    // Casting away const
    frame& fr = (frame&) _fr;
    MonitorInfo* info = new MonitorInfo(
        fr.get_native_receiver(), fr.get_native_monitor(), false, false);
    monitors->push(info);
    return monitors;
  }
  // Compiled Java frame: decode the monitors recorded in the scope's
  // debug information.
  GrowableArray<MonitorValue*>* monitors = scope()->monitors();
  if (monitors == NULL) {
    return new GrowableArray<MonitorInfo*>(0);
  }
  GrowableArray<MonitorInfo*>* result = new GrowableArray<MonitorInfo*>(monitors->length());
  for (int index = 0; index < monitors->length(); index++) {
    MonitorValue* mv = monitors->at(index);
    ScopeValue*   ov = mv->owner();
    StackValue *owner_sv = create_stack_value(ov); // it is an oop
    if (ov->is_object() && owner_sv->obj_is_scalar_replaced()) {
      // The owner object was scalar replaced
      assert(mv->eliminated(), "monitor should be eliminated for scalar replaced object");
      // Put klass for scalar replaced object.
      ScopeValue* kv = ((ObjectValue *)ov)->klass();
      assert(kv->is_constant_oop(), "klass should be oop constant for scalar replaced object");
      Handle k(((ConstantOopReadValue*)kv)->value()());
      assert(java_lang_Class::is_instance(k()), "must be");
      result->push(new MonitorInfo(k(), resolve_monitor_lock(mv->basic_lock()),
                                   mv->eliminated(), true));
    } else {
      // Ordinary case: owner oop reconstructed from the stack value.
      result->push(new MonitorInfo(owner_sv->get_obj()(), resolve_monitor_lock(mv->basic_lock()),
                                   mv->eliminated(), false));
    }
  }
  return result;
}
// Report the method id and bytecode location (bci, or -1 for native
// methods) of the stack frame at the given depth in java_thread's stack.
// Returns JVMTI_ERROR_NO_MORE_FRAMES when depth exceeds the stack, or
// JVMTI_ERROR_INTERNAL (product builds) for a non-Java frame.
jvmtiError
JvmtiEnvBase::get_frame_location(JavaThread *java_thread, jint depth,
                                 jmethodID* method_ptr, jlocation* location_ptr) {
#ifdef ASSERT
  uint32_t debug_bits = 0;
#endif
  // The target stack must be stable: either we are at a safepoint or the
  // target thread is fully suspended.
  assert((SafepointSynchronize::is_at_safepoint() ||
          is_thread_fully_suspended(java_thread, false, &debug_bits)),
         "at safepoint or target thread is suspended");
  Thread* current_thread = Thread::current();
  ResourceMark rm(current_thread);
  vframe *vf = vframeFor(java_thread, depth);
  if (vf == NULL) {
    return JVMTI_ERROR_NO_MORE_FRAMES;
  }

  // vframeFor should return a java frame. If it doesn't
  // it means we've got an internal error and we return the
  // error in product mode. In debug mode we will instead
  // attempt to cast the vframe to a javaVFrame and will
  // cause an assertion/crash to allow further diagnosis.
#ifdef PRODUCT
  if (!vf->is_java_frame()) {
    return JVMTI_ERROR_INTERNAL;
  }
#endif

  HandleMark hm(current_thread);
  javaVFrame *jvf = javaVFrame::cast(vf);
  Method* method = jvf->method();
  if (method->is_native()) {
    // Per the JVMTI spec, native frames report location -1.
    *location_ptr = -1;
  } else {
    *location_ptr = jvf->bci();
  }
  *method_ptr = method->jmethod_id();

  return JVMTI_ERROR_NONE;
}
// x86 variant: extract the return value of the interpreted method in this
// frame (used by JVMTI method-exit reporting).  Reference results go
// through oop_result; primitive results fill the matching jvalue member.
// Returns the method's result BasicType.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
#ifdef CC_INTERP
  // Needed for JVMTI. The result should always be in the
  // interpreterState object
  interpreterState istate = get_interpreterState();
#endif // CC_INTERP
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  intptr_t* tos_addr;
  if (method->is_native()) {
    // Prior to calling into the runtime to report the method_exit the possible
    // return value is pushed to the native stack. If the result is a jfloat/jdouble
    // then ST0 is saved before EAX/EDX. See the note in generate_native_result
    tos_addr = (intptr_t*)sp();
    if (type == T_FLOAT || type == T_DOUBLE) {
      // QQQ seems like this code is equivalent on the two platforms
#ifdef AMD64
      // This is times two because we do a push(ltos) after pushing XMM0
      // and that takes two interpreter stack slots.
      tos_addr += 2 * Interpreter::stackElementWords;
#else
      tos_addr += 2;
#endif // AMD64
    }
  } else {
    // Non-native: result is at the top of the interpreter expression stack.
    tos_addr = (intptr_t*)interpreter_frame_tos_address();
  }

  switch (type) {
    case T_OBJECT :
    case T_ARRAY  : {
      oop obj;
      if (method->is_native()) {
        // Native methods park the reference result in the oop temp slot.
#ifdef CC_INTERP
        obj = istate->_oop_temp;
#else
        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
#endif // CC_INTERP
      } else {
        oop* obj_p = (oop*)tos_addr;
        obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
      }
      assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
      *oop_result = obj;
      break;
    }
    case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break;
    case T_BYTE    : value_result->b = *(jbyte*)tos_addr; break;
    case T_CHAR    : value_result->c = *(jchar*)tos_addr; break;
    case T_SHORT   : value_result->s = *(jshort*)tos_addr; break;
    case T_INT     : value_result->i = *(jint*)tos_addr; break;
    case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
    case T_FLOAT   : {
#ifdef AMD64
      value_result->f = *(jfloat*)tos_addr;
#else
      if (method->is_native()) {
        // Result was in ST0 so need to convert to jfloat
        jdouble d = *(jdouble*)tos_addr;
        value_result->f = (jfloat)d;
      } else {
        value_result->f = *(jfloat*)tos_addr;
      }
#endif // AMD64
      break;
    }
    case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
    case T_VOID    : /* Nothing to do */ break;
    default        : ShouldNotReachHere();
  }

  return type;
}
// SPARC variant: extract the return value of the interpreted method in
// this frame (used by JVMTI method-exit reporting).  Native results are
// read from the l_scratch/d_scratch save area; Java results come from the
// top of the expression stack.  Returns the method's result BasicType.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  if (method->is_native()) {
    // Prior to notifying the runtime of the method_exit the possible result
    // value is saved to l_scratch and d_scratch.

#ifdef CC_INTERP
    interpreterState istate = get_interpreterState();
    intptr_t* l_scratch = (intptr_t*) &istate->_native_lresult;
    intptr_t* d_scratch = (intptr_t*) &istate->_native_fresult;
#else /* CC_INTERP */
    intptr_t* l_scratch = fp() + interpreter_frame_l_scratch_fp_offset;
    intptr_t* d_scratch = fp() + interpreter_frame_d_scratch_fp_offset;
#endif /* CC_INTERP */

    address l_addr = (address)l_scratch;
#ifdef _LP64
    // On 64-bit the result for 1/8/16/32-bit result types is in the other
    // word half
    l_addr += wordSize/2;
#endif

    switch (type) {
      case T_OBJECT:
      case T_ARRAY: {
        // Reference result is parked in the oop temp slot, not scratch.
#ifdef CC_INTERP
        *oop_result = istate->_oop_temp;
#else
        oop obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
        assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
        *oop_result = obj;
#endif // CC_INTERP
        break;
      }

      // Sub-int results are masked down to their declared width.
      case T_BOOLEAN : { jint* p = (jint*)l_addr; value_result->z = (jboolean)((*p) & 0x1); break; }
      case T_BYTE    : { jint* p = (jint*)l_addr; value_result->b = (jbyte)((*p) & 0xff); break; }
      case T_CHAR    : { jint* p = (jint*)l_addr; value_result->c = (jchar)((*p) & 0xffff); break; }
      case T_SHORT   : { jint* p = (jint*)l_addr; value_result->s = (jshort)((*p) & 0xffff); break; }
      case T_INT     : value_result->i = *(jint*)l_addr; break;
      case T_LONG    : value_result->j = *(jlong*)l_scratch; break;
      case T_FLOAT   : value_result->f = *(jfloat*)d_scratch; break;
      case T_DOUBLE  : value_result->d = *(jdouble*)d_scratch; break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  } else {
    // Non-native: the result lives at the top of the expression stack.
    intptr_t* tos_addr = interpreter_frame_tos_address();

    switch(type) {
      case T_OBJECT:
      case T_ARRAY: {
        oop obj = cast_to_oop(*tos_addr);
        assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
        *oop_result = obj;
        break;
      }
      case T_BOOLEAN : { jint* p = (jint*)tos_addr; value_result->z = (jboolean)((*p) & 0x1); break; }
      case T_BYTE    : { jint* p = (jint*)tos_addr; value_result->b = (jbyte)((*p) & 0xff); break; }
      case T_CHAR    : { jint* p = (jint*)tos_addr; value_result->c = (jchar)((*p) & 0xffff); break; }
      case T_SHORT   : { jint* p = (jint*)tos_addr; value_result->s = (jshort)((*p) & 0xffff); break; }
      case T_INT     : value_result->i = *(jint*)tos_addr; break;
      case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
      case T_FLOAT   : value_result->f = *(jfloat*)tos_addr; break;
      case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  };
  return type;
}
// Return TRUE when the method addressed by the stack-iterator handle is
// declared native.  Thin adapter from the opaque Method_Handle to the
// VM-internal Method query.
static inline Boolean
interp_si_method_is_native(Method_Handle m)
{
    assert(m);
    return ((Method *)m)->is_native();
}
// Report every GC root on one thread's interpreter stack to the JVMTI heap
// iteration callback: the receiver ('this'), any pending exception, object
// references on the operand stack and in local variables, and locked
// monitors.  Finally enumerates the handles of the thread's M2N frames.
void interp_ti_enumerate_root_set_single_thread_on_stack(jvmtiEnv* ti_env, VM_thread *thread)
{
    TRACE2("enumeration", "interp_enumerate_root_set_single_thread_on_stack()");
    StackIterator_interp* si;
    si = interp_si_create_from_native(thread);

    int i;
    int depth;
    DEBUG_GC("\n\nGC enumeration in interpreter stack:\n");

    // Walk interpreter frames from innermost to outermost.
    for (depth = 0; !interp_si_is_past_end(si); depth++) {
        Method* method = (Method*)interp_si_get_method(si);
        jmethodID method_id = (jmethodID)method;
        // Slot numbers are per-frame, advancing with each root reported.
        int slot = 0;

        if (si->This) {
            vm_ti_enumerate_stack_root(ti_env,
                (void**)&si->This, si->This,
                JVMTI_HEAP_ROOT_STACK_LOCAL,
                depth, method_id, slot++);
            DEBUG_GC(" [THIS]: " << si->This);
        }

        if (si->exc) {
            vm_ti_enumerate_stack_root(ti_env,
                (void**)&si->exc, si->exc,
                JVMTI_HEAP_ROOT_STACK_LOCAL,
                depth, method_id, slot++);
            DEBUG_GC(" [EXCEPTION]: " << si->exc);
        }

        if (method->is_native()) {
            // Native frames carry no interpreter operand stack or locals.
            DEBUG_GC("[METHOD <native>]: " << method);
            interp_si_goto_previous(si);
            continue;
        }

        DEBUG_GC("[METHOD "<< si->stack.size << " " << (int)si->locals.varNum << "]: " << method);

        // Operand-stack slots flagged as object references.
        if (si->stack.size)
            for(i = 0; i <= si->stack.index; i++) {
                if (si->stack.refs[i] == FLAG_OBJECT) {
                    DEBUG_GC(" Stack[" << i << "] ");
                    REF* ref = &si->stack.data[i].ref;
                    ManagedObject *obj = UNCOMPRESS_INTERP(*ref);
                    if (obj == 0) {
                        DEBUG_GC("NULL");
                    } else {
                        DEBUG_GC(obj);
                        vm_ti_enumerate_stack_root(ti_env,
                            ref, (Managed_Object_Handle)obj,
                            JVMTI_HEAP_ROOT_STACK_LOCAL,
                            depth, method_id, slot++);
                    }
                }
            }

        // Local-variable slots flagged as object references.
        unsigned j;
        if (si->locals.varNum)
            for(j = 0; j < si->locals.varNum; j++) {
                if (si->locals.refs[j] == FLAG_OBJECT) {
                    DEBUG_GC(" Locals[" << j << "] ");
                    REF* ref = &si->locals.vars[j].ref;
                    ManagedObject *obj = UNCOMPRESS_INTERP(*ref);
                    if (obj == 0) {
                        DEBUG_GC("NULL\n");
                    } else {
                        DEBUG_GC(obj);
                        vm_ti_enumerate_stack_root(ti_env,
                            ref, (Managed_Object_Handle)obj,
                            JVMTI_HEAP_ROOT_STACK_LOCAL,
                            depth, method_id, slot++);
                    }
                }
            }

        // Monitors locked by this frame are roots as well.
        MonitorList *ml = si->locked_monitors;
        while(ml) {
            vm_ti_enumerate_stack_root(ti_env,
                &ml->monitor, ml->monitor,
                JVMTI_HEAP_ROOT_MONITOR,
                depth, method_id, slot++);
            ml = ml->next;
        }
        interp_si_goto_previous(si);
    }

    // enumerate m2n frames
    M2nFrame *m2n = m2n_get_last_frame(thread);
    while(m2n) {
        oh_enumerate_handles(m2n_get_local_handles(m2n));
        m2n = m2n_get_previous_frame(m2n);
    }
}
// Enumerate every GC root on one thread's interpreter stack directly to
// the garbage collector: the receiver ('this'), any pending exception,
// object references on the operand stack and in local variables, and
// locked monitors.  Finally enumerates the handles of the thread's M2N
// frames.  (Non-TI twin of the jvmtiEnv-reporting variant.)
void interp_enumerate_root_set_single_thread_on_stack(VM_thread *thread)
{
    TRACE2("enumeration", "interp_enumerate_root_set_single_thread_on_stack()");
    StackIterator_interp* si;
    si = interp_si_create_from_native(thread);

    int i;
    DEBUG_GC("\n\nGC enumeration in interpreter stack:\n");

    // Walk interpreter frames from innermost to outermost.
    while(!interp_si_is_past_end(si)) {
        // (Removed a dead self-assignment "method = method;" -- the variable
        // is genuinely used below, so the suppression was pointless.)
        Method* method = (Method*)interp_si_get_method(si);

        if (si->This) {
            vm_enumerate_root_reference((void**)&si->This, FALSE);
            DEBUG_GC(" [THIS]: " << si->This);
        }

        if (si->exc) {
            vm_enumerate_root_reference((void**)&si->exc, FALSE);
            DEBUG_GC(" [EXCEPTION]: " << si->exc);
        }

        if (method->is_native()) {
            // Native frames carry no interpreter operand stack or locals.
            DEBUG_GC("[METHOD <native>]: " << method);
            interp_si_goto_previous(si);
            continue;
        }

        DEBUG_GC("[METHOD "<< si->stack.size << " " << (int)si->locals.varNum << "]: " << method);

        // Operand-stack slots flagged as object references.
        if (si->stack.size)
            for(i = 0; i <= si->stack.index; i++) {
                if (si->stack.refs[i] == FLAG_OBJECT) {
                    DEBUG_GC(" Stack[" << i << "] ");
                    REF* ref = &si->stack.data[i].ref;
                    ManagedObject *obj = UNCOMPRESS_INTERP(*ref);
                    if (obj == 0) {
                        DEBUG_GC("NULL");
                    } else {
                        DEBUG_GC(obj);
                        vm_enumerate(ref, FALSE); // CHECK!!! can we enumerate uncompressed ref in compressed mode
                    }
                }
            }

        // Local-variable slots flagged as object references.
        unsigned j;
        if (si->locals.varNum)
            for(j = 0; j < si->locals.varNum; j++) {
                if (si->locals.refs[j] == FLAG_OBJECT) {
                    DEBUG_GC(" Locals[" << j << "] ");
                    REF* ref = &si->locals.vars[j].ref;
                    ManagedObject *obj = UNCOMPRESS_INTERP(*ref);
                    if (obj == 0) {
                        DEBUG_GC("NULL\n");
                    } else {
                        DEBUG_GC(obj);
                        vm_enumerate(ref, FALSE); // CHECK!!! can we enumerate uncompressed ref in compressed mode
                    }
                }
            }

        // Monitors locked by this frame are roots as well.
        MonitorList *ml = si->locked_monitors;
        while(ml) {
            vm_enumerate_root_reference((void**)&ml->monitor, FALSE);
            ml = ml->next;
        }
        interp_si_goto_previous(si);
    }

    // enumerate m2n frames
    M2nFrame *m2n = m2n_get_last_frame(thread);
    while(m2n) {
        oh_enumerate_handles(m2n_get_local_handles(m2n));
        m2n = m2n_get_previous_frame(m2n);
    }
}
// Fill an AsyncGetCallTrace buffer with up to 'depth' Java frames starting
// from top_frame.  Sets trace->num_frames to the frames gathered, or to
// ticks_GC_active (-2) when an unsafe method is encountered and the sample
// must be discarded.  lineno is the bci for Java methods, -3 for natives.
static void forte_fill_call_trace_given_top(JavaThread* thd,
                                            ASGCT_CallTrace* trace,
                                            int depth,
                                            frame top_frame) {
  // Async-signal context: no Handles may be created here.
  NoHandleMark nhm;

  frame initial_Java_frame;
  Method* method;
  int bci;
  int count;

  count = 0;
  assert(trace->frames != NULL, "trace->frames must be non-NULL");

  bool fully_decipherable = find_initial_Java_frame(thd, &top_frame, &initial_Java_frame, &method, &bci);

  // The frame might not be walkable but still recovered a method
  // (e.g. an nmethod with no scope info for the pc)
  if (method == NULL) return;

  if (!method->is_valid_method()) {
    trace->num_frames = ticks_GC_active; // -2
    return;
  }

  // We got a Java frame however it isn't fully decipherable
  // so it won't necessarily be safe to use it for the
  // initial frame in the vframe stream.
  if (!fully_decipherable) {
    // Take whatever method the top-frame decoder managed to scrape up.
    // We look further at the top frame only if non-safepoint
    // debugging information is available.
    count++;
    trace->num_frames = count;
    trace->frames[0].method_id = method->find_jmethod_id_or_null();
    if (!method->is_native()) {
      trace->frames[0].lineno = bci;
    } else {
      trace->frames[0].lineno = -3;
    }

    // If the frame can't be trusted for sender-walking, report the single
    // top frame we already recorded and stop.
    if (!initial_Java_frame.safe_for_sender(thd)) return;

    RegisterMap map(thd, false);
    initial_Java_frame = initial_Java_frame.sender(&map);
  }

  vframeStreamForte st(thd, initial_Java_frame, false);

  for (; !st.at_end() && count < depth; st.forte_next(), count++) {
    bci = st.bci();
    method = st.method();

    if (!method->is_valid_method()) {
      // we throw away everything we've gathered in this sample since
      // none of it is safe
      trace->num_frames = ticks_GC_active; // -2
      return;
    }

    trace->frames[count].method_id = method->find_jmethod_id_or_null();
    if (!method->is_native()) {
      trace->frames[count].lineno = bci;
    } else {
      trace->frames[count].lineno = -3;
    }
  }
  trace->num_frames = count;
  return;
}