static ncaiError ncai_check_stop_single_step(NCAIEnv* env)
{
    DebugUtilsTI* ti = env->ti_env->vm->vm_env->TI;
    GlobalNCAI* ncai = env->ti_env->vm->vm_env->NCAI;

    if (!ncai->step_enabled)
        return NCAI_ERROR_NONE;

    // Check that no environment has SingleStep enabled
    LMAutoUnlock lock(&ti->TIenvs_lock);

    bool disable = true;
    for (TIEnv* ti_env = ti->getEnvironments(); ti_env; ti_env = ti_env->next)
    {
        if (!ti_env->ncai_env)
            continue;

        if (ti_env->ncai_env->global_events[NCAI_EVENT_STEP - NCAI_MIN_EVENT_TYPE_VAL] ||
            ti_env->ncai_env->event_threads[NCAI_EVENT_STEP - NCAI_MIN_EVENT_TYPE_VAL])
        {
            disable = false;
            break;
        }
    }

    return disable ? ncai_stop_single_step(env) : NCAI_ERROR_NONE;
}
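// Compiles a method on demand: protects the incoming arguments with a GC frame,
// runs the JIT through compile_do_compilation(), raises AbstractMethodError or
// InternalError on failure, and installs pending and single-step breakpoints for
// JVMTI before returning the entry point.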
NativeCodePtr compile_me(Method* method)
{
    ASSERT_RAISE_AREA;
    ASSERT_NO_INTERPRETER;
    TRACE("compile_me " << method);

    GcFrame gc;
    compile_protect_arguments(method, &gc);

    if (exn_raised()) {
        return NULL;
    }

    tmn_suspend_enable();
    if (method->is_abstract()) {
        compile_raise_exception("java/lang/AbstractMethodError", "", method);
        tmn_suspend_disable();
        return NULL;
    }

    DebugUtilsTI* ti = VM_Global_State::loader_env->TI;

    JIT_Result res = compile_do_compilation(method);
    if (res != JIT_SUCCESS) {
        INFO2("compile", "Cannot compile " << method);
        if (!exn_raised()) {
            compile_raise_exception("java/lang/InternalError", "Cannot compile ", method);
        }
        tmn_suspend_disable();
        return NULL;
    }
    tmn_suspend_disable();

    NativeCodePtr entry_point = method->get_code_addr();
    INFO2("compile.code", "Compiled method " << method << ", entry " << entry_point);

    if (method->get_pending_breakpoints() != 0)
        jvmti_set_pending_breakpoints(method);

    if (ti->isEnabled() && ti->is_single_step_enabled() && !method->is_native())
    {
        jvmti_thread_t jvmti_thread = jthread_self_jvmti();
        assert(jvmti_thread);
        jvmti_set_single_step_breakpoints_for_method(ti, jvmti_thread, method);
    }

    return entry_point;
} // compile_me
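// Records an inlined code region for its outer method and reports a JVMTI
// COMPILED_METHOD_LOAD event for that region when the event is requested and
// the VM is in the live phase.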
VMEXPORT void vm_compiled_method_load(Method_Handle method, U_32 codeSize,
                                      void* codeAddr, U_32 mapLength,
                                      AddrLocation* addrLocationMap,
                                      void* compileInfo, Method_Handle outer_method)
{
    assert(method);
    assert(outer_method);

    outer_method->add_inline_info_entry(method, codeSize, codeAddr,
                                        mapLength, addrLocationMap);

    // Find TI environment
    DebugUtilsTI* ti = VM_Global_State::loader_env->TI;

    // Call TI callbacks
    if (jvmti_should_report_event(JVMTI_EVENT_COMPILED_METHOD_LOAD)
        && ti->getPhase() == JVMTI_PHASE_LIVE)
    {
        jvmti_send_region_compiled_method_load_event(method, codeSize, codeAddr,
            mapLength, addrLocationMap, NULL);
    }
}
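// Drives one JIT compilation: serializes compilations unless parallel JIT is
// enabled, invokes the JIT, flushes the generated code chunks, publishes the
// entry point, and reports a JVMTI COMPILED_METHOD_LOAD event on success.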
JIT_Result compile_do_compilation_jit(Method* method, JIT* jit)
{
    // Time stamp for counting the total compilation time
    apr_time_t start;

    Global_Env* vm_env = VM_Global_State::loader_env;

    assert(method);
    assert(jit);

    if (!parallel_jit) {
        vm_env->p_jit_a_method_lock->_lock();
        // MikhailF reports that each JIT in recompilation chain has its own
        // JIT* pointer.
        // If in addition to recompilation chains one adds recompilation loops,
        // this check can be skipped, or main_code_chunk_id should be
        // modified.
        if (NULL != method->get_chunk_info_no_create_mt(jit, CodeChunkInfo::main_code_chunk_id)) {
            vm_env->p_jit_a_method_lock->_unlock();
            return JIT_SUCCESS;
        }
    }

    OpenMethodExecutionParams flags = {0};
    jvmti_get_compilation_flags(&flags);
    flags.exe_insert_write_barriers = gc_requires_barriers();

    Compilation_Handle ch;
    ch.env = VM_Global_State::loader_env;
    ch.jit = jit;

    start = apr_time_now();

    TRACE("compile_do_compilation_jit(): calling jit->compile_method_with_params() for method " << method);

    JIT_Result res = jit->compile_method_with_params(&ch, method, flags);

    TRACE("compile_do_compilation_jit(): returned from jit->compile_method_with_params() for method " << method);

    UNSAFE_REGION_START
    // Non-atomic increment of statistic counter
    // Conversion from microseconds to milliseconds
    vm_env->total_compilation_time += ((apr_time_now() - start) / 1000);
    UNSAFE_REGION_END

    if (JIT_SUCCESS != res) {
        if (!parallel_jit) {
            vm_env->p_jit_a_method_lock->_unlock();
        }
        return res;
    }

    method->lock();
    for (CodeChunkInfo* cci = method->get_first_JIT_specific_info(); cci; cci = cci->_next) {
        if (cci->get_jit() == jit) {
            compile_flush_generated_code_block((U_8*)cci->get_code_block_addr(),
                                               cci->get_code_block_size());
            // We assume the main chunk starts from entry point
            if (cci->get_id() == CodeChunkInfo::main_code_chunk_id) {
                method->set_code_addr(cci->get_code_block_addr());
            }
        }
    }

    // Commit the compilation by setting the method's code address
    method->set_state(Method::ST_Compiled);
    method->do_jit_recompiled_method_callbacks();
    method->apply_vtable_patches();
    method->unlock();

    if (!parallel_jit) {
        vm_env->p_jit_a_method_lock->_unlock();
    }

    // Find TI environment
    DebugUtilsTI* ti = vm_env->TI;

    // Call TI callbacks
    if (jvmti_should_report_event(JVMTI_EVENT_COMPILED_METHOD_LOAD)
        && ti->getPhase() == JVMTI_PHASE_LIVE)
    {
        jvmti_send_chunks_compiled_method_load_event(method);
    }
    return JIT_SUCCESS;
}
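// Builds the LIL stub that adapts a managed call to a native JNI function:
// it pushes an M2nFrame, wraps reference arguments in object handles, performs
// synchronization and JVMTI MethodEntry/MethodExit reporting, enables GC around
// the native call, unhandles a reference return value, and rethrows any pending
// exception before returning to managed code.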
// Implementation note: don't use l2 (use l3, l4 instead if required) since its
// space can be used in case of 64-bit return value.
NativeCodePtr compile_create_lil_jni_stub(Method_Handle method, void* func, NativeStubOverride nso)
{
    ASSERT_NO_INTERPRETER;
    const Class_Handle clss = method->get_class();
    bool is_static = method->is_static();
    bool is_synchronised = method->is_synchronized();
    Method_Signature_Handle msh = method_get_signature(method);
    unsigned num_args = method->get_num_args();
    Type_Info_Handle ret_tih = method_ret_type_get_type_info(msh);
    VM_Data_Type ret_type = type_info_get_type(ret_tih);
    unsigned i;

    unsigned num_ref_args = 0; // among original args, does not include jclass for static methods
    for(i=0; i<num_args; i++)
        if (is_reference(method_args_get_type_info(msh, i))) num_ref_args++;

    //***** Part 1: Entry, Stats, Override, push m2n, allocate space for handles

    LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:%0m;", method);
    assert(cs);

    // Increment stats (total number of calls)
#ifdef VM_STATS
    cs = lil_parse_onto_end(cs, "inc [%0i:pint];", &((Method*)method)->num_accesses);
    assert(cs);
#endif //VM_STATS

    // Do stub override here
    if (nso) cs = nso(cs, method);
    assert(cs);

    // Increment stats (number of nonoverridden calls)
#ifdef VM_STATS
    cs = lil_parse_onto_end(cs, "inc [%0i:pint];", &((Method*)method)->num_slow_accesses);
    assert(cs);
#endif

    // Push M2nFrame
    cs = lil_parse_onto_end(cs, "push_m2n %0i, %1i, handles; locals 3;",
                            method, (POINTER_SIZE_INT)FRAME_JNI);
    assert(cs);

    // Allocate space for handles
    unsigned number_of_object_handles = num_ref_args + (is_static ? 1 : 0);
    cs = oh_gen_allocate_handles(cs, number_of_object_handles, "l0", "l1");
    assert(cs);

    //***** Part 2: Initialize object handles

    if (is_static) {
        void* jlc = clss->get_class_handle();
        cs = lil_parse_onto_end(cs,
            //"ld l1,[%0i:pint];"
            "ld l1,[%0i:ref];",
            jlc);
        assert(cs);
        cs = oh_gen_init_handle(cs, "l0", 0, "l1", false);
        assert(cs);
    } else {
        cs = oh_gen_init_handle(cs, "l0", 0, "i0", true);
    }

    // The remaining handles are for the proper arguments (not including this).
    // Loop over the arguments, skipping 0th argument for instance methods.
    // If an argument is a reference, generate code to initialize its handle.
    unsigned hn = 1;
    for(i=(is_static?0:1); i<num_args; i++) {
        if (is_reference(method_args_get_type_info(msh, i))) {
            char buf[20];
            sprintf(buf, "i%d", i);
            cs = oh_gen_init_handle(cs, "l0", hn, buf, true);
            assert(cs);
            hn++;
        }
    }

    //***** Part 3: Synchronize

    if (is_synchronised) {
        if (is_static) {
            cs = lil_parse_onto_end(cs,
                "out stdcall:pint:pint;"
                "o0=%0i;"
                "call %1i;"
                "out stdcall:pint:void;"
                "o0=r;"
                "call %2i;",
                clss,
                lil_npc_to_fp(vm_helper_get_addr(VM_RT_CLASS_2_JLC)),
                lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_ENTER)));
            assert(cs);
        } else {
            cs = lil_parse_onto_end(cs,
                "out stdcall:ref:void;"
                "o0=i0;"
                "call %0i;",
                lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_ENTER)));
            assert(cs);
        }
    }

    //***** Call JVMTI MethodEntry
    DebugUtilsTI* ti = VM_Global_State::loader_env->TI;
    if (ti->isEnabled() &&
        ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_METHOD_ENTRY))
    {
        cs = lil_parse_onto_end(cs,
            "out platform:pint:void;"
            "o0=%0i:pint;"
            "call %1i;",
            (jmethodID)method,
            jvmti_process_method_entry_event);
        assert(cs);
    }

    //***** Part 4: Enable GC
    cs = lil_parse_onto_end(cs,
        "out platform::void;"
        "call %0i;",
        hythread_suspend_enable);
    assert(cs);

    //***** Part 5: Set up arguments

    // Setup outputs, set JNIEnv, set class/this handle
    cs = lil_parse_onto_end(cs,
        "out jni:%0j;"
        "l1=ts;"
        "ld o0,[l1 + %1i:pint];"
        "o1=l0+%2i;",
        method,
        (POINTER_SIZE_INT)APR_OFFSETOF(VM_thread, jni_env),
        oh_get_handle_offset(0));
    assert(cs);

    // Loop over arguments proper, setting rest of outputs
    unsigned int arg_base = 1 + (is_static ? 1 : 0);
    hn = 1;
    for(i=(is_static?0:1); i<num_args; i++) {
        if (is_reference(method_args_get_type_info(msh, i))) {
            POINTER_SIZE_INT handle_offset = oh_get_handle_offset(hn);
            REFS_RUNTIME_SWITCH_IF
#ifdef REFS_RUNTIME_OR_COMPRESSED
                cs = lil_parse_onto_end(cs,
                    "jc i%0i=%1i:ref,%n;"
                    "o%2i=l0+%3i;"
                    "j %o;"
                    ":%g;"
                    "o%4i=0;"
                    ":%g;",
                    i,
                    VM_Global_State::loader_env->managed_null,
                    arg_base+i,
                    handle_offset,
                    arg_base+i);
#endif // REFS_RUNTIME_OR_COMPRESSED
            REFS_RUNTIME_SWITCH_ELSE
#ifdef REFS_RUNTIME_OR_UNCOMPRESSED
                cs = lil_parse_onto_end(cs,
                    "jc i%0i=0:ref,%n;"
                    "o%1i=l0+%2i;"
                    "j %o;"
                    ":%g;"
                    "o%3i=0;"
                    ":%g;",
                    i,
                    arg_base+i,
                    handle_offset,
                    arg_base+i);
#endif // REFS_RUNTIME_OR_UNCOMPRESSED
            REFS_RUNTIME_SWITCH_ENDIF
            hn++;
        } else {
            cs = lil_parse_onto_end(cs, "o%0i=i%1i;", arg_base+i, i);
        }
        assert(cs);
    }

    //***** Part 6: Call
    cs = lil_parse_onto_end(cs,
        "call %0i;",
        func);
    assert(cs);

    //***** Part 7: Save return, widening if necessary
    switch (ret_type) {
    case VM_DATA_TYPE_VOID:    break;
    case VM_DATA_TYPE_INT32:   cs = lil_parse_onto_end(cs, "l1=r;");     break;
    case VM_DATA_TYPE_BOOLEAN: cs = lil_parse_onto_end(cs, "l1=zx1 r;"); break;
    case VM_DATA_TYPE_INT16:   cs = lil_parse_onto_end(cs, "l1=sx2 r;"); break;
    case VM_DATA_TYPE_INT8:    cs = lil_parse_onto_end(cs, "l1=sx1 r;"); break;
    case VM_DATA_TYPE_CHAR:    cs = lil_parse_onto_end(cs, "l1=zx2 r;"); break;
    default:                   cs = lil_parse_onto_end(cs, "l1=r;");     break;
    }
    assert(cs);

    //***** Part 8: Disable GC
    cs = lil_parse_onto_end(cs,
        "out platform::void;"
        "call %0i;",
        hythread_suspend_disable);
    assert(cs);

    // Exception offsets
    POINTER_SIZE_INT eoo = (POINTER_SIZE_INT)&((VM_thread*)0)->thread_exception.exc_object;
    POINTER_SIZE_INT eco = (POINTER_SIZE_INT)&((VM_thread*)0)->thread_exception.exc_class;

    //***** Call JVMTI MethodExit
    if (ti->isEnabled() &&
        ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_METHOD_EXIT))
    {
        cs = lil_parse_onto_end(cs,
            "out platform:pint,g1,g8:void;"
            "l2=ts;"
            "ld l2,[l2+%0i:ref];"
            "jc l2!=0,_mex_exn_raised;"
            "l2=ts;"
            "ld l2,[l2+%1i:ref];"
            "jc l2!=0,_mex_exn_raised;"
            "o1=%2i:g1;"
            "o2=l1:g8;"
            "j _mex_exn_cont;"
            ":_mex_exn_raised;"
            "o1=%3i:g1;"
            "o2=0:g8;"
            ":_mex_exn_cont;"
            "o0=%4i:pint;"
            "call %5i;",
            eoo,
            eco,
            (POINTER_SIZE_INT)JNI_FALSE,
            (POINTER_SIZE_INT)JNI_TRUE,
            (jmethodID)method,
            jvmti_process_method_exit_event);
        assert(cs);
    }

    //***** Part 9: Synchronize
    if (is_synchronised) {
        if (is_static) {
            cs = lil_parse_onto_end(cs,
                "out stdcall:pint:pint;"
                "o0=%0i;"
                "call %1i;"
                "out stdcall:pint:void;"
                "o0=r;"
                "call %2i;",
                clss,
                lil_npc_to_fp(vm_helper_get_addr(VM_RT_CLASS_2_JLC)),
                lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_EXIT)));
        } else {
            cs = lil_parse_onto_end(cs,
                "ld l0,[l0+%0i:ref];"
                "out stdcall:ref:void; o0=l0; call %1i;",
                oh_get_handle_offset(0),
                lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_EXIT)));
        }
        assert(cs);
    }

    //***** Part 10: Unhandle the return if it is a reference
    if (is_reference(ret_tih)) {
        cs = lil_parse_onto_end(cs,
            "jc l1=0,ret_done;"
            "ld l1,[l1+0:ref];"
            ":ret_done;");
#ifdef REFS_RUNTIME_OR_COMPRESSED
        REFS_RUNTIME_SWITCH_IF
            cs = lil_parse_onto_end(cs,
                "jc l1!=0,done_translating_ret;"
                "l1=%0i:ref;"
                ":done_translating_ret;",
                VM_Global_State::loader_env->managed_null);
        REFS_RUNTIME_SWITCH_ENDIF
#endif // REFS_RUNTIME_OR_COMPRESSED
        assert(cs);
    }

    //***** Part 11: Rethrow exception
    cs = lil_parse_onto_end(cs,
        "l0=ts;"
        "ld l2,[l0+%0i:ref];"
        "jc l2!=0,_exn_raised;"
        "ld l2,[l0+%1i:ref];"
        "jc l2=0,_no_exn;"
        ":_exn_raised;"
        "m2n_save_all;"
        "out platform::void;"
        "call.noret %2i;"
        ":_no_exn;",
        eoo, eco, exn_rethrow);
    assert(cs);

    //***** Part 12: Restore return variable, pop_m2n, return
    if (ret_type != VM_DATA_TYPE_VOID) {
        cs = lil_parse_onto_end(cs, "r=l1;");
        assert(cs);
    }
    cs = lil_parse_onto_end(cs,
        "pop_m2n;"
        "ret;");
    assert(cs);

    //***** Now generate code

    assert(lil_is_valid(cs));
    NativeCodePtr addr = LilCodeGenerator::get_platform()->compile(cs,
        clss->get_class_loader()->GetCodePool());

#ifndef NDEBUG
    char buf[100];
    apr_snprintf(buf, sizeof(buf)-1, "jni_stub.%s::%s",
                 clss->get_name()->bytes, method->get_name()->bytes);
    DUMP_STUB(addr, buf, lil_cs_get_code_size(cs));
#endif
#ifdef VM_STATS
    VM_Statistics::get_vm_stats().jni_stub_bytes += lil_cs_get_code_size(cs);
#endif

    lil_free_code_stub(cs);
    return addr;
} // compile_create_lil_jni_stub
// Notifies every NCAI environment about a native module load or unload by
// dispatching to its global or per-thread ModuleLoad/ModuleUnload callbacks.
static void report_loaded_unloaded_module(ncaiModule module, bool loaded)
{
    DebugUtilsTI* ti = VM_Global_State::loader_env->TI;

    hythread_t hythread = hythread_self();
    ncaiThread thread = reinterpret_cast<ncaiThread>(hythread);

    bool suspend_enabled = hythread_is_suspend_enabled();

    if (!suspend_enabled)
        hythread_suspend_enable();

    TIEnv* ti_env = ti->getEnvironments();
    TIEnv* next_ti_env;

    const char* trace_text = loaded ? "ModuleLoad" : "ModuleUnload";

    while (NULL != ti_env)
    {
        next_ti_env = ti_env->next;

        NCAIEnv* env = ti_env->ncai_env;

        if (NULL == env)
        {
            ti_env = next_ti_env;
            continue;
        }

        ncaiModuleLoad func_l =
            (ncaiModuleLoad)env->get_event_callback(NCAI_EVENT_MODULE_LOAD);
        ncaiModuleUnload func_u =
            (ncaiModuleUnload)env->get_event_callback(NCAI_EVENT_MODULE_UNLOAD);

        ncaiModule env_module = NULL;
        ncaiModLU func = loaded ? (ncaiModLU)func_l : (ncaiModLU)func_u;
        ncaiEventKind event =
            loaded ? NCAI_EVENT_MODULE_LOAD : NCAI_EVENT_MODULE_UNLOAD;

        if (NULL != func)
        {
            if (env->global_events[event - NCAI_MIN_EVENT_TYPE_VAL])
            {
                TRACE2("ncai.modules", "Calling global " << trace_text
                    << " callback for module " << module->info->name);

                find_init_module_record(env, module, &env_module);
                func((ncaiEnv*)env, thread, env_module);

                TRACE2("ncai.modules", "Finished global " << trace_text
                    << " callback for module " << module->info->name);

                ti_env = next_ti_env;
                continue;
            }

            ncaiEventThread* next_et;
            ncaiEventThread* first_et =
                env->event_threads[event - NCAI_MIN_EVENT_TYPE_VAL];

            for (ncaiEventThread* et = first_et; NULL != et; et = next_et)
            {
                next_et = et->next;

                if (et->thread == thread)
                {
                    TRACE2("ncai.modules", "Calling local " << trace_text
                        << " callback for module " << module->info->name);

                    find_init_module_record(env, module, &env_module);
                    func((ncaiEnv*)env, thread, env_module);

                    TRACE2("ncai.modules", "Finished local " << trace_text
                        << " callback for module " << module->info->name);
                }
            }
        }

        ti_env = next_ti_env;
    }

    if (!suspend_enabled)
        hythread_suspend_disable();
}
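// Throws an exception starting from an explicitly supplied register context
// (e.g. after a hardware fault): pushes a suspended M2nFrame when the faulting
// code is managed, propagates the exception, and either transfers control to
// the handler or leaves the registers and catch callbacks prepared for it.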
// function can be a safe point & should be called with disable recursion = 1
void exn_athrow_regs(Registers* regs, Class_Handle exn_class, bool java_code, bool transfer_control)
{
    assert(!hythread_is_suspend_enabled());
    assert(exn_class);

#ifndef _IPF_
    M2nFrame* cur_m2nf = (M2nFrame*) STD_ALLOCA(m2n_get_size());
    M2nFrame* unw_m2nf;
    ManagedObject* exn_obj = NULL;
    StackIterator* si;
    DebugUtilsTI* ti = VM_Global_State::loader_env->TI;
    VM_thread* vmthread = p_TLS_vmthread;

    if (java_code)
        m2n_push_suspended_frame(vmthread, cur_m2nf, regs);
    else
        // Gregory -
        // Initialize cur_m2nf pointer in case we've crashed in native code that is unwindable,
        // e.g. in the code that sets non-unwindable state for the native code area
        cur_m2nf = m2n_get_last_frame();

    BEGIN_RAISE_AREA;

    si = (StackIterator*) STD_ALLOCA(si_size());
    si_fill_from_native(si);
    ManagedObject* local_exn_obj = NULL;
    exn_obj = exn_propagate_exception(si, &local_exn_obj, exn_class, NULL, NULL, NULL);

    //free local handles
    ObjectHandles* last_m2n_frame_handles = m2n_get_local_handles(cur_m2nf);

    if (last_m2n_frame_handles) {
        free_local_object_handles2(last_m2n_frame_handles);
    }

    if (ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_EXCEPTION_EVENT)) {
        VM_thread* thread = p_TLS_vmthread;
        NativeCodePtr callback = (NativeCodePtr) jvmti_exception_catch_callback;

        si_copy_to_registers(si, regs);
        vm_set_exception_registers(thread, *regs);
        si_set_callback(si, &callback);
    } else if (p_TLS_vmthread->restore_guard_page) {
        VM_thread* thread = p_TLS_vmthread;
        NativeCodePtr callback = (NativeCodePtr) exception_catch_callback;

        si_copy_to_registers(si, regs);
        vm_set_exception_registers(thread, *regs);
        si_set_callback(si, &callback);
    }

    si_copy_to_registers(si, regs);

    if (transfer_control) {
        // Let NCAI continue single stepping in the exception handler
        ncai_setup_signal_step(&vmthread->jvmti_thread, (NativeCodePtr)regs->get_ip());

        set_exception_object_internal(exn_obj);
        si_transfer_control(si);
        assert(!"si_transfer_control should not return");
    }

    unw_m2nf = si_get_m2n(si);
    //si_free(si);

    END_RAISE_AREA;

    set_exception_object_internal(exn_obj);
    m2n_set_last_frame(unw_m2nf);
#endif
} //exn_athrow_regs
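// Throws an exception on behalf of JIT-compiled code: builds the exception
// lazily when only a class is given, propagates it up the stack, and transfers
// control to the matching handler. No logging is allowed here (see the note
// inside the function).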
// function can be a safe point & should be called with disable recursion = 1
void exn_throw_for_JIT(ManagedObject* exn_obj, Class_Handle exn_class,
    Method_Handle exn_constr, U_8* jit_exn_constr_args, jvalue* vm_exn_constr_args)
{
/*
 * !!!! NO LOGGER IS ALLOWED IN THIS FUNCTION !!!
 * !!!! RELEASE BUILD WILL BE BROKEN          !!!
 * !!!! NO TRACE2, INFO, WARN, ECHO, ASSERT, ...
 */
    assert(!hythread_is_suspend_enabled());

    if(exn_raised()) {
        return;
    }

    ASSERT_NO_INTERPRETER
    ASSERT_RAISE_AREA;

    if ((exn_obj == NULL) && (exn_class == NULL)) {
        exn_class = VM_Global_State::loader_env->java_lang_NullPointerException_Class;
    }
    ManagedObject* local_exn_obj = exn_obj;
    StackIterator* si = (StackIterator*) STD_ALLOCA(si_size());
    si_fill_from_native(si);

    if (exn_raised()) {
        return;
    }

#ifndef _IPF_
    assert(is_gc_frame_before_m2n_frame());
#endif // _IPF_

    assert(!exn_raised());

    if (si_is_past_end(si)) {
        //FIXME LAZY EXCEPTION (2006.05.12)
        // should be replaced by lazy version
        set_exception_object_internal(local_exn_obj);
        return;
    }

    si_transfer_all_preserved_registers(si);
    assert(!exn_raised());

    DebugUtilsTI* ti = VM_Global_State::loader_env->TI;
    exn_obj = exn_propagate_exception(si, &local_exn_obj, exn_class, exn_constr,
        jit_exn_constr_args, vm_exn_constr_args);

    if (exn_raised()) {
        //si_free(si);
        return;
    }

    M2nFrame* m2nFrame = m2n_get_last_frame();
    ObjectHandles* last_m2n_frame_handles = m2n_get_local_handles(m2nFrame);
    if (last_m2n_frame_handles) {
        free_local_object_handles2(last_m2n_frame_handles);
    }
    set_exception_object_internal(exn_obj);

    if (ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_EXCEPTION_EVENT)) {
        Registers regs = {0};
        VM_thread* thread = p_TLS_vmthread;
        NativeCodePtr callback = (NativeCodePtr) jvmti_exception_catch_callback;

        si_copy_to_registers(si, &regs);
        vm_set_exception_registers(thread, regs);
        si_set_callback(si, &callback);
    } else if (p_TLS_vmthread->restore_guard_page) {
        Registers regs = {0};
        VM_thread* thread = p_TLS_vmthread;
        NativeCodePtr callback = (NativeCodePtr) exception_catch_callback;

        si_copy_to_registers(si, &regs);
        vm_set_exception_registers(thread, regs);
        si_set_callback(si, &callback);
    }

    // don't put any call here
    si_transfer_control(si);
} //exn_throw_for_JIT
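// Core unwinding routine: walks JIT frames looking for a matching exception
// handler, undoing synchronization and sending JVMTI events along the way.
// Returns NULL when a managed handler was found and the stack iterator has been
// pointed at it, or the (possibly lazily created) exception object when the
// exception propagates out to native code.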
// function can be a safe point & should be called with disable recursion = 1
static ManagedObject* exn_propagate_exception(
    StackIterator* si,
    ManagedObject** exn_obj,
    Class_Handle exn_class,
    Method_Handle exn_constr,
    U_8* jit_exn_constr_args,
    jvalue* vm_exn_constr_args)
{
    assert(!hythread_is_suspend_enabled());
    ASSERT_RAISE_AREA;
    ASSERT_NO_INTERPRETER;
    assert(*exn_obj || exn_class);

    // Save the throw context
    StackIterator* throw_si = (StackIterator*) STD_ALLOCA(si_size());
    memcpy(throw_si, si, si_size());

    // Skip first frame if it is an M2nFrame (which is always a transition from
    // managed to the throw code). The M2nFrame will be removed from the thread's
    // M2nFrame list by transfer control or copy to registers.
    if (si_is_native(si)) {
        si_goto_previous(si);
    }

    Method* interrupted_method;
    NativeCodePtr interrupted_method_location;
    JIT* interrupted_method_jit;
    bool restore_guard_page = p_TLS_vmthread->restore_guard_page;

    if (!si_is_native(si))
    {
        CodeChunkInfo* interrupted_cci = si_get_code_chunk_info(si);
        assert(interrupted_cci);
        interrupted_method = interrupted_cci->get_method();
        interrupted_method_location = si_get_ip(si);
        interrupted_method_jit = interrupted_cci->get_jit();
    }
    else
    {
        interrupted_method = m2n_get_method(si_get_m2n(si));
        interrupted_method_location = 0;
        interrupted_method_jit = 0;
    }

    if (NULL != *exn_obj)
    {
        // Gregory - When *exn_obj is NULL it means we're called from exn_athrow_regs,
        // which means that IP points exactly to the right location. But
        // when *exn_obj is not NULL, it means that we're called from exn_throw_for_JIT,
        // where *exn_obj is already constructed and is thrown by code via athrow.
        // So in this case IP reported by the stack iterator is past the athrow bytecode
        // and should be moved back to be inside of the bytecode location for the
        // interrupted method.
        interrupted_method_location = (NativeCodePtr)((POINTER_SIZE_INT)interrupted_method_location - 1);

        // Determine the type of the exception for the type tests below.
        exn_class = (*exn_obj)->vt()->clss;
    }

#ifdef VM_STATS
    assert(exn_class);
    exn_class->class_thrown();
    UNSAFE_REGION_START
    VM_Statistics::get_vm_stats().num_exceptions++;
    UNSAFE_REGION_END
#endif // VM_STATS

    // Remove single step breakpoints which could have been set on the
    // exception bytecode
    DebugUtilsTI* ti = VM_Global_State::loader_env->TI;
    if (ti->isEnabled() && ti->is_single_step_enabled())
    {
        jvmti_thread_t jvmti_thread = jthread_self_jvmti();
        ti->vm_brpt->lock();
        if (NULL != jvmti_thread->ss_state)
        {
            jvmti_remove_single_step_breakpoints(ti, jvmti_thread);
        }
        ti->vm_brpt->unlock();
    }

    // When VM is in shutdown stage we need to execute "finally" clauses to
    // release monitors and propagate an exception to the upper frames.
    Class_Handle search_exn_class = !VM_Global_State::loader_env->IsVmShutdowning()
        ? exn_class : VM_Global_State::loader_env->JavaLangObject_Class;

    if (!si_is_native(si))
    {
        bool same_frame = true;
        while (!si_is_past_end(si) && !si_is_native(si))
        {
            CodeChunkInfo* cci = si_get_code_chunk_info(si);
            assert(cci);
            Method* method = cci->get_method();
            JIT* jit = cci->get_jit();
            assert(method && jit);
            NativeCodePtr ip = si_get_ip(si);
            bool is_ip_past = !!si_get_jit_context(si)->is_ip_past;

#ifdef VM_STATS
            cci->num_throws++;
#endif // VM_STATS

            // Examine this frame's exception handlers looking for a match
            unsigned num_handlers = cci->get_num_target_exception_handlers();
            for (unsigned i = 0; i < num_handlers; i++)
            {
                Target_Exception_Handler_Ptr handler =
                    cci->get_target_exception_handler_info(i);
                if (!handler)
                    continue;
                if (handler->is_in_range(ip, is_ip_past)
                    && handler->is_assignable(search_exn_class))
                {
                    // Found a handler that catches the exception.
#ifdef VM_STATS
                    cci->num_catches++;
                    if (same_frame)
                    {
                        VM_Statistics::get_vm_stats().num_exceptions_caught_same_frame++;
                    }
                    if (handler->is_exc_obj_dead())
                    {
                        VM_Statistics::get_vm_stats().num_exceptions_dead_object++;
                        if (!*exn_obj)
                        {
                            VM_Statistics::get_vm_stats().num_exceptions_object_not_created++;
                        }
                    }
#endif // VM_STATS

                    if (restore_guard_page) {
                        bool res = check_stack_size_enough_for_exception_catch(si_get_sp(si));
                        // must always be enough; otherwise program behavior is unspecified:
                        // finally blocks and monitor exits are not executed
                        assert(res);
                        if (!res) {
                            break;
                        }
                    }

                    // Setup handler context
                    jit->fix_handler_context(method, si_get_jit_context(si));
                    si_set_ip(si, handler->get_handler_ip(), false);

                    // Start single step in exception handler
                    if (ti->isEnabled() && ti->is_single_step_enabled())
                    {
                        jvmti_thread_t jvmti_thread = jthread_self_jvmti();
                        ti->vm_brpt->lock();
                        if (NULL != jvmti_thread->ss_state)
                        {
                            uint16 bc;
                            NativeCodePtr ip = handler->get_handler_ip();
                            OpenExeJpdaError UNREF result =
                                jit->get_bc_location_for_native(method, ip, &bc);
                            assert(EXE_ERROR_NONE == result);

                            jvmti_StepLocation method_start = {(Method*)method, ip, bc, false};
                            jvmti_set_single_step_breakpoints(ti, jvmti_thread, &method_start, 1);
                        }
                        ti->vm_brpt->unlock();
                    }

                    // Create exception if necessary
                    if (!*exn_obj && !handler->is_exc_obj_dead())
                    {
                        assert(!exn_raised());
                        *exn_obj = create_lazy_exception(exn_class, exn_constr,
                            jit_exn_constr_args, vm_exn_constr_args);
                    }

                    if (jvmti_is_exception_event_requested())
                    {
                        // Create exception if necessary
                        if (NULL == *exn_obj)
                        {
                            *exn_obj = create_lazy_exception(exn_class, exn_constr,
                                jit_exn_constr_args, vm_exn_constr_args);
                        }

                        // Reload exception object pointer because it could have
                        // moved while calling JVMTI callback
                        *exn_obj = jvmti_jit_exception_event_callback_call(*exn_obj,
                            interrupted_method_jit, interrupted_method,
                            interrupted_method_location,
                            jit, method, handler->get_handler_ip());
                    }
                    CTRACE(("setting return pointer to %d", exn_obj));

                    si_set_return_pointer(si, (void**) exn_obj);
                    //si_free(throw_si);
                    return NULL;
                }
            }

            // No appropriate handler found, undo synchronization
            vm_monitor_exit_synchronized_method(si);

            jvalue ret_val = {(jlong)0};
            jvmti_process_method_exception_exit_event(
                reinterpret_cast<jmethodID>(method), JNI_TRUE, ret_val, si);

            // Goto previous frame
            si_goto_previous(si);
            same_frame = false;
        }
    }
    // Exception propagates to the native code
    assert(si_is_native(si));

    // The current thread exception is set to the exception and we return 0/NULL
    // to the native code
    if (*exn_obj == NULL)
    {
        *exn_obj = create_lazy_exception(exn_class, exn_constr,
            jit_exn_constr_args, vm_exn_constr_args);
    }
    assert(!hythread_is_suspend_enabled());

    CodeChunkInfo* catch_cci = si_get_code_chunk_info(si);
    Method* catch_method = NULL;
    if (catch_cci)
        catch_method = catch_cci->get_method();

    // Reload exception object pointer because it could have
    // moved while calling JVMTI callback
    if (exn_raised())
    {
        //si_free(throw_si);
        return NULL;
    }

    *exn_obj = jvmti_jit_exception_event_callback_call(*exn_obj,
        interrupted_method_jit, interrupted_method, interrupted_method_location,
        NULL, NULL, NULL);

    //si_free(throw_si);
    return *exn_obj;
} //exn_propagate_exception