// function can be a safe point & should be called with disable_recursion = 1
void exn_athrow_regs(Registers * regs, Class_Handle exn_class, bool java_code, bool transfer_control)
{
    assert(!hythread_is_suspend_enabled());
    assert(exn_class);

#ifndef _IPF_
    M2nFrame *cur_m2nf = (M2nFrame *) STD_ALLOCA(m2n_get_size());
    M2nFrame *unw_m2nf;
    ManagedObject *exn_obj = NULL;
    StackIterator *si;
    DebugUtilsTI* ti = VM_Global_State::loader_env->TI;

    VM_thread* vmthread = p_TLS_vmthread;

    if (java_code)
        m2n_push_suspended_frame(vmthread, cur_m2nf, regs);
    else
        // Gregory -
        // Initialize cur_m2nf pointer in case we've crashed in native code that is unwindable,
        // e.g. in the code that sets non-unwindable state for the native code area
        cur_m2nf = m2n_get_last_frame();

    BEGIN_RAISE_AREA;

    si = (StackIterator*) STD_ALLOCA(si_size());
    si_fill_from_native(si);
    ManagedObject *local_exn_obj = NULL;
    exn_obj = exn_propagate_exception(si, &local_exn_obj, exn_class, NULL, NULL, NULL);

    // free local handles
    ObjectHandles* last_m2n_frame_handles = m2n_get_local_handles(cur_m2nf);

    if (last_m2n_frame_handles) {
        free_local_object_handles2(last_m2n_frame_handles);
    }

    if (ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_EXCEPTION_EVENT)) {
        VM_thread *thread = p_TLS_vmthread;
        NativeCodePtr callback = (NativeCodePtr) jvmti_exception_catch_callback;

        si_copy_to_registers(si, regs);
        vm_set_exception_registers(thread, *regs);
        si_set_callback(si, &callback);
    } else if (p_TLS_vmthread->restore_guard_page) {
        VM_thread *thread = p_TLS_vmthread;
        NativeCodePtr callback = (NativeCodePtr) exception_catch_callback;

        si_copy_to_registers(si, regs);
        vm_set_exception_registers(thread, *regs);
        si_set_callback(si, &callback);
    }

    si_copy_to_registers(si, regs);

    if (transfer_control) {
        // Let NCAI continue single stepping in the exception handler
        ncai_setup_signal_step(&vmthread->jvmti_thread, (NativeCodePtr)regs->get_ip());

        set_exception_object_internal(exn_obj);
        si_transfer_control(si);
        assert(!"si_transfer_control should not return");
    }

    unw_m2nf = si_get_m2n(si);
    //si_free(si);

    END_RAISE_AREA;

    set_exception_object_internal(exn_obj);
    m2n_set_last_frame(unw_m2nf);
#endif
} //exn_athrow_regs
// Implementation note: don't use l2 (use l3, l4 instead if required) since its
// space can be used in case of 64-bit return value.
NativeCodePtr compile_create_lil_jni_stub(Method_Handle method, void* func, NativeStubOverride nso)
{
    ASSERT_NO_INTERPRETER;
    const Class_Handle clss = method->get_class();
    bool is_static = method->is_static();
    bool is_synchronised = method->is_synchronized();
    Method_Signature_Handle msh = method_get_signature(method);
    unsigned num_args = method->get_num_args();
    Type_Info_Handle ret_tih = method_ret_type_get_type_info(msh);
    VM_Data_Type ret_type = type_info_get_type(ret_tih);
    unsigned i;

    unsigned num_ref_args = 0; // among original args, does not include jclass for static methods
    for(i=0; i<num_args; i++)
        if (is_reference(method_args_get_type_info(msh, i)))
            num_ref_args++;

    //***** Part 1: Entry, Stats, Override, push m2n, allocate space for handles

    LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:%0m;", method);
    assert(cs);

    // Increment stats (total number of calls)
#ifdef VM_STATS
    cs = lil_parse_onto_end(cs, "inc [%0i:pint];", &((Method*)method)->num_accesses);
    assert(cs);
#endif //VM_STATS

    // Do stub override here
    if (nso)
        cs = nso(cs, method);
    assert(cs);

    // Increment stats (number of nonoverridden calls)
#ifdef VM_STATS
    cs = lil_parse_onto_end(cs, "inc [%0i:pint];", &((Method*)method)->num_slow_accesses);
    assert(cs);
#endif

    // Push M2nFrame
    cs = lil_parse_onto_end(cs, "push_m2n %0i, %1i, handles; locals 3;",
                            method, (POINTER_SIZE_INT)FRAME_JNI);
    assert(cs);

    // Allocate space for handles
    unsigned number_of_object_handles = num_ref_args + (is_static ? 1 : 0);
    cs = oh_gen_allocate_handles(cs, number_of_object_handles, "l0", "l1");
    assert(cs);

    //***** Part 2: Initialize object handles

    if (is_static) {
        void *jlc = clss->get_class_handle();
        cs = lil_parse_onto_end(cs,
                                //"ld l1,[%0i:pint];"
                                "ld l1,[%0i:ref];",
                                jlc);
        assert(cs);
        cs = oh_gen_init_handle(cs, "l0", 0, "l1", false);
        assert(cs);
    } else {
        cs = oh_gen_init_handle(cs, "l0", 0, "i0", true);
    }

    // The remaining handles are for the proper arguments (not including this).
    // Loop over the arguments, skipping the 0th argument for instance methods.
    // If an argument is a reference, generate code.
    unsigned hn = 1;
    for(i=(is_static?0:1); i<num_args; i++) {
        if (is_reference(method_args_get_type_info(msh, i))) {
            char buf[20];
            sprintf(buf, "i%d", i);
            cs = oh_gen_init_handle(cs, "l0", hn, buf, true);
            assert(cs);
            hn++;
        }
    }

    //***** Part 3: Synchronize
    if (is_synchronised) {
        if (is_static) {
            cs = lil_parse_onto_end(cs,
                                    "out stdcall:pint:pint;"
                                    "o0=%0i;"
                                    "call %1i;"
                                    "out stdcall:pint:void;"
                                    "o0=r;"
                                    "call %2i;",
                                    clss,
                                    lil_npc_to_fp(vm_helper_get_addr(VM_RT_CLASS_2_JLC)),
                                    lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_ENTER)));
            assert(cs);
        } else {
            cs = lil_parse_onto_end(cs,
                                    "out stdcall:ref:void;"
                                    "o0=i0;"
                                    "call %0i;",
                                    lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_ENTER)));
            assert(cs);
        }
    }

    //***** Call JVMTI MethodEntry
    DebugUtilsTI* ti = VM_Global_State::loader_env->TI;
    if (ti->isEnabled() &&
        ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_METHOD_ENTRY))
    {
        cs = lil_parse_onto_end(cs,
                                "out platform:pint:void;"
                                "o0=%0i:pint;"
                                "call %1i;",
                                (jmethodID)method,
                                jvmti_process_method_entry_event);
        assert(cs);
    }

    //***** Part 4: Enable GC
    cs = lil_parse_onto_end(cs,
                            "out platform::void;"
                            "call %0i;",
                            hythread_suspend_enable);
    assert(cs);

    //***** Part 5: Set up arguments

    // Setup outputs, set JNIEnv, set class/this handle
    cs = lil_parse_onto_end(cs,
                            "out jni:%0j;"
                            "l1=ts;"
                            "ld o0,[l1 + %1i:pint];"
                            "o1=l0+%2i;",
                            method,
                            (POINTER_SIZE_INT)APR_OFFSETOF(VM_thread, jni_env),
                            oh_get_handle_offset(0));
    assert(cs);

    // Loop over arguments proper, setting rest of outputs
    unsigned int arg_base = 1 + (is_static ? 1 : 0);
    hn = 1;
    for(i=(is_static?0:1); i<num_args; i++) {
        if (is_reference(method_args_get_type_info(msh, i))) {
            POINTER_SIZE_INT handle_offset = oh_get_handle_offset(hn);
            REFS_RUNTIME_SWITCH_IF
#ifdef REFS_RUNTIME_OR_COMPRESSED
                cs = lil_parse_onto_end(cs,
                                        "jc i%0i=%1i:ref,%n;"
                                        "o%2i=l0+%3i;"
                                        "j %o;"
                                        ":%g;"
                                        "o%4i=0;"
                                        ":%g;",
                                        i,
                                        VM_Global_State::loader_env->managed_null,
                                        arg_base+i,
                                        handle_offset,
                                        arg_base+i);
#endif // REFS_RUNTIME_OR_COMPRESSED
            REFS_RUNTIME_SWITCH_ELSE
#ifdef REFS_RUNTIME_OR_UNCOMPRESSED
                cs = lil_parse_onto_end(cs,
                                        "jc i%0i=0:ref,%n;"
                                        "o%1i=l0+%2i;"
                                        "j %o;"
                                        ":%g;"
                                        "o%3i=0;"
                                        ":%g;",
                                        i,
                                        arg_base+i,
                                        handle_offset,
                                        arg_base+i);
#endif // REFS_RUNTIME_OR_UNCOMPRESSED
            REFS_RUNTIME_SWITCH_ENDIF
            hn++;
        } else {
            cs = lil_parse_onto_end(cs, "o%0i=i%1i;", arg_base+i, i);
        }
        assert(cs);
    }

    //***** Part 6: Call
    cs = lil_parse_onto_end(cs,
                            "call %0i;",
                            func);
    assert(cs);

    //***** Part 7: Save return, widening if necessary
    switch (ret_type) {
    case VM_DATA_TYPE_VOID:
        break;
    case VM_DATA_TYPE_INT32:
        cs = lil_parse_onto_end(cs, "l1=r;");
        break;
    case VM_DATA_TYPE_BOOLEAN:
        cs = lil_parse_onto_end(cs, "l1=zx1 r;");
        break;
    case VM_DATA_TYPE_INT16:
        cs = lil_parse_onto_end(cs, "l1=sx2 r;");
        break;
    case VM_DATA_TYPE_INT8:
        cs = lil_parse_onto_end(cs, "l1=sx1 r;");
        break;
    case VM_DATA_TYPE_CHAR:
        cs = lil_parse_onto_end(cs, "l1=zx2 r;");
        break;
    default:
        cs = lil_parse_onto_end(cs, "l1=r;");
        break;
    }
    assert(cs);

    //***** Part 8: Disable GC
    cs = lil_parse_onto_end(cs,
                            "out platform::void;"
                            "call %0i;",
                            hythread_suspend_disable);
    assert(cs);

    // Exception offsets
    POINTER_SIZE_INT eoo = (POINTER_SIZE_INT)&((VM_thread*)0)->thread_exception.exc_object;
    POINTER_SIZE_INT eco = (POINTER_SIZE_INT)&((VM_thread*)0)->thread_exception.exc_class;

    //***** Call JVMTI MethodExit
    if (ti->isEnabled() &&
        ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_METHOD_EXIT))
    {
        cs = lil_parse_onto_end(cs,
                                "out platform:pint,g1,g8:void;"
                                "l2=ts;"
                                "ld l2,[l2+%0i:ref];"
                                "jc l2!=0,_mex_exn_raised;"
                                "l2=ts;"
                                "ld l2,[l2+%1i:ref];"
                                "jc l2!=0,_mex_exn_raised;"
                                "o1=%2i:g1;"
                                "o2=l1:g8;"
                                "j _mex_exn_cont;"
                                ":_mex_exn_raised;"
                                "o1=%3i:g1;"
                                "o2=0:g8;"
                                ":_mex_exn_cont;"
                                "o0=%4i:pint;"
                                "call %5i;",
                                eoo,
                                eco,
                                (POINTER_SIZE_INT)JNI_FALSE,
                                (POINTER_SIZE_INT)JNI_TRUE,
                                (jmethodID)method,
                                jvmti_process_method_exit_event);
        assert(cs);
    }

    //***** Part 9: Synchronize
    if (is_synchronised) {
        if (is_static) {
            cs = lil_parse_onto_end(cs,
                                    "out stdcall:pint:pint;"
                                    "o0=%0i;"
                                    "call %1i;"
                                    "out stdcall:pint:void;"
                                    "o0=r;"
                                    "call %2i;",
                                    clss,
                                    lil_npc_to_fp(vm_helper_get_addr(VM_RT_CLASS_2_JLC)),
                                    lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_EXIT)));
        } else {
            cs = lil_parse_onto_end(cs,
                                    "ld l0,[l0+%0i:ref];"
                                    "out stdcall:ref:void; o0=l0; call %1i;",
                                    oh_get_handle_offset(0),
                                    lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_EXIT)));
        }
        assert(cs);
    }

    //***** Part 10: Unhandle the return if it is a reference
    if (is_reference(ret_tih)) {
        cs = lil_parse_onto_end(cs,
                                "jc l1=0,ret_done;"
                                "ld l1,[l1+0:ref];"
                                ":ret_done;");
#ifdef REFS_RUNTIME_OR_COMPRESSED
        REFS_RUNTIME_SWITCH_IF
            cs = lil_parse_onto_end(cs,
                                    "jc l1!=0,done_translating_ret;"
                                    "l1=%0i:ref;"
                                    ":done_translating_ret;",
                                    VM_Global_State::loader_env->managed_null);
        REFS_RUNTIME_SWITCH_ENDIF
#endif // REFS_RUNTIME_OR_COMPRESSED
        assert(cs);
    }

    //***** Part 11: Rethrow exception
    cs = lil_parse_onto_end(cs,
                            "l0=ts;"
                            "ld l2,[l0+%0i:ref];"
                            "jc l2!=0,_exn_raised;"
                            "ld l2,[l0+%1i:ref];"
                            "jc l2=0,_no_exn;"
                            ":_exn_raised;"
                            "m2n_save_all;"
                            "out platform::void;"
                            "call.noret %2i;"
                            ":_no_exn;",
                            eoo, eco, exn_rethrow);
    assert(cs);

    //***** Part 12: Restore return variable, pop_m2n, return
    if (ret_type != VM_DATA_TYPE_VOID) {
        cs = lil_parse_onto_end(cs, "r=l1;");
        assert(cs);
    }
    cs = lil_parse_onto_end(cs,
                            "pop_m2n;"
                            "ret;");
    assert(cs);

    //***** Now generate code
    assert(lil_is_valid(cs));
    NativeCodePtr addr = LilCodeGenerator::get_platform()->compile(cs, clss->get_class_loader()->GetCodePool());

#ifndef NDEBUG
    char buf[100];
    apr_snprintf(buf, sizeof(buf)-1, "jni_stub.%s::%s",
                 clss->get_name()->bytes, method->get_name()->bytes);
    DUMP_STUB(addr, buf, lil_cs_get_code_size(cs));
#endif

#ifdef VM_STATS
    VM_Statistics::get_vm_stats().jni_stub_bytes += lil_cs_get_code_size(cs);
#endif

    lil_free_code_stub(cs);

    return addr;
} // compile_create_lil_jni_stub
// function can be a safe point & should be called with disable_recursion = 1
void exn_throw_for_JIT(ManagedObject* exn_obj, Class_Handle exn_class,
    Method_Handle exn_constr, U_8* jit_exn_constr_args, jvalue* vm_exn_constr_args)
{
/*
 * !!!! NO LOGGER IS ALLOWED IN THIS FUNCTION !!!
 * !!!! RELEASE BUILD WILL BE BROKEN !!!
 * !!!! NO TRACE2, INFO, WARN, ECHO, ASSERT, ...
 */
    assert(!hythread_is_suspend_enabled());

    if(exn_raised()) {
        return;
    }

    ASSERT_NO_INTERPRETER
    ASSERT_RAISE_AREA;

    if ((exn_obj == NULL) && (exn_class == NULL)) {
        exn_class = VM_Global_State::loader_env->java_lang_NullPointerException_Class;
    }
    ManagedObject* local_exn_obj = exn_obj;
    StackIterator* si = (StackIterator*) STD_ALLOCA(si_size());
    si_fill_from_native(si);

    if (exn_raised()) {
        return;
    }

#ifndef _IPF_
    assert(is_gc_frame_before_m2n_frame());
#endif // _IPF_

    assert(!exn_raised());

    if (si_is_past_end(si)) {
        //FIXME LAZY EXCEPTION (2006.05.12)
        // should be replaced by lazy version
        set_exception_object_internal(local_exn_obj);
        return;
    }

    si_transfer_all_preserved_registers(si);
    assert(!exn_raised());

    DebugUtilsTI* ti = VM_Global_State::loader_env->TI;
    exn_obj = exn_propagate_exception(si, &local_exn_obj, exn_class, exn_constr,
        jit_exn_constr_args, vm_exn_constr_args);

    if (exn_raised()) {
        //si_free(si);
        return;
    }

    M2nFrame* m2nFrame = m2n_get_last_frame();
    ObjectHandles* last_m2n_frame_handles = m2n_get_local_handles(m2nFrame);

    if (last_m2n_frame_handles) {
        free_local_object_handles2(last_m2n_frame_handles);
    }
    set_exception_object_internal(exn_obj);

    if (ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_EXCEPTION_EVENT)) {
        Registers regs = {0};
        VM_thread *thread = p_TLS_vmthread;
        NativeCodePtr callback = (NativeCodePtr) jvmti_exception_catch_callback;

        si_copy_to_registers(si, &regs);
        vm_set_exception_registers(thread, regs);
        si_set_callback(si, &callback);
    } else if (p_TLS_vmthread->restore_guard_page) {
        Registers regs = {0};
        VM_thread *thread = p_TLS_vmthread;
        NativeCodePtr callback = (NativeCodePtr) exception_catch_callback;

        si_copy_to_registers(si, &regs);
        vm_set_exception_registers(thread, regs);
        si_set_callback(si, &callback);
    }

    // don't put any call here
    si_transfer_control(si);
} //exn_throw_for_JIT