NativeCodePtr compile_me(Method* method)
{
    ASSERT_RAISE_AREA;
    ASSERT_NO_INTERPRETER;
    TRACE("compile_me " << method);

    GcFrame gc;
    compile_protect_arguments(method, &gc);

    if (exn_raised()) {
        return NULL;
    }

    tmn_suspend_enable();
    if (method->is_abstract()) {
        compile_raise_exception("java/lang/AbstractMethodError", "", method);
        tmn_suspend_disable();
        return NULL;
    }

    DebugUtilsTI *ti = VM_Global_State::loader_env->TI;

    JIT_Result res = compile_do_compilation(method);
    if (res != JIT_SUCCESS) {
        INFO2("compile", "Cannot compile " << method);
        if (!exn_raised()) {
            compile_raise_exception("java/lang/InternalError",
                "Cannot compile ", method);
        }
        tmn_suspend_disable();
        return NULL;
    }
    tmn_suspend_disable();

    NativeCodePtr entry_point = method->get_code_addr();
    INFO2("compile.code", "Compiled method " << method
        << ", entry " << entry_point);

    if (method->get_pending_breakpoints() != 0)
        jvmti_set_pending_breakpoints(method);

    if (ti->isEnabled() && ti->is_single_step_enabled()
        && !method->is_native())
    {
        jvmti_thread_t jvmti_thread = jthread_self_jvmti();
        assert(jvmti_thread);
        jvmti_set_single_step_breakpoints_for_method(ti, jvmti_thread, method);
    }

    return entry_point;
} // compile_me
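// Illustrative usage sketch (not part of the original source): lazy
// compilation stubs that front not-yet-compiled methods funnel the first call
// through compile_me(). The helper below is hypothetical and only shows the
// contract: the caller is in GC-disabled (suspend-disabled) mode, and a NULL
// result means an exception is already pending, so the stub must unwind
// rather than jump to the returned address.
#if 0
static NativeCodePtr lazy_compile_target(Method* method)
{
    NativeCodePtr entry = compile_me(method);  // may run the JIT, may raise exceptions
    if (entry == NULL) {
        return NULL;   // exception pending (AbstractMethodError, InternalError, ...)
    }
    return entry;      // the platform stub tail-jumps to this entry point
}
#endif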
// Implementation note: don't use l2 (use l3, l4 instead if required) since its
// space can be used in case of 64-bit return value.
NativeCodePtr compile_create_lil_jni_stub(Method_Handle method, void* func,
                                          NativeStubOverride nso)
{
    ASSERT_NO_INTERPRETER;
    const Class_Handle clss = method->get_class();
    bool is_static = method->is_static();
    bool is_synchronised = method->is_synchronized();
    Method_Signature_Handle msh = method_get_signature(method);
    unsigned num_args = method->get_num_args();
    Type_Info_Handle ret_tih = method_ret_type_get_type_info(msh);
    VM_Data_Type ret_type = type_info_get_type(ret_tih);
    unsigned i;

    unsigned num_ref_args = 0; // among original args, does not include jclass for static methods
    for (i = 0; i < num_args; i++)
        if (is_reference(method_args_get_type_info(msh, i)))
            num_ref_args++;

    //***** Part 1: Entry, Stats, Override, push m2n, allocate space for handles

    LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:%0m;", method);
    assert(cs);

    // Increment stats (total number of calls)
#ifdef VM_STATS
    cs = lil_parse_onto_end(cs, "inc [%0i:pint];",
                            &((Method*)method)->num_accesses);
    assert(cs);
#endif //VM_STATS

    // Do stub override here
    if (nso)
        cs = nso(cs, method);
    assert(cs);

    // Increment stats (number of nonoverridden calls)
#ifdef VM_STATS
    cs = lil_parse_onto_end(cs, "inc [%0i:pint];",
                            &((Method*)method)->num_slow_accesses);
    assert(cs);
#endif

    // Push M2nFrame
    cs = lil_parse_onto_end(cs, "push_m2n %0i, %1i, handles; locals 3;",
                            method, (POINTER_SIZE_INT)FRAME_JNI);
    assert(cs);

    // Allocate space for handles
    unsigned number_of_object_handles = num_ref_args + (is_static ? 1 : 0);
    cs = oh_gen_allocate_handles(cs, number_of_object_handles, "l0", "l1");
    assert(cs);

    //***** Part 2: Initialize object handles

    if (is_static) {
        void *jlc = clss->get_class_handle();
        cs = lil_parse_onto_end(cs,
            //"ld l1,[%0i:pint];"
            "ld l1,[%0i:ref];",
            jlc);
        assert(cs);
        cs = oh_gen_init_handle(cs, "l0", 0, "l1", false);
        assert(cs);
    } else {
        cs = oh_gen_init_handle(cs, "l0", 0, "i0", true);
    }

    // The remaining handles are for the proper arguments (not including this).
    // Loop over the arguments, skipping the 0th argument for instance methods;
    // if an argument is a reference, generate code to initialize its handle.
    unsigned hn = 1;
    for (i = (is_static ? 0 : 1); i < num_args; i++) {
        if (is_reference(method_args_get_type_info(msh, i))) {
            char buf[20];
            sprintf(buf, "i%d", i);
            cs = oh_gen_init_handle(cs, "l0", hn, buf, true);
            assert(cs);
            hn++;
        }
    }

    //***** Part 3: Synchronize

    if (is_synchronised) {
        if (is_static) {
            cs = lil_parse_onto_end(cs,
                "out stdcall:pint:pint;"
                "o0=%0i;"
                "call %1i;"
                "out stdcall:pint:void;"
                "o0=r;"
                "call %2i;",
                clss,
                lil_npc_to_fp(vm_helper_get_addr(VM_RT_CLASS_2_JLC)),
                lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_ENTER)));
            assert(cs);
        } else {
            cs = lil_parse_onto_end(cs,
                "out stdcall:ref:void;"
                "o0=i0;"
                "call %0i;",
                lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_ENTER)));
            assert(cs);
        }
    }

    //***** Call JVMTI MethodEntry
    DebugUtilsTI* ti = VM_Global_State::loader_env->TI;
    if (ti->isEnabled() &&
        ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_METHOD_ENTRY))
    {
        cs = lil_parse_onto_end(cs,
            "out platform:pint:void;"
            "o0=%0i:pint;"
            "call %1i;",
            (jmethodID)method,
            jvmti_process_method_entry_event);
        assert(cs);
    }

    //***** Part 4: Enable GC
    cs = lil_parse_onto_end(cs,
        "out platform::void;"
        "call %0i;",
        hythread_suspend_enable);
    assert(cs);

    //***** Part 5: Set up arguments

    // Setup outputs, set JNIEnv, set class/this handle
    cs = lil_parse_onto_end(cs,
        "out jni:%0j;"
        "l1=ts;"
        "ld o0,[l1 + %1i:pint];"
        "o1=l0+%2i;",
        method,
        (POINTER_SIZE_INT)APR_OFFSETOF(VM_thread, jni_env),
        oh_get_handle_offset(0));
    assert(cs);

    // Loop over arguments proper, setting rest of outputs
    unsigned int arg_base = 1 + (is_static ? 1 : 0);
    hn = 1;
    for (i = (is_static ? 0 : 1); i < num_args; i++) {
        if (is_reference(method_args_get_type_info(msh, i))) {
            POINTER_SIZE_INT handle_offset = oh_get_handle_offset(hn);
            REFS_RUNTIME_SWITCH_IF
#ifdef REFS_RUNTIME_OR_COMPRESSED
                cs = lil_parse_onto_end(cs,
                    "jc i%0i=%1i:ref,%n;"
                    "o%2i=l0+%3i;"
                    "j %o;"
                    ":%g;"
                    "o%4i=0;"
                    ":%g;",
                    i,
                    VM_Global_State::loader_env->managed_null,
                    arg_base + i, handle_offset, arg_base + i);
#endif // REFS_RUNTIME_OR_COMPRESSED
            REFS_RUNTIME_SWITCH_ELSE
#ifdef REFS_RUNTIME_OR_UNCOMPRESSED
                cs = lil_parse_onto_end(cs,
                    "jc i%0i=0:ref,%n;"
                    "o%1i=l0+%2i;"
                    "j %o;"
                    ":%g;"
                    "o%3i=0;"
                    ":%g;",
                    i, arg_base + i, handle_offset, arg_base + i);
#endif // REFS_RUNTIME_OR_UNCOMPRESSED
            REFS_RUNTIME_SWITCH_ENDIF
            hn++;
        } else {
            cs = lil_parse_onto_end(cs, "o%0i=i%1i;", arg_base + i, i);
        }
        assert(cs);
    }

    //***** Part 6: Call
    cs = lil_parse_onto_end(cs,
        "call %0i;",
        func);
    assert(cs);

    //***** Part 7: Save return, widening if necessary
    switch (ret_type) {
    case VM_DATA_TYPE_VOID:
        break;
    case VM_DATA_TYPE_INT32:
        cs = lil_parse_onto_end(cs, "l1=r;");
        break;
    case VM_DATA_TYPE_BOOLEAN:
        cs = lil_parse_onto_end(cs, "l1=zx1 r;");
        break;
    case VM_DATA_TYPE_INT16:
        cs = lil_parse_onto_end(cs, "l1=sx2 r;");
        break;
    case VM_DATA_TYPE_INT8:
        cs = lil_parse_onto_end(cs, "l1=sx1 r;");
        break;
    case VM_DATA_TYPE_CHAR:
        cs = lil_parse_onto_end(cs, "l1=zx2 r;");
        break;
    default:
        cs = lil_parse_onto_end(cs, "l1=r;");
        break;
    }
    assert(cs);

    //***** Part 8: Disable GC
    cs = lil_parse_onto_end(cs,
        "out platform::void;"
        "call %0i;",
        hythread_suspend_disable);
    assert(cs);

    // Exception offsets
    POINTER_SIZE_INT eoo =
        (POINTER_SIZE_INT)&((VM_thread*)0)->thread_exception.exc_object;
    POINTER_SIZE_INT eco =
        (POINTER_SIZE_INT)&((VM_thread*)0)->thread_exception.exc_class;

    //***** Call JVMTI MethodExit
    if (ti->isEnabled() &&
        ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_METHOD_EXIT))
    {
        cs = lil_parse_onto_end(cs,
            "out platform:pint,g1,g8:void;"
            "l2=ts;"
            "ld l2,[l2+%0i:ref];"
            "jc l2!=0,_mex_exn_raised;"
            "l2=ts;"
            "ld l2,[l2+%1i:ref];"
            "jc l2!=0,_mex_exn_raised;"
            "o1=%2i:g1;"
            "o2=l1:g8;"
            "j _mex_exn_cont;"
            ":_mex_exn_raised;"
            "o1=%3i:g1;"
            "o2=0:g8;"
            ":_mex_exn_cont;"
            "o0=%4i:pint;"
            "call %5i;",
            eoo, eco,
            (POINTER_SIZE_INT)JNI_FALSE,
            (POINTER_SIZE_INT)JNI_TRUE,
            (jmethodID)method,
            jvmti_process_method_exit_event);
        assert(cs);
    }

    //***** Part 9: Synchronize
    if (is_synchronised) {
        if (is_static) {
            cs = lil_parse_onto_end(cs,
                "out stdcall:pint:pint;"
                "o0=%0i;"
                "call %1i;"
                "out stdcall:pint:void;"
                "o0=r;"
                "call %2i;",
                clss,
                lil_npc_to_fp(vm_helper_get_addr(VM_RT_CLASS_2_JLC)),
                lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_EXIT)));
        } else {
            cs = lil_parse_onto_end(cs,
                "ld l0,[l0+%0i:ref];"
                "out stdcall:ref:void; o0=l0; call %1i;",
                oh_get_handle_offset(0),
                lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_EXIT)));
        }
        assert(cs);
    }

    //***** Part 10: Unhandle the return if it is a reference
    if (is_reference(ret_tih)) {
        cs = lil_parse_onto_end(cs,
            "jc l1=0,ret_done;"
            "ld l1,[l1+0:ref];"
            ":ret_done;");
#ifdef REFS_RUNTIME_OR_COMPRESSED
        REFS_RUNTIME_SWITCH_IF
            cs = lil_parse_onto_end(cs,
                "jc l1!=0,done_translating_ret;"
                "l1=%0i:ref;"
                ":done_translating_ret;",
                VM_Global_State::loader_env->managed_null);
        REFS_RUNTIME_SWITCH_ENDIF
#endif // REFS_RUNTIME_OR_COMPRESSED
        assert(cs);
    }

    //***** Part 11: Rethrow exception
    cs = lil_parse_onto_end(cs,
        "l0=ts;"
        "ld l2,[l0+%0i:ref];"
        "jc l2!=0,_exn_raised;"
        "ld l2,[l0+%1i:ref];"
        "jc l2=0,_no_exn;"
        ":_exn_raised;"
        "m2n_save_all;"
        "out platform::void;"
        "call.noret %2i;"
        ":_no_exn;",
        eoo, eco, exn_rethrow);
    assert(cs);

    //***** Part 12: Restore return variable, pop_m2n, return
    if (ret_type != VM_DATA_TYPE_VOID) {
        cs = lil_parse_onto_end(cs, "r=l1;");
        assert(cs);
    }
    cs = lil_parse_onto_end(cs,
        "pop_m2n;"
        "ret;");
    assert(cs);

    //***** Now generate code
    assert(lil_is_valid(cs));
    NativeCodePtr addr = LilCodeGenerator::get_platform()->compile(cs,
        clss->get_class_loader()->GetCodePool());

#ifndef NDEBUG
    char buf[100];
    apr_snprintf(buf, sizeof(buf) - 1, "jni_stub.%s::%s",
        clss->get_name()->bytes, method->get_name()->bytes);
    DUMP_STUB(addr, buf, lil_cs_get_code_size(cs));
#endif

#ifdef VM_STATS
    VM_Statistics::get_vm_stats().jni_stub_bytes += lil_cs_get_code_size(cs);
#endif

    lil_free_code_stub(cs);

    return addr;
} // compile_create_lil_jni_stub
// function can be safe point & should be called with disable recursion = 1 static ManagedObject * exn_propagate_exception( StackIterator * si, ManagedObject ** exn_obj, Class_Handle exn_class, Method_Handle exn_constr, U_8 * jit_exn_constr_args, jvalue* vm_exn_constr_args) { assert(!hythread_is_suspend_enabled()); ASSERT_RAISE_AREA; ASSERT_NO_INTERPRETER; assert(*exn_obj || exn_class); // Save the throw context StackIterator *throw_si = (StackIterator*) STD_ALLOCA(si_size()); memcpy(throw_si, si, si_size()); // Skip first frame if it is an M2nFrame (which is always a transition from managed to the throw code). // The M2nFrame will be removed from the thread's M2nFrame list but transfer control or copy to registers. if (si_is_native(si)) { si_goto_previous(si); } Method *interrupted_method; NativeCodePtr interrupted_method_location; JIT *interrupted_method_jit; bool restore_guard_page = p_TLS_vmthread->restore_guard_page; if (!si_is_native(si)) { CodeChunkInfo *interrupted_cci = si_get_code_chunk_info(si); assert(interrupted_cci); interrupted_method = interrupted_cci->get_method(); interrupted_method_location = si_get_ip(si); interrupted_method_jit = interrupted_cci->get_jit(); } else { interrupted_method = m2n_get_method(si_get_m2n(si)); interrupted_method_location = 0; interrupted_method_jit = 0; } if (NULL != *exn_obj) { // Gregory - When *exn_obj is NULL it means we're called from exn_athrow_regs // which means that IP points exactly to the right location. But // when *exn_obj is not NULL, it means that we're called from exn_throw_for_JIT // where *exn_obj is already constructed and is thrown by code via athrow. // So in this case IP reported by stack iterator is past the athrow bytecode // and should be moved back to be inside of bytecode location for interrupted // method. interrupted_method_location = (NativeCodePtr)((POINTER_SIZE_INT)interrupted_method_location - 1); // Determine the type of the exception for the type tests below. exn_class = (*exn_obj)->vt()->clss; } #ifdef VM_STATS assert(exn_class); exn_class->class_thrown(); UNSAFE_REGION_START VM_Statistics::get_vm_stats().num_exceptions++; UNSAFE_REGION_END #endif // VM_STATS // Remove single step breakpoints which could have been set on the // exception bytecode DebugUtilsTI *ti = VM_Global_State::loader_env->TI; if (ti->isEnabled() && ti->is_single_step_enabled()) { jvmti_thread_t jvmti_thread = jthread_self_jvmti(); ti->vm_brpt->lock(); if (NULL != jvmti_thread->ss_state) { jvmti_remove_single_step_breakpoints(ti, jvmti_thread); } ti->vm_brpt->unlock(); } // When VM is in shutdown stage we need to execute "finally" clause to // release monitors and propagate an exception to the upper frames. Class_Handle search_exn_class = !VM_Global_State::loader_env->IsVmShutdowning() ? 
exn_class : VM_Global_State::loader_env->JavaLangObject_Class; if (!si_is_native(si)) { bool same_frame = true; while (!si_is_past_end(si) && !si_is_native(si)) { CodeChunkInfo *cci = si_get_code_chunk_info(si); assert(cci); Method *method = cci->get_method(); JIT *jit = cci->get_jit(); assert(method && jit); NativeCodePtr ip = si_get_ip(si); bool is_ip_past = !!si_get_jit_context(si)->is_ip_past; #ifdef VM_STATS cci->num_throws++; #endif // VM_STATS // Examine this frame's exception handlers looking for a match unsigned num_handlers = cci->get_num_target_exception_handlers(); for (unsigned i = 0; i < num_handlers; i++) { Target_Exception_Handler_Ptr handler = cci->get_target_exception_handler_info(i); if (!handler) continue; if (handler->is_in_range(ip, is_ip_past) && handler->is_assignable(search_exn_class)) { // Found a handler that catches the exception. #ifdef VM_STATS cci->num_catches++; if (same_frame) { VM_Statistics::get_vm_stats().num_exceptions_caught_same_frame++; } if (handler->is_exc_obj_dead()) { VM_Statistics::get_vm_stats().num_exceptions_dead_object++; if (!*exn_obj) { VM_Statistics::get_vm_stats().num_exceptions_object_not_created++; } } #endif // VM_STATS if (restore_guard_page) { bool res = check_stack_size_enough_for_exception_catch(si_get_sp(si)); //must always be enough. otherwise program behavior is unspecified: finally blocks, monitor exits are not executed assert(res); if (!res) { break; } } // Setup handler context jit->fix_handler_context(method, si_get_jit_context(si)); si_set_ip(si, handler->get_handler_ip(), false); // Start single step in exception handler if (ti->isEnabled() && ti->is_single_step_enabled()) { jvmti_thread_t jvmti_thread = jthread_self_jvmti(); ti->vm_brpt->lock(); if (NULL != jvmti_thread->ss_state) { uint16 bc; NativeCodePtr ip = handler->get_handler_ip(); OpenExeJpdaError UNREF result = jit->get_bc_location_for_native(method, ip, &bc); assert(EXE_ERROR_NONE == result); jvmti_StepLocation method_start = {(Method *)method, ip, bc, false}; jvmti_set_single_step_breakpoints(ti, jvmti_thread, &method_start, 1); } ti->vm_brpt->unlock(); } // Create exception if necessary if (!*exn_obj && !handler->is_exc_obj_dead()) { assert(!exn_raised()); *exn_obj = create_lazy_exception(exn_class, exn_constr, jit_exn_constr_args, vm_exn_constr_args); } if (jvmti_is_exception_event_requested()) { // Create exception if necessary if (NULL == *exn_obj) { *exn_obj = create_lazy_exception(exn_class, exn_constr, jit_exn_constr_args, vm_exn_constr_args); } // Reload exception object pointer because it could have // moved while calling JVMTI callback *exn_obj = jvmti_jit_exception_event_callback_call(*exn_obj, interrupted_method_jit, interrupted_method, interrupted_method_location, jit, method, handler->get_handler_ip()); } CTRACE(("setting return pointer to %d", exn_obj)); si_set_return_pointer(si, (void **) exn_obj); //si_free(throw_si); return NULL; } } // No appropriate handler found, undo synchronization vm_monitor_exit_synchronized_method(si); jvalue ret_val = {(jlong)0}; jvmti_process_method_exception_exit_event( reinterpret_cast<jmethodID>(method), JNI_TRUE, ret_val, si); // Goto previous frame si_goto_previous(si); same_frame = false; } } // Exception propagates to the native code assert(si_is_native(si)); // The current thread exception is set to the exception and we return 0/NULL to the native code if (*exn_obj == NULL) { *exn_obj = create_lazy_exception(exn_class, exn_constr, jit_exn_constr_args, vm_exn_constr_args); } 
assert(!hythread_is_suspend_enabled()); CodeChunkInfo *catch_cci = si_get_code_chunk_info(si); Method *catch_method = NULL; if (catch_cci) catch_method = catch_cci->get_method(); // Reload exception object pointer because it could have // moved while calling JVMTI callback if (exn_raised()) { //si_free(throw_si); return NULL; } *exn_obj = jvmti_jit_exception_event_callback_call(*exn_obj, interrupted_method_jit, interrupted_method, interrupted_method_location, NULL, NULL, NULL); //si_free(throw_si); return *exn_obj; } //exn_propagate_exception
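// Illustrative caller sketch (an assumption-laden sketch, not from the
// original source): throw paths position a stack iterator at the throw site
// and let exn_propagate_exception() walk the managed frames. A NULL return
// means the iterator now describes the catching frame (IP at the handler,
// return pointer wired to the exception object); a non-NULL return means the
// exception escaped all managed frames and becomes the thread-local pending
// exception for the native caller.
#if 0
static void throw_sketch(StackIterator* si, ManagedObject* exn_obj)
{
    ManagedObject* escaped =
        exn_propagate_exception(si, &exn_obj, NULL, NULL, NULL, NULL);
    if (escaped == NULL && !exn_raised()) {
        // Handler found: transfer control to the patched iterator
        // (platform-specific, e.g. the si_transfer_control path).
    } else {
        // No managed handler (or exception creation failed): record the
        // exception on the thread and return to native code.
    }
}
#endif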