// exception catch support for JVMTI, also restore stack after Stack Overflow Error void jvmti_exception_catch_callback() { Registers regs = {0}; VM_thread *thread = p_TLS_vmthread; assert(thread); if (thread->regs) { regs = *(Registers*)thread->regs; } M2nFrame* m2n = (M2nFrame *) STD_ALLOCA(m2n_get_size()); m2n_push_suspended_frame(thread, m2n, ®s); M2nFrame* prev_m2n = m2n_get_previous_frame(m2n); StackIterator* si = (StackIterator*) STD_ALLOCA(si_size()); si_fill_from_registers(si, ®s, false, prev_m2n); // si_create_from_registers uses large stack space, // so guard page restored after its invoke, // but befor ti agent callback invokation, // because it should work on protected page. if (p_TLS_vmthread->restore_guard_page) { int res = port_thread_restore_guard_page(); if (res != 0) { Global_Env *env = VM_Global_State::loader_env; if (si_is_native(si)) { m2n_set_last_frame(prev_m2n); if ((interpreter_enabled() || (!prev_m2n) || (m2n_get_frame_type(prev_m2n) & FRAME_NON_UNWINDABLE))) { exn_raise_by_class(env->java_lang_StackOverflowError_Class); } else { //si_free(si); exn_throw_by_class(env->java_lang_StackOverflowError_Class); } } else { //si_free(si); exn_throw_by_class(env->java_lang_StackOverflowError_Class); } } p_TLS_vmthread->restore_guard_page = false; } if (!si_is_native(si)) { CodeChunkInfo* catch_cci = si_get_code_chunk_info(si); assert(catch_cci); Method* catch_method = catch_cci->get_method(); NativeCodePtr catch_method_location = si_get_ip(si); JIT* catch_method_jit = catch_cci->get_jit(); ManagedObject** exn_obj = (ManagedObject**) si_get_return_pointer(si); *exn_obj = jvmti_jit_exception_catch_event_callback_call( *exn_obj, catch_method_jit, catch_method, catch_method_location); } si_transfer_control(si); }
// exception catch callback to restore stack after Stack Overflow Error void exception_catch_callback() { Registers regs = {0}; VM_thread *thread = p_TLS_vmthread; assert(thread); if (thread->regs) { regs = *(Registers*)thread->regs; } M2nFrame* m2n = (M2nFrame *) STD_ALLOCA(m2n_get_size()); m2n_push_suspended_frame(thread, m2n, ®s); M2nFrame* prev_m2n = m2n_get_previous_frame(m2n); StackIterator* si = (StackIterator*) STD_ALLOCA(si_size()); si_fill_from_registers(si, ®s, false, prev_m2n); // si_create_from_registers uses large stack space, // so guard page restored after its invoke. if (p_TLS_vmthread->restore_guard_page) { int res = port_thread_restore_guard_page(); if (res != 0) { Global_Env *env = VM_Global_State::loader_env; if (si_is_native(si)) { m2n_set_last_frame(prev_m2n); if ((interpreter_enabled() || (!prev_m2n) || (m2n_get_frame_type(prev_m2n) & FRAME_NON_UNWINDABLE))) { exn_raise_by_class(env->java_lang_StackOverflowError_Class); } else { //si_free(si); exn_throw_by_class(env->java_lang_StackOverflowError_Class); } } else { //si_free(si); exn_throw_by_class(env->java_lang_StackOverflowError_Class); } } p_TLS_vmthread->restore_guard_page = false; } si_transfer_control(si); }
// Goto the managed frame immediately prior to m2nfl static void si_unwind_from_m2n(StackIterator * si) { #ifdef VM_STATS VM_Statistics::get_vm_stats().num_unwind_native_frames_all++; #endif M2nFrame * current_m2n_frame = si->m2n_frame; assert(current_m2n_frame); si->m2n_frame = m2n_get_previous_frame(current_m2n_frame); TRACE2("si", "si_unwind_from_m2n, ip = " << (void*)current_m2n_frame->rip); // Is it a normal M2nFrame or one for suspended managed code? if (m2n_is_suspended_frame(current_m2n_frame)) { // Suspended managed code, rip is at instruction, // rsp & registers are in regs structure TRACE2("si", "si_unwind_from_m2n from suspended managed code, ip = " << (void*)current_m2n_frame->regs->rip); init_context_from_registers(si->jit_frame_context, *current_m2n_frame->regs, false); } else { // Normal M2nFrame, rip is past instruction, // rsp is implicitly address just beyond the frame, // callee saves registers in M2nFrame si->jit_frame_context.rsp = (uint64)((uint64*) m2n_get_frame_base(current_m2n_frame) + 1); si->jit_frame_context.p_rbp = ¤t_m2n_frame->rbp; si->jit_frame_context.p_rip = ¤t_m2n_frame->rip; #ifdef _WIN64 si->jit_frame_context.p_rdi = ¤t_m2n_frame->rdi; si->jit_frame_context.p_rsi = ¤t_m2n_frame->rsi; #endif si->jit_frame_context.p_rbx = ¤t_m2n_frame->rbx; si->jit_frame_context.p_r12 = ¤t_m2n_frame->r12; si->jit_frame_context.p_r13 = ¤t_m2n_frame->r13; si->jit_frame_context.p_r14 = ¤t_m2n_frame->r14; si->jit_frame_context.p_r15 = ¤t_m2n_frame->r15; si->jit_frame_context.is_ip_past = true; } }
// Enumerates, through the JVMTI heap-root callback, every GC root on the
// interpreter stack of one thread: per-frame 'this' pointers, in-flight
// exception objects, tagged operand-stack slots, tagged local variables and
// locked monitors; finally reports the object handles of all M2N frames.
// 'depth' is the frame depth reported to the TI callback; 'slot' numbers the
// roots within a frame in discovery order.
void interp_ti_enumerate_root_set_single_thread_on_stack(jvmtiEnv* ti_env, VM_thread *thread) {
    TRACE2("enumeration", "interp_enumerate_root_set_single_thread_on_stack()");
    StackIterator_interp* si;

    si = interp_si_create_from_native(thread);

    int i;
    int depth;
    DEBUG_GC("\n\nGC enumeration in interpreter stack:\n");

    // Walk interpreter frames from the most recent one outward.
    for (depth = 0; !interp_si_is_past_end(si); depth++) {
        Method* method = (Method*)interp_si_get_method(si);
        jmethodID method_id = (jmethodID)method;
        int slot = 0;

        // The frame's 'this' reference, when present.
        if (si->This) {
            vm_ti_enumerate_stack_root(ti_env,
                (void**)&si->This, si->This,
                JVMTI_HEAP_ROOT_STACK_LOCAL,
                depth, method_id, slot++);
            DEBUG_GC(" [THIS]: " << si->This);
        }

        // Exception object being processed in the frame, when present.
        if (si->exc) {
            vm_ti_enumerate_stack_root(ti_env,
                (void**)&si->exc, si->exc,
                JVMTI_HEAP_ROOT_STACK_LOCAL,
                depth, method_id, slot++);
            DEBUG_GC(" [EXCEPTION]: " << si->exc);
        }

        // Native frames carry no interpreter operand stack or locals.
        if (method->is_native()) {
            DEBUG_GC("[METHOD <native>]: " << method);
            interp_si_goto_previous(si);
            continue;
        }

        DEBUG_GC("[METHOD "<< si->stack.size << " " << (int)si->locals.varNum << "]: " << method);

        // Operand-stack slots tagged as holding object references.
        if (si->stack.size)
            for(i = 0; i <= si->stack.index; i++) {
                if (si->stack.refs[i] == FLAG_OBJECT) {
                    DEBUG_GC(" Stack[" << i << "] ");
                    REF* ref = &si->stack.data[i].ref;
                    ManagedObject *obj = UNCOMPRESS_INTERP(*ref);
                    if (obj == 0) {
                        // Null slots are not reported as roots.
                        DEBUG_GC("NULL");
                    } else {
                        DEBUG_GC(obj);
                        vm_ti_enumerate_stack_root(ti_env,
                            ref, (Managed_Object_Handle)obj,
                            JVMTI_HEAP_ROOT_STACK_LOCAL,
                            depth, method_id, slot++);
                    }
                }
            }

        unsigned j;
        // Local variables tagged as holding object references.
        if (si->locals.varNum)
            for(j = 0; j < si->locals.varNum; j++) {
                if (si->locals.refs[j] == FLAG_OBJECT) {
                    DEBUG_GC(" Locals[" << j << "] ");
                    REF* ref = &si->locals.vars[j].ref;
                    ManagedObject *obj = UNCOMPRESS_INTERP(*ref);
                    if (obj == 0) {
                        DEBUG_GC("NULL\n");
                    } else {
                        DEBUG_GC(obj);
                        vm_ti_enumerate_stack_root(ti_env,
                            ref, (Managed_Object_Handle)obj,
                            JVMTI_HEAP_ROOT_STACK_LOCAL,
                            depth, method_id, slot++);
                    }
                }
            }

        // Monitors locked by this frame, reported as MONITOR roots.
        MonitorList *ml = si->locked_monitors;
        while(ml) {
            vm_ti_enumerate_stack_root(ti_env,
                &ml->monitor, ml->monitor,
                JVMTI_HEAP_ROOT_MONITOR, depth,
                method_id, slot++);
            ml = ml->next;
        }

        interp_si_goto_previous(si);
    }

    // enumerate m2n frames
    M2nFrame *m2n = m2n_get_last_frame(thread);
    while(m2n) {
        oh_enumerate_handles(m2n_get_local_handles(m2n));
        m2n = m2n_get_previous_frame(m2n);
    }
}
// Reports to the GC every root reference held on the interpreter stack of
// the given thread: per-frame 'this' pointers, in-flight exception objects,
// tagged operand-stack slots, tagged local variables and locked monitors.
// Afterwards the object handles of all native (M2N) frames are enumerated.
void interp_enumerate_root_set_single_thread_on_stack(VM_thread *thread) {
    TRACE2("enumeration", "interp_enumerate_root_set_single_thread_on_stack()");

    StackIterator_interp* frame_iter = interp_si_create_from_native(thread);

    DEBUG_GC("\n\nGC enumeration in interpreter stack:\n");

    // Walk interpreter frames from the most recent one outward; the iterator
    // is advanced in the update clause so each frame is popped exactly once.
    for (; !interp_si_is_past_end(frame_iter); interp_si_goto_previous(frame_iter)) {
        Method* method = (Method*)interp_si_get_method(frame_iter);
        method = method;

        // The frame's 'this' reference, when present.
        if (frame_iter->This) {
            vm_enumerate_root_reference((void**)&frame_iter->This, FALSE);
            DEBUG_GC(" [THIS]: " << frame_iter->This);
        }

        // Exception object being processed in the frame, when present.
        if (frame_iter->exc) {
            vm_enumerate_root_reference((void**)&frame_iter->exc, FALSE);
            DEBUG_GC(" [EXCEPTION]: " << frame_iter->exc);
        }

        // Native frames carry no interpreter operand stack or locals.
        if (method->is_native()) {
            DEBUG_GC("[METHOD <native>]: " << method);
            continue;
        }

        DEBUG_GC("[METHOD "<< frame_iter->stack.size << " " << (int)frame_iter->locals.varNum << "]: " << method);

        // Operand-stack slots tagged as holding object references.
        if (frame_iter->stack.size) {
            for (int stack_slot = 0; stack_slot <= frame_iter->stack.index; stack_slot++) {
                if (frame_iter->stack.refs[stack_slot] != FLAG_OBJECT)
                    continue;
                DEBUG_GC(" Stack[" << stack_slot << "] ");
                REF* ref = &frame_iter->stack.data[stack_slot].ref;
                ManagedObject *obj = UNCOMPRESS_INTERP(*ref);
                if (obj == 0) {
                    DEBUG_GC("NULL");
                } else {
                    DEBUG_GC(obj);
                    // CHECK!!! can we enumerate uncompressed ref in compressed mode
                    vm_enumerate(ref, FALSE);
                }
            }
        }

        // Local variables tagged as holding object references.
        if (frame_iter->locals.varNum) {
            for (unsigned local_idx = 0; local_idx < frame_iter->locals.varNum; local_idx++) {
                if (frame_iter->locals.refs[local_idx] != FLAG_OBJECT)
                    continue;
                DEBUG_GC(" Locals[" << local_idx << "] ");
                REF* ref = &frame_iter->locals.vars[local_idx].ref;
                ManagedObject *obj = UNCOMPRESS_INTERP(*ref);
                if (obj == 0) {
                    DEBUG_GC("NULL\n");
                } else {
                    DEBUG_GC(obj);
                    // CHECK!!! can we enumerate uncompressed ref in compressed mode
                    vm_enumerate(ref, FALSE);
                }
            }
        }

        // Monitors locked by this frame.
        for (MonitorList *ml = frame_iter->locked_monitors; ml; ml = ml->next) {
            vm_enumerate_root_reference((void**)&ml->monitor, FALSE);
        }
    }

    // Enumerate object handles of native-to-managed transition frames.
    for (M2nFrame *m2n = m2n_get_last_frame(thread); m2n;
            m2n = m2n_get_previous_frame(m2n)) {
        oh_enumerate_handles(m2n_get_local_handles(m2n));
    }
}