/*
 * mono_arch_find_jit_info:
 *
 * See exceptions-amd64.c for docs.
 */
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
						 MonoJitInfo *ji, MonoContext *ctx,
						 MonoContext *new_ctx, MonoLMF **lmf,
						 mgreg_t **save_locations,
						 StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		int i;
		gssize regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		frame->type = FRAME_TYPE_MANAGED;

		if (ji->from_aot)
			unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
		else
			unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);

		for (i = 0; i < 16; ++i)
			regs [i] = new_ctx->regs [i];

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
						   (guint8*)ji->code_start + ji->code_size,
						   ip, regs, MONO_MAX_IREGS,
						   save_locations, MONO_MAX_IREGS, &cfa);

		for (i = 0; i < 16; ++i)
			new_ctx->regs [i] = regs [i];
		new_ctx->pc = regs [ARMREG_LR];
		new_ctx->regs [ARMREG_SP] = (gsize)cfa;

		if (*lmf && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->sp)) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);
		}

		/* Clear thumb bit */
		new_ctx->pc &= ~1;

		/* we subtract 1, so that the IP points into the call instruction */
		new_ctx->pc--;

		return TRUE;
	} else if (*lmf) {
		if (((gsize)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		if ((ji = mini_jit_info_table_find (domain, (gpointer)(*lmf)->ip, NULL))) {
			frame->ji = ji;
		} else {
			if (!(*lmf)->method)
				return FALSE;
			frame->method = (*lmf)->method;
		}

		/*
		 * The LMF is saved at the start of the method using:
		 * ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP)
		 * ARM_PUSH (code, 0x5ff0);
		 * So it stores the register state as it existed at the caller. We need to
		 * produce the register state which existed at the time of the call which
		 * transitioned to native code, so we save the sp/fp/ip in the LMF.
		 */
		memcpy (&new_ctx->regs [0], &(*lmf)->iregs [0], sizeof (mgreg_t) * 13);
		new_ctx->pc = (*lmf)->ip;
		new_ctx->regs [ARMREG_SP] = (*lmf)->sp;
		new_ctx->regs [ARMREG_FP] = (*lmf)->fp;

		/* Clear thumb bit */
		new_ctx->pc &= ~1;

		/* we subtract 1, so that the IP points into the call instruction */
		new_ctx->pc--;

		*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}
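/*
 * Illustrative sketch (not part of the runtime): every port above strips the low bits
 * of previous_lmf with "& ~3" before following it, because the LMF chain stores flag
 * bits in the low bits of the pointer (bit 2 marks a debugger-invoke entry, bit 1 is
 * used differently per architecture). This works because LMF structures are at least
 * 4-byte aligned. The FrameRecord type and FRAME_FLAG_* names below are hypothetical
 * and only demonstrate the tagged-pointer pattern.
 */
#include <assert.h>
#include <stdint.h>

typedef struct FrameRecord {
	struct FrameRecord *previous;	/* low two bits double as flags */
} FrameRecord;

#define FRAME_FLAG_ARCH_SPECIFIC 1	/* analogous to the "& 1" checks above */
#define FRAME_FLAG_DEBUGGER      2	/* analogous to the "& 2" checks above */

static FrameRecord *
frame_prev (const FrameRecord *f)
{
	/* mask the flag bits off before dereferencing, like "& ~3" in the unwinders */
	return (FrameRecord *)((uintptr_t)f->previous & ~(uintptr_t)3);
}

static int
frame_has_flag (const FrameRecord *f, uintptr_t flag)
{
	return ((uintptr_t)f->previous & flag) != 0;
}

static void
frame_link (FrameRecord *f, FrameRecord *prev, uintptr_t flags)
{
	assert (((uintptr_t)prev & 3) == 0);	/* requires 4-byte alignment */
	f->previous = (FrameRecord *)((uintptr_t)prev | flags);
}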
/*
 * mono_arch_find_jit_info:
 *
 * This function is used to gather information from @ctx, and store it in @frame_info.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 */
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
						 MonoJitInfo *ji, MonoContext *ctx,
						 MonoContext *new_ctx, MonoLMF **lmf,
						 mgreg_t **save_locations,
						 StackFrameInfo *frame)
{
	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		int i;
		gpointer ip = MONO_CONTEXT_GET_IP (ctx);
		mgreg_t regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		frame->type = FRAME_TYPE_MANAGED;

		if (ji->from_aot)
			unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
		else
			unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);

		for (i = 0; i < MONO_MAX_IREGS; ++i)
			regs [i] = new_ctx->sc_regs [i];

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
						   (guint8*)ji->code_start + ji->code_size,
						   ip, regs, MONO_MAX_IREGS,
						   save_locations, MONO_MAX_IREGS, &cfa);

		for (i = 0; i < MONO_MAX_IREGS; ++i)
			new_ctx->sc_regs [i] = regs [i];
		new_ctx->sc_pc = regs [mips_ra];
		new_ctx->sc_regs [mips_sp] = (mgreg_t)cfa;

		if (*lmf && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->iregs [mips_sp])) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);
		}

		/* we subtract 8, so that the IP points into the call instruction */
		MONO_CONTEXT_SET_IP (new_ctx, new_ctx->sc_pc - 8);

		/* Sanity check -- we should have made progress here */
		g_assert (MONO_CONTEXT_GET_SP (new_ctx) != MONO_CONTEXT_GET_SP (ctx));

		return TRUE;
	} else if (*lmf) {
		if (((mgreg_t)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		if (!(*lmf)->method) {
#ifdef DEBUG_EXCEPTIONS
			g_print ("mono_arch_find_jit_info: bad lmf @ %p\n", (void *) *lmf);
#endif
			return FALSE;
		}
		g_assert (((*lmf)->magic == MIPS_LMF_MAGIC1) || ((*lmf)->magic == MIPS_LMF_MAGIC2));

		ji = mini_jit_info_table_find (domain, (gpointer)(*lmf)->eip, NULL);
		if (!ji) {
			// FIXME: This can happen with multiple appdomains (bug #444383)
			return FALSE;
		}

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		memcpy (&new_ctx->sc_regs, (*lmf)->iregs, sizeof (gulong) * MONO_SAVED_GREGS);
		memcpy (&new_ctx->sc_fpregs, (*lmf)->fregs, sizeof (float) * MONO_SAVED_FREGS);
		MONO_CONTEXT_SET_IP (new_ctx, (*lmf)->eip);

		/* ensure that we've made progress */
		g_assert (new_ctx->sc_pc != ctx->sc_pc);

		*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}
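/*
 * Illustrative sketch (not part of the runtime): each port above backs the unwound IP
 * up so it points into the call instruction (pc - 1 on ARM/x86/amd64, sc_pc - 8 on
 * MIPS) before the IP is used for further lookups. A raw return address can point one
 * past the end of the caller's code range, for example when the call is the last
 * instruction of the method, so range lookups must use an adjusted address. The
 * CodeRange type and lookup_range helper below are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct {
	uintptr_t start;
	size_t size;
} CodeRange;

/* find the range containing addr, or NULL if none does */
static const CodeRange *
lookup_range (const CodeRange *ranges, size_t n, uintptr_t addr)
{
	size_t i;
	for (i = 0; i < n; ++i)
		if (addr >= ranges [i].start && addr < ranges [i].start + ranges [i].size)
			return &ranges [i];
	return NULL;
}

/*
 * Usage: for a return address 'ra' produced by a call that ends a method,
 * lookup_range (ranges, n, ra) can miss while lookup_range (ranges, n, ra - 1)
 * correctly finds the caller, which is why the unwinders adjust the IP first.
 */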
/*
 * mono_arch_find_jit_info:
 *
 * See exceptions-amd64.c for docs.
 */
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
						 MonoJitInfo *ji, MonoContext *ctx,
						 MonoContext *new_ctx, MonoLMF **lmf,
						 mgreg_t **save_locations,
						 StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		gssize regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		frame->type = FRAME_TYPE_MANAGED;

		if (ji->from_aot)
			unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
		else
			unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);

		regs [X86_EAX] = new_ctx->eax;
		regs [X86_EBX] = new_ctx->ebx;
		regs [X86_ECX] = new_ctx->ecx;
		regs [X86_EDX] = new_ctx->edx;
		regs [X86_ESP] = new_ctx->esp;
		regs [X86_EBP] = new_ctx->ebp;
		regs [X86_ESI] = new_ctx->esi;
		regs [X86_EDI] = new_ctx->edi;
		regs [X86_NREG] = new_ctx->eip;

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
						   (guint8*)ji->code_start + ji->code_size,
						   ip, regs, MONO_MAX_IREGS + 1,
						   save_locations, MONO_MAX_IREGS, &cfa);

		new_ctx->eax = regs [X86_EAX];
		new_ctx->ebx = regs [X86_EBX];
		new_ctx->ecx = regs [X86_ECX];
		new_ctx->edx = regs [X86_EDX];
		new_ctx->esp = regs [X86_ESP];
		new_ctx->ebp = regs [X86_EBP];
		new_ctx->esi = regs [X86_ESI];
		new_ctx->edi = regs [X86_EDI];
		new_ctx->eip = regs [X86_NREG];

		/* The CFA becomes the new SP value */
		new_ctx->esp = (gssize)cfa;

		/* Adjust IP */
		new_ctx->eip --;

		if (*lmf && (MONO_CONTEXT_GET_BP (ctx) >= (gpointer)(*lmf)->ebp)) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);
		}

		/* Pop arguments off the stack */
		/*
		 * FIXME: LLVM doesn't push these, we can't use ji->from_llvm as it describes
		 * the callee.
		 */
#ifndef ENABLE_LLVM
		if (ji->has_arch_eh_info)
			new_ctx->esp += mono_jit_info_get_arch_eh_info (ji)->stack_size;
#endif

		return TRUE;
	} else if (*lmf) {
		if (((guint64)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		if ((ji = mini_jit_info_table_find (domain, (gpointer)(*lmf)->eip, NULL))) {
		} else {
			if (!((guint32)((*lmf)->previous_lmf) & 1))
				/* Top LMF entry */
				return FALSE;

			g_assert_not_reached ();

			/* Trampoline lmf frame */
			frame->method = (*lmf)->method;
		}

		new_ctx->esi = (*lmf)->esi;
		new_ctx->edi = (*lmf)->edi;
		new_ctx->ebx = (*lmf)->ebx;
		new_ctx->ebp = (*lmf)->ebp;
		new_ctx->eip = (*lmf)->eip;

		/* Adjust IP */
		new_ctx->eip --;

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		/* Check if we are in a trampoline LMF frame */
		if ((guint32)((*lmf)->previous_lmf) & 1) {
			/* lmf->esp is set by the trampoline code */
			new_ctx->esp = (*lmf)->esp;

			/* Pop arguments off the stack */
			/* FIXME: Handle the delegate case too ((*lmf)->method == NULL) */
			/* FIXME: Handle the IMT/vtable case too */
#if 0
#ifndef ENABLE_LLVM
			if ((*lmf)->method) {
				MonoMethod *method = (*lmf)->method;
				MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (method)->param_count + 1);
				guint32 stack_to_pop;

				stack_to_pop = mono_arch_get_argument_info (NULL, mono_method_signature (method), mono_method_signature (method)->param_count, arg_info);
				new_ctx->esp += stack_to_pop;
			}
#endif
#endif
		}
		else
			/* the lmf is always stored on the stack, so the following
			 * expression points to a stack location which can be used as ESP */
			new_ctx->esp = (unsigned long)&((*lmf)->eip);

		*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}
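/*
 * Illustrative sketch (not part of the runtime): mono_arch_find_jit_info unwinds a
 * single frame, so a stack walker drives it in a loop, looking up the JIT info for
 * the current IP, unwinding into new_ctx, and swapping the contexts until the
 * function reports that there is nothing left to unwind. The walk_stack_sketch
 * helper and its callback are hypothetical; Mono's real walker also maintains
 * per-frame state (save_locations, unwind options) that is omitted here.
 */
static void
walk_stack_sketch (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoContext *start_ctx,
				   MonoLMF *lmf, gboolean (*func) (StackFrameInfo *frame, MonoContext *ctx))
{
	MonoContext ctx = *start_ctx, new_ctx;
	StackFrameInfo frame;

	while (TRUE) {
		MonoJitInfo *ji = mini_jit_info_table_find (domain, MONO_CONTEXT_GET_IP (&ctx), NULL);

		/* unwind one frame; FALSE means the end of the unwindable stack was reached */
		if (!mono_arch_find_jit_info (domain, jit_tls, ji, &ctx, &new_ctx, &lmf, NULL, &frame))
			break;

		/* report the frame; a TRUE return from the callback stops the walk */
		if (func (&frame, &ctx))
			break;

		ctx = new_ctx;
	}
}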
/*
 * mono_arch_find_jit_info:
 *
 * This function is used to gather information from @ctx, and store it in @frame_info.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 */
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
						 MonoJitInfo *ji, MonoContext *ctx,
						 MonoContext *new_ctx, MonoLMF **lmf,
						 mgreg_t **save_locations,
						 StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		mgreg_t regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		frame->type = FRAME_TYPE_MANAGED;

		if (ji->from_aot)
			unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
		else
			unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);

		frame->unwind_info = unwind_info;
		frame->unwind_info_len = unwind_info_len;

		regs [AMD64_RAX] = new_ctx->rax;
		regs [AMD64_RBX] = new_ctx->rbx;
		regs [AMD64_RCX] = new_ctx->rcx;
		regs [AMD64_RDX] = new_ctx->rdx;
		regs [AMD64_RBP] = new_ctx->rbp;
		regs [AMD64_RSP] = new_ctx->rsp;
		regs [AMD64_RSI] = new_ctx->rsi;
		regs [AMD64_RDI] = new_ctx->rdi;
		regs [AMD64_RIP] = new_ctx->rip;
		regs [AMD64_R12] = new_ctx->r12;
		regs [AMD64_R13] = new_ctx->r13;
		regs [AMD64_R14] = new_ctx->r14;
		regs [AMD64_R15] = new_ctx->r15;

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
						   (guint8*)ji->code_start + ji->code_size,
						   ip, regs, MONO_MAX_IREGS + 1,
						   save_locations, MONO_MAX_IREGS, &cfa);

		new_ctx->rax = regs [AMD64_RAX];
		new_ctx->rbx = regs [AMD64_RBX];
		new_ctx->rcx = regs [AMD64_RCX];
		new_ctx->rdx = regs [AMD64_RDX];
		new_ctx->rbp = regs [AMD64_RBP];
		new_ctx->rsp = regs [AMD64_RSP];
		new_ctx->rsi = regs [AMD64_RSI];
		new_ctx->rdi = regs [AMD64_RDI];
		new_ctx->rip = regs [AMD64_RIP];
		new_ctx->r12 = regs [AMD64_R12];
		new_ctx->r13 = regs [AMD64_R13];
		new_ctx->r14 = regs [AMD64_R14];
		new_ctx->r15 = regs [AMD64_R15];

		/* The CFA becomes the new SP value */
		new_ctx->rsp = (mgreg_t)cfa;

		/* Adjust IP */
		new_ctx->rip --;

		if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
		}

#ifndef MONO_AMD64_NO_PUSHES
		/* Pop arguments off the stack */
		if (ji->has_arch_eh_info)
			new_ctx->rsp += mono_jit_info_get_arch_eh_info (ji)->stack_size;
#endif

		return TRUE;
	} else if (*lmf) {
		guint64 rip;

		if (((guint64)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
			rip = (*lmf)->rip;
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return FALSE;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof (mgreg_t));
		}

		ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
		/*
		 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
		 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
		 * return address.
		 */
		//g_assert (ji);
		if (!ji)
			return FALSE;

		/* Adjust IP */
		rip --;

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		new_ctx->rip = rip;
		new_ctx->rbp = (*lmf)->rbp;
		new_ctx->rsp = (*lmf)->rsp;

		new_ctx->rbx = (*lmf)->rbx;
		new_ctx->r12 = (*lmf)->r12;
		new_ctx->r13 = (*lmf)->r13;
		new_ctx->r14 = (*lmf)->r14;
		new_ctx->r15 = (*lmf)->r15;
#ifdef TARGET_WIN32
		new_ctx->rdi = (*lmf)->rdi;
		new_ctx->rsi = (*lmf)->rsi;
#endif

		*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}
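/*
 * Illustrative sketch (not part of the runtime): the amd64 path above recovers the
 * return address with *(guint64*)((*lmf)->rsp - sizeof (mgreg_t)). The LMF records
 * rsp immediately before the call instruction executes; the call then pushes the
 * 8-byte return address, so it ends up one slot below the recorded rsp. The
 * simulation below uses a fake stack array and hypothetical names to show only the
 * arithmetic.
 */
#include <assert.h>
#include <stdint.h>

static void
return_address_slot_demo (void)
{
	uint64_t stack [4];
	/* recorded_rsp models the value saved into the LMF just before the call */
	uint64_t *recorded_rsp = &stack [2];
	uint64_t fake_return_address = 0x400123;

	/* a call instruction decrements rsp by 8 and stores the return address there */
	*(recorded_rsp - 1) = fake_return_address;

	/* ...so the unwinder reads it back from recorded_rsp minus one pointer-sized slot */
	uint64_t rip = *(uint64_t *)((char *)recorded_rsp - sizeof (uint64_t));
	assert (rip == fake_return_address);
}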