/*
 * mono_arch_unwind_frame:
 *
 * See exceptions-amd64.c for docs.
 */
gboolean
mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls, 
						MonoJitInfo *ji, MonoContext *ctx, 
						MonoContext *new_ctx, MonoLMF **lmf,
						mgreg_t **save_locations,
						StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		mgreg_t regs [MONO_MAX_IREGS + 8 + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		frame->type = FRAME_TYPE_MANAGED;

		unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);

		memcpy (regs, &new_ctx->regs, sizeof (mgreg_t) * 32);
		/* v8..v15 are callee saved */
		memcpy (regs + MONO_MAX_IREGS, &(new_ctx->fregs [8]), sizeof (mgreg_t) * 8);

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
						   (guint8*)ji->code_start + ji->code_size,
						   ip, NULL, regs, MONO_MAX_IREGS + 8,
						   save_locations, MONO_MAX_IREGS, &cfa);

		memcpy (&new_ctx->regs, regs, sizeof (mgreg_t) * 32);
		memcpy (&(new_ctx->fregs [8]), regs + MONO_MAX_IREGS, sizeof (mgreg_t) * 8);

		new_ctx->pc = regs [ARMREG_LR];
		new_ctx->regs [ARMREG_SP] = (mgreg_t)cfa;

		if (*lmf && (*lmf)->gregs [MONO_ARCH_LMF_REG_SP] && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->gregs [MONO_ARCH_LMF_REG_SP])) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);
		}

		/* we subtract 1, so that the IP points into the call instruction */
		new_ctx->pc--;

		return TRUE;
	} else if (*lmf) {
		if (((gsize)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		ji = mini_jit_info_table_find (domain, (gpointer)(*lmf)->pc, NULL);
		if (!ji)
			return FALSE;

		g_assert (MONO_ARCH_LMF_REGS == ((0x3ff << 19) | (1 << ARMREG_FP) | (1 << ARMREG_SP)));
		memcpy (&new_ctx->regs [ARMREG_R19], &(*lmf)->gregs [0], sizeof (mgreg_t) * 10);
		new_ctx->regs [ARMREG_FP] = (*lmf)->gregs [MONO_ARCH_LMF_REG_FP];
		new_ctx->regs [ARMREG_SP] = (*lmf)->gregs [MONO_ARCH_LMF_REG_SP];
		new_ctx->pc = (*lmf)->pc;

		/* we subtract 1, so that the IP points into the call instruction */
		new_ctx->pc--;

		*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}
/*
 * mono_arch_find_jit_info:
 *
 * See exceptions-amd64.c for docs.
 */
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, 
						 MonoJitInfo *ji, MonoContext *ctx, 
						 MonoContext *new_ctx, MonoLMF **lmf,
						 mgreg_t **save_locations,
						 StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		int i;
		gssize regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		frame->type = FRAME_TYPE_MANAGED;

		if (ji->from_aot)
			unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
		else
			unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);

		for (i = 0; i < 16; ++i)
			regs [i] = new_ctx->regs [i];

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start, 
						   (guint8*)ji->code_start + ji->code_size,
						   ip, regs, MONO_MAX_IREGS,
						   save_locations, MONO_MAX_IREGS, &cfa);

		for (i = 0; i < 16; ++i)
			new_ctx->regs [i] = regs [i];
		new_ctx->pc = regs [ARMREG_LR];
		new_ctx->regs [ARMREG_SP] = (gsize)cfa;

		if (*lmf && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->sp)) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);
		}

		/* Clear thumb bit */
		new_ctx->pc &= ~1;

		/* we subtract 1, so that the IP points into the call instruction */
		new_ctx->pc--;

		return TRUE;
	} else if (*lmf) {
		if (((gsize)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		if ((ji = mini_jit_info_table_find (domain, (gpointer)(*lmf)->ip, NULL))) {
			frame->ji = ji;
		} else {
			if (!(*lmf)->method)
				return FALSE;
			frame->method = (*lmf)->method;
		}

		/*
		 * The LMF is saved at the start of the method using:
		 * ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP)
		 * ARM_PUSH (code, 0x5ff0);
		 * So it stores the register state as it existed at the caller. We need to
		 * produce the register state which existed at the time of the call which
		 * transitioned to native call, so we save the sp/fp/ip in the LMF.
		 */
		memcpy (&new_ctx->regs [0], &(*lmf)->iregs [0], sizeof (mgreg_t) * 13);
		new_ctx->pc = (*lmf)->ip;
		new_ctx->regs [ARMREG_SP] = (*lmf)->sp;
		new_ctx->regs [ARMREG_FP] = (*lmf)->fp;

		/* Clear thumb bit */
		new_ctx->pc &= ~1;

		/* we subtract 1, so that the IP points into the call instruction */
		new_ctx->pc--;

		*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}
/*
 * mono_arch_unwind_frame:
 *
 * See exceptions-amd64.c for docs.
 */
gboolean
mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls, 
						MonoJitInfo *ji, MonoContext *ctx, 
						MonoContext *new_ctx, MonoLMF **lmf,
						mgreg_t **save_locations,
						StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		gssize regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		if (ji->is_trampoline)
			frame->type = FRAME_TYPE_TRAMPOLINE;
		else
			frame->type = FRAME_TYPE_MANAGED;

		unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);

		regs [X86_EAX] = new_ctx->eax;
		regs [X86_EBX] = new_ctx->ebx;
		regs [X86_ECX] = new_ctx->ecx;
		regs [X86_EDX] = new_ctx->edx;
		regs [X86_ESP] = new_ctx->esp;
		regs [X86_EBP] = new_ctx->ebp;
		regs [X86_ESI] = new_ctx->esi;
		regs [X86_EDI] = new_ctx->edi;
		regs [X86_NREG] = new_ctx->eip;

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start, 
						   (guint8*)ji->code_start + ji->code_size,
						   ip, NULL, regs, MONO_MAX_IREGS + 1,
						   save_locations, MONO_MAX_IREGS, &cfa);

		new_ctx->eax = regs [X86_EAX];
		new_ctx->ebx = regs [X86_EBX];
		new_ctx->ecx = regs [X86_ECX];
		new_ctx->edx = regs [X86_EDX];
		new_ctx->esp = regs [X86_ESP];
		new_ctx->ebp = regs [X86_EBP];
		new_ctx->esi = regs [X86_ESI];
		new_ctx->edi = regs [X86_EDI];
		new_ctx->eip = regs [X86_NREG];

		/* The CFA becomes the new SP value */
		new_ctx->esp = (gssize)cfa;

		/* Adjust IP */
		new_ctx->eip --;

		return TRUE;
	} else if (*lmf) {
		if (((guint64)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		if ((ji = mini_jit_info_table_find (domain, (gpointer)(*lmf)->eip, NULL))) {
			frame->ji = ji;
		} else {
			if (!(*lmf)->method)
				return FALSE;
			frame->method = (*lmf)->method;
		}

		new_ctx->esi = (*lmf)->esi;
		new_ctx->edi = (*lmf)->edi;
		new_ctx->ebx = (*lmf)->ebx;
		new_ctx->ebp = (*lmf)->ebp;
		new_ctx->eip = (*lmf)->eip;

		/* Adjust IP */
		new_ctx->eip --;

		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		/* Check if we are in a trampoline LMF frame */
		if ((guint32)((*lmf)->previous_lmf) & 1) {
			/* lmf->esp is set by the trampoline code */
			new_ctx->esp = (*lmf)->esp;
		} else
			/* the lmf is always stored on the stack, so the following
			 * expression points to a stack location which can be used as ESP */
			new_ctx->esp = (unsigned long)&((*lmf)->eip);

		*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}
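/*
 * Editor's sketch (not part of the runtime): a minimal restatement of the
 * previous_lmf tag-bit convention the unwinders above rely on. Bit 0 marks a
 * trampoline/rip-carrying LMF (checked via "& 1"), bit 1 marks a soft-debugger
 * invoke marker (checked via "& 2"), and the real pointer is recovered by
 * masking the low bits off (~3 here, ~7 where a third bit is used). The helper
 * name below is hypothetical and only illustrates the scheme.
 */
static MonoLMF*
sketch_lmf_previous (MonoLMF *lmf, gboolean *is_trampoline, gboolean *is_debugger_invoke)
{
	gsize prev = (gsize)lmf->previous_lmf;

	*is_trampoline = (prev & 1) != 0;       /* trampoline LMF frame */
	*is_debugger_invoke = (prev & 2) != 0;  /* soft-debugger invoke marker */

	/* strip the tag bits to get the actual previous LMF pointer */
	return (MonoLMF*)(prev & ~3);
}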
/*
 * mono_arch_unwind_frame:
 *
 * See exceptions-amd64.c for docs.
 */
gboolean
mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls, 
						MonoJitInfo *ji, MonoContext *ctx, 
						MonoContext *new_ctx, MonoLMF **lmf,
						mgreg_t **save_locations,
						StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		int i;
		mono_unwind_reg_t regs [MONO_MAX_IREGS + 1 + 8];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		if (ji->is_trampoline)
			frame->type = FRAME_TYPE_TRAMPOLINE;
		else
			frame->type = FRAME_TYPE_MANAGED;

		unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);

		/*
		printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
		mono_print_unwind_info (unwind_info, unwind_info_len);
		*/

		for (i = 0; i < 16; ++i)
			regs [i] = new_ctx->regs [i];
#ifdef TARGET_IOS
		/* On IOS, d8..d15 are callee saved. They are mapped to 8..15 in unwind.c */
		for (i = 0; i < 8; ++i)
			regs [MONO_MAX_IREGS + i] = *(guint64*)&(new_ctx->fregs [8 + i]);
#endif

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start, 
						   (guint8*)ji->code_start + ji->code_size,
						   ip, NULL, regs, MONO_MAX_IREGS + 8,
						   save_locations, MONO_MAX_IREGS, &cfa);

		for (i = 0; i < 16; ++i)
			new_ctx->regs [i] = regs [i];
		new_ctx->pc = regs [ARMREG_LR];
		new_ctx->regs [ARMREG_SP] = (gsize)cfa;
#ifdef TARGET_IOS
		for (i = 0; i < 8; ++i)
			new_ctx->fregs [8 + i] = *(double*)&(regs [MONO_MAX_IREGS + i]);
#endif

		/* Clear thumb bit */
		new_ctx->pc &= ~1;

		/* we subtract 1, so that the IP points into the call instruction */
		new_ctx->pc--;

		return TRUE;
	} else if (*lmf) {
		if (((gsize)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		if ((ji = mini_jit_info_table_find (domain, (gpointer)(*lmf)->ip, NULL))) {
			frame->ji = ji;
		} else {
			if (!(*lmf)->method)
				return FALSE;
			frame->method = (*lmf)->method;
		}

		/*
		 * The LMF is saved at the start of the method using:
		 * ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP)
		 * ARM_PUSH (code, 0x5ff0);
		 * So it stores the register state as it existed at the caller. We need to
		 * produce the register state which existed at the time of the call which
		 * transitioned to native call, so we save the sp/fp/ip in the LMF.
		 */
		memcpy (&new_ctx->regs [0], &(*lmf)->iregs [0], sizeof (mgreg_t) * 13);
		new_ctx->pc = (*lmf)->ip;
		new_ctx->regs [ARMREG_SP] = (*lmf)->sp;
		new_ctx->regs [ARMREG_FP] = (*lmf)->fp;

		/* Clear thumb bit */
		new_ctx->pc &= ~1;

		/* we subtract 1, so that the IP points into the call instruction */
		new_ctx->pc--;

		*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}
/*
 * mono_arch_unwind_frame:
 *
 * This function is used to gather information from @ctx, and store it in @frame_info.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 */
gboolean
mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls, 
						MonoJitInfo *ji, MonoContext *ctx, 
						MonoContext *new_ctx, MonoLMF **lmf,
						mgreg_t **save_locations,
						StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);
	int i;

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		mgreg_t regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;
		guint8 *epilog = NULL;

		if (ji->is_trampoline)
			frame->type = FRAME_TYPE_TRAMPOLINE;
		else
			frame->type = FRAME_TYPE_MANAGED;

		unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);

		frame->unwind_info = unwind_info;
		frame->unwind_info_len = unwind_info_len;

		/*
		printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
		mono_print_unwind_info (unwind_info, unwind_info_len);
		*/
		/* LLVM compiled code doesn't have this info */
		if (ji->has_arch_eh_info)
			epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);

		for (i = 0; i < AMD64_NREG; ++i)
			regs [i] = new_ctx->gregs [i];

		mono_unwind_frame (unwind_info, unwind_info_len, (guint8 *)ji->code_start,
						   (guint8*)ji->code_start + ji->code_size,
						   (guint8 *)ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
						   save_locations, MONO_MAX_IREGS, &cfa);

		for (i = 0; i < AMD64_NREG; ++i)
			new_ctx->gregs [i] = regs [i];

		/* The CFA becomes the new SP value */
		new_ctx->gregs [AMD64_RSP] = (mgreg_t)cfa;

		/* Adjust IP */
		new_ctx->gregs [AMD64_RIP] --;

		return TRUE;
	} else if (*lmf) {
		guint64 rip;

		if (((guint64)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		if (((guint64)(*lmf)->previous_lmf) & 4) {
			MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);

			rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
		} else if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
			rip = (*lmf)->rip;
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return FALSE;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
		}

		ji = mini_jit_info_table_find (domain, (char *)rip, NULL);
		/*
		 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
		 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
		 * return address.
		 */
		//g_assert (ji);
		if (!ji)
			return FALSE;

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		if (((guint64)(*lmf)->previous_lmf) & 4) {
			MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);

			/* Trampoline frame */
			for (i = 0; i < AMD64_NREG; ++i)
				new_ctx->gregs [i] = ext->ctx->gregs [i];
			/* Adjust IP */
			new_ctx->gregs [AMD64_RIP] --;
		} else {
			/*
			 * The registers saved in the LMF will be restored using the normal unwind info,
			 * when the wrapper frame is processed.
			 */
			/* Adjust IP */
			rip --;
			new_ctx->gregs [AMD64_RIP] = rip;
			new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
			new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
			for (i = 0; i < AMD64_NREG; ++i) {
				if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
					new_ctx->gregs [i] = 0;
			}
		}

		*lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);

		return TRUE;
	}

	return FALSE;
}
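/*
 * Editor's sketch (not the runtime's stack walker): it only illustrates the
 * contract stated in the comment above -- unwind one frame from @ctx into
 * @new_ctx and return FALSE at the top of the stack -- driven in a loop.
 * The function name is hypothetical; the real walker in mini-exceptions.c
 * additionally handles save_locations, async contexts and frame filtering.
 */
static void
sketch_walk_stack (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoContext *start_ctx, MonoLMF *lmf)
{
	MonoContext ctx = *start_ctx;

	while (TRUE) {
		MonoContext new_ctx;
		StackFrameInfo frame;
		MonoJitInfo *ji;

		/* NULL ji means the IP is not in managed code; the LMF chain is used instead */
		ji = mini_jit_info_table_find (domain, MONO_CONTEXT_GET_IP (&ctx), NULL);

		if (!mono_arch_unwind_frame (domain, jit_tls, ji, &ctx, &new_ctx, &lmf, NULL, &frame))
			break;

		/* ... inspect frame.type / frame.ji here ... */

		ctx = new_ctx;
	}
}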
/*
 * mono_arch_find_jit_info:
 *
 * See exceptions-amd64.c for docs.
 */
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, 
						 MonoJitInfo *ji, MonoContext *ctx, 
						 MonoContext *new_ctx, MonoLMF **lmf,
						 mgreg_t **save_locations,
						 StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		gssize regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		frame->type = FRAME_TYPE_MANAGED;

		if (ji->from_aot)
			unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
		else
			unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);

		regs [X86_EAX] = new_ctx->eax;
		regs [X86_EBX] = new_ctx->ebx;
		regs [X86_ECX] = new_ctx->ecx;
		regs [X86_EDX] = new_ctx->edx;
		regs [X86_ESP] = new_ctx->esp;
		regs [X86_EBP] = new_ctx->ebp;
		regs [X86_ESI] = new_ctx->esi;
		regs [X86_EDI] = new_ctx->edi;
		regs [X86_NREG] = new_ctx->eip;

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start, 
						   (guint8*)ji->code_start + ji->code_size,
						   ip, regs, MONO_MAX_IREGS + 1,
						   save_locations, MONO_MAX_IREGS, &cfa);

		new_ctx->eax = regs [X86_EAX];
		new_ctx->ebx = regs [X86_EBX];
		new_ctx->ecx = regs [X86_ECX];
		new_ctx->edx = regs [X86_EDX];
		new_ctx->esp = regs [X86_ESP];
		new_ctx->ebp = regs [X86_EBP];
		new_ctx->esi = regs [X86_ESI];
		new_ctx->edi = regs [X86_EDI];
		new_ctx->eip = regs [X86_NREG];

		/* The CFA becomes the new SP value */
		new_ctx->esp = (gssize)cfa;

		/* Adjust IP */
		new_ctx->eip --;

		if (*lmf && (MONO_CONTEXT_GET_BP (ctx) >= (gpointer)(*lmf)->ebp)) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);
		}

		/* Pop arguments off the stack */
		/*
		 * FIXME: LLVM doesn't push these, we can't use ji->from_llvm as it describes
		 * the callee.
		 */
#ifndef ENABLE_LLVM
		if (ji->has_arch_eh_info)
			new_ctx->esp += mono_jit_info_get_arch_eh_info (ji)->stack_size;
#endif

		return TRUE;
	} else if (*lmf) {
		if (((guint64)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		if ((ji = mini_jit_info_table_find (domain, (gpointer)(*lmf)->eip, NULL))) {
		} else {
			if (!((guint32)((*lmf)->previous_lmf) & 1))
				/* Top LMF entry */
				return FALSE;

			g_assert_not_reached ();

			/* Trampoline lmf frame */
			frame->method = (*lmf)->method;
		}

		new_ctx->esi = (*lmf)->esi;
		new_ctx->edi = (*lmf)->edi;
		new_ctx->ebx = (*lmf)->ebx;
		new_ctx->ebp = (*lmf)->ebp;
		new_ctx->eip = (*lmf)->eip;

		/* Adjust IP */
		new_ctx->eip --;

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		/* Check if we are in a trampoline LMF frame */
		if ((guint32)((*lmf)->previous_lmf) & 1) {
			/* lmf->esp is set by the trampoline code */
			new_ctx->esp = (*lmf)->esp;

			/* Pop arguments off the stack */
			/* FIXME: Handle the delegate case too ((*lmf)->method == NULL) */
			/* FIXME: Handle the IMT/vtable case too */
#if 0
#ifndef ENABLE_LLVM
			if ((*lmf)->method) {
				MonoMethod *method = (*lmf)->method;
				MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (method)->param_count + 1);
				guint32 stack_to_pop = mono_arch_get_argument_info (NULL, mono_method_signature (method), mono_method_signature (method)->param_count, arg_info);

				new_ctx->esp += stack_to_pop;
			}
#endif
#endif
		} else
			/* the lmf is always stored on the stack, so the following
			 * expression points to a stack location which can be used as ESP */
			new_ctx->esp = (unsigned long)&((*lmf)->eip);

		*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}
/*
 * mono_arch_unwind_frame:
 *
 * See exceptions-amd64.c for docs.
 */
gboolean
mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls, 
						MonoJitInfo *ji, MonoContext *ctx, 
						MonoContext *new_ctx, MonoLMF **lmf,
						mgreg_t **save_locations,
						StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);
	MonoPPCStackFrame *sframe;

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;
	setup_context (new_ctx);

	if (ji != NULL) {
		int i;
		mgreg_t regs [ppc_lr + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		if (ji->is_trampoline)
			frame->type = FRAME_TYPE_TRAMPOLINE;
		else
			frame->type = FRAME_TYPE_MANAGED;

		unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);

		sframe = (MonoPPCStackFrame*)MONO_CONTEXT_GET_SP (ctx);
		MONO_CONTEXT_SET_BP (new_ctx, sframe->sp);
		if (!ji->is_trampoline && jinfo_get_method (ji)->save_lmf) {
			/* sframe->sp points just past the end of the LMF */
			guint8 *lmf_addr = (guint8*)sframe->sp - sizeof (MonoLMF);

			memcpy (&new_ctx->fregs [MONO_PPC_FIRST_SAVED_FREG], lmf_addr + G_STRUCT_OFFSET (MonoLMF, fregs), sizeof (double) * MONO_SAVED_FREGS);
			memcpy (&new_ctx->regs [MONO_PPC_FIRST_SAVED_GREG], lmf_addr + G_STRUCT_OFFSET (MonoLMF, iregs), sizeof (mgreg_t) * MONO_SAVED_GREGS);

			/* the calling IP is in the parent frame */
			sframe = (MonoPPCStackFrame*)sframe->sp;
			/* we subtract 4, so that the IP points into the call instruction */
			MONO_CONTEXT_SET_IP (new_ctx, sframe->lr - 4);
		} else {
			regs [ppc_lr] = ctx->sc_ir;
			regs [ppc_sp] = ctx->sc_sp;
			for (i = MONO_PPC_FIRST_SAVED_GREG; i < MONO_MAX_IREGS; ++i)
				regs [i] = ctx->regs [i];

			mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start, 
							   (guint8*)ji->code_start + ji->code_size,
							   ip, NULL, regs, ppc_lr + 1,
							   save_locations, MONO_MAX_IREGS, &cfa);

			/* we subtract 4, so that the IP points into the call instruction */
			MONO_CONTEXT_SET_IP (new_ctx, regs [ppc_lr] - 4);
			MONO_CONTEXT_SET_BP (new_ctx, cfa);
			for (i = MONO_PPC_FIRST_SAVED_GREG; i < MONO_MAX_IREGS; ++i)
				new_ctx->regs [i] = regs [i];
		}

		return TRUE;
	} else if (*lmf) {
		if ((ji = mini_jit_info_table_find (domain, (gpointer)(*lmf)->eip, NULL))) {
		} else {
			if (!(*lmf)->method)
				return FALSE;

			/* Trampoline lmf frame */
			frame->method = (*lmf)->method;
		}

		/*sframe = (MonoPPCStackFrame*)MONO_CONTEXT_GET_SP (ctx);
		MONO_CONTEXT_SET_BP (new_ctx, sframe->sp);
		MONO_CONTEXT_SET_IP (new_ctx, sframe->lr);*/
		MONO_CONTEXT_SET_BP (new_ctx, (*lmf)->ebp);
		MONO_CONTEXT_SET_IP (new_ctx, (*lmf)->eip);
		memcpy (&new_ctx->regs [MONO_PPC_FIRST_SAVED_GREG], (*lmf)->iregs, sizeof (mgreg_t) * MONO_SAVED_GREGS);
		memcpy (&new_ctx->fregs [MONO_PPC_FIRST_SAVED_FREG], (*lmf)->fregs, sizeof (double) * MONO_SAVED_FREGS);

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		/* FIXME: what about trampoline LMF frames? see exceptions-x86.c */

		*lmf = (*lmf)->previous_lmf;

		return TRUE;
	}

	return FALSE;
}
/*
 * mono_arch_find_jit_info:
 *
 * This function is used to gather information from @ctx, and store it in @frame_info.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 */
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, 
						 MonoJitInfo *ji, MonoContext *ctx, 
						 MonoContext *new_ctx, MonoLMF **lmf,
						 mgreg_t **save_locations,
						 StackFrameInfo *frame)
{
	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		int i;
		gpointer ip = MONO_CONTEXT_GET_IP (ctx);
		mgreg_t regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		frame->type = FRAME_TYPE_MANAGED;

		unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);

		for (i = 0; i < MONO_MAX_IREGS; ++i)
			regs [i] = new_ctx->sc_regs [i];

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start, 
						   (guint8*)ji->code_start + ji->code_size,
						   ip, NULL, regs, MONO_MAX_IREGS,
						   save_locations, MONO_MAX_IREGS, &cfa);

		for (i = 0; i < MONO_MAX_IREGS; ++i)
			new_ctx->sc_regs [i] = regs [i];
		new_ctx->sc_pc = regs [mips_ra];
		new_ctx->sc_regs [mips_sp] = (mgreg_t)cfa;

		/* we subtract 8, so that the IP points into the call instruction */
		MONO_CONTEXT_SET_IP (new_ctx, new_ctx->sc_pc - 8);

		/* Sanity check -- we should have made progress here */
		g_assert (MONO_CONTEXT_GET_SP (new_ctx) != MONO_CONTEXT_GET_SP (ctx));

		return TRUE;
	} else if (*lmf) {
		if (((mgreg_t)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		if (!(*lmf)->method) {
#ifdef DEBUG_EXCEPTIONS
			g_print ("mono_arch_find_jit_info: bad lmf @ %p\n", (void *) *lmf);
#endif
			return FALSE;
		}

		g_assert (((*lmf)->magic == MIPS_LMF_MAGIC1) || ((*lmf)->magic == MIPS_LMF_MAGIC2));

		ji = mini_jit_info_table_find (domain, (gpointer)(*lmf)->eip, NULL);
		if (!ji) {
			// FIXME: This can happen with multiple appdomains (bug #444383)
			return FALSE;
		}

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		memcpy (&new_ctx->sc_regs, (*lmf)->iregs, sizeof (gulong) * MONO_SAVED_GREGS);
		memcpy (&new_ctx->sc_fpregs, (*lmf)->fregs, sizeof (float) * MONO_SAVED_FREGS);
		MONO_CONTEXT_SET_IP (new_ctx, (*lmf)->eip);

		/* ensure that we've made progress */
		g_assert (new_ctx->sc_pc != ctx->sc_pc);

		*lmf = (gpointer)(((gsize)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}
/*
 * mono_arch_find_jit_info:
 *
 * This function is used to gather information from @ctx, and store it in @frame_info.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 */
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, 
						 MonoJitInfo *ji, MonoContext *ctx, 
						 MonoContext *new_ctx, MonoLMF **lmf,
						 mgreg_t **save_locations,
						 StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		mgreg_t regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		frame->type = FRAME_TYPE_MANAGED;

		if (ji->from_aot)
			unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
		else
			unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);

		frame->unwind_info = unwind_info;
		frame->unwind_info_len = unwind_info_len;

		regs [AMD64_RAX] = new_ctx->rax;
		regs [AMD64_RBX] = new_ctx->rbx;
		regs [AMD64_RCX] = new_ctx->rcx;
		regs [AMD64_RDX] = new_ctx->rdx;
		regs [AMD64_RBP] = new_ctx->rbp;
		regs [AMD64_RSP] = new_ctx->rsp;
		regs [AMD64_RSI] = new_ctx->rsi;
		regs [AMD64_RDI] = new_ctx->rdi;
		regs [AMD64_RIP] = new_ctx->rip;
		regs [AMD64_R12] = new_ctx->r12;
		regs [AMD64_R13] = new_ctx->r13;
		regs [AMD64_R14] = new_ctx->r14;
		regs [AMD64_R15] = new_ctx->r15;

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start, 
						   (guint8*)ji->code_start + ji->code_size,
						   ip, regs, MONO_MAX_IREGS + 1,
						   save_locations, MONO_MAX_IREGS, &cfa);

		new_ctx->rax = regs [AMD64_RAX];
		new_ctx->rbx = regs [AMD64_RBX];
		new_ctx->rcx = regs [AMD64_RCX];
		new_ctx->rdx = regs [AMD64_RDX];
		new_ctx->rbp = regs [AMD64_RBP];
		new_ctx->rsp = regs [AMD64_RSP];
		new_ctx->rsi = regs [AMD64_RSI];
		new_ctx->rdi = regs [AMD64_RDI];
		new_ctx->rip = regs [AMD64_RIP];
		new_ctx->r12 = regs [AMD64_R12];
		new_ctx->r13 = regs [AMD64_R13];
		new_ctx->r14 = regs [AMD64_R14];
		new_ctx->r15 = regs [AMD64_R15];

		/* The CFA becomes the new SP value */
		new_ctx->rsp = (mgreg_t)cfa;

		/* Adjust IP */
		new_ctx->rip --;

		if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
		}

#ifndef MONO_AMD64_NO_PUSHES
		/* Pop arguments off the stack */
		if (ji->has_arch_eh_info)
			new_ctx->rsp += mono_jit_info_get_arch_eh_info (ji)->stack_size;
#endif

		return TRUE;
	} else if (*lmf) {
		guint64 rip;

		if (((guint64)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));

			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;

			return TRUE;
		}

		if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
			rip = (*lmf)->rip;
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return FALSE;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
		}

		ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
		/*
		 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
		 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
		 * return address.
		 */
		//g_assert (ji);
		if (!ji)
			return FALSE;

		/* Adjust IP */
		rip --;

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		new_ctx->rip = rip;
		new_ctx->rbp = (*lmf)->rbp;
		new_ctx->rsp = (*lmf)->rsp;

		new_ctx->rbx = (*lmf)->rbx;
		new_ctx->r12 = (*lmf)->r12;
		new_ctx->r13 = (*lmf)->r13;
		new_ctx->r14 = (*lmf)->r14;
		new_ctx->r15 = (*lmf)->r15;
#ifdef TARGET_WIN32
		new_ctx->rdi = (*lmf)->rdi;
		new_ctx->rsi = (*lmf)->rsi;
#endif

		*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);

		return TRUE;
	}

	return FALSE;
}