/**
 * get_throw_trampoline:
 *
 * Returns a function pointer which can be used to raise exceptions.
 * The returned function has the following signature:
 * void (*func) (MonoException *exc); or
 * void (*func) (guint32 ex_token, guint8* ip);
 */
static gpointer
get_throw_trampoline (int size, gboolean corlib, gboolean rethrow, gboolean llvm, gboolean resume_unwind, const char *tramp_name, MonoTrampInfo **info, gboolean aot)
{
	guint8 *start;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int cfa_offset;

	code = start = mono_global_codeman_reserve (size);

	mono_add_unwind_op_def_cfa (unwind_ops, code, start, ARMREG_SP, 0);

	/* save all the regs on the stack */
	ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
	ARM_PUSH (code, MONO_ARM_REGSAVE_MASK);

	cfa_offset = MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, start, ARMREG_SP, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, start, ARMREG_LR, - sizeof (mgreg_t));

	/* Save fp regs */
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (double) * 16);
	cfa_offset += sizeof (double) * 16;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, cfa_offset);
#if defined(ARM_FPU_VFP)
	ARM_FSTMD (code, ARM_VFP_D0, 16, ARMREG_SP);
#endif

	/* Param area */
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
	cfa_offset += 8;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, cfa_offset);

	/* call throw_exception (exc, ip, sp, int_regs, fp_regs) */
	/* caller sp */
	ARM_ADD_REG_IMM8 (code, ARMREG_R2, ARMREG_SP, cfa_offset);
	/* exc is already in place in r0 */
	if (corlib) {
		/* The caller ip is already in R1 */
		if (llvm)
			/* Negate the ip adjustment done in mono_arm_throw_exception */
			ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
	} else {
		ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR); /* caller ip */
	}
	/* int regs */
	ARM_ADD_REG_IMM8 (code, ARMREG_R3, ARMREG_SP, (cfa_offset - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t))));
	/* we encode rethrow in the ip */
	ARM_ORR_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, rethrow);
	/* fp regs */
	ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_SP, 8);
	ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, 0);

	if (aot) {
		const char *icall_name;

		if (resume_unwind)
			icall_name = "mono_arm_resume_unwind";
		else if (corlib)
			icall_name = "mono_arm_throw_exception_by_token";
		else
			icall_name = "mono_arm_throw_exception";

		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(gpointer*)(gpointer)code = NULL;
		code += 4;
		ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_PC, ARMREG_IP);
	} else {
		code = mono_arm_emit_load_imm (code, ARMREG_IP, GPOINTER_TO_UINT (resume_unwind ? (gpointer)mono_arm_resume_unwind : (corlib ? (gpointer)mono_arm_throw_exception_by_token : (gpointer)mono_arm_throw_exception)));
	}
	ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
	ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
	/* we should never reach this breakpoint */
	ARM_DBRK (code);

	g_assert ((code - start) < size);

	mono_arch_flush_icache (start, code - start);

	if (info)
		*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);

	return start;
}
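/*
 * Usage sketch (not part of the original file): an arch entry point such as
 * mono_arch_get_throw_exception () typically wraps get_throw_trampoline () along
 * these lines. The reserved code size (132) and the trampoline name used here are
 * illustrative assumptions, not values taken from this file.
 */
gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	/* corlib = FALSE, rethrow = FALSE, llvm = FALSE, resume_unwind = FALSE */
	return get_throw_trampoline (132, FALSE, FALSE, FALSE, FALSE, "throw_exception", info, aot);
}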
/*
 * get_throw_trampoline:
 *
 * Generate a call to mono_amd64_throw_exception/
 * mono_amd64_throw_corlib_exception.
 */
static gpointer
get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
{
	guint8* start;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
	const guint kMaxCodeSize = NACL_SIZE (256, 512);

#ifdef TARGET_WIN32
	dummy_stack_space = 6 * sizeof(mgreg_t);	/* Windows expects stack space allocated for all 6 dummy args. */
#else
	dummy_stack_space = 0;
#endif

	start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* The stack is unaligned on entry */
	stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;

	code = start;

	if (info)
		unwind_ops = mono_arch_get_cie_program ();

	/* Alloc frame */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
	if (info)
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);

	/*
	 * To hide linux/windows calling convention differences, we pass all arguments on
	 * the stack by passing 6 dummy values in registers.
	 */

	arg_offsets [0] = dummy_stack_space + 0;
	arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
	arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
	ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
	regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* Save registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (i != AMD64_RSP)
			amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
	/* Save RSP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
	/* Save IP */
	if (llvm_abs)
		amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
	else
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
	/* Set arg1 == ctx */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
	else
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
	/* Set arg3 == rethrow/pc offset */
	if (resume_unwind) {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
	} else if (corlib) {
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
		if (llvm_abs)
			/*
			 * The caller is LLVM code which passes the absolute address not a pc offset,
			 * so compensate by passing 0 as 'rip' and passing the negated abs address as
			 * the pc offset.
			 */
			amd64_neg_membase (code, AMD64_RSP, arg_offsets [2]);
	} else {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
	}

	if (aot) {
		const char *icall_name;

		if (resume_unwind)
			icall_name = "mono_amd64_resume_unwind";
		else if (corlib)
			icall_name = "mono_amd64_throw_corlib_exception";
		else
			icall_name = "mono_amd64_throw_exception";
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate (&start, kMaxCodeSize, &code);

	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);

	return start;
}
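/*
 * Sketch (an assumption for illustration only, not the real prototype): the throw
 * helpers called above are declared with six leading dummy parameters, so the values
 * written at arg_offsets [] (ctx, exc/token, rethrow/pc offset) become the 7th-9th
 * parameters and are therefore read from the stack on both the SysV and Windows x64
 * ABIs, as the "6 dummy values" comment describes. Parameter names and exact types
 * here are illustrative.
 */
void mono_amd64_throw_exception_sketch (guint64 dummy1, guint64 dummy2, guint64 dummy3,
					guint64 dummy4, guint64 dummy5, guint64 dummy6,
					MonoContext *mctx, MonoObject *exc, guint64 rethrow);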
gpointer
mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *start;
	guint8 *br[1];
	gpointer throw_trampoline;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = NACL_SIZE (128, 256);

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	/* We are in the frame of a managed method after a call */
	/*
	 * We would like to throw the pending exception in such a way that it looks to
	 * be thrown from the managed method.
	 */

	/* Save registers which might contain the return value of the call */
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RDX);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);

	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the pending exception */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Check if it is NULL, and branch */
	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
	br[0] = code;
	x86_branch8 (code, X86_CC_EQ, 0, FALSE);

	/* exc != NULL branch */

	/* Save the exc on the stack */
	amd64_push_reg (code, AMD64_RAX);
	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Load exc */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);

	/* Pop saved stuff from the stack */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);

	/* Setup arguments for the throw trampoline */
	/* Exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
	/* The trampoline expects the caller ip to be pushed on the stack */
	amd64_push_reg (code, AMD64_RAX);

	/* Call the throw trampoline */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		throw_trampoline = mono_get_throw_exception ();
		amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
	}
	/* We use a jump instead of a call so we can push the original ip on the stack */
	amd64_jump_reg (code, AMD64_R11);

	/* ex == NULL branch */
	mono_amd64_patch (br [0], code);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);

	/* Restore registers */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_RDX);
	amd64_pop_reg (code, AMD64_RAX);

	/* Return to original code */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate (&start, kMaxCodeSize, &code);

	if (info)
		*info = mono_tramp_info_create ("throw_pending_exception", start, code - start, ji, unwind_ops);

	return start;
}
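/*
 * C-level sketch (an assumption about intent, not code from this file) of the control
 * flow the trampoline emitted above implements. The return type used here for
 * mono_amd64_get_original_ip () is assumed for illustration.
 */
static void
throw_pending_exception_sketch (void)
{
	MonoException *exc = mono_thread_get_and_clear_pending_exception ();
	gpointer original_ip = (gpointer)(gsize)mono_amd64_get_original_ip ();

	if (exc) {
		/* Push ORIGINAL_IP as the fake caller ip, move EXC into the first argument
		 * register and tail-jump to the throw trampoline. */
	} else {
		/* Restore the saved return-value registers (rax, rdx, xmm0) and jump back
		 * to ORIGINAL_IP. */
	}
}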
/**
 * get_throw_trampoline:
 *
 * Returns a function pointer which can be used to raise exceptions.
 * The returned function has the following signature:
 * void (*func) (MonoException *exc); or
 * void (*func) (guint32 ex_token, guint8* ip);
 */
static gpointer
get_throw_trampoline (int size, gboolean corlib, gboolean rethrow, gboolean llvm, gboolean resume_unwind, const char *tramp_name, MonoTrampInfo **info, gboolean aot)
{
	guint8 *start;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	code = start = mono_global_codeman_reserve (size);

	mono_add_unwind_op_def_cfa (unwind_ops, code, start, ARMREG_SP, 0);

	/* save all the regs on the stack */
	ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
	ARM_PUSH (code, MONO_ARM_REGSAVE_MASK);

	mono_add_unwind_op_def_cfa (unwind_ops, code, start, ARMREG_SP, 10 * 4);
	mono_add_unwind_op_offset (unwind_ops, code, start, ARMREG_LR, -4);

	/* call throw_exception (exc, ip, sp, int_regs, fp_regs) */
	/* caller sp */
	ARM_ADD_REG_IMM8 (code, ARMREG_R2, ARMREG_SP, 10 * 4); /* 10 saved regs */
	/* exc is already in place in r0 */
	if (corlib) {
		/* The caller ip is already in R1 */
		if (llvm)
			/* Negate the ip adjustment done in mono_arm_throw_exception */
			ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
	} else {
		ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR); /* caller ip */
	}
	/* FIXME: pointer to the saved fp regs */
	/*pos = alloc_size - sizeof (double) * MONO_SAVED_FREGS;
	ppc_addi (code, ppc_r7, ppc_sp, pos);*/
	/* pointer to the saved int regs */
	ARM_MOV_REG_REG (code, ARMREG_R3, ARMREG_SP); /* the pushed regs */
	/* we encode rethrow in the ip, so we avoid args on the stack */
	ARM_ORR_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, rethrow);

	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_arm_throw_exception_by_token" : "mono_arm_throw_exception");
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(gpointer*)(gpointer)code = NULL;
		code += 4;
		ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_PC, ARMREG_IP);
	} else {
		code = mono_arm_emit_load_imm (code, ARMREG_IP, GPOINTER_TO_UINT (resume_unwind ? (gpointer)mono_arm_resume_unwind : (corlib ? (gpointer)mono_arm_throw_exception_by_token : (gpointer)mono_arm_throw_exception)));
	}
	ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
	ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
	/* we should never reach this breakpoint */
	ARM_DBRK (code);

	g_assert ((code - start) < size);

	mono_arch_flush_icache (start, code - start);

	if (info)
		*info = mono_tramp_info_create (g_strdup (tramp_name), start, code - start, ji, unwind_ops);

	return start;
}
gpointer
mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int buf_len, cfa_offset;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	guint8 *br_out, *br [16], *br_ret [16];
	int i, offset, arg_reg, npushed, info_offset, mrgctx_offset;
	int caller_reg_area_offset, caller_freg_area_offset, callee_reg_area_offset, callee_freg_area_offset;
	int lr_offset, fp, br_ret_index, args_size;

	buf_len = 784;
	buf = code = mono_global_codeman_reserve (buf_len);

	arg_reg = ARMREG_R0;
	/* Registers pushed by the arg trampoline */
	npushed = 4;

	// ios abi compatible frame
	fp = ARMREG_R7;

	cfa_offset = npushed * TARGET_SIZEOF_VOID_P;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, ARMREG_SP, cfa_offset);
	ARM_PUSH (code, (1 << fp) | (1 << ARMREG_LR));
	cfa_offset += 2 * TARGET_SIZEOF_VOID_P;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, fp, (- cfa_offset));
	mono_add_unwind_op_offset (unwind_ops, code, buf, ARMREG_LR, ((- cfa_offset) + 4));
	ARM_MOV_REG_REG (code, fp, ARMREG_SP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, fp);

	/* Allocate stack frame */
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 32 + (16 * sizeof (double)));
	if (MONO_ARCH_FRAME_ALIGNMENT > 8)
		ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, (MONO_ARCH_FRAME_ALIGNMENT - 8));
	offset = 4;
	info_offset = -offset;
	offset += 4;
	mrgctx_offset = -offset;
	offset += 4 * 4;
	callee_reg_area_offset = -offset;
	offset += 8 * 8;
	caller_freg_area_offset = -offset;
	offset += 8 * 8;
	callee_freg_area_offset = -offset;

	caller_reg_area_offset = cfa_offset - (npushed * TARGET_SIZEOF_VOID_P);
	lr_offset = 4;
	/* Save info struct which is in r0 */
	ARM_STR_IMM (code, arg_reg, fp, info_offset);
	/* Save rgctx reg */
	ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, fp, mrgctx_offset);

	/* Allocate callee area */
	ARM_LDR_IMM (code, ARMREG_IP, arg_reg, MONO_STRUCT_OFFSET (GSharedVtCallInfo, stack_usage));
	ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
	/* Allocate callee register area just below the callee area so the slots are correct */
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 4 * TARGET_SIZEOF_VOID_P);

	if (mono_arm_is_hard_float ()) {
		/* Save caller fregs */
		ARM_SUB_REG_IMM8 (code, ARMREG_IP, fp, -caller_freg_area_offset);
		for (i = 0; i < 8; ++i)
			ARM_FSTD (code, i * 2, ARMREG_IP, (i * sizeof (double)));
	}

	/*
	 * The stack now looks like this:
	 * <caller frame>
	 * <saved r0-r3, lr>
	 * <saved fp> <- fp
	 * <our frame>
	 * <callee area> <- sp
	 */
	g_assert (mono_arm_thumb_supported ());

	/* Call start_gsharedvt_call () */
	/* 6 arguments, needs 2 stack slot, need to clean it up after the call */
	args_size = 2 * TARGET_SIZEOF_VOID_P;
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, args_size);
	/* arg1 == info */
	ARM_LDR_IMM (code, ARMREG_R0, fp, info_offset);
	/* arg2 == caller stack area */
	ARM_ADD_REG_IMM8 (code, ARMREG_R1, fp, cfa_offset - 4 * TARGET_SIZEOF_VOID_P);
	/* arg3 == callee stack area */
	ARM_ADD_REG_IMM8 (code, ARMREG_R2, ARMREG_SP, args_size);
	/* arg4 == mrgctx reg */
	ARM_LDR_IMM (code, ARMREG_R3, fp, mrgctx_offset);
	/* arg5 == caller freg area */
	ARM_SUB_REG_IMM8 (code, ARMREG_IP, fp, -caller_freg_area_offset);
	ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, 0);
	/* arg6 == callee freg area */
	ARM_SUB_REG_IMM8 (code, ARMREG_IP, fp, -callee_freg_area_offset);
	ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, 4);

	/* Make the call */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_arm_start_gsharedvt_call");
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(gpointer*)code = NULL;
		code += 4;
		ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_PC, ARMREG_IP);
	} else {
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(gpointer*)code = (gpointer)mono_arm_start_gsharedvt_call;
		code += 4;
	}
	ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
	code = emit_bx (code, ARMREG_IP);

	/* Clean up stack */
	ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, args_size);

	/* Make the real method call */
	/* R0 contains the addr to call */
	ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_R0);
	/* Load argument registers */
	ARM_LDM (code, ARMREG_SP, (1 << ARMREG_R0) | (1 << ARMREG_R1) | (1 << ARMREG_R2) | (1 << ARMREG_R3));
	if (mono_arm_is_hard_float ()) {
		/* Load argument fregs */
		ARM_SUB_REG_IMM8 (code, ARMREG_LR, fp, -callee_freg_area_offset);
		for (i = 0; i < 8; ++i)
			ARM_FLDD (code, i * 2, ARMREG_LR, (i * sizeof (double)));
	}
	/* Pop callee register area */
	ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 4 * TARGET_SIZEOF_VOID_P);
	/* Load rgctx */
	ARM_LDR_IMM (code, MONO_ARCH_RGCTX_REG, fp, mrgctx_offset);
	/* Make the call */
#if 0
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, addr));
#endif
	/* mono_arch_find_imt_method () depends on this */
	ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
	ARM_BX (code, ARMREG_IP);
	*((gpointer*)code) = NULL;
	code += 4;

	br_ret_index = 0;

	/* Branch between IN/OUT cases */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, gsharedvt_in));
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, 1);
	br_out = code;
	ARM_B_COND (code, ARMCOND_NE, 0);

	/* IN CASE */

	/* LR == return marshalling type */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal));

	/* Continue if no marshalling required */
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_NONE);
	br_ret [br_ret_index ++] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);

	/* Compute vret area address in LR */
	ARM_LDR_IMM (code, ARMREG_LR, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_LR, ARMREG_LR, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_slot));
	/* The slot value is off by 4 */
	ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
	ARM_SHL_IMM (code, ARMREG_LR, ARMREG_LR, 2);
	ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_SP);

	/* Branch to specific marshalling code */
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_IREG);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_IREGS);
	br [1] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_I1);
	br [2] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_U1);
	br [3] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_I2);
	br [4] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_U2);
	br [5] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_VFP_R4);
	br [6] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_VFP_R8);
	br [7] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	/* IN IREG case */
	arm_patch (br [0], code);
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* IN IREGS case */
	arm_patch (br [1], code);
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, 4);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* I1 case */
	arm_patch (br [2], code);
	ARM_LDRSB_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* U1 case */
	arm_patch (br [3], code);
	ARM_LDRB_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* I2 case */
	arm_patch (br [4], code);
	ARM_LDRSH_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* U2 case */
	arm_patch (br [5], code);
	ARM_LDRH_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* R4 case */
	arm_patch (br [6], code);
	ARM_FLDS (code, ARM_VFP_D0, ARMREG_LR, 0);
	code += 4;
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* R8 case */
	arm_patch (br [7], code);
	ARM_FLDD (code, ARM_VFP_D0, ARMREG_LR, 0);
	code += 4;
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	/* OUT CASE */
	arm_patch (br_out, code);

	/* Marshal return value */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal));

	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_IREGS);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);

	/* OUT IREGS case */
	/* Load vtype ret addr from the caller arg regs */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));
	ARM_SHL_IMM (code, ARMREG_IP, ARMREG_IP, 2);
	ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, fp);
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_IP, caller_reg_area_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Save both registers for simplicity */
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
	ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, 4);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	arm_patch (br [0], code);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_IREG);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);

	/* OUT IREG case */
	/* Load vtype ret addr from the caller arg regs */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));
	ARM_SHL_IMM (code, ARMREG_IP, ARMREG_IP, 2);
	ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, fp);
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_IP, caller_reg_area_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Save the return value to the buffer pointed to by the vret addr */
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	arm_patch (br [0], code);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_U1);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);

	/* OUT U1 case */
	/* Load vtype ret addr from the caller arg regs */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));
	ARM_SHL_IMM (code, ARMREG_IP, ARMREG_IP, 2);
	ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, fp);
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_IP, caller_reg_area_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Save the return value to the buffer pointed to by the vret addr */
	ARM_STRB_IMM (code, ARMREG_R0, ARMREG_IP, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	arm_patch (br [0], code);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_VFP_R4);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);

	/* OUT R4 case */
	/* Load vtype ret addr from the caller arg regs */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));
	ARM_SHL_IMM (code, ARMREG_IP, ARMREG_IP, 2);
	ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, fp);
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_IP, caller_reg_area_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Save the return value to the buffer pointed to by the vret addr */
	ARM_FSTS (code, ARM_VFP_D0, ARMREG_IP, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	arm_patch (br [0], code);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_VFP_R8);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);

	/* OUT R8 case */
	/* Load vtype ret addr from the caller arg regs */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));
	ARM_SHL_IMM (code, ARMREG_IP, ARMREG_IP, 2);
	ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, fp);
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_IP, caller_reg_area_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Save the return value to the buffer pointed to by the vret addr */
	ARM_FSTD (code, ARM_VFP_D0, ARMREG_IP, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	arm_patch (br [0], code);

	/* OUT other cases */
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	for (i = 0; i < br_ret_index; ++i)
		arm_patch (br_ret [i], code);

	/* Normal return */
	/* Restore registers + stack */
	ARM_MOV_REG_REG (code, ARMREG_SP, fp);
	ARM_LDM (code, fp, (1 << fp) | (1 << ARMREG_LR));
	ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, cfa_offset);
	/* Return */
	ARM_BX (code, ARMREG_LR);

	g_assert ((code - buf) < buf_len);

	if (info)
		*info = mono_tramp_info_create ("gsharedvt_trampoline", buf, code - buf, ji, unwind_ops);

	mono_arch_flush_icache (buf, code - buf);

	return buf;
}