/*
 * encode_setup_trampoline:
 *
 * Emit a small setup trampoline into BUFFER that calls magic_trampoline
 * with the compilation unit (cu) as its argument.
 *
 * We have to pass the address of cu so that we can call magic_trampoline.
 * But the addr of cu is 32 bit and we can use only an 8-bit imm, so we
 * need to have the addr of cu in our constant pool, which will be emitted
 * at the start.
 *
 * buffer:      output buffer the instruction words are appended to
 * cu_addr:     address of the compilation unit, spilled to the constant pool
 * target_addr: address of jit_magic_trampoline, spilled to the constant pool
 *
 * Emitted layout (one 32-bit word each):
 *   [0] B +4                    ; skip the two constant-pool words
 *   [1] cu_addr                 ; constant pool entry
 *   [2] target_addr             ; constant pool entry
 *   [3] LDR r0, [pc, #-0x10]    ; pc reads as [3]+8 = [5]; [5]-0x10 = [1]
 *   [4] SUB lr, pc, #0          ; save return address
 *   [5] LDR pc, [pc, #-0x14]    ; pc reads as [5]+8 = [7]; [7]-0x14 = [2]
 */
void encode_setup_trampoline(struct buffer *buffer, uint32_t cu_addr, uint32_t target_addr)
{
	uint32_t encoded_insn;

	/*
	 * Branch over the constant pool directly. This branch is added
	 * because just after this insn the addresses of cu and
	 * jit_magic_trampoline are emitted, which are not instructions.
	 */
	emit32(buffer, ARM_B(ARM_BRANCH_OFFSET(0x04)));

	/* Emit the address of cu */
	emit32(buffer, cu_addr);

	/* Emit the address of magic_trampoline */
	emit32(buffer, target_addr);

	/* Load the addr of cu in R0 (first argument to the trampoline) */
	encoded_insn = arm_encode_table[INSN_LDR_REG_MEMLOCAL];
	encoded_insn = encoded_insn | IMM_OFFSET_SUB
		| ((arm_encode_reg(MACH_REG_R0) & 0xF) << 12)	/* Rd = R0 */
		| ((arm_encode_reg(MACH_REG_PC) & 0xF) << 16)	/* Rn = PC: pc-relative load */
		| (0x010);					/* negative offset back into the pool */
	emit32(buffer, encoded_insn);

	/*
	 * Call jit_magic_trampoline. First store the value of PC in LR and
	 * then load the address of magic_trampoline from the constant pool
	 * straight into PC (the load doubles as the branch).
	 */
	emit32(buffer, ARM_SUB_IMM8(ARM_LR, ARM_PC, 0));

	encoded_insn = arm_encode_table[INSN_LDR_REG_MEMLOCAL];
	encoded_insn = encoded_insn | IMM_OFFSET_SUB
		| ((arm_encode_reg(MACH_REG_PC) & 0xF) << 12)	/* Rd = PC: acts as a jump */
		| ((arm_encode_reg(MACH_REG_PC) & 0xF) << 16)	/* Rn = PC */
		| (0x014);
	emit32(buffer, encoded_insn);
}
/**
 * get_throw_trampoline:
 *
 * Returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc); or
 * void (*func) (guint32 ex_token, guint8* ip);
 *
 * size:          bytes reserved from the global code manager for the trampoline
 * corlib:        caller ip is already in R1 and the by-token thrower is used
 * rethrow:       OR-ed into the low bit of the ip argument (see below)
 * llvm:          compensate for the ip adjustment done in mono_arm_throw_exception
 * resume_unwind: dispatch to mono_arm_resume_unwind instead of a thrower
 * tramp_name:    name recorded in the returned MonoTrampInfo
 * info:          if non-NULL, receives a MonoTrampInfo describing the trampoline
 * aot:           emit a patchable indirect load instead of a hardcoded address
 */
static gpointer
get_throw_trampoline (int size, gboolean corlib, gboolean rethrow, gboolean llvm, gboolean resume_unwind, const char *tramp_name, MonoTrampInfo **info, gboolean aot)
{
	guint8 *start;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int cfa_offset;	/* running distance from SP to the canonical frame address */

	code = start = mono_global_codeman_reserve (size);

	mono_add_unwind_op_def_cfa (unwind_ops, code, start, ARMREG_SP, 0);

	/* save all the regs on the stack */
	ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
	ARM_PUSH (code, MONO_ARM_REGSAVE_MASK);

	cfa_offset = MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, start, ARMREG_SP, cfa_offset);
	/* LR is the topmost pushed register, one word below the CFA */
	mono_add_unwind_op_offset (unwind_ops, code, start, ARMREG_LR, - sizeof (mgreg_t));

	/* Save fp regs */
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (double) * 16);
	cfa_offset += sizeof (double) * 16;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, cfa_offset);
#if defined(ARM_FPU_VFP)
	ARM_FSTMD (code, ARM_VFP_D0, 16, ARMREG_SP);
#endif

	/* Param area (for the 5th argument passed on the stack) */
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
	cfa_offset += 8;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, cfa_offset);

	/* call throw_exception (exc, ip, sp, int_regs, fp_regs) */
	/* caller sp */
	ARM_ADD_REG_IMM8 (code, ARMREG_R2, ARMREG_SP, cfa_offset);
	/* exc is already in place in r0 */
	if (corlib) {
		/* The caller ip is already in R1 */
		if (llvm)
			/* Negate the ip adjustment done in mono_arm_throw_exception */
			ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
	} else {
		ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR); /* caller ip */
	}
	/* int regs: start of the integer register save area on the stack */
	ARM_ADD_REG_IMM8 (code, ARMREG_R3, ARMREG_SP, (cfa_offset - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t))));
	/* we encode rethrow in the ip (its low bit; ARM code addresses are 4-aligned) */
	ARM_ORR_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, rethrow);
	/* fp regs: 5th argument goes in the param area at [sp, #0] */
	ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_SP, 8);
	ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, 0);

	if (aot) {
		const char *icall_name;

		if (resume_unwind)
			icall_name = "mono_arm_resume_unwind";
		else if (corlib)
			icall_name = "mono_arm_throw_exception_by_token";
		else
			icall_name = "mono_arm_throw_exception";

		/* Record a patch so the AOT compiler fills in the literal word below */
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);	/* skip the inline literal word */
		*(gpointer*)(gpointer)code = NULL;
		code += 4;
		ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_PC, ARMREG_IP);
	} else {
		code = mono_arm_emit_load_imm (code, ARMREG_IP, GPOINTER_TO_UINT (resume_unwind ? (gpointer)mono_arm_resume_unwind : (corlib ? (gpointer)mono_arm_throw_exception_by_token : (gpointer)mono_arm_throw_exception)));
	}
	/* Manual call sequence: set LR, then jump through IP */
	ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
	ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
	/* we should never reach this breakpoint */
	ARM_DBRK (code);
	g_assert ((code - start) < size);
	mono_arch_flush_icache (start, code - start);

	if (info)
		*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);

	return start;
}
/** * get_throw_trampoline: * * Returns a function pointer which can be used to raise * exceptions. The returned function has the following * signature: void (*func) (MonoException *exc); or * void (*func) (guint32 ex_token, guint8* ip); * */ static gpointer get_throw_trampoline (int size, gboolean corlib, gboolean rethrow, gboolean llvm, gboolean resume_unwind, const char *tramp_name, MonoTrampInfo **info, gboolean aot) { guint8 *start; guint8 *code; MonoJumpInfo *ji = NULL; GSList *unwind_ops = NULL; code = start = mono_global_codeman_reserve (size); mono_add_unwind_op_def_cfa (unwind_ops, code, start, ARMREG_SP, 0); /* save all the regs on the stack */ ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP); ARM_PUSH (code, MONO_ARM_REGSAVE_MASK); mono_add_unwind_op_def_cfa (unwind_ops, code, start, ARMREG_SP, 10 * 4); mono_add_unwind_op_offset (unwind_ops, code, start, ARMREG_LR, -4); /* call throw_exception (exc, ip, sp, int_regs, fp_regs) */ /* caller sp */ ARM_ADD_REG_IMM8 (code, ARMREG_R2, ARMREG_SP, 10 * 4); /* 10 saved regs */ /* exc is already in place in r0 */ if (corlib) { /* The caller ip is already in R1 */ if (llvm) /* Negate the ip adjustment done in mono_arm_throw_exception */ ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4); } else { ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR); /* caller ip */ } /* FIXME: pointer to the saved fp regs */ /*pos = alloc_size - sizeof (double) * MONO_SAVED_FREGS; ppc_addi (code, ppc_r7, ppc_sp, pos);*/ /* pointer to the saved int regs */ ARM_MOV_REG_REG (code, ARMREG_R3, ARMREG_SP); /* the pushed regs */ /* we encode rethrow in the ip, so we avoid args on the stack */ ARM_ORR_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, rethrow); if (aot) { ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? 
"mono_arm_throw_exception_by_token" : "mono_arm_throw_exception"); ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0); ARM_B (code, 0); *(gpointer*)(gpointer)code = NULL; code += 4; ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_PC, ARMREG_IP); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, GPOINTER_TO_UINT (resume_unwind ? (gpointer)mono_arm_resume_unwind : (corlib ? (gpointer)mono_arm_throw_exception_by_token : (gpointer)mono_arm_throw_exception))); } ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC); ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP); /* we should never reach this breakpoint */ ARM_DBRK (code); g_assert ((code - start) < size); mono_arch_flush_icache (start, code - start); if (info) *info = mono_tramp_info_create (g_strdup_printf (tramp_name), start, code - start, ji, unwind_ops); return start; }
/*
 * mono_arch_get_gsharedvt_trampoline:
 *
 * Emit the ARM gsharedvt transition trampoline. It saves the call info
 * (arriving in r0) and the rgctx register, allocates a callee argument
 * area sized by info->stack_usage, calls mono_arm_start_gsharedvt_call
 * to fill that area, performs the real method call, and finally marshals
 * the return value according to info->ret_marshal — one branch-table arm
 * per GSHAREDVT_RET_* kind, split into the gsharedvt_in and out cases.
 *
 * info: if non-NULL, receives a MonoTrampInfo describing the trampoline.
 * aot:  emit a patchable literal for the icall address instead of a
 *       hardcoded pointer.
 */
gpointer
mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int buf_len, cfa_offset;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	/* Forward-branch fixups: br[] for the marshal dispatch, br_ret[] for jumps to the common epilogue */
	guint8 *br_out, *br [16], *br_ret [16];
	int i, offset, arg_reg, npushed, info_offset, mrgctx_offset;
	int caller_reg_area_offset, caller_freg_area_offset, callee_reg_area_offset, callee_freg_area_offset;
	int lr_offset, fp, br_ret_index, args_size;

	buf_len = 784;
	buf = code = mono_global_codeman_reserve (buf_len);

	arg_reg = ARMREG_R0;
	/* Registers pushed by the arg trampoline */
	npushed = 4;

	// ios abi compatible frame
	fp = ARMREG_R7;

	cfa_offset = npushed * TARGET_SIZEOF_VOID_P;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, ARMREG_SP, cfa_offset);
	ARM_PUSH (code, (1 << fp) | (1 << ARMREG_LR));
	cfa_offset += 2 * TARGET_SIZEOF_VOID_P;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, fp, (- cfa_offset));
	mono_add_unwind_op_offset (unwind_ops, code, buf, ARMREG_LR, ((- cfa_offset) + 4));
	ARM_MOV_REG_REG (code, fp, ARMREG_SP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, fp);

	/* Allocate stack frame */
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 32 + (16 * sizeof (double)));
	if (MONO_ARCH_FRAME_ALIGNMENT > 8)
		ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, (MONO_ARCH_FRAME_ALIGNMENT - 8));

	/* Lay out the frame-local slots; all offsets are negative, i.e. below fp */
	offset = 4;
	info_offset = -offset;			/* saved GSharedVtCallInfo* (from r0) */
	offset += 4;
	mrgctx_offset = -offset;		/* saved rgctx register */
	offset += 4 * 4;
	callee_reg_area_offset = -offset;	/* r0-r3 for the real call */
	offset += 8 * 8;
	caller_freg_area_offset = -offset;	/* d0-d7 saved from the caller */
	offset += 8 * 8;
	callee_freg_area_offset = -offset;	/* d0-d7 for the real call */
	/* The caller's r0-r3 pushed by the arg trampoline sit just below the original CFA */
	caller_reg_area_offset = cfa_offset - (npushed * TARGET_SIZEOF_VOID_P);
	/* NOTE(review): lr_offset is assigned but never used below — confirm it can be removed */
	lr_offset = 4;

	/* Save info struct which is in r0 */
	ARM_STR_IMM (code, arg_reg, fp, info_offset);
	/* Save rgctx reg */
	ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, fp, mrgctx_offset);

	/* Allocate callee area */
	ARM_LDR_IMM (code, ARMREG_IP, arg_reg, MONO_STRUCT_OFFSET (GSharedVtCallInfo, stack_usage));
	ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
	/* Allocate callee register area just below the callee area so the slots are correct */
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 4 * TARGET_SIZEOF_VOID_P);

	if (mono_arm_is_hard_float ()) {
		/* Save caller fregs (d0-d7, i.e. VFP regs 0,2,...,14) */
		ARM_SUB_REG_IMM8 (code, ARMREG_IP, fp, -caller_freg_area_offset);
		for (i = 0; i < 8; ++i)
			ARM_FSTD (code, i * 2, ARMREG_IP, (i * sizeof (double)));
	}

	/*
	 * The stack now looks like this:
	 * <caller frame>
	 * <saved r0-r3, lr>
	 * <saved fp> <- fp
	 * <our frame>
	 * <callee area> <- sp
	 */
	g_assert (mono_arm_thumb_supported ());

	/* Call start_gsharedvt_call () */
	/* 6 arguments, needs 2 stack slot, need to clean it up after the call */
	args_size = 2 * TARGET_SIZEOF_VOID_P;
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, args_size);
	/* arg1 == info */
	ARM_LDR_IMM (code, ARMREG_R0, fp, info_offset);
	/* arg2 == caller stack area */
	ARM_ADD_REG_IMM8 (code, ARMREG_R1, fp, cfa_offset - 4 * TARGET_SIZEOF_VOID_P);
	/* arg3 == callee stack area */
	ARM_ADD_REG_IMM8 (code, ARMREG_R2, ARMREG_SP, args_size);
	/* arg4 == mrgctx reg */
	ARM_LDR_IMM (code, ARMREG_R3, fp, mrgctx_offset);
	/* arg5 == caller freg area (passed on the stack at [sp, #0]) */
	ARM_SUB_REG_IMM8 (code, ARMREG_IP, fp, -caller_freg_area_offset);
	ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, 0);
	/* arg6 == callee freg area (passed on the stack at [sp, #4]) */
	ARM_SUB_REG_IMM8 (code, ARMREG_IP, fp, -callee_freg_area_offset);
	ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, 4);

	/* Make the call */
	if (aot) {
		/* Record a patch so the AOT compiler fills in the literal word below */
		ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_arm_start_gsharedvt_call");
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);	/* skip the inline literal word */
		*(gpointer*)code = NULL;
		code += 4;
		ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_PC, ARMREG_IP);
	} else {
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);	/* skip the inline literal word */
		*(gpointer*)code = (gpointer)mono_arm_start_gsharedvt_call;
		code += 4;
	}
	ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
	code = emit_bx (code, ARMREG_IP);

	/* Clean up stack */
	ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, args_size);

	/* Make the real method call */
	/* R0 contains the addr to call */
	ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_R0);
	/* Load argument registers from the callee register area (at sp) */
	ARM_LDM (code, ARMREG_SP, (1 << ARMREG_R0) | (1 << ARMREG_R1) | (1 << ARMREG_R2) | (1 << ARMREG_R3));
	if (mono_arm_is_hard_float ()) {
		/* Load argument fregs */
		ARM_SUB_REG_IMM8 (code, ARMREG_LR, fp, -callee_freg_area_offset);
		for (i = 0; i < 8; ++i)
			ARM_FLDD (code, i * 2, ARMREG_LR, (i * sizeof (double)));
	}
	/* Pop callee register area */
	ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 4 * TARGET_SIZEOF_VOID_P);
	/* Load rgctx */
	ARM_LDR_IMM (code, MONO_ARCH_RGCTX_REG, fp, mrgctx_offset);
	/* Make the call */
#if 0
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, addr));
#endif
	/* mono_arch_find_imt_method () depends on this */
	ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
	ARM_BX (code, ARMREG_IP);
	*((gpointer*)code) = NULL;	/* word skipped by the LR adjustment above */
	code += 4;

	br_ret_index = 0;

	/* Branch between IN/OUT cases */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, gsharedvt_in));
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, 1);
	br_out = code;
	ARM_B_COND (code, ARMCOND_NE, 0);

	/* IN CASE */

	/* LR == return marshalling type */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal));
	/* Continue if no marshalling required */
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_NONE);
	br_ret [br_ret_index ++] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);

	/* Compute vret area address in LR */
	ARM_LDR_IMM (code, ARMREG_LR, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_LR, ARMREG_LR, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_slot));
	/* The slot value is off by 4 */
	ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
	ARM_SHL_IMM (code, ARMREG_LR, ARMREG_LR, 2);
	ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_SP);

	/* Branch to specific marshalling code; br[k] fixed up at each case below */
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_IREG);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_IREGS);
	br [1] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_I1);
	br [2] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_U1);
	br [3] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_I2);
	br [4] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_U2);
	br [5] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_VFP_R4);
	br [6] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_VFP_R8);
	br [7] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	/* No marshalling kind matched: fall through to the epilogue */
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	/* IN IREG case */
	arm_patch (br [0], code);
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* IN IREGS case */
	arm_patch (br [1], code);
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, 4);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* I1 case */
	arm_patch (br [2], code);
	ARM_LDRSB_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* U1 case */
	arm_patch (br [3], code);
	ARM_LDRB_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* I2 case */
	arm_patch (br [4], code);
	ARM_LDRSH_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* U2 case */
	arm_patch (br [5], code);
	ARM_LDRH_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* R4 case */
	arm_patch (br [6], code);
	ARM_FLDS (code, ARM_VFP_D0, ARMREG_LR, 0);
	/* NOTE(review): this bare 4-byte skip after the fp load looks suspicious — verify intent */
	code += 4;
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* R8 case */
	arm_patch (br [7], code);
	ARM_FLDD (code, ARM_VFP_D0, ARMREG_LR, 0);
	/* NOTE(review): see the R4 case above */
	code += 4;
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	/* OUT CASE */
	arm_patch (br_out, code);

	/* Marshal return value */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal));

	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_IREGS);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);

	/* OUT IREGS case */
	/* Load vtype ret addr from the caller arg regs */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));
	ARM_SHL_IMM (code, ARMREG_IP, ARMREG_IP, 2);
	ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, fp);
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_IP, caller_reg_area_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Save both registers for simplicity */
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
	ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, 4);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	arm_patch (br [0], code);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_IREG);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);

	/* OUT IREG case */
	/* Load vtype ret addr from the caller arg regs */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));
	ARM_SHL_IMM (code, ARMREG_IP, ARMREG_IP, 2);
	ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, fp);
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_IP, caller_reg_area_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Save the return value to the buffer pointed to by the vret addr */
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	arm_patch (br [0], code);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_U1);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);

	/* OUT U1 case */
	/* Load vtype ret addr from the caller arg regs */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));
	ARM_SHL_IMM (code, ARMREG_IP, ARMREG_IP, 2);
	ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, fp);
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_IP, caller_reg_area_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Save the return value to the buffer pointed to by the vret addr */
	ARM_STRB_IMM (code, ARMREG_R0, ARMREG_IP, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	arm_patch (br [0], code);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_VFP_R4);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);

	/* OUT R4 case */
	/* Load vtype ret addr from the caller arg regs */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));
	ARM_SHL_IMM (code, ARMREG_IP, ARMREG_IP, 2);
	ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, fp);
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_IP, caller_reg_area_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Save the return value to the buffer pointed to by the vret addr */
	ARM_FSTS (code, ARM_VFP_D0, ARMREG_IP, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	arm_patch (br [0], code);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_VFP_R8);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);

	/* OUT R8 case */
	/* Load vtype ret addr from the caller arg regs */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));
	ARM_SHL_IMM (code, ARMREG_IP, ARMREG_IP, 2);
	ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, fp);
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_IP, caller_reg_area_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Save the return value to the buffer pointed to by the vret addr */
	ARM_FSTD (code, ARM_VFP_D0, ARMREG_IP, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	arm_patch (br [0], code);

	/* OUT other cases */
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	/* Point every recorded "done" branch at the common epilogue */
	for (i = 0; i < br_ret_index; ++i)
		arm_patch (br_ret [i], code);

	/* Normal return */
	/* Restore registers + stack */
	ARM_MOV_REG_REG (code, ARMREG_SP, fp);
	ARM_LDM (code, fp, (1 << fp) | (1 << ARMREG_LR));
	/* Also pop the r0-r3 pushed by the arg trampoline (cfa_offset covers both) */
	ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, cfa_offset);
	/* Return */
	ARM_BX (code, ARMREG_LR);

	g_assert ((code - buf) < buf_len);

	if (info)
		*info = mono_tramp_info_create ("gsharedvt_trampoline", buf, code - buf, ji, unwind_ops);

	mono_arch_flush_icache (buf, code - buf);
	return buf;
}
/* * Returns a pointer to a native function that can be used to * call the specified method. * The function created will receive the arguments according * to the call convention specified in the method. * This function works by creating a MonoInvocation structure, * filling the fields in and calling ves_exec_method on it. * Still need to figure out how to handle the exception stuff * across the managed/unmanaged boundary. */ void* mono_arch_create_method_pointer (MonoMethod* method) { MonoMethodSignature* sig; guchar* p, * p_method, * p_stackval_from_data, * p_exec; void* code_buff; int i, stack_size, arg_pos, arg_add, stackval_pos, offs; int areg, reg_args, shift, pos; MonoJitInfo *ji; code_buff = alloc_code_buff(128); p = (guchar*)code_buff; sig = method->signature; ARM_B(p, 3); /* embed magic number followed by method pointer */ *p++ = 'M'; *p++ = 'o'; *p++ = 'n'; *p++ = 'o'; /* method ptr */ *(void**)p = method; p_method = p; p += 4; /* call table */ *(void**)p = stackval_from_data; p_stackval_from_data = p; p += 4; *(void**)p = ves_exec_method; p_exec = p; p += 4; stack_size = sizeof(MonoInvocation) + ARG_SIZE*(sig->param_count + 1) + ARM_NUM_ARG_REGS*2*sizeof(armword_t); /* prologue */ p = (guchar*)arm_emit_lean_prologue((arminstr_t*)p, stack_size, (1 << ARMREG_R4) | (1 << ARMREG_R5) | (1 << ARMREG_R6) | (1 << ARMREG_R7)); /* R7 - ptr to stack args */ ARM_MOV_REG_REG(p, ARMREG_R7, ARMREG_IP); /* * Initialize MonoInvocation fields, first the ones known now. */ ARM_MOV_REG_IMM8(p, ARMREG_R4, 0); ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(ex)); ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(ex_handler)); ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(parent)); /* Set the method pointer. 
*/ ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, -(int)(p - p_method + sizeof(arminstr_t)*2)); ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(method)); if (sig->hasthis) { /* [this] in A1 */ ARM_STR_IMM(p, ARMREG_A1, ARMREG_SP, MINV_OFFS(obj)); } else { /* else set minv.obj to NULL */ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(obj)); } /* copy args from registers to stack */ areg = ARMREG_A1 + sig->hasthis; arg_pos = -(int)(ARM_NUM_ARG_REGS - sig->hasthis) * 2 * sizeof(armword_t); arg_add = 0; for (i = 0; i < sig->param_count; ++i) { if (areg >= ARM_NUM_ARG_REGS) break; ARM_STR_IMM(p, areg, ARMREG_R7, arg_pos); ++areg; if (!sig->params[i]->byref) { switch (sig->params[i]->type) { case MONO_TYPE_I8: case MONO_TYPE_U8: case MONO_TYPE_R8: if (areg >= ARM_NUM_ARG_REGS) { /* load second half of 64-bit arg */ ARM_LDR_IMM(p, ARMREG_R4, ARMREG_R7, 0); ARM_STR_IMM(p, ARMREG_R4, ARMREG_R7, arg_pos + sizeof(armword_t)); arg_add = sizeof(armword_t); } else { /* second half is already the register */ ARM_STR_IMM(p, areg, ARMREG_R7, arg_pos + sizeof(armword_t)); ++areg; } break; case MONO_TYPE_VALUETYPE: /* assert */ default: break; } } arg_pos += 2 * sizeof(armword_t); } /* number of args passed in registers */ reg_args = i; /* * Calc and save stack args ptr, * args follow MonoInvocation struct on the stack. 
*/ ARM_ADD_REG_IMM8(p, ARMREG_R1, ARMREG_SP, sizeof(MonoInvocation)); ARM_STR_IMM(p, ARMREG_R1, ARMREG_SP, MINV_OFFS(stack_args)); /* convert method args to stackvals */ arg_pos = -(int)(ARM_NUM_ARG_REGS - sig->hasthis) * 2 * sizeof(armword_t); stackval_pos = sizeof(MonoInvocation); for (i = 0; i < sig->param_count; ++i) { if (i < reg_args) { ARM_SUB_REG_IMM8(p, ARMREG_A3, ARMREG_R7, -arg_pos); arg_pos += 2 * sizeof(armword_t); } else { if (arg_pos < 0) arg_pos = 0; pos = arg_pos + arg_add; if (pos <= 0xFF) { ARM_ADD_REG_IMM8(p, ARMREG_A3, ARMREG_R7, pos); } else { if (is_arm_const((armword_t)pos)) { shift = calc_arm_mov_const_shift((armword_t)pos); ARM_ADD_REG_IMM(p, ARMREG_A3, ARMREG_R7, pos >> ((32 - shift) & 31), shift >> 1); } else { p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R6, (armword_t)pos); ARM_ADD_REG_REG(p, ARMREG_A2, ARMREG_R7, ARMREG_R6); } } arg_pos += sizeof(armword_t); if (!sig->params[i]->byref) { switch (sig->params[i]->type) { case MONO_TYPE_I8: case MONO_TYPE_U8: case MONO_TYPE_R8: arg_pos += sizeof(armword_t); break; case MONO_TYPE_VALUETYPE: /* assert */ default: break; } } }