/*
 * Check a 2D array access operation for exception conditions.
 *
 * Emits ARM code into "unroll->out" that validates a two-dimensional
 * array access:
 *   reg  - register holding the array reference
 *   reg2 - register holding the first-dimension index
 *   reg3 - register holding the second-dimension index
 *   pc/label - interpreter re-entry information passed to ReExecute()
 *
 * On success, reg is left holding the address of the array element and
 * reg2/reg3 are clobbered.  On NULL reference or out-of-bounds index,
 * control falls back to the interpreter via ReExecute().
 *
 * NOTE(review): the fixed offsets used below (4, 8, 12, 16, 20, 24, 28,
 * 32) appear to address the element size, data pointer, and per-dimension
 * {lower bound, size, multiplier} fields of the 2D array header — confirm
 * against the array structure definition, which is not visible here.
 */
static void Check2DArrayAccess(MDUnroll *unroll, int reg, int reg2, int reg3,
							   unsigned char *pc, unsigned char *label)
{
#ifndef IL_USE_INTERRUPT_BASED_NULL_POINTER_CHECKS
	md_inst_ptr patch1;	/* forward branch taken when the reference is NULL */
#endif
	md_inst_ptr patch2;	/* forward branch taken when an index is in range */
	md_inst_ptr patch3;	/* forward jump to the re-execution stub */

#ifndef IL_USE_INTERRUPT_BASED_NULL_POINTER_CHECKS
	/* Check the array reference against NULL */
	arm_test_reg_imm8(unroll->out, ARM_CMP, reg, 0);
	patch1 = unroll->out;
	arm_branch_imm(unroll->out, ARM_CC_EQ, 0);
#endif

	/* Check the array bounds.  We assume that we can use the link
	   register as a work register because "lr" would have been
	   saved on entry to "_ILCVMInterpreter" */

	/* First dimension: subtract the value at [reg+12] (presumably the
	   lower bound) and compare unsigned against [reg+16] (presumably
	   the dimension size).  An unsigned "<" also catches indices below
	   the lower bound, since they wrap to large values. */
	arm_load_membase(unroll->out, ARM_WORK, reg, 12);
	arm_alu_reg_reg(unroll->out, ARM_SUB, reg2, reg2, ARM_WORK);
	arm_load_membase(unroll->out, ARM_LINK, reg, 16);
	arm_test_reg_reg(unroll->out, ARM_CMP, reg2, ARM_LINK);
	patch2 = unroll->out;
	arm_branch_imm(unroll->out, ARM_CC_LT_UN, 0);
	/* Out of range: restore the original first index before bailing out */
	arm_alu_reg_reg(unroll->out, ARM_ADD, reg2, reg2, ARM_WORK);
	patch3 = unroll->out;
	arm_jump_imm(unroll->out, 0);

	/* Second dimension: same scheme using [reg+24] and [reg+28] */
	arm_patch(patch2, unroll->out);
	arm_load_membase(unroll->out, ARM_WORK, reg, 24);
	arm_alu_reg_reg(unroll->out, ARM_SUB, reg3, reg3, ARM_WORK);
	arm_load_membase(unroll->out, ARM_LINK, reg, 28);
	arm_test_reg_reg(unroll->out, ARM_CMP, reg3, ARM_LINK);
	patch2 = unroll->out;
	arm_branch_imm(unroll->out, ARM_CC_LT_UN, 0);
	/* Out of range: restore both original indices before bailing out */
	arm_alu_reg_reg(unroll->out, ARM_ADD, reg3, reg3, ARM_WORK);
	arm_load_membase(unroll->out, ARM_WORK, reg, 12);
	arm_alu_reg_reg(unroll->out, ARM_ADD, reg2, reg2, ARM_WORK);

	/* Re-execute the current instruction in the interpreter */
#ifndef IL_USE_INTERRUPT_BASED_NULL_POINTER_CHECKS
	arm_patch(patch1, unroll->out);
#endif
	arm_patch(patch3, unroll->out);
	ReExecute(unroll, pc, label);

	/* Compute the address of the array element:
	   reg = [reg+8] + ((reg2 * [reg+20]) + (reg3 * [reg+32])) * [reg+4]
	   i.e. data base plus the flattened index times the element size
	   (offsets 20/32 presumably being per-dimension multipliers) */
	arm_patch(patch2, unroll->out);
	arm_load_membase(unroll->out, ARM_WORK, reg, 20);
	arm_mul_reg_reg(unroll->out, reg2, reg2, ARM_WORK);
	arm_load_membase(unroll->out, ARM_WORK, reg, 32);
	arm_mul_reg_reg(unroll->out, reg3, reg3, ARM_WORK);
	arm_alu_reg_reg(unroll->out, ARM_ADD, reg2, reg2, reg3);
	arm_load_membase(unroll->out, ARM_WORK, reg, 4);
	arm_mul_reg_reg(unroll->out, reg2, reg2, ARM_WORK);
	arm_load_membase(unroll->out, reg, reg, 8);
	arm_alu_reg_reg(unroll->out, ARM_ADD, reg, reg, reg2);
}
/*
 * Generate the epilog for a function: restore the call-saved registers
 * that the function touched, apply any pending branch fixups that target
 * the epilog, pop the frame, and flush the outstanding constant pool.
 */
void _jit_gen_epilog(jit_gencode_t gen, jit_function_t func)
{
	arm_inst_buf inst;
	int index;
	int restore_mask;
	void **link;
	jit_nint delta;

	/* Initialize the instruction buffer */
	jit_gen_load_inst_ptr(gen, inst);

	/* Build the mask of registers to restore on return: every register
	   in 0..15 that was touched and is not call-clobbered */
	restore_mask = 0;
	for(index = 0; index <= 15; ++index)
	{
		if(jit_reg_is_used(gen->touched, index) &&
		   (_jit_reg_info[index].flags & JIT_REG_CALL_USED) == 0)
		{
			restore_mask |= (1 << index);
		}
	}

	/* Retarget every branch in the epilog fixup chain at the current
	   output position.  The low 24 bits of each fixup word hold the
	   distance back to the previous chain entry, in words; a zero
	   distance terminates the chain.  The previous-entry pointer must
	   be extracted before arm_patch() overwrites the word. */
	link = (void **)(gen->epilog_fixup);
	while(link != 0)
	{
		void **prior;

		delta = (((jit_nint)(link[0])) & 0x00FFFFFF) << 2;
		prior = delta ? (void **)(((unsigned char *)link) - delta) : 0;
		arm_patch(inst, link, arm_inst_get_posn(inst));
		link = prior;
	}
	gen->epilog_fixup = 0;

	/* Pop the local stack frame and return */
	arm_pop_frame(inst, restore_mask);
	jit_gen_save_inst_ptr(gen, inst);

	/* Flush the remainder of the constant pool */
	flush_constants(gen, 1);
}
/*
 * Flush the contents of the constant pool.
 *
 * Emits the accumulated constant words at the current output position
 * and backpatches every instruction that references each constant.
 * "after_epilog" is non-zero when the pool is being placed after the
 * function epilog, in which case no jump over the pool is needed.
 */
static void flush_constants(jit_gencode_t gen, int after_epilog)
{
	arm_inst_buf inst;
	arm_inst_word *patch;	/* location of the jump over an inline pool */
	arm_inst_word *current;
	arm_inst_word *fixup;
	int index, value, offset;

	/* Bail out if there are no constants to flush */
	if(!(gen->num_constants))
	{
		return;
	}

	/* Initialize the cache output pointer */
	jit_gen_load_inst_ptr(gen, inst);

	/* Jump over the constant pool if it is being output inline */
	if(!after_epilog)
	{
		patch = arm_inst_get_posn(inst);
		arm_jump_imm(inst, 0);
	}
	else
	{
		patch = 0;
	}

	/* Align the constant pool on an 8-byte boundary, if requested,
	   by emitting a single padding word */
	if(gen->align_constants && (((int)arm_inst_get_posn(inst)) & 7) != 0)
	{
		arm_inst_add(inst, 0);
	}

	/* Output the constant values and apply the necessary fixups.
	   Each constant carries a chain of fixup locations; the chain link
	   is stored in the instruction's own offset field until it is
	   patched with the real PC-relative offset. */
	for(index = 0; index < gen->num_constants; ++index)
	{
		current = arm_inst_get_posn(inst);	/* position of the constant (not otherwise used) */
		arm_inst_add(inst, gen->constants[index]);
		fixup = gen->fixup_constants[index];
		while(fixup != 0)
		{
			if((*fixup & 0x0F000000) == 0x05000000)
			{
				/* Word constant fixup (single-register load):
				   the 12-bit immediate holds the chain distance
				   until we replace it with the byte offset from
				   the instruction to the constant.  The "- 8"
				   compensates for the ARM pipeline: PC reads as
				   the instruction address plus 8. */
				value = *fixup & 0x0FFF;
				offset = ((arm_inst_get_posn(inst) - 1 - fixup) * 4) - 8;
				*fixup = ((*fixup & ~0x0FFF) | offset);
			}
			else
			{
				/* Floating-point constant fixup: the 8-bit
				   immediate is scaled by 4, so the chain distance
				   is stored /4 and the patched offset is written
				   back divided by 4 as well */
				value = (*fixup & 0x00FF) * 4;
				offset = ((arm_inst_get_posn(inst) - 1 - fixup) * 4) - 8;
				*fixup = ((*fixup & ~0x00FF) | (offset / 4));
			}
			/* Follow the chain backwards; "value" is the distance in
			   instruction words to the previous fixup, zero = end */
			if(value)
			{
				fixup -= value;
			}
			else
			{
				fixup = 0;
			}
		}
	}

	/* Backpatch the jump over the pool if necessary */
	if(!after_epilog)
	{
		arm_patch(inst, patch, arm_inst_get_posn(inst));
	}

	/* Flush the pool state and restart */
	gen->num_constants = 0;
	gen->align_constants = 0;
	gen->first_constant_use = 0;
	jit_gen_save_inst_ptr(gen, inst);
}
/*
 * mono_arch_get_gsharedvt_trampoline:
 *
 *   Generate the ARM trampoline used for gsharedvt (generic sharing for
 * value types) calls.  The trampoline saves the caller's argument
 * registers/fregs, calls mono_arm_start_gsharedvt_call () to marshal the
 * arguments into a callee area, makes the real call, then marshals the
 * return value according to GSharedVtCallInfo.ret_marshal for both the
 * "in" (caller is gsharedvt) and "out" (callee is gsharedvt) directions.
 * Returns the address of the generated code; fills *info with trampoline
 * metadata when non-NULL.  When "aot" is set, the icall address is
 * resolved via a patch-info entry instead of being embedded directly.
 */
gpointer
mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int buf_len, cfa_offset;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	guint8 *br_out, *br [16], *br_ret [16];
	int i, offset, arg_reg, npushed, info_offset, mrgctx_offset;
	int caller_reg_area_offset, caller_freg_area_offset, callee_reg_area_offset, callee_freg_area_offset;
	int lr_offset, fp, br_ret_index, args_size;

	buf_len = 784;
	buf = code = mono_global_codeman_reserve (buf_len);

	/* r0 holds the GSharedVtCallInfo* on entry */
	arg_reg = ARMREG_R0;
	/* Registers pushed by the arg trampoline */
	npushed = 4;

	// ios abi compatible frame
	fp = ARMREG_R7;

	/* Set up the frame and the corresponding unwind info.  The CFA
	   starts npushed words above SP (the r0-r3 pushed by the arg
	   trampoline), then grows by the fp/lr push below. */
	cfa_offset = npushed * TARGET_SIZEOF_VOID_P;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, ARMREG_SP, cfa_offset);
	ARM_PUSH (code, (1 << fp) | (1 << ARMREG_LR));
	cfa_offset += 2 * TARGET_SIZEOF_VOID_P;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, fp, (- cfa_offset));
	mono_add_unwind_op_offset (unwind_ops, code, buf, ARMREG_LR, ((- cfa_offset) + 4));
	ARM_MOV_REG_REG (code, fp, ARMREG_SP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, fp);

	/* Allocate stack frame */
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 32 + (16 * sizeof (double)));
	if (MONO_ARCH_FRAME_ALIGNMENT > 8)
		ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, (MONO_ARCH_FRAME_ALIGNMENT - 8));

	/* Lay out the frame below fp: info ptr, mrgctx, a 4-word callee
	   register area, then two 8-double freg areas (caller/callee) */
	offset = 4;
	info_offset = -offset;
	offset += 4;
	mrgctx_offset = -offset;
	offset += 4 * 4;
	callee_reg_area_offset = -offset;
	offset += 8 * 8;
	caller_freg_area_offset = -offset;
	offset += 8 * 8;
	callee_freg_area_offset = -offset;
	/* The caller's pushed r0-r3 live just above the saved fp/lr pair */
	caller_reg_area_offset = cfa_offset - (npushed * TARGET_SIZEOF_VOID_P);
	lr_offset = 4;	/* NOTE(review): assigned but never referenced below */

	/* Save info struct which is in r0 */
	ARM_STR_IMM (code, arg_reg, fp, info_offset);
	/* Save rgctx reg */
	ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, fp, mrgctx_offset);

	/* Allocate callee area, sized by GSharedVtCallInfo.stack_usage */
	ARM_LDR_IMM (code, ARMREG_IP, arg_reg, MONO_STRUCT_OFFSET (GSharedVtCallInfo, stack_usage));
	ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
	/* Allocate callee register area just below the callee area so the slots are correct */
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 4 * TARGET_SIZEOF_VOID_P);

	if (mono_arm_is_hard_float ()) {
		/* Save caller fregs (d0-d7) into the caller freg area */
		ARM_SUB_REG_IMM8 (code, ARMREG_IP, fp, -caller_freg_area_offset);
		for (i = 0; i < 8; ++i)
			ARM_FSTD (code, i * 2, ARMREG_IP, (i * sizeof (double)));
	}

	/*
	 * The stack now looks like this:
	 * <caller frame>
	 * <saved r0-r3, lr>
	 * <saved fp> <- fp
	 * <our frame>
	 * <callee area> <- sp
	 */
	g_assert (mono_arm_thumb_supported ());

	/* Call start_gsharedvt_call () */
	/* 6 arguments, needs 2 stack slot, need to clean it up after the call */
	args_size = 2 * TARGET_SIZEOF_VOID_P;
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, args_size);
	/* arg1 == info */
	ARM_LDR_IMM (code, ARMREG_R0, fp, info_offset);
	/* arg2 == caller stack area */
	ARM_ADD_REG_IMM8 (code, ARMREG_R1, fp, cfa_offset - 4 * TARGET_SIZEOF_VOID_P);
	/* arg3 == callee stack area */
	ARM_ADD_REG_IMM8 (code, ARMREG_R2, ARMREG_SP, args_size);
	/* arg4 == mrgctx reg */
	ARM_LDR_IMM (code, ARMREG_R3, fp, mrgctx_offset);
	/* arg5 == caller freg area (passed on the stack) */
	ARM_SUB_REG_IMM8 (code, ARMREG_IP, fp, -caller_freg_area_offset);
	ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, 0);
	/* arg6 == callee freg area (passed on the stack) */
	ARM_SUB_REG_IMM8 (code, ARMREG_IP, fp, -callee_freg_area_offset);
	ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, 4);

	/* Make the call: load the target into IP from an inline literal
	   word that the "B" skips over (pc-relative; PC reads as +8) */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_arm_start_gsharedvt_call");
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(gpointer*)code = NULL;	/* patched by the AOT loader */
		code += 4;
		ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_PC, ARMREG_IP);
	} else {
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(gpointer*)code = (gpointer)mono_arm_start_gsharedvt_call;
		code += 4;
	}
	ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
	code = emit_bx (code, ARMREG_IP);

	/* Clean up stack */
	ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, args_size);

	/* Make the real method call */
	/* R0 contains the addr to call */
	ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_R0);
	/* Load argument registers from the callee register area at SP */
	ARM_LDM (code, ARMREG_SP, (1 << ARMREG_R0) | (1 << ARMREG_R1) | (1 << ARMREG_R2) | (1 << ARMREG_R3));
	if (mono_arm_is_hard_float ()) {
		/* Load argument fregs (d0-d7) from the callee freg area */
		ARM_SUB_REG_IMM8 (code, ARMREG_LR, fp, -callee_freg_area_offset);
		for (i = 0; i < 8; ++i)
			ARM_FLDD (code, i * 2, ARMREG_LR, (i * sizeof (double)));
	}
	/* Pop callee register area */
	ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 4 * TARGET_SIZEOF_VOID_P);
	/* Load rgctx */
	ARM_LDR_IMM (code, MONO_ARCH_RGCTX_REG, fp, mrgctx_offset);
	/* Make the call */
#if 0
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, addr));
#endif
	/* mono_arch_find_imt_method () depends on this: LR is set so the
	   return lands just past the literal word emitted below */
	ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
	ARM_BX (code, ARMREG_IP);
	*((gpointer*)code) = NULL;
	code += 4;

	br_ret_index = 0;

	/* Branch between IN/OUT cases */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, gsharedvt_in));
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, 1);
	br_out = code;
	ARM_B_COND (code, ARMCOND_NE, 0);

	/* IN CASE */

	/* LR == return marshalling type */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal));

	/* Continue if no marshalling required */
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_NONE);
	br_ret [br_ret_index ++] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);

	/* Compute vret area address in LR from the slot index */
	ARM_LDR_IMM (code, ARMREG_LR, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_LR, ARMREG_LR, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_slot));
	/* The slot value is off by 4 */
	ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
	ARM_SHL_IMM (code, ARMREG_LR, ARMREG_LR, 2);
	ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_SP);

	/* Branch to specific marshalling code: dispatch on IP (ret_marshal)
	   through br[0..7]; non-matching falls through to a plain return */
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_IREG);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_IREGS);
	br [1] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_I1);
	br [2] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_U1);
	br [3] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_I2);
	br [4] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_U2);
	br [5] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_VFP_R4);
	br [6] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_VFP_R8);
	br [7] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	/* IN IREG case: load the word result from the vret area into r0 */
	arm_patch (br [0], code);
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* IN IREGS case: load a two-word result into r0/r1 */
	arm_patch (br [1], code);
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, 4);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* I1 case: sign-extending byte load */
	arm_patch (br [2], code);
	ARM_LDRSB_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* U1 case: zero-extending byte load */
	arm_patch (br [3], code);
	ARM_LDRB_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* I2 case: sign-extending halfword load */
	arm_patch (br [4], code);
	ARM_LDRSH_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* U2 case: zero-extending halfword load */
	arm_patch (br [5], code);
	ARM_LDRH_IMM (code, ARMREG_R0, ARMREG_LR, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* R4 case: single-precision float return in d0 */
	arm_patch (br [6], code);
	ARM_FLDS (code, ARM_VFP_D0, ARMREG_LR, 0);
	code += 4;
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);
	/* R8 case: double-precision float return in d0 */
	arm_patch (br [7], code);
	ARM_FLDD (code, ARM_VFP_D0, ARMREG_LR, 0);
	code += 4;
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	/* OUT CASE */
	arm_patch (br_out, code);

	/* Marshal return value */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal));

	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_IREGS);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);

	/* OUT IREGS case */
	/* Load vtype ret addr from the caller arg regs: the caller register
	   area slot at index vret_arg_reg holds the buffer address */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));
	ARM_SHL_IMM (code, ARMREG_IP, ARMREG_IP, 2);
	ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, fp);
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_IP, caller_reg_area_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Save both registers for simplicity */
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
	ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, 4);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	arm_patch (br [0], code);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_IREG);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);
	/* OUT IREG case */
	/* Load vtype ret addr from the caller arg regs */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));
	ARM_SHL_IMM (code, ARMREG_IP, ARMREG_IP, 2);
	ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, fp);
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_IP, caller_reg_area_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Save the return value to the buffer pointed to by the vret addr */
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	arm_patch (br [0], code);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_U1);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);
	/* OUT U1 case */
	/* Load vtype ret addr from the caller arg regs */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));
	ARM_SHL_IMM (code, ARMREG_IP, ARMREG_IP, 2);
	ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, fp);
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_IP, caller_reg_area_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Save the return value to the buffer pointed to by the vret addr */
	ARM_STRB_IMM (code, ARMREG_R0, ARMREG_IP, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	arm_patch (br [0], code);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_VFP_R4);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);
	/* OUT R4 case */
	/* Load vtype ret addr from the caller arg regs */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));
	ARM_SHL_IMM (code, ARMREG_IP, ARMREG_IP, 2);
	ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, fp);
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_IP, caller_reg_area_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Save the return value to the buffer pointed to by the vret addr */
	ARM_FSTS (code, ARM_VFP_D0, ARMREG_IP, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	arm_patch (br [0], code);
	ARM_CMP_REG_IMM8 (code, ARMREG_IP, GSHAREDVT_RET_VFP_R8);
	br [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);
	/* OUT R8 case */
	/* Load vtype ret addr from the caller arg regs */
	ARM_LDR_IMM (code, ARMREG_IP, fp, info_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));
	ARM_SHL_IMM (code, ARMREG_IP, ARMREG_IP, 2);
	ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, fp);
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_IP, caller_reg_area_offset);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Save the return value to the buffer pointed to by the vret addr */
	ARM_FSTD (code, ARM_VFP_D0, ARMREG_IP, 0);
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	arm_patch (br [0], code);

	/* OUT other cases */
	br_ret [br_ret_index ++] = code;
	ARM_B (code, 0);

	/* All return paths converge here */
	for (i = 0; i < br_ret_index; ++i)
		arm_patch (br_ret [i], code);

	/* Normal return */
	/* Restore registers + stack: reset SP to the saved fp slot, reload
	   fp/lr, then pop the saved pair plus the caller-pushed r0-r3 */
	ARM_MOV_REG_REG (code, ARMREG_SP, fp);
	ARM_LDM (code, fp, (1 << fp) | (1 << ARMREG_LR));
	ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, cfa_offset);
	/* Return */
	ARM_BX (code, ARMREG_LR);

	g_assert ((code - buf) < buf_len);

	if (info)
		*info = mono_tramp_info_create ("gsharedvt_trampoline", buf, code - buf, ji, unwind_ops);

	mono_arch_flush_icache (buf, code - buf);
	return buf;
}