/*
 * mono_arch_create_generic_trampoline:
 *
 *   Create the generic (S/390) trampoline code shared by all methods for
 * TRAMP_TYPE.  The emitted stub:
 *   - saves the caller's general and floating point registers in a private
 *     stack frame and builds a MonoLMF there,
 *   - links the LMF into the per-thread LMF list via mono_get_lmf_addr (),
 *   - calls the C trampoline function for TRAMP_TYPE,
 *   - restores the saved state and branches to the code address the C
 *     function returned (class-init trampolines return to the caller).
 *
 * TRAMP_TYPE: the kind of trampoline to create.
 * INFO: set to NULL when non-NULL; this backend produces no MonoTrampInfo.
 * AOT: must be FALSE; AOT is not supported here (asserted).
 * Returns: the start address of the generated code.
 */
guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	guint8 *buf, *tramp, *code;
	int i, offset, lmfOffset;

	g_assert (!aot);
	if (info)
		*info = NULL;

	/* Now we'll create in 'buf' the S/390 trampoline code. This
	   is the trampoline code common to all methods.  'code' keeps the
	   start of the buffer while 'buf' advances as instructions are
	   emitted. */
	code = buf = mono_global_codeman_reserve(512);

	/*-----------------------------------------------------------
	  STEP 0: First create a non-standard function prologue with a
	  stack size big enough to save our registers.
	  -----------------------------------------------------------*/
	/* Save r6-r15 in the caller's save area, remember the old SP in
	   r11, allocate our frame and store the back chain at 0(SP). */
	s390_stmg (buf, s390_r6, s390_r15, STK_BASE, S390_REG_SAVE_OFFSET);
	s390_lgr  (buf, s390_r11, s390_r15);
	s390_aghi (buf, STK_BASE, -CREATE_STACK_SIZE);
	s390_stg  (buf, s390_r11, 0, STK_BASE, 0);
	/* r1 holds the argument passed in by the method-specific
	   trampoline; save it and the argument registers r2-r5. */
	s390_stg  (buf, s390_r1, 0, STK_BASE, METHOD_SAVE_OFFSET);
	s390_stmg (buf, s390_r2, s390_r5, STK_BASE, CREATE_GR_OFFSET);

	/* Save the FP registers */
	offset = CREATE_FP_OFFSET;
	for (i = s390_f0; i <= s390_f15; ++i) {
		s390_std (buf, i, 0, STK_BASE, offset);
		offset += 8;
	}

	/*----------------------------------------------------------
	  STEP 1: call 'mono_get_lmf_addr()' to get the address of our
	  LMF. We'll need to restore it after the call to
	  's390_magic_trampoline' and before the call to the native
	  method.
	  ----------------------------------------------------------*/
	/* Load the callee address from the instruction stream
	   (basr/j/llong literal-pool idiom) and call it; result in r2. */
	s390_basr (buf, s390_r13, 0);
	s390_j    (buf, 6);
	s390_llong(buf, mono_get_lmf_addr);
	s390_lg   (buf, s390_r1, 0, s390_r13, 4);
	s390_basr (buf, s390_r14, s390_r1);

	/*---------------------------------------------------------------*/
	/* we build the MonoLMF structure on the stack - see mini-s390.h */
	/* Keep in sync with the code in mono_arch_emit_prolog           */
	/*---------------------------------------------------------------*/
	lmfOffset = CREATE_STACK_SIZE - sizeof(MonoLMF);

	/* r13 = address of the LMF inside our frame */
	s390_lgr  (buf, s390_r13, STK_BASE);
	s390_aghi (buf, s390_r13, lmfOffset);

	/*---------------------------------------------------------------*/
	/* Set lmf.lmf_addr = jit_tls->lmf                               */
	/*---------------------------------------------------------------*/
	s390_stg  (buf, s390_r2, 0, s390_r13, G_STRUCT_OFFSET(MonoLMF, lmf_addr));

	/*---------------------------------------------------------------*/
	/* Get current lmf                                               */
	/*---------------------------------------------------------------*/
	s390_lg   (buf, s390_r0, 0, s390_r2, 0);

	/*---------------------------------------------------------------*/
	/* Set our lmf as the current lmf                                */
	/*---------------------------------------------------------------*/
	s390_stg  (buf, s390_r13, 0, s390_r2, 0);

	/*---------------------------------------------------------------*/
	/* Have our lmf.previous_lmf point to the last lmf               */
	/*---------------------------------------------------------------*/
	s390_stg  (buf, s390_r0, 0, s390_r13, G_STRUCT_OFFSET(MonoLMF, previous_lmf));

	/*---------------------------------------------------------------*/
	/* save method info                                              */
	/*---------------------------------------------------------------*/
	s390_lg   (buf, s390_r1, 0, STK_BASE, METHOD_SAVE_OFFSET);
	s390_stg  (buf, s390_r1, 0, s390_r13, G_STRUCT_OFFSET(MonoLMF, method));

	/*---------------------------------------------------------------*/
	/* save the current SP                                           */
	/*---------------------------------------------------------------*/
	/* The back chain at 0(SP) holds the caller's SP (stored above). */
	s390_lg   (buf, s390_r1, 0, STK_BASE, 0);
	s390_stg  (buf, s390_r1, 0, s390_r13, G_STRUCT_OFFSET(MonoLMF, ebp));

	/*---------------------------------------------------------------*/
	/* save the current IP                                           */
	/*---------------------------------------------------------------*/
	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
		/* Jump trampolines have no meaningful caller IP. */
		s390_lghi (buf, s390_r1, 0);
	} else {
		/* r1 still holds the caller's SP from the back chain:
		   fetch the return address from its save area. */
		s390_lg   (buf, s390_r1, 0, s390_r1, S390_RET_ADDR_OFFSET);
		// s390_la (buf, s390_r1, 0, s390_r1, 0);
	}
	s390_stg  (buf, s390_r1, 0, s390_r13, G_STRUCT_OFFSET(MonoLMF, eip));

	/*---------------------------------------------------------------*/
	/* Save general and floating point registers                     */
	/*---------------------------------------------------------------*/
	s390_mvc (buf, 4*sizeof(gulong), s390_r13, G_STRUCT_OFFSET(MonoLMF, gregs[2]), STK_BASE, CREATE_GR_OFFSET);
	s390_mvc (buf, 10*sizeof(gulong), s390_r13, G_STRUCT_OFFSET(MonoLMF, gregs[6]), s390_r11, S390_REG_SAVE_OFFSET);

	/* Simply copy fpregs already saved above */
	s390_mvc (buf, 16*sizeof(double), s390_r13, G_STRUCT_OFFSET(MonoLMF, fregs[0]), STK_BASE, CREATE_FP_OFFSET);

	/*---------------------------------------------------------------*/
	/* STEP 2: call the C trampoline function                        */
	/*---------------------------------------------------------------*/

	/* Set arguments */

	/* Arg 1: mgreg_t *regs. We pass sp instead */
	s390_la (buf, s390_r2, 0, STK_BASE, CREATE_STACK_SIZE);

	/* Arg 2: code (next address to the instruction that called us) */
	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
		s390_lghi (buf, s390_r3, 0);
	} else {
		s390_lg (buf, s390_r3, 0, s390_r11, S390_RET_ADDR_OFFSET);
	}

	/* Arg 3: MonoMethod *method. It was put in r1 by the
	   method-specific trampoline code, and then saved before the call
	   to mono_get_lmf_addr()'. */
	s390_lg (buf, s390_r4, 0, STK_BASE, METHOD_SAVE_OFFSET);

	/* Arg 4: trampoline address. Ignore for now */

	/* Calculate call address and call the C trampoline.
	   Return value will be in r2 */
	s390_basr (buf, s390_r13, 0);
	s390_j    (buf, 6);
	tramp = (guint8*)mono_get_trampoline_func (tramp_type);
	s390_llong (buf, tramp);
	s390_lg   (buf, s390_r1, 0, s390_r13, 4);
	s390_basr (buf, s390_r14, s390_r1);

	/* OK, code address is now on r2. Move it to r1, so that we
	   can restore r2 and use it from r1 later */
	s390_lgr  (buf, s390_r1, s390_r2);

	/*----------------------------------------------------------
	  STEP 3: Restore the LMF
	  ----------------------------------------------------------*/
	restoreLMF(buf, STK_BASE, CREATE_STACK_SIZE);

	/*----------------------------------------------------------
	  STEP 4: call the compiled method
	  ----------------------------------------------------------*/

	/* Restore registers */
	s390_lmg (buf, s390_r2, s390_r5, STK_BASE, CREATE_GR_OFFSET);

	/* Restore the FP registers */
	offset = CREATE_FP_OFFSET;
	for (i = s390_f0; i <= s390_f15; ++i) {
		s390_ld (buf, i, 0, STK_BASE, offset);
		offset += 8;
	}

	/* Restore stack pointer and jump to the code -
	   R14 contains the return address to our caller */
	s390_lgr (buf, STK_BASE, s390_r11);
	s390_lmg (buf, s390_r6, s390_r14, STK_BASE, S390_REG_SAVE_OFFSET);

	/* Class-init trampolines return to the caller; everything else
	   continues at the address the C trampoline returned (now in r1). */
	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT || tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT)
		s390_br (buf, s390_r14);
	else
		s390_br (buf, s390_r1);

	/* Flush instruction cache, since we've generated code */
	mono_arch_flush_icache (code, buf - code);

	/* Sanity check */
	g_assert ((buf - code) <= 512);

	return code;
}
/*
 * mono_arch_create_generic_trampoline:
 *
 *   Create the generic (IA64) trampoline code shared by all methods for
 * TRAMP_TYPE.  The stub allocates a register+memory stack frame, spills the
 * callee registers and the fp argument registers, calls the C trampoline
 * function for TRAMP_TYPE, runs a thread-interruption checkpoint, restores
 * the fp registers and branches to the compiled method the C function
 * returned (class-init trampolines return to the caller instead).  Dynamic
 * unwind info for the prolog is registered via _U_dyn_register ().
 *
 * INFO is always set to NULL: this backend produces no MonoTrampInfo.
 * AOT mode is not supported (asserted).
 * Returns: the start address of the generated code.
 */
guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	guint8 *buf, *tramp;
	int i, offset, saved_regs_offset, saved_fpregs_offset, last_offset, framesize;
	int in0, local0, out0, l0, l1, l2, l3, l4, l5, l6, l7, l8, o0, o1, o2, o3;
	gboolean has_caller;
	Ia64CodegenState code;
	unw_dyn_info_t *di;
	unw_dyn_region_info_t *r_pro;

	g_assert (!aot);
	*info = NULL;

	/*
	 * Since jump trampolines are not patched, this trampoline is executed every
	 * time a call is made to a jump trampoline. So we try to keep things faster
	 * in that case.
	 */
	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	buf = mono_global_codeman_reserve (2048);

	ia64_codegen_init (code, buf);

	/* Stacked Registers */
	in0 = 32;
	local0 = in0 + 8;
	out0 = local0 + 16;
	l0 = 40;
	l1 = 41;
	l2 = 42;
	l3 = 43;
	l4 = 44;
	l5 = 45; /* saved ar.pfs */
	l6 = 46; /* arg */
	l7 = 47; /* code */
	l8 = 48; /* saved sp */
	o0 = out0 + 0; /* regs */
	o1 = out0 + 1; /* code */
	o2 = out0 + 2; /* arg */
	o3 = out0 + 3; /* tramp */

	/* Room for the 128-slot register array plus scratch, aligned. */
	framesize = (128 * 8) + 1024;
	framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);

	/*
	 * Allocate a new register+memory stack frame.
	 * 8 input registers (the max used by the ABI)
	 * 16 locals
	 * 4 output (number of parameters passed to trampoline)
	 */
	ia64_unw_save_reg (code, UNW_IA64_AR_PFS, UNW_IA64_GR + l5);
	ia64_alloc (code, l5, local0 - in0, out0 - local0, 4, 0);
	ia64_unw_save_reg (code, UNW_IA64_SP, UNW_IA64_GR + l8);
	ia64_mov (code, l8, IA64_SP);
	ia64_adds_imm (code, IA64_SP, (-framesize), IA64_SP);

	offset = 16; /* scratch area */

	/* Save the argument received from the specific trampoline */
	ia64_mov (code, l6, GP_SCRATCH_REG);

	/* Save the calling address */
	ia64_unw_save_reg (code, UNW_IA64_RP, UNW_IA64_GR + local0 + 7);
	ia64_mov_from_br (code, l7, IA64_B0);

	/* Create unwind info for the prolog */
	ia64_begin_bundle (code);
	r_pro = mono_ia64_create_unwind_region (&code);

	/* Save registers */
	/* Not needed for jump trampolines */
	if (tramp_type != MONO_TRAMPOLINE_JUMP) {
		saved_regs_offset = offset;
		offset += 128 * 8;
		/*
		 * Only the registers which are needed for computing vtable slots need
		 * to be saved.
		 */
		last_offset = -1;
		for (i = 0; i < 64; ++i)
			if ((1 << i) & MONO_ARCH_CALLEE_REGS) {
				/* Re-point l1 only if the previous post-
				   incremented store did not already leave it
				   at this slot. */
				if (last_offset != i * 8)
					ia64_adds_imm (code, l1, saved_regs_offset + (i * 8), IA64_SP);
				ia64_st8_spill_inc_imm_hint (code, l1, i, 8, 0);
				last_offset = (i + 1) * 8;
			}
	}

	/* Save fp registers */
	saved_fpregs_offset = offset;
	offset += 8 * 8;
	ia64_adds_imm (code, l1, saved_fpregs_offset, IA64_SP);
	for (i = 0; i < 8; ++i)
		ia64_stfd_inc_imm_hint (code, l1, i + 8, 8, 0);

	g_assert (offset < framesize);

	/* Arg1 is the pointer to the saved registers */
	ia64_adds_imm (code, o0, saved_regs_offset, IA64_SP);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		ia64_mov (code, o1, l7);
	else
		ia64_mov (code, o1, 0);

	/* Arg3 is the method/vtable ptr */
	ia64_mov (code, o2, l6);

	/* Arg4 is the trampoline address */
	/* FIXME: */
	ia64_mov (code, o3, 0);

	tramp = (guint8*)mono_get_trampoline_func (tramp_type);

	/* Call the trampoline using an indirect call */
	/* 'tramp' is a function descriptor: the entry point is loaded
	   first, then the gp value into IA64_GP. */
	ia64_movl (code, l0, tramp);
	ia64_ld8_inc_imm (code, l1, l0, 8);
	ia64_mov_to_br (code, IA64_B6, l1);
	ia64_ld8 (code, IA64_GP, l0);
	ia64_br_call_reg (code, 0, IA64_B6);

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/* Preserve the trampoline's return value (r8) across the call. */
	ia64_mov (code, l2, IA64_R8);

	tramp = (guint8*)mono_thread_force_interruption_checkpoint;
	ia64_movl (code, l0, tramp);
	ia64_ld8_inc_imm (code, l1, l0, 8);
	ia64_mov_to_br (code, IA64_B6, l1);
	ia64_ld8 (code, IA64_GP, l0);
	ia64_br_call_reg (code, 0, IA64_B6);

	ia64_mov (code, IA64_R8, l2);

	/* Restore fp regs */
	ia64_adds_imm (code, l1, saved_fpregs_offset, IA64_SP);
	for (i = 0; i < 8; ++i)
		ia64_ldfd_inc_imm (code, i + 8, l1, 8);

	/* FIXME: Handle NATs in fp regs / scratch regs */

	if (tramp_type != MONO_TRAMPOLINE_CLASS_INIT) {
		/* Load method address from function descriptor */
		ia64_ld8 (code, l0, IA64_R8);
		ia64_mov_to_br (code, IA64_B6, l0);
	}

	/* Clean up register/memory stack frame */
	ia64_adds_imm (code, IA64_SP, framesize, IA64_SP);
	ia64_mov_to_ar_i (code, IA64_PFS, l5);

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		/* Return straight to the caller. */
		ia64_mov_ret_to_br (code, IA64_B0, l7);
		ia64_br_ret_reg (code, IA64_B0);
	} else {
		/* Call the compiled method */
		ia64_mov_to_br (code, IA64_B0, l7);
		ia64_br_cond_reg (code, IA64_B6);
	}

	ia64_codegen_close (code);

	g_assert ((code.buf - buf) <= 2048);

	/* FIXME: emit unwind info for epilog */
	di = g_malloc0 (sizeof (unw_dyn_info_t));
	di->start_ip = (unw_word_t) buf;
	di->end_ip = (unw_word_t) code.buf;
	di->gp = 0;
	di->format = UNW_INFO_FORMAT_DYNAMIC;
	di->u.pi.name_ptr = (unw_word_t)"ia64_generic_trampoline";
	di->u.pi.regions = r_pro;

	_U_dyn_register (di);

	mono_arch_flush_icache (buf, code.buf - buf);

	return buf;
}
/*
 * mono_arch_create_generic_trampoline:
 *
 *   Create the generic (S/390x) trampoline code shared by all methods for
 * TRAMP_TYPE.  The emitted stub:
 *   - allocates a trampStack_t frame and builds a MonoLMF inside it holding
 *     the caller's general and floating point registers,
 *   - links the LMF into the per-thread LMF list via mono_get_lmf_addr (),
 *   - calls the C trampoline function for TRAMP_TYPE with (regs, caller ip,
 *     trampoline argument, trampoline address),
 *   - restores the saved state and branches to the code address the C
 *     function returned, or returns it in r2 for trampoline types that
 *     must return.
 *
 * TRAMP_TYPE: the kind of trampoline to create.
 * INFO: receives a MonoTrampInfo describing the emitted code; must be
 *       non-NULL (asserted).
 * AOT: must be FALSE; AOT is not supported by this backend (asserted).
 * Returns: the start address of the generated code.
 */
guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	char *tramp_name;
	guint8 *buf, *tramp, *code;
	int i, offset, has_caller;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	g_assert (!aot);

	/* Now we'll create in 'buf' the S/390 trampoline code. This
	   is the trampoline code common to all methods.  'code' keeps the
	   start of the buffer while 'buf' advances as instructions are
	   emitted. */
	code = buf = mono_global_codeman_reserve(512);

	/* Jump and handler-block-guard trampolines have no usable caller
	   return address. */
	if ((tramp_type == MONO_TRAMPOLINE_JUMP) ||
	    (tramp_type == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD))
		has_caller = 0;
	else
		has_caller = 1;

	/*-----------------------------------------------------------
	  STEP 0: First create a non-standard function prologue with a
	  stack size big enough to save our registers.
	  -----------------------------------------------------------*/
	s390_stmg (buf, s390_r6, s390_r15, STK_BASE, S390_REG_SAVE_OFFSET);
	s390_lgr  (buf, s390_r11, s390_r15);
	s390_aghi (buf, STK_BASE, -sizeof(trampStack_t));
	s390_stg  (buf, s390_r11, 0, STK_BASE, 0);

	/*---------------------------------------------------------------*/
	/* we build the MonoLMF structure on the stack - see mini-s390.h */
	/* Keep in sync with the code in mono_arch_emit_prolog           */
	/*---------------------------------------------------------------*/
	s390_lgr  (buf, LMFReg, STK_BASE);
	s390_aghi (buf, LMFReg, G_STRUCT_OFFSET(trampStack_t, LMF));

	/*---------------------------------------------------------------*/
	/* Save general and floating point registers in LMF              */
	/*---------------------------------------------------------------*/
	s390_stmg (buf, s390_r0, s390_r1, LMFReg, G_STRUCT_OFFSET(MonoLMF, gregs[0]));
	s390_stmg (buf, s390_r2, s390_r5, LMFReg, G_STRUCT_OFFSET(MonoLMF, gregs[2]));
	/* r6-r15 were saved in the caller's frame (still addressable via
	   r11); copy them into the LMF in one MVC. */
	s390_mvc  (buf, 10*sizeof(gulong), LMFReg, G_STRUCT_OFFSET(MonoLMF, gregs[6]), s390_r11, S390_REG_SAVE_OFFSET);

	offset = G_STRUCT_OFFSET(MonoLMF, fregs[0]);
	for (i = s390_f0; i <= s390_f15; ++i) {
		s390_std (buf, i, 0, LMFReg, offset);
		offset += sizeof(gdouble);
	}

	/*----------------------------------------------------------
	  STEP 1: call 'mono_get_lmf_addr()' to get the address of our
	  LMF. We'll need to restore it after the call to
	  's390_magic_trampoline' and before the call to the native
	  method.
	  ----------------------------------------------------------*/
	S390_SET  (buf, s390_r1, mono_get_lmf_addr);
	s390_basr (buf, s390_r14, s390_r1);

	/*---------------------------------------------------------------*/
	/* Set lmf.lmf_addr = jit_tls->lmf                               */
	/*---------------------------------------------------------------*/
	s390_stg  (buf, s390_r2, 0, LMFReg, G_STRUCT_OFFSET(MonoLMF, lmf_addr));

	/*---------------------------------------------------------------*/
	/* Get current lmf                                               */
	/*---------------------------------------------------------------*/
	s390_lg   (buf, s390_r0, 0, s390_r2, 0);

	/*---------------------------------------------------------------*/
	/* Set our lmf as the current lmf                                */
	/*---------------------------------------------------------------*/
	s390_stg  (buf, LMFReg, 0, s390_r2, 0);

	/*---------------------------------------------------------------*/
	/* Have our lmf.previous_lmf point to the last lmf               */
	/*---------------------------------------------------------------*/
	s390_stg  (buf, s390_r0, 0, LMFReg, G_STRUCT_OFFSET(MonoLMF, previous_lmf));

	/*---------------------------------------------------------------*/
	/* save method info (trampoline argument saved in gregs[1])      */
	/*---------------------------------------------------------------*/
	s390_lg   (buf, s390_r1, 0, LMFReg, G_STRUCT_OFFSET(MonoLMF, gregs[1]));
	s390_stg  (buf, s390_r1, 0, LMFReg, G_STRUCT_OFFSET(MonoLMF, method));

	/*---------------------------------------------------------------*/
	/* save the current SP                                           */
	/*---------------------------------------------------------------*/
	/* The back chain at 0(SP) holds the caller's SP (stored above). */
	s390_lg   (buf, s390_r1, 0, STK_BASE, 0);
	s390_stg  (buf, s390_r1, 0, LMFReg, G_STRUCT_OFFSET(MonoLMF, ebp));

	/*---------------------------------------------------------------*/
	/* save the current IP                                           */
	/*---------------------------------------------------------------*/
	if (has_caller) {
		/* r1 still holds the caller's SP: fetch the return address
		   from its register save area. */
		s390_lg   (buf, s390_r1, 0, s390_r1, S390_RET_ADDR_OFFSET);
	} else {
		s390_lghi (buf, s390_r1, 0);
	}
	s390_stg  (buf, s390_r1, 0, LMFReg, G_STRUCT_OFFSET(MonoLMF, eip));

	/*---------------------------------------------------------------*/
	/* STEP 2: call the C trampoline function                        */
	/*---------------------------------------------------------------*/

	/* Set arguments */

	/* Arg 1: mgreg_t *regs */
	s390_la (buf, s390_r2, 0, LMFReg, G_STRUCT_OFFSET(MonoLMF, gregs[0]));

	/* Arg 2: code (next address to the instruction that called us) */
	if (has_caller) {
		s390_lg   (buf, s390_r3, 0, s390_r11, S390_RET_ADDR_OFFSET);
	} else {
		s390_lghi (buf, s390_r3, 0);
	}

	/* Arg 3: Trampoline argument */
	if (tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT)
		s390_lg (buf, s390_r4, 0, LMFReg, G_STRUCT_OFFSET(MonoLMF, gregs[MONO_ARCH_VTABLE_REG]));
	else
		s390_lg (buf, s390_r4, 0, LMFReg, G_STRUCT_OFFSET(MonoLMF, gregs[1]));

	/* Arg 4: trampoline address. */
	S390_SET (buf, s390_r5, buf);

	/* Calculate call address and call the C trampoline.
	   Return value will be in r2 */
	tramp = (guint8*)mono_get_trampoline_func (tramp_type);
	S390_SET  (buf, s390_r1, tramp);
	s390_basr (buf, s390_r14, s390_r1);

	/* OK, code address is now on r2. Move it to r1, so that we
	   can restore r2 and use it from r1 later */
	s390_lgr  (buf, s390_r1, s390_r2);

	/*----------------------------------------------------------
	  STEP 3: Restore the LMF
	  ----------------------------------------------------------*/
	restoreLMF(buf, STK_BASE, sizeof(trampStack_t));

	/*----------------------------------------------------------
	  STEP 4: call the compiled method
	  ----------------------------------------------------------*/

	/* Restore parameter registers */
	s390_lmg (buf, s390_r2, s390_r5, LMFReg, G_STRUCT_OFFSET(MonoLMF, gregs[2]));

	/* Restore the FP registers */
	offset = G_STRUCT_OFFSET(MonoLMF, fregs[0]);
	for (i = s390_f0; i <= s390_f15; ++i) {
		s390_ld (buf, i, 0, LMFReg, offset);
		offset += sizeof(gdouble);
	}

	/* Restore stack pointer and jump to the code -
	 * R14 contains the return address to our caller */
	s390_lgr (buf, STK_BASE, s390_r11);
	s390_lmg (buf, s390_r6, s390_r14, STK_BASE, S390_REG_SAVE_OFFSET);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN(tramp_type)) {
		/* These trampoline types return a value: hand it back in r2. */
		s390_lgr (buf, s390_r2, s390_r1);
		s390_br  (buf, s390_r14);
	} else {
		s390_br  (buf, s390_r1);
	}

	/* Flush instruction cache, since we've generated code */
	mono_arch_flush_icache (code, buf - code);

	/* FIX: report the buffer start ('code') and its positive length
	 * ('buf - code').  Previously the end pointer and a negative size
	 * were passed ('buf', 'code - buf'); compare the flush call above
	 * for the correct convention. */
	mono_profiler_code_buffer_new (code, buf - code, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);

	g_assert (info);
	tramp_name = mono_get_generic_trampoline_name (tramp_type);
	/* FIX: the trampoline info must reference the START of the emitted
	 * code ('code'), not the current emission pointer ('buf'). */
	*info = mono_tramp_info_create (tramp_name, code, buf - code, ji, unwind_ops);
	g_free (tramp_name);

	/* Sanity check */
	g_assert ((buf - code) <= 512);

	return code;
}
/*
 * mono_arch_create_generic_trampoline:
 *
 *   Create the generic (SPARC) trampoline code shared by all methods for
 * TRAMP_TYPE.  The stub saves the trampoline argument, the caller's fp
 * argument registers (V9) and an LMF, calls the C trampoline function for
 * TRAMP_TYPE, runs a thread-interruption checkpoint, restores the LMF and
 * fp registers, then jumps to the code address the C function returned,
 * restoring the caller's register window in the delay slot.
 *
 * INFO is always set to NULL: this backend produces no MonoTrampInfo.
 * AOT mode is not supported (asserted).
 * Returns: the start address of the generated code.
 */
guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	guint8 *buf, *code, *tramp_addr;
	guint32 lmf_offset, regs_offset, method_reg, i;
	gboolean has_caller;

	g_assert (!aot);
	*info = NULL;

	/* Jump trampolines have no meaningful caller address. */
	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = mono_global_codeman_reserve (1024);

	sparc_save_imm (code, sparc_sp, -1608, sparc_sp);

#ifdef SPARCV9
	method_reg = sparc_g4;
#else
	method_reg = sparc_g1;
#endif

	regs_offset = MONO_SPARC_STACK_BIAS + 1000;

	/* Save r1 needed by the IMT code */
	sparc_sti_imm (code, sparc_g1, sparc_sp, regs_offset + (sparc_g1 * sizeof (gpointer)));

	/*
	 * sparc_g5 contains the return address, the trampoline argument is stored in the
	 * instruction stream after the call.
	 */
	sparc_ld_imm (code, sparc_g5, 8, method_reg);

#ifdef SPARCV9
	/* Save fp regs since they are not preserved by calls */
	for (i = 0; i < 16; i ++)
		sparc_stdf_imm (code, sparc_f0 + (i * 2), sparc_sp, MONO_SPARC_STACK_BIAS + 320 + (i * 8));
#endif

	/* We receive the method address in %r1, so save it here */
	sparc_sti_imm (code, method_reg, sparc_sp, MONO_SPARC_STACK_BIAS + 200);

	/* Save lmf since compilation can raise exceptions */
	lmf_offset = MONO_SPARC_STACK_BIAS - sizeof (MonoLMF);

	/* Save the data for the parent (managed) frame */

	/* Save ip */
	sparc_sti_imm (code, sparc_i7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip));
	/* Save sp */
	sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
	/* Save fp */
	/* Load previous fp from the saved register window */
	sparc_flushw (code);
	sparc_ldi_imm (code, sparc_fp, MONO_SPARC_STACK_BIAS + (sparc_i6 - 16) * sizeof (gpointer), sparc_o7);
	sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp));
	/* Save method */
	sparc_sti_imm (code, method_reg, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method));

	/* Call mono_get_lmf_addr (); the nop fills the delay slot. */
	sparc_set (code, mono_get_lmf_addr, sparc_o7);
	sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
	sparc_nop (code);

	code = mono_sparc_emit_save_lmf (code, lmf_offset);

	if (has_caller) {
		/* Load all registers of the caller into a table inside this frame */
		/* first the out registers */
		for (i = 0; i < 8; ++i)
			sparc_sti_imm (code, sparc_i0 + i, sparc_sp, regs_offset + ((sparc_o0 + i) * sizeof (gpointer)));
		/* then the in+local registers */
		for (i = 0; i < 16; i ++) {
			sparc_ldi_imm (code, sparc_fp, MONO_SPARC_STACK_BIAS + (i * sizeof (gpointer)), sparc_o7);
			sparc_sti_imm (code, sparc_o7, sparc_sp, regs_offset + ((sparc_l0 + i) * sizeof (gpointer)));
		}
	}

	tramp_addr = mono_get_trampoline_func (tramp_type);
	/* reload the saved trampoline argument into %o2 (third argument) */
	sparc_ldi_imm (code, sparc_sp, MONO_SPARC_STACK_BIAS + 200, sparc_o2);
	/* pass address of register table as first argument */
	sparc_add_imm (code, FALSE, sparc_sp, regs_offset, sparc_o0);
	sparc_set (code, tramp_addr, sparc_o7);
	/* set %o1 to caller address */
	if (has_caller)
		sparc_mov_reg_reg (code, sparc_i7, sparc_o1);
	else
		sparc_set (code, 0, sparc_o1);
	/* fourth argument (trampoline address) is unused here */
	sparc_set (code, 0, sparc_o3);
	sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
	sparc_nop (code);

	/* Save result */
	sparc_sti_imm (code, sparc_o0, sparc_sp, MONO_SPARC_STACK_BIAS + 304);

	/* Check for thread interruption */
	sparc_set (code, (guint8*)mono_thread_force_interruption_checkpoint, sparc_o7);
	sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
	sparc_nop (code);

	/* Restore lmf */
	code = mono_sparc_emit_restore_lmf (code, lmf_offset);

	/* Reload result */
	sparc_ldi_imm (code, sparc_sp, MONO_SPARC_STACK_BIAS + 304, sparc_o0);

#ifdef SPARCV9
	/* Reload fp regs */
	for (i = 0; i < 16; i ++)
		sparc_lddf_imm (code, sparc_sp, MONO_SPARC_STACK_BIAS + 320 + (i * 8), sparc_f0 + (i * 2));
#endif

	/* Jump to the code address returned by the C trampoline */
	sparc_jmpl (code, sparc_o0, sparc_g0, sparc_g0);
	/* restore previous frame in delay slot */
	sparc_restore_simple (code);

/*
{
	gpointer addr;

	sparc_save_imm (code, sparc_sp, -608, sparc_sp);
	addr = code;
	sparc_call_simple (code, 16);
	sparc_nop (code);
	sparc_rett_simple (code);
	sparc_nop (code);

	sparc_save_imm (code, sparc_sp, -608, sparc_sp);
	sparc_ta (code, 1);
	tramp_addr = &sparc_magic_trampoline;
	sparc_call_simple (code, tramp_addr - code);
	sparc_nop (code);
	sparc_rett_simple (code);
	sparc_nop (code);
}
*/

	/* NOTE(review): 1024 bytes are reserved above but the overflow
	   check asserts <= 512 — intentional slack or a stale constant?
	   Verify against the other backends. */
	g_assert ((code - buf) <= 512);

	mono_arch_flush_icache (buf, code - buf);

	return buf;
}
/*
 * mono_arch_create_generic_trampoline:
 *
 *   Create the generic (x86) trampoline code shared by all methods for
 * TRAMP_TYPE.  The stub builds an %ebp frame containing a register array
 * and a MonoLMF, links the LMF into the thread's LMF list (with 1 added to
 * previous_lmf to signal a trampoline frame to mono_arch_unwind_frame ()),
 * calls the C trampoline function, then either rethrows a pending
 * exception in the caller's frame or restores state and returns/jumps to
 * the address the C function produced.
 *
 * TRAMP_TYPE: the kind of trampoline to create.
 * INFO: receives a MonoTrampInfo with the emitted code and unwind ops.
 * AOT: when TRUE, helper addresses are loaded through AOT constants and
 *      patch info is accumulated in JI.
 * Returns: the start address of the generated code.
 */
guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	const char *tramp_name;
	guint8 *buf, *code, *tramp, *br_ex_check;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	int i, offset, frame_size, regarray_offset, lmf_offset, caller_ip_offset, arg_offset;
	int cfa_offset; /* cfa = cfa_reg + cfa_offset */

	/* 'buf' keeps the start of the buffer; 'code' advances as
	   instructions are emitted. */
	code = buf = mono_global_codeman_reserve (256);

	/* Note that there is a single argument to the trampoline
	 * and it is stored at: esp + pushed_args * sizeof (target_mgreg_t)
	 * the ret address is at: esp + (pushed_args + 1) * sizeof (target_mgreg_t)
	 */

	/* Compute frame offsets relative to the frame pointer %ebp */
	arg_offset = sizeof (target_mgreg_t);
	caller_ip_offset = 2 * sizeof (target_mgreg_t);
	offset = 0;
	offset += sizeof (MonoLMF);
	lmf_offset = -offset;
	offset += X86_NREG * sizeof (target_mgreg_t);
	regarray_offset = -offset;
	/* Argument area */
	offset += 4 * sizeof (target_mgreg_t);
	frame_size = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);

	/* ret addr and arg are on the stack */
	cfa_offset = 2 * sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	// IP saved at CFA - 4
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -4);

	/* Allocate frame */
	x86_push_reg (code, X86_EBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, -cfa_offset);

	x86_mov_reg_reg (code, X86_EBP, X86_ESP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);

	/* There are three words on the stack, adding + 4 aligns the stack to 16, which is needed on osx */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, frame_size + sizeof (target_mgreg_t));

	/* Save all registers into the register array at regarray_offset */
	for (i = X86_EAX; i <= X86_EDI; ++i) {
		int reg = i;

		if (i == X86_EBP) {
			/* Save original ebp */
			/* EAX is already saved */
			x86_mov_reg_membase (code, X86_EAX, X86_EBP, 0, sizeof (target_mgreg_t));
			reg = X86_EAX;
		} else if (i == X86_ESP) {
			/* Save original esp */
			/* EAX is already saved */
			x86_mov_reg_reg (code, X86_EAX, X86_EBP);
			/* Saved ebp + trampoline arg + return addr */
			x86_alu_reg_imm (code, X86_ADD, X86_EAX, 3 * sizeof (target_mgreg_t));
			reg = X86_EAX;
		}
		x86_mov_membase_reg (code, X86_EBP, regarray_offset + (i * sizeof (target_mgreg_t)), reg, sizeof (target_mgreg_t));
	}

	/* Setup LMF */
	/* eip */
	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
		/* Jump trampolines have no caller IP. */
		x86_mov_membase_imm (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, eip), 0, sizeof (target_mgreg_t));
	} else {
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, caller_ip_offset, sizeof (target_mgreg_t));
		x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, eip), X86_EAX, sizeof (target_mgreg_t));
	}
	/* method */
	if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP)) {
		/* For JIT/jump trampolines the argument is the method. */
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, arg_offset, sizeof (target_mgreg_t));
		x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), X86_EAX, sizeof (target_mgreg_t));
	} else {
		x86_mov_membase_imm (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, sizeof (target_mgreg_t));
	}
	/* esp */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_ESP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esp), X86_EAX, sizeof (target_mgreg_t));
	/* callee save registers */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EBX * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebx), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EDI * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, edi), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_ESI * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esi), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EBP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), X86_EAX, sizeof (target_mgreg_t));

	/* Push LMF */
	/* get the address of lmf for the current thread */
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, mono_get_lmf_addr);
	}
	/* lmf->lmf_addr = lmf_addr (%eax) */
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), X86_EAX, sizeof (target_mgreg_t));
	/* lmf->previous_lmf = *(lmf_addr) */
	x86_mov_reg_membase (code, X86_ECX, X86_EAX, 0, sizeof (target_mgreg_t));
	/* Signal to mono_arch_unwind_frame () that this is a trampoline frame */
	x86_alu_reg_imm (code, X86_ADD, X86_ECX, 1);
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), X86_ECX, sizeof (target_mgreg_t));
	/* *lmf_addr = lmf */
	x86_lea_membase (code, X86_ECX, X86_EBP, lmf_offset);
	x86_mov_membase_reg (code, X86_EAX, 0, X86_ECX, sizeof (target_mgreg_t));

	/* Call trampoline function */
	/* Arg 1 - registers */
	x86_lea_membase (code, X86_EAX, X86_EBP, regarray_offset);
	x86_mov_membase_reg (code, X86_ESP, (0 * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	/* Arg2 - calling code */
	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
		x86_mov_membase_imm (code, X86_ESP, (1 * sizeof (target_mgreg_t)), 0, sizeof (target_mgreg_t));
	} else {
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, caller_ip_offset, sizeof (target_mgreg_t));
		x86_mov_membase_reg (code, X86_ESP, (1 * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	}
	/* Arg3 - trampoline argument */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, arg_offset, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, (2 * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	/* Arg4 - trampoline address */
	// FIXME:
	x86_mov_membase_imm (code, X86_ESP, (3 * sizeof (target_mgreg_t)), 0, sizeof (target_mgreg_t));

#ifdef __APPLE__
	/* check the stack is aligned after the ret ip is pushed */
	/*
	x86_mov_reg_reg (code, X86_EDX, X86_ESP);
	x86_alu_reg_imm (code, X86_AND, X86_EDX, 15);
	x86_alu_reg_imm (code, X86_CMP, X86_EDX, 0);
	x86_branch_disp (code, X86_CC_Z, 3, FALSE);
	x86_breakpoint (code);
	*/
#endif

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_TRAMPOLINE_FUNC_ADDR, GINT_TO_POINTER (tramp_type));
		x86_call_reg (code, X86_EAX);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		x86_call_code (code, tramp);
	}

	/*
	 * Overwrite the trampoline argument with the address we need to jump to,
	 * to free %eax.
	 */
	x86_mov_membase_reg (code, X86_EBP, arg_offset, X86_EAX, 4);

	/* Restore LMF */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof (target_mgreg_t));
	/* Undo the +1 trampoline-frame marker added above */
	x86_alu_reg_imm (code, X86_SUB, X86_ECX, 1);
	x86_mov_membase_reg (code, X86_EAX, 0, X86_ECX, sizeof (target_mgreg_t));

	/* Check for interruptions */
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint_noraise");
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, (guint8*)mono_thread_force_interruption_checkpoint_noraise);
	}

	/* A non-zero result is a pending exception object. */
	x86_test_reg_reg (code, X86_EAX, X86_EAX);
	br_ex_check = code;
	x86_branch8 (code, X86_CC_Z, -1, 1);

	/*
	 * Exception case:
	 * We have an exception we want to throw in the caller's frame, so pop
	 * the trampoline frame and throw from the caller.
	 */
	x86_leave (code);
	/*
	 * The exception is in eax.
	 * We are calling the throw trampoline used by OP_THROW, so we have to setup the
	 * stack to look the same.
	 * The stack contains the ret addr, and the trampoline argument, the throw trampoline
	 * expects it to contain the ret addr and the exception. It also needs to be aligned
	 * after the exception is pushed.
	 */
	/* Align stack */
	x86_push_reg (code, X86_EAX);
	/* Push the exception */
	x86_push_reg (code, X86_EAX);
	//x86_breakpoint (code);
	/* Push the original return value */
	x86_push_membase (code, X86_ESP, 3 * 4);
	/*
	 * EH is initialized after trampolines, so get the address of the variable
	 * which contains throw_exception, and load it from there.
	 */
	if (aot) {
		/* Not really a jit icall */
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "rethrow_preserve_exception_addr");
	} else {
		x86_mov_reg_imm (code, X86_ECX, (guint8*)mono_get_rethrow_preserve_exception_addr ());
	}
	x86_mov_reg_membase (code, X86_ECX, X86_ECX, 0, sizeof (target_mgreg_t));
	x86_jump_reg (code, X86_ECX);

	/* Normal case */
	mono_x86_patch (br_ex_check, code);

	/* Restore registers */
	for (i = X86_EAX; i <= X86_EDI; ++i) {
		if (i == X86_ESP || i == X86_EBP)
			continue;
		if (i == X86_EAX && tramp_type != MONO_TRAMPOLINE_AOT_PLT)
			continue;
		x86_mov_reg_membase (code, i, X86_EBP, regarray_offset + (i * 4), 4);
	}

	/* Restore frame */
	x86_leave (code);
	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, X86_EBP);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load the value returned by the trampoline */
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, 0, 4);
		/* The trampoline returns normally, pop the trampoline argument */
		x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
		cfa_offset -= sizeof (target_mgreg_t);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
		x86_ret (code);
	} else {
		/* Return to the address stored at arg_offset (the ret here
		   pops it as the return address). */
		x86_ret (code);
	}

	g_assert ((code - buf) <= 256);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));

	tramp_name = mono_get_generic_trampoline_name (tramp_type);
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}