Example 1
/*
 * Check a 2D array access operation for exception conditions.
 */
static void Check2DArrayAccess(MDUnroll *unroll, int reg, int reg2, int reg3,
							   unsigned char *pc, unsigned char *label)
{
#ifndef IL_USE_INTERRUPT_BASED_NULL_POINTER_CHECKS
	unsigned char *patch1;
#endif
	unsigned char *patch2;
	unsigned char *patch3;

#ifndef IL_USE_INTERRUPT_BASED_NULL_POINTER_CHECKS
	/* Check the array reference against NULL */
	x86_alu_reg_reg(unroll->out, X86_OR, reg, reg);
	patch1 = unroll->out;
	x86_branch8(unroll->out, X86_CC_EQ, 0, 0);
#endif

	/* Check the array bounds: subtract each dimension's lower bound and
	   compare unsigned against the dimension size (an unsigned "below"
	   also rejects indexes under the lower bound) */
	x86_alu_reg_membase(unroll->out, X86_SUB, reg2, reg, 12);
	x86_alu_reg_membase(unroll->out, X86_CMP, reg2, reg, 16);
	patch2 = unroll->out;
	x86_branch32(unroll->out, X86_CC_LT, 0, 0);
	/* First index out of range: restore it and jump to the re-execute path */
	x86_alu_reg_membase(unroll->out, X86_ADD, reg2, reg, 12);
	patch3 = unroll->out;
	x86_jump8(unroll->out, 0);
	x86_patch(patch2, unroll->out);
	x86_alu_reg_membase(unroll->out, X86_SUB, reg3, reg, 24);
	x86_alu_reg_membase(unroll->out, X86_CMP, reg3, reg, 28);
	patch2 = unroll->out;
	x86_branch32(unroll->out, X86_CC_LT, 0, 0);
	/* Second index out of range: restore both indexes before re-executing */
	x86_alu_reg_membase(unroll->out, X86_ADD, reg2, reg, 12);
	x86_alu_reg_membase(unroll->out, X86_ADD, reg3, reg, 24);

	/* Re-execute the current instruction in the interpreter */
#ifndef IL_USE_INTERRUPT_BASED_NULL_POINTER_CHECKS
	x86_patch(patch1, unroll->out);
#endif
	x86_patch(patch3, unroll->out);
	ReExecute(unroll, pc, label);

	/* Compute the address of the array element:
	   data + ((idx1 * mult1) + (idx2 * mult2)) * elemSize */
	x86_patch(patch2, unroll->out);
	x86_imul_reg_membase(unroll->out, reg2, reg, 20);
	x86_imul_reg_membase(unroll->out, reg3, reg, 32);
	x86_alu_reg_reg(unroll->out, X86_ADD, reg2, reg3);
	x86_imul_reg_membase(unroll->out, reg2, reg, 4);
	x86_mov_reg_membase(unroll->out, reg, reg, 8, 4);
	x86_alu_reg_reg(unroll->out, X86_ADD, reg, reg2);
}
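
For reference, the instruction sequence emitted above implements roughly the following C logic. This is a minimal sketch assuming the 2-D array header layout implied by the offsets in the code (element size at +4, data pointer at +8, and lower bound/size/multiplier triples at +12/+16/+20 and +24/+28/+32); the struct and function names are hypothetical, not part of the source.

/* Hypothetical header layout matching the offsets used above */
typedef struct
{
	void *klass;			/* +0  */
	unsigned int elemSize;	/* +4  */
	void *data;				/* +8  */
	int lower1;				/* +12: lower bound, dimension 1 */
	unsigned int size1;		/* +16: size, dimension 1 */
	unsigned int mult1;		/* +20: multiplier, dimension 1 */
	int lower2;				/* +24: lower bound, dimension 2 */
	unsigned int size2;		/* +28: size, dimension 2 */
	unsigned int mult2;		/* +32: multiplier, dimension 2 */
} Array2D;

/* C equivalent of the generated checks and address computation */
static void *Elem2D(Array2D *array, int index1, int index2)
{
	unsigned int idx1, idx2;
	if(!array)
	{
		return 0;	/* the generated code re-executes in the interpreter */
	}
	idx1 = (unsigned int)(index1 - array->lower1);
	idx2 = (unsigned int)(index2 - array->lower2);
	if(idx1 >= array->size1 || idx2 >= array->size2)
	{
		return 0;	/* likewise: index out of range */
	}
	return (char *)(array->data) +
		   (idx1 * array->mult1 + idx2 * array->mult2) * array->elemSize;
}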
Example 2
/*
 * get_throw_trampoline:
 *
 *   Generate a call to mono_x86_throw_exception/mono_x86_throw_corlib_exception.
 * If LLVM is true, generate code which assumes the caller is LLVM generated code,
 * which doesn't push the arguments.
 */
static guint8*
get_throw_trampoline (const char *name, gboolean rethrow, gboolean llvm, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, MonoTrampInfo **info, gboolean aot)
{
	guint8 *start, *code, *labels [16];
	int i, stack_size, stack_offset, arg_offsets [5], regs_offset;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	guint kMaxCodeSize = 192;

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	stack_size = 128;

	/*
	 * On Apple, the stack is misaligned by the push of the return address.
	 */
	if (!llvm && corlib)
		/* On OSX, we don't generate alignment code to save space */
		stack_size += 4;
	else
		stack_size += MONO_ARCH_FRAME_ALIGNMENT - 4;

	/*
	 * The stack looks like this:
	 * <pc offset> (only if corlib is TRUE)
	 * <exception object>/<type token>
	 * <return addr> <- esp (unaligned on apple)
	 */

	unwind_ops = mono_arch_get_cie_program ();

	/* Alloc frame */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, stack_size);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 4);

	arg_offsets [0] = 0;
	arg_offsets [1] = 4;
	arg_offsets [2] = 8;
	arg_offsets [3] = 12;
	regs_offset = 16;

	/* Save registers */
	for (i = 0; i < X86_NREG; ++i)
		if (i != X86_ESP)
			x86_mov_membase_reg (code, X86_ESP, regs_offset + (i * 4), i, 4);
	/* Calculate the offset between the current sp and the sp of the caller */
	if (llvm) {
		/* LLVM doesn't push the arguments */
		stack_offset = stack_size + 4;
	} else {
		if (corlib) {
			/* Two arguments */
			stack_offset = stack_size + 4 + 8;
#ifdef __APPLE__
			/* We don't generate stack alignment code on osx to save space */
#endif
		} else {
			/* One argument + stack alignment */
			stack_offset = stack_size + 4 + 4;
#ifdef __APPLE__
			/* Pop the alignment added by OP_THROW too */
			stack_offset += MONO_ARCH_FRAME_ALIGNMENT - 4;
#else
			if (mono_do_x86_stack_align)
				stack_offset += MONO_ARCH_FRAME_ALIGNMENT - 4;
#endif
		}
	}
	/* Save ESP */
	x86_lea_membase (code, X86_EAX, X86_ESP, stack_offset);
	x86_mov_membase_reg (code, X86_ESP, regs_offset + (X86_ESP * 4), X86_EAX, 4);

	/* Clear fp stack */
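	/* fnstsw copies the FPU status word into AX; bits 11-13 hold the TOP
	   field, which is zero once the x87 register stack is empty, so we keep
	   popping one register at a time (fstp st0) until TOP reaches zero */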
	labels [0] = code;
	x86_fnstsw (code);
	x86_shift_reg_imm (code, X86_SHR, X86_EAX, 11);
	x86_alu_reg_imm (code, X86_AND, X86_EAX, 7);
	x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0);
	labels [1] = code;
	x86_branch8 (code, X86_CC_EQ, 0, FALSE);
	x86_fstp (code, 0);
	x86_jump_code (code, labels [0]);
	mono_x86_patch (labels [1], code);

	/* Set arg1 == regs */
	x86_lea_membase (code, X86_EAX, X86_ESP, regs_offset);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [0], X86_EAX, 4);
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		x86_mov_reg_imm (code, X86_EAX, 0);
	else
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size + 4, 4);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [1], X86_EAX, 4);
	/* Set arg3 == eip */
	if (llvm_abs)
		x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
	else
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size, 4);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [2], X86_EAX, 4);
	/* Set arg4 == rethrow/pc_offset */
	if (resume_unwind) {
		x86_mov_membase_imm (code, X86_ESP, arg_offsets [3], 0, 4);
	} else if (corlib) {
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size + 8, 4);
		if (llvm_abs) {
			/* 
			 * The caller is LLVM code which passes the absolute address not a pc offset,
			 * so compensate by passing 0 as 'ip' and passing the negated abs address as
			 * the pc offset.
			 */
			x86_neg_reg (code, X86_EAX);
		}
		x86_mov_membase_reg (code, X86_ESP, arg_offsets [3], X86_EAX, 4);
	} else {
		x86_mov_membase_imm (code, X86_ESP, arg_offsets [3], rethrow, 4);
	}
	/* Make the call */
	if (aot) {
		// This can be called from runtime code, which can't guarantee that
		// ebx contains the got address.
		// So emit the got address loading code too
		code = mono_arch_emit_load_got_addr (start, code, NULL, &ji);
		code = mono_arch_emit_load_aotconst (start, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_x86_throw_corlib_exception" : "mono_x86_throw_exception");
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, resume_unwind ? (gpointer)(mono_x86_resume_unwind) : (corlib ? (gpointer)mono_x86_throw_corlib_exception : (gpointer)mono_x86_throw_exception));
	}
	x86_breakpoint (code);

	g_assert ((code - start) < kMaxCodeSize);

	if (info)
		*info = mono_tramp_info_create (name, start, code - start, ji, unwind_ops);
	else {
		GSList *l;

		for (l = unwind_ops; l; l = l->next)
			g_free (l->data);
		g_slist_free (unwind_ops);
	}

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	return start;
}
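
The four argument slots filled in above correspond to C callees of roughly the following shape. This is a sketch of the calling convention the trampoline establishes, not the exact Mono declarations; the real prototypes may use different typedefs.

/* Assumed shapes only: arg1 = block of saved registers (indexed by register
   number, with the ESP slot fixed up to the caller's value), arg2 =
   exception object or corlib token index, arg3 = faulting eip (0 when
   llvm_abs), arg4 = rethrow flag or pc offset. */
void mono_x86_throw_exception (guint32 *regs, MonoObject *exc,
			       guint32 eip, gboolean rethrow);
void mono_x86_throw_corlib_exception (guint32 *regs, guint32 ex_token_index,
				      guint32 eip, gint32 pc_offset);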
Example 3
gpointer
mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int buf_len, cfa_offset;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	guint8 *br_out, *br [16];
	int info_offset, mrgctx_offset;

	buf_len = 320;
	buf = code = mono_global_codeman_reserve (buf_len);

	/*
	 * This trampoline is responsible for marshalling calls between normal code and gsharedvt code. The
	 * caller is a normal or gshared method which uses the signature of the inflated method to make the call, while
	 * the callee is a gsharedvt method which has a signature which uses valuetypes in place of type parameters, i.e.
	 * caller:
	 * foo<bool> (bool b)
	 * callee:
	 * T=<type used to represent vtype type arguments, currently TypedByRef>
	 * foo<T> (T b)
	 * The trampoline is responsible for marshalling the arguments and for marshalling the result back. To simplify
	 * things, we create our own stack frame, and do most of the work in a C function, which receives a
	 * GSharedVtCallInfo structure as an argument. The structure should contain all the information needed to
	 * execute the call, so the C function can be as fast as possible. The argument is received in EAX from a
	 * gsharedvt trampoline. So the real call sequence looks like this:
	 * caller -> gsharedvt trampoline -> gsharedvt in trampoline -> start_gsharedvt_call
	 * FIXME: Optimize this.
	 */

	cfa_offset = sizeof (gpointer);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -cfa_offset);
	x86_push_reg (code, X86_EBP);
	cfa_offset += sizeof (gpointer);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, -cfa_offset);
	x86_mov_reg_reg (code, X86_EBP, X86_ESP, sizeof (gpointer));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);
	/* Alloc stack frame/align stack */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
	info_offset = -4;
	mrgctx_offset = -8;
	/* The info struct is put into EAX by the gsharedvt trampoline */
	/* Save info struct addr */
	x86_mov_membase_reg (code, X86_EBP, info_offset, X86_EAX, 4);
	/* Save rgctx */
	x86_mov_membase_reg (code, X86_EBP, mrgctx_offset, MONO_ARCH_RGCTX_REG, 4);

	/* Allocate stack area used to pass arguments to the method */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, stack_usage), sizeof (gpointer));
	x86_alu_reg_reg (code, X86_SUB, X86_ESP, X86_EAX);

#if 0
	/* Stack alignment check */
	x86_mov_reg_reg (code, X86_ECX, X86_ESP, 4);
	x86_alu_reg_imm (code, X86_AND, X86_ECX, MONO_ARCH_FRAME_ALIGNMENT - 1);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
	x86_branch_disp (code, X86_CC_EQ, 3, FALSE);
	x86_breakpoint (code);
#endif

	/* ecx = caller argument area */
	x86_mov_reg_reg (code, X86_ECX, X86_EBP, 4);
	x86_alu_reg_imm (code, X86_ADD, X86_ECX, 8);
	/* eax = callee argument area */
	x86_mov_reg_reg (code, X86_EAX, X86_ESP, 4);

	/* Call start_gsharedvt_call */
	/* Arg 4 */
	x86_push_membase (code, X86_EBP, mrgctx_offset);
	/* Arg3 */
	x86_push_reg (code, X86_EAX);
	/* Arg2 */
	x86_push_reg (code, X86_ECX);
	/* Arg1 */
	x86_push_membase (code, X86_EBP, info_offset);
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_x86_start_gsharedvt_call");
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, mono_x86_start_gsharedvt_call);
	}
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4 * 4);
	/* The address to call is in eax */
	/* The stack is now setup for the real call */
	/* Load info struct */
	x86_mov_reg_membase (code, X86_ECX, X86_EBP, info_offset, 4);
	/* Load rgctx */
	x86_mov_reg_membase (code, MONO_ARCH_RGCTX_REG, X86_EBP, mrgctx_offset, sizeof (gpointer));
	/* Make the call */
	x86_call_reg (code, X86_EAX);
	/* The return value is either in registers, or stored to an area beginning at sp [info->vret_slot] */
	/* EAX/EDX might contain the return value, only ECX is free */
	/* Load info struct */
	x86_mov_reg_membase (code, X86_ECX, X86_EBP, info_offset, 4);

	/* Branch to the in/out handling code */
	x86_alu_membase_imm (code, X86_CMP, X86_ECX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, gsharedvt_in), 1);	
	br_out = code;
	x86_branch32 (code, X86_CC_NE, 0, TRUE);

	/*
	 * IN CASE
	 */

	/* Load ret marshal type */
	x86_mov_reg_membase (code, X86_ECX, X86_ECX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_NONE);
	br [0] = code;
	x86_branch8 (code, X86_CC_NE, 0, TRUE);

	/* Normal return, no marshalling required */
	x86_leave (code);
	x86_ret (code);

	/* Return value marshalling */
	x86_patch (br [0], code);
	/* Load info struct */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, info_offset, 4);
	/* Load 'vret_slot' */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_slot), 4);
	/* Compute ret area address */
	x86_shift_reg_imm (code, X86_SHL, X86_EAX, 2);
	x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_ESP);
	/* The callee does a ret $4, so sp is off by 4 */
	x86_alu_reg_imm (code, X86_SUB, X86_EAX, sizeof (gpointer));

	/* Branch to specific marshalling code */
	/* FIXME: Move the I4 case to the top */
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_DOUBLE_FPSTACK);
	br [1] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_FLOAT_FPSTACK);
	br [2] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_STACK_POP);
	br [3] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_I1);
	br [4] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_U1);
	br [5] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_I2);
	br [6] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_U2);
	br [7] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	/* IREGS case */
	/* Load both eax and edx for simplicity */
	x86_mov_reg_membase (code, X86_EDX, X86_EAX, sizeof (gpointer), sizeof (gpointer));
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, sizeof (gpointer));
	x86_leave (code);
	x86_ret (code);
	/* DOUBLE_FPSTACK case */
	x86_patch (br [1], code);
	x86_fld_membase (code, X86_EAX, 0, TRUE);
	x86_jump8 (code, 0);
	x86_leave (code);
	x86_ret (code);
	/* FLOAT_FPSTACK case */
	x86_patch (br [2], code);
	x86_fld_membase (code, X86_EAX, 0, FALSE);
	x86_leave (code);
	x86_ret (code);
	/* STACK_POP case */
	x86_patch (br [3], code);
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* I1 case */
	x86_patch (br [4], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, TRUE, FALSE);
	x86_leave (code);
	x86_ret (code);
	/* U1 case */
	x86_patch (br [5], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, FALSE, FALSE);
	x86_leave (code);
	x86_ret (code);
	/* I2 case */
	x86_patch (br [6], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, TRUE, TRUE);
	x86_leave (code);
	x86_ret (code);
	/* U2 case */
	x86_patch (br [7], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, FALSE, TRUE);
	x86_leave (code);
	x86_ret (code);

	/*
	 * OUT CASE
	 */

	x86_patch (br_out, code);
	/* Load ret marshal type into ECX */
	x86_mov_reg_membase (code, X86_ECX, X86_ECX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_NONE);
	br [0] = code;
	x86_branch8 (code, X86_CC_NE, 0, TRUE);

	/* Normal return, no marshalling required */
	x86_leave (code);
	x86_ret (code);

	/* Return value marshalling */
	x86_patch (br [0], code);

	/* EAX might contain the return value */
	// FIXME: Use moves
	x86_push_reg (code, X86_EAX);

	/* Load info struct */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, info_offset, 4);
	/* Load 'vret_arg_slot' */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_slot), 4);
	/* Compute ret area address in the caller frame in EAX */
	x86_shift_reg_imm (code, X86_SHL, X86_EAX, 2);
	x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_EBP);
	x86_alu_reg_imm (code, X86_ADD, X86_EAX, 8);
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, sizeof (gpointer));

	/* Branch to specific marshalling code */
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_DOUBLE_FPSTACK);
	br [1] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_FLOAT_FPSTACK);
	br [2] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_STACK_POP);
	br [3] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_IREGS);
	br [4] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	/* IREG case */
	x86_mov_reg_reg (code, X86_ECX, X86_EAX, sizeof (gpointer));
	x86_pop_reg (code, X86_EAX);
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EAX, sizeof (gpointer));
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* IREGS case */
	x86_patch (br [4], code);
	x86_mov_reg_reg (code, X86_ECX, X86_EAX, sizeof (gpointer));
	x86_pop_reg (code, X86_EAX);
	x86_mov_membase_reg (code, X86_ECX, sizeof (gpointer), X86_EDX, sizeof (gpointer));
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EAX, sizeof (gpointer));
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* DOUBLE_FPSTACK case */
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
	x86_patch (br [1], code);
	x86_fst_membase (code, X86_EAX, 0, TRUE, TRUE);
	x86_jump8 (code, 0);
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* FLOAT_FPSTACK case */
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
	x86_patch (br [2], code);
	x86_fst_membase (code, X86_EAX, 0, FALSE, TRUE);
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* STACK_POP case */
	x86_patch (br [3], code);
	x86_leave (code);
	x86_ret_imm (code, 4);

	g_assert ((code - buf) < buf_len);

	if (info)
		*info = mono_tramp_info_create ("gsharedvt_trampoline", buf, code - buf, ji, unwind_ops);

	mono_arch_flush_icache (buf, code - buf);
	return buf;
}
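
At the C level, the return-marshalling ladder in the IN case corresponds to a dispatch like the sketch below (integer cases only; the FPSTACK cases load the value onto the x87 stack, and STACK_POP only changes how the frame is torn down). Only the GSHAREDVT_RET_* constants come from the surrounding code; the helper name and out-parameters are illustrative.

/* Sketch of the IN-case return marshalling: 'ret_area' points at
   sp + info->vret_slot * 4 - 4 (the -4 compensates for the callee's
   ret $4), and eax/edx stand for the integer return registers. */
static void
marshal_in_return (int ret_marshal, gpointer ret_area, guint32 *eax, guint32 *edx)
{
	switch (ret_marshal) {
	case GSHAREDVT_RET_I1:
		*eax = (gint32) *(gint8*)ret_area;	/* sign-extend */
		break;
	case GSHAREDVT_RET_U1:
		*eax = *(guint8*)ret_area;		/* zero-extend */
		break;
	case GSHAREDVT_RET_I2:
		*eax = (gint32) *(gint16*)ret_area;
		break;
	case GSHAREDVT_RET_U2:
		*eax = *(guint16*)ret_area;
		break;
	case GSHAREDVT_RET_IREGS:
		*eax = ((guint32*)ret_area) [0];
		*edx = ((guint32*)ret_area) [1];
		break;
	default:
		/* NONE, STACK_POP and the FPSTACK cases are handled
		   directly by the emitted code */
		break;
	}
}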
Example 4
/*
 * get_throw_trampoline:
 *
 *   Generate a call to mono_x86_throw_exception/mono_x86_throw_corlib_exception.
 * If LLVM is true, generate code which assumes the caller is LLVM generated code,
 * which doesn't push the arguments.
 */
static guint8*
get_throw_trampoline (const char *name, gboolean rethrow, gboolean llvm, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, MonoTrampInfo **info, gboolean aot)
{
	guint8 *start, *code;
	int i, stack_size, stack_offset, arg_offsets [5], regs_offset;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	start = code = mono_global_codeman_reserve (128);

	stack_size = 128;

	/*
	 * On Apple, the stack is misaligned by the push of the return address.
	 */
	if (!llvm && corlib)
		/* On OSX, we don't generate alignment code to save space */
		stack_size += 4;
	else
		stack_size += MONO_ARCH_FRAME_ALIGNMENT - 4;

	/*
	 * The stack looks like this:
	 * <pc offset> (only if corlib is TRUE)
	 * <exception object>/<type token>
	 * <return addr> <- esp (unaligned on apple)
	 */

	mono_add_unwind_op_def_cfa (unwind_ops, (guint8*)NULL, (guint8*)NULL, X86_ESP, 4);
	mono_add_unwind_op_offset (unwind_ops, (guint8*)NULL, (guint8*)NULL, X86_NREG, -4);

	/* Alloc frame */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, stack_size);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 4);

	arg_offsets [0] = 0;
	arg_offsets [1] = 4;
	arg_offsets [2] = 8;
	arg_offsets [3] = 12;
	regs_offset = 16;

	/* Save registers */
	for (i = 0; i < X86_NREG; ++i)
		if (i != X86_ESP)
			x86_mov_membase_reg (code, X86_ESP, regs_offset + (i * 4), i, 4);
	/* Calculate the offset between the current sp and the sp of the caller */
	if (llvm) {
		/* LLVM doesn't push the arguments */
		stack_offset = stack_size + 4;
	} else {
		if (corlib) {
			/* Two arguments */
			stack_offset = stack_size + 4 + 8;
#ifdef __APPLE__
			/* We don't generate stack alignment code on osx to save space */
#endif
		} else {
			/* One argument */
			stack_offset = stack_size + 4 + 4;
#ifdef __APPLE__
			/* Pop the alignment added by OP_THROW too */
			stack_offset += MONO_ARCH_FRAME_ALIGNMENT - 4;
#endif
		}
	}
	/* Save ESP */
	x86_lea_membase (code, X86_EAX, X86_ESP, stack_offset);
	x86_mov_membase_reg (code, X86_ESP, regs_offset + (X86_ESP * 4), X86_EAX, 4);

	/* Set arg1 == regs */
	x86_lea_membase (code, X86_EAX, X86_ESP, regs_offset);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [0], X86_EAX, 4);
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		x86_mov_reg_imm (code, X86_EAX, 0);
	else
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size + 4, 4);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [1], X86_EAX, 4);
	/* Set arg3 == eip */
	if (llvm_abs)
		x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
	else
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size, 4);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [2], X86_EAX, 4);
	/* Set arg4 == rethrow/pc_offset */
	if (resume_unwind) {
		x86_mov_membase_imm (code, X86_ESP, arg_offsets [3], 0, 4);
	} else if (corlib) {
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size + 8, 4);
		if (llvm_abs) {
			/* 
			 * The caller is LLVM code which passes the absolute address not a pc offset,
			 * so compensate by passing 0 as 'ip' and passing the negated abs address as
			 * the pc offset.
			 */
			x86_neg_reg (code, X86_EAX);
		}
		x86_mov_membase_reg (code, X86_ESP, arg_offsets [3], X86_EAX, 4);
	} else {
		x86_mov_membase_imm (code, X86_ESP, arg_offsets [3], rethrow, 4);
	}
	/* Make the call */
	if (aot) {
		// This can be called from runtime code, which can't guarantee that
		// ebx contains the got address.
		// So emit the got address loading code too
		code = mono_arch_emit_load_got_addr (start, code, NULL, &ji);
		code = mono_arch_emit_load_aotconst (start, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_x86_throw_corlib_exception" : "mono_x86_throw_exception");
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, resume_unwind ? (gpointer)mono_x86_resume_unwind : (corlib ? (gpointer)mono_x86_throw_corlib_exception : (gpointer)mono_x86_throw_exception));
	}
	x86_breakpoint (code);

	g_assert ((code - start) < 128);

	if (info)
		*info = mono_tramp_info_create (g_strdup (name), start, code - start, ji, unwind_ops);

	return start;
}
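
For context, this generator is intended to be instantiated once per flag combination. The wrappers below are a minimal sketch of such call sites; Mono's real entry points follow this pattern, but the exact name strings passed here are assumptions.

/* Illustrative instantiations; the name strings are assumed */
gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline ("throw_exception", FALSE, FALSE, FALSE, FALSE, FALSE, info, aot);
}

gpointer
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
{
	/* rethrow == TRUE, so arg4 marks the exception as rethrown */
	return get_throw_trampoline ("rethrow_exception", TRUE, FALSE, FALSE, FALSE, FALSE, info, aot);
}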