Example #1
/*
 * get_throw_trampoline:
 *
 *  Generate a call to mono_x86_throw_exception/mono_x86_throw_corlib_exception.
 *  If 'llvm' is TRUE, generate code which assumes the caller is LLVM-generated
 *  code, which doesn't push the arguments.
 */
static guint8*
get_throw_trampoline (const char *name, gboolean rethrow, gboolean llvm, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, MonoTrampInfo **info, gboolean aot)
{
	guint8 *start, *code, *labels [16];
	int i, stack_size, stack_offset, arg_offsets [5], regs_offset;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	guint kMaxCodeSize = 192;

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	stack_size = 128;

	/* 
	 * On Apple, the stack is misaligned by the pushing of the return address.
	 */
	if (!llvm && corlib)
		/* On OSX, we don't generate alignment code to save space */
		stack_size += 4;
	else
		stack_size += MONO_ARCH_FRAME_ALIGNMENT - 4;

	/*
	 * The stack looks like this:
	 * <pc offset> (only if corlib is TRUE)
	 * <exception object>/<type token>
	 * <return addr> <- esp (unaligned on Apple)
	 */

	unwind_ops = mono_arch_get_cie_program ();

	/* Alloc frame */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, stack_size);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 4);

	arg_offsets [0] = 0;
	arg_offsets [1] = 4;
	arg_offsets [2] = 8;
	arg_offsets [3] = 12;
	regs_offset = 16;
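	/*
	 * Frame layout: the first four slots (offsets 0-12) form the outgoing
	 * stack-argument area for the cdecl call to the C throw function; the
	 * register dump passed as its 'regs' argument starts at offset 16.
	 */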

	/* Save registers */
	for (i = 0; i < X86_NREG; ++i)
		if (i != X86_ESP)
			x86_mov_membase_reg (code, X86_ESP, regs_offset + (i * 4), i, 4);
	/* Calculate the offset between the current sp and the sp of the caller */
	if (llvm) {
		/* LLVM doesn't push the arguments */
		stack_offset = stack_size + 4;
	} else {
		if (corlib) {
			/* Two arguments */
			stack_offset = stack_size + 4 + 8;
#ifdef __APPLE__
			/* We don't generate stack alignment code on osx to save space */
#endif
		} else {
			/* One argument + stack alignment */
			stack_offset = stack_size + 4 + 4;
#ifdef __APPLE__
			/* Pop the alignment added by OP_THROW too */
			stack_offset += MONO_ARCH_FRAME_ALIGNMENT - 4;
#else
			if (mono_do_x86_stack_align)
				stack_offset += MONO_ARCH_FRAME_ALIGNMENT - 4;
#endif
		}
	}
	/* Save ESP */
	x86_lea_membase (code, X86_EAX, X86_ESP, stack_offset);
	x86_mov_membase_reg (code, X86_ESP, regs_offset + (X86_ESP * 4), X86_EAX, 4);

	/* Clear fp stack */
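	/* fnstsw stores the x87 status word in AX; bits 11-13 hold the TOP-of-stack
	   field, so keep popping one register (fstp st0) until TOP returns to zero,
	   i.e. the fp stack is empty. */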
	labels [0] = code;
	x86_fnstsw (code);
	x86_shift_reg_imm (code, X86_SHR, X86_EAX, 11);
	x86_alu_reg_imm (code, X86_AND, X86_EAX, 7);
	x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0);
	labels [1] = code;
	x86_branch8 (code, X86_CC_EQ, 0, FALSE);
	x86_fstp (code, 0);
	x86_jump_code (code, labels [0]);
	mono_x86_patch (labels [1], code);

	/* Set arg1 == regs */
	x86_lea_membase (code, X86_EAX, X86_ESP, regs_offset);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [0], X86_EAX, 4);
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		x86_mov_reg_imm (code, X86_EAX, 0);
	else
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size + 4, 4);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [1], X86_EAX, 4);
	/* Set arg3 == eip */
	if (llvm_abs)
		x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
	else
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size, 4);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [2], X86_EAX, 4);
	/* Set arg4 == rethrow/pc_offset */
	if (resume_unwind) {
		x86_mov_membase_imm (code, X86_ESP, arg_offsets [3], 0, 4);
	} else if (corlib) {
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size + 8, 4);
		if (llvm_abs) {
			/* 
			 * The caller is LLVM code which passes the absolute address not a pc offset,
			 * so compensate by passing 0 as 'ip' and passing the negated abs address as
			 * the pc offset.
			 */
			x86_neg_reg (code, X86_EAX);
		}
		x86_mov_membase_reg (code, X86_ESP, arg_offsets [3], X86_EAX, 4);
	} else {
		x86_mov_membase_imm (code, X86_ESP, arg_offsets [3], rethrow, 4);
	}
	/* Make the call */
	if (aot) {
		// This can be called from runtime code, which can't guarantee that
		// ebx contains the got address.
		// So emit the got address loading code too
		code = mono_arch_emit_load_got_addr (start, code, NULL, &ji);
		code = mono_arch_emit_load_aotconst (start, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_x86_throw_corlib_exception" : "mono_x86_throw_exception");
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, resume_unwind ? (gpointer)(mono_x86_resume_unwind) : (corlib ? (gpointer)mono_x86_throw_corlib_exception : (gpointer)mono_x86_throw_exception));
	}
	x86_breakpoint (code);

	g_assert ((code - start) < kMaxCodeSize);

	if (info)
		*info = mono_tramp_info_create (name, start, code - start, ji, unwind_ops);
	else {
		GSList *l;

		for (l = unwind_ops; l; l = l->next)
			g_free (l->data);
		g_slist_free (unwind_ops);
	}

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	return start;
}
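
/*
 * For context, a sketch of how the runtime presumably wraps this helper (the
 * shape matches Mono's mono_arch_get_throw_exception; treat the exact flag
 * combination as an assumption):
 */
gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	/* name, rethrow, llvm, corlib, llvm_abs, resume_unwind, info, aot */
	return get_throw_trampoline ("throw_exception", FALSE, FALSE, FALSE, FALSE, FALSE, info, aot);
}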
Example #2
gpointer
mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int buf_len, cfa_offset;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	guint8 *br_out, *br [16];
	int info_offset, mrgctx_offset;

	buf_len = 320;
	buf = code = mono_global_codeman_reserve (buf_len);

	/*
	 * This trampoline is responsible for marshalling calls between normal code and gsharedvt code. The
	 * caller is a normal or gshared method which uses the signature of the inflated method to make the call, while
	 * the callee is a gsharedvt method which has a signature which uses valuetypes in place of type parameters, i.e.
	 * caller:
	 * foo<bool> (bool b)
	 * callee:
	 * T=<type used to represent vtype type arguments, currently TypedByRef>
	 * foo<T> (T b)
	 * The trampoline is responsible for marshalling the arguments and marshalling the result back. To simplify
	 * things, we create our own stack frame, and do most of the work in a C function, which receives a
	 * GSharedVtCallInfo structure as an argument. The structure should contain enough precomputed information
	 * to make the C function as fast as possible. The argument is received in EAX from a gsharedvt
	 * trampoline, so the real call sequence looks like this:
	 * caller -> gsharedvt trampoline -> gsharedvt in trampoline -> start_gsharedvt_call
	 * FIXME: Optimize this.
	 */
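
	/*
	 * Fields of GSharedVtCallInfo used below (a partial sketch, not the full
	 * runtime definition; offsets are taken via MONO_STRUCT_OFFSET):
	 * stack_usage, gsharedvt_in, ret_marshal, vret_slot and vret_arg_slot.
	 */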

	cfa_offset = sizeof (gpointer);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -cfa_offset);
	x86_push_reg (code, X86_EBP);
	cfa_offset += sizeof (gpointer);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, - cfa_offset);
	x86_mov_reg_reg (code, X86_EBP, X86_ESP, sizeof (gpointer));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);
	/* Alloc stack frame/align stack */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
	info_offset = -4;
	mrgctx_offset = - 8;
	/* The info struct is put into EAX by the gsharedvt trampoline */
	/* Save info struct addr */
	x86_mov_membase_reg (code, X86_EBP, info_offset, X86_EAX, 4);
	/* Save rgctx */
	x86_mov_membase_reg (code, X86_EBP, mrgctx_offset, MONO_ARCH_RGCTX_REG, 4);

	/* Allocate stack area used to pass arguments to the method */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, stack_usage), sizeof (gpointer));
	x86_alu_reg_reg (code, X86_SUB, X86_ESP, X86_EAX);

#if 0
	/* Stack alignment check */
	x86_mov_reg_reg (code, X86_ECX, X86_ESP, 4);
	x86_alu_reg_imm (code, X86_AND, X86_ECX, MONO_ARCH_FRAME_ALIGNMENT - 1);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
	x86_branch_disp (code, X86_CC_EQ, 3, FALSE);
	x86_breakpoint (code);
#endif

	/* ecx = caller argument area */
	x86_mov_reg_reg (code, X86_ECX, X86_EBP, 4);
	x86_alu_reg_imm (code, X86_ADD, X86_ECX, 8);
	/* eax = callee argument area */
	x86_mov_reg_reg (code, X86_EAX, X86_ESP, 4);

	/* Call start_gsharedvt_call */
	/* Arg 4 */
	x86_push_membase (code, X86_EBP, mrgctx_offset);
	/* Arg3 */
	x86_push_reg (code, X86_EAX);
	/* Arg2 */
	x86_push_reg (code, X86_ECX);
	/* Arg1 */
	x86_push_membase (code, X86_EBP, info_offset);
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_x86_start_gsharedvt_call");
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, mono_x86_start_gsharedvt_call);
	}
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4 * 4);
	/* The address to call is in eax */
	/* The stack is now setup for the real call */
	/* Load info struct */
	x86_mov_reg_membase (code, X86_ECX, X86_EBP, info_offset, 4);
	/* Load rgctx */
	x86_mov_reg_membase (code, MONO_ARCH_RGCTX_REG, X86_EBP, mrgctx_offset, sizeof (gpointer));
	/* Make the call */
	x86_call_reg (code, X86_EAX);
	/* The return value is either in registers, or stored to an area beginning at sp [info->vret_slot] */
	/* EAX/EDX might contain the return value, only ECX is free */
	/* Load info struct */
	x86_mov_reg_membase (code, X86_ECX, X86_EBP, info_offset, 4);

	/* Branch to the in/out handling code */
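	/* gsharedvt_in is 1 when marshalling a call into gsharedvt code (the IN case); anything else takes the OUT path below */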
	x86_alu_membase_imm (code, X86_CMP, X86_ECX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, gsharedvt_in), 1);	
	br_out = code;
	x86_branch32 (code, X86_CC_NE, 0, TRUE);

	/*
	 * IN CASE
	 */

	/* Load ret marshal type */
	x86_mov_reg_membase (code, X86_ECX, X86_ECX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_NONE);
	br [0] = code;
	x86_branch8 (code, X86_CC_NE, 0, TRUE);

	/* Normal return, no marshalling required */
	x86_leave (code);
	x86_ret (code);

	/* Return value marshalling */
	x86_patch (br [0], code);
	/* Load info struct */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, info_offset, 4);
	/* Load 'vret_slot' */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_slot), 4);
	/* Compute ret area address */
	x86_shift_reg_imm (code, X86_SHL, X86_EAX, 2);
	x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_ESP);
	/* The callee does a ret $4, so sp is off by 4 */
	x86_alu_reg_imm (code, X86_SUB, X86_EAX, sizeof (gpointer));

	/* Branch to specific marshalling code */
	/* FIXME: Move the I4 case to the top */
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_DOUBLE_FPSTACK);
	br [1] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_FLOAT_FPSTACK);
	br [2] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_STACK_POP);
	br [3] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_I1);
	br [4] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_U1);
	br [5] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_I2);
	br [6] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_U2);
	br [7] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	/* IREGS case */
	/* Load both eax and edx for simplicity */
	x86_mov_reg_membase (code, X86_EDX, X86_EAX, sizeof (gpointer), sizeof (gpointer));
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, sizeof (gpointer));
	x86_leave (code);
	x86_ret (code);
	/* DOUBLE_FPSTACK case */
	x86_patch (br [1], code);
	x86_fld_membase (code, X86_EAX, 0, TRUE);
	x86_jump8 (code, 0);
	x86_leave (code);
	x86_ret (code);
	/* FLOAT_FPSTACK case */
	x86_patch (br [2], code);
	x86_fld_membase (code, X86_EAX, 0, FALSE);
	x86_leave (code);
	x86_ret (code);
	/* STACK_POP case */
	x86_patch (br [3], code);
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* I1 case */
	x86_patch (br [4], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, TRUE, FALSE);
	x86_leave (code);
	x86_ret (code);
	/* U1 case */
	x86_patch (br [5], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, FALSE, FALSE);
	x86_leave (code);
	x86_ret (code);
	/* I2 case */
	x86_patch (br [6], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, TRUE, TRUE);
	x86_leave (code);
	x86_ret (code);
	/* U2 case */
	x86_patch (br [7], code);
	x86_widen_membase (code, X86_EAX, X86_EAX, 0, FALSE, TRUE);
	x86_leave (code);
	x86_ret (code);

	/*
	 * OUT CASE
	 */

	x86_patch (br_out, code);
	/* Load ret marshal type into ECX */
	x86_mov_reg_membase (code, X86_ECX, X86_ECX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_NONE);
	br [0] = code;
	x86_branch8 (code, X86_CC_NE, 0, TRUE);

	/* Normal return, no marshalling required */
	x86_leave (code);
	x86_ret (code);

	/* Return value marshalling */
	x86_patch (br [0], code);

	/* EAX might contain the return value */
	// FIXME: Use moves
	x86_push_reg (code, X86_EAX);

	/* Load info struct */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, info_offset, 4);
	/* Load 'vret_arg_slot' */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_slot), 4);
	/* Compute ret area address in the caller frame in EAX */
	x86_shift_reg_imm (code, X86_SHL, X86_EAX, 2);
	x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_EBP);
	x86_alu_reg_imm (code, X86_ADD, X86_EAX, 8);
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, sizeof (gpointer));

	/* Branch to specific marshalling code */
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_DOUBLE_FPSTACK);
	br [1] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_FLOAT_FPSTACK);
	br [2] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_STACK_POP);
	br [3] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	x86_alu_reg_imm (code, X86_CMP, X86_ECX, GSHAREDVT_RET_IREGS);
	br [4] = code;
	x86_branch8 (code, X86_CC_E, 0, TRUE);
	/* IREG case */
	x86_mov_reg_reg (code, X86_ECX, X86_EAX, sizeof (gpointer));
	x86_pop_reg (code, X86_EAX);
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EAX, sizeof (gpointer));
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* IREGS case */
	x86_patch (br [4], code);
	x86_mov_reg_reg (code, X86_ECX, X86_EAX, sizeof (gpointer));
	x86_pop_reg (code, X86_EAX);
	x86_mov_membase_reg (code, X86_ECX, sizeof (gpointer), X86_EDX, sizeof (gpointer));
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EAX, sizeof (gpointer));
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* DOUBLE_FPSTACK case */
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
	x86_patch (br [1], code);
	x86_fst_membase (code, X86_EAX, 0, TRUE, TRUE);
	x86_jump8 (code, 0);
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* FLOAT_FPSTACK case */
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
	x86_patch (br [2], code);
	x86_fst_membase (code, X86_EAX, 0, FALSE, TRUE);
	x86_leave (code);
	x86_ret_imm (code, 4);
	/* STACK_POP case */
	x86_patch (br [3], code);
	x86_leave (code);
	x86_ret_imm (code, 4);

	g_assert ((code - buf) < buf_len);

	if (info)
		*info = mono_tramp_info_create ("gsharedvt_trampoline", buf, code - buf, ji, unwind_ops);

	mono_arch_flush_icache (buf, code - buf);
	return buf;
}
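
/*
 * The GSHAREDVT_RET_* values compared against above are the runtime's return
 * marshalling discriminators (no marshalling, x87 fp-stack returns, callee
 * stack pop, and the signed/unsigned integer widenings); their enum definition
 * lives elsewhere in the runtime and is not reproduced here.
 */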
Example #3
gpointer
mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int buf_len, cfa_offset;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	int n_arg_regs, n_arg_fregs, framesize, i;
	int info_offset, offset, rgctx_arg_reg_offset;
	int caller_reg_area_offset, callee_reg_area_offset, callee_stack_area_offset;
	guint8 *br_out, *br [64], *br_ret [64];
	int b_ret_index;
	int reg_area_size;

	buf_len = 2048;
	buf = code = mono_global_codeman_reserve (buf_len + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);

	/*
	 * We are being called by a gsharedvt arg trampoline; the info argument is in AMD64_RAX.
	 */
	n_arg_regs = PARAM_REGS;
	n_arg_fregs = FLOAT_PARAM_REGS;

	/* Compute stack frame size and offsets */
	offset = 0;
	/* info reg */
	info_offset = offset;
	offset += 8;

	/* rgctx reg */
	rgctx_arg_reg_offset = offset;
	offset += 8;

	/* callconv in regs */
	caller_reg_area_offset = offset;
	reg_area_size = ALIGN_TO ((n_arg_regs + n_arg_fregs) * 8, MONO_ARCH_FRAME_ALIGNMENT);
	offset += reg_area_size;

	framesize = offset;

	g_assert (framesize % MONO_ARCH_FRAME_ALIGNMENT == 0);
	g_assert (reg_area_size % MONO_ARCH_FRAME_ALIGNMENT == 0);

	/* unwind markers 1/3 */
	cfa_offset = sizeof (gpointer);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -cfa_offset);

	/* save the old frame pointer */
	amd64_push_reg (code, AMD64_RBP);

	/* unwind markers 2/3 */
	cfa_offset += sizeof (gpointer);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	/* set it as the new frame pointer */
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));

	/* unwind markers 3/3 */
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	mono_add_unwind_op_fp_alloc (unwind_ops, code, buf, AMD64_RBP, 0);

	/* setup the frame */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
	
	/* save stuff */

	/* save info */
	amd64_mov_membase_reg (code, AMD64_RSP, info_offset, AMD64_RAX, sizeof (mgreg_t));
	/* save rgctx */
	amd64_mov_membase_reg (code, AMD64_RSP, rgctx_arg_reg_offset, MONO_ARCH_RGCTX_REG, sizeof (mgreg_t));

	for (i = 0; i < n_arg_regs; ++i)
		amd64_mov_membase_reg (code, AMD64_RSP, caller_reg_area_offset + i * 8, param_regs [i], sizeof (mgreg_t));

	for (i = 0; i < n_arg_fregs; ++i)
		amd64_sse_movsd_membase_reg (code, AMD64_RSP, caller_reg_area_offset + (i + n_arg_regs) * 8, i);

	/* TODO Allocate stack area used to pass arguments to the method */


	/* Allocate callee register area just below the caller area so it can be accessed from start_gsharedvt_call using negative offsets */
	/* XXX figure out alignment */
	callee_reg_area_offset = reg_area_size - ((n_arg_regs + n_arg_fregs) * 8); /* Ensure alignment */
	callee_stack_area_offset = callee_reg_area_offset + reg_area_size;
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, reg_area_size);

	/* Allocate stack area used to pass arguments to the method */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, stack_usage), 4);
	amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, AMD64_R11);

	/* The stack now looks like this:

	<caller stack params area>
	<return address>
	<old frame pointer>
	<caller registers area>
	<rgctx>
	<gsharedvt info>
	<callee stack area>
	<callee reg area>
	 */

	/* Call start_gsharedvt_call () */
	/* arg1 == info */
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RAX, sizeof(mgreg_t));
	/* arg2 = caller stack area */
	amd64_lea_membase (code, MONO_AMD64_ARG_REG2, AMD64_RBP, -(framesize - caller_reg_area_offset)); 

	/* arg3 == callee stack area */
	amd64_lea_membase (code, MONO_AMD64_ARG_REG3, AMD64_RSP, callee_reg_area_offset);

	/* arg4 = mrgctx reg */
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG4, MONO_ARCH_RGCTX_REG, sizeof(mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_start_gsharedvt_call");
		#ifdef TARGET_WIN32
			/* Since we are doing a call as part of setting up the stack frame, the reserved shadow space required by the
			Windows platform would be allocated in the callee stack area, but currently the callee reg area is in between.
			The Windows calling convention dictates that room is made on the stack where the callee can save any parameters
			passed in registers. Since the Windows x64 calling convention uses 4 registers for the first 4 parameters,
			the stack needs to be adjusted before making the call.
			NOTE: the Windows calling convention assumes that space for all 4 registers has been reserved, regardless
			of the number of function parameters actually used.
			*/
			int shadow_reg_size = 0;

			shadow_reg_size = ALIGN_TO (PARAM_REGS * sizeof(gpointer), MONO_ARCH_FRAME_ALIGNMENT);
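			/* With Windows x64's 4 parameter registers this is ALIGN_TO (4 * 8, 16) = 32 bytes, assuming 16-byte frame alignment */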
			amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, shadow_reg_size);
			amd64_call_reg (code, AMD64_R11);
			amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, shadow_reg_size);
		#else
			amd64_call_reg (code, AMD64_R11);
		#endif
	} else {
		amd64_call_code (code, mono_amd64_start_gsharedvt_call);
	}

	/* Method to call is now on RAX. Restore regs and jump */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, sizeof(mgreg_t));

	for (i = 0; i < n_arg_regs; ++i)
		amd64_mov_reg_membase (code, param_regs [i], AMD64_RSP, callee_reg_area_offset + i * 8, sizeof (mgreg_t));

	for (i = 0; i < n_arg_fregs; ++i)
		amd64_sse_movsd_reg_membase (code, i, AMD64_RSP, callee_reg_area_offset + (i + n_arg_regs) * 8);

	//load rgctx
	amd64_mov_reg_membase (code, MONO_ARCH_RGCTX_REG, AMD64_RBP, -(framesize - rgctx_arg_reg_offset), sizeof (mgreg_t));

	/* Clear callee reg area */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, reg_area_size);

	/* Call the thing */
	amd64_call_reg (code, AMD64_R11);

	/* Marshal return value. Available registers: R10 and R11 */
	/* Load info struct */
	amd64_mov_reg_membase (code, AMD64_R10, AMD64_RBP, -(framesize - info_offset), sizeof (mgreg_t));

	/* Branch to the in/out handling code */
	amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, gsharedvt_in), 1, 4);

	b_ret_index = 0;
	br_out = code;
	x86_branch32 (code, X86_CC_NE, 0, TRUE);

	/*
	 * IN CASE
	 */

	/* Load vret_slot */
	/* Use first input parameter register as scratch since it is volatile on all platforms */
	amd64_mov_reg_membase (code, MONO_AMD64_ARG_REG1, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_slot), 4);
	amd64_alu_reg_imm (code, X86_SUB, MONO_AMD64_ARG_REG1, n_arg_regs + n_arg_fregs);
	amd64_shift_reg_imm (code, X86_SHL, MONO_AMD64_ARG_REG1, 3);

	/* vret address is RBP - (framesize - caller_reg_area_offset) */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof(mgreg_t));
	amd64_alu_reg_reg (code, X86_ADD, AMD64_R11, MONO_AMD64_ARG_REG1);

	/* Load ret marshal type into R10 (the vret address was computed into R11 above) */
	amd64_mov_reg_membase (code, AMD64_R10, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);
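	/* Dispatch: a linear cmp/branch ladder over all ret marshal types; each br [i] is patched to its handler below */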

	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		amd64_alu_reg_imm (code, X86_CMP, AMD64_R10, i);
		br [i] = code;
		amd64_branch8 (code, X86_CC_EQ, 0, TRUE);
	}
	x86_breakpoint (code); /* unhandled case */

	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		mono_amd64_patch (br [i], code);
		switch (i) {
		case GSHAREDVT_RET_NONE:
			break;
		case GSHAREDVT_RET_I1:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, TRUE, FALSE);
			break;
		case GSHAREDVT_RET_U1:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, FALSE, FALSE);
			break;
		case GSHAREDVT_RET_I2:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, TRUE, TRUE);
			break;
		case GSHAREDVT_RET_U2:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, FALSE, TRUE);
			break;
		case GSHAREDVT_RET_I4: // CORRECT
		case GSHAREDVT_RET_U4: // THIS IS INCORRECT. WHY IS IT NOT FAILING?
			amd64_movsxd_reg_membase (code, AMD64_RAX, AMD64_R11, 0);
			break;
		case GSHAREDVT_RET_I8:
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 0, 8);
			break;
		case GSHAREDVT_RET_IREGS_1:
			amd64_mov_reg_membase (code, return_regs [i - GSHAREDVT_RET_IREGS_1], AMD64_R11, 0, 8);
			break;
		case GSHAREDVT_RET_R8:
			amd64_sse_movsd_reg_membase (code, AMD64_XMM0, AMD64_R11, 0);
			break;
		default:
			x86_breakpoint (code); /* can't handle specific case */
		}

		br_ret [b_ret_index ++] = code;
		x86_jump32 (code, 0);
	}

	/*
	 * OUT CASE
	 */
	mono_amd64_patch (br_out, code);

	/*
		Address to write return to is in the original value of the register specified by vret_arg_reg.
		This will be either RSI, RDI (System V) or RCX, RDX (Windows) depending on whether this is a static call.
		Its location:
		We alloc 'framesize' bytes below RBP to save regs, info and rgctx. RSP = RBP - framesize
		We store RDI (System V), RCX (Windows) at RSP + caller_reg_area_offset + slot_index_of (register) * 8.

		address: RBP - framesize + caller_reg_area_offset + 8*slot
	*/

	int caller_vret_offset = caller_reg_area_offset - framesize;

	/* Load vret address in R11 */
	/* Position to return to is passed as a hidden argument. Load 'vret_arg_reg' to find it */
	amd64_movsxd_reg_membase (code, AMD64_R11, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));

	// In the GSHAREDVT_RET_NONE case, vret_arg_reg is -1. In this case, skip marshalling.
	amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
	br_ret [b_ret_index ++] = code;
	amd64_branch32 (code, X86_CC_LT, 0, TRUE);

	/* Compute ret area address in the caller frame: *(gpointer*)(RBP + caller_vret_offset + R11 * 8) */
	amd64_shift_reg_imm (code, X86_SHL, AMD64_R11, 3);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, caller_vret_offset);
	amd64_alu_reg_reg (code, X86_ADD, AMD64_R11, AMD64_RBP);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof (gpointer));

	/* Load ret marshal type in R10 */
	amd64_mov_reg_membase (code, AMD64_R10, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);

	// Switch table for ret_marshal value
	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		amd64_alu_reg_imm (code, X86_CMP, AMD64_R10, i);
		br [i] = code;
		amd64_branch8 (code, X86_CC_EQ, 0, TRUE);
	}
	x86_breakpoint (code); /* unhandled case */

	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		mono_amd64_patch (br [i], code);
		switch (i) {
		case GSHAREDVT_RET_NONE:
			break;
		case GSHAREDVT_RET_IREGS_1:
			amd64_mov_membase_reg (code, AMD64_R11, 0, return_regs [i - GSHAREDVT_RET_IREGS_1], 8);
			break;
		case GSHAREDVT_RET_R8:
			amd64_sse_movsd_membase_reg (code, AMD64_R11, 0, AMD64_XMM0);
			break;
		default:
			x86_breakpoint (code); /* can't handle specific case */
		}

		br_ret [b_ret_index ++] = code;
		x86_jump32 (code, 0);
	}

	/* exit path */
	for (i = 0; i < b_ret_index; ++i)
		mono_amd64_patch (br_ret [i], code);

	/* Exit code path */
#ifdef TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
#else
	amd64_leave (code);
#endif
	amd64_ret (code);

	g_assert ((code - buf) < buf_len);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	if (info)
		*info = mono_tramp_info_create ("gsharedvt_trampoline", buf, code - buf, ji, unwind_ops);

	mono_arch_flush_icache (buf, code - buf);
	return buf;
}
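
/*
 * Presumed usage (sketch): the runtime creates this trampoline once and
 * registers its unwind info, e.g.
 *
 *   MonoTrampInfo *info;
 *   gpointer addr = mono_arch_get_gsharedvt_trampoline (&info, FALSE);
 *   mono_tramp_info_register (info, NULL);
 *
 * mono_tramp_info_register is Mono's usual companion call; the exact
 * registration sequence shown here is an assumption.
 */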
Example #4
/*
 * mono_arch_create_sdb_trampoline:
 *
 *   Return a trampoline which captures the current context, passes it to
 * mini_get_dbg_callbacks ()->single_step_from_context ()/mini_get_dbg_callbacks ()->breakpoint_from_context (),
 * then restores the (potentially changed) context.
 */
guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
	int tramp_size = 256;
	int framesize, ctx_offset, cfa_offset;
	guint8 *code, *buf;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	code = buf = mono_global_codeman_reserve (tramp_size);

	framesize = 0;

	/* Argument area */
	framesize += sizeof (target_mgreg_t);

	framesize = ALIGN_TO (framesize, 8);
	ctx_offset = framesize;
	framesize += sizeof (MonoContext);

	framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);

	// CFA = sp + 4
	cfa_offset = 4;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, 4);
	// IP saved at CFA - 4
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -cfa_offset);

	x86_push_reg (code, X86_EBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, - cfa_offset);

	x86_mov_reg_reg (code, X86_EBP, X86_ESP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);
	/* The + 8 makes the stack aligned */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, framesize + 8);

	/* Initialize a MonoContext structure on the stack */
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eax), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebx), X86_EBX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ecx), X86_ECX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edx), X86_EDX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, 0, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebp), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_reg (code, X86_EAX, X86_EBP);
	x86_alu_reg_imm (code, X86_ADD, X86_EAX, cfa_offset);
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esp), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esi), X86_ESI, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edi), X86_EDI, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, 4, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eip), X86_EAX, sizeof (target_mgreg_t));

	/* Call the single step/breakpoint function in sdb */
	x86_lea_membase (code, X86_EAX, X86_ESP, ctx_offset);
	x86_mov_membase_reg (code, X86_ESP, 0, X86_EAX, sizeof (target_mgreg_t));
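	/* The single MonoContext* argument is stored at [esp+0], matching the cdecl stack-argument convention of the *_from_context callbacks */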

	if (aot) {
		x86_breakpoint (code);
	} else {
		if (single_step)
			x86_call_code (code, mini_get_dbg_callbacks ()->single_step_from_context);
		else
			x86_call_code (code, mini_get_dbg_callbacks ()->breakpoint_from_context);
	}

	/* Restore registers from ctx */
	/* Overwrite the saved ebp */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebp), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, 0, X86_EAX, sizeof (target_mgreg_t));
	/* Overwrite saved eip */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eip), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, 4, X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eax), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EBX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebx), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ECX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ecx), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EDX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edx), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ESI, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esi), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EDI, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edi), sizeof (target_mgreg_t));

	x86_leave (code);
	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	x86_ret (code);

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
	g_assert (code - buf <= tramp_size);

	const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}
Example #5
/*
 * get_throw_trampoline:
 *
 *  Generate a call to mono_x86_throw_exception/mono_x86_throw_corlib_exception.
 *  If 'llvm' is TRUE, generate code which assumes the caller is LLVM-generated
 *  code, which doesn't push the arguments.
 */
static guint8*
get_throw_trampoline (const char *name, gboolean rethrow, gboolean llvm, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, MonoTrampInfo **info, gboolean aot)
{
	guint8 *start, *code;
	int i, stack_size, stack_offset, arg_offsets [5], regs_offset;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	start = code = mono_global_codeman_reserve (128);

	stack_size = 128;

	/* 
	 * On Apple, the stack is misaligned by the pushing of the return address.
	 */
	if (!llvm && corlib)
		/* On OSX, we don't generate alignment code to save space */
		stack_size += 4;
	else
		stack_size += MONO_ARCH_FRAME_ALIGNMENT - 4;

	/*
	 * The stack looks like this:
	 * <pc offset> (only if corlib is TRUE)
	 * <exception object>/<type token>
	 * <return addr> <- esp (unaligned on Apple)
	 */

	mono_add_unwind_op_def_cfa (unwind_ops, (guint8*)NULL, (guint8*)NULL, X86_ESP, 4);
	mono_add_unwind_op_offset (unwind_ops, (guint8*)NULL, (guint8*)NULL, X86_NREG, -4);

	/* Alloc frame */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, stack_size);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 4);

	arg_offsets [0] = 0;
	arg_offsets [1] = 4;
	arg_offsets [2] = 8;
	arg_offsets [3] = 12;
	regs_offset = 16;

	/* Save registers */
	for (i = 0; i < X86_NREG; ++i)
		if (i != X86_ESP)
			x86_mov_membase_reg (code, X86_ESP, regs_offset + (i * 4), i, 4);
	/* Calculate the offset between the current sp and the sp of the caller */
	if (llvm) {
		/* LLVM doesn't push the arguments */
		stack_offset = stack_size + 4;
	} else {
		if (corlib) {
			/* Two arguments */
			stack_offset = stack_size + 4 + 8;
#ifdef __APPLE__
			/* We don't generate stack alignment code on osx to save space */
#endif
		} else {
			/* One argument */
			stack_offset = stack_size + 4 + 4;
#ifdef __APPLE__
			/* Pop the alignment added by OP_THROW too */
			stack_offset += MONO_ARCH_FRAME_ALIGNMENT - 4;
#endif
		}
	}
	/* Save ESP */
	x86_lea_membase (code, X86_EAX, X86_ESP, stack_offset);
	x86_mov_membase_reg (code, X86_ESP, regs_offset + (X86_ESP * 4), X86_EAX, 4);

	/* Set arg1 == regs */
	x86_lea_membase (code, X86_EAX, X86_ESP, regs_offset);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [0], X86_EAX, 4);
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		x86_mov_reg_imm (code, X86_EAX, 0);
	else
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size + 4, 4);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [1], X86_EAX, 4);
	/* Set arg3 == eip */
	if (llvm_abs)
		x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
	else
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size, 4);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [2], X86_EAX, 4);
	/* Set arg4 == rethrow/pc_offset */
	if (resume_unwind) {
		x86_mov_membase_imm (code, X86_ESP, arg_offsets [3], 0, 4);
	} else if (corlib) {
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size + 8, 4);
		if (llvm_abs) {
			/* 
			 * The caller is LLVM code which passes the absolute address not a pc offset,
			 * so compensate by passing 0 as 'ip' and passing the negated abs address as
			 * the pc offset.
			 */
			x86_neg_reg (code, X86_EAX);
		}
		x86_mov_membase_reg (code, X86_ESP, arg_offsets [3], X86_EAX, 4);
	} else {
		x86_mov_membase_imm (code, X86_ESP, arg_offsets [3], rethrow, 4);
	}
	/* Make the call */
	if (aot) {
		// This can be called from runtime code, which can't guarantee that
		// ebx contains the got address.
		// So emit the got address loading code too
		code = mono_arch_emit_load_got_addr (start, code, NULL, &ji);
		code = mono_arch_emit_load_aotconst (start, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_x86_throw_corlib_exception" : "mono_x86_throw_exception");
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, resume_unwind ? (gpointer)mono_x86_resume_unwind : (corlib ? (gpointer)mono_x86_throw_corlib_exception : (gpointer)mono_x86_throw_exception));
	}
	x86_breakpoint (code);

	g_assert ((code - start) < 128);

	if (info)
		*info = mono_tramp_info_create (g_strdup (name), start, code - start, ji, unwind_ops);

	return start;
}
Example #6
/*
 * get_throw_exception:
 *
 *  Generate a call to mono_x86_throw_exception/mono_x86_throw_corlib_exception.
 *  If 'llvm' is TRUE, generate code which assumes the caller is LLVM-generated
 *  code, which doesn't push the arguments.
 */
static guint8*
get_throw_exception (const char *name, gboolean rethrow, gboolean llvm, gboolean corlib)
{
	guint8 *start, *code;
	GSList *unwind_ops = NULL;
	int i, stack_size, stack_offset, arg_offsets [5], regs_offset;

	start = code = mono_global_codeman_reserve (128);

	stack_size = 128;

	/* 
	 * On Apple, the stack is misaligned by the pushing of the return address.
	 */
	if (!llvm && corlib)
		/* On OSX, we don't generate alignment code to save space */
		stack_size += 4;
	else
		stack_size += MONO_ARCH_FRAME_ALIGNMENT - 4;

	/*
	 * The stack looks like this:
	 * <pc offset> (only if corlib is TRUE)
	 * <exception object>/<type token>
	 * <return addr> <- esp (unaligned on Apple)
	 */

	mono_add_unwind_op_def_cfa (unwind_ops, (guint8*)NULL, (guint8*)NULL, X86_ESP, 4);
	mono_add_unwind_op_offset (unwind_ops, (guint8*)NULL, (guint8*)NULL, X86_NREG, -4);

	/* Alloc frame */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, stack_size);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 4);

	arg_offsets [0] = 0;
	arg_offsets [1] = 4;
	arg_offsets [2] = 8;
	arg_offsets [3] = 12;
	regs_offset = 16;

	/* Save registers */
	for (i = 0; i < X86_NREG; ++i)
		if (i != X86_ESP)
			x86_mov_membase_reg (code, X86_ESP, regs_offset + (i * 4), i, 4);
	/* Calculate the offset between the current sp and the sp of the caller */
	if (llvm) {
		/* LLVM doesn't push the arguments */
		stack_offset = stack_size + 4;
	} else {
		if (corlib) {
			/* Two arguments */
			stack_offset = stack_size + 4 + 8;
#ifdef __APPLE__
			/* We don't generate stack alignment code on osx to save space */
#endif
		} else {
			/* One argument */
			stack_offset = stack_size + 4 + 4;
#ifdef __APPLE__
			/* Pop the alignment added by OP_THROW too */
			stack_offset += MONO_ARCH_FRAME_ALIGNMENT - 4;
#endif
		}
	}
	/* Save ESP */
	x86_lea_membase (code, X86_EAX, X86_ESP, stack_offset);
	x86_mov_membase_reg (code, X86_ESP, regs_offset + (X86_ESP * 4), X86_EAX, 4);

	/* Set arg1 == regs */
	x86_lea_membase (code, X86_EAX, X86_ESP, regs_offset);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [0], X86_EAX, 4);
	/* Set arg2 == exc */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size + 4, 4);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [1], X86_EAX, 4);
	/* Set arg3 == eip */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size, 4);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [2], X86_EAX, 4);
	if (corlib) {
		/* Set arg4 == offset */
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size + 8, 4);
		x86_mov_membase_reg (code, X86_ESP, arg_offsets [3], X86_EAX, 4);
	} else {
		/* Set arg4 == rethrow */
		x86_mov_membase_imm (code, X86_ESP, arg_offsets [3], rethrow, 4);
	}
	/* Make the call */
	x86_call_code (code, corlib ? (gpointer)mono_x86_throw_corlib_exception : (gpointer)mono_x86_throw_exception);
	x86_breakpoint (code);

	g_assert ((code - start) < 128);

	mono_save_trampoline_xdebug_info (corlib ? "llvm_throw_corlib_exception_trampoline" : "llvm_throw_exception_trampoline", start, code - start, unwind_ops);

	return start;
}