Example #1
/*
 * mono_arch_get_gsharedvt_arg_trampoline:
 *
 *   Create a trampoline which loads ARG into RAX and jumps to ADDR.
 *   See tramp-x86.c for documentation.
 */
gpointer
mono_arch_get_gsharedvt_arg_trampoline (MonoDomain *domain, gpointer arg, gpointer addr)
{
	guint8 *code, *start;
	int buf_len;

	buf_len = 32;

	start = code = mono_domain_code_reserve (domain, buf_len);

	amd64_mov_reg_imm (code, AMD64_RAX, arg);
	amd64_jump_code (code, addr);
	g_assert ((code - start) < buf_len);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), domain);

	return start;
}
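
The stub above is only two instructions, which is why the 32-byte reservation is generous. A sketch of what the reserved buffer holds after emission (mnemonics are illustrative; exact encodings come from the emitter macros):

/*
 *   mov rax, <arg>   ; materialize the hidden gsharedvt argument
 *   jmp <addr>       ; tail-jump, so the target's eventual RET returns
 *                    ; directly to the trampoline's original caller
 */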
Example #2
/*
 * get_throw_trampoline:
 *
 *   Generate a call to mono_amd64_throw_exception or
 * mono_amd64_throw_corlib_exception.
 */
static gpointer
get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
{
	guint8* start;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
	const guint kMaxCodeSize = NACL_SIZE (256, 512);

#ifdef TARGET_WIN32
	dummy_stack_space = 6 * sizeof(mgreg_t);	/* Windows expects stack space allocated for all 6 dummy args. */
#else
	dummy_stack_space = 0;
#endif

	start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* The stack is misaligned by 8 on entry (the call pushed a return address); the extra 8 below restores 16-byte alignment */
	stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;

	if (info)
		unwind_ops = mono_arch_get_cie_program ();

	/* Alloc frame */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
	if (info)
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);

	/*
	 * To hide linux/windows calling convention differences, we pass all arguments on
	 * the stack by passing 6 dummy values in registers.
	 */
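
	/*
	 * Sketch of the resulting outgoing argument area (offsets relative
	 * to RSP after the frame allocation above):
	 *
	 *   dummy_stack_space + 0    arg1: pointer to the MonoContext below
	 *   dummy_stack_space + 8    arg2: exc object / ex_token_index
	 *   dummy_stack_space + 16   arg3: rethrow flag / pc offset
	 *   ctx_offset               the MonoContext itself (gregs at regs_offset)
	 */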

	arg_offsets [0] = dummy_stack_space + 0;
	arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
	arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
	ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
	regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* Save registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (i != AMD64_RSP)
			amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
	/* Save RSP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
	/* Save IP */
	if (llvm_abs)
		amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
	else
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
	/* Set arg1 == ctx */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
	else
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
	/* Set arg3 == rethrow/pc offset */
	if (resume_unwind) {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
	} else if (corlib) {
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
		if (llvm_abs)
			/* 
			 * The caller is LLVM code which passes the absolute address not a pc offset,
			 * so compensate by passing 0 as 'rip' and passing the negated abs address as
			 * the pc offset.
			 */
			amd64_neg_membase (code, AMD64_RSP, arg_offsets [2]);
	} else {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
	}

	if (aot) {
		const char *icall_name;

		if (resume_unwind)
			icall_name = "mono_amd64_resume_unwind";
		else if (corlib)
			icall_name = "mono_amd64_throw_corlib_exception";
		else
			icall_name = "mono_amd64_throw_exception";
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11,
			resume_unwind ? (gpointer)mono_amd64_resume_unwind :
			(corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
	}
	amd64_call_reg (code, AMD64_R11);
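	/* The icall raises or resumes and never returns; trap if it ever does */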
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);

	return start;
}
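
get_throw_trampoline is a shared emitter, so the public entry points presumably wrap it with a fixed flag combination. A hedged sketch of such a wrapper (the flag values are assumptions inferred from the parameter names, not taken from this listing):

gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	/* plain throw: rethrow, corlib, llvm_abs and resume_unwind all FALSE (assumed) */
	return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
}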
Example #3
gpointer
mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *start;
	guint8 *br[1];
	gpointer throw_trampoline;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = NACL_SIZE (128, 256);

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	/* We are in the frame of a managed method after a call */
	/* 
	 * We would like to throw the pending exception in such a way that it looks to
	 * be thrown from the managed method.
	 */
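	/*
	 * Rough flow (sketch): save the volatile return-value registers
	 * (RAX, RDX, XMM0), call mono_thread_get_and_clear_pending_exception,
	 * then either hand the exception and the original managed ip to the
	 * throw trampoline, or restore the registers and resume at the
	 * original ip if no exception is pending.
	 */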

	/* Save registers which might contain the return value of the call */
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RDX);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);

	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the pending exception */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Check if it is NULL, and branch */
	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
	br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

	/* exc != NULL branch */

	/* Save the exc on the stack */
	amd64_push_reg (code, AMD64_RAX);
	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);	

	/* Load exc */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);

	/* Pop everything pushed so far: saved RAX, RDX, the XMM0 slot, the exc slot and two alignment pads (6 slots) */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);

	/* Setup arguments for the throw trampoline */
	/* Exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
	/* The trampoline expects the caller ip to be pushed on the stack */
	amd64_push_reg (code, AMD64_RAX);

	/* Call the throw trampoline */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		throw_trampoline = mono_get_throw_exception ();
		amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
	}
	/* Jump instead of call: the ip pushed above becomes the return address seen by the throw trampoline, so the throw appears to originate in the managed method */
	amd64_jump_reg (code, AMD64_R11);

	/* exc == NULL branch */
	mono_amd64_patch (br [0], code);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);	
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);

	/* Restore registers */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_RDX);
	amd64_pop_reg (code, AMD64_RAX);

	/* Return to original code */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);

	if (info)
		*info = mono_tramp_info_create ("throw_pending_exception", start, code - start, ji, unwind_ops);

	return start;
}