Example #1
/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	/* restore_context (MonoContext *ctx) */

	start = code = mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %r11 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rax), 8);
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rcx), 8);
	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rdx), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rsi), 8);
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rdi), 8);
	//amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, r8), 8);
	//amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, r9), 8);
	//amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, r10), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, r14), 8);
#if !defined(__native_client_codegen__)
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, r15), 8);
#endif

	/*
	 * The context resides on the stack, in the stack frame of the
	 * caller of this function.  The stack pointer that we need to
	 * restore is potentially many stack frames higher up, so the
	 * distance between them can easily be more than the red zone
	 * size.  Hence the stack pointer can be restored only after
	 * we have finished loading everything from the context.
	 */
	amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rsp), 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rip), 8);
	amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	nacl_global_codeman_validate(&start, 256, &code);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);

	return start;
}
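A minimal usage sketch for the trampoline returned above, assuming the Mono JIT headers are in scope. The function pointer type follows the restore_context (MonoContext *ctx) convention noted in the comment; the wrapper name is made up for illustration, and the runtime's real call sites obtain and cache the trampoline differently:

typedef void (*restore_context_fn) (MonoContext *ctx);

static void
resume_from_context_sketch (MonoContext *ctx)
{
	restore_context_fn restore;

	/* info may be NULL when the MonoTrampInfo is not needed (see the
	 * "if (info)" guard above); FALSE selects the non-AOT case. */
	restore = (restore_context_fn) mono_arch_get_restore_context (NULL, FALSE);

	/* The generated code reloads the saved registers, switches %rsp and
	 * jumps to the saved %rip, so this call never returns. */
	restore (ctx);
	g_assert_not_reached ();
}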
Example #2
/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	/* restore_context (MonoContext *ctx) */

	start = code = mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %r11 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rax), 8);
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rcx), 8);
	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rdx), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rsi), 8);
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rdi), 8);
	//amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r8), 8);
	//amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r9), 8);
	//amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r10), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r14), 8);
#if !defined(__native_client_codegen__)
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r15), 8);
#endif

	if (mono_running_on_valgrind ()) {
		/* Prevent 'Address 0x... is just below the stack ptr.' errors */
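		/* The non-Valgrind path below restores %rsp first and only then
		 * reads %rip through %r11, i.e. from context memory that may
		 * already lie below the new stack pointer, which Valgrind flags;
		 * here both values are staged in scratch registers before %rsp
		 * is switched. */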
		amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rsp), 8);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rip), 8);
		amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
	} else {
		amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rsp), 8);
		/* get return address */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rip), 8);
	}

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	nacl_global_codeman_validate(&start, 256, &code);

	mono_arch_flush_icache (start, code - start);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("restore_context"), start, code - start, ji, unwind_ops);

	return start;
}
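Example #2 reads the context through G_STRUCT_OFFSET where Examples #1 and #3 use MONO_STRUCT_OFFSET. To my understanding both reduce to offsetof for a normal native build (MONO_STRUCT_OFFSET exists so cross-compiled offsets can be substituted), so the emitted loads are the same. A small self-contained check of the glib side of that assumption (struct demo_ctx is invented for the test):

#include <glib.h>
#include <stddef.h>

/* Invented two-field struct, standing in for MonoContext. */
struct demo_ctx {
	gulong rax;
	gulong rcx;
};

int
main (void)
{
	/* G_STRUCT_OFFSET is glib's offsetof wrapper, so the two spellings pick
	 * the same byte offset for the same member. */
	g_assert (G_STRUCT_OFFSET (struct demo_ctx, rcx) == (glong) offsetof (struct demo_ctx, rcx));
	return 0;
}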
Example #3
/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, gregs_offset;

	/* restore_context (MonoContext *ctx) */

	start = code = (guint8 *)mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip, %rsp and the scratch registers %r8-%r11; %rsp and %rip are handled separately below */
	gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
	for (i = 0; i < AMD64_NREG; ++i) {
#if defined(__native_client_codegen__)
		if (i == AMD64_R15)
			continue;
#endif
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
			amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
	}

	/*
	 * The context resides on the stack, in the stack frame of the
	 * caller of this function.  The stack pointer that we need to
	 * restore is potentially many stack frames higher up, so the
	 * distance between them can easily be more than the red zone
	 * size.  Hence the stack pointer can be restored only after
	 * we have finished loading everything from the context.
	 */
	amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11,  gregs_offset + (AMD64_RSP * 8), 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11,  gregs_offset + (AMD64_RIP * 8), 8);
	amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	nacl_global_codeman_validate (&start, 256, &code);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);

	return start;
}
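Example #3 targets a newer MonoContext layout where every general purpose register, and %rip too, occupies one 8-byte slot of a single gregs array indexed by the AMD64_* register numbers; that is why the loop above can run up to AMD64_NREG and simply skip the slots it must not touch yet. A sketch of the indexing the generated loads rely on (the helper name is made up for illustration):

/* Illustrative only: the address computed here is what
 *   amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8)
 * reads from for register number i. */
static inline guint64
ctx_greg_sketch (MonoContext *ctx, int reg)
{
	guint8 *base = (guint8 *) ctx + MONO_STRUCT_OFFSET (MonoContext, gregs);

	return *(guint64 *) (base + reg * 8);
}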
Example #4
gpointer
mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *start;
	guint8 *br[1];
	gpointer throw_trampoline;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = NACL_SIZE (128, 256);

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	/* We are in the frame of a managed method after a call */
	/* 
	 * We would like to throw the pending exception in such a way that it looks to
	 * be thrown from the managed method.
	 */
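	/*
	 * Outline of the code emitted below: save the registers that may hold the
	 * call's return value, call mono_thread_get_and_clear_pending_exception,
	 * and if it returned an exception obtain the original %rip via
	 * mono_amd64_get_original_ip and tail-jump into the throw trampoline with
	 * that ip pushed as the apparent caller; otherwise restore the saved
	 * registers and jump back to the original ip.
	 */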

	/* Save registers which might contain the return value of the call */
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RDX);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);

	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the pending exception */
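	/* In AOT mode the icall address cannot be embedded as an immediate, so a
	 * %rip-relative load from a patchable slot is emitted and recorded in 'ji';
	 * the same pattern repeats for the other icalls below. */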
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Check if it is NULL, and branch */
	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
	br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

	/* exc != NULL branch */

	/* Save the exc on the stack */
	amd64_push_reg (code, AMD64_RAX);
	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);	

	/* Load exc (pushed above; it now sits at %rsp + 8, with the alignment slot at %rsp) */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);

	/* Pop the six quadwords pushed above; the saved return value registers are no longer needed since we are about to throw */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);

	/* Setup arguments for the throw trampoline */
	/* Exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
	/* The trampoline expects the caller ip to be pushed on the stack */
	amd64_push_reg (code, AMD64_RAX);

	/* Call the throw trampoline */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		throw_trampoline = mono_get_throw_exception ();
		amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
	}
	/* We use a jump instead of a call so we can push the original ip on the stack */
	amd64_jump_reg (code, AMD64_R11);

	/* exc == NULL branch */
	mono_amd64_patch (br [0], code);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);	
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);

	/* Restore registers */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_RDX);
	amd64_pop_reg (code, AMD64_RAX);

	/* Return to original code */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);

	if (info)
		*info = mono_tramp_info_create ("throw_pending_exception", start, code - start, ji, unwind_ops);

	return start;
}