Example #1
/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, gregs_offset;

	/* restore_context (MonoContext *ctx) */

	start = code = (guint8 *)mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %r11 */
	gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
	for (i = 0; i < AMD64_NREG; ++i) {
#if defined(__native_client_codegen__)
		if (i == AMD64_R15)
			continue;
#endif
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
			amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
	}

	/*
	 * The context resides on the stack, in the stack frame of the
	 * caller of this function.  The stack pointer that we need to
	 * restore is potentially many stack frames higher up, so the
	 * distance between them can easily be more than the red zone
	 * size.  Hence the stack pointer can be restored only after
	 * we have finished loading everything from the context.
	 */
	amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11,  gregs_offset + (AMD64_RSP * 8), 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11,  gregs_offset + (AMD64_RIP * 8), 8);
	amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	nacl_global_codeman_validate (&start, 256, &code);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);

	return start;
}
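A minimal usage sketch (an assumption about the call site, not part of the example): the returned buffer is cast to a function pointer taking the MonoContext to restore, and the call never returns since the stub ends in a jump to the saved %rip.

/* Hedged usage sketch only -- the real call sites live elsewhere in the runtime. */
typedef void (*RestoreContextFunc) (MonoContext *ctx);

static void
restore_context_example (MonoContext *ctx)
{
	RestoreContextFunc restore_context =
		(RestoreContextFunc) mono_arch_get_restore_context (NULL, FALSE);

	restore_context (ctx); /* jumps to ctx->gregs [AMD64_RIP]; does not return */
}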
Example #2
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	static guint8* saved = NULL;
	guint8 *code, *start;
	int cont_reg = AMD64_R9; /* register usable on both call conventions */
	const guint kMaxCodeSize = NACL_SIZE (64, 128);
	

	if (saved)
		return (MonoContinuationRestore)saved;
	code = start = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
	 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
	 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
	 * We move cont to cont_reg since we need both rcx and rdi for the copy
	 * state is moved to $rax so it's set up as the return value and we can overwrite $rsi
 	 */
	amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
	amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
	/* setup the copy of the stack */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
	amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
	x86_cld (code);
	amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
	amd64_prefix (code, X86_REP_PREFIX);
	amd64_movsl (code);

	/* now restore the registers from the LMF */
	NOT_IMPLEMENTED;
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);

	/* restore the lmf chain */
	/*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/

	/* state is already in rax */
	amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assert ((code - start) <= kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	saved = start;
	return (MonoContinuationRestore)saved;
}
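For orientation, a hedged sketch of the MonoContinuation fields the generated code reads above (the names come from the MONO_STRUCT_OFFSET uses; the layout shown is illustrative, not the real definition):

/* Illustrative field sketch, not the authoritative MonoContinuation. */
typedef struct {
	gpointer saved_stack;     /* source buffer for the rep movsq copy (loaded into %rsi) */
	gpointer return_sp;       /* destination on the original stack (loaded into %rdi) */
	gpointer return_ip;       /* address the stub jumps to at the end */
	gpointer lmf;             /* MonoLMF the callee-saved registers would be restored from */
	int      stack_used_size; /* bytes to copy back; shifted right by 3 above (8-byte words) */
} MonoContinuationSketch;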
Example #3
/*
 * get_throw_trampoline:
 *
 *  Generate a call to mono_amd64_throw_exception/
 * mono_amd64_throw_corlib_exception.
 */
static gpointer
get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
{
	guint8* start;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
	const guint kMaxCodeSize = NACL_SIZE (256, 512);

#ifdef TARGET_WIN32
	dummy_stack_space = 6 * sizeof(mgreg_t);	/* Windows expects stack space allocated for all 6 dummy args. */
#else
	dummy_stack_space = 0;
#endif

	start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* The stack is unaligned on entry */
	stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;

	code = start;

	if (info)
		unwind_ops = mono_arch_get_cie_program ();

	/* Alloc frame */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
	if (info)
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);

	/*
	 * To hide linux/windows calling convention differences, we pass all arguments on
	 * the stack by passing 6 dummy values in registers.
	 */

	arg_offsets [0] = dummy_stack_space + 0;
	arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
	arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
	ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
	regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* Save registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (i != AMD64_RSP)
			amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
	/* Save RSP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
	/* Save IP */
	if (llvm_abs)
		amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
	else
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
	/* Set arg1 == ctx */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
	else
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
	/* Set arg3 == rethrow/pc offset */
	if (resume_unwind) {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
	} else if (corlib) {
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
		if (llvm_abs)
			/* 
			 * The caller is LLVM code which passes the absolute address not a pc offset,
			 * so compensate by passing 0 as 'rip' and passing the negated abs address as
			 * the pc offset.
			 */
			amd64_neg_membase (code, AMD64_RSP, arg_offsets [2]);
	} else {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
	}

	if (aot) {
		const char *icall_name;

		if (resume_unwind)
			icall_name = "mono_amd64_resume_unwind";
		else if (corlib)
			icall_name = "mono_amd64_throw_corlib_exception";
		else
			icall_name = "mono_amd64_throw_exception";
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);

	return start;
}
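Because six dummy register arguments are passed, the real parameters land in the first stack slots on both ABIs, which is exactly where the trampoline wrote arg_offsets [0..2]. A hedged sketch of the shape the throw helpers are therefore expected to have (an assumption, not copied from the runtime headers):

/* Shape sketch only. */
void mono_amd64_throw_exception (mgreg_t dummy1, mgreg_t dummy2, mgreg_t dummy3,
				 mgreg_t dummy4, mgreg_t dummy5, mgreg_t dummy6,
				 MonoContext *mctx, MonoObject *exc, gboolean rethrow);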
Example #4
/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as 
 * @exc object in this case).
 */
gpointer
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start;
	int i, gregs_offset;
	guint8 *code;
	guint32 pos;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = NACL_SIZE (128, 256);

	start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* call_filter (MonoContext *ctx, unsigned long eip) */
	code = start;

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	pos = 0;
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
			pos += 8;
		}

	/* Save EBP */
	pos += 8;
	amd64_push_reg (code, AMD64_RBP);

	/* Make stack misaligned, the call will make it aligned again */
	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* set new EBP */
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
	/* load callee saved regs */
	for (i = 0; i < AMD64_NREG; ++i) {
#if defined(__native_client_codegen__)
		if (i == AMD64_R15)
			continue;
#endif
		if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
			amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
	}
	/* load exc register */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1,  gregs_offset + (AMD64_RAX * 8), 8);

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	/* restore RBP */
	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

	amd64_leave (code);
	amd64_ret (code);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);

	return start;
}
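A minimal usage sketch (assumed, not the actual call site): the second argument is the handler address, and for finally handlers the exception slot in the context is simply left NULL, as the docstring notes.

/* Hedged usage sketch only. */
typedef int (*CallFilterFunc) (MonoContext *ctx, gpointer handler_ip);

static int
call_filter_example (MonoContext *ctx, gpointer handler_ip)
{
	CallFilterFunc call_filter = (CallFilterFunc) mono_arch_get_call_filter (NULL, FALSE);

	return call_filter (ctx, handler_ip); /* the filter result comes back in RAX */
}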
Example #5
gpointer
mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int buf_len, cfa_offset;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	int n_arg_regs, n_arg_fregs, framesize, i;
	int info_offset, offset, rgctx_arg_reg_offset;
	int caller_reg_area_offset, callee_reg_area_offset, callee_stack_area_offset;
	guint8 *br_out, *br [64], *br_ret [64];
	int b_ret_index;
	int reg_area_size;

	buf_len = 2048;
	buf = code = mono_global_codeman_reserve (buf_len + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);

	/*
	 * We are being called by a gsharedvt arg trampoline; the info argument is in AMD64_RAX.
	 */
	n_arg_regs = PARAM_REGS;
	n_arg_fregs = FLOAT_PARAM_REGS;

	/* Compute stack frame size and offsets */
	offset = 0;
	/* info reg */
	info_offset = offset;
	offset += 8;

	/* rgctx reg */
	rgctx_arg_reg_offset = offset;
	offset += 8;

	/* callconv in regs */
	caller_reg_area_offset = offset;
	reg_area_size = ALIGN_TO ((n_arg_regs + n_arg_fregs) * 8, MONO_ARCH_FRAME_ALIGNMENT);
	offset += reg_area_size;

	framesize = offset;

	g_assert (framesize % MONO_ARCH_FRAME_ALIGNMENT == 0);
	g_assert (reg_area_size % MONO_ARCH_FRAME_ALIGNMENT == 0);

	/* unwind markers 1/3 */
	cfa_offset = sizeof (gpointer);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -cfa_offset);

	/* save the old frame pointer */
	amd64_push_reg (code, AMD64_RBP);

	/* unwind markers 2/3 */
	cfa_offset += sizeof (gpointer);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	/* set it as the new frame pointer */
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));

	/* unwind markers 3/3 */
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	mono_add_unwind_op_fp_alloc (unwind_ops, code, buf, AMD64_RBP, 0);

	/* setup the frame */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
	
	/* save stuff */

	/* save info */
	amd64_mov_membase_reg (code, AMD64_RSP, info_offset, AMD64_RAX, sizeof (mgreg_t));
	/* save rgctx */
	amd64_mov_membase_reg (code, AMD64_RSP, rgctx_arg_reg_offset, MONO_ARCH_RGCTX_REG, sizeof (mgreg_t));

	for (i = 0; i < n_arg_regs; ++i)
		amd64_mov_membase_reg (code, AMD64_RSP, caller_reg_area_offset + i * 8, param_regs [i], sizeof (mgreg_t));

	for (i = 0; i < n_arg_fregs; ++i)
		amd64_sse_movsd_membase_reg (code, AMD64_RSP, caller_reg_area_offset + (i + n_arg_regs) * 8, i);

	/* TODO Allocate stack area used to pass arguments to the method */


	/* Allocate callee register area just below the caller area so it can be accessed from start_gsharedvt_call using negative offsets */
	/* XXX figure out alignment */
	callee_reg_area_offset = reg_area_size - ((n_arg_regs + n_arg_fregs) * 8); /* Ensure alignment */
	callee_stack_area_offset = callee_reg_area_offset + reg_area_size;
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, reg_area_size);

	/* Allocate stack area used to pass arguments to the method */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, MONO_STRUCT_OFFSET (GSharedVtCallInfo, stack_usage), 4);
	amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, AMD64_R11);

	/* The stack now looks like this:

	<caller stack params area>
	<return address>
	<old frame pointer>
	<caller registers area>
	<rgctx>
	<gsharedvt info>
	<callee stack area>
	<callee reg area>
	 */

	/* Call start_gsharedvt_call () */
	/* arg1 == info */
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RAX, sizeof(mgreg_t));
	/* arg2 = caller stack area */
	amd64_lea_membase (code, MONO_AMD64_ARG_REG2, AMD64_RBP, -(framesize - caller_reg_area_offset)); 

	/* arg3 == callee stack area */
	amd64_lea_membase (code, MONO_AMD64_ARG_REG3, AMD64_RSP, callee_reg_area_offset);

	/* arg4 = mrgctx reg */
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG4, MONO_ARCH_RGCTX_REG, sizeof(mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_start_gsharedvt_call");
		#ifdef TARGET_WIN32
			/* Since we are doing a call as part of setting up the stack frame, the shadow space required by
			the Windows ABI would normally be allocated in the callee stack area, but the callee reg area
			currently sits in between. The Windows calling convention dictates that the caller makes room on
			the stack where the callee can spill any parameters passed in registers. Since the Windows x64
			calling convention uses 4 registers for the first 4 parameters, the stack needs to be adjusted
			before making the call.
			NOTE: the Windows calling convention assumes that space for all 4 registers has been reserved,
			regardless of the number of function parameters actually used.
			*/
			int shadow_reg_size = 0;

			shadow_reg_size = ALIGN_TO (PARAM_REGS * sizeof(gpointer), MONO_ARCH_FRAME_ALIGNMENT);
			amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, shadow_reg_size);
			amd64_call_reg (code, AMD64_R11);
			amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, shadow_reg_size);
		#else
			amd64_call_reg (code, AMD64_R11);
		#endif
	} else {
		amd64_call_code (code, mono_amd64_start_gsharedvt_call);
	}

	/* Method to call is now on RAX. Restore regs and jump */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, sizeof(mgreg_t));

	for (i = 0; i < n_arg_regs; ++i)
		amd64_mov_reg_membase (code, param_regs [i], AMD64_RSP, callee_reg_area_offset + i * 8, sizeof (mgreg_t));

	for (i = 0; i < n_arg_fregs; ++i)
		amd64_sse_movsd_reg_membase (code, i, AMD64_RSP, callee_reg_area_offset + (i + n_arg_regs) * 8);

	//load rgctx
	amd64_mov_reg_membase (code, MONO_ARCH_RGCTX_REG, AMD64_RBP, -(framesize - rgctx_arg_reg_offset), sizeof (mgreg_t));

	/* Clear callee reg area */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, reg_area_size);

	/* Call the thing */
	amd64_call_reg (code, AMD64_R11);

	/* Marshal return value. Available registers: R10 and R11 */
	/* Load info struct */
	amd64_mov_reg_membase (code, AMD64_R10, AMD64_RBP, -(framesize - info_offset), sizeof (mgreg_t));

	/* Branch to the in/out handling code */
	amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, gsharedvt_in), 1, 4);

	b_ret_index = 0;
	br_out = code;
	x86_branch32 (code, X86_CC_NE, 0, TRUE);

	/*
	 * IN CASE
	 */

	/* Load vret_slot */
	/* Use first input parameter register as scratch since it is volatile on all platforms */
	amd64_mov_reg_membase (code, MONO_AMD64_ARG_REG1, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_slot), 4);
	amd64_alu_reg_imm (code, X86_SUB, MONO_AMD64_ARG_REG1, n_arg_regs + n_arg_fregs);
	amd64_shift_reg_imm (code, X86_SHL, MONO_AMD64_ARG_REG1, 3);

	/* vret address is RBP - (framesize - caller_reg_area_offset) */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof(mgreg_t));
	amd64_alu_reg_reg (code, X86_ADD, AMD64_R11, MONO_AMD64_ARG_REG1);

	/* Load ret marshal type */
	/* Load vret address in R11 */
	amd64_mov_reg_membase (code, AMD64_R10, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);

	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		amd64_alu_reg_imm (code, X86_CMP, AMD64_R10, i);
		br [i] = code;
		amd64_branch8 (code, X86_CC_EQ, 0, TRUE);
	}
	x86_breakpoint (code); /* unhandled case */

	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		mono_amd64_patch (br [i], code);
		switch (i) {
		case GSHAREDVT_RET_NONE:
			break;
		case GSHAREDVT_RET_I1:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, TRUE, FALSE);
			break;
		case GSHAREDVT_RET_U1:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, FALSE, FALSE);
			break;
		case GSHAREDVT_RET_I2:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, TRUE, TRUE);
			break;
		case GSHAREDVT_RET_U2:
			amd64_widen_membase (code, AMD64_RAX, AMD64_R11, 0, FALSE, TRUE);
			break;
		case GSHAREDVT_RET_I4: // CORRECT
		case GSHAREDVT_RET_U4: // THIS IS INCORRECT. WHY IS IT NOT FAILING?
			amd64_movsxd_reg_membase (code, AMD64_RAX, AMD64_R11, 0);
			break;
		case GSHAREDVT_RET_I8:
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 0, 8);
			break;
		case GSHAREDVT_RET_IREGS_1:
			amd64_mov_reg_membase (code, return_regs [i - GSHAREDVT_RET_IREGS_1], AMD64_R11, 0, 8);
			break;
		case GSHAREDVT_RET_R8:
			amd64_sse_movsd_reg_membase (code, AMD64_XMM0, AMD64_R11, 0);
			break;
		default:
			x86_breakpoint (code); /* can't handle specific case */
		}

		br_ret [b_ret_index ++] = code;
		x86_jump32 (code, 0);
	}

	/*
	 * OUT CASE
	 */
	mono_amd64_patch (br_out, code);

	/*
		The address to write the return value to is in the original value of the register specified by
		vret_arg_reg. This will be either RSI or RDI (System V), or RCX or RDX (Windows), depending on
		whether this is a static call.
		Its location:
		We allocate 'framesize' bytes below RBP to save regs, info and rgctx, so RSP = RBP - framesize.
		We store RDI (System V) or RCX (Windows) at RSP + caller_reg_area_offset + slot_index_of (register) * 8.

		address: RBP - framesize + caller_reg_area_offset + 8 * slot
	*/

	int caller_vret_offset = caller_reg_area_offset - framesize;

	/* Load vret address in R11 */
	/* Position to return to is passed as a hidden argument. Load 'vret_arg_slot' to find it */
	amd64_movsxd_reg_membase (code, AMD64_R11, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_arg_reg));

	// In the GSHAREDVT_RET_NONE case, vret_arg_slot is -1. In this case, skip marshalling.
	amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
	br_ret [b_ret_index ++] = code;
	amd64_branch32 (code, X86_CC_LT, 0, TRUE);

	/* Compute ret area address in the caller frame, *( ((gpointer *)RBP) [R11+2] ) */
	amd64_shift_reg_imm (code, X86_SHL, AMD64_R11, 3);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, caller_vret_offset);
	amd64_alu_reg_reg (code, X86_ADD, AMD64_R11, AMD64_RBP);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof (gpointer));

	/* Load ret marshal type in R10 */
	amd64_mov_reg_membase (code, AMD64_R10, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, ret_marshal), 4);

	// Switch table for ret_marshal value
	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		amd64_alu_reg_imm (code, X86_CMP, AMD64_R10, i);
		br [i] = code;
		amd64_branch8 (code, X86_CC_EQ, 0, TRUE);
	}
	x86_breakpoint (code); /* unhandled case */

	for (i = GSHAREDVT_RET_NONE; i < GSHAREDVT_RET_NUM; ++i) {
		mono_amd64_patch (br [i], code);
		switch (i) {
		case GSHAREDVT_RET_NONE:
			break;
		case GSHAREDVT_RET_IREGS_1:
			amd64_mov_membase_reg (code, AMD64_R11, 0, return_regs [i - GSHAREDVT_RET_IREGS_1], 8);
			break;
		case GSHAREDVT_RET_R8:
			amd64_sse_movsd_membase_reg (code, AMD64_R11, 0, AMD64_XMM0);
			break;
		default:
			x86_breakpoint (code); /* can't handle specific case */
		}

		br_ret [b_ret_index ++] = code;
		x86_jump32 (code, 0);
	}

	/* exit path */
	for (i = 0; i < b_ret_index; ++i)
		mono_amd64_patch (br_ret [i], code);

	/* Exit code path */
#if TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
#else
	amd64_leave (code);
#endif
	amd64_ret (code);

	g_assert ((code - buf) < buf_len);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	if (info)
		*info = mono_tramp_info_create ("gsharedvt_trampoline", buf, code - buf, ji, unwind_ops);

	mono_arch_flush_icache (buf, code - buf);
	return buf;
}
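For reference, a hedged sketch of the GSharedVtCallInfo fields the trampoline dereferences above (names taken from the MONO_STRUCT_OFFSET uses; the layout and exact types are illustrative):

/* Illustrative field sketch, not the authoritative GSharedVtCallInfo. */
typedef struct {
	int stack_usage;   /* bytes of stack argument area to allocate for the callee */
	int vret_slot;     /* slot of the return buffer in the callee register/stack area */
	int vret_arg_reg;  /* argument-register slot holding the hidden vret address, negative if none */
	int ret_marshal;   /* GSHAREDVT_RET_* kind used to marshal the return value */
	int gsharedvt_in;  /* non-zero for the "in" direction, zero for "out" */
} GSharedVtCallInfoSketch;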
Example #6
gpointer
mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *start;
	guint8 *br[1];
	gpointer throw_trampoline;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = NACL_SIZE (128, 256);

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	/* We are in the frame of a managed method after a call */
	/* 
	 * We would like to throw the pending exception in such a way that it looks to
	 * be thrown from the managed method.
	 */

	/* Save registers which might contain the return value of the call */
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RDX);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);

	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the pending exception */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Check if it is NULL, and branch */
	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
	br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

	/* exc != NULL branch */

	/* Save the exc on the stack */
	amd64_push_reg (code, AMD64_RAX);
	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);	

	/* Load exc */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);

	/* Pop the 6 slots pushed above (two alignment slots, the saved exc, the XMM0 slot, and the saved RDX and RAX) */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);

	/* Setup arguments for the throw trampoline */
	/* Exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
	/* The trampoline expects the caller ip to be pushed on the stack */
	amd64_push_reg (code, AMD64_RAX);

	/* Call the throw trampoline */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		throw_trampoline = mono_get_throw_exception ();
		amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
	}
	/* We use a jump instead of a call so we can push the original ip on the stack */
	amd64_jump_reg (code, AMD64_R11);

	/* ex == NULL branch */
	mono_amd64_patch (br [0], code);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);	
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);

	/* Restore registers */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_RDX);
	amd64_pop_reg (code, AMD64_RAX);

	/* Return to original code */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);

	if (info)
		*info = mono_tramp_info_create ("throw_pending_exception", start, code - start, ji, unwind_ops);

	return start;
}
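The two helpers the stub calls are referenced only by name above; their assumed shapes (hedged: both take no arguments and return their result in RAX, which is what the generated code relies on):

/* Assumed declarations for orientation; the real prototypes live elsewhere in the runtime. */
MonoException *mono_thread_get_and_clear_pending_exception (void);
guint64        mono_amd64_get_original_ip (void);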
Example #7
/*
 * Perform an integer division or remainder (basically the x86 
 * version modded for 8-byte words).
 */
static void Divide(MDUnroll *unroll, int isSigned, int wantRemainder,
				      unsigned char *pc, unsigned char *label)
{
#if !defined(IL_USE_INTERRUPT_BASED_INT_DIVIDE_BY_ZERO_CHECKS)
	#define IL_NEED_DIVIDE_REEXECUTE 1
	unsigned char *patch1;
#endif

#if !defined(IL_USE_INTERRUPT_BASED_INT_OVERFLOW_CHECKS)
	#define IL_NEED_DIVIDE_REEXECUTE 1
	unsigned char  *patch2, *patch3;
#endif

	/* Get the arguments into EAX and ECX so we know where they are */
	if(unroll->pseudoStackSize != 2 ||
	   unroll->pseudoStack[0] != AMD64_RAX ||
	   unroll->pseudoStack[1] != AMD64_RCX)
	{
		FlushRegisterStack(unroll);
		unroll->stackHeight -= 2 * sizeof(CVMWord);
		amd64_mov_reg_membase(unroll->out, AMD64_RAX, MD_REG_STACK,
							unroll->stackHeight, 4);
		amd64_mov_reg_membase(unroll->out, AMD64_RCX, MD_REG_STACK,
							unroll->stackHeight + sizeof(CVMWord), 4);
		unroll->pseudoStack[0] = AMD64_RAX;
		unroll->pseudoStack[1] = AMD64_RCX;
		unroll->pseudoStackSize = 2;
		unroll->regsUsed |= ((1 << AMD64_RAX) | (1 << AMD64_RCX));
	}

	/* Check for conditions that may cause an exception */
#if !defined(IL_USE_INTERRUPT_BASED_INT_DIVIDE_BY_ZERO_CHECKS)
	amd64_alu_reg_imm_size(unroll->out, X86_CMP, AMD64_RCX, 0, 4);
	patch1 = unroll->out;
	amd64_branch8(unroll->out, X86_CC_EQ, 0, 0);
#endif

#if !defined(IL_USE_INTERRUPT_BASED_INT_OVERFLOW_CHECKS)
	amd64_alu_reg_imm_size(unroll->out, X86_CMP, AMD64_RCX, -1, 4);
	patch2 = unroll->out;
	amd64_branch32(unroll->out, X86_CC_NE, 0, 0);

	amd64_alu_reg_imm_size(unroll->out, X86_CMP, AMD64_RAX, (int)0x80000000, 4);
	patch3 = unroll->out;
	amd64_branch32(unroll->out, X86_CC_NE, 0, 0);
#endif

#if !defined(IL_USE_INTERRUPT_BASED_INT_DIVIDE_BY_ZERO_CHECKS)
	amd64_patch(patch1, unroll->out);
#endif

#if defined(IL_NEED_DIVIDE_REEXECUTE)
	/* Re-execute the division instruction to throw the exception */
	ReExecute(unroll, pc, label);
#endif

#if !defined(IL_USE_INTERRUPT_BASED_INT_OVERFLOW_CHECKS)
	amd64_patch(patch2, unroll->out);
	amd64_patch(patch3, unroll->out);
#endif

	/* Perform the division */
	if(isSigned)
	{
		amd64_cdq_size(unroll->out, 4);
	}
	else
	{
		amd64_clear_reg_size(unroll->out, AMD64_RDX, 4);
	}
	amd64_div_reg_size(unroll->out, AMD64_RCX, isSigned, 4);

	/* Pop ECX from the pseudo stack */
	FreeTopRegister(unroll, -1);

	/* If we want the remainder, then replace EAX with EDX on the stack */
	if(wantRemainder)
	{
		unroll->pseudoStack[0] = AMD64_RDX;
		unroll->regsUsed = (1 << AMD64_RDX);
	}
}
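A plain-C mirror of the guards emitted above, to make the two trap cases explicit (a hedged illustration, not part of the unroller): a 32-bit division faults on a zero divisor and on the single signed-overflow case INT32_MIN / -1, and those are the inputs routed back to the interpreter via ReExecute.

#include <stdint.h>

/* Illustration only: non-zero for exactly the inputs the generated checks
   send down the re-execute path (the overflow test is applied to the
   unsigned case too, which is merely conservative). */
static int
needs_reexecute (int32_t dividend, int32_t divisor)
{
	if (divisor == 0)
		return 1;                        /* divide by zero -> #DE */
	if (divisor == -1 && dividend == INT32_MIN)
		return 1;                        /* INT32_MIN / -1 -> signed overflow #DE */
	return 0;
}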
Example #8
/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as 
 * @exc object in this case).
 */
gpointer
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start;
	int i;
	guint8 *code;
	guint32 pos;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = NACL_SIZE (128, 256);

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	/* call_filter (MonoContext *ctx, unsigned long eip) */
	code = start;

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	pos = 0;
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
			pos += 8;
		}

	/* Save EBP */
	pos += 8;
	amd64_push_reg (code, AMD64_RBP);

	/* Make stack misaligned, the call will make it aligned again */
	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* set new EBP */
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, rbp), 8);
	/* load callee saved regs */
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, r14), 8);
#if !defined(__native_client_codegen__)
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, r15), 8);
#endif
#ifdef TARGET_WIN32
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1,  MONO_STRUCT_OFFSET (MonoContext, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1,  MONO_STRUCT_OFFSET (MonoContext, rsi), 8);
#endif

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	/* restore RBP */
	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

	amd64_leave (code);
	amd64_ret (code);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);

	mono_arch_flush_icache (start, code - start);

	if (info)
		*info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);

	return start;
}
Example #9
/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	/* restore_context (MonoContext *ctx) */

	start = code = mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %r11 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rax), 8);
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rcx), 8);
	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rdx), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rsi), 8);
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rdi), 8);
	//amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, r8), 8);
	//amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, r9), 8);
	//amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, r10), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, r14), 8);
#if !defined(__native_client_codegen__)
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, r15), 8);
#endif

	/*
	 * The context resides on the stack, in the stack frame of the
	 * caller of this function.  The stack pointer that we need to
	 * restore is potentially many stack frames higher up, so the
	 * distance between them can easily be more than the red zone
	 * size.  Hence the stack pointer can be restored only after
	 * we have finished loading everything from the context.
	 */
	amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rsp), 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11,  MONO_STRUCT_OFFSET (MonoContext, rip), 8);
	amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	nacl_global_codeman_validate(&start, 256, &code);

	mono_arch_flush_icache (start, code - start);

	if (info)
		*info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);

	return start;
}
Example #10
/*
 * Check a 2D array access operation for exception conditions.
 */
static void Check2DArrayAccess(MDUnroll *unroll, int reg, int reg2, int reg3,
							   unsigned char *pc, unsigned char *label)
{
#ifndef IL_USE_INTERRUPT_BASED_NULL_POINTER_CHECKS
	unsigned char *patch1;
#endif
	unsigned char *patch2;
	unsigned char *patch3;

#ifndef IL_USE_INTERRUPT_BASED_NULL_POINTER_CHECKS
	/* Check the array reference against NULL */
	amd64_alu_reg_reg(unroll->out, X86_OR, reg, reg);
	patch1 = unroll->out;
	amd64_branch8(unroll->out, X86_CC_EQ, 0, 0);
#endif

	/*
	  Compute

	  a->data + (((i - a->bounds[0].lower) * a->bounds[0].multiplier) +
	  	((j - a->bounds[1].lower) * a->bounds[1].multiplier)) * a->elemSize

	  as follows:

	  i = i - a->bounds[0].lower
	  j = j - a->bounds[1].lower
	  i = i * a->bounds[0].multiplier
	  j = j * a->bounds[1].multiplier
	  i = i + j
	  i = i * a->elemSize
	  a = a->data
	  a = a + i
	*/

	/* Check the array bounds (all of which are ILInt32) */
	amd64_alu_reg_membase_size(unroll->out, X86_SUB, reg2, reg, 
							offsetof(System_MArray, bounds)
							+ offsetof(MArrayBounds, lower),
							4); /* i = i - a->bounds[0].lower */
	
	amd64_alu_reg_membase_size(unroll->out, X86_CMP, reg2, reg,
							offsetof(System_MArray, bounds)
							+ offsetof(MArrayBounds, size),
							4);
	patch2 = unroll->out;
	amd64_branch32(unroll->out, X86_CC_LT, 0, 0);
	amd64_alu_reg_membase_size(unroll->out, X86_ADD, reg2, reg, 
							offsetof(System_MArray, bounds)
							+ offsetof(MArrayBounds, lower),
							4);
	patch3 = unroll->out;
	amd64_jump8(unroll->out, 0);
	amd64_patch(patch2, unroll->out);

	amd64_alu_reg_membase_size(unroll->out, X86_SUB, reg3, reg,
							offsetof(System_MArray, bounds)
							+ sizeof(MArrayBounds)
							+ offsetof(MArrayBounds, lower),
							4); /* j = j - a->bounds[1].lower */
	
	amd64_alu_reg_membase_size(unroll->out, X86_CMP, reg3, reg, 
							offsetof(System_MArray, bounds)
							+ sizeof(MArrayBounds)
							+ offsetof(MArrayBounds, size),
							4);
	patch2 = unroll->out;
	amd64_branch32(unroll->out, X86_CC_LT, 0, 0);
	amd64_alu_reg_membase_size(unroll->out, X86_ADD, reg2, reg,
							offsetof(System_MArray, bounds) +
							offsetof(MArrayBounds, lower),
							4);
	amd64_alu_reg_membase_size(unroll->out, X86_ADD, reg3, reg,
							offsetof(System_MArray, bounds) 
							+ sizeof(MArrayBounds)
							+ offsetof(MArrayBounds, lower),
							4);

	/* Re-execute the current instruction in the interpreter */
#ifndef IL_USE_INTERRUPT_BASED_NULL_POINTER_CHECKS
	amd64_patch(patch1, unroll->out);
#endif
	amd64_patch(patch3, unroll->out);
	ReExecute(unroll, pc, label);

	/* Compute the address of the array element */
	amd64_patch(patch2, unroll->out);
	
	amd64_imul_reg_membase_size(unroll->out, reg2, reg, 
									offsetof(System_MArray, bounds)
									+ offsetof(MArrayBounds, multiplier),
									4); /* i = i * a->bounds[0].multiplier */
	amd64_imul_reg_membase_size(unroll->out, reg3, reg,
									offsetof(System_MArray, bounds)
									+ sizeof(MArrayBounds)
									+ offsetof(MArrayBounds, multiplier),
									4); /* j = j * a->bounds[1].multiplier */
	amd64_alu_reg_reg(unroll->out, X86_ADD, reg2, reg3); /* i = i + j */
	amd64_imul_reg_membase_size(unroll->out, reg2, reg,
									offsetof(System_MArray, elemSize),
									4); /* i = i * a->elemSize */
	amd64_mov_reg_membase(unroll->out, reg, reg, 
									offsetof(System_MArray, data), 
									8); /* a = a->data */
	
	/* sign-extend the 32-bit byte offset to 64 bits */
	amd64_movsxd_reg_reg(unroll->out, reg2, reg2);
	amd64_alu_reg_reg(unroll->out, X86_ADD, reg, reg2); /* a = a + i */
}
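A plain-C mirror of the address computation above (a hedged illustration; the struct layouts are hypothetical stand-ins for System_MArray and MArrayBounds, whose real definitions live in the engine headers):

#include <stdint.h>

/* Hypothetical layouts for illustration only. */
typedef struct { int32_t lower; int32_t size; int32_t multiplier; } MArrayBoundsSketch;
typedef struct { MArrayBoundsSketch bounds[2]; int32_t elemSize; void *data; } MArray2DSketch;

/* The same computation the unrolled code performs once both indices are in range:
   all arithmetic is 32-bit, and only the final byte offset is sign-extended. */
static void *
element_address (MArray2DSketch *a, int32_t i, int32_t j)
{
	int32_t off32 = ((i - a->bounds[0].lower) * a->bounds[0].multiplier +
			 (j - a->bounds[1].lower) * a->bounds[1].multiplier) * a->elemSize;

	return (char *) a->data + (int64_t) off32; /* movsxd, then add to a->data */
}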
Example #11
/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	/* restore_context (MonoContext *ctx) */

	start = code = mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %r11 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rax), 8);
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rcx), 8);
	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rdx), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rsi), 8);
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rdi), 8);
	//amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r8), 8);
	//amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r9), 8);
	//amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r10), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r14), 8);
#if !defined(__native_client_codegen__)
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r15), 8);
#endif

	if (mono_running_on_valgrind ()) {
		/* Prevent 'Address 0x... is just below the stack ptr.' errors */
		amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rsp), 8);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rip), 8);
		amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
	} else {
		amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rsp), 8);
		/* get return address */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rip), 8);
	}

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	nacl_global_codeman_validate(&start, 256, &code);

	mono_arch_flush_icache (start, code - start);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("restore_context"), start, code - start, ji, unwind_ops);

	return start;
}
Example #12
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	static guint8* saved = NULL;
	guint8 *code, *start;
	int cont_reg = AMD64_R9; /* register usable on both call conventions */
	const guint kMaxCodeSize = NACL_SIZE (64, 128);
	

	if (saved)
		return (MonoContinuationRestore)saved;
	code = start = mono_global_codeman_reserve (kMaxCodeSize);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
	 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
	 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
	 * We move cont to cont_reg since we need both rcx and rdi for the copy
	 * state is moved to $rax so it's set up as the return value and we can overwrite $rsi
 	 */
	amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
	amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
	/* setup the copy of the stack */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
	amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
	x86_cld (code);
	amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
	amd64_prefix (code, X86_REP_PREFIX);
	amd64_movsl (code);

	/* now restore the registers from the LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, lmf), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8);
#if !defined(__native_client_codegen__)
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8);
#endif
#ifdef TARGET_WIN32
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8);
#endif
	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsp), 8);

	/* restore the lmf chain */
	/*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/

	/* state is already in rax */
	amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assert ((code - start) <= kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);

	saved = start;
	return (MonoContinuationRestore)saved;
}