Example #1
gpointer
mono_arch_get_static_rgctx_trampoline (gpointer arg, gpointer addr)
{
	guint8 *code, *start;
	int buf_len;
	GSList *unwind_ops;

	MonoDomain *domain = mono_domain_get ();

	buf_len = 10;

	start = code = mono_domain_code_reserve (domain, buf_len);

	unwind_ops = mono_arch_get_cie_program ();

	x86_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, arg);
	x86_jump_code (code, addr);
	g_assert ((code - start) <= buf_len);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
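
The trampoline built here is only two instructions: load the extra argument into MONO_ARCH_RGCTX_REG and tail-jump to the real code, which is why buf_len can be a fixed 10 bytes. The following standalone sketch reproduces that same 10-byte layout (mov reg, imm32 followed by jmp rel32) outside of Mono. It is a sketch only: make_thunk and target are made-up names, the use of ECX is an illustrative choice (not necessarily Mono's rgctx register), and it assumes a 32-bit x86 Linux build (gcc -m32, ideally without optimization) with POSIX mmap.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

static int
target (void)
{
	int arg;

	/* read the value the thunk stuffed into ECX */
	__asm__ volatile ("movl %%ecx, %0" : "=r" (arg));
	return arg;
}

typedef int (*thunk_fn) (void);

static thunk_fn
make_thunk (uint32_t arg, void *addr)
{
	uint8_t *code = mmap (NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	uint8_t *p = code;
	int32_t rel;

	if (code == MAP_FAILED)
		return NULL;

	*p++ = 0xB9;                          /* mov ecx, imm32  (5 bytes)            */
	memcpy (p, &arg, 4); p += 4;

	*p++ = 0xE9;                          /* jmp rel32       (5 bytes)            */
	rel = (int32_t) ((uint8_t *) addr - (p + 4));
	memcpy (p, &rel, 4); p += 4;          /* 10 bytes total, matching buf_len     */

	return (thunk_fn) code;
}

int
main (void)
{
	thunk_fn t = make_thunk (42, (void *) target);

	printf ("%d\n", t ());                /* prints 42 */
	return 0;
}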
Example #2
/*
 * mono_arch_create_general_rgctx_lazy_fetch_trampoline:
 *
 *   This is a general variant of the rgctx fetch trampolines. It receives a pointer to gpointer[2] in the rgctx reg. The first entry contains the slot, the second
 * the trampoline to call if the slot is not filled.
 */
gpointer
mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int tramp_size;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	g_assert (aot);

	unwind_ops = mono_arch_get_cie_program ();

	tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	// FIXME: Currently, we always go to the slow path.
	
	/* Load trampoline addr */
	x86_mov_reg_membase (code, X86_EAX, MONO_ARCH_RGCTX_REG, 4, 4);
	/* Load mrgctx/vtable */
	x86_mov_reg_membase (code, MONO_ARCH_VTABLE_REG, X86_ESP, 4, 4);

	x86_jump_reg (code, X86_EAX);

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	g_assert (code - buf <= tramp_size);

	*info = mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf, code - buf, ji, unwind_ops);

	return buf;
}
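
As the doc comment says, this general variant receives a two-element gpointer array in the rgctx register rather than the slot itself. A hedged sketch of that descriptor follows; the struct and field names are made up, not Mono API. The x86_mov_reg_membase with displacement 4 above is what loads the second entry on a 32-bit target.

#include <glib.h>

/* Illustrative layout only: entry [0] holds the slot, entry [1] the trampoline
 * to call when the slot is not yet filled. */
typedef struct {
	gpointer slot;        /* gpointer[2] entry 0 */
	gpointer slow_path;   /* gpointer[2] entry 1: trampoline for the unfilled case */
} GeneralRgctxFetchArg;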
Example #3
gpointer
mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	static int byte_offset = -1;
	static guint8 bitmask;
	guint8 *jump;
	gint32 displace;
	int tramp_size;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	tramp_size = 48;

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	if (byte_offset < 0)
		mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);

	s390_llgc(code, s390_r0, 0, MONO_ARCH_VTABLE_REG, byte_offset);
	s390_nill(code, s390_r0, bitmask);
	s390_bnzr(code, s390_r14);

	tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT,
		mono_get_root_domain (), NULL);

	/* jump to the actual trampoline */
	displace = (tramp - code) / 2;
	s390_jg (code, displace);

	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create ("generic_class_init_trampoline", buf, code - buf, ji, unwind_ops);

	return(buf);
}
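
The three s390 instructions amount to a single bit test on the vtable's "initialized" flag: return immediately when the bit is already set, otherwise branch to the generic class init trampoline. A C-level sketch of that fast path; ensure_class_initialized and slow_path are made-up names, and only the bit test mirrors the emitted code.

#include <glib.h>

static void
ensure_class_initialized (gpointer vtable, int byte_offset, guint8 bitmask,
			  void (*slow_path) (gpointer))
{
	/* fast path: s390_llgc loads the flag byte, s390_nill masks it,
	 * s390_bnzr returns through r14 when the bit is already set */
	if (((guint8 *) vtable) [byte_offset] & bitmask)
		return;
	/* otherwise fall through to the class init trampoline (the s390_jg above) */
	slow_path (vtable);
}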
Example #4
/*
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable, we need to unbox the
 * "this" argument. This method returns a pointer to a trampoline which does
 * the unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_pos = 4, size = 16;
	MonoDomain *domain = mono_domain_get ();
	GSList *unwind_ops;

	start = code = mono_domain_code_reserve (domain, size);

	unwind_ops = mono_arch_get_cie_program ();

	x86_alu_membase_imm (code, X86_ADD, X86_ESP, this_pos, MONO_ABI_SIZEOF (MonoObject));
	x86_jump_code (code, addr);
	g_assert ((code - start) < size);

	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
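
The unbox trampoline only needs to advance "this" past the object header before tail-calling the method, because a boxed value is laid out as a MonoObject header immediately followed by the value-type payload. A standalone illustration of that adjustment; the struct names are made up, and the two-pointer header is an assumption standing in for MONO_ABI_SIZEOF (MonoObject).

#include <stdint.h>
#include <stddef.h>

struct object_header { void *vtable; void *sync; };             /* stand-in for MonoObject    */
struct boxed_int32   { struct object_header header; int32_t value; };

/* The payload sits right after the header, so unboxing is the same fixed
 * adjustment the trampoline applies to "this" with x86_alu_membase_imm. */
static int32_t *
unbox_int32 (struct boxed_int32 *box)
{
	return (int32_t *) ((uint8_t *) box + sizeof (struct object_header));
}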
Example #5
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
#ifdef MONO_ARCH_VTABLE_REG
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	gint32 displace;
	int tramp_size,
	    depth, 
	    index, 
	    iPatch = 0,
	    i;
	gboolean mrgctx;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	tramp_size = 48 + 16 * depth;
	if (mrgctx)
		tramp_size += 4;
	else
		tramp_size += 12;

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	if (mrgctx) {
		/* get mrgctx ptr */
		s390_lgr (code, s390_r1, s390_r2);
	} else {
		/* load rgctx ptr from vtable */
		s390_lg (code, s390_r1, 0, s390_r2, G_STRUCT_OFFSET(MonoVTable, runtime_generic_context));
		/* is the rgctx ptr null? */
		s390_ltgr (code, s390_r1, s390_r1);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [iPatch++] = code;
		s390_jge (code, 0);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			s390_lg (code, s390_r1, 0, s390_r1, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
		else
			s390_lg (code, s390_r1, 0, s390_r1, 0);
		s390_ltgr (code, s390_r1, s390_r1);
		/* if the ptr is null then jump to actual trampoline */
		rgctx_null_jumps [iPatch++] = code;
		s390_jge (code, 0);
	}

	/* fetch slot */
	s390_lg (code, s390_r1, 0, s390_r1, (sizeof (gpointer) * (index  + 1)));
	/* is the slot null? */
	s390_ltgr (code, s390_r1, s390_r1);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [iPatch++] = code;
	s390_jge (code, 0);
	/* otherwise return r1 */
	s390_lgr (code, s390_r2, s390_r1);
	s390_br  (code, s390_r14);

	for (i = 0; i < iPatch; i++) {
		displace = ((uintptr_t) code - (uintptr_t) rgctx_null_jumps[i]) / 2;
		s390_patch_rel ((rgctx_null_jumps [i] + 2), displace);
	}

	g_free (rgctx_null_jumps);

	/* move the rgctx pointer to the VTABLE register */
	s390_lgr (code, MONO_ARCH_VTABLE_REG, s390_r2);

	tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot),
		MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

	/* jump to the actual trampoline */
	displace = (tramp - code) / 2;
	s390_jg (code, displace);

	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	if (info) {
		char *name = mono_get_rgctx_fetch_trampoline_name (slot);
		*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
		g_free (name);
	}

	return(buf);
#else
	g_assert_not_reached ();
#endif
	return(NULL);
}	
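
The depth/index computation at the top splits the slot number across a chain of rgctx arrays: each array's first entry links to the next array and the remaining entries hold slots, so an array of size N contributes N - 1 usable slots. A C sketch of the lookup the emitted code performs follows; rgctx_fetch_fast is a made-up name, the initial vtable load and the mrgctx special case at depth 0 are omitted, and a NULL anywhere corresponds to one of the patched jumps into the MONO_TRAMPOLINE_RGCTX_LAZY_FETCH trampoline.

#include <glib.h>

static gpointer
rgctx_fetch_fast (gpointer *rgctx, int depth, int index)
{
	int i;

	for (i = 0; i < depth; ++i) {
		rgctx = (gpointer *) rgctx [0];     /* follow the link to the next array    */
		if (!rgctx)
			return NULL;                /* fall back to the slow path           */
	}
	return rgctx [index + 1];                   /* the slot itself; NULL if not filled  */
}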
Example #6
/*
 * get_throw_trampoline:
 *
 *  Generate a call to mono_x86_throw_exception/
 * mono_x86_throw_corlib_exception.
 * If LLVM is true, generate code which assumes the caller is LLVM generated code, 
 * which doesn't push the arguments.
 */
static guint8*
get_throw_trampoline (const char *name, gboolean rethrow, gboolean llvm, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, MonoTrampInfo **info, gboolean aot)
{
	guint8 *start, *code, *labels [16];
	int i, stack_size, stack_offset, arg_offsets [5], regs_offset;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	guint kMaxCodeSize = 192;

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	stack_size = 128;

	/* 
	 * On apple, the stack is misaligned by the pushing of the return address.
	 */
	if (!llvm && corlib)
		/* On OSX, we don't generate alignment code to save space */
		stack_size += 4;
	else
		stack_size += MONO_ARCH_FRAME_ALIGNMENT - 4;

	/*
	 * The stack looks like this:
	 * <pc offset> (only if corlib is TRUE)
	 * <exception object>/<type token>
	 * <return addr> <- esp (unaligned on apple)
	 */

	unwind_ops = mono_arch_get_cie_program ();

	/* Alloc frame */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, stack_size);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 4);

	arg_offsets [0] = 0;
	arg_offsets [1] = 4;
	arg_offsets [2] = 8;
	arg_offsets [3] = 12;
	regs_offset = 16;

	/* Save registers */
	for (i = 0; i < X86_NREG; ++i)
		if (i != X86_ESP)
			x86_mov_membase_reg (code, X86_ESP, regs_offset + (i * 4), i, 4);
	/* Calculate the offset between the current sp and the sp of the caller */
	if (llvm) {
		/* LLVM doesn't push the arguments */
		stack_offset = stack_size + 4;
	} else {
		if (corlib) {
			/* Two arguments */
			stack_offset = stack_size + 4 + 8;
#ifdef __APPLE__
			/* We don't generate stack alignment code on osx to save space */
#endif
		} else {
			/* One argument + stack alignment */
			stack_offset = stack_size + 4 + 4;
#ifdef __APPLE__
			/* Pop the alignment added by OP_THROW too */
			stack_offset += MONO_ARCH_FRAME_ALIGNMENT - 4;
#else
			if (mono_do_x86_stack_align)
				stack_offset += MONO_ARCH_FRAME_ALIGNMENT - 4;
#endif
		}
	}
	/* Save ESP */
	x86_lea_membase (code, X86_EAX, X86_ESP, stack_offset);
	x86_mov_membase_reg (code, X86_ESP, regs_offset + (X86_ESP * 4), X86_EAX, 4);

	/* Clear fp stack */
	labels [0] = code;
	x86_fnstsw (code);
	x86_shift_reg_imm (code, X86_SHR, X86_EAX, 11);
	x86_alu_reg_imm (code, X86_AND, X86_EAX, 7);
	x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0);
	labels [1] = code;
	x86_branch8 (code, X86_CC_EQ, 0, FALSE);
	x86_fstp (code, 0);
	x86_jump_code (code, labels [0]);
	mono_x86_patch (labels [1], code);

	/* Set arg1 == regs */
	x86_lea_membase (code, X86_EAX, X86_ESP, regs_offset);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [0], X86_EAX, 4);
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		x86_mov_reg_imm (code, X86_EAX, 0);
	else
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size + 4, 4);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [1], X86_EAX, 4);
	/* Set arg3 == eip */
	if (llvm_abs)
		x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
	else
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size, 4);
	x86_mov_membase_reg (code, X86_ESP, arg_offsets [2], X86_EAX, 4);
	/* Set arg4 == rethrow/pc_offset */
	if (resume_unwind) {
		x86_mov_membase_imm (code, X86_ESP, arg_offsets [3], 0, 4);
	} else if (corlib) {
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, stack_size + 8, 4);
		if (llvm_abs) {
			/* 
			 * The caller is LLVM code which passes the absolute address not a pc offset,
			 * so compensate by passing 0 as 'ip' and passing the negated abs address as
			 * the pc offset.
			 */
			x86_neg_reg (code, X86_EAX);
		}
		x86_mov_membase_reg (code, X86_ESP, arg_offsets [3], X86_EAX, 4);
	} else {
		x86_mov_membase_imm (code, X86_ESP, arg_offsets [3], rethrow, 4);
	}
	/* Make the call */
	if (aot) {
		// This can be called from runtime code, which can't guarantee that
		// ebx contains the got address.
		// So emit the got address loading code too
		code = mono_arch_emit_load_got_addr (start, code, NULL, &ji);
		code = mono_arch_emit_load_aotconst (start, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_x86_throw_corlib_exception" : "mono_x86_throw_exception");
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, resume_unwind ? (gpointer)(mono_x86_resume_unwind) : (corlib ? (gpointer)mono_x86_throw_corlib_exception : (gpointer)mono_x86_throw_exception));
	}
	x86_breakpoint (code);

	g_assert ((code - start) < kMaxCodeSize);

	if (info)
		*info = mono_tramp_info_create (name, start, code - start, ji, unwind_ops);
	else {
		GSList *l;

		for (l = unwind_ops; l; l = l->next)
			g_free (l->data);
		g_slist_free (unwind_ops);
	}

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	return start;
}
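
The "Clear fp stack" loop deserves a note: fnstsw reads the x87 status word into AX, bits 11-13 hold the TOP field (the register-stack pointer, 0 when the stack is empty), and fstp pops one entry per iteration until TOP reaches zero. A hedged C sketch of the same loop, assuming an x86/x86-64 target with GCC inline assembly; it only illustrates the mechanism and omits clobber annotations a production version would carry.

static void
clear_x87_stack (void)
{
	unsigned short sw;

	for (;;) {
		__asm__ ("fnstsw %0" : "=a" (sw));  /* status word; TOP is bits 11-13 */
		if (((sw >> 11) & 7) == 0)
			break;                      /* register stack is empty        */
		__asm__ volatile ("fstp %%st(0)");  /* pop one x87 value              */
	}
}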
Example #7
/*
 * get_throw_trampoline:
 *
 *  Generate a call to mono_amd64_throw_exception/
 * mono_amd64_throw_corlib_exception.
 */
static gpointer
get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
{
	guint8* start;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
	const guint kMaxCodeSize = NACL_SIZE (256, 512);

#ifdef TARGET_WIN32
	dummy_stack_space = 6 * sizeof(mgreg_t);	/* Windows expects stack space allocated for all 6 dummy args. */
#else
	dummy_stack_space = 0;
#endif

	start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* The stack is unaligned on entry */
	stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;

	code = start;

	if (info)
		unwind_ops = mono_arch_get_cie_program ();

	/* Alloc frame */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
	if (info)
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);

	/*
	 * To hide linux/windows calling convention differences, we pass all arguments on
	 * the stack by passing 6 dummy values in registers.
	 */

	arg_offsets [0] = dummy_stack_space + 0;
	arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
	arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
	ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
	regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* Save registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (i != AMD64_RSP)
			amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
	/* Save RSP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
	/* Save IP */
	if (llvm_abs)
		amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
	else
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
	/* Set arg1 == ctx */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg2 == exc/ex_token_index */
	if (resume_unwind)
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
	else
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
	/* Set arg3 == rethrow/pc offset */
	if (resume_unwind) {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
	} else if (corlib) {
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
		if (llvm_abs)
			/* 
			 * The caller is LLVM code which passes the absolute address not a pc offset,
			 * so compensate by passing 0 as 'rip' and passing the negated abs address as
			 * the pc offset.
			 */
			amd64_neg_membase (code, AMD64_RSP, arg_offsets [2]);
	} else {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
	}

	if (aot) {
		const char *icall_name;

		if (resume_unwind)
			icall_name = "mono_amd64_resume_unwind";
		else if (corlib)
			icall_name = "mono_amd64_throw_corlib_exception";
		else
			icall_name = "mono_amd64_throw_exception";
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);

	return start;
}
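
The dummy_stack_space handling and the "6 dummy values in registers" comment rely on the called C functions declaring six unused register parameters, so the real arguments always land on the stack on both the SysV and Win64 ABIs and the trampoline can store them at arg_offsets [0..2]. A hedged sketch of that shape with stand-in types and an illustrative name; the authoritative prototypes for mono_amd64_throw_exception and friends live next to this trampoline in the Mono sources.

typedef unsigned long long host_mgreg;   /* stand-in for mgreg_t */

/* Sketch only: the six dummies consume the register argument slots, so the
 * three real arguments are read from the stack slots written above. */
void
throw_exception_target (host_mgreg dummy1, host_mgreg dummy2, host_mgreg dummy3,
			host_mgreg dummy4, host_mgreg dummy5, host_mgreg dummy6,
			void *mctx, void *exc, int rethrow);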
Example #8
gpointer
mono_arch_create_monitor_exit_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8	*tramp,
		*code, *buf;
	gint16	*jump_obj_null, 
		*jump_have_waiters, 
		*jump_sync_null, 
		*jump_not_owned, 
		*jump_cs_failed,
		*jump_next,
		*jump_sync_thin_hash = NULL;
	int	tramp_size,
		status_offset, nest_offset;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int	obj_reg = s390_r2,
		sync_reg = s390_r3,
		status_reg = s390_r4;

	g_assert (obj_reg == MONO_ARCH_MONITOR_OBJECT_REG);

	mono_monitor_threads_sync_members_offset (&status_offset, &nest_offset);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (status_offset) == sizeof (guint32));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	status_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (status_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);

	tramp_size = 160;

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in obj_reg */
		/* is obj null? */
		s390_ltgr (code, obj_reg, obj_reg);
		/* if yes, jump to actual trampoline */
		s390_jz (code, 0); CODEPTR(code, jump_obj_null);

		/* load obj->synchronization to RCX */
		s390_lg (code, sync_reg, 0, obj_reg, MONO_STRUCT_OFFSET (MonoObject, synchronisation));

		if (mono_gc_is_moving ()) {
			/*if bit zero is set it's a thin hash*/
			s390_tmll (code, sync_reg, 1);
			s390_jo   (code, 0); CODEPTR(code, jump_sync_thin_hash);

			/* Clear bits used by the gc */
			s390_nill (code, sync_reg, ~0x3);
		}

		/* is synchronization null? */
		s390_ltgr (code, sync_reg, sync_reg);
		/* if yes, jump to actual trampoline */
		s390_jz (code, 0); CODEPTR(code, jump_sync_null);

		/* next case: synchronization is not null */
		/* load MonoInternalThread* into r5 */
		s390_ear (code, s390_r5, 0);
		s390_sllg(code, s390_r5, s390_r5, 0, 32);
		s390_ear (code, s390_r5, 1);
		/* load TID into r1 */
		s390_lg  (code, s390_r1, 0, s390_r5, mono_thread_get_tls_offset ());
		s390_lgf (code, s390_r1, 0, s390_r1, MONO_STRUCT_OFFSET (MonoInternalThread, small_id));
		/* is synchronization->owner == TID */
		s390_lgf (code, status_reg, 0, sync_reg, status_offset);
		s390_xr  (code, s390_r1, status_reg);
		s390_tmlh (code, s390_r1, OWNER_MASK);
		/* if not, jump to actual trampoline */
		s390_jno (code, 0); CODEPTR(code, jump_not_owned);

		/* next case: synchronization->owner == TID */
		/* is synchronization->nest == 1 */
		s390_lgf (code, s390_r0, 0, sync_reg, nest_offset);
		s390_chi (code, s390_r0, 1);
		/* if not, jump to next case */
		s390_jne (code, 0); CODEPTR(code, jump_next);
		/* if yes, is synchronization->entry_count greater than zero */
		s390_cfi (code, status_reg, ENTRY_COUNT_WAITERS);
		/* if not, jump to actual trampoline */
		s390_jnz (code, 0); CODEPTR(code, jump_have_waiters);
		/* if yes, try to set synchronization->owner to null and return */
		/* old status in s390_r0 */
		s390_lgfr (code, s390_r0, status_reg);
		/* form new status */
		s390_nilf (code, status_reg, ENTRY_COUNT_MASK);
		/* compare and exchange */
		s390_cs (code, s390_r0, status_reg, sync_reg, status_offset);
		/* if not successful, jump to actual trampoline */
		s390_jnz (code, 0); CODEPTR(code, jump_cs_failed);
		s390_br  (code, s390_r14);

		/* next case: synchronization->nest is not 1 */
		PTRSLOT (code, jump_next);
		/* decrease synchronization->nest and return */
		s390_lgf (code, s390_r0, 0, sync_reg, nest_offset);
		s390_ahi (code, s390_r0, -1);
		s390_st  (code, s390_r0, 0, sync_reg, nest_offset);
		s390_br  (code, s390_r14);

		PTRSLOT (code, jump_obj_null);
		if (jump_sync_thin_hash)
			PTRSLOT (code, jump_sync_thin_hash);
		PTRSLOT (code, jump_have_waiters);
		PTRSLOT (code, jump_not_owned);
		PTRSLOT (code, jump_cs_failed);
		PTRSLOT (code, jump_sync_null);
	}

	/* jump to the actual trampoline */
	tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_EXIT, mono_get_root_domain (), NULL);

	S390_SET (code, s390_r1, tramp);
	s390_br (code, s390_r1);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_MONITOR, NULL);
	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create ("monitor_exit_trampoline", buf, code - buf, ji, unwind_ops);

	return buf;
}
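
The inline fast path above follows the usual thin-lock exit protocol: bail out to the real trampoline unless the current thread owns the lock, count down recursive acquisitions, and otherwise release the lock with a compare-and-swap as long as no waiters need waking. A standalone sketch of that protocol; the field layout and mask values are illustrative, not Mono's actual status-word encoding, and the helper name is made up.

#include <stdint.h>
#include <stdbool.h>

#define OWNER_MASK_   0x0000ffffu   /* low bits: owning thread id (illustrative)  */
#define WAITERS_BIT_  0x80000000u   /* high bit: threads are blocked waiting      */

struct thin_lock { uint32_t status; uint32_t nest; };

/* Returns true when the exit completed inline; false means "take the slow
 * path", which is what every CODEPTR/PTRSLOT jump above does. */
static bool
monitor_exit_fast (struct thin_lock *sync, uint32_t tid)
{
	uint32_t status = sync->status;

	if ((status & OWNER_MASK_) != tid)
		return false;                       /* not the owner          */
	if (sync->nest != 1) {
		sync->nest--;                       /* recursive exit         */
		return true;
	}
	if (status & WAITERS_BIT_)
		return false;                       /* waiters must be woken  */
	/* release the lock: clear the owner bits atomically (the s390_cs above) */
	return __sync_bool_compare_and_swap (&sync->status, status, status & ~OWNER_MASK_);
}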
Example #9
gpointer
mono_arch_create_monitor_enter_trampoline (MonoTrampInfo **info, gboolean is_v4, gboolean aot)
{
	guint8	*tramp,
		*code, *buf;
	gint16	*jump_obj_null, 
		*jump_sync_null, 
		*jump_cs_failed, 
		*jump_other_owner, 
		*jump_tid, 
		*jump_sync_thin_hash = NULL,
		*jump_lock_taken_true = NULL;
	int tramp_size,
	    status_reg = s390_r0,
	    lock_taken_reg = s390_r1,
	    obj_reg = s390_r2,
	    sync_reg = s390_r3,
	    tid_reg = s390_r4,
	    status_offset,
	    nest_offset;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == obj_reg);
#ifdef MONO_ARCH_MONITOR_LOCK_TAKEN_REG
	g_assert (MONO_ARCH_MONITOR_LOCK_TAKEN_REG == lock_taken_reg);
#else
	g_assert (!is_v4);
#endif

	mono_monitor_threads_sync_members_offset (&status_offset, &nest_offset);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (status_offset) == sizeof (guint32));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	status_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (status_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);

	tramp_size = 160;

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in obj_reg */
		/* is obj null? */
		s390_ltgr (code, obj_reg, obj_reg);
		/* if yes, jump to actual trampoline */
		s390_jz (code, 0); CODEPTR(code, jump_obj_null);

		if (is_v4) {
			s390_cli (code, lock_taken_reg, 0, 1);
			/* if *lock_taken is 1, jump to actual trampoline */
			s390_je (code, 0); CODEPTR(code, jump_lock_taken_true);
		}

		/* load obj->synchronization to sync_reg */
		s390_lg (code, sync_reg, 0, obj_reg, MONO_STRUCT_OFFSET (MonoObject, synchronisation));

		if (mono_gc_is_moving ()) {
			/*if bit zero is set it's a thin hash*/
			s390_tmll (code, sync_reg, 1);
			s390_jo  (code, 0); CODEPTR(code, jump_sync_thin_hash);

			/* Clear bits used by the gc */
			s390_nill (code, sync_reg, ~0x3);
		}

		/* is synchronization null? */
		s390_ltgr (code, sync_reg, sync_reg);
		/* if yes, jump to actual trampoline */
		s390_jz (code, 0); CODEPTR(code, jump_sync_null);

		/* load MonoInternalThread* into tid_reg */
		s390_ear (code, s390_r5, 0);
		s390_sllg(code, s390_r5, s390_r5, 0, 32);
		s390_ear (code, s390_r5, 1);
		/* load tid */
		s390_lg  (code, tid_reg, 0, s390_r5, mono_thread_get_tls_offset ());
		s390_lgf (code, tid_reg, 0, tid_reg, MONO_STRUCT_OFFSET (MonoInternalThread, small_id));

		/* is synchronization->owner free */
		s390_lgf  (code, status_reg, 0, sync_reg, status_offset);
		s390_nilf (code, status_reg, OWNER_MASK);
		/* if not, jump to next case */
		s390_jnz  (code, 0); CODEPTR(code, jump_tid);

		/* if yes, try a compare-exchange with the TID */
		/* Form new status in tid_reg */
		s390_xr (code, tid_reg, status_reg);
		/* compare and exchange */
		s390_cs (code, status_reg, tid_reg, sync_reg, status_offset);
		s390_jnz (code, 0); CODEPTR(code, jump_cs_failed);
		/* if successful, return */
		if (is_v4)
			s390_mvi (code, lock_taken_reg, 0, 1);
		s390_br (code, s390_r14);

		/* next case: synchronization->owner is not null */
		PTRSLOT(code, jump_tid);
		/* is synchronization->owner == TID? */
		s390_nilf (code, status_reg, OWNER_MASK);
		s390_cr (code, status_reg, tid_reg);
		/* if not, jump to actual trampoline */
		s390_jnz (code, 0); CODEPTR(code, jump_other_owner);
		/* if yes, increment nest */
		s390_lgf (code, s390_r5, 0, sync_reg, nest_offset);
		s390_ahi (code, s390_r5, 1);
		s390_st  (code, s390_r5, 0, sync_reg, nest_offset);
		/* return */
		if (is_v4)
			s390_mvi (code, lock_taken_reg, 0, 1);
		s390_br (code, s390_r14);

		PTRSLOT (code, jump_obj_null);
		if (jump_sync_thin_hash)
			PTRSLOT (code, jump_sync_thin_hash);
		PTRSLOT (code, jump_sync_null);
		PTRSLOT (code, jump_cs_failed);
		PTRSLOT (code, jump_other_owner);
		if (is_v4)
			PTRSLOT (code, jump_lock_taken_true);
	}

	/* jump to the actual trampoline */
	if (is_v4)
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_ENTER_V4, mono_get_root_domain (), NULL);
	else
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_ENTER, mono_get_root_domain (), NULL);

	/* jump to the actual trampoline */
	S390_SET (code, s390_r1, tramp);
	s390_br (code, s390_r1);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_MONITOR, NULL);
	g_assert (code - buf <= tramp_size);

	if (info) {
		if (is_v4)
			*info = mono_tramp_info_create ("monitor_enter_v4_trampoline", buf, code - buf, ji, unwind_ops);
		else
			*info = mono_tramp_info_create ("monitor_enter_trampoline", buf, code - buf, ji, unwind_ops);
	}

	return buf;
}
Example #10
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	unwind_ops = mono_arch_get_cie_program ();

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (target_mgreg_t);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	tramp_size = (aot ? 64 : 36) + 6 * depth;

	code = buf = mono_global_codeman_reserve (tramp_size);

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	/* load vtable/mrgctx ptr */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
	if (!mrgctx) {
		/* load rgctx ptr from vtable */
		x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 4);
		/* is the rgctx ptr null? */
		x86_test_reg_reg (code, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, 4);
		else
			x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, 4);
		/* is the ptr null? */
		x86_test_reg_reg (code, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, sizeof (target_mgreg_t) * (index + 1), 4);
	/* is the slot null? */
	x86_test_reg_reg (code, X86_EAX, X86_EAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	x86_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	x86_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		x86_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	x86_mov_reg_membase (code, MONO_ARCH_VTABLE_REG, X86_ESP, 4, 4);

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR, GUINT_TO_POINTER (slot));
		x86_jump_reg (code, X86_EAX);
	} else {
		tramp = (guint8*)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		x86_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	g_assert (code - buf <= tramp_size);

	char *name = mono_get_rgctx_fetch_trampoline_name (slot);
	*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
	g_free (name);

	return buf;
}
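
The rgctx_null_jumps bookkeeping in this x86 version shows the standard forward-branch patching idiom: emit the conditional jump with a dummy displacement, remember where it was emitted, and back-patch it with x86_patch once the fall-through code is in place. A minimal standalone sketch of the same idiom for a short jz; the function names are made up.

#include <stdint.h>

/* Emit "jz rel8" with a zero displacement and record the patch site. */
static uint8_t *
emit_jz_placeholder (uint8_t *code, uint8_t **patch_site)
{
	*patch_site = code;
	*code++ = 0x74;     /* jz rel8 opcode             */
	*code++ = 0x00;     /* displacement patched later */
	return code;
}

/* Later, once the target is known (cf. x86_patch above): */
static void
patch_jz_rel8 (uint8_t *branch, uint8_t *target)
{
	branch [1] = (uint8_t) (target - (branch + 2));   /* rel8 is relative to the next insn */
}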