Example 1
void syn_memsetw( struct machine_ops* mop, struct emitter* e, struct machine* m, operand d, operand v, operand size ){
	operand iter = OP_TARGETREG( acquire_temp( mop, e, m ) );			
	operand dst = OP_TARGETREG( acquire_temp( mop, e, m ) );

	// init iterator
	mop->move( e, m, iter, OP_TARGETIMMED( 0 ) );
	mop->move( e, m, dst, d );
	
	// start loop 
	e->ops->label_local( e, 0 );
	mop->beq( e, m, iter, size, LBL_NEXT( 0 ) );  

	// copy 
	mop->move( e, m, OP_TARGETDADDR( dst.reg, 0 ), v );
	
	// update pointers 
	mop->add( e, m, dst, dst, OP_TARGETIMMED( 4 ) );
	
	// update iterator
	mop->add( e, m, iter, iter, OP_TARGETIMMED( 1 ) );
	
	mop->b( e, m, LBL_PREV( 0 ) );
	e->ops->label_local( e, 0 );
	
	release_tempn( mop, e, m, 2 );
}
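A minimal usage sketch, assuming a register addr already holds the destination address ( the register name and the word count are hypothetical ):

	syn_memsetw( mop, e, m, OP_TARGETREG( addr ), OP_TARGETIMMED( 0 ), OP_TARGETIMMED( 16 ) );	// zero 16 words starting at addr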
Example 2
/*
* Load the following and call epilogue:
* 	r[ RA_NR_ARGS ] = number of results
*	r[ RA_BASE ] = start address of results 
*/
void do_ret( struct machine_ops* mop, struct emitter* e, struct frame* f, int vregbase, int nret ){
	assert( RA_COUNT <= f->m->nr_reg );	// regargs are passed by register NOT stack
	vreg_operand basestack = vreg_to_operand( f, vregbase, true );

	/*
	* If nret == 0 then the previous instruction was a call, and calls already save vregs
	* on the stack. Therefore only when nret > 0 do we need to save live regs onto the
	* stack. Do it first so it can use as many temps as it wants before we reserve them
	* for the return procedure.
	*/
	if( nret > 0 ) 
#if 1
		save_frame_limit( mop, e, f, vregbase, nret - 1 );
#else
		// TODO: the above is from vregbase - so need to think about this one
		jfunc_call( mop, e, f->m, JF_STORE_LOCALS, jf_storelocal_offset( f->m, nret - 1 ), JFUNC_UNLIMITED_STACK, 0 );
#endif

	operand rargs[ RA_COUNT ];
	prefer_nontemp_acquire_reg( mop, e, f->m, RA_COUNT, rargs );

	mop->add( e, f->m, rargs[ RA_BASE ], OP_TARGETREG( basestack.value.base ), OP_TARGETIMMED( basestack.value.offset ) );

	if( nret > 0 ) {
		mop->move( e, f->m, rargs[ RA_NR_ARGS ], OP_TARGETIMMED( nret - 1 ) );
	} else {
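		// variable result count: take the distance between the results base and the stack
		// pointer, in 8-byte value slots ( the same slot size the nilling/copy loops use )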
		mop->sub( e, f->m, rargs[ RA_NR_ARGS ], rargs[ RA_BASE ], OP_TARGETREG( f->m->sp ) );	
		mop->udiv( e, f->m, rargs[ RA_NR_ARGS ], rargs[ RA_NR_ARGS ], OP_TARGETIMMED( 8 ) );
	}

	mop->b( e, f->m, LBL_ABS( OP_TARGETIMMED( (uintptr_t)jfunc_addr( e, JF_EPILOGUE ) ) ) ); 
}
Example 3
// this function could be inline
void syn_memcpyw( struct machine_ops* mop, struct emitter* e, struct machine* m, operand d, operand s, operand size ){
	operand iter = OP_TARGETREG( acquire_temp( mop, e, m ) );			
	operand src = OP_TARGETREG( acquire_temp( mop, e, m ) );
	operand dst = OP_TARGETREG( acquire_temp( mop, e, m ) );

	// init iterator
	mop->move( e, m, iter, OP_TARGETIMMED( 0 ) );
	mop->move( e, m, src, s );
	mop->move( e, m, dst, d );
	
	// start loop 
	e->ops->label_local( e, 0 );
	mop->beq( e, m, iter, size, LBL_NEXT( 0 ) );  

	// copy 
	mop->move( e, m, OP_TARGETDADDR( dst.reg, 0 ), OP_TARGETDADDR( src.reg, 0 ) );
	
	// update pointers 
	mop->add( e, m, dst, dst, OP_TARGETIMMED( -4 ) );	// TODO: this is incorrect in general but correct for copyargs
	mop->add( e, m, src, src, OP_TARGETIMMED( -4 ) ); 
	
	// update iterator
	mop->add( e, m, iter, iter, OP_TARGETIMMED( 1 ) );
	
	mop->b( e, m, LBL_PREV( 0 ) );
	e->ops->label_local( e, 0 );
	
	release_tempn( mop, e, m, 3 );
}
Example 4
void jinit_cpy_arg_res( struct JFunc* jf, struct machine_ops* mop, struct emitter* e, struct machine* m ){
	operand rargs[ RA_SIZE ];
	prefer_nontemp_acquire_reg( mop, e, m, RA_SIZE, rargs );

	syn_min( mop, e, m, rargs[ RA_EXIST ], rargs[ RA_EXIST ], rargs[ RA_EXPECT ] );
	
	// init iterator 
	operand iter = OP_TARGETREG( acquire_temp( mop, e, m ) );			
	mop->move( e, m, iter, OP_TARGETIMMED( 0 ) );
	
	do_copying( mop, e, m, iter, rargs[ RA_EXIST ], rargs[ RA_DST ], rargs[ RA_SRC ] );
	
	/*
	* TODO: The logic below is incorrect: the first is the most likely to get spilled, so
	* change the enum order. BUT then you have to think about prefer-saved regs and the
	* order there.
	* TODO: make prefer_nontemps error when not all registers are live simultaneously.
	*
	* RA_EXPECT is the last register, therefore it's the most likely to be spilled. So to
	* stop repeated spill/unspill, move it into RA_EXIST.
	*/
	mop->move( e, m, rargs[ RA_EXIST ], rargs[ RA_EXPECT ] );
	do_nilling( mop, e, m, iter, rargs[ RA_EXIST ], rargs[ RA_DST ] );

	release_temp( mop, e, m );	
	prefer_nontemp_release_reg( mop, e, m, RA_SIZE );

	mop->ret( e, m );
}
Example 5
void address_of( struct machine_ops *mop, struct emitter *e, struct machine *m,
							operand d,
							operand s ){
	assert( ISO_DADDR( s ) );
	mop->add( e, m, d, OP_TARGETREG( s.base ), 
			OP_TARGETIMMED( s.offset ) );
}
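A usage sketch mirroring the call in emit_jmp further down ( slot is a hypothetical vreg_operand whose stack fields are OT_DIRECTADDR operands ):

	operand base = OP_TARGETREG( acquire_temp( mop, e, m ) );
	address_of( mop, e, m, base, slot.type );	// materialise the address of the slot's type field
	release_temp( mop, e, m );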
Example 6
static void postcall( struct machine_ops* mop, struct emitter* e, struct frame* f, int vregbase, int narg, int nret ){
 	vreg_operand basestack = vreg_to_operand( f, vregbase, true );
	
	operand rargs[ RA_SIZE ];
	prefer_nontemp_acquire_reg( mop, e, f->m, RA_SIZE, rargs );

	// max stack clobber 
	const int maxstack = 3;	// prior frame has buffer of pushed return addr, frame pointer and closure
#define USE_JFUNC_FOR_VARRES

	// set dst and expect
#if 0 
	mop->add( e, f->m, rargs[ RA_DST ], OP_TARGETREG( basestack.value.base ), OP_TARGETIMMED( basestack.value.offset ) );
#endif

	if( nret == 0 ){
#ifdef USE_JFUNC_FOR_VARRES 
		jfunc_call( mop, e, f->m, JF_VARRES_POSTCALL, 0, maxstack, 4, rargs[ RA_SRC ], rargs[ RA_DST ], 
						rargs[ RA_EXPECT ], rargs[ RA_EXIST ] );
#else
		mop->move( e, f->m, rargs[ RA_EXPECT ], rargs[ RA_EXIST ] );
		
		// update stack position 
		mop->mul( e, f->m, rargs[ RA_NR_ARGS ], rargs[ RA_NR_ARGS ], OP_TARGETIMMED( 8 ) );	// in word units
		mop->add( e, f->m, rargs[ RA_NR_ARGS ], rargs[ RA_NR_ARGS ], OP_TARGETIMMED( 8 + 8 * vregbase ) );
		mop->sub( e, f->m, OP_TARGETREG( f->m->sp ), OP_TARGETREG( f->m->fp ), rargs[ RA_NR_ARGS ] );

		mop->move( e, f->m, rargs[ RA_EXIST ], rargs[ RA_EXPECT ]  );	
#endif
	} else {
		mop->move( e, f->m, rargs[ RA_EXPECT ], OP_TARGETIMMED( nret - 1 ) );
#ifdef USE_JFUNC_FOR_VARRES 
		jfunc_call( mop, e, f->m, JF_ARG_RES_CPY, 0, maxstack, 4, rargs[ RA_SRC ], rargs[ RA_DST ], 
						rargs[ RA_EXPECT ], rargs[ RA_EXIST ] );
#endif
	}

#ifndef USE_JFUNC_FOR_VARRES
		jfunc_call( mop, e, f->m, JF_ARG_RES_CPY, 0, maxstack, 4, rargs[ RA_SRC ], rargs[ RA_DST ], 
						rargs[ RA_EXPECT ], rargs[ RA_EXIST ] );
#endif

	prefer_nontemp_release_reg( mop, e, f->m, RA_SIZE );
}
Example 7
static void do_nilling( struct machine_ops* mop, struct emitter* e, struct machine* m, 
		operand iter, operand limit, operand dst ){

	// start loop 
	e->ops->label_local( e, 0 );
	mop->beq( e, m, iter, limit, LBL_NEXT( 0 ) ); 	// TODO: bgt is equiv to beq so swap? 

	// nil the type tag of the current slot
	mop->move( e, m, OP_TARGETDADDR( dst.reg, -4 ), OP_TARGETIMMED( LUA_TNIL ) );
	
	// update pointers 
	mop->add( e, m, dst, dst, OP_TARGETIMMED( -8 ) );	
	
	// update iterator
	mop->add( e, m, iter, iter, OP_TARGETIMMED( 1 ) );
	
	mop->b( e, m, LBL_PREV( 0 ) );
	e->ops->label_local( e, 0 );
}
Example 8
static void precall( struct machine_ops* mop, struct emitter* e, struct frame* f, int vregbase, int narg, int nret ){
	// new frame assumes no temporaries have been used yet 
	assert( temps_accessed( f->m ) == 0 );
	assert( RA_COUNT + 1 <= f->m->nr_reg );		// regargs are passed by register NOT stack

	vreg_operand clive = vreg_to_operand( f, vregbase, false );
 	vreg_operand cstack = vreg_to_operand( f, vregbase, true );
	assert( cstack.value.tag == OT_DIRECTADDR );

	// TODO: verify it's a closure using clive
	
	// get arg passing registers
	operand rargs[ RA_COUNT ];
	prefer_nontemp_acquire_reg( mop, e, f->m, RA_COUNT, rargs );

	// calculate number of args
	if( narg > 0 )
		mop->move( e, f->m, rargs[ RA_NR_ARGS ], OP_TARGETIMMED( narg - 1 ) );
	else{
		// calculate the total by subtracting the base reg address from the stack pointer.

		// 2 because: 8 for (ebp,closure) and another 8 for the function being called
		// ( remember actual args = args - 1 )
		mop->add( e, f->m, rargs[ RA_NR_ARGS ], OP_TARGETREG( f->m->fp ), OP_TARGETIMMED( -8 * ( 2 + vregbase ) ) );
		mop->sub( e, f->m, rargs[ RA_NR_ARGS ], rargs[ RA_NR_ARGS ], OP_TARGETREG( f->m->sp ) );	
		mop->udiv( e, f->m, rargs[ RA_NR_ARGS ], rargs[ RA_NR_ARGS ], OP_TARGETIMMED( 8 ) );
	}

	// calculate base address
	mop->add( e, f->m, rargs[ RA_BASE ], OP_TARGETREG( cstack.value.base ), OP_TARGETIMMED( cstack.value.offset ) );
	
	// call function without spilling any temps
#if 0
	bool prior = disable_spill( f->m );
	mop->call( e, f->m, LBL_ABS( clive.value ) );
	restore_spill( f->m, prior );
#else
	jfunc_call( mop, e, f->m, JF_PROLOGUE, 0, JFUNC_UNLIMITED_STACK, 2, rargs[ RA_EXIST ], rargs[ RA_SRC ] );
#endif

	// release temps used in call
	prefer_nontemp_release_reg( mop, e, f->m, RA_COUNT );
}
Example 9
void popn( struct machine_ops* mop, struct emitter* e, struct machine* m, int nr_operands, ... ){
	va_list ap;
	const operand stack = OP_TARGETREG( m->sp );

	va_start( ap, nr_operands );

	if( nr_operands == 1 && mop->pop ){
		mop->pop( e, m, va_arg( ap, operand ) );
	} else {
		for( int i = 0; i < nr_operands; i++ )
			mop->move( e, m, va_arg( ap, operand ), OP_TARGETDADDR( m->sp, 4 * i ) );
		
		mop->add( e, m, stack, stack, OP_TARGETIMMED( 4 * nr_operands ) ); 
	}

	va_end( ap );
}
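For example, jinit_epi further down restores several registers with one call; a minimal sketch assuming the machine has a return-address register ( see m->is_ra ):

	popn( mop, e, m, 2, OP_TARGETREG( m->fp ), OP_TARGETREG( m->ra ) );	// restore fp and ra from the two words on top of the stack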
Example 10
void setk_string( struct frame* f, int k, char* str ){
	f->consts[k].value = OP_TARGETIMMED( (uintptr_t)str );
	f->consts[k].type = OP_TARGETIMMED( LUA_TSTRING );
}
Example 11
void setk_number( struct frame* f, int k, int value ){
	f->consts[k].value = OP_TARGETIMMED( value );
	f->consts[k].type = OP_TARGETIMMED( LUA_TNUMBER );
}
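Typical constant setup, sketched with arbitrary slot indices and values:

	setk_number( f, 0, 42 );	// consts[0] holds the number 42
	setk_string( f, 1, "print" );	// consts[1] holds a string; the char* is stored as an immediate address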
Example 12
static lua_Number ljc_relational( lua_Number st, lua_Number sv
					, lua_Number tt, lua_Number tv
					, int op ) {
	assert( !( st == LUA_TNUMBER && tt == LUA_TNUMBER ) );
	
	struct TValue s = { .t = st, .v = (union Value)sv };
	struct TValue t = { .t = tt, .v = (union Value)tv };
	
	switch( op ){
		case REL_LT:
			return do_lt( &s, &t ); 
		case REL_LEQ:
			return do_leq( &s, &t );
		case REL_EQ:
			return do_eq( &s, &t );
		default:
			assert( false );
			return 0;	// unreachable; keeps the compiler happy when NDEBUG removes the assert
	}
}


typedef void (*arch_rel)( struct emitter*, struct machine*
					, operand, operand, label );

static void emit_relational( struct emitter *me, struct machine_ops *mop
					, struct frame* f 
					, loperand s, loperand t
					, arch_rel ar, int op
					, bool expect ){

	vreg_operand os = loperand_to_operand( f, s ),
			ot = loperand_to_operand( f, t );

	unsigned int pc = me->ops->pc( me ) + 2;
	label l = LBL_PC( pc );

	// determine if coercion is required 
	operand tag = OP_TARGETREG( acquire_temp( mop, me, f->m ) );
	mop->bor( me, f->m, tag, os.type, ot.type );
	mop->beq( me, f->m, tag, OP_TARGETIMMED( 0 ), LBL_NEXT( 0 ) );

	// do coercion 
	mop->call_static_cfn( me, f, (uintptr_t)&ljc_relational
					, &tag, 5, os.type, os.value 
					, ot.type, ot.value
					, OP_TARGETIMMED( op ) ); 
	mop->beq( me, f->m, tag, OP_TARGETIMMED( expect ), l );
	mop->b( me, f->m, LBL_NEXT( 1 ) );	

	// do primitive relational  
	me->ops->label_local( me, 0 );
	ar( me, f->m, os.value, ot.value, l );
	me->ops->label_local( me, 1 );
	

	release_temp( mop, me, f->m );	

	return;
}

void emit_jmp( struct emitter** mce, struct machine_ops* mop
					, struct frame *f
					, loperand a
					, int offset ){
	assert( a.islocal );

	// if not zero then any upvalues below the vreg need to be closed.
	if( a.index > 0 ){
		vreg_operand op = vreg_to_operand( f, a.index + 1, true );
		operand base = OP_TARGETREG( acquire_temp( mop, REF, f->m ) );
		address_of( mop, REF, f->m, base, op.type );  
		mop->call_static_cfn( REF, f, (uintptr_t)&closure_close, NULL
				, 1
				, base ); 
		release_temp( mop, REF, f->m ); 
	}
	
	unsigned int pc = (int)REF->ops->pc( REF ) + offset + 1;
	mop->b( REF, f->m, LBL_PC( pc ) ); 
} 
Example 13
void jinit_epi( struct JFunc* jf, struct machine_ops* mop, struct emitter* e, struct machine* m ){
	// phoney frame 
	struct frame F = { .m = m, .nr_locals = 1, .nr_params = 0 };
	struct frame *f = &F;

	const operand sp = OP_TARGETREG( m->sp );
	const operand fp = OP_TARGETREG( m->fp );

	operand rargs[ RA_SIZE ];
	prefer_nontemp_acquire_reg( mop, e, f->m, RA_SIZE, rargs );

	// reset stack 
//	mop->move( e, m, sp, fp );
	mop->add( e, m, sp, fp, OP_TARGETIMMED( -4 ) );
	if( m->is_ra ) 
		popn( mop, e, m , 3, rargs[ RA_DST ], fp, OP_TARGETREG( m->ra ) );
	else
		popn( mop, e, m, 2, rargs[ RA_DST ], fp ); 
//		pop( mop, e, m, fp );

	mop->ret( e, m );
	
	prefer_nontemp_release_reg( mop, e, f->m, RA_SIZE );
}

/*
* Do the majority ( function independent ) part of the prologue. That is: store the frame section,
* update src and dst pointers and call the memcpy.
*
* The function specific code needs to set the number of params, update the stack ( requires # of locals )
* and then unspill params.  
*/
void jinit_pro( struct JFunc* jf, struct machine_ops* mop, struct emitter* e, struct machine* m ){
	// phoney frame 
	struct frame F = { .m = m, .nr_locals = 1, .nr_params = 0 };
	struct frame *f = &F;

	const operand sp = OP_TARGETREG( f->m->sp );
	const operand fp = OP_TARGETREG( f->m->fp );
	const vreg_operand basestack = vreg_to_operand( f, 0, true );		// destination is first local
	const int maxstack = JFUNC_UNLIMITED_STACK; 
	
	operand rargs[ RA_SIZE ];
	prefer_nontemp_acquire_reg( mop, e, f->m, RA_SIZE, rargs );

	// push old frame pointer, closure addr / result start addr, expected nr of results
	if( f->m->is_ra )
		pushn( mop, e, f->m, 3, OP_TARGETREG( f->m->ra), fp, rargs[ RA_SRC ] ); 
	else
		pushn( mop, e, f->m, 2, fp, rargs[ RA_SRC ] ); 
	
	// set ebp and update stack
	mop->add( e, f->m, fp, sp, OP_TARGETIMMED( 4 ) );	// point to ebp so add 4 

	// set src ( always start after closure see Lua VM for reason )
	mop->add( e, f->m, rargs[ RA_SRC ], rargs[ RA_SRC ], OP_TARGETIMMED( -8 ) );
	mop->add( e, f->m, rargs[ RA_DST ], OP_TARGETREG( basestack.value.base ), OP_TARGETIMMED( basestack.value.offset ) );
		

	/*
	* Call the actual function, which is the closure. On RISC this will clobber a temp;
	* hopefully it isn't a live reg or we will get an exception. On CISC there is probably
	* an indirect direct-address jmp instruction ( x86 does ).
	*/
	mop->b( e, f->m, LBL_ABS( OP_TARGETDADDR( rargs[ RA_SRC ].reg, 8 ) ) );

	prefer_nontemp_release_reg( mop, e, f->m, RA_SIZE );

}

/*
* The number of results is not known beforehand. Need to update the stack for future
* calls.
*/
void jinit_vresult_postcall( struct JFunc* jf, struct machine_ops* mop, struct emitter* e, struct machine* m ){
	// phoney frame 
	struct frame F = { .m = m, .nr_locals = 1, .nr_params = 0 };
	struct frame *f = &F;

	operand rargs[ RA_SIZE ];
	prefer_nontemp_acquire_reg( mop, e, f->m, RA_SIZE, rargs );

	// max stack clobber 
	const int maxstack = 3;	// prior frame has buffer of pushed return addr, frame pointer and closure

	// consume as many results as available 
	mop->move( e, f->m, rargs[ RA_EXPECT ], rargs[ RA_EXIST ] );
	
	// if register based remember return address 
	if( f->m->is_ra )
		pushn( mop, e, f->m, 1, OP_TARGETREG( f->m->ra) );	// not safe because of the stack

	// copy args across 
	jfunc_call( mop, e, f->m, JF_ARG_RES_CPY, 0, maxstack, 4, rargs[ RA_SRC ], rargs[ RA_DST ], 
						rargs[ RA_EXPECT ], rargs[ RA_EXIST ] );

	if( f->m->is_ra )
		popn( mop, e, f->m, 1, OP_TARGETREG( f->m->ra) );

	
	/*
	* This depends heavily on the copy-arg implementation: it assumes the pointers will
	* point to the top of the stack after copying, i.e. the last result copied.
	*/
	mop->add( e, m, OP_TARGETREG( f->m->sp ), rargs[ RA_DST ], OP_TARGETIMMED( 0 ) ); 

	prefer_nontemp_release_reg( mop, e, f->m, RA_SIZE );

	// return 
	mop->ret( e, m );

	
}
Example 14
void prologue( struct machine_ops* mop, struct emitter* e, struct frame* f ){
#if 1
	const operand sp = OP_TARGETREG( f->m->sp );
	const operand fp = OP_TARGETREG( f->m->fp );
	const int nparams = f->nr_params;

	operand rargs[ RA_SIZE ];
	prefer_nontemp_acquire_reg( mop, e, f->m, RA_SIZE, rargs );
	
	mop->add( e, f->m, sp, sp, OP_TARGETIMMED( -( 8 * f->nr_locals ) ) );
	if( nparams ){
		// set nparams
		mop->move( e, f->m, rargs[ RA_EXPECT ], OP_TARGETIMMED( nparams ) );
		
		// do argument cpy
		jfunc_call( mop, e, f->m, JF_ARG_RES_CPY, 0, JFUNC_UNLIMITED_STACK, 4, rargs[ RA_SRC ], rargs[ RA_DST ], 
							rargs[ RA_EXPECT ], rargs[ RA_EXIST ] );
		
		// do call
		jfunc_call( mop, e, f->m, JF_LOAD_LOCALS, jf_loadlocal_offset( f->m, nparams ), JFUNC_UNLIMITED_STACK, 0 );
	}
	
	prefer_nontemp_release_reg( mop, e, f->m, RA_SIZE );
#else
	// new frame assumes no temporaries have been used yet 
	assert( temps_accessed( f->m ) == 0 );

	const operand sp = OP_TARGETREG( f->m->sp );
	const operand fp = OP_TARGETREG( f->m->fp );
	const int nparams = f->nr_params;
	
	operand rargs[ RA_SIZE ];
	prefer_nontemp_acquire_reg( mop, e, f->m, RA_SIZE, rargs );

	// push old frame pointer, closure addr / result start addr, expected nr of results
	if( f->m->is_ra )
		pushn( mop, e, f->m, 3, OP_TARGETREG( f->m->ra), fp, rargs[ RA_SRC ] ); 
	else
		pushn( mop, e, f->m, 2, fp, rargs[ RA_SRC ] ); 
	
	// set ebp and update stack
	mop->add( e, f->m, fp, sp, OP_TARGETIMMED( 4 ) );	// point to ebp so add 4 
	mop->add( e, f->m, sp, sp, OP_TARGETIMMED( -( 8 * f->nr_locals ) ) );

	if( nparams ) {
		const vreg_operand basestack = vreg_to_operand( f, 0, true );		// destination is first local
		const int maxstack = JFUNC_UNLIMITED_STACK; 

		// set src ( always start after closure see Lua VM for reason )
		mop->add( e, f->m, rargs[ RA_SRC ], rargs[ RA_SRC ], OP_TARGETIMMED( -8 ) );

		// set dst and expect
		mop->add( e, f->m, rargs[ RA_DST ], OP_TARGETREG( basestack.value.base ), OP_TARGETIMMED( basestack.value.offset ) );
		mop->move( e, f->m, rargs[ RA_EXPECT ], OP_TARGETIMMED( nparams ) );
		
		jfunc_call( mop, e, f->m, JF_ARG_RES_CPY, 0, maxstack, 4, rargs[ RA_SRC ], rargs[ RA_DST ], 
							rargs[ RA_EXPECT ], rargs[ RA_EXIST ] );
		prefer_nontemp_release_reg( mop, e, f->m, RA_SIZE );

#if 0	 	
		load_frame_limit( mop, e, f, 0, nparams );	// load locals living in registers 
#else
		jfunc_call( mop, e, f->m, JF_LOAD_LOCALS, jf_loadlocal_offset( f->m, nparams ), maxstack, 0 );
#endif
	}
#endif
}
Example 15
void loadim( struct machine_ops* mop, struct emitter* e, struct machine* m, int reg, int value ) {
	operand r = OP_TARGETREG( reg );
	operand v = OP_TARGETIMMED( value );
	mop->move( e, m, r, v );
}
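A sketch of its use ( the register index is machine-specific and chosen arbitrarily here ):

	loadim( mop, e, m, 3, 100 );	// emit a move of the immediate 100 into machine register 3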