static int fast_interp(uint8 *msg, unsigned nbytes, Atom a, int fail_pid) {
    /* msg += (msg[off]:nbits & mask) << shift */
#   define do_shift(aligned, unchecked) do { \
        if(!load_lhs(&off, msg, nbytes, ir, aligned, unchecked)) \
            continue; \
        off <<= ir->u.shift.shift; \
        if(off >= nbytes) \
            break; \
        if((pid = fast_interp(msg+off, nbytes-off, a->kids.lh_first, a->pid))) \
            return pid; \
    } while(0)

    /* msg[off]:nbits & mask == val */
#   define do_eq(aligned, unchecked) do { \
        assert(!a->ht); \
        if(!load_lhs(&lhs, msg, nbytes, ir, aligned, unchecked)) \
            break; \
        if(lhs != ir->u.eq.val) \
            break; \
        if((pid = fast_interp(msg, nbytes, a->kids.lh_first, a->pid))) \
            return pid; \
    } while(0)

#   define do_deq(aligned, unchecked) do { \
        Atom hte; \
        \
        assert(a->ht); \
        if(!load_lhs(&lhs, msg, nbytes, ir, aligned, unchecked)) \
            break; \
        if(!(hte = ht_lookup(a->ht, lhs))) \
            break; \
        if((pid = fast_interp(msg, nbytes, hte->kids.lh_first, hte->pid))) \
            return pid; \
    } while(0)

    unsigned lhs, off, pid;
    struct ir *ir;

    for(; a; a = a->sibs.le_next) {
        ir = &a->ir;
        switch((state_t)a->code[0]) {
        case EQ: do_eq(0, 0); break;
        case EQ_UNCHECKED: do_eq(0, 1); break;
        case EQ_ALIGNED: do_eq(1, 0); break;
        case EQ_ALIGNED_UNCHECKED: do_eq(1, 1); break;
        case EQ_HASH: do_deq(0, 0); break;
        case EQ_ALIGNED_HASH: do_deq(1, 0); break;
        case EQ_UNCHECKED_HASH: do_deq(0, 1); break;
        case EQ_ALIGNED_UNCHECKED_HASH: do_deq(1, 1); break;
        case SHIFT: do_shift(0, 0); break;
        case SHIFT_UNCHECKED: do_shift(0, 1); break;
        case SHIFT_ALIGNED: do_shift(1, 0); break;
        case SHIFT_ALIGNED_UNCHECKED: do_shift(1, 1); break;
        default: fatal(Bogus op);
        }
    }
    return fail_pid;
}
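/*
 * Illustrative sketch (not from the original source).  The do_shift/do_eq/do_deq
 * macros above wrap multi-statement bodies in do { ... } while(0).  Inside such a
 * block, break exits the macro body only, and continue does the same because the
 * controlling expression is 0; neither touches the enclosing for loop over sibling
 * atoms, so once the macro finishes, control falls through to the switch's break
 * and the scan resumes with the next sibling.  The standalone toy program below
 * (hypothetical name: check_positive) demonstrates that behaviour in isolation.
 */
#include <stdio.h>

#define check_positive(x) do { \
        if ((x) <= 0) \
            break;  /* leaves the macro body only, not the caller's loop */ \
        printf("%d is positive\n", (x)); \
    } while (0)

int main(void) {
    int i;
    for (i = -2; i <= 2; i++) {
        check_positive(i);      /* the for loop keeps running for every i */
        printf("after macro, i = %d\n", i);
    }
    return 0;
}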
static void codegen14(enum node_op op, enum size_tag size, gp_boolean is_const, int value, char *name) {
  switch (op) {
  case op_assign:
    assert(0);
    break;
  case op_add:
    do_add(size, is_const, value, name);
    break;
  case op_sub:
    do_sub(size, is_const, value, name);
    break;
  case op_neg:
    do_neg(size, is_const, value, name);
    break;
  case op_com:
    do_com(size, is_const, value, name);
    break;
  case op_and:
    do_and(size, is_const, value, name);
    break;
  case op_or:
    do_or(size, is_const, value, name);
    break;
  case op_xor:
    do_xor(size, is_const, value, name);
    break;
  case op_not:
    do_not(size, is_const, value, name);
    break;
  case op_lsh:
    do_lsh(size, is_const, value, name);
    break;
  case op_rsh:
    do_rsh(size, is_const, value, name);
    break;
  case op_land:
    do_and(size_uint8, is_const, value, name);
    break;
  case op_lor:
    do_or(size_uint8, is_const, value, name);
    break;
  case op_eq:
    do_eq(size, is_const, value, name);
    break;
  case op_ne:
    do_ne(size, is_const, value, name);
    break;
  case op_lt:
    do_lt(size, is_const, value, name);
    break;
  case op_lte:
    do_lte(size, is_const, value, name);
    break;
  case op_gt:
  case op_gte:
    /* These are replaced in the optimizer. */
    assert(0);
    break;
  case op_mult:
    do_mult(size, is_const, value, name);
    break;
  case op_div:
    do_div(size, is_const, value, name);
    break;
  case op_mod:
    do_mod(size, is_const, value, name);
    break;
  case op_clr:
  case op_inc:
  case op_dec:
    /* Should use unopgen14. */
    assert(0);
    break;
  default:
    assert(0); /* Unhandled binary operator */
  }
}
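/*
 * Illustrative sketch (an assumption, not taken from the surrounding sources).
 * The op_gt/op_gte arms assert(0) because, per the comment, greater-than nodes are
 * expected to be rewritten before code generation; a common way to do that is to
 * swap the operands so only op_lt/op_lte ever reach codegen14 (a > b becomes b < a,
 * a >= b becomes b <= a).  The toy node type and canonicalise() below are
 * hypothetical and only show that rewrite in isolation.
 */
#include <assert.h>
#include <stdio.h>

enum toy_op { TOY_LT, TOY_LTE, TOY_GT, TOY_GTE };

struct toy_node {
  enum toy_op op;
  int left, right;              /* stand-ins for real operand subtrees */
};

static void canonicalise(struct toy_node *n) {
  int tmp;
  switch (n->op) {
  case TOY_GT:                  /* a > b   ->   b < a  */
    n->op = TOY_LT;
    tmp = n->left; n->left = n->right; n->right = tmp;
    break;
  case TOY_GTE:                 /* a >= b  ->   b <= a */
    n->op = TOY_LTE;
    tmp = n->left; n->left = n->right; n->right = tmp;
    break;
  default:                      /* already in canonical form */
    break;
  }
}

int main(void) {
  struct toy_node n = { TOY_GT, 1, 2 };         /* 1 > 2 */
  canonicalise(&n);                             /* becomes 2 < 1 */
  assert(n.op == TOY_LT && n.left == 2 && n.right == 1);
  printf("canonicalised: op=%d left=%d right=%d\n", (int)n.op, n.left, n.right);
  return 0;
}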
static lua_Number ljc_relational( lua_Number st, lua_Number sv
                                , lua_Number tt, lua_Number tv
                                , int op ) {
    assert( !( st == LUA_TNUMBER && tt == LUA_TNUMBER ) );

    struct TValue s = { .t = st, .v = (union Value)sv };
    struct TValue t = { .t = tt, .v = (union Value)tv };

    switch( op ){
        case REL_LT:  return do_lt( &s, &t );
        case REL_LEQ: return do_leq( &s, &t );
        case REL_EQ:  return do_eq( &s, &t );
        default:
            assert( false );
    }
}

typedef void (*arch_rel)( struct emitter*, struct machine*
                        , operand, operand, label );

static void emit_relational( struct emitter *me, struct machine_ops *mop
                        , struct frame* f
                        , loperand s, loperand t
                        , arch_rel ar, int op
                        , bool expect ){
    vreg_operand os = loperand_to_operand( f, s ),
                 ot = loperand_to_operand( f, t );

    unsigned int pc = me->ops->pc( me ) + 2;
    label l = LBL_PC( pc );

    // determine if coercion is required
    operand tag = OP_TARGETREG( acquire_temp( mop, me, f->m ) );
    mop->bor( me, f->m, tag, os.type, ot.type );
    mop->beq( me, f->m, tag, OP_TARGETIMMED( 0 ), LBL_NEXT( 0 ) );

    // do coercion
    mop->call_static_cfn( me, f, (uintptr_t)&ljc_relational, &tag, 5
                        , os.type, os.value
                        , ot.type, ot.value
                        , OP_TARGETIMMED( op ) );
    mop->beq( me, f->m, tag, OP_TARGETIMMED( expect ), l );
    mop->b( me, f->m, LBL_NEXT( 1 ) );

    // do primitive relational
    me->ops->label_local( me, 0 );
    ar( me, f->m, os.value, ot.value, l );

    me->ops->label_local( me, 1 );

    release_temp( mop, me, f->m );
    return;
}

void emit_jmp( struct emitter** mce, struct machine_ops* mop
             , struct frame *f
             , loperand a
             , int offset ){
    assert( a.islocal );

    // if not zero then any upvalues below the vreg need to be closed.
    if( a.index > 0 ){
        vreg_operand op = vreg_to_operand( f, a.index + 1, true );
        operand base = OP_TARGETREG( acquire_temp( mop, REF, f->m ) );
        address_of( mop, REF, f->m, base, op.type );
        mop->call_static_cfn( REF, f, (uintptr_t)&closure_close, NULL, 1, base );
        release_temp( mop, REF, f->m );
    }

    unsigned int pc = (int)REF->ops->pc( REF ) + offset + 1;
    mop->b( REF, f->m, LBL_PC( pc ) );
}
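/*
 * Illustrative sketch (an assumption inferred from the emitted code, not from the
 * project's headers).  emit_relational chooses between the primitive comparison
 * and the ljc_relational fallback by OR-ing the two type tags and branching to the
 * fast path when the result equals the immediate 0; that single compare only
 * answers "are both operands numbers?" if the number tag is encoded as 0.  The toy
 * tag values below are hypothetical and just demonstrate the check.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_TAG_NUMBER 0u       /* assumed encoding: numbers carry tag 0 */
#define TOY_TAG_STRING 1u

static bool both_numbers(unsigned tag_a, unsigned tag_b) {
    /* one OR plus one compare replaces two separate tag tests */
    return (tag_a | tag_b) == TOY_TAG_NUMBER;
}

int main(void) {
    printf("%d\n", both_numbers(TOY_TAG_NUMBER, TOY_TAG_NUMBER));   /* 1: fast path  */
    printf("%d\n", both_numbers(TOY_TAG_NUMBER, TOY_TAG_STRING));   /* 0: C fallback */
    return 0;
}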