/* Narrowing of power operator or math.pow. */
TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vc)
{
  lua_Number n;
  /* Coerce a string exponent to a number first; abort the trace on failure. */
  if (tvisstr(vc) && !lj_str_tonum(strV(vc), vc))
    lj_trace_err(J, LJ_TRERR_BADTYPE);
  n = numV(vc);
  /* Limit narrowing for pow to small exponents (or for two constants). */
  if ((tref_isk(rc) && tref_isint(rc) && tref_isk(rb)) ||
      ((J->flags & JIT_F_OPT_NARROW) &&
       (numisint(n) && n >= -65536.0 && n <= 65536.0))) {
    TRef tmp;
    if (!tref_isinteger(rc)) {
      if (tref_isstr(rc))
        rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0);
      rc = emitir(IRTGI(IR_TOINT), rc, IRTOINT_CHECK);  /* Guarded TOINT! */
    }
    if (!tref_isk(rc)) {  /* Range guard: -65536 <= i <= 65536 */
      /* Bias the exponent towards INT_MIN so one unsigned-style signed
      ** compare checks both range bounds with a single guard.
      */
      tmp = emitir(IRTI(IR_ADD), rc, lj_ir_kint(J, 65536-2147483647-1));
      emitir(IRTGI(IR_LE), tmp, lj_ir_kint(J, 2*65536-2147483647-1));
    }
    return emitir(IRTN(IR_POWI), rb, rc);
  }
  /* FOLD covers most cases, but some are easier to do here. */
  if (tref_isk(rb) && tvispone(ir_knum(IR(tref_ref(rb)))))
    return rb;  /* 1 ^ x ==> 1 */
  rc = lj_ir_tonum(J, rc);
  if (tref_isk(rc) && ir_knum(IR(tref_ref(rc)))->n == 0.5)
    return emitir(IRTN(IR_FPMATH), rb, IRFPM_SQRT);  /* x ^ 0.5 ==> sqrt(x) */
  /* Split up b^c into exp2(c*log2(b)). Assembler may rejoin later. */
  rb = emitir(IRTN(IR_FPMATH), rb, IRFPM_LOG2);
  rc = emitir(IRTN(IR_MUL), rb, rc);
  return emitir(IRTN(IR_FPMATH), rc, IRFPM_EXP2);
}
/* Record math.random: PRNG step plus optional range mapping. */
static void LJ_FASTCALL recff_math_random(jit_State *J, RecordFFData *rd)
{
  GCudata *ud = udataV(&J->fn->c.upvalue[0]);
  TRef res, kone;
  lj_ir_kgc(J, obj2gco(ud), IRT_UDATA);  /* Prevent collection. */
  /* Call the PRNG step function on the state kept in the userdata. */
  res = lj_ir_call(J, IRCALL_lj_math_random_step, lj_ir_kptr(J, uddata(ud)));
  kone = lj_ir_knum_one(J);
  res = emitir(IRTN(IR_SUB), res, kone);
  if (J->base[0]) {
    TRef lo = lj_ir_tonum(J, J->base[0]);
    if (J->base[1]) {  /* d = floor(d*(r2-r1+1.0)) + r1 */
      TRef hi = lj_ir_tonum(J, J->base[1]);
      hi = emitir(IRTN(IR_SUB), hi, lo);
      hi = emitir(IRTN(IR_ADD), hi, kone);
      res = emitir(IRTN(IR_MUL), res, hi);
      res = emitir(IRTN(IR_FPMATH), res, IRFPM_FLOOR);
      res = emitir(IRTN(IR_ADD), res, lo);
    } else {  /* d = floor(d*r1) + 1.0 */
      res = emitir(IRTN(IR_MUL), res, lo);
      res = emitir(IRTN(IR_FPMATH), res, IRFPM_FLOOR);
      res = emitir(IRTN(IR_ADD), res, kone);
    }
  }
  J->base[0] = res;
  UNUSED(rd);
}
/* Record math.asin, math.acos, math.atan.
** All three are lowered to a single ATAN2:
**   atan(y) = atan2(y, 1), asin(y) = atan2(y, sqrt(1-y^2)),
**   acos(y) = atan2(sqrt(1-y^2), y).
*/
static void LJ_FASTCALL recff_math_atrig(jit_State *J, RecordFFData *rd)
{
  TRef y = lj_ir_tonum(J, J->base[0]);
  TRef x = lj_ir_knum_one(J);
  uint32_t ffid = rd->data;
  if (ffid != FF_math_atan) {
    TRef adj = emitir(IRTN(IR_MUL), y, y);
    adj = emitir(IRTN(IR_SUB), x, adj);
    adj = emitir(IRTN(IR_FPMATH), adj, IRFPM_SQRT);
    if (ffid == FF_math_asin) {
      x = adj;
    } else {  /* acos: swap the operand roles. */
      x = y;
      y = adj;
    }
  }
  J->base[0] = emitir(IRTN(IR_ATAN2), y, x);
}
/* Record math.modf: returns the integral and fractional parts. */
static void LJ_FASTCALL recff_math_modf(jit_State *J, RecordFFData *rd)
{
  TRef tr = J->base[0];
  if (tref_isinteger(tr)) {
    /* An integer argument has a zero fractional part. */
    J->base[0] = tr;
    J->base[1] = lj_ir_kint(J, 0);
  } else {
    TRef trunc;
    tr = lj_ir_tonum(J, tr);
    trunc = emitir(IRTN(IR_FPMATH), tr, IRFPM_TRUNC);
    J->base[0] = trunc;
    J->base[1] = emitir(IRTN(IR_SUB), tr, trunc);
  }
  rd->nres = 2;
}
/* Record math.deg and math.rad.
** Both are a multiply by a constant factor stored in upvalue #0.
*/
static void LJ_FASTCALL recff_math_degrad(jit_State *J, RecordFFData *rd)
{
  TRef arg = lj_ir_tonum(J, J->base[0]);
  TRef factor = lj_ir_knum(J, numV(&J->fn->c.upvalue[0]));
  J->base[0] = emitir(IRTN(IR_MUL), arg, factor);
  UNUSED(rd);
}
/* Record math.atan2. */
static void LJ_FASTCALL recff_math_atan2(jit_State *J, RecordFFData *rd)
{
  TRef try_ = lj_ir_tonum(J, J->base[0]);
  TRef trx = lj_ir_tonum(J, J->base[1]);
  J->base[0] = emitir(IRTN(IR_ATAN2), try_, trx);
  UNUSED(rd);
}
/* Record math.min and math.max.
** Left-folds all arguments with the MIN/MAX opcode from rd->data,
** staying in integer IR only while every operand is an integer.
*/
static void LJ_FASTCALL recff_math_minmax(jit_State *J, RecordFFData *rd)
{
  TRef acc = lj_ir_tonumber(J, J->base[0]);
  uint32_t op = rd->data;
  BCReg i;
  for (i = 1; J->base[i] != 0; i++) {
    TRef nxt = lj_ir_tonumber(J, J->base[i]);
    IRType t = IRT_INT;
    if (!(tref_isinteger(acc) && tref_isinteger(nxt))) {
      /* Mixed types: widen any integer side to a number. */
      if (tref_isinteger(acc))
        acc = emitir(IRTN(IR_CONV), acc, IRCONV_NUM_INT);
      if (tref_isinteger(nxt))
        nxt = emitir(IRTN(IR_CONV), nxt, IRCONV_NUM_INT);
      t = IRT_NUM;
    }
    acc = emitir(IRT(op, t), acc, nxt);
  }
  J->base[0] = acc;
}
/* Narrowing of modulo operator. */
TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc)
{
  TRef fld;
  if ((J->flags & JIT_F_OPT_NARROW) && tref_isk(rc) && tref_isint(rc)) {
    /* Optimize x % k for constant integer k. */
    int32_t k = IR(tref_ref(rc))->i;
    if (k > 0 && (k & (k-1)) == 0) {
      /* i % 2^k ==> band(i, 2^k-1) */
      if (tref_isinteger(rb))
        return emitir(IRTI(IR_BAND), rb, lj_ir_kint(J, k-1));
    }
  }
  /* General case: b % c ==> b - floor(b/c)*c */
  rb = lj_ir_tonum(J, rb);
  rc = lj_ir_tonum(J, rc);
  fld = emitir(IRTN(IR_DIV), rb, rc);
  fld = emitir(IRTN(IR_FPMATH), fld, IRFPM_FLOOR);
  fld = emitir(IRTN(IR_MUL), fld, rc);
  return emitir(IRTN(IR_SUB), rb, fld);
}
/* Record math.ldexp. */
static void LJ_FASTCALL recff_math_ldexp(jit_State *J, RecordFFData *rd)
{
  TRef mant = lj_ir_tonum(J, J->base[0]);
#if LJ_TARGET_X86ORX64
  /* x86/x64 takes the exponent operand as a number. */
  TRef expo = lj_ir_tonum(J, J->base[1]);
#else
  /* Other targets want a narrowed integer exponent. */
  TRef expo = lj_opt_narrow_toint(J, J->base[1]);
#endif
  J->base[0] = emitir(IRTN(IR_LDEXP), mant, expo);
  UNUSED(rd);
}
/* Record math.log.
** With a base argument, log(x, b) is computed as log2(x) * (1/log2(b))
** (natural log is substituted when LUAJIT_NO_LOG2 is defined).
*/
static void LJ_FASTCALL recff_math_log(jit_State *J, RecordFFData *rd)
{
  TRef res = lj_ir_tonum(J, J->base[0]);
  if (J->base[1]) {
#ifdef LUAJIT_NO_LOG2
    uint32_t fpm = IRFPM_LOG;
#else
    uint32_t fpm = IRFPM_LOG2;
#endif
    TRef rbase = lj_ir_tonum(J, J->base[1]);
    res = emitir(IRTN(IR_FPMATH), res, fpm);
    rbase = emitir(IRTN(IR_FPMATH), rbase, fpm);
    rbase = emitir(IRTN(IR_DIV), lj_ir_knum_one(J), rbase);
    res = emitir(IRTN(IR_MUL), res, rbase);
  } else {
    res = emitir(IRTN(IR_FPMATH), res, IRFPM_LOG);
  }
  J->base[0] = res;
  UNUSED(rd);
}
/* Record rounding functions math.floor and math.ceil.
** rd->data selects the IR_FPMATH rounding mode (floor or ceil).
*/
static void LJ_FASTCALL recff_math_round(jit_State *J, RecordFFData *rd)
{
  TRef tr = J->base[0];
  if (!tref_isinteger(tr)) {  /* Pass through integers unmodified. */
    tr = emitir(IRTN(IR_FPMATH), lj_ir_tonum(J, tr), rd->data);
    /* Result is integral (or NaN/Inf), but may not fit an int32_t. */
    if (LJ_DUALNUM) {  /* Try to narrow using a guarded conversion to int. */
      /* Fold the rounding on the recorded argument value; only narrow if
      ** that concrete result round-trips through int32_t losslessly.
      */
      lua_Number n = lj_vm_foldfpm(numberVnum(&rd->argv[0]), rd->data);
      if (n == (lua_Number)lj_num2int(n))
        tr = emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_CHECK);
    }
    J->base[0] = tr;
  }
}
/* Record hyperbolic trig functions, dispatched via a C call.
** rd->data holds the IRCALL index of the target libm function.
*/
static void LJ_FASTCALL recff_math_htrig(jit_State *J, RecordFFData *rd)
{
  TRef arg = lj_ir_tonum(J, J->base[0]);
  J->base[0] = emitir(IRTN(IR_CALLN), arg, rd->data);
}
/* Record unary math.* functions, mapped to IR_FPMATH opcode.
** rd->data selects the FPMATH sub-operation.
*/
static void LJ_FASTCALL recff_math_unary(jit_State *J, RecordFFData *rd)
{
  TRef arg = lj_ir_tonum(J, J->base[0]);
  J->base[0] = emitir(IRTN(IR_FPMATH), arg, rd->data);
}
/* Record math.abs: clear the sign bit via the abs-mask constant. */
static void LJ_FASTCALL recff_math_abs(jit_State *J, RecordFFData *rd)
{
  TRef arg = lj_ir_tonum(J, J->base[0]);
  J->base[0] = emitir(IRTN(IR_ABS), arg, lj_ir_knum_abs(J));
  UNUSED(rd);
}
/* Unroll loop.
** Copies the recorded pre-roll instructions after the LOOP marker while
** substituting operands through 'subst', collecting loop-carried
** dependencies as PHI candidates and fixing up int<->num type instability.
*/
static void loop_unroll(jit_State *J)
{
  IRRef1 phi[LJ_MAX_PHI];
  uint32_t nphi = 0;
  IRRef1 *subst;
  SnapNo onsnap;
  SnapShot *osnap, *loopsnap;
  SnapEntry *loopmap, *psentinel;
  IRRef ins, invar;

  /* Use temp buffer for substitution table.
  ** Only non-constant refs in [REF_BIAS,invar) are valid indexes.
  ** Caveat: don't call into the VM or run the GC or the buffer may be gone.
  */
  invar = J->cur.nins;
  subst = (IRRef1 *)lj_str_needbuf(J->L, &G(J->L)->tmpbuf,
                                   (invar-REF_BIAS)*sizeof(IRRef1)) - REF_BIAS;
  subst[REF_BASE] = REF_BASE;

  /* LOOP separates the pre-roll from the loop body. */
  emitir_raw(IRTG(IR_LOOP, IRT_NIL), 0, 0);

  /* Grow snapshot buffer and map for copy-substituted snapshots.
  ** Need up to twice the number of snapshots minus #0 and loop snapshot.
  ** Need up to twice the number of entries plus fallback substitutions
  ** from the loop snapshot entries for each new snapshot.
  ** Caveat: both calls may reallocate J->cur.snap and J->cur.snapmap!
  */
  onsnap = J->cur.nsnap;
  lj_snap_grow_buf(J, 2*onsnap-2);
  lj_snap_grow_map(J, J->cur.nsnapmap*2+(onsnap-2)*J->cur.snap[onsnap-1].nent);

  /* The loop snapshot is used for fallback substitutions. */
  loopsnap = &J->cur.snap[onsnap-1];
  loopmap = &J->cur.snapmap[loopsnap->mapofs];
  /* The PC of snapshot #0 and the loop snapshot must match. */
  psentinel = &loopmap[loopsnap->nent];
  lua_assert(*psentinel == J->cur.snapmap[J->cur.snap[0].nent]);
  *psentinel = SNAP(255, 0, 0);  /* Replace PC with temporary sentinel. */

  /* Start substitution with snapshot #1 (#0 is empty for root traces). */
  osnap = &J->cur.snap[1];

  /* Copy and substitute all recorded instructions and snapshots. */
  for (ins = REF_FIRST; ins < invar; ins++) {
    IRIns *ir;
    IRRef op1, op2;

    if (ins >= osnap->ref)  /* Instruction belongs to next snapshot? */
      loop_subst_snap(J, osnap++, loopmap, subst);  /* Copy-substitute it. */

    /* Substitute instruction operands. */
    ir = IR(ins);
    op1 = ir->op1;
    if (!irref_isk(op1)) op1 = subst[op1];
    op2 = ir->op2;
    if (!irref_isk(op2)) op2 = subst[op2];
    if (irm_kind(lj_ir_mode[ir->o]) == IRM_N &&
        op1 == ir->op1 && op2 == ir->op2) {  /* Regular invariant ins? */
      subst[ins] = (IRRef1)ins;  /* Shortcut. */
    } else {
      /* Re-emit substituted instruction to the FOLD/CSE/etc. pipeline. */
      IRType1 t = ir->t;  /* Get this first, since emitir may invalidate ir. */
      IRRef ref = tref_ref(emitir(ir->ot & ~IRT_ISPHI, op1, op2));
      subst[ins] = (IRRef1)ref;
      if (ref != ins) {
        IRIns *irr = IR(ref);
        if (ref < invar) {  /* Loop-carried dependency? */
          /* Potential PHI? */
          if (!irref_isk(ref) && !irt_isphi(irr->t) && !irt_ispri(irr->t)) {
            irt_setphi(irr->t);
            if (nphi >= LJ_MAX_PHI)
              lj_trace_err(J, LJ_TRERR_PHIOV);
            phi[nphi++] = (IRRef1)ref;
          }
          /* Check all loop-carried dependencies for type instability. */
          if (!irt_sametype(t, irr->t)) {
            if (irt_isinteger(t) && irt_isinteger(irr->t))
              continue;
            else if (irt_isnum(t) && irt_isinteger(irr->t))  /* Fix int->num. */
              ref = tref_ref(emitir(IRTN(IR_CONV), ref, IRCONV_NUM_INT));
            else if (irt_isnum(irr->t) && irt_isinteger(t))  /* Fix num->int. */
              ref = tref_ref(emitir(IRTGI(IR_CONV), ref,
                                    IRCONV_INT_NUM|IRCONV_CHECK));
            else
              lj_trace_err(J, LJ_TRERR_TYPEINS);
            subst[ins] = (IRRef1)ref;
            irr = IR(ref);
            goto phiconv;
          }
        } else if (ref != REF_DROP && irr->o == IR_CONV &&
                   ref > invar && irr->op1 < invar) {
          /* May need an extra PHI for a CONV. */
          ref = irr->op1;
          irr = IR(ref);
        phiconv:
          if (ref < invar && !irref_isk(ref) && !irt_isphi(irr->t)) {
            irt_setphi(irr->t);
            if (nphi >= LJ_MAX_PHI)
              lj_trace_err(J, LJ_TRERR_PHIOV);
            phi[nphi++] = (IRRef1)ref;
          }
        }
      }
    }
  }
  if (!irt_isguard(J->guardemit))  /* Drop redundant snapshot. */
    J->cur.nsnapmap = (uint16_t)J->cur.snap[--J->cur.nsnap].mapofs;
  lua_assert(J->cur.nsnapmap <= J->sizesnapmap);
  *psentinel = J->cur.snapmap[J->cur.snap[0].nent];  /* Restore PC. */

  loop_emit_phi(J, subst, phi, nphi, onsnap);
}
/* Replay snapshot state to setup side trace.
** Pass 1 emits SLOADs/constants for inherited slots. If sunk allocations
** or unused CONVs were found (pass23), pass 2 emits their dependent
** parent values (PVALs) and pass 3 re-materializes the sunk allocations
** and their stores in the side trace.
*/
void lj_snap_replay(jit_State *J, GCtrace *T)
{
  SnapShot *snap = &T->snap[J->exitno];
  SnapEntry *map = &T->snapmap[snap->mapofs];
  MSize n, nent = snap->nent;
  BloomFilter seen = 0;
  int pass23 = 0;
  J->framedepth = 0;
  /* Emit IR for slots inherited from parent snapshot. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    IRRef ref = snap_ref(sn);
    IRIns *ir = &T->ir[ref];
    TRef tr;
    /* The bloom filter avoids O(nent^2) overhead for de-duping slots. */
    if (bloomtest(seen, ref) && (tr = snap_dedup(J, map, n, ref)) != 0)
      goto setslot;
    bloomset(seen, ref);
    if (irref_isk(ref)) {
      tr = snap_replay_const(J, ir);
    } else if (!regsp_used(ir->prev)) {
      /* No register/spill slot in the parent: needs passes 2 and 3. */
      pass23 = 1;
      lua_assert(s != 0);
      tr = s;  /* Placeholder: slot temporarily refers to itself. */
    } else {
      IRType t = irt_type(ir->t);
      uint32_t mode = IRSLOAD_INHERIT|IRSLOAD_PARENT;
      if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) t = IRT_NUM;
      if (ir->o == IR_SLOAD) mode |= (ir->op2 & IRSLOAD_READONLY);
      tr = emitir_raw(IRT(IR_SLOAD, t), s, mode);
    }
  setslot:
    J->slot[s] = tr | (sn&(SNAP_CONT|SNAP_FRAME));  /* Same as TREF_* flags. */
    J->framedepth += ((sn & (SNAP_CONT|SNAP_FRAME)) && s);
    if ((sn & SNAP_FRAME))
      J->baseslot = s+1;
  }
  if (pass23) {
    IRIns *irlast = &T->ir[snap->ref];
    pass23 = 0;
    /* Emit dependent PVALs. */
    for (n = 0; n < nent; n++) {
      SnapEntry sn = map[n];
      IRRef refp = snap_ref(sn);
      IRIns *ir = &T->ir[refp];
      if (regsp_reg(ir->r) == RID_SUNK) {
        if (J->slot[snap_slot(sn)] != snap_slot(sn)) continue;  /* De-duped. */
        pass23 = 1;
        lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP ||
                   ir->o == IR_CNEW || ir->o == IR_CNEWI);
        if (ir->op1 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op1);
        if (ir->op2 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op2);
        if (LJ_HASFFI && ir->o == IR_CNEWI) {
          /* 32 bit: the high half of a 64 bit value lives in a HIOP. */
          if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP)
            snap_pref(J, T, map, nent, seen, (ir+1)->op2);
        } else {
          IRIns *irs;
          /* Pre-reference the values of all sunk stores into this alloc. */
          for (irs = ir+1; irs < irlast; irs++)
            if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
              if (snap_pref(J, T, map, nent, seen, irs->op2) == 0)
                snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1);
              else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) &&
                       irs+1 < irlast && (irs+1)->o == IR_HIOP)
                snap_pref(J, T, map, nent, seen, (irs+1)->op2);
            }
        }
      } else if (!irref_isk(refp) && !regsp_used(ir->prev)) {
        lua_assert(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT);
        J->slot[snap_slot(sn)] = snap_pref(J, T, map, nent, seen, ir->op1);
      }
    }
    /* Replay sunk instructions. */
    for (n = 0; pass23 && n < nent; n++) {
      SnapEntry sn = map[n];
      IRRef refp = snap_ref(sn);
      IRIns *ir = &T->ir[refp];
      if (regsp_reg(ir->r) == RID_SUNK) {
        TRef op1, op2;
        if (J->slot[snap_slot(sn)] != snap_slot(sn)) {  /* De-dup allocs. */
          J->slot[snap_slot(sn)] = J->slot[J->slot[snap_slot(sn)]];
          continue;
        }
        op1 = ir->op1;
        if (op1 >= T->nk) op1 = snap_pref(J, T, map, nent, seen, op1);
        op2 = ir->op2;
        if (op2 >= T->nk) op2 = snap_pref(J, T, map, nent, seen, op2);
        if (LJ_HASFFI && ir->o == IR_CNEWI) {
          if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP) {
            lj_needsplit(J);  /* Emit joining HIOP. */
            op2 = emitir_raw(IRT(IR_HIOP, IRT_I64), op2,
                             snap_pref(J, T, map, nent, seen, (ir+1)->op2));
          }
          J->slot[snap_slot(sn)] = emitir(ir->ot & ~(IRT_MARK|IRT_ISPHI),
                                          op1, op2);
        } else {
          IRIns *irs;
          TRef tr = emitir(ir->ot, op1, op2);
          J->slot[snap_slot(sn)] = tr;
          /* Replay all sunk stores into the re-materialized allocation. */
          for (irs = ir+1; irs < irlast; irs++)
            if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
              IRIns *irr = &T->ir[irs->op1];
              TRef val, key = irr->op2, tmp = tr;
              if (irr->o != IR_FREF) {
                IRIns *irk = &T->ir[key];
                if (irr->o == IR_HREFK)
                  key = lj_ir_kslot(J, snap_replay_const(J, &T->ir[irk->op1]),
                                    irk->op2);
                else
                  key = snap_replay_const(J, irk);
                if (irr->o == IR_HREFK || irr->o == IR_AREF) {
                  IRIns *irf = &T->ir[irr->op1];
                  tmp = emitir(irf->ot, tmp, irf->op2);
                }
              }
              tmp = emitir(irr->ot, tmp, key);
              val = snap_pref(J, T, map, nent, seen, irs->op2);
              if (val == 0) {
                /* Value was an unused CONV: replay it from its source. */
                IRIns *irc = &T->ir[irs->op2];
                lua_assert(irc->o == IR_CONV && irc->op2 == IRCONV_NUM_INT);
                val = snap_pref(J, T, map, nent, seen, irc->op1);
                val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT);
              } else if ((LJ_SOFTFP || (LJ_32 && LJ_HASFFI)) &&
                         irs+1 < irlast && (irs+1)->o == IR_HIOP) {
                /* Rejoin the split 64 bit value for the store. */
                IRType t = IRT_I64;
                if (LJ_SOFTFP && irt_type((irs+1)->t) == IRT_SOFTFP)
                  t = IRT_NUM;
                lj_needsplit(J);
                if (irref_isk(irs->op2) && irref_isk((irs+1)->op2)) {
                  uint64_t k = (uint32_t)T->ir[irs->op2].i +
                               ((uint64_t)T->ir[(irs+1)->op2].i << 32);
                  val = lj_ir_k64(J, t == IRT_I64 ? IR_KINT64 : IR_KNUM,
                                  lj_ir_k64_find(J, k));
                } else {
                  val = emitir_raw(IRT(IR_HIOP, t), val,
                                   snap_pref(J, T, map, nent, seen,
                                             (irs+1)->op2));
                }
                tmp = emitir(IRT(irs->o, t), tmp, val);
                continue;
              }
              tmp = emitir(irs->ot, tmp, val);
            } else if (LJ_HASFFI && irs->o == IR_XBAR && ir->o == IR_CNEW) {
              emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
            }
        }
      }
    }
  }
  J->base = J->slot + J->baseslot;
  J->maxslot = snap->nslots - J->baseslot;
  lj_snap_add(J);
  if (pass23)  /* Need explicit GC step _after_ initial snapshot. */
    emitir_raw(IRTG(IR_GCSTEP, IRT_NIL), 0, 0);
}