/* Record N-ary bit.band, bit.bor, bit.bxor. */
static void LJ_FASTCALL recff_bit_nary(jit_State *J, RecordFFData *rd)
{
  TRef tr = lj_opt_narrow_tobit(J, J->base[0]);
  uint32_t op = rd->data;
  BCReg i;
  for (i = 1; J->base[i] != 0; i++)
    tr = emitir(IRTI(op), tr, lj_opt_narrow_tobit(J, J->base[i]));
  J->base[0] = tr;
}
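/* Illustrative sketch (not part of the recorder; the helper name below is
** hypothetical): recording bit.band(a, b, c) left-folds into two IR
** instructions, BAND(BAND(a, b), c), i.e. the same reduction as this plain
** C loop over already-narrowed 32 bit values.
*/
#include <stdint.h>

static uint32_t sketch_band_nary(const uint32_t *v, int n)
{
  uint32_t acc = v[0];  /* tr starts as the narrowed first argument. */
  int i;
  for (i = 1; i < n; i++)
    acc &= v[i];  /* One emitted IR op per additional argument. */
  return acc;
}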
static void LJ_FASTCALL recff_rawlen(jit_State *J, RecordFFData *rd)
{
  TRef tr = J->base[0];
  if (tref_isstr(tr))
    J->base[0] = emitir(IRTI(IR_FLOAD), tr, IRFL_STR_LEN);
  else if (tref_istab(tr))
    J->base[0] = lj_ir_call(J, IRCALL_lj_tab_len, tr);
  /* else: Interpreter will throw. */
  UNUSED(rd);
}
/* Get a pointer to the other 32 bit word (LE: hiword, BE: loword). */
static IRRef split_ptr(jit_State *J, IRRef ref)
{
  IRIns *ir = IR(ref);
  int32_t ofs = 4;
  if (ir->o == IR_ADD && irref_isk(ir->op2)) {  /* Reassociate address. */
    ofs += IR(ir->op2)->i;
    ref = ir->op1;
    if (ofs == 0) return ref;
  }
  return split_emit(J, IRTI(IR_ADD), ref, lj_ir_kint(J, ofs));
}
/* Get a pointer to the other 32 bit word (LE: hiword, BE: loword). */
static IRRef split_ptr(jit_State *J, IRIns *oir, IRRef ref)
{
  IRRef nref = oir[ref].prev;
  IRIns *ir = IR(nref);
  int32_t ofs = 4;
  if (ir->o == IR_ADD && irref_isk(ir->op2) && !irt_isphi(oir[ref].t)) {
    /* Reassociate address. */
    ofs += IR(ir->op2)->i;
    nref = ir->op1;
    if (ofs == 0) return nref;
  }
  return split_emit(J, IRTI(IR_ADD), nref, lj_ir_kint(J, ofs));
}
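/* Worked example for the reassociation above (a sketch; LE layout assumed,
** where the other word of a 64 bit value sits at byte offset +4): given a
** base reference ADD(p, +8), the hiword pointer becomes a single
** ADD(p, +12) rather than ADD(ADD(p, +8), +4); for ADD(p, -4) the offsets
** cancel (ofs == 0) and the base pointer p is returned directly.
*/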
/* Emit a CALLN with one split 64 bit argument. */
static IRRef split_call_l(jit_State *J, IRRef1 *hisubst, IRIns *oir,
			  IRIns *ir, IRCallID id)
{
  IRRef tmp, op1 = ir->op1;
  J->cur.nins--;
#if LJ_LE
  tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
#else
  tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
#endif
  ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
  return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
}
static void LJ_FASTCALL recff_ipairs_aux(jit_State *J, RecordFFData *rd)
{
  RecordIndex ix;
  ix.tab = J->base[0];
  if (tref_istab(ix.tab)) {
    if (!tvisnumber(&rd->argv[1]))  /* No support for string coercion. */
      lj_trace_err(J, LJ_TRERR_BADTYPE);
    setintV(&ix.keyv, numberVint(&rd->argv[1])+1);
    settabV(J->L, &ix.tabv, tabV(&rd->argv[0]));
    ix.val = 0; ix.idxchain = 0;
    ix.key = lj_opt_narrow_toint(J, J->base[1]);
    J->base[0] = ix.key = emitir(IRTI(IR_ADD), ix.key, lj_ir_kint(J, 1));
    J->base[1] = lj_record_idx(J, &ix);
    rd->nres = tref_isnil(J->base[1]) ? 0 : 2;
  }  /* else: Interpreter will throw. */
}
/* Narrowing of modulo operator. */
TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc)
{
  TRef tmp;
  if ((J->flags & JIT_F_OPT_NARROW) && tref_isk(rc) && tref_isint(rc)) {
    /* Optimize x % k. */
    int32_t k = IR(tref_ref(rc))->i;
    if (k > 0 && (k & (k-1)) == 0) {  /* i % 2^k ==> band(i, 2^k-1) */
      if (tref_isinteger(rb))
	return emitir(IRTI(IR_BAND), rb, lj_ir_kint(J, k-1));
    }
  }
  /* b % c ==> b - floor(b/c)*c */
  rb = lj_ir_tonum(J, rb);
  rc = lj_ir_tonum(J, rc);
  tmp = emitir(IRTN(IR_DIV), rb, rc);
  tmp = emitir(IRTN(IR_FPMATH), tmp, IRFPM_FLOOR);
  tmp = emitir(IRTN(IR_MUL), tmp, rc);
  return emitir(IRTN(IR_SUB), rb, tmp);
}
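/* Worked example for the rewrite above (a sketch, not recorder output):
** with an integer rb and constant k = 16, x % 16 folds to BAND(x, +15).
** The generic path matches Lua's floored modulo, e.g. for b = -5, c = 3:
** -5 - floor(-5/3)*3 = -5 - (-2)*3 = 1, so -5 % 3 ==> 1.
*/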
/* Emit a CALLN with two split 64 bit arguments. */
static IRRef split_call_ll(jit_State *J, IRRef1 *hisubst, IRIns *oir,
			   IRIns *ir, IRCallID id)
{
  IRRef tmp, op1 = ir->op1, op2 = ir->op2;
  J->cur.nins--;
#if LJ_LE
  tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
  tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev);
  tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]);
#else
  tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
  tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]);
  tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev);
#endif
  ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
  return split_emit(J,
    IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT),
    tmp, tmp);
}
static void LJ_FASTCALL recff_table_insert(jit_State *J, RecordFFData *rd)
{
  RecordIndex ix;
  ix.tab = J->base[0];
  ix.val = J->base[1];
  rd->nres = 0;
  if (tref_istab(ix.tab) && ix.val) {
    if (!J->base[2]) {  /* Simple push: t[#t+1] = v */
      TRef trlen = lj_ir_call(J, IRCALL_lj_tab_len, ix.tab);
      GCtab *t = tabV(&rd->argv[0]);
      ix.key = emitir(IRTI(IR_ADD), trlen, lj_ir_kint(J, 1));
      settabV(J->L, &ix.tabv, t);
      setintV(&ix.keyv, lj_tab_len(t) + 1);
      ix.idxchain = 0;
      lj_record_idx(J, &ix);  /* Set new value. */
    } else {  /* Complex case: insert in the middle. */
      recff_nyiu(J);
    }
  }  /* else: Interpreter will throw. */
}
static void LJ_FASTCALL recff_io_write(jit_State *J, RecordFFData *rd)
{
  TRef fp = recff_io_fp(J, rd->data);
  TRef zero = lj_ir_kint(J, 0);
  TRef one = lj_ir_kint(J, 1);
  ptrdiff_t i = rd->data == 0 ? 1 : 0;
  for (; J->base[i]; i++) {
    TRef str = lj_ir_tostr(J, J->base[i]);
    TRef buf = emitir(IRT(IR_STRREF, IRT_P32), str, zero);
    TRef len = emitir(IRTI(IR_FLOAD), str, IRFL_STR_LEN);
    if (tref_isk(len) && IR(tref_ref(len))->i == 1) {
      TRef tr = emitir(IRT(IR_XLOAD, IRT_U8), buf, IRXLOAD_READONLY);
      tr = lj_ir_call(J, IRCALL_fputc, tr, fp);
      if (results_wanted(J) != 0)  /* Check result only if not ignored. */
	emitir(IRTGI(IR_NE), tr, lj_ir_kint(J, -1));
    } else {
      TRef tr = lj_ir_call(J, IRCALL_fwrite, buf, one, len, fp);
      if (results_wanted(J) != 0)  /* Check result only if not ignored. */
	emitir(IRTGI(IR_EQ), tr, len);
    }
  }
  J->base[0] = TREF_TRUE;
}
static void LJ_FASTCALL recff_string_len(jit_State *J, RecordFFData *rd)
{
  J->base[0] = emitir(IRTI(IR_FLOAD), lj_ir_tostr(J, J->base[0]),
		      IRFL_STR_LEN);
  UNUSED(rd);
}
/* Handle string.byte (rd->data = 0) and string.sub (rd->data = 1). */
static void LJ_FASTCALL recff_string_range(jit_State *J, RecordFFData *rd)
{
  TRef trstr = lj_ir_tostr(J, J->base[0]);
  TRef trlen = emitir(IRTI(IR_FLOAD), trstr, IRFL_STR_LEN);
  TRef tr0 = lj_ir_kint(J, 0);
  TRef trstart, trend;
  GCstr *str = argv2str(J, &rd->argv[0]);
  int32_t start, end;
  if (rd->data) {  /* string.sub(str, start [,end]) */
    start = argv2int(J, &rd->argv[1]);
    trstart = lj_opt_narrow_toint(J, J->base[1]);
    trend = J->base[2];
    if (tref_isnil(trend)) {
      trend = lj_ir_kint(J, -1);
      end = -1;
    } else {
      trend = lj_opt_narrow_toint(J, trend);
      end = argv2int(J, &rd->argv[2]);
    }
  } else {  /* string.byte(str [,start [,end]]) */
    if (J->base[1]) {
      start = argv2int(J, &rd->argv[1]);
      trstart = lj_opt_narrow_toint(J, J->base[1]);
      trend = J->base[2];
      if (tref_isnil(trend)) {
	trend = trstart;
	end = start;
      } else {
	trend = lj_opt_narrow_toint(J, trend);
	end = argv2int(J, &rd->argv[2]);
      }
    } else {
      trend = trstart = lj_ir_kint(J, 1);
      end = start = 1;
    }
  }
  if (end < 0) {
    emitir(IRTGI(IR_LT), trend, tr0);
    trend = emitir(IRTI(IR_ADD), emitir(IRTI(IR_ADD), trlen, trend),
		   lj_ir_kint(J, 1));
    end = end+(int32_t)str->len+1;
  } else if ((MSize)end <= str->len) {
    emitir(IRTGI(IR_ULE), trend, trlen);
  } else {
    emitir(IRTGI(IR_GT), trend, trlen);
    end = (int32_t)str->len;
    trend = trlen;
  }
  if (start < 0) {
    emitir(IRTGI(IR_LT), trstart, tr0);
    trstart = emitir(IRTI(IR_ADD), trlen, trstart);
    start = start+(int32_t)str->len;
    emitir(start < 0 ? IRTGI(IR_LT) : IRTGI(IR_GE), trstart, tr0);
    if (start < 0) {
      trstart = tr0;
      start = 0;
    }
  } else {
    if (start == 0) {
      emitir(IRTGI(IR_EQ), trstart, tr0);
      trstart = tr0;
    } else {
      trstart = emitir(IRTI(IR_ADD), trstart, lj_ir_kint(J, -1));
      emitir(IRTGI(IR_GE), trstart, tr0);
      start--;
    }
  }
  if (rd->data) {  /* Return string.sub result. */
    if (end - start >= 0) {
      /* Also handle empty range here, to avoid extra traces. */
      TRef trptr, trslen = emitir(IRTI(IR_SUB), trend, trstart);
      emitir(IRTGI(IR_GE), trslen, tr0);
      trptr = emitir(IRT(IR_STRREF, IRT_P32), trstr, trstart);
      J->base[0] = emitir(IRT(IR_SNEW, IRT_STR), trptr, trslen);
    } else {  /* Range underflow: return empty string. */
      emitir(IRTGI(IR_LT), trend, trstart);
      J->base[0] = lj_ir_kstr(J, lj_str_new(J->L, strdata(str), 0));
    }
  } else {  /* Return string.byte result(s). */
    ptrdiff_t i, len = end - start;
    if (len > 0) {
      TRef trslen = emitir(IRTI(IR_SUB), trend, trstart);
      emitir(IRTGI(IR_EQ), trslen, lj_ir_kint(J, (int32_t)len));
      if (J->baseslot + len > LJ_MAX_JSLOTS)
	lj_trace_err_info(J, LJ_TRERR_STACKOV);
      rd->nres = len;
      for (i = 0; i < len; i++) {
	TRef tmp = emitir(IRTI(IR_ADD), trstart, lj_ir_kint(J, (int32_t)i));
	tmp = emitir(IRT(IR_STRREF, IRT_P32), trstr, tmp);
	J->base[i] = emitir(IRT(IR_XLOAD, IRT_U8), tmp, IRXLOAD_READONLY);
      }
    } else {  /* Empty range or range underflow: return no results. */
      emitir(IRTGI(IR_LE), trend, trstart);
      rd->nres = 0;
    }
  }
}
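/* Reference sketch of the range canonicalization recorded above (an
** illustration of string.sub's 1-based semantics; the recorder itself
** works with 0-based offsets and emits guards instead of branches).
** The helper name is hypothetical.
*/
#include <stdint.h>

static void sketch_str_range(int32_t len, int32_t *start, int32_t *end)
{
  if (*end < 0) *end += len + 1;          /* Negative: count from the end. */
  else if (*end > len) *end = len;        /* Clamp to the string length. */
  if (*start < 0) { *start += len + 1; if (*start < 1) *start = 1; }
  else if (*start == 0) *start = 1;       /* Index 0 behaves like 1. */
}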
/* Record unary bit.tobit, bit.bnot, bit.bswap. */
static void LJ_FASTCALL recff_bit_unary(jit_State *J, RecordFFData *rd)
{
  TRef tr = lj_opt_narrow_tobit(J, J->base[0]);
  J->base[0] = (rd->data == IR_TOBIT) ? tr : emitir(IRTI(rd->data), tr, 0);
}
/* Backpropagate narrowing conversion. Return number of needed conversions. */
static int narrow_conv_backprop(NarrowConv *nc, IRRef ref, int depth)
{
  jit_State *J = nc->J;
  IRIns *ir = IR(ref);
  IRRef cref;

  /* Check the easy cases first. */
  if (ir->o == IR_TONUM) {  /* Undo inverse conversion. */
    *nc->sp++ = NARROWINS(NARROW_REF, ir->op1);
    return 0;
  } else if (ir->o == IR_KNUM) {  /* Narrow FP constant. */
    lua_Number n = ir_knum(ir)->n;
    if (nc->mode == IRTOINT_TOBIT) {  /* Allows a wider range of constants. */
      int64_t k64 = (int64_t)n;
      if (n == cast_num(k64)) {  /* Only if constant doesn't lose precision. */
	*nc->sp++ = NARROWINS(NARROW_INT, 0);
	*nc->sp++ = (NarrowIns)k64;  /* But always truncate to 32 bits. */
	return 0;
      }
    } else {
      int32_t k = lj_num2int(n);
      if (n == cast_num(k)) {  /* Only if constant is really an integer. */
	*nc->sp++ = NARROWINS(NARROW_INT, 0);
	*nc->sp++ = (NarrowIns)k;
	return 0;
      }
    }
    return 10;  /* Never narrow other FP constants (this is rare). */
  }

  /* Try to CSE the conversion. Stronger checks are ok, too. */
  for (cref = J->chain[fins->o]; cref > ref; cref = IR(cref)->prev)
    if (IR(cref)->op1 == ref &&
	irt_isguard(IR(cref)->t) >= irt_isguard(fins->t)) {
      *nc->sp++ = NARROWINS(NARROW_REF, cref);
      return 0;  /* Already there, no additional conversion needed. */
    }

  /* Backpropagate across ADD/SUB. */
  if (ir->o == IR_ADD || ir->o == IR_SUB) {
    /* Try cache lookup first. */
    IRRef bpref, mode = nc->mode;
    if (mode == IRTOINT_INDEX && depth > 0)
      mode = IRTOINT_CHECK;  /* Inner conversions need a stronger check. */
    bpref = narrow_bpc_get(nc->J, (IRRef1)ref, mode);
    if (bpref) {
      *nc->sp++ = NARROWINS(NARROW_REF, bpref);
      return 0;
    }
    if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) {
      NarrowIns *savesp = nc->sp;
      int count = narrow_conv_backprop(nc, ir->op1, depth);
      count += narrow_conv_backprop(nc, ir->op2, depth);
      if (count <= nc->lim) {  /* Limit total number of conversions. */
	*nc->sp++ = NARROWINS(IRTI(ir->o), ref);
	return count;
      }
      nc->sp = savesp;  /* Too many conversions, need to backtrack. */
    }
  }

  /* Otherwise add a conversion. */
  *nc->sp++ = NARROWINS(NARROW_CONV, ref);
  return 1;
}
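/* Worked example (a sketch): narrowing a conversion of ADD(TONUM(i), KNUM(1))
** backpropagates into both operands. TONUM(i) is undone to i (zero extra
** conversions) and KNUM(1) narrows to the integer constant +1 (also zero),
** so count = 0 <= nc->lim and the expression is re-emitted as an integer
** ADD(i, +1) with no conversion instructions at all.
*/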
/* XLOAD forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J)
{
  IRRef xref = fins->op1;
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef ref;

  if ((fins->op2 & IRXLOAD_READONLY))
    goto cselim;
  if ((fins->op2 & IRXLOAD_VOLATILE))
    goto doemit;

  /* Search for conflicting stores. */
  ref = J->chain[IR_XSTORE];
retry:
  if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
  if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
  while (ref > lim) {
    IRIns *store = IR(ref);
    switch (aa_xref(J, xr, fins, store)) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST:
      /* Emit conversion if the loaded type doesn't match the forwarded type. */
      if (!irt_sametype(fins->t, IR(store->op2)->t)) {
	IRType st = irt_type(fins->t);
	if (st == IRT_I8 || st == IRT_I16) {  /* Trunc + sign-extend. */
	  st |= IRCONV_SEXT;
	} else if (st == IRT_U8 || st == IRT_U16) {  /* Trunc + zero-extend. */
	} else if (st == IRT_INT) {
	  st = irt_type(IR(store->op2)->t);  /* Needs dummy CONV.int.*. */
	} else {  /* I64/U64 are boxed, U32 is hidden behind a CONV.num.u32. */
	  goto store_fwd;
	}
	fins->ot = IRTI(IR_CONV);
	fins->op1 = store->op2;
	fins->op2 = (IRT_INT<<5)|st;
	return RETRYFOLD;
      }
    store_fwd:
      return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[IR_XLOAD];
  while (ref > lim) {
    /* CSE for XLOAD depends on the type, but not on the IRXLOAD_* flags. */
    if (IR(ref)->op1 == xref && irt_sametype(IR(ref)->t, fins->t))
      return ref;
    ref = IR(ref)->prev;
  }

  /* Reassociate XLOAD across PHIs to handle a[i-1] forwarding case. */
  if (!(fins->op2 & IRXLOAD_READONLY) && J->chain[IR_LOOP] &&
      xref == fins->op1 && (xref = reassoc_xref(J, xr)) != 0) {
    ref = J->chain[IR_XSTORE];
    while (ref > lim)  /* Skip stores that have already been checked. */
      ref = IR(ref)->prev;
    lim = xref;
    xr = IR(xref);
    goto retry;  /* Retry with the reassociated reference. */
  }

doemit:
  return EMITFOLD;
}
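/* Sketch of the ALIAS_MUST conversion case above (illustrative only; the
** helper name is hypothetical): forwarding an int that was stored and is
** re-loaded as i8 needs truncation plus sign-extension, i.e. a CONV with
** IRCONV_SEXT, equivalent to this plain C cast chain.
*/
#include <stdint.h>

static int32_t sketch_fwd_i8(int32_t stored)
{
  return (int32_t)(int8_t)stored;  /* CONV int.i8 with IRCONV_SEXT. */
}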
/* Transform the old IR to the new IR. */
static void split_ir(jit_State *J)
{
  IRRef nins = J->cur.nins, nk = J->cur.nk;
  MSize irlen = nins - nk;
  MSize need = (irlen+1)*(sizeof(IRIns) + sizeof(IRRef1));
  IRIns *oir = (IRIns *)lj_str_needbuf(J->L, &G(J->L)->tmpbuf, need);
  IRRef1 *hisubst;
  IRRef ref, snref;
  SnapShot *snap;

  /* Copy old IR to buffer. */
  memcpy(oir, IR(nk), irlen*sizeof(IRIns));
  /* Bias hiword substitution table and old IR. Loword kept in field prev. */
  hisubst = (IRRef1 *)&oir[irlen] - nk;
  oir -= nk;

  /* Remove all IR instructions, but retain IR constants. */
  J->cur.nins = REF_FIRST;
  J->loopref = 0;

  /* Process constants and fixed references. */
  for (ref = nk; ref <= REF_BASE; ref++) {
    IRIns *ir = &oir[ref];
    if ((LJ_SOFTFP && ir->o == IR_KNUM) || ir->o == IR_KINT64) {
      /* Split up 64 bit constant. */
      TValue tv = *ir_k64(ir);
      ir->prev = lj_ir_kint(J, (int32_t)tv.u32.lo);
      hisubst[ref] = lj_ir_kint(J, (int32_t)tv.u32.hi);
    } else {
      ir->prev = ref;  /* Identity substitution for loword. */
      hisubst[ref] = 0;
    }
  }

  /* Process old IR instructions. */
  snap = J->cur.snap;
  snref = snap->ref;
  for (ref = REF_FIRST; ref < nins; ref++) {
    IRIns *ir = &oir[ref];
    IRRef nref = lj_ir_nextins(J);
    IRIns *nir = IR(nref);
    IRRef hi = 0;

    if (ref >= snref) {
      snap->ref = nref;
      split_subst_snap(J, snap++, oir);
      snref = snap < &J->cur.snap[J->cur.nsnap] ? snap->ref : ~(IRRef)0;
    }

    /* Copy-substitute old instruction to new instruction. */
    nir->op1 = ir->op1 < nk ? ir->op1 : oir[ir->op1].prev;
    nir->op2 = ir->op2 < nk ? ir->op2 : oir[ir->op2].prev;
    ir->prev = nref;  /* Loword substitution. */
    nir->o = ir->o;
    nir->t.irt = ir->t.irt & ~(IRT_MARK|IRT_ISPHI);
    hisubst[ref] = 0;

    /* Split 64 bit instructions. */
#if LJ_SOFTFP
    if (irt_isnum(ir->t)) {
      nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD);  /* Turn into INT op. */
      /* Note: hi ref = lo ref + 1! Required for SNAP_SOFTFPNUM logic. */
      switch (ir->o) {
      case IR_ADD:
	hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_add);
	break;
      case IR_SUB:
	hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_sub);
	break;
      case IR_MUL:
	hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_mul);
	break;
      case IR_DIV:
	hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_div);
	break;
      case IR_POW:
	hi = split_call_li(J, hisubst, oir, ir, IRCALL_lj_vm_powi);
	break;
      case IR_FPMATH:
	/* Try to rejoin pow from EXP2, MUL and LOG2. */
	if (nir->op2 == IRFPM_EXP2 && nir->op1 > J->loopref) {
	  IRIns *irp = IR(nir->op1);
	  if (irp->o == IR_CALLN && irp->op2 == IRCALL_softfp_mul) {
	    IRIns *irm4 = IR(irp->op1);
	    IRIns *irm3 = IR(irm4->op1);
	    IRIns *irm12 = IR(irm3->op1);
	    IRIns *irl1 = IR(irm12->op1);
	    if (irm12->op1 > J->loopref && irl1->o == IR_CALLN &&
		irl1->op2 == IRCALL_lj_vm_log2) {
	      IRRef tmp = irl1->op1;  /* Recycle first two args from LOG2. */
	      IRRef arg3 = irm3->op2, arg4 = irm4->op2;
	      J->cur.nins--;
	      tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg3);
	      tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, arg4);
	      ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_pow);
	      hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
	      break;
	    }
	  }
	}
	hi = split_call_l(J, hisubst, oir, ir, IRCALL_lj_vm_floor + ir->op2);
	break;
      case IR_ATAN2:
	hi = split_call_ll(J, hisubst, oir, ir, IRCALL_atan2);
	break;
      case IR_LDEXP:
	hi = split_call_li(J, hisubst, oir, ir, IRCALL_ldexp);
	break;
      case IR_NEG: case IR_ABS:
	nir->o = IR_CONV;  /* Pass through loword. */
	nir->op2 = (IRT_INT << 5) | IRT_INT;
	hi = split_emit(J, IRT(ir->o == IR_NEG ? IR_BXOR : IR_BAND,
			       IRT_SOFTFP),
			hisubst[ir->op1], hisubst[ir->op2]);
	break;
      case IR_SLOAD:
	if ((nir->op2 & IRSLOAD_CONVERT)) {  /* Convert from int to number. */
	  nir->op2 &= ~IRSLOAD_CONVERT;
	  ir->prev = nref = split_emit(J, IRTI(IR_CALLN), nref,
				       IRCALL_softfp_i2d);
	  hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
	  break;
	}
	/* fallthrough */
      case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
      case IR_STRTO:
	hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
	break;
      case IR_XLOAD: {
	IRIns inslo = *nir;  /* Save/undo the emit of the lo XLOAD. */
	J->cur.nins--;
	hi = split_ptr(J, oir, ir->op1);  /* Insert the hiref ADD. */
	nref = lj_ir_nextins(J);
	nir = IR(nref);
	*nir = inslo;  /* Re-emit lo XLOAD immediately before hi XLOAD. */
	hi = split_emit(J, IRT(IR_XLOAD, IRT_SOFTFP), hi, ir->op2);
#if LJ_LE
	ir->prev = nref;
#else
	ir->prev = hi; hi = nref;
#endif
	break;
	}
      case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_XSTORE:
	split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nir->op1, hisubst[ir->op2]);
	break;
      case IR_CONV: {  /* Conversion to number. Others handled below. */
	IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
	UNUSED(st);
#if LJ_32 && LJ_HASFFI
	if (st == IRT_I64 || st == IRT_U64) {
	  hi = split_call_l(J, hisubst, oir, ir,
		 st == IRT_I64 ? IRCALL_fp64_l2d : IRCALL_fp64_ul2d);
	  break;
	}
#endif
	lua_assert(st == IRT_INT ||
		   (LJ_32 && LJ_HASFFI && (st == IRT_U32 || st == IRT_FLOAT)));
	nir->o = IR_CALLN;
#if LJ_32 && LJ_HASFFI
	nir->op2 = st == IRT_INT ? IRCALL_softfp_i2d :
		   st == IRT_FLOAT ? IRCALL_softfp_f2d :
		   IRCALL_softfp_ui2d;
#else
	nir->op2 = IRCALL_softfp_i2d;
#endif
	hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
	break;
	}
      case IR_CALLN:
      case IR_CALLL:
      case IR_CALLS:
      case IR_CALLXS:
	goto split_call;
      case IR_PHI:
	if (nir->op1 == nir->op2)
	  J->cur.nins--;  /* Drop useless PHIs. */
	if (hisubst[ir->op1] != hisubst[ir->op2])
	  split_emit(J, IRT(IR_PHI, IRT_SOFTFP),
		     hisubst[ir->op1], hisubst[ir->op2]);
	break;
      case IR_HIOP:
	J->cur.nins--;  /* Drop joining HIOP. */
	ir->prev = nir->op1;
	hi = nir->op2;
	break;
      default:
	lua_assert(ir->o <= IR_NE || ir->o == IR_MIN || ir->o == IR_MAX);
	hi = split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP),
			hisubst[ir->op1], hisubst[ir->op2]);
	break;
      }
    } else
#endif
#if LJ_32 && LJ_HASFFI
    if (irt_isint64(ir->t)) {
      IRRef hiref = hisubst[ir->op1];
      nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD);  /* Turn into INT op. */
      switch (ir->o) {
      case IR_ADD:
      case IR_SUB:
	/* Use plain op for hiword if loword cannot produce a carry/borrow. */
	if (irref_isk(nir->op2) && IR(nir->op2)->i == 0) {
	  ir->prev = nir->op1;  /* Pass through loword. */
	  nir->op1 = hiref; nir->op2 = hisubst[ir->op2];
	  hi = nref;
	  break;
	}
	/* fallthrough */
      case IR_NEG:
	hi = split_emit(J, IRTI(IR_HIOP), hiref, hisubst[ir->op2]);
	break;
      case IR_MUL:
	hi = split_call_ll(J, hisubst, oir, ir, IRCALL_lj_carith_mul64);
	break;
      case IR_DIV:
	hi = split_call_ll(J, hisubst, oir, ir,
			   irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
					      IRCALL_lj_carith_divu64);
	break;
      case IR_MOD:
	hi = split_call_ll(J, hisubst, oir, ir,
			   irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
					      IRCALL_lj_carith_modu64);
	break;
      case IR_POW:
	hi = split_call_ll(J, hisubst, oir, ir,
			   irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
					      IRCALL_lj_carith_powu64);
	break;
      case IR_FLOAD:
	lua_assert(ir->op2 == IRFL_CDATA_INT64);
	hi = split_emit(J, IRTI(IR_FLOAD), nir->op1, IRFL_CDATA_INT64_4);
#if LJ_BE
	ir->prev = hi; hi = nref;
#endif
	break;
      case IR_XLOAD:
	hi = split_emit(J, IRTI(IR_XLOAD), split_ptr(J, oir, ir->op1),
			ir->op2);
#if LJ_BE
	ir->prev = hi; hi = nref;
#endif
	break;
      case IR_XSTORE:
	split_emit(J, IRTI(IR_HIOP), nir->op1, hisubst[ir->op2]);
	break;
      case IR_CONV: {  /* Conversion to 64 bit integer. Others handled below. */
	IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
#if LJ_SOFTFP
	if (st == IRT_NUM) {  /* NUM to 64 bit int conv. */
	  hi = split_call_l(J, hisubst, oir, ir,
		 irt_isi64(ir->t) ? IRCALL_fp64_d2l : IRCALL_fp64_d2ul);
	} else if (st == IRT_FLOAT) {  /* FLOAT to 64 bit int conv. */
	  nir->o = IR_CALLN;
	  nir->op2 = irt_isi64(ir->t) ? IRCALL_fp64_f2l : IRCALL_fp64_f2ul;
	  hi = split_emit(J, IRTI(IR_HIOP), nref, nref);
	}
#else
	if (st == IRT_NUM || st == IRT_FLOAT) {  /* FP to 64 bit int conv. */
	  hi = split_emit(J, IRTI(IR_HIOP), nir->op1, nref);
	}
#endif
	else if (st == IRT_I64 || st == IRT_U64) {  /* 64/64 bit cast. */
	  /* Drop cast, since assembler doesn't care. But fwd both parts. */
	  hi = hiref;
	  goto fwdlo;
	} else if ((ir->op2 & IRCONV_SEXT)) {  /* Sign-extend to 64 bit. */
	  IRRef k31 = lj_ir_kint(J, 31);
	  nir = IR(nref);  /* May have been reallocated. */
	  ir->prev = nir->op1;  /* Pass through loword. */
	  nir->o = IR_BSAR;  /* hi = bsar(lo, 31). */
	  nir->op2 = k31;
	  hi = nref;
	} else {  /* Zero-extend to 64 bit. */
	  hi = lj_ir_kint(J, 0);
	  goto fwdlo;
	}
	break;
	}
      case IR_CALLXS:
	goto split_call;
      case IR_PHI: {
	IRRef hiref2;
	if ((irref_isk(nir->op1) && irref_isk(nir->op2)) ||
	    nir->op1 == nir->op2)
	  J->cur.nins--;  /* Drop useless PHIs. */
	hiref2 = hisubst[ir->op2];
	if (!((irref_isk(hiref) && irref_isk(hiref2)) || hiref == hiref2))
	  split_emit(J, IRTI(IR_PHI), hiref, hiref2);
	break;
	}
      case IR_HIOP:
	J->cur.nins--;  /* Drop joining HIOP. */
	ir->prev = nir->op1;
	hi = nir->op2;
	break;
      default:
	lua_assert(ir->o <= IR_NE);  /* Comparisons. */
	split_emit(J, IRTGI(IR_HIOP), hiref, hisubst[ir->op2]);
	break;
      }
    } else
#endif
#if LJ_SOFTFP
    if (ir->o == IR_SLOAD) {
      if ((nir->op2 & IRSLOAD_CONVERT)) {  /* Convert from number to int. */
	nir->op2 &= ~IRSLOAD_CONVERT;
	if (!(nir->op2 & IRSLOAD_TYPECHECK))
	  nir->t.irt = IRT_INT;  /* Drop guard. */
	split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
	ir->prev = split_num2int(J, nref, nref+1, irt_isguard(ir->t));
      }
    } else if (ir->o == IR_TOBIT) {
      IRRef tmp, op1 = ir->op1;
      J->cur.nins--;
#if LJ_LE
      tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
#else
      tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
#endif
      ir->prev = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_lj_vm_tobit);
    } else if (ir->o == IR_TOSTR) {
      if (hisubst[ir->op1]) {
	if (irref_isk(ir->op1))
	  nir->op1 = ir->op1;
	else
	  split_emit(J, IRT(IR_HIOP, IRT_NIL), hisubst[ir->op1], nref);
      }
    } else if (ir->o == IR_HREF || ir->o == IR_NEWREF) {
      if (irref_isk(ir->op2) && hisubst[ir->op2])
	nir->op2 = ir->op2;
    } else
#endif
    if (ir->o == IR_CONV) {  /* See above, too. */
      IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
#if LJ_32 && LJ_HASFFI
      if (st == IRT_I64 || st == IRT_U64) {  /* Conversion from 64 bit int. */
#if LJ_SOFTFP
	if (irt_isfloat(ir->t)) {
	  split_call_l(J, hisubst, oir, ir,
		       st == IRT_I64 ? IRCALL_fp64_l2f : IRCALL_fp64_ul2f);
	  J->cur.nins--;  /* Drop unused HIOP. */
	}
#else
	if (irt_isfp(ir->t)) {  /* 64 bit integer to FP conversion. */
	  ir->prev = split_emit(J, IRT(IR_HIOP, irt_type(ir->t)),
				hisubst[ir->op1], nref);
	}
#endif
	else {  /* Truncate to lower 32 bits. */
	fwdlo:
	  ir->prev = nir->op1;  /* Forward loword. */
	  /* Replace with NOP to avoid messing up the snapshot logic. */
	  nir->ot = IRT(IR_NOP, IRT_NIL);
	  nir->op1 = nir->op2 = 0;
	}
      }
#endif
#if LJ_SOFTFP && LJ_32 && LJ_HASFFI
      else if (irt_isfloat(ir->t)) {
	if (st == IRT_NUM) {
	  split_call_l(J, hisubst, oir, ir, IRCALL_softfp_d2f);
	  J->cur.nins--;  /* Drop unused HIOP. */
	} else {
	  nir->o = IR_CALLN;
	  nir->op2 = st == IRT_INT ? IRCALL_softfp_i2f : IRCALL_softfp_ui2f;
	}
      } else if (st == IRT_FLOAT) {
	nir->o = IR_CALLN;
	nir->op2 = irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui;
      } else
#endif
#if LJ_SOFTFP
      if (st == IRT_NUM || (LJ_32 && LJ_HASFFI && st == IRT_FLOAT)) {
	if (irt_isguard(ir->t)) {
	  lua_assert(st == IRT_NUM && irt_isint(ir->t));
	  J->cur.nins--;
	  ir->prev = split_num2int(J, nir->op1, hisubst[ir->op1], 1);
	} else {
	  split_call_l(J, hisubst, oir, ir,
#if LJ_32 && LJ_HASFFI
	    st == IRT_NUM ?
	      (irt_isint(ir->t) ? IRCALL_softfp_d2i : IRCALL_softfp_d2ui) :
	      (irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui)
#else
	    IRCALL_softfp_d2i
#endif
	  );
	  J->cur.nins--;  /* Drop unused HIOP. */
	}
      }
#endif
    } else if (ir->o == IR_CALLXS) {
      IRRef hiref;
    split_call:
      hiref = hisubst[ir->op1];
      if (hiref) {
	IROpT ot = nir->ot;
	IRRef op2 = nir->op2;
	nir->ot = IRT(IR_CARG, IRT_NIL);
#if LJ_LE
	nir->op2 = hiref;
#else
	nir->op2 = nir->op1; nir->op1 = hiref;
#endif
	ir->prev = nref = split_emit(J, ot, nref, op2);
      }
      if (LJ_SOFTFP ? irt_is64(ir->t) : irt_isint64(ir->t))
	hi = split_emit(J,
	  IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT),
	  nref, nref);
    } else if (ir->o == IR_CARG) {
      IRRef hiref = hisubst[ir->op1];
      if (hiref) {
	IRRef op2 = nir->op2;
#if LJ_LE
	nir->op2 = hiref;
#else
	nir->op2 = nir->op1; nir->op1 = hiref;
#endif
	ir->prev = nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2);
	nir = IR(nref);
      }
      hiref = hisubst[ir->op2];
      if (hiref) {
#if !LJ_TARGET_X86
	int carg = 0;
	IRIns *cir;
	for (cir = IR(nir->op1); cir->o == IR_CARG; cir = IR(cir->op1))
	  carg++;
	if ((carg & 1) == 0) {  /* Align 64 bit arguments. */
	  IRRef op2 = nir->op2;
	  nir->op2 = REF_NIL;
	  nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2);
	  nir = IR(nref);
	}
#endif
#if LJ_BE
	{ IRRef tmp = nir->op2; nir->op2 = hiref; hiref = tmp; }
#endif
	ir->prev = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, hiref);
      }
    } else if (ir->o == IR_CNEWI) {
      if (hisubst[ir->op2])
	split_emit(J, IRT(IR_HIOP, IRT_NIL), nref, hisubst[ir->op2]);
    } else if (ir->o == IR_LOOP) {
      J->loopref = nref;  /* Needed by assembler. */
    }
    hisubst[ref] = hi;  /* Store hiword substitution. */
  }
  if (snref == nins) {  /* Substitution for last snapshot. */
    snap->ref = J->cur.nins;
    split_subst_snap(J, snap, oir);
  }

  /* Add PHI marks. */
  for (ref = J->cur.nins-1; ref >= REF_FIRST; ref--) {
    IRIns *ir = IR(ref);
    if (ir->o != IR_PHI) break;
    if (!irref_isk(ir->op1)) irt_setphi(IR(ir->op1)->t);
    if (ir->op2 > J->loopref) irt_setphi(IR(ir->op2)->t);
  }
}
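/* Hedged sketch of what the 64 bit ADD split computes (the helper name is
** illustrative, not VM code): a loword IR_ADD plus an IR_HIOP that adds
** the hiwords together with the carry out of the loword addition.
*/
#include <stdint.h>

static void sketch_add64(uint32_t alo, uint32_t ahi, uint32_t blo,
			 uint32_t bhi, uint32_t *rlo, uint32_t *rhi)
{
  uint32_t lo = alo + blo;
  *rlo = lo;                      /* Loword IR_ADD. */
  *rhi = ahi + bhi + (lo < alo);  /* IR_HIOP: hiword add plus carry. */
}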
/* Transform the old IR to the new IR. */
static void split_ir(jit_State *J)
{
  IRRef nins = J->cur.nins, nk = J->cur.nk;
  MSize irlen = nins - nk;
  MSize need = (irlen+1)*(sizeof(IRIns) + sizeof(IRRef1));
  IRIns *oir = (IRIns *)lj_str_needbuf(J->L, &G(J->L)->tmpbuf, need);
  IRRef1 *hisubst;
  IRRef ref;

  /* Copy old IR to buffer. */
  memcpy(oir, IR(nk), irlen*sizeof(IRIns));
  /* Bias hiword substitution table and old IR. Loword kept in field prev. */
  hisubst = (IRRef1 *)&oir[irlen] - nk;
  oir -= nk;

  /* Remove all IR instructions, but retain IR constants. */
  J->cur.nins = REF_FIRST;

  /* Process constants and fixed references. */
  for (ref = nk; ref <= REF_BASE; ref++) {
    IRIns *ir = &oir[ref];
    if (ir->o == IR_KINT64) {  /* Split up 64 bit constant. */
      TValue tv = *ir_k64(ir);
      ir->prev = lj_ir_kint(J, (int32_t)tv.u32.lo);
      hisubst[ref] = lj_ir_kint(J, (int32_t)tv.u32.hi);
    } else {
      ir->prev = ref;  /* Identity substitution for loword. */
      hisubst[ref] = 0;
    }
  }

  /* Process old IR instructions. */
  for (ref = REF_FIRST; ref < nins; ref++) {
    IRIns *ir = &oir[ref];
    IRRef nref = lj_ir_nextins(J);
    IRIns *nir = IR(nref);
    IRRef hi = 0;

    /* Copy-substitute old instruction to new instruction. */
    nir->op1 = ir->op1 < nk ? ir->op1 : oir[ir->op1].prev;
    nir->op2 = ir->op2 < nk ? ir->op2 : oir[ir->op2].prev;
    ir->prev = nref;  /* Loword substitution. */
    nir->o = ir->o;
    nir->t.irt = ir->t.irt & ~(IRT_MARK|IRT_ISPHI);
    hisubst[ref] = 0;

    /* Split 64 bit instructions. */
    if (irt_isint64(ir->t)) {
      IRRef hiref = hisubst[ir->op1];
      nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD);  /* Turn into INT op. */
      switch (ir->o) {
      case IR_ADD:
      case IR_SUB:
	/* Use plain op for hiword if loword cannot produce a carry/borrow. */
	if (irref_isk(nir->op2) && IR(nir->op2)->i == 0) {
	  ir->prev = nir->op1;  /* Pass through loword. */
	  nir->op1 = hiref; nir->op2 = hisubst[ir->op2];
	  hi = nref;
	  break;
	}
	/* fallthrough */
      case IR_NEG:
	hi = split_emit(J, IRTI(IR_HIOP), hiref, hisubst[ir->op2]);
	break;
      case IR_MUL:
	hi = split_call64(J, hisubst, oir, ir, IRCALL_lj_carith_mul64);
	break;
      case IR_DIV:
	hi = split_call64(J, hisubst, oir, ir,
			  irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
					     IRCALL_lj_carith_divu64);
	break;
      case IR_MOD:
	hi = split_call64(J, hisubst, oir, ir,
			  irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
					     IRCALL_lj_carith_modu64);
	break;
      case IR_POW:
	hi = split_call64(J, hisubst, oir, ir,
			  irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
					     IRCALL_lj_carith_powu64);
	break;
      case IR_FLOAD:
	lua_assert(ir->op2 == IRFL_CDATA_INT64);
	hi = split_emit(J, IRTI(IR_FLOAD), nir->op1, IRFL_CDATA_INT64HI);
#if LJ_BE
	ir->prev = hi; hi = nref;
#endif
	break;
      case IR_XLOAD:
	hi = split_emit(J, IRTI(IR_XLOAD), split_ptr(J, nir->op1), ir->op2);
#if LJ_BE
	ir->prev = hi; hi = nref;
#endif
	break;
      case IR_XSTORE:
#if LJ_LE
	hiref = hisubst[ir->op2];
#else
	hiref = nir->op2; nir->op2 = hisubst[ir->op2];
#endif
	split_emit(J, IRTI(IR_XSTORE), split_ptr(J, nir->op1), hiref);
	break;
      case IR_CONV: {  /* Conversion to 64 bit integer. Others handled below. */
	IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
	if (st == IRT_NUM || st == IRT_FLOAT) {  /* FP to 64 bit int conv. */
	  hi = split_emit(J, IRTI(IR_HIOP), nir->op1, nref);
	} else if (st == IRT_I64 || st == IRT_U64) {  /* 64/64 bit cast. */
	  /* Drop cast, since assembler doesn't care. */
	  goto fwdlo;
	} else if ((ir->op2 & IRCONV_SEXT)) {  /* Sign-extend to 64 bit. */
	  IRRef k31 = lj_ir_kint(J, 31);
	  nir = IR(nref);  /* May have been reallocated. */
	  ir->prev = nir->op1;  /* Pass through loword. */
	  nir->o = IR_BSAR;  /* hi = bsar(lo, 31). */
	  nir->op2 = k31;
	  hi = nref;
	} else {  /* Zero-extend to 64 bit. */
	  hi = lj_ir_kint(J, 0);
	  goto fwdlo;
	}
	break;
	}
      case IR_PHI: {
	IRRef hiref2;
	if ((irref_isk(nir->op1) && irref_isk(nir->op2)) ||
	    nir->op1 == nir->op2)
	  J->cur.nins--;  /* Drop useless PHIs. */
	hiref2 = hisubst[ir->op2];
	if (!((irref_isk(hiref) && irref_isk(hiref2)) || hiref == hiref2))
	  split_emit(J, IRTI(IR_PHI), hiref, hiref2);
	break;
	}
      default:
	lua_assert(ir->o <= IR_NE);  /* Comparisons. */
	split_emit(J, IRTGI(IR_HIOP), hiref, hisubst[ir->op2]);
	break;
      }
    } else if (ir->o == IR_CONV) {  /* See above, too. */
      IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
      if (st == IRT_I64 || st == IRT_U64) {  /* Conversion from 64 bit int. */
	if (irt_isfp(ir->t)) {  /* 64 bit integer to FP conversion. */
	  ir->prev = split_emit(J, IRT(IR_HIOP, irt_type(ir->t)),
				hisubst[ir->op1], nref);
	} else {  /* Truncate to lower 32 bits. */
	fwdlo:
	  ir->prev = nir->op1;  /* Forward loword. */
	  /* Replace with NOP to avoid messing up the snapshot logic. */
	  nir->ot = IRT(IR_NOP, IRT_NIL);
	  nir->op1 = nir->op2 = 0;
	}
      }
    } else if (ir->o == IR_CNEWI) {
      if (hisubst[ir->op2])
	split_emit(J, IRT(IR_HIOP, IRT_NIL), nref, hisubst[ir->op2]);
    } else if (ir->o == IR_LOOP) {
      J->loopref = nref;  /* Needed by assembler. */
    }
    hisubst[ref] = hi;  /* Store hiword substitution. */
  }

  /* Add PHI marks. */
  for (ref = J->cur.nins-1; ref >= REF_FIRST; ref--) {
    IRIns *ir = IR(ref);
    if (ir->o != IR_PHI) break;
    if (!irref_isk(ir->op1)) irt_setphi(IR(ir->op1)->t);
    if (ir->op2 > J->loopref) irt_setphi(IR(ir->op2)->t);
  }

  /* Substitute snapshot maps. */
  oir[nins].prev = J->cur.nins;  /* Substitution for last snapshot. */
  {
    SnapNo i, nsnap = J->cur.nsnap;
    for (i = 0; i < nsnap; i++) {
      SnapShot *snap = &J->cur.snap[i];
      SnapEntry *map = &J->cur.snapmap[snap->mapofs];
      MSize n, nent = snap->nent;
      snap->ref = oir[snap->ref].prev;
      for (n = 0; n < nent; n++) {
	SnapEntry sn = map[n];
	map[n] = ((sn & 0xffff0000) | oir[snap_ref(sn)].prev);
      }
    }
  }
}