/* Sweep instructions and tag sunken allocations and stores. */
static void sink_sweep_ins(jit_State *J)
{
  IRIns *ir, *irbase = IR(REF_BASE);
  for (ir = IR(J->cur.nins-1); ir >= irbase; ir--) {
    switch (ir->o) {
    case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: {
      IRIns *ira = sink_checkalloc(J, ir);
      if (ira && !irt_ismarked(ira->t)) {
        int delta = (int)(ir - ira);
        ir->prev = REGSP(RID_SINK, delta > 255 ? 255 : delta);
      } else {
        ir->prev = REGSP_INIT;
      }
      break;
      }
    case IR_NEWREF:
      if (!irt_ismarked(IR(ir->op1)->t)) {
        ir->prev = REGSP(RID_SINK, 0);
      } else {
        irt_clearmark(ir->t);
        ir->prev = REGSP_INIT;
      }
      break;
#if LJ_HASFFI
    case IR_CNEW: case IR_CNEWI:
#endif
    case IR_TNEW: case IR_TDUP:
      if (!irt_ismarked(ir->t)) {
        ir->t.irt &= ~IRT_GUARD;
        ir->prev = REGSP(RID_SINK, 0);
        J->cur.sinktags = 1;  /* Signal present SINK tags to assembler. */
      } else {
        irt_clearmark(ir->t);
        ir->prev = REGSP_INIT;
      }
      break;
    case IR_PHI: {
      IRIns *ira = IR(ir->op2);
      if (!irt_ismarked(ira->t) &&
          (ira->o == IR_TNEW || ira->o == IR_TDUP ||
           (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI)))) {
        ir->prev = REGSP(RID_SINK, 0);
      } else {
        ir->prev = REGSP_INIT;
      }
      break;
      }
    default:
      irt_clearmark(ir->t);
      ir->prev = REGSP_INIT;
      break;
    }
  }
  for (ir = IR(J->cur.nk); ir < irbase; ir++) {
    irt_clearmark(ir->t);
    ir->prev = REGSP_INIT;
    if (irt_is64(ir->t) && ir->o != IR_KNULL)
      ir++;
  }
}
/* Emit or eliminate collected PHIs. */
static void loop_emit_phi(jit_State *J, IRRef1 *subst, IRRef1 *phi, IRRef nphi)
{
  int pass2 = 0;
  IRRef i, nslots;
  IRRef invar = J->chain[IR_LOOP];
  /* Pass #1: mark redundant and potentially redundant PHIs. */
  for (i = 0; i < nphi; i++) {
    IRRef lref = phi[i];
    IRRef rref = subst[lref];
    if (lref == rref || rref == REF_DROP) {  /* Invariants are redundant. */
      irt_setmark(IR(lref)->t);
    } else if (!(IR(rref)->op1 == lref || IR(rref)->op2 == lref)) {
      /* Quick check for simple recurrences failed, need pass2. */
      irt_setmark(IR(lref)->t);
      pass2 = 1;
    }
  }
  /* Pass #2: traverse variant part and clear marks of non-redundant PHIs. */
  if (pass2) {
    for (i = J->cur.nins-1; i > invar; i--) {
      IRIns *ir = IR(i);
      if (!irref_isk(ir->op1)) irt_clearmark(IR(ir->op1)->t);
      if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t);
    }
  }
  /* Pass #3: add PHIs for variant slots without a corresponding SLOAD. */
  nslots = J->baseslot+J->maxslot;
  for (i = 1; i < nslots; i++) {
    IRRef ref = tref_ref(J->slot[i]);
    while (!irref_isk(ref) && ref != subst[ref]) {
      IRIns *ir = IR(ref);
      irt_clearmark(ir->t);  /* Unmark potential uses, too. */
      if (irt_isphi(ir->t) || irt_ispri(ir->t))
        break;
      irt_setphi(ir->t);
      if (nphi >= LJ_MAX_PHI)
        lj_trace_err(J, LJ_TRERR_PHIOV);
      phi[nphi++] = (IRRef1)ref;
      ref = subst[ref];
      if (ref > invar)
        break;
    }
  }
  /* Pass #4: emit PHI instructions or eliminate PHIs. */
  for (i = 0; i < nphi; i++) {
    IRRef lref = phi[i];
    IRIns *ir = IR(lref);
    if (!irt_ismarked(ir->t)) {  /* Emit PHI if not marked. */
      IRRef rref = subst[lref];
      if (rref > invar) irt_setphi(IR(rref)->t);
      emitir_raw(IRT(IR_PHI, irt_type(ir->t)), lref, rref);
    } else {  /* Otherwise eliminate PHI. */
      irt_clearmark(ir->t);
      irt_clearphi(ir->t);
    }
  }
}
/* Backwards propagate marks. Replace unused instructions with NOPs. */
static void dce_propagate(jit_State *J)
{
  IRRef1 *pchain[IR__MAX];
  IRRef ins;
  uint32_t i;
  for (i = 0; i < IR__MAX; i++) pchain[i] = &J->chain[i];
  for (ins = J->cur.nins-1; ins >= REF_FIRST; ins--) {
    IRIns *ir = IR(ins);
    if (irt_ismarked(ir->t)) {
      irt_clearmark(ir->t);
      pchain[ir->o] = &ir->prev;
    } else if (!ir_sideeff(ir)) {
      *pchain[ir->o] = ir->prev;  /* Reroute original instruction chain. */
      *pchain[IR_NOP] = (IRRef1)ins;
      ir->t.irt = IRT_NIL;
      ir->o = IR_NOP;  /* Replace instruction with NOP. */
      ir->op1 = ir->op2 = 0;
      pchain[IR_NOP] = &ir->prev;
      continue;
    }
    if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t);
    if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t);
  }
  *pchain[IR_NOP] = 0;  /* Terminate NOP chain. */
}
/* Undo any partial changes made by the loop optimization. */
static void loop_undo(jit_State *J, IRRef ins, SnapNo nsnap, MSize nsnapmap)
{
  ptrdiff_t i;
  SnapShot *snap = &J->cur.snap[nsnap-1];
  SnapEntry *map = J->cur.snapmap;
  map[snap->mapofs + snap->nent] = map[J->cur.snap[0].nent];  /* Restore PC. */
  J->cur.nsnapmap = (uint16_t)nsnapmap;
  J->cur.nsnap = nsnap;
  J->guardemit.irt = 0;
  lj_ir_rollback(J, ins);
  for (i = 0; i < BPROP_SLOTS; i++) {  /* Remove backprop. cache entries. */
    BPropEntry *bp = &J->bpropcache[i];
    if (bp->val >= ins) bp->key = 0;
  }
  for (ins--; ins >= REF_FIRST; ins--) {  /* Remove flags. */
    IRIns *ir = IR(ins);
    irt_clearphi(ir->t);
    irt_clearmark(ir->t);
  }
}
/* Emit or eliminate collected PHIs. */
static void loop_emit_phi(jit_State *J, IRRef1 *subst, IRRef1 *phi, IRRef nphi,
                          SnapNo onsnap)
{
  int passx = 0;
  IRRef i, j, nslots;
  IRRef invar = J->chain[IR_LOOP];
  /* Pass #1: mark redundant and potentially redundant PHIs. */
  for (i = 0, j = 0; i < nphi; i++) {
    IRRef lref = phi[i];
    IRRef rref = subst[lref];
    if (lref == rref || rref == REF_DROP) {  /* Invariants are redundant. */
      irt_clearphi(IR(lref)->t);
    } else {
      phi[j++] = (IRRef1)lref;
      if (!(IR(rref)->op1 == lref || IR(rref)->op2 == lref)) {
        /* Quick check for simple recurrences failed, need pass2. */
        irt_setmark(IR(lref)->t);
        passx = 1;
      }
    }
  }
  nphi = j;
  /* Pass #2: traverse variant part and clear marks of non-redundant PHIs. */
  if (passx) {
    SnapNo s;
    for (i = J->cur.nins-1; i > invar; i--) {
      IRIns *ir = IR(i);
      if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t);
      if (!irref_isk(ir->op1)) {
        irt_clearmark(IR(ir->op1)->t);
        if (ir->op1 < invar &&
            ir->o >= IR_CALLN && ir->o <= IR_CARG) {  /* ORDER IR */
          ir = IR(ir->op1);
          while (ir->o == IR_CARG) {
            if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t);
            if (irref_isk(ir->op1)) break;
            ir = IR(ir->op1);
            irt_clearmark(ir->t);
          }
        }
      }
    }
    for (s = J->cur.nsnap-1; s >= onsnap; s--) {
      SnapShot *snap = &J->cur.snap[s];
      SnapEntry *map = &J->cur.snapmap[snap->mapofs];
      MSize n, nent = snap->nent;
      for (n = 0; n < nent; n++) {
        IRRef ref = snap_ref(map[n]);
        if (!irref_isk(ref)) irt_clearmark(IR(ref)->t);
      }
    }
  }
  /* Pass #3: add PHIs for variant slots without a corresponding SLOAD. */
  nslots = J->baseslot+J->maxslot;
  for (i = 1; i < nslots; i++) {
    IRRef ref = tref_ref(J->slot[i]);
    while (!irref_isk(ref) && ref != subst[ref]) {
      IRIns *ir = IR(ref);
      irt_clearmark(ir->t);  /* Unmark potential uses, too. */
      if (irt_isphi(ir->t) || irt_ispri(ir->t))
        break;
      irt_setphi(ir->t);
      if (nphi >= LJ_MAX_PHI)
        lj_trace_err(J, LJ_TRERR_PHIOV);
      phi[nphi++] = (IRRef1)ref;
      ref = subst[ref];
      if (ref > invar)
        break;
    }
  }
  /* Pass #4: propagate non-redundant PHIs. */
  while (passx) {
    passx = 0;
    for (i = 0; i < nphi; i++) {
      IRRef lref = phi[i];
      IRIns *ir = IR(lref);
      if (!irt_ismarked(ir->t)) {  /* Propagate only from unmarked PHIs. */
        IRIns *irr = IR(subst[lref]);
        if (irt_ismarked(irr->t)) {  /* Right ref points to other PHI? */
          irt_clearmark(irr->t);  /* Mark that PHI as non-redundant. */
          passx = 1;  /* Retry. */
        }
      }
    }
  }
  /* Pass #5: emit PHI instructions or eliminate PHIs. */
  for (i = 0; i < nphi; i++) {
    IRRef lref = phi[i];
    IRIns *ir = IR(lref);
    if (!irt_ismarked(ir->t)) {  /* Emit PHI if not marked. */
      IRRef rref = subst[lref];
      if (rref > invar) irt_setphi(IR(rref)->t);
      emitir_raw(IRT(IR_PHI, irt_type(ir->t)), lref, rref);
    } else {  /* Otherwise eliminate PHI. */
      irt_clearmark(ir->t);
      irt_clearphi(ir->t);
    }
  }
}