void ViewReadLinetree3::setLigFile( const QString& f ){ editLig->setText(f); if(__open){ QString dta = f; dta = dta.replace(QRegExp("\\.lig"),".dta"); qWarning("Dta1 = "+dta); if(QFileInfo(dta).exists()){ setDtaFile(dta); } else { dta = f; dta = dta.replace(QRegExp("[0-9]*\\.lig"),".dta"); qWarning("Dta2 = "+dta); if(QFileInfo(dta).exists()) setDtaFile(dta); } } else { QString dta = f; dta = dta.replace(QRegExp("[0-9]*\\.lig"),".dta"); setDtaFile(dta); } testEndianess(); checkCfg(QFileInfo(f).dirPath()); }
/*
 * Verify the register allocation recorded in `regs` for every tmp in the
 * trace: at each use, the registers (or spill slots) assigned to a source
 * tmp must still hold that tmp according to the state propagated from its
 * definition.  Always returns true so it can sit inside an assert().
 */
bool checkRegisters(IRTrace* trace, const IRFactory& factory,
                    const RegAllocInfo& regs) {
  assert(checkCfg(trace, factory));
  auto blocks = rpoSortCfg(trace, factory);
  auto children = findDomChildren(blocks);
  // Walk blocks in dominator-tree preorder so a definition's register state
  // is already in `state` at every dominated use; each callback gets a copy
  // of its idom's RegState.
  forPreorderDoms(blocks.front(), children, RegState(),
                  [&] (Block* block, RegState& state) {
    for (IRInstruction& inst : *block) {
      // Check sources first: their registers must still map back to them.
      for (SSATmp* src : inst.srcs()) {
        auto const &info = regs[src];
        if (!info.spilled() &&
            (info.reg(0) == Transl::rVmSp ||
             info.reg(0) == Transl::rVmFp)) {
          // hack - ignore rbx and rbp
          continue;
        }
        for (unsigned i = 0, n = info.numAllocatedRegs(); i < n; ++i) {
          assert(state.tmp(info, i) == src);
        }
      }
      // Then record defs: these registers now hold `dst` (clobbering
      // whatever tmp was there before).
      for (SSATmp& dst : inst.dsts()) {
        auto const &info = regs[dst];
        for (unsigned i = 0, n = info.numAllocatedRegs(); i < n; ++i) {
          state.tmp(info, i) = &dst;
        }
      }
    }
  });
  return true;
}
void optimize(IRUnit& unit, IRBuilder& irBuilder, TransKind kind) { auto finishPass = [&](const char* msg) { dumpTrace(6, unit, folly::format("after {}", msg).str().c_str()); assert(checkCfg(unit)); assert(checkTmpsSpanningCalls(unit)); if (debug) { forEachInst(rpoSortCfg(unit), assertOperandTypes); } }; auto doPass = [&](void (*fn)(IRUnit&), const char* msg) { fn(unit); finishPass(msg); }; auto dce = [&](const char* which) { if (!RuntimeOption::EvalHHIRDeadCodeElim) return; eliminateDeadCode(unit); finishPass(folly::format("{} DCE", which).str().c_str()); }; if (RuntimeOption::EvalHHIRRelaxGuards) { auto const simpleRelax = kind == TransProfile; auto changed = relaxGuards(unit, *irBuilder.guards(), simpleRelax); if (changed) finishPass("guard relaxation"); } if (RuntimeOption::EvalHHIRRefcountOpts) { optimizeRefcounts(unit); finishPass("refcount opts"); } dce("initial"); if (RuntimeOption::EvalHHIRPredictionOpts) { doPass(optimizePredictions, "prediction opts"); } if (RuntimeOption::EvalHHIRExtraOptPass && (RuntimeOption::EvalHHIRCse || RuntimeOption::EvalHHIRSimplification)) { irBuilder.reoptimize(); finishPass("reoptimize"); // Cleanup any dead code left around by CSE/Simplification // Ideally, this would be controlled by a flag returned // by optimzeTrace indicating whether DCE is necessary dce("reoptimize"); } if (RuntimeOption::EvalHHIRJumpOpts) { doPass(optimizeJumps, "jumpopts"); dce("jump opts"); } if (RuntimeOption::EvalHHIRGenerateAsserts) { doPass(insertAsserts, "RefCnt asserts"); } }
/*
 * Run the HHIR optimization pipeline over `trace`: optional guard
 * relaxation, DCE, prediction opts, a reoptimize (CSE/simplify) round,
 * jump opts, and finally debug asserts.  Each pass is gated by its
 * RuntimeOption flag and followed by a trace dump plus CFG and
 * operand-type sanity checks.
 */
void optimizeTrace(IRTrace* trace, TraceBuilder& traceBuilder) {
  auto& irFactory = traceBuilder.factory();

  // Dump the trace and re-validate invariants after every pass.
  auto const postPass = [&] (const char* msg) {
    dumpTrace(6, trace, folly::format("after {}", msg).str().c_str());
    assert(checkCfg(trace, irFactory));
    assert(checkTmpsSpanningCalls(trace, irFactory));
    if (debug) forEachTraceInst(trace, assertOperandTypes);
  };

  // Run a free-function pass, then validate.
  auto const runPass = [&] (void (*pass)(IRTrace*, IRFactory&),
                            const char* msg) {
    pass(trace, irFactory);
    postPass(msg);
  };

  // Dead-code elimination, tagged with the phase that requested it.
  auto const runDce = [&] (const char* which) {
    if (!RuntimeOption::EvalHHIRDeadCodeElim) return;
    eliminateDeadCode(trace, irFactory);
    postPass(folly::format("{} DCE", which).str().c_str());
  };

  if (RuntimeOption::EvalHHIRRelaxGuards) {
    if (relaxGuards(trace, irFactory, *traceBuilder.guards())) {
      postPass("guard relaxation");
    }
  }

  runDce("initial");

  if (RuntimeOption::EvalHHIRPredictionOpts) {
    runPass(optimizePredictions, "prediction opts");
  }

  if (RuntimeOption::EvalHHIRExtraOptPass &&
      (RuntimeOption::EvalHHIRCse ||
       RuntimeOption::EvalHHIRSimplification)) {
    traceBuilder.reoptimize();
    postPass("reoptimize");
    // CSE/simplification can strand dead code; ideally reoptimize() would
    // report whether DCE is actually needed.
    runDce("reoptimize");
  }

  if (RuntimeOption::EvalHHIRJumpOpts) {
    runPass(optimizeJumps, "jumpopts");
    runDce("jump opts");
  }

  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    runPass(insertAsserts, "RefCnt asserts");
  }
}
/*
 * Forward-dataflow register-allocation checker: walk blocks in RPO,
 * carrying a per-block RegState that maps each allocated register/slot back
 * to the SSATmp it holds.  Every use must find its tmp still live in its
 * assigned registers; every def overwrites the mapping.  Block in-states
 * are seeded from the first processed predecessor and merged with the rest.
 * Always returns true so it can sit inside an assert().
 *
 * Fix vs. previous revision: `reached` was declared and tested but never
 * written, so the merge branch in updateEdge was dead code — every later
 * predecessor silently overwrote the join block's in-state instead of
 * merging with it.  We now mark a successor reached when seeding it.
 */
bool checkRegisters(IRTrace* trace, const IRFactory& factory,
                    const RegAllocInfo& regs) {
  assert(checkCfg(trace, factory));
  auto blocks = rpoSortCfg(trace, factory);
  StateVector<Block, RegState> states(&factory, RegState());
  StateVector<Block, bool> reached(&factory, false);
  for (auto* block : blocks) {
    RegState state = states[block];
    for (IRInstruction& inst : *block) {
      // Check sources first: their registers must still map back to them.
      for (SSATmp* src : inst.srcs()) {
        auto const &info = regs[src];
        if (!info.spilled() &&
            (info.reg(0) == Transl::rVmSp ||
             info.reg(0) == Transl::rVmFp)) {
          // hack - ignore rbx and rbp
          continue;
        }
        for (unsigned i = 0, n = info.numAllocatedRegs(); i < n; ++i) {
          assert(state.tmp(info, i) == src);
        }
      }
      // Then record defs: these registers now hold `dst`.
      for (SSATmp& dst : inst.dsts()) {
        auto const &info = regs[dst];
        for (unsigned i = 0, n = info.numAllocatedRegs(); i < n; ++i) {
          state.tmp(info, i) = &dst;
        }
      }
    }
    // State contains register/spill info at current block end; propagate it
    // to each successor, merging at join points.
    auto updateEdge = [&](Block* succ) {
      if (!reached[succ]) {
        // First predecessor processed: seed the successor's in-state.
        states[succ] = state;
        reached[succ] = true;
      } else {
        states[succ].merge(state);
      }
    };
    if (auto* next = block->next()) updateEdge(next);
    if (auto* taken = block->taken()) updateEdge(taken);
  }
  return true;
}
/*
 * Run the HHIR optimization pipeline over `unit`.  Pass order: optional
 * guard relaxation (+simplify), take elimination, DCE, prediction opts,
 * simplify, GVN, load elimination, store elimination, refcount opts, and
 * debug asserts.  Each pass is gated by RuntimeOption flags and followed by
 * a unit dump plus CFG/operand-type sanity checks.
 */
void optimize(IRUnit& unit, IRBuilder& irBuilder, TransKind kind) {
  Timer _t(Timer::optimize);

  // Print the unit (if the pass gave us a label) and re-validate
  // invariants after every pass.
  auto const finishPass = [&] (const char* msg) {
    if (msg) {
      printUnit(6, unit, folly::format("after {}", msg).str().c_str());
    }
    assertx(checkCfg(unit));
    assertx(checkTmpsSpanningCalls(unit));
    if (debug) {
      forEachInst(rpoSortCfg(unit), [&](IRInstruction* inst) {
        assertx(checkOperandTypes(inst, &unit));
      });
    }
  };

  // Run a free-function pass, then validate.
  auto const doPass = [&] (void (*fn)(IRUnit&), const char* msg = nullptr) {
    fn(unit);
    finishPass(msg);
  };

  // Dead-code elimination, tagged with the phase that requested it.
  auto const dce = [&] (const char* which) {
    if (!RuntimeOption::EvalHHIRDeadCodeElim) return;
    eliminateDeadCode(unit);
    finishPass(folly::format("{} DCE", which).str().c_str());
  };

  // One reachability-aware simplify() sweep over the unit in RPO.
  auto const simplifyPass = [] (IRUnit& unit) {
    boost::dynamic_bitset<> reachable(unit.numBlocks());
    reachable.set(unit.entry()->id());
    auto const blocks = rpoSortCfg(unit);
    for (auto block : blocks) {
      // Skip unreachable blocks, or simplify() cries.
      if (!reachable.test(block->id())) continue;
      for (auto& inst : *block) simplify(unit, &inst);
      // Mark successors reachable from the (possibly simplified) back inst.
      if (auto const b = block->back().next())  reachable.set(b->id());
      if (auto const b = block->back().taken()) reachable.set(b->id());
    }
  };

  auto const doSimplify = RuntimeOption::EvalHHIRExtraOptPass &&
                          RuntimeOption::EvalHHIRSimplification;
  auto const hasLoop = RuntimeOption::EvalJitLoops && cfgHasLoop(unit);
  auto const traceMode = kind != TransKind::Optimize ||
                         RuntimeOption::EvalJitPGORegionSelector == "hottrace";

  // TODO (#5792564): Guard relaxation doesn't work with loops.
  // TODO (#6599498): Guard relaxation is broken in wholecfg mode.
  if (shouldHHIRRelaxGuards() && !hasLoop && traceMode) {
    Timer _t(Timer::optimize_relaxGuards);
    // Simple relaxation for profiling translations of plain tracelet or
    // whole-method regions.
    const bool simple = kind == TransKind::Profile &&
                        (RuntimeOption::EvalJitRegionSelector == "tracelet" ||
                         RuntimeOption::EvalJitRegionSelector == "method");
    RelaxGuardsFlags flags = (RelaxGuardsFlags)
      (RelaxReflow | (simple ? RelaxSimple : RelaxNormal));
    auto changed = relaxGuards(unit, *irBuilder.guards(), flags);
    if (changed) finishPass("guard relaxation");
    if (doSimplify) {
      doPass(simplifyPass, "guard relaxation simplify");
    }
  }

  // This is vestigial (it removes some instructions needed by the old refcount
  // opts pass), and will be removed soon.
  eliminateTakes(unit);

  dce("initial");

  if (RuntimeOption::EvalHHIRPredictionOpts) {
    doPass(optimizePredictions, "prediction opts");
  }

  if (doSimplify) {
    doPass(simplifyPass, "simplify");
    dce("simplify");
  }

  if (RuntimeOption::EvalHHIRGlobalValueNumbering) {
    doPass(gvn);
    dce("gvn");
  }

  // Memory opts only make sense once profiling is done.
  if (kind != TransKind::Profile && RuntimeOption::EvalHHIRMemoryOpts) {
    doPass(optimizeLoads);
    dce("loadelim");
  }

  /*
   * Note: doing this pass this late might not be ideal, in particular because
   * we've already turned some StLoc instructions into StLocNT.
   *
   * But right now there are assumptions preventing us from doing it before
   * refcount opts.  (Refcount opts needs to see all the StLocs explicitly
   * because it makes assumptions about whether references are consumed based
   * on that.)
   */
  if (kind != TransKind::Profile && RuntimeOption::EvalHHIRMemoryOpts) {
    doPass(optimizeStores);
    dce("storeelim");
  }

  if (kind != TransKind::Profile && RuntimeOption::EvalHHIRRefcountOpts) {
    doPass(optimizeRefcounts2);
    dce("refcount");
  }

  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    doPass(insertAsserts);
  }
}
bool checkRegisters(const IRUnit& unit, const RegAllocInfo& regs) { assert(checkCfg(unit)); auto blocks = rpoSortCfg(unit); StateVector<Block, RegState> states(unit, RegState()); StateVector<Block, bool> reached(unit, false); for (auto* block : blocks) { RegState state = states[block]; for (IRInstruction& inst : *block) { if (inst.op() == Jmp) continue; // handled by Shuffle auto& inst_regs = regs[inst]; for (int i = 0, n = inst.numSrcs(); i < n; ++i) { auto const &rs = inst_regs.src(i); if (!rs.spilled()) { // hack - ignore rbx and rbp bool ignore_frame_regs; switch (arch()) { case Arch::X64: ignore_frame_regs = (rs.reg(0) == X64::rVmSp || rs.reg(0) == X64::rVmFp); break; case Arch::ARM: ignore_frame_regs = (rs.reg(0) == ARM::rVmSp || rs.reg(0) == ARM::rVmFp); break; } if (ignore_frame_regs) continue; } DEBUG_ONLY auto src = inst.src(i); assert(rs.numWords() == src->numWords() || (src->isConst() && rs.numWords() == 0)); DEBUG_ONLY auto allocated = rs.numAllocated(); if (allocated == 2) { if (rs.spilled()) { assert(rs.slot(0) != rs.slot(1)); } else { assert(rs.reg(0) != rs.reg(1)); } } for (unsigned i = 0, n = rs.numAllocated(); i < n; ++i) { assert(state.tmp(rs, i) == src); } } auto update = [&](SSATmp* tmp, const PhysLoc& loc) { for (unsigned i = 0, n = loc.numAllocated(); i < n; ++i) { state.tmp(loc, i) = tmp; } }; if (inst.op() == Shuffle) { checkShuffle(inst, regs); for (unsigned i = 0; i < inst.numSrcs(); ++i) { update(inst.src(i), inst.extra<Shuffle>()->dests[i]); } } else { for (unsigned i = 0; i < inst.numDsts(); ++i) { update(inst.dst(i), inst_regs.dst(i)); } } } // State contains the PhysLoc->SSATmp reverse mappings at block end; // propagate the state to succ auto updateEdge = [&](Block* succ) { if (!reached[succ]) { states[succ] = state; } else { states[succ].merge(state); } }; if (auto* next = block->next()) updateEdge(next); if (auto* taken = block->taken()) updateEdge(taken); } return true; }
/*
 * Run the HHIR optimization pipeline over `unit`.  Pass order: optional
 * guard relaxation (+reoptimize), refcount opts, DCE, prediction opts,
 * reoptimize, GVN, load elimination, store elimination, and debug asserts.
 * Each pass is gated by RuntimeOption flags and followed by a unit dump
 * plus CFG/operand-type sanity checks.
 */
void optimize(IRUnit& unit, IRBuilder& irBuilder, TransKind kind) {
  Timer _t(Timer::optimize);

  // Print the unit (if the pass gave us a label) and re-validate
  // invariants after every pass.
  auto finishPass = [&](const char* msg) {
    if (msg) {
      printUnit(6, unit, folly::format("after {}", msg).str().c_str());
    }
    assert(checkCfg(unit));
    assert(checkTmpsSpanningCalls(unit));
    if (debug) {
      forEachInst(rpoSortCfg(unit), [&](IRInstruction* inst) {
        assert(checkOperandTypes(inst, &unit));
      });
    }
  };

  // Run a free-function pass, then validate.
  auto doPass = [&](void (*fn)(IRUnit&), const char* msg = nullptr) {
    fn(unit);
    finishPass(msg);
  };

  // Dead-code elimination, tagged with the phase that requested it.
  auto dce = [&](const char* which) {
    if (!RuntimeOption::EvalHHIRDeadCodeElim) return;
    eliminateDeadCode(unit);
    finishPass(folly::format("{} DCE", which).str().c_str());
  };

  auto const doReoptimize = RuntimeOption::EvalHHIRExtraOptPass &&
                            (RuntimeOption::EvalHHIRCse ||
                             RuntimeOption::EvalHHIRSimplification);
  auto const hasLoop = RuntimeOption::EvalJitLoops && cfgHasLoop(unit);

  // TODO(#5792564): Guard relaxation doesn't work with loops.
  if (shouldHHIRRelaxGuards() && !hasLoop) {
    Timer _t(Timer::optimize_relaxGuards);
    // Simple relaxation for profiling translations of plain tracelet or
    // whole-method regions.
    const bool simple = kind == TransKind::Profile &&
                        (RuntimeOption::EvalJitRegionSelector == "tracelet" ||
                         RuntimeOption::EvalJitRegionSelector == "method");
    RelaxGuardsFlags flags = (RelaxGuardsFlags)
      (RelaxReflow | (simple ? RelaxSimple : RelaxNormal));
    auto changed = relaxGuards(unit, *irBuilder.guards(), flags);
    if (changed) finishPass("guard relaxation");
    if (doReoptimize) {
      irBuilder.reoptimize();
      finishPass("guard relaxation reoptimize");
    }
  }

  if (RuntimeOption::EvalHHIRRefcountOpts) {
    // Refcount opts needs a FrameStateMgr seeded at the unit's entry marker.
    optimizeRefcounts(unit, FrameStateMgr{unit.entry()->front().marker()});
    finishPass("refcount opts");
  }

  dce("initial");

  if (RuntimeOption::EvalHHIRPredictionOpts) {
    doPass(optimizePredictions, "prediction opts");
  }

  if (doReoptimize) {
    irBuilder.reoptimize();
    finishPass("reoptimize");
    dce("reoptimize");
  }

  if (RuntimeOption::EvalHHIRGlobalValueNumbering) {
    doPass(gvn);
    dce("gvn");
  }

  // Memory opts only make sense once profiling is done.
  if (kind != TransKind::Profile && RuntimeOption::EvalHHIRMemoryOpts) {
    doPass(optimizeLoads);
    dce("loadelim");
  }

  /*
   * Note: doing this pass this late might not be ideal, in particular because
   * we've already turned some StLoc instructions into StLocNT.
   *
   * But right now there are assumptions preventing us from doing it before
   * refcount opts.  (Refcount opts needs to see all the StLocs explicitly
   * because it makes assumptions about whether references are consumed based
   * on that.)
   */
  if (kind != TransKind::Profile && RuntimeOption::EvalHHIRMemoryOpts) {
    doPass(optimizeStores);
    dce("storeelim");
  }

  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    doPass(insertAsserts, "RefCnt asserts");
  }
}
/*
 * Run the HHIR optimization pipeline over `unit`.  Pass order: optional
 * guard relaxation, refcount opts, DCE, prediction opts, reoptimize
 * (CSE/simplify), jump opts, and debug asserts.  Each pass is gated by
 * RuntimeOption flags and followed by a unit dump plus CFG/operand-type
 * sanity checks.
 */
void optimize(IRUnit& unit, IRBuilder& irBuilder, TransKind kind) {
  Timer _t(Timer::optimize);

  // Dump the unit and re-validate invariants after every pass.
  auto finishPass = [&](const char* msg) {
    dumpTrace(6, unit, folly::format("after {}", msg).str().c_str());
    assert(checkCfg(unit));
    assert(checkTmpsSpanningCalls(unit));
    if (debug) {
      forEachInst(rpoSortCfg(unit), assertOperandTypes);
    }
  };

  // Run a free-function pass, then validate.
  auto doPass = [&](void (*fn)(IRUnit&), const char* msg) {
    fn(unit);
    finishPass(msg);
  };

  // Dead-code elimination, tagged with the phase that requested it.
  auto dce = [&](const char* which) {
    if (!RuntimeOption::EvalHHIRDeadCodeElim) return;
    eliminateDeadCode(unit);
    finishPass(folly::format("{} DCE", which).str().c_str());
  };

  if (RuntimeOption::EvalHHIRRelaxGuards) {
    /*
     * In TransProfile mode, we can only relax the guards in tracelet
     * region mode.  If the region came from analyze() and we relax the
     * guards here, then the RegionDesc's TypePreds in ProfData won't
     * accurately reflect the generated guards.  This can result in a
     * TransOptimze region to be formed with types that are incompatible,
     * e.g.:
     *    B1: TypePred: Loc0: Bool      // but this gets relaxed to Uncounted
     *        PostCond: Loc0: Uncounted // post-conds are accurate
     *    B2: TypePred: Loc0: Int       // this will always fail
     */
    const bool relax = kind != TransProfile ||
                       RuntimeOption::EvalJitRegionSelector == "tracelet";
    if (relax) {
      Timer _t(Timer::optimize_relaxGuards);
      const bool simple = kind == TransProfile &&
                          RuntimeOption::EvalJitRegionSelector == "tracelet";
      auto changed = relaxGuards(unit, *irBuilder.guards(), simple);
      if (changed) finishPass("guard relaxation");
    }
  }

  if (RuntimeOption::EvalHHIRRefcountOpts) {
    // Refcount opts needs a FrameState seeded at the unit's entry marker.
    optimizeRefcounts(unit, FrameState{unit, unit.entry()->front().marker()});
    finishPass("refcount opts");
  }

  dce("initial");

  if (RuntimeOption::EvalHHIRPredictionOpts) {
    doPass(optimizePredictions, "prediction opts");
  }

  if (RuntimeOption::EvalHHIRExtraOptPass &&
      (RuntimeOption::EvalHHIRCse ||
       RuntimeOption::EvalHHIRSimplification)) {
    irBuilder.reoptimize();
    finishPass("reoptimize");
    // Cleanup any dead code left around by CSE/Simplification
    // Ideally, this would be controlled by a flag returned
    // by optimizeTrace indicating whether DCE is necessary
    dce("reoptimize");
  }

  if (RuntimeOption::EvalHHIRJumpOpts) {
    doPass(optimizeJumps, "jumpopts");
    dce("jump opts");
  }

  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    doPass(insertAsserts, "RefCnt asserts");
  }
}