Example #1
SSATmp* Simplifier::simplifyNot(SSATmp* src) {
  IRInstruction* inst = src->getInstruction();
  Opcode op = inst->getOpcode();

  // TODO: Add more algebraic simplification rules for NOT
  switch (op) {
    case ConvToBool:
      return simplifyNot(inst->getSrc(0));
    case OpXor: {
      // !!X --> bool(X)
      if (isNotInst(inst->getSrc(0))) {
        return m_tb->genConvToBool(inst->getSrc(0));
      }
      break;
    }
    // !(X cmp Y) --> X opposite_cmp Y
    case OpLt:
    case OpLte:
    case OpGt:
    case OpGte:
    case OpEq:
    case OpNeq:
    case OpSame:
    case OpNSame:
      // XXX: this could technically lose a ConvToBool, except that we
      // know "not" instructions (Xor with 1) are always going to be
      // followed by ConvToBool.
      //
      // TODO(#2058865): This would make more sense with a real Not
      // instruction and allowing boolean output types for query ops.
      return m_tb->genCmp(negateQueryOp(op),
                          inst->getSrc(0),
                          inst->getSrc(1));
    case InstanceOf:
    case NInstanceOf:
    case InstanceOfBitmask:
    case NInstanceOfBitmask:
      // TODO: combine this with the above check and use isQueryOp or
      // add an isNegatable.
      return m_tb->gen(negateQueryOp(op),
                       inst->getNumSrcs(),
                       inst->getSrcs().begin());
    // TODO: !(X | non_zero) --> 0
    default: break;
  }
  return nullptr;
}
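
Both the comparison cases and the instance-of cases above delegate to negateQueryOp to flip an opcode to its logical complement. Below is a minimal sketch of that mapping, inferred from the opcode pairs the switch handles; the real helper is defined elsewhere in the translator, so treat the exact shape as an assumption.

// Hypothetical sketch of negateQueryOp, reconstructed from the cases in
// simplifyNot above. The actual definition lives elsewhere.
Opcode negateQueryOp(Opcode opc) {
  switch (opc) {
    case OpLt:                return OpGte;
    case OpLte:               return OpGt;
    case OpGt:                return OpLte;
    case OpGte:               return OpLt;
    case OpEq:                return OpNeq;
    case OpNeq:               return OpEq;
    case OpSame:              return OpNSame;
    case OpNSame:             return OpSame;
    case InstanceOf:          return NInstanceOf;
    case NInstanceOf:         return InstanceOf;
    case InstanceOfBitmask:   return NInstanceOfBitmask;
    case NInstanceOfBitmask:  return InstanceOfBitmask;
    default:
      assert(false); // not a negatable query op
      return opc;
  }
}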
Example #2
void eliminateDeadCode(Trace* trace, IRFactory* irFactory) {
  IRInstruction::List wl; // worklist of live instructions
  Trace::List& exitTraces = trace->getExitTraces();
  // First mark all exit traces as unreachable by setting the id on
  // their labels to DEAD.
  for (Trace::Iterator it = exitTraces.begin();
       it != exitTraces.end();
       it++) {
    Trace* trace = *it;
    trace->getLabel()->setId(DEAD);
  }

  // mark the essential instructions and add them to the initial
  // work list; also mark the exit traces that are reachable by
  // any control flow instruction in the main trace.
  initInstructions(trace, wl);
  for (Trace::Iterator it = exitTraces.begin();
       it != exitTraces.end();
       it++) {
    // only process those exit traces that are reachable from
    // the main trace
    Trace* trace = *it;
    if (trace->getLabel()->getId() != DEAD) {
      initInstructions(trace, wl);
    }
  }

  // process the worklist
  while (!wl.empty()) {
    IRInstruction* inst = wl.front();
    wl.pop_front();
    for (uint32 i = 0; i < inst->getNumSrcs(); i++) {
      SSATmp* src = inst->getSrc(i);
      if (src->getInstruction()->isDefConst()) {
        continue;
      }
      IRInstruction* srcInst = src->getInstruction();
      if (srcInst->getId() == DEAD) {
        srcInst->setId(LIVE);
        wl.push_back(srcInst);
      }
      // If <inst> consumes the reference produced by <srcInst> (an
      // IncRef), record where the reference is consumed.
      if (inst->consumesReference(i) && srcInst->getOpcode() == IncRef) {
        if (inst->getParent()->isMain() || !srcInst->getParent()->isMain()) {
          // <srcInst> is consumed from its own trace.
          srcInst->setId(REFCOUNT_CONSUMED);
        } else {
          // <srcInst> is consumed off trace.
          if (srcInst->getId() != REFCOUNT_CONSUMED) {
            // mark <srcInst> as REFCOUNT_CONSUMED_OFF_TRACE unless it is
            // also consumed from its own trace.
            srcInst->setId(REFCOUNT_CONSUMED_OFF_TRACE);
          }
        }
      }
    }
  }

  // Optimize IncRefs and DecRefs.
  optimizeRefCount(trace);
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); ++it) {
    optimizeRefCount(*it);
  }

  if (RuntimeOption::EvalHHIREnableSinking) {
    // Sink IncRefs consumed off trace.
    IRInstruction::List toSink;
    sinkIncRefs(trace, irFactory, toSink);
  }

  // now remove instructions whose id == DEAD
  removeDeadInstructions(trace);
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); it++) {
    removeDeadInstructions(*it);
  }

  // If the main trace ends with an unconditional jump, splice the body of
  // the jump's target onto the end of the trace and drop the jump.
  IRInstruction::List& instList = trace->getInstructionList();
  IRInstruction::Iterator lastInst = instList.end();
  lastInst--; // go back to the last instruction
  IRInstruction* jmpInst = *lastInst;
  if (jmpInst->getOpcode() == Jmp_) {
    Trace* targetTrace = jmpInst->getLabel()->getTrace();
    IRInstruction::List& targetInstList = targetTrace->getInstructionList();
    IRInstruction::Iterator instIter = targetInstList.begin();
    instIter++; // skip over label
    // update the parent trace of the moved instructions
    for (IRInstruction::Iterator it = instIter;
         it != targetInstList.end();
         ++it) {
      (*it)->setParent(trace);
    }
    instList.splice(lastInst, targetInstList, instIter, targetInstList.end());
    // delete the jump instruction
    instList.erase(lastInst);
  }

  // If main trace ends with a conditional jump with no side-effects on exit,
  // hook it to the exitTrace and make it a TraceExitType::NormalCc
  if (RuntimeOption::EvalHHIRDirectExit) {
    IRInstruction::List& instList = trace->getInstructionList();
    IRInstruction::Iterator tail  = instList.end();
    IRInstruction* jccInst        = NULL;
    IRInstruction* exitInst       = NULL;
    IRInstruction* exitCcInst     = NULL;
    Opcode opc = OpAdd; // arbitrary non-exit opcode; overwritten below
    // Normally Jcc comes before a Marker
    for (int idx = 3; idx >= 0; idx--) {
      tail--; // go back to the previous instruction
      IRInstruction* inst = *tail;
      opc = inst->getOpcode();
      if (opc == ExitTrace) {
        exitInst = *tail;
        continue;
      }
      if (opc == Marker) {
        continue;
      }
      if (jccCanBeDirectExit(opc)) {
        jccInst = inst;
        break;
      }
      break;
    }
    if (jccCanBeDirectExit(opc)) {
      SSATmp* dst = jccInst->getDst();
      Trace* targetTrace = jccInst->getLabel()->getTrace();
      IRInstruction::List& targetInstList = targetTrace->getInstructionList();
      IRInstruction::Iterator targetInstIter = targetInstList.begin();
      targetInstIter++; // skip over label

      // Check for a NormalCc exit with no side effects
      for (IRInstruction::Iterator it = targetInstIter;
           it != targetInstList.end();
           ++it) {
        IRInstruction* instr = (*it);
        // TODO: extend to support ExitSlow, ExitSlowNoProgress, ...
        Opcode opc = instr->getOpcode();
        if (opc == ExitTraceCc) {
          exitCcInst = instr;
          break;
        } else if (opc == Marker) {
          continue;
        } else {
          // Do not optimize if there are other instructions
          break;
        }
      }

      if (exitInst && exitCcInst &&
          exitCcInst->getNumSrcs() > NUM_FIXED_SRCS &&
          exitInst->getNumSrcs() > NUM_FIXED_SRCS) {
        // Found both exits, link them to Jcc for codegen
        ASSERT(dst);
        ExtendedInstruction* exCcInst = (ExtendedInstruction*)exitCcInst;
        exCcInst->appendExtendedSrc(*irFactory, dst);
        ExtendedInstruction* exInst = (ExtendedInstruction*)exitInst;
        exInst->appendExtendedSrc(*irFactory, dst);
        // Set flag so Jcc and exits know this is active
        dst->setTCA(kIRDirectJccJmpActive);
      }
    }
  }

  // If main trace starts with guards, have them generate a patchable jump
  // to the anchor trace
  if (RuntimeOption::EvalHHIRDirectExit) {
    LabelInstruction* guardLabel = NULL;
    IRInstruction::List& instList = trace->getInstructionList();
    // Check the beginning of the trace for guards
    for (IRInstruction::Iterator it = instList.begin(); it != instList.end();
         ++it) {
      IRInstruction* inst = *it;
      Opcode opc = inst->getOpcode();
      if (inst->getLabel() &&
          (opc == LdLoc    || opc == LdStack ||
           opc == GuardLoc || opc == GuardStk)) {
        LabelInstruction* exitLabel = inst->getLabel();
        // Find the GuardFailure's label and confirm this branches there
        if (guardLabel == NULL) {
          Trace* exitTrace = exitLabel->getTrace();
          IRInstruction::List& xList = exitTrace->getInstructionList();
          IRInstruction::Iterator instIter = xList.begin();
          instIter++; // skip over label
          // Confirm the exit trace is a guard failure (ExitGuardFailure)
          for (IRInstruction::Iterator it = instIter; it != xList.end(); ++it) {
            IRInstruction* i = *it;
            Opcode op = i->getOpcode();
            if (op == Marker) {
              continue;
            }
            if (op == ExitGuardFailure) {
              guardLabel = exitLabel;
            }
            // Do not optimize if other instructions are on exit trace
            break;
          }
        }
        if (exitLabel == guardLabel) {
          inst->setTCA(kIRDirectGuardActive);
          continue;
        }
        break;
      }
      if (opc == Marker || opc == DefLabel || opc == DefSP || opc == DefFP ||
          opc == LdStack) {
        continue;
      }
      break;
    }
  }
}
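
The pass encodes liveness in each instruction's id: everything starts DEAD, initInstructions marks the essential roots, and the worklist promotes whatever a live instruction depends on to LIVE (or to one of the REFCOUNT_CONSUMED states, which also count as live). Below is a minimal sketch of the final sweep under that encoding, assuming removeDeadInstructions simply erases whatever the worklist never reached; the real helper is defined elsewhere.

// Hypothetical sketch of removeDeadInstructions: erase every instruction
// still marked DEAD once the worklist has converged. Instructions in a
// REFCOUNT_CONSUMED state are live and must be kept.
void removeDeadInstructions(Trace* trace) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end();
       /* advanced in the loop body */) {
    if ((*it)->getId() == DEAD) {
      it = instList.erase(it); // no live instruction uses its result
    } else {
      ++it;
    }
  }
}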
Example #3
// <localValues> is passed by value so that each recursive call on an exit
// trace works on its own copy of the local-value map.
void LinearScan::rematerializeAux(Trace* trace,
                                  SSATmp* curSp,
                                  SSATmp* curFp,
                                  std::vector<SSATmp*> localValues) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end();
       ++it) {
    IRInstruction* inst = *it;
    Opcode opc = inst->getOpcode();
    SSATmp* dst = inst->getDst();
    if (opc == DefFP || opc == FreeActRec) {
      curFp = dst;
      assert(dst && dst->getReg() == rVmFp);
    }
    if (opc == Reload) {
      // s = Spill t0
      // t = Reload s
      SSATmp* spilledTmp = getSpilledTmp(dst);
      IRInstruction* spilledInst = spilledTmp->getInstruction();
      IRInstruction* newInst = NULL;
      if (spilledInst->isRematerializable() ||
          (spilledInst->getOpcode() == LdStack &&
           spilledInst->getSrc(0) == curSp)) {
        // XXX: could change <newInst> to the non-check version.
        // Rematerialize instructions for which isRematerializable()
        // returns true, as well as LdStack off the current stack pointer.
        newInst = spilledInst->clone(m_irFactory);
        // The new instruction needn't have an exit label, because it is always
        // dominated by the original instruction.
        newInst->setLabel(NULL);
      } else {
        // Rematerialize LdLoc.
        // Search for a local that stores the value of <spilledTmp>.
        std::vector<SSATmp*>::iterator pos =
          std::find(localValues.begin(),
                    localValues.end(),
                    canonicalize(spilledTmp));
        if (pos != localValues.end()) {
          size_t locId = pos - localValues.begin();
          assert(curFp != NULL);
          ConstInstruction constInst(curFp, Local(locId));
          IRInstruction* ldHomeInst =
            m_irFactory->cloneInstruction(&constInst);
          newInst = m_irFactory->gen(LdLoc,
                                     dst->getType(),
                                     m_irFactory->getSSATmp(ldHomeInst));
        }
      }
      if (newInst) {
        UNUSED Type::Tag oldType = dst->getType();
        newInst->setDst(dst);
        dst->setInstruction(newInst);
        assert(outputType(newInst) == oldType);
        *it = newInst;
        newInst->setParent(trace);
      }
    }

    // Update <curSp> and <localValues>.
    if (dst && dst->getReg() == rVmSp) {
      // <inst> modifies the stack pointer.
      curSp = dst;
    }
    if (opc == LdLoc || opc == StLoc || opc == StLocNT) {
      // dst = LdLoc home
      // StLoc/StLocNT home, src
      int locId = getLocalIdFromHomeOpnd(inst->getSrc(0));
      // Note that when we implement inlining, we will need to deal
      // with the new local id space of the inlined function.
      SSATmp* localValue = (opc == LdLoc ? dst : inst->getSrc(1));
      if (int(localValues.size()) < locId + 1) {
        localValues.resize(locId + 1);
      }
      localValues[locId] = canonicalize(localValue);
    }
    // Other instructions that may have side effects on locals must
    // kill the local variable values.
    else if (opc == IterInit) {
      int valLocId = inst->getSrc(3)->getConstValAsInt();
      localValues[valLocId] = NULL;
      if (inst->getNumSrcs() == 5) {
        int keyLocId = inst->getSrc(4)->getConstValAsInt();
        localValues[keyLocId] = NULL;
      }
    } else if (opc == IterNext) {
      int valLocId = inst->getSrc(2)->getConstValAsInt();
      localValues[valLocId] = NULL;
      if (inst->getNumSrcs() == 4) {
        int keyLocId = inst->getSrc(3)->getConstValAsInt();
        localValues[keyLocId] = NULL;
      }
    }

    if (inst->isControlFlowInstruction()) {
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        rematerializeAux(label->getParent(), curSp, curFp, localValues);
      }
    }
  }
}
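
The <localValues> bookkeeping above only works if two SSATmps holding the same value compare equal, which is what canonicalize provides. Here is a plausible sketch, assuming it simply chases pass-through copies (e.g. Mov and IncRef) back to the defining tmp; the real helper may follow more opcodes.

// Hypothetical sketch of canonicalize: follow pass-through definitions
// back to the original SSATmp so copies of one value map to one key.
SSATmp* canonicalize(SSATmp* tmp) {
  while (true) {
    IRInstruction* inst = tmp->getInstruction();
    Opcode opc = inst->getOpcode();
    if (opc != Mov && opc != IncRef) {
      return tmp; // reached the canonical definition
    }
    tmp = inst->getSrc(0); // step through the copy
  }
}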
Example #4
// Sink IncRefs consumed off trace.
// When <trace> is an exit trace, <toSink> contains all live IncRefs in the
// main trace that are consumed off trace.
void sinkIncRefs(Trace* trace,
                 IRFactory* irFactory,
                 IRInstruction::List& toSink) {
  IRInstruction::List& instList = trace->getInstructionList();
  IRInstruction::Iterator it;

  std::map<SSATmp*, SSATmp*> sunkTmps;
  if (!trace->isMain()) {
    // Sink REFCOUNT_CONSUMED_OFF_TRACE IncRefs before the first non-label
    // instruction, and create a mapping from the original tmps to the sunk
    // tmps so that we can later replace the former with the latter.
    for (IRInstruction::ReverseIterator j = toSink.rbegin();
         j != toSink.rend();
         ++j) {
      // prependInstruction inserts an instruction at the beginning, so we
      // iterate through toSink in reverse order.
      IRInstruction* sunkInst = irFactory->incRef((*j)->getSrc(0));
      sunkInst->setId(LIVE);
      trace->prependInstruction(sunkInst);

      ASSERT((*j)->getDst());
      ASSERT(!sunkTmps.count((*j)->getDst()));
      sunkTmps[(*j)->getDst()] = irFactory->getSSATmp(sunkInst);
    }
  }

  // An exit trace may be entered from multiple exit points. We keep track of
  // which exit traces we already pushed sunk IncRefs to, so that we won't push
  // them multiple times.
  std::set<Trace*> pushedTo;
  for (it = instList.begin(); it != instList.end(); ++it) {
    IRInstruction* inst = *it;
    if (trace->isMain()) {
      if (inst->getOpcode() == IncRef) {
        // Must be REFCOUNT_CONSUMED or REFCOUNT_CONSUMED_OFF_TRACE;
        // otherwise, it would already have been removed in optimizeRefCount.
        ASSERT(inst->getId() == REFCOUNT_CONSUMED ||
               inst->getId() == REFCOUNT_CONSUMED_OFF_TRACE);
        if (inst->getId() == REFCOUNT_CONSUMED_OFF_TRACE) {
          inst->setOpcode(Mov);
          // Mark them as dead so that they'll be removed later.
          inst->setId(DEAD);
          // Add all REFCOUNT_CONSUMED_OFF_TRACE IncRefs to the sinking list.
          toSink.push_back(inst);
        }
      }
      if (inst->getOpcode() == DecRefNZ) {
        IRInstruction* srcInst = inst->getSrc(0)->getInstruction();
        if (srcInst->getId() == DEAD) {
          inst->setId(DEAD);
          // This may take O(I) time where I is the number of IncRefs
          // in the main trace.
          toSink.remove(srcInst);
        }
      }
      if (LabelInstruction* label = inst->getLabel()) {
        Trace* exitTrace = label->getTrace();
        if (!pushedTo.count(exitTrace)) {
          pushedTo.insert(exitTrace);
          sinkIncRefs(exitTrace, irFactory, toSink);
        }
      }
    } else {
      // Replace the original tmps with the sunk tmps.
      for (uint32 i = 0; i < inst->getNumSrcs(); ++i) {
        SSATmp* src = inst->getSrc(i);
        if (SSATmp* sunkTmp = sunkTmps[src]) {
          inst->setSrc(i, sunkTmp);
        }
      }
    }
  }

  // Run copyProp last, because we need to keep the
  // REFCOUNT_CONSUMED_OFF_TRACE Movs as prototypes for the sunk
  // instructions.
  for (it = instList.begin(); it != instList.end(); ++it) {
    Simplifier::copyProp(*it);
  }
}
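
To make the transformation concrete, here is a small before/after picture of one IncRef that is consumed off trace (HHIR-like pseudocode in comments; the tmp names and label are made up for illustration):

// Before sinking:                     After sinking (before final DCE):
//
// main trace:                         main trace:
//   t2 = IncRef t1                      t2 = Mov t1       // marked DEAD
//   ...                                 ...
//   JmpZero ... -> L1                   JmpZero ... -> L1
//
// exit trace L1:                      exit trace L1:
//   ...                                 t9 = IncRef t1    // sunk copy
//   DecRef t2                           ...
//                                       DecRef t9         // rewritten src
//
// The closing copyProp loop then forwards t1 through the Mov for any
// remaining main-trace uses of t2, and the DEAD Mov is removed later by
// removeDeadInstructions.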