Example 1
void LoadAndStorePromoter::
run(const SmallVectorImpl<Instruction*> &Insts) const {
  
  // First step: bucket up uses of the alloca by the block they occur in.
  // This is important because we have to handle multiple defs/uses in a block
  // ourselves: SSAUpdater is purely for cross-block references.
  DenseMap<BasicBlock*, TinyPtrVector<Instruction*> > UsesByBlock;
  
  for (unsigned i = 0, e = Insts.size(); i != e; ++i) {
    Instruction *User = Insts[i];
    UsesByBlock[User->getParent()].push_back(User);
  }
  
  // Okay, now we can iterate over all the blocks in the function with uses,
  // processing them.  Keep track of which loads are loading a live-in value.
  // Walk the uses in the use-list order to be deterministic.
  SmallVector<LoadInst*, 32> LiveInLoads;
  DenseMap<Value*, Value*> ReplacedLoads;
  
  for (unsigned i = 0, e = Insts.size(); i != e; ++i) {
    Instruction *User = Insts[i];
    BasicBlock *BB = User->getParent();
    TinyPtrVector<Instruction*> &BlockUses = UsesByBlock[BB];
    
    // If this block has already been processed, ignore this repeat use.
    if (BlockUses.empty()) continue;
    
    // Okay, this is the first use in the block.  If this block just has a
    // single user in it, we can rewrite it trivially.
    if (BlockUses.size() == 1) {
      // If it is a store, it is a trivial def of the value in the block.
      if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
        updateDebugInfo(SI);
        SSA.AddAvailableValue(BB, SI->getOperand(0));
      } else 
        // Otherwise it is a load, queue it to rewrite as a live-in load.
        LiveInLoads.push_back(cast<LoadInst>(User));
      BlockUses.clear();
      continue;
    }
    
    // Otherwise, check to see if this block is all loads.
    bool HasStore = false;
    for (unsigned i = 0, e = BlockUses.size(); i != e; ++i) {
      if (isa<StoreInst>(BlockUses[i])) {
        HasStore = true;
        break;
      }
    }
    
    // If so, we can queue them all as live in loads.  We don't have an
    // efficient way to tell which one is first in the block and don't want to
    // scan large blocks, so just add all loads as live ins.
    if (!HasStore) {
      for (unsigned i = 0, e = BlockUses.size(); i != e; ++i)
        LiveInLoads.push_back(cast<LoadInst>(BlockUses[i]));
      BlockUses.clear();
      continue;
    }
    
    // Otherwise, we have mixed loads and stores (or just a bunch of stores).
    // Since SSAUpdater is purely for cross-block values, we need to determine
    // the order of these instructions in the block.  If the first use in the
    // block is a load, then it uses the live in value.  The last store defines
    // the live out value.  We handle this by doing a linear scan of the block.
    Value *StoredValue = 0;
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      if (LoadInst *L = dyn_cast<LoadInst>(II)) {
        // If this is a load from an unrelated pointer, ignore it.
        if (!isInstInList(L, Insts)) continue;
        
        // If we haven't seen a store yet, this is a live in use, otherwise
        // use the stored value.
        if (StoredValue) {
          replaceLoadWithValue(L, StoredValue);
          L->replaceAllUsesWith(StoredValue);
          ReplacedLoads[L] = StoredValue;
        } else {
          LiveInLoads.push_back(L);
        }
        continue;
      }
      
      if (StoreInst *SI = dyn_cast<StoreInst>(II)) {
        // If this is a store to an unrelated pointer, ignore it.
        if (!isInstInList(SI, Insts)) continue;
        updateDebugInfo(SI);

        // Remember that this is the active value in the block.
        StoredValue = SI->getOperand(0);
      }
    }
    
    // The last stored value that happened is the live-out for the block.
    assert(StoredValue && "Already checked that there is a store in block");
    SSA.AddAvailableValue(BB, StoredValue);
    BlockUses.clear();
  }
  
  // Okay, now we rewrite all loads that use live-in values in the loop,
  // inserting PHI nodes as necessary.
  for (unsigned i = 0, e = LiveInLoads.size(); i != e; ++i) {
    LoadInst *ALoad = LiveInLoads[i];
    Value *NewVal = SSA.GetValueInMiddleOfBlock(ALoad->getParent());
    replaceLoadWithValue(ALoad, NewVal);

    // Avoid assertions in unreachable code.
    if (NewVal == ALoad) NewVal = UndefValue::get(NewVal->getType());
    ALoad->replaceAllUsesWith(NewVal);
    ReplacedLoads[ALoad] = NewVal;
  }
  
  // Allow the client to do stuff before we start nuking things.
  doExtraRewritesBeforeFinalDeletion();
  
  // Now that everything is rewritten, delete the old instructions from the
  // function.  They should all be dead now.
  for (unsigned i = 0, e = Insts.size(); i != e; ++i) {
    Instruction *User = Insts[i];
    
    // If this is a load that still has uses, then the load must have been added
    // as a live value in the SSAUpdate data structure for a block (e.g. because
    // the loaded value was stored later).  In this case, we need to recursively
    // propagate the updates until we get to the real value.
    if (!User->use_empty()) {
      Value *NewVal = ReplacedLoads[User];
      assert(NewVal && "not a replaced load?");
      
      // Propagate down to the ultimate replacee.  The intermediate loads
      // could theoretically already have been deleted, so we don't want to
      // dereference the Value*'s.
      DenseMap<Value*, Value*>::iterator RLI = ReplacedLoads.find(NewVal);
      while (RLI != ReplacedLoads.end()) {
        NewVal = RLI->second;
        RLI = ReplacedLoads.find(NewVal);
      }
      
      replaceLoadWithValue(cast<LoadInst>(User), NewVal);
      User->replaceAllUsesWith(NewVal);
    }
    
    instructionDeleted(User);
    User->eraseFromParent();
  }
}
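
A minimal usage sketch for the promoter above, assuming the LoadAndStorePromoter constructor from llvm/Transforms/Utils/SSAUpdater.h that takes the instruction list and an SSAUpdater. AI (the alloca being promoted) and the surrounding pass context are hypothetical; clients often subclass the promoter to override its hooks rather than instantiating it directly.

  SmallVector<PHINode*, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);

  // Collect every load/store of the alloca we want to promote.
  SmallVector<Instruction*, 64> Insts;
  for (User *U : AI->users())
    Insts.push_back(cast<Instruction>(U));

  // Rewrite all of them in terms of SSA values, inserting PHIs as needed.
  LoadAndStorePromoter Promoter(Insts, SSA, AI->getName());
  Promoter.run(Insts);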
Example 2
MipsInstrInfo::BranchType MipsInstrInfo::AnalyzeBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
    SmallVectorImpl<MachineOperand> &Cond, bool AllowModify,
    SmallVectorImpl<MachineInstr *> &BranchInstrs) const {

  MachineBasicBlock::reverse_iterator I = MBB.rbegin(), REnd = MBB.rend();

  // Skip all the debug instructions.
  while (I != REnd && I->isDebugValue())
    ++I;

  if (I == REnd || !isUnpredicatedTerminator(*I)) {
    // This block ends with no branches (it just falls through to its succ).
    // Leave TBB/FBB null.
    TBB = FBB = nullptr;
    return BT_NoBranch;
  }

  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();
  BranchInstrs.push_back(LastInst);

  // Not an analyzable branch (e.g., indirect jump).
  if (!getAnalyzableBrOpc(LastOpc))
    return LastInst->isIndirectBranch() ? BT_Indirect : BT_None;

  // Get the second to last instruction in the block.
  unsigned SecondLastOpc = 0;
  MachineInstr *SecondLastInst = nullptr;

  if (++I != REnd) {
    SecondLastInst = &*I;
    SecondLastOpc = getAnalyzableBrOpc(SecondLastInst->getOpcode());

    // Not an analyzable branch (must be an indirect jump).
    if (isUnpredicatedTerminator(*SecondLastInst) && !SecondLastOpc)
      return BT_None;
  }

  // If there is only one terminator instruction, process it.
  if (!SecondLastOpc) {
    // Unconditional branch.
    if (LastInst->isUnconditionalBranch()) {
      TBB = LastInst->getOperand(0).getMBB();
      return BT_Uncond;
    }

    // Conditional branch
    AnalyzeCondBr(LastInst, LastOpc, TBB, Cond);
    return BT_Cond;
  }

  // If we reached here, there are two branches.
  // If there are three terminators, we don't know what sort of block this is.
  if (++I != REnd && isUnpredicatedTerminator(*I))
    return BT_None;

  BranchInstrs.insert(BranchInstrs.begin(), SecondLastInst);

  // If second to last instruction is an unconditional branch,
  // analyze it and remove the last instruction.
  if (SecondLastInst->isUnconditionalBranch()) {
    // Return if the last instruction cannot be removed.
    if (!AllowModify)
      return BT_None;

    TBB = SecondLastInst->getOperand(0).getMBB();
    LastInst->eraseFromParent();
    BranchInstrs.pop_back();
    return BT_Uncond;
  }

  // Conditional branch followed by an unconditional branch.
  // The last one must be unconditional.
  if (!LastInst->isUnconditionalBranch())
    return BT_None;

  AnalyzeCondBr(SecondLastInst, SecondLastOpc, TBB, Cond);
  FBB = LastInst->getOperand(0).getMBB();

  return BT_CondUncond;
}
Example 3
void MatcherGen::
EmitResultInstructionAsOperand(const TreePatternNode *N,
                               SmallVectorImpl<unsigned> &OutputOps) {
  Record *Op = N->getOperator();
  const CodeGenTarget &CGT = CGP.getTargetInfo();
  CodeGenInstruction &II = CGT.getInstruction(Op);
  const DAGInstruction &Inst = CGP.getInstruction(Op);

  // If we can, get the pattern for the instruction we're generating. We derive
  // a variety of information from this pattern, such as whether it has a chain.
  //
  // FIXME2: This is extremely dubious for several reasons, not the least of
  // which is that it gives special status to instructions with patterns that
  // Pat<> nodes can't duplicate.
  const TreePatternNode *InstPatNode = GetInstPatternNode(Inst, N);

  // NodeHasChain - Whether the instruction node we're creating takes chains.
  bool NodeHasChain = InstPatNode &&
                      InstPatNode->TreeHasProperty(SDNPHasChain, CGP);

  // Instructions which load and store from memory should have a chain,
  // regardless of whether they happen to have an internal pattern saying so.
  if (Pattern.getSrcPattern()->TreeHasProperty(SDNPHasChain, CGP)
      && (II.hasCtrlDep || II.mayLoad || II.mayStore || II.canFoldAsLoad ||
          II.hasSideEffects))
      NodeHasChain = true;

  bool isRoot = N == Pattern.getDstPattern();

  // TreeHasOutGlue - True if this tree has glue.
  bool TreeHasInGlue = false, TreeHasOutGlue = false;
  if (isRoot) {
    const TreePatternNode *SrcPat = Pattern.getSrcPattern();
    TreeHasInGlue = SrcPat->TreeHasProperty(SDNPOptInGlue, CGP) ||
                    SrcPat->TreeHasProperty(SDNPInGlue, CGP);

    // FIXME2: this is checking the entire pattern, not just the node in
    // question, doing this just for the root seems like a total hack.
    TreeHasOutGlue = SrcPat->TreeHasProperty(SDNPOutGlue, CGP);
  }

  // NumResults - This is the number of results produced by the instruction in
  // the "outs" list.
  unsigned NumResults = Inst.getNumResults();

  // Number of operands we know the output instruction must have. If it is
  // variadic, we could have more operands.
  unsigned NumFixedOperands = II.Operands.size();

  SmallVector<unsigned, 8> InstOps;

  // Loop over all of the fixed operands of the instruction pattern, emitting
  // code to fill them all in. The node 'N' usually has a number of children
  // equal to the number of input operands of the instruction.  However, in
  // cases where there are predicate operands for an instruction, we need to
  // fill in the 'execute always' values. Match up the node operands to the
  // instruction operands to do this.
  unsigned ChildNo = 0;
  for (unsigned InstOpNo = NumResults, e = NumFixedOperands;
       InstOpNo != e; ++InstOpNo) {
    // Determine what to emit for this operand.
    Record *OperandNode = II.Operands[InstOpNo].Rec;
    if (OperandNode->isSubClassOf("OperandWithDefaultOps") &&
        !CGP.getDefaultOperand(OperandNode).DefaultOps.empty()) {
      // This is a predicate or optional def operand; emit the
      // 'default ops' operands.
      const DAGDefaultOperand &DefaultOp
        = CGP.getDefaultOperand(OperandNode);
      for (unsigned i = 0, e = DefaultOp.DefaultOps.size(); i != e; ++i)
        EmitResultOperand(DefaultOp.DefaultOps[i], InstOps);
      continue;
    }

    // Otherwise this is a normal operand or a predicate operand without
    // 'execute always'; emit it.

    // For operands with multiple sub-operands we may need to emit
    // multiple child patterns to cover them all.  However, ComplexPattern
    // children may themselves emit multiple MI operands.
    unsigned NumSubOps = 1;
    if (OperandNode->isSubClassOf("Operand")) {
      DagInit *MIOpInfo = OperandNode->getValueAsDag("MIOperandInfo");
      if (unsigned NumArgs = MIOpInfo->getNumArgs())
        NumSubOps = NumArgs;
    }

    unsigned FinalNumOps = InstOps.size() + NumSubOps;
    while (InstOps.size() < FinalNumOps) {
      const TreePatternNode *Child = N->getChild(ChildNo);
      unsigned BeforeAddingNumOps = InstOps.size();
      EmitResultOperand(Child, InstOps);
      assert(InstOps.size() > BeforeAddingNumOps && "Didn't add any operands");

      // If the operand is an instruction and it produced multiple results, just
      // take the first one.
      if (!Child->isLeaf() && Child->getOperator()->isSubClassOf("Instruction"))
        InstOps.resize(BeforeAddingNumOps+1);

      ++ChildNo;
    }
  }

  // If this is a variadic output instruction (i.e. REG_SEQUENCE), we can't
  // expand suboperands, use default operands, or other features determined from
  // the CodeGenInstruction after the fixed operands, which were handled
  // above. Emit the remaining instructions implicitly added by the use for
  // variable_ops.
  if (II.Operands.isVariadic) {
    for (unsigned I = ChildNo, E = N->getNumChildren(); I < E; ++I)
      EmitResultOperand(N->getChild(I), InstOps);
  }

  // If this node has input glue or explicitly specified input physregs, we
  // need to add chained and glued copyfromreg nodes and materialize the glue
  // input.
  if (isRoot && !PhysRegInputs.empty()) {
    // Emit all of the CopyToReg nodes for the input physical registers.  These
    // occur in patterns like (mul:i8 AL:i8, GR8:i8:$src).
    for (unsigned i = 0, e = PhysRegInputs.size(); i != e; ++i)
      AddMatcher(new EmitCopyToRegMatcher(PhysRegInputs[i].second,
                                          PhysRegInputs[i].first));
    // Even if the node has no other glue inputs, the resultant node must be
    // glued to the CopyFromReg nodes we just generated.
    TreeHasInGlue = true;
  }

  // Result order: node results, chain, glue

  // Determine the result types.
  SmallVector<MVT::SimpleValueType, 4> ResultVTs;
  for (unsigned i = 0, e = N->getNumTypes(); i != e; ++i)
    ResultVTs.push_back(N->getType(i));

  // If this is the root instruction of a pattern that has physical registers in
  // its result pattern, add output VTs for them.  For example, X86 has:
  //   (set AL, (mul ...))
  // This also handles implicit results like:
  //   (implicit EFLAGS)
  if (isRoot && !Pattern.getDstRegs().empty()) {
    // If the root came from an implicit def in the instruction handling stuff,
    // don't re-add it.
    Record *HandledReg = nullptr;
    if (II.HasOneImplicitDefWithKnownVT(CGT) != MVT::Other)
      HandledReg = II.ImplicitDefs[0];

    for (unsigned i = 0; i != Pattern.getDstRegs().size(); ++i) {
      Record *Reg = Pattern.getDstRegs()[i];
      if (!Reg->isSubClassOf("Register") || Reg == HandledReg) continue;
      ResultVTs.push_back(getRegisterValueType(Reg, CGT));
    }
  }

  // If this is the root of the pattern and the pattern we're matching includes
  // a node that is variadic, mark the generated node as variadic so that it
  // gets the excess operands from the input DAG.
  int NumFixedArityOperands = -1;
  if (isRoot &&
      Pattern.getSrcPattern()->NodeHasProperty(SDNPVariadic, CGP))
    NumFixedArityOperands = Pattern.getSrcPattern()->getNumChildren();

  // If this is the root node and multiple matched nodes in the input pattern
  // have MemRefs in them, have the interpreter collect them and plop them onto
  // this node. If there is just one node with MemRefs, leave them on that node
  // even if it is not the root.
  //
  // FIXME3: This is actively incorrect for result patterns with multiple
  // memory-referencing instructions.
  bool PatternHasMemOperands =
    Pattern.getSrcPattern()->TreeHasProperty(SDNPMemOperand, CGP);

  bool NodeHasMemRefs = false;
  if (PatternHasMemOperands) {
    unsigned NumNodesThatLoadOrStore =
      numNodesThatMayLoadOrStore(Pattern.getDstPattern(), CGP);
    bool NodeIsUniqueLoadOrStore = mayInstNodeLoadOrStore(N, CGP) &&
                                   NumNodesThatLoadOrStore == 1;
    NodeHasMemRefs =
      NodeIsUniqueLoadOrStore || (isRoot && (mayInstNodeLoadOrStore(N, CGP) ||
                                             NumNodesThatLoadOrStore != 1));
  }

  assert((!ResultVTs.empty() || TreeHasOutGlue || NodeHasChain) &&
         "Node has no result");

  AddMatcher(new EmitNodeMatcher(II.Namespace+"::"+II.TheDef->getName(),
                                 ResultVTs, InstOps,
                                 NodeHasChain, TreeHasInGlue, TreeHasOutGlue,
                                 NodeHasMemRefs, NumFixedArityOperands,
                                 NextRecordedOperandNo));

  // The non-chain and non-glue results of the newly emitted node get recorded.
  for (unsigned i = 0, e = ResultVTs.size(); i != e; ++i) {
    if (ResultVTs[i] == MVT::Other || ResultVTs[i] == MVT::Glue) break;
    OutputOps.push_back(NextRecordedOperandNo++);
  }
}
Example 4
static void
updateSSAForUseOfInst(SILSSAUpdater &Updater,
                      SmallVectorImpl<SILArgument*> &InsertedPHIs,
                      const llvm::DenseMap<ValueBase *, SILValue> &ValueMap,
                      SILBasicBlock *Header, SILBasicBlock *EntryCheckBlock,
                      ValueBase *Inst) {
  if (Inst->use_empty())
    return;

  // Find the mapped instruction.
  assert(ValueMap.count(Inst) && "Expected to find value in map!");
  SILValue MappedValue = ValueMap.find(Inst)->second;
  auto *MappedInst = MappedValue.getDef();
  assert(MappedValue);
  assert(MappedInst);

  // For each use of a specific result value of the instruction.
  for (unsigned i = 0, e = Inst->getNumTypes(); i != e; ++i) {
    SILValue Res(Inst, i);
    // For block arguments, MappedValue is already indexed to indicate the
    // single result value that feeds the argument. In this case, i==0 because
    // SILArgument only produces one value.
    SILValue MappedRes =
        isa<SILArgument>(Inst) ? MappedValue : SILValue(MappedInst, i);
    assert(Res.getType() == MappedRes.getType() && "The types must match");

    InsertedPHIs.clear();
    Updater.Initialize(Res.getType());
    Updater.AddAvailableValue(Header, Res);
    Updater.AddAvailableValue(EntryCheckBlock, MappedRes);


    // Because of the way that phi nodes are represented we have to collect all
    // uses before we update SSA. Modifying one phi node can invalidate another,
    // unrelated phi node's operands through the common branch instruction
    // (which has to be modified). This would invalidate a plain
    // ValueUseIterator. Instead we collect the uses, wrapping uses in branches
    // specially, so that we can reconstruct the use even after the branch has
    // been modified.
    SmallVector<UseWrapper, 8> StoredUses;
    for (auto *U : Res.getUses())
      StoredUses.push_back(UseWrapper(U));
    for (auto U : StoredUses) {
      Operand *Use = U;
      SILInstruction *User = Use->getUser();
      assert(User && "Missing user");

      // Ignore uses in the same basic block.
      if (User->getParent() == Header)
        continue;

      assert(User->getParent() != EntryCheckBlock &&
             "The entry check block should dominate the header");
      Updater.RewriteUse(*Use);
    }
    // Canonicalize inserted phis to avoid extra BB Args.
    for (SILArgument *Arg : InsertedPHIs) {
      if (SILInstruction *Inst = replaceBBArgWithCast(Arg)) {
        Arg->replaceAllUsesWith(Inst);
        // DCE+SimplifyCFG runs as a post-pass cleanup.
        // DCE replaces dead arg values with undef.
        // SimplifyCFG deletes the dead BB arg.
      }
    }
  }
}
Example 5
void Mips16TargetLowering::
getOpndList(SmallVectorImpl<SDValue> &Ops,
            std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
            bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
            CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const {
  SelectionDAG &DAG = CLI.DAG;
  MachineFunction &MF = DAG.getMachineFunction();
  MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
  const char* Mips16HelperFunction = nullptr;
  bool NeedMips16Helper = false;

  if (Subtarget.inMips16HardFloat()) {
    //
    // Currently we don't have symbols tagged with the mips16 or mips32
    // qualifier, so we will assume that we don't know what kind it is
    // and generate the helper.
    //
    bool LookupHelper = true;
    if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(CLI.Callee)) {
      Mips16Libcall Find = { RTLIB::UNKNOWN_LIBCALL, S->getSymbol() };

      if (std::binary_search(std::begin(HardFloatLibCalls),
                             std::end(HardFloatLibCalls), Find))
        LookupHelper = false;
      else {
        const char *Symbol = S->getSymbol();
        Mips16IntrinsicHelperType IntrinsicFind = { Symbol, "" };
        const Mips16HardFloatInfo::FuncSignature *Signature =
            Mips16HardFloatInfo::findFuncSignature(Symbol);
        if (!IsPICCall && (Signature && (FuncInfo->StubsNeeded.find(Symbol) ==
                                         FuncInfo->StubsNeeded.end()))) {
          FuncInfo->StubsNeeded[Symbol] = Signature;
          //
          // S2 is normally saved if the stub is for a function which
          // returns a float or double value, and is not saved otherwise. This
          // is because more work is required after the function the stub is
          // calling completes; the stub cannot directly return, and it has no
          // stack space to store the return address, so S2 is used for that
          // purpose.
          // In order to take advantage of not saving S2, we need to also
          // optimize the call in the stub and this requires some further
          // functionality in MipsAsmPrinter which we don't have yet.
          // So for now we always save S2. The optimization will be done
          // in a follow-on patch.
          //
          if (1 || (Signature->RetSig != Mips16HardFloatInfo::NoFPRet))
            FuncInfo->setSaveS2();
        }
        // One more look at the list of intrinsics.
        const Mips16IntrinsicHelperType *Helper =
            std::lower_bound(std::begin(Mips16IntrinsicHelper),
                             std::end(Mips16IntrinsicHelper), IntrinsicFind);
        if (Helper != std::end(Mips16IntrinsicHelper) &&
            *Helper == IntrinsicFind) {
          Mips16HelperFunction = Helper->Helper;
          NeedMips16Helper = true;
          LookupHelper = false;
        }

      }
    } else if (GlobalAddressSDNode *G =
                   dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
      Mips16Libcall Find = { RTLIB::UNKNOWN_LIBCALL,
                             G->getGlobal()->getName().data() };

      if (std::binary_search(std::begin(HardFloatLibCalls),
                             std::end(HardFloatLibCalls), Find))
        LookupHelper = false;
    }
    if (LookupHelper)
      Mips16HelperFunction =
        getMips16HelperFunction(CLI.RetTy, CLI.getArgs(), NeedMips16Helper);
  }

  SDValue JumpTarget = Callee;

  // T9 should contain the address of the callee function if
  // -relocation-model=pic or it is an indirect call.
  if (IsPICCall || !GlobalOrExternal) {
    unsigned V0Reg = Mips::V0;
    if (NeedMips16Helper) {
      RegsToPass.push_front(std::make_pair(V0Reg, Callee));
      JumpTarget = DAG.getExternalSymbol(Mips16HelperFunction, getPointerTy());
      ExternalSymbolSDNode *S = cast<ExternalSymbolSDNode>(JumpTarget);
      JumpTarget = getAddrGlobal(S, JumpTarget.getValueType(), DAG,
                                 MipsII::MO_GOT, Chain,
                                 FuncInfo->callPtrInfo(S->getSymbol()));
    } else
      RegsToPass.push_front(std::make_pair((unsigned)Mips::T9, Callee));
  }

  Ops.push_back(JumpTarget);

  MipsTargetLowering::getOpndList(Ops, RegsToPass, IsPICCall, GlobalOrExternal,
                                  InternalLinkage, CLI, Callee, Chain);
}
Example 6
/// \brief Emit a code snippet and caret line.
///
/// This routine emits a single line's code snippet and caret line.
///
/// \param Loc The location for the caret.
/// \param Ranges The underlined ranges for this code snippet.
/// \param Hints The FixIt hints active for this diagnostic.
void TextDiagnostic::emitSnippetAndCaret(
    SourceLocation Loc, DiagnosticsEngine::Level Level,
    SmallVectorImpl<CharSourceRange>& Ranges,
    ArrayRef<FixItHint> Hints,
    const SourceManager &SM) {
  assert(!Loc.isInvalid() && "must have a valid source location here");
  assert(Loc.isFileID() && "must have a file location here");

  // If caret diagnostics are enabled and we have a location, we want to
  // emit the caret.  However, we only do this if the location moved
  // from the last diagnostic, if the last diagnostic was a note that
  // was part of a different warning or error diagnostic, or if the
  // diagnostic has ranges.  We don't want to emit the same caret
  // multiple times if one loc has multiple diagnostics.
  if (!DiagOpts->ShowCarets)
    return;
  if (Loc == LastLoc && Ranges.empty() && Hints.empty() &&
      (LastLevel != DiagnosticsEngine::Note || Level == LastLevel))
    return;

  // Decompose the location into a FID/Offset pair.
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  FileID FID = LocInfo.first;
  unsigned FileOffset = LocInfo.second;

  // Get information about the buffer it points into.
  bool Invalid = false;
  const char *BufStart = SM.getBufferData(FID, &Invalid).data();
  if (Invalid)
    return;

  unsigned LineNo = SM.getLineNumber(FID, FileOffset);
  unsigned ColNo = SM.getColumnNumber(FID, FileOffset);
  
  // Arbitrarily stop showing snippets when the line is too long.
  static const ptrdiff_t MaxLineLengthToPrint = 4096;
  if (ColNo > MaxLineLengthToPrint)
    return;

  // Rewind from the current position to the start of the line.
  const char *TokPtr = BufStart+FileOffset;
  const char *LineStart = TokPtr-ColNo+1; // Column # is 1-based.

  // Compute the line end.  Scan forward from the error position to the end of
  // the line.
  const char *LineEnd = TokPtr;
  while (*LineEnd != '\n' && *LineEnd != '\r' && *LineEnd != '\0')
    ++LineEnd;

  // Arbitrarily stop showing snippets when the line is too long.
  if (LineEnd - LineStart > MaxLineLengthToPrint)
    return;

  // Copy the line of code into an std::string for ease of manipulation.
  std::string SourceLine(LineStart, LineEnd);

  // Create a line for the caret that is filled with spaces that is the same
  // length as the line of source code.
  std::string CaretLine(LineEnd-LineStart, ' ');

  const SourceColumnMap sourceColMap(SourceLine, DiagOpts->TabStop);

  // Highlight all of the characters covered by Ranges with ~ characters.
  for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
                                                  E = Ranges.end();
       I != E; ++I)
    highlightRange(*I, LineNo, FID, sourceColMap, CaretLine, SM, LangOpts);

  // Next, insert the caret itself.
  ColNo = sourceColMap.byteToContainingColumn(ColNo-1);
  if (CaretLine.size()<ColNo+1)
    CaretLine.resize(ColNo+1, ' ');
  CaretLine[ColNo] = '^';

  std::string FixItInsertionLine = buildFixItInsertionLine(LineNo,
                                                           sourceColMap,
                                                           Hints, SM,
                                                           DiagOpts.getPtr());

  // If the source line is too long for our terminal, select only the
  // "interesting" source region within that line.
  unsigned Columns = DiagOpts->MessageLength;
  if (Columns)
    selectInterestingSourceRegion(SourceLine, CaretLine, FixItInsertionLine,
                                  Columns, sourceColMap);

  // If we are in -fdiagnostics-print-source-range-info mode, we are trying
  // to produce easily machine parsable output.  Add a space before the
  // source line and the caret to make it trivial to tell the main diagnostic
  // line from what the user is intended to see.
  if (DiagOpts->ShowSourceRanges) {
    SourceLine = ' ' + SourceLine;
    CaretLine = ' ' + CaretLine;
  }

  // Finally, remove any blank spaces from the end of CaretLine.
  while (CaretLine[CaretLine.size()-1] == ' ')
    CaretLine.erase(CaretLine.end()-1);

  // Emit what we have computed.
  emitSnippet(SourceLine);

  if (DiagOpts->ShowColors)
    OS.changeColor(caretColor, true);
  OS << CaretLine << '\n';
  if (DiagOpts->ShowColors)
    OS.resetColor();

  if (!FixItInsertionLine.empty()) {
    if (DiagOpts->ShowColors)
      // Print fixit line in color
      OS.changeColor(fixitColor, false);
    if (DiagOpts->ShowSourceRanges)
      OS << ' ';
    OS << FixItInsertionLine << '\n';
    if (DiagOpts->ShowColors)
      OS.resetColor();
  }

  // Print out any parseable fixit information requested by the options.
  emitParseableFixits(Hints, SM);
}
Example 7
/// HandleIntegerSModifier - Handle the integer 's' modifier.  This adds the
/// letter 's' to the string if the value is not 1.  This is used in cases like
/// this:  "you idiot, you have %4 parameter%s4!".
static void HandleIntegerSModifier(unsigned ValNo,
                                   SmallVectorImpl<char> &OutStr) {
  if (ValNo != 1)
    OutStr.push_back('s');
}
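
A tiny caller-side sketch of the SmallVectorImpl<char> out-parameter idiom above; the snippet is hypothetical and only needs llvm/ADT/SmallVector.h and llvm/ADT/StringRef.h.

  StringRef Word = "parameter";
  SmallVector<char, 16> OutStr(Word.begin(), Word.end());  // caller owns the buffer
  HandleIntegerSModifier(/*ValNo=*/4, OutStr);             // appends 's' since 4 != 1
  // OutStr now spells "parameters"; with ValNo == 1 it would stay "parameter".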
Example 8
/// Return true if the specified block is in the list.
static bool isExitBlock(BasicBlock *BB,
                        const SmallVectorImpl<BasicBlock *> &ExitBlocks) {
  return find(ExitBlocks, BB) != ExitBlocks.end();
}
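
A sketch of how the ExitBlocks argument is typically produced, assuming a Loop *L from LoopInfo and a candidate block BB (both hypothetical here):

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);        // Loop::getExitBlocks fills a SmallVectorImpl&
  if (isExitBlock(BB, ExitBlocks)) {
    // BB is an exit block of L.
  }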
Example 9
/// For every instruction from the worklist, check to see if it has any uses
/// that are outside the current loop.  If so, insert LCSSA PHI nodes and
/// rewrite the uses.
bool llvm::formLCSSAForInstructions(SmallVectorImpl<Instruction *> &Worklist,
                                    DominatorTree &DT, LoopInfo &LI) {
  SmallVector<Use *, 16> UsesToRewrite;
  SmallVector<BasicBlock *, 8> ExitBlocks;
  SmallSetVector<PHINode *, 16> PHIsToRemove;
  PredIteratorCache PredCache;
  bool Changed = false;

  while (!Worklist.empty()) {
    UsesToRewrite.clear();
    ExitBlocks.clear();

    Instruction *I = Worklist.pop_back_val();
    BasicBlock *InstBB = I->getParent();
    Loop *L = LI.getLoopFor(InstBB);
    L->getExitBlocks(ExitBlocks);

    if (ExitBlocks.empty())
      continue;

    // Tokens cannot be used in PHI nodes, so we skip over them.
    // We can run into tokens which are live out of a loop with catchswitch
    // instructions in Windows EH if the catchswitch has one catchpad which
    // is inside the loop and another which is not.
    if (I->getType()->isTokenTy())
      continue;

    for (Use &U : I->uses()) {
      Instruction *User = cast<Instruction>(U.getUser());
      BasicBlock *UserBB = User->getParent();
      if (PHINode *PN = dyn_cast<PHINode>(User))
        UserBB = PN->getIncomingBlock(U);

      if (InstBB != UserBB && !L->contains(UserBB))
        UsesToRewrite.push_back(&U);
    }

    // If there are no uses outside the loop, exit with no change.
    if (UsesToRewrite.empty())
      continue;

    ++NumLCSSA; // We are applying the transformation

    // Invoke instructions are special in that their result value is not
    // available along their unwind edge. The code below tests to see whether
    // DomBB dominates the value, so adjust DomBB to the normal destination
    // block, which is effectively where the value is first usable.
    BasicBlock *DomBB = InstBB;
    if (InvokeInst *Inv = dyn_cast<InvokeInst>(I))
      DomBB = Inv->getNormalDest();

    DomTreeNode *DomNode = DT.getNode(DomBB);

    SmallVector<PHINode *, 16> AddedPHIs;
    SmallVector<PHINode *, 8> PostProcessPHIs;

    SmallVector<PHINode *, 4> InsertedPHIs;
    SSAUpdater SSAUpdate(&InsertedPHIs);
    SSAUpdate.Initialize(I->getType(), I->getName());

    // Insert the LCSSA phi's into all of the exit blocks dominated by the
    // value, and add them to the Phi's map.
    for (BasicBlock *ExitBB : ExitBlocks) {
      if (!DT.dominates(DomNode, DT.getNode(ExitBB)))
        continue;

      // If we already inserted something for this BB, don't reprocess it.
      if (SSAUpdate.HasValueForBlock(ExitBB))
        continue;

      PHINode *PN = PHINode::Create(I->getType(), PredCache.size(ExitBB),
                                    I->getName() + ".lcssa", &ExitBB->front());

      // Add inputs from inside the loop for this PHI.
      for (BasicBlock *Pred : PredCache.get(ExitBB)) {
        PN->addIncoming(I, Pred);

        // If the exit block has a predecessor not within the loop, arrange for
        // the incoming value use corresponding to that predecessor to be
        // rewritten in terms of a different LCSSA PHI.
        if (!L->contains(Pred))
          UsesToRewrite.push_back(
              &PN->getOperandUse(PN->getOperandNumForIncomingValue(
                  PN->getNumIncomingValues() - 1)));
      }

      AddedPHIs.push_back(PN);

      // Remember that this phi makes the value alive in this block.
      SSAUpdate.AddAvailableValue(ExitBB, PN);

      // LoopSimplify might fail to simplify some loops (e.g. when indirect
      // branches are involved). In such situations, it might happen that an
      // exit for Loop L1 is the header of a disjoint Loop L2. Thus, when we
      // create PHIs in such an exit block, we are also inserting PHIs into L2's
      // header. This could break LCSSA form for L2 because these inserted PHIs
      // can also have uses outside of L2. Remember all PHIs in such a
      // situation so that we can revisit them later on. FIXME: Remove this if
      // indirectbr support in LoopSimplify gets improved.
      if (auto *OtherLoop = LI.getLoopFor(ExitBB))
        if (!L->contains(OtherLoop))
          PostProcessPHIs.push_back(PN);
    }

    // Rewrite all uses outside the loop in terms of the new PHIs we just
    // inserted.
    for (Use *UseToRewrite : UsesToRewrite) {
      // If this use is in an exit block, rewrite to use the newly inserted PHI.
      // This is required for correctness because SSAUpdate doesn't handle uses
      // in the same block.  It assumes the PHI we inserted is at the end of the
      // block.
      Instruction *User = cast<Instruction>(UseToRewrite->getUser());
      BasicBlock *UserBB = User->getParent();
      if (PHINode *PN = dyn_cast<PHINode>(User))
        UserBB = PN->getIncomingBlock(*UseToRewrite);

      if (isa<PHINode>(UserBB->begin()) && isExitBlock(UserBB, ExitBlocks)) {
        // Tell the VHs that the uses changed. This updates SCEV's caches.
        if (UseToRewrite->get()->hasValueHandle())
          ValueHandleBase::ValueIsRAUWd(*UseToRewrite, &UserBB->front());
        UseToRewrite->set(&UserBB->front());
        continue;
      }

      // Otherwise, do full PHI insertion.
      SSAUpdate.RewriteUse(*UseToRewrite);

      // SSAUpdater might have inserted phi-nodes inside other loops. We'll need
      // to post-process them to keep LCSSA form.
      for (PHINode *InsertedPN : InsertedPHIs) {
        if (auto *OtherLoop = LI.getLoopFor(InsertedPN->getParent()))
          if (!L->contains(OtherLoop))
            PostProcessPHIs.push_back(InsertedPN);
      }
    }

    // Post process PHI instructions that were inserted into another disjoint
    // loop and update their exits properly.
    for (auto *PostProcessPN : PostProcessPHIs) {
      if (PostProcessPN->use_empty())
        continue;

      // Reprocess each PHI instruction.
      Worklist.push_back(PostProcessPN);
    }

    // Keep track of PHI nodes that we want to remove because they did not have
    // any uses rewritten.
    for (PHINode *PN : AddedPHIs)
      if (PN->use_empty())
        PHIsToRemove.insert(PN);

    Changed = true;
  }
  // Remove PHI nodes that did not have any uses rewritten.
  for (PHINode *PN : PHIsToRemove) {
    assert (PN->use_empty() && "Trying to remove a phi with uses.");
    PN->eraseFromParent();
  }
  return Changed;
}
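
A hedged sketch of a caller, assuming DT and LI have already been computed for the function and L is the loop being put into LCSSA form. The real formLCSSA driver only queues instructions that are used outside the loop, but queueing everything with uses is also correct, since the routine above skips instructions with no outside uses.

  SmallVector<Instruction *, 8> Worklist;
  for (BasicBlock *BB : L->blocks())
    for (Instruction &I : *BB)
      if (!I.use_empty())
        Worklist.push_back(&I);
  bool Changed = formLCSSAForInstructions(Worklist, DT, LI);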
Example 10
unsigned BitstreamCursor::readRecord(unsigned AbbrevID,
                                     SmallVectorImpl<uint64_t> &Vals,
                                     StringRef *Blob) {
  if (AbbrevID == bitc::UNABBREV_RECORD) {
    unsigned Code = ReadVBR(6);
    unsigned NumElts = ReadVBR(6);
    for (unsigned i = 0; i != NumElts; ++i)
      Vals.push_back(ReadVBR64(6));
    return Code;
  }

  const BitCodeAbbrev *Abbv = getAbbrev(AbbrevID);

  // Read the record code first.
  assert(Abbv->getNumOperandInfos() != 0 && "no record code in abbreviation?");
  const BitCodeAbbrevOp &CodeOp = Abbv->getOperandInfo(0);
  unsigned Code;
  if (CodeOp.isLiteral())
    Code = CodeOp.getLiteralValue();
  else {
    if (CodeOp.getEncoding() == BitCodeAbbrevOp::Array ||
        CodeOp.getEncoding() == BitCodeAbbrevOp::Blob)
      report_fatal_error("Abbreviation starts with an Array or a Blob");
    Code = readAbbreviatedField(*this, CodeOp);
  }

  for (unsigned i = 1, e = Abbv->getNumOperandInfos(); i != e; ++i) {
    const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i);
    if (Op.isLiteral()) {
      Vals.push_back(Op.getLiteralValue());
      continue;
    }

    if (Op.getEncoding() != BitCodeAbbrevOp::Array &&
        Op.getEncoding() != BitCodeAbbrevOp::Blob) {
      Vals.push_back(readAbbreviatedField(*this, Op));
      continue;
    }

    if (Op.getEncoding() == BitCodeAbbrevOp::Array) {
      // Array case.  Read the number of elements as a vbr6.
      unsigned NumElts = ReadVBR(6);

      // Get the element encoding.
      if (i + 2 != e)
        report_fatal_error("Array op not second to last");
      const BitCodeAbbrevOp &EltEnc = Abbv->getOperandInfo(++i);
      if (!EltEnc.isEncoding())
        report_fatal_error(
            "Array element type has to be an encoding of a type");

      // Read all the elements.
      switch (EltEnc.getEncoding()) {
      default:
        report_fatal_error("Array element type can't be an Array or a Blob");
      case BitCodeAbbrevOp::Fixed:
        for (; NumElts; --NumElts)
          Vals.push_back(Read((unsigned)EltEnc.getEncodingData()));
        break;
      case BitCodeAbbrevOp::VBR:
        for (; NumElts; --NumElts)
          Vals.push_back(ReadVBR64((unsigned)EltEnc.getEncodingData()));
        break;
      case BitCodeAbbrevOp::Char6:
        for (; NumElts; --NumElts)
          Vals.push_back(BitCodeAbbrevOp::DecodeChar6(Read(6)));
      }
      continue;
    }

    assert(Op.getEncoding() == BitCodeAbbrevOp::Blob);
    // Blob case.  Read the number of bytes as a vbr6.
    unsigned NumElts = ReadVBR(6);
    SkipToFourByteBoundary();  // 32-bit alignment

    // Figure out where the end of this blob will be including tail padding.
    size_t CurBitPos = GetCurrentBitNo();
    size_t NewEnd = CurBitPos+((NumElts+3)&~3)*8;

    // If this would read off the end of the bitcode file, just set the
    // record to empty and return.
    if (!canSkipToPos(NewEnd/8)) {
      Vals.append(NumElts, 0);
      NextChar = BitStream->getBitcodeBytes().getExtent();
      break;
    }

    // Otherwise, inform the streamer that we need these bytes in memory.
    const char *Ptr = (const char*)
      BitStream->getBitcodeBytes().getPointer(CurBitPos/8, NumElts);

    // If we can return a reference to the data, do so to avoid copying it.
    if (Blob) {
      *Blob = StringRef(Ptr, NumElts);
    } else {
      // Otherwise, unpack into Vals with zero extension.
      for (; NumElts; --NumElts)
        Vals.push_back((unsigned char)*Ptr++);
    }
    // Skip over tail padding.
    JumpToBit(NewEnd);
  }

  return Code;
}
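
A caller-side sketch, assuming a BitstreamCursor (Cursor) positioned inside a block and an Entry obtained from its advance loop; the loop that produces Entry is omitted.

  SmallVector<uint64_t, 64> Record;
  StringRef Blob;
  Record.clear();
  unsigned Code = Cursor.readRecord(Entry.ID, Record, &Blob);
  // Code identifies the record kind, Record holds its scalar operands, and
  // Blob (if non-empty) references the raw blob bytes without copying them.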
Example 11
/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;

    SubTypes.push_back(CT);
    Path.push_back(0);

    DeeperType = CT->getTypeAtIndex(0U);
  }

  return true;
}
Example 12
/// getMDKindNames - Populate the client-supplied SmallVector with the custom
/// metadata kind names, indexed by kind ID.
void LLVMContext::getMDKindNames(SmallVectorImpl<StringRef> &Names) const {
  Names.resize(pImpl->CustomMDKindNames.size());
  for (StringMap<unsigned>::const_iterator I = pImpl->CustomMDKindNames.begin(),
       E = pImpl->CustomMDKindNames.end(); I != E; ++I)
    Names[I->second] = I->first();
}
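
A short usage sketch; only LLVMContext and SmallVector are required, and the metadata kind name is hypothetical.

  LLVMContext Ctx;
  Ctx.getMDKindID("my.custom.md");     // registers a custom kind, returns its ID
  SmallVector<StringRef, 8> Names;
  Ctx.getMDKindNames(Names);           // Names[ID] is now the name for kind ID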
Example 13
void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
                                      ArrayRef<unsigned> RegsBeingSpilled) {
  SetVector<LiveInterval*,
            SmallVector<LiveInterval*, 8>,
            SmallPtrSet<LiveInterval*, 8> > ToShrink;

  for (;;) {
    // Erase all dead defs.
    while (!Dead.empty()) {
      MachineInstr *MI = Dead.pop_back_val();
      assert(MI->allDefsAreDead() && "Def isn't really dead");
      SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();

      // Never delete inline asm.
      if (MI->isInlineAsm()) {
        DEBUG(dbgs() << "Won't delete: " << Idx << '\t' << *MI);
        continue;
      }

      // Use the same criteria as DeadMachineInstructionElim.
      bool SawStore = false;
      if (!MI->isSafeToMove(&TII, 0, SawStore)) {
        DEBUG(dbgs() << "Can't delete: " << Idx << '\t' << *MI);
        continue;
      }

      DEBUG(dbgs() << "Deleting dead def " << Idx << '\t' << *MI);

      // Check for live intervals that may shrink
      for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
             MOE = MI->operands_end(); MOI != MOE; ++MOI) {
        if (!MOI->isReg())
          continue;
        unsigned Reg = MOI->getReg();
        if (!TargetRegisterInfo::isVirtualRegister(Reg))
          continue;
        LiveInterval &LI = LIS.getInterval(Reg);

        // Shrink read registers, unless it is likely to be expensive and
        // unlikely to change anything. We typically don't want to shrink the
        // PIC base register that has lots of uses everywhere.
        // Always shrink COPY uses that probably come from live range splitting.
        if (MI->readsVirtualRegister(Reg) &&
            (MI->isCopy() || MOI->isDef() || MRI.hasOneNonDBGUse(Reg) ||
             LI.killedAt(Idx)))
          ToShrink.insert(&LI);

        // Remove defined value.
        if (MOI->isDef()) {
          if (VNInfo *VNI = LI.getVNInfoAt(Idx)) {
            if (TheDelegate)
              TheDelegate->LRE_WillShrinkVirtReg(LI.reg);
            LI.removeValNo(VNI);
            if (LI.empty()) {
              ToShrink.remove(&LI);
              eraseVirtReg(Reg);
            }
          }
        }
      }

      if (TheDelegate)
        TheDelegate->LRE_WillEraseInstruction(MI);
      LIS.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      ++NumDCEDeleted;
    }

    if (ToShrink.empty())
      break;

    // Shrink just one live interval. Then delete new dead defs.
    LiveInterval *LI = ToShrink.back();
    ToShrink.pop_back();
    if (foldAsLoad(LI, Dead))
      continue;
    if (TheDelegate)
      TheDelegate->LRE_WillShrinkVirtReg(LI->reg);
    if (!LIS.shrinkToUses(LI, &Dead))
      continue;
    
    // Don't create new intervals for a register being spilled.
    // The new intervals would have to be spilled anyway, so it's not worth it.
    // Also, they currently aren't spilled, so creating them and not spilling
    // them results in incorrect code.
    bool BeingSpilled = false;
    for (unsigned i = 0, e = RegsBeingSpilled.size(); i != e; ++i) {
      if (LI->reg == RegsBeingSpilled[i]) {
        BeingSpilled = true;
        break;
      }
    }
    
    if (BeingSpilled) continue;

    // LI may have been separated, create new intervals.
    LI->RenumberValues(LIS);
    ConnectedVNInfoEqClasses ConEQ(LIS);
    unsigned NumComp = ConEQ.Classify(LI);
    if (NumComp <= 1)
      continue;
    ++NumFracRanges;
    bool IsOriginal = VRM && VRM->getOriginal(LI->reg) == LI->reg;
    DEBUG(dbgs() << NumComp << " components: " << *LI << '\n');
    SmallVector<LiveInterval*, 8> Dups(1, LI);
    for (unsigned i = 1; i != NumComp; ++i) {
      Dups.push_back(&createFrom(LI->reg));
      // If LI is an original interval that hasn't been split yet, make the new
      // intervals their own originals instead of referring to LI. The original
      // interval must contain all the split products, and LI doesn't.
      if (IsOriginal)
        VRM->setIsSplitFromReg(Dups.back()->reg, 0);
      if (TheDelegate)
        TheDelegate->LRE_DidCloneVirtReg(Dups.back()->reg, LI->reg);
    }
    ConEQ.Distribute(&Dups[0], MRI);
    DEBUG({
      for (unsigned i = 0; i != NumComp; ++i)
        dbgs() << '\t' << *Dups[i] << '\n';
    });
  }
}
Example 14
bool
LoadAndStorePromoter::isInstInList(Instruction *I,
                                   const SmallVectorImpl<Instruction*> &Insts)
                                   const {
  return std::find(Insts.begin(), Insts.end(), I) != Insts.end();
}
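
For what it's worth, the same membership test can be written with the is_contained helper from llvm/ADT/STLExtras.h; a sketch:

  return is_contained(Insts, I);   // equivalent to the std::find expression above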
Example 15
void SILPerformanceInliner::collectAppliesToInline(
    SILFunction *Caller, SmallVectorImpl<FullApplySite> &Applies,
    DominanceAnalysis *DA, SILLoopAnalysis *LA) {
  DominanceInfo *DT = DA->get(Caller);
  SILLoopInfo *LI = LA->get(Caller);

  ConstantTracker constTracker(Caller);
  DominanceOrder domOrder(&Caller->front(), DT, Caller->size());

  unsigned NumCallerBlocks = Caller->size();

  // Go through all instructions and find candidates for inlining.
  // We do this in dominance order for the constTracker.
  SmallVector<FullApplySite, 8> InitialCandidates;
  while (SILBasicBlock *block = domOrder.getNext()) {
    constTracker.beginBlock();
    unsigned loopDepth = LI->getLoopDepth(block);
    for (auto I = block->begin(), E = block->end(); I != E; ++I) {
      constTracker.trackInst(&*I);

      if (!FullApplySite::isa(&*I))
        continue;

      FullApplySite AI = FullApplySite(&*I);

      DEBUG(llvm::dbgs() << "    Check:" << *I);

      auto *Callee = getEligibleFunction(AI);
      if (Callee) {
        if (isProfitableToInline(AI, loopDepth, DA, LA, constTracker,
                                 NumCallerBlocks))
          InitialCandidates.push_back(AI);
      }
    }
    domOrder.pushChildrenIf(block, [&] (SILBasicBlock *child) {
      if (ColdBlockInfo::isSlowPath(block, child)) {
        // Handle cold blocks separately.
        visitColdBlocks(InitialCandidates, child, DT);
        return false;
      }
      return true;
    });
  }

  // Calculate how many times a callee is called from this caller.
  llvm::DenseMap<SILFunction *, unsigned> CalleeCount;
  for (auto AI : InitialCandidates) {
    SILFunction *Callee = AI.getCalleeFunction();
    assert(Callee && "apply_inst does not have a direct callee anymore");
    CalleeCount[Callee]++;
  }

  // Now copy each candidate callee that has a small enough number of
  // call sites into the final set of call sites.
  for (auto AI : InitialCandidates) {
    SILFunction *Callee = AI.getCalleeFunction();
    assert(Callee && "apply_inst does not have a direct callee anymore");

    const unsigned CallsToCalleeThreshold = 1024;
    if (CalleeCount[Callee] <= CallsToCalleeThreshold)
      Applies.push_back(AI);
  }
}
Example 16
/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
    MachineFunction *MF = Root.getParent()->getParent();
    MachineRegisterInfo &MRI = MF->getRegInfo();
    const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
    const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
    const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

    // This array encodes the operand index for each parameter because the
    // operands may be commuted. Each row corresponds to a pattern value,
    // and each column specifies the index of A, B, X, Y.
    unsigned OpIdx[4][4] = {
        { 1, 1, 2, 2 },
        { 1, 2, 2, 1 },
        { 2, 1, 1, 2 },
        { 2, 2, 1, 1 }
    };

    int Row;
    switch (Pattern) {
    case MachineCombinerPattern::REASSOC_AX_BY:
        Row = 0;
        break;
    case MachineCombinerPattern::REASSOC_AX_YB:
        Row = 1;
        break;
    case MachineCombinerPattern::REASSOC_XA_BY:
        Row = 2;
        break;
    case MachineCombinerPattern::REASSOC_XA_YB:
        Row = 3;
        break;
    default:
        llvm_unreachable("unexpected MachineCombinerPattern");
    }

    MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
    MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
    MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
    MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
    MachineOperand &OpC = Root.getOperand(0);

    unsigned RegA = OpA.getReg();
    unsigned RegB = OpB.getReg();
    unsigned RegX = OpX.getReg();
    unsigned RegY = OpY.getReg();
    unsigned RegC = OpC.getReg();

    if (TargetRegisterInfo::isVirtualRegister(RegA))
        MRI.constrainRegClass(RegA, RC);
    if (TargetRegisterInfo::isVirtualRegister(RegB))
        MRI.constrainRegClass(RegB, RC);
    if (TargetRegisterInfo::isVirtualRegister(RegX))
        MRI.constrainRegClass(RegX, RC);
    if (TargetRegisterInfo::isVirtualRegister(RegY))
        MRI.constrainRegClass(RegY, RC);
    if (TargetRegisterInfo::isVirtualRegister(RegC))
        MRI.constrainRegClass(RegC, RC);

    // Create a new virtual register for the result of (X op Y) instead of
    // recycling RegB because the MachineCombiner's computation of the critical
    // path requires a new register definition rather than an existing one.
    unsigned NewVR = MRI.createVirtualRegister(RC);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

    unsigned Opcode = Root.getOpcode();
    bool KillA = OpA.isKill();
    bool KillX = OpX.isKill();
    bool KillY = OpY.isKill();

    // Create new instructions for insertion.
    MachineInstrBuilder MIB1 =
        BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
        .addReg(RegX, getKillRegState(KillX))
        .addReg(RegY, getKillRegState(KillY));
    MachineInstrBuilder MIB2 =
        BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
        .addReg(RegA, getKillRegState(KillA))
        .addReg(NewVR, getKillRegState(true));

    setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

    // Record new instructions for insertion and old instructions for deletion.
    InsInstrs.push_back(MIB1);
    InsInstrs.push_back(MIB2);
    DelInstrs.push_back(&Prev);
    DelInstrs.push_back(&Root);
}
Example 17
SDValue WebAssemblyTargetLowering::LowerCall(
    CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!CallingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  // WebAssembly doesn't currently support explicit tail calls. If they are
  // required, fail. Otherwise, just disable them.
  if ((CallConv == CallingConv::Fast && CLI.IsTailCall &&
       MF.getTarget().Options.GuaranteedTailCallOpt) ||
      (CLI.CS && CLI.CS->isMustTailCall()))
    fail(DL, DAG, "WebAssembly doesn't support tail call yet");
  CLI.IsTailCall = false;

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  if (Ins.size() > 1)
    fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet");

  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  for (unsigned i = 0; i < Outs.size(); ++i) {
    const ISD::OutputArg &Out = Outs[i];
    SDValue &OutVal = OutVals[i];
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      auto *MFI = MF.getFrameInfo();
      int FI = MFI->CreateStackObject(Out.Flags.getByValSize(),
                                      Out.Flags.getByValAlign(),
                                      /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
  }

  bool IsVarArg = CLI.IsVarArg;
  unsigned NumFixedArgs = CLI.NumFixedArgs;

  auto PtrVT = getPointerTy(Layout);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (SDValue Arg :
         make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
                                             Layout.getABITypeAlignment(Ty));
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo()->CreateStackObject(NumBytes,
                                                  Layout.getStackAlignment(),
                                                  /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg :
         make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(DAG.getStore(
          Chain, DL, Arg, Add,
          MachinePointerInfo::getFixedStack(MF, FI, Offset), false, false, 0));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg) Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }
  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res =
      DAG.getNode(Ins.empty() ? WebAssemblyISD::CALL0 : WebAssemblyISD::CALL1,
                  DL, InTyList, Ops);
  if (Ins.empty()) {
    Chain = Res;
  } else {
    InVals.push_back(Res);
    Chain = Res.getValue(1);
  }

  return Chain;
}
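The vararg path above lays out every non-fixed argument in a single stack buffer, rounding each offset up to the argument's ABI alignment before passing a pointer to that buffer as the last call operand. Below is a minimal standalone sketch of that offset computation; VarArgBuffer and its allocate() are hypothetical stand-ins, not the LLVM CCState API.

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

// Hypothetical stand-in for CCState::AllocateStack: bump-allocate a slot of
// the given size, rounded up to the given alignment, and return its offset.
struct VarArgBuffer {
  uint64_t NextOffset = 0;
  uint64_t allocate(uint64_t Size, uint64_t Align) {
    assert(Align && (Align & (Align - 1)) == 0 && "alignment must be a power of two");
    uint64_t Offset = (NextOffset + Align - 1) & ~(Align - 1); // round up
    NextOffset = Offset + Size;
    return Offset;
  }
};

int main() {
  // Three hypothetical non-fixed arguments: i32, f64, i32 (size, alignment).
  std::vector<std::pair<uint64_t, uint64_t>> Args = {{4, 4}, {8, 8}, {4, 4}};
  VarArgBuffer Buf;
  std::vector<uint64_t> Offsets;
  for (const auto &A : Args)
    Offsets.push_back(Buf.allocate(A.first, A.second)); // 0, 8, 16
  // Buf.NextOffset now plays the role of getAlignedCallFrameSize() above.
  return Offsets.back() == 16 ? 0 : 1;
}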
Example no. 18
// First thing we need to do is scan the whole function for values that are
// live across unwind edges.  Each value that is live across an unwind edge
// we spill into a stack location, guaranteeing that there is nothing live
// across the unwind edge.  This process also splits all critical edges
// coming out of invoke's.
void LowerInvoke::
splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes) {
  // First step, split all critical edges from invoke instructions.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    InvokeInst *II = Invokes[i];
    SplitCriticalEdge(II, 0, this);
    SplitCriticalEdge(II, 1, this);
    assert(!isa<PHINode>(II->getNormalDest()->begin()) &&
           !isa<PHINode>(II->getUnwindDest()->begin()) &&
           "critical edge splitting left single entry phi nodes?");
  }

  Function *F = Invokes.back()->getParent()->getParent();

  // To avoid having to handle incoming arguments specially, we lower each arg
  // to a copy instruction in the entry block.  This ensures that the argument
  // value itself cannot be live across the entry block.
  BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
  while (isa<AllocaInst>(AfterAllocaInsertPt) &&
        isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
    ++AfterAllocaInsertPt;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI) {
    Type *Ty = AI->getType();
    // Aggregate types can't be cast, but are legal argument types, so we have
    // to handle them differently. We use an extract/insert pair as a
    // lightweight method to achieve the same goal.
    if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
      Instruction *EI = ExtractValueInst::Create(AI, 0, "",AfterAllocaInsertPt);
      Instruction *NI = InsertValueInst::Create(AI, EI, 0);
      NI->insertAfter(EI);
      AI->replaceAllUsesWith(NI);
      // Set the operand of the instructions back to the AllocaInst.
      EI->setOperand(0, AI);
      NI->setOperand(0, AI);
    } else {
      // This is always a no-op cast because we're casting AI to AI->getType()
      // so src and destination types are identical. BitCast is the only
      // possibility.
      CastInst *NC = new BitCastInst(
        AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
      AI->replaceAllUsesWith(NC);
      // Set the operand of the cast instruction back to the AllocaInst.
      // Normally it's forbidden to replace a CastInst's operand because it
      // could cause the opcode to reflect an illegal conversion. However,
      // we're replacing it here with the same value it was constructed with.
      // We do this because the above replaceAllUsesWith() clobbered the
      // operand, but we want this one to remain.
      NC->setOperand(0, AI);
    }
  }

  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      // Ignore obvious cases we don't have to handle.  In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block.  Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      SmallVector<Instruction*,16> Users;
      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
           UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Scan all of the uses and see if the live range is live across an unwind
      // edge.  If we find a use live across an invoke edge, create an alloca
      // and spill the value.
      std::set<InvokeInst*> InvokesWithStoreInserted;

      // Find all of the blocks that this value is live in.
      std::set<BasicBlock*> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          NeedsSpill = true;
        }
      }

      // If we decided we need a spill, do it.
      if (NeedsSpill) {
        ++NumSpilled;
        DemoteRegToStack(*Inst, true);
      }
    }
}
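The spill decision above hinges on knowing every block a value is live in, which the pass computes by walking backwards from each use toward the definition. Below is a minimal standalone sketch of that backward marking over a toy CFG; Block and this MarkBlocksLiveIn are hypothetical stand-ins, not the LLVM versions.

#include <set>
#include <vector>

struct Block {
  std::vector<Block *> Preds; // predecessor blocks
};

// Mark BB and every block that can reach it (stopping at DefBlock) as live.
static void MarkBlocksLiveIn(Block *BB, Block *DefBlock,
                             std::set<Block *> &LiveBBs) {
  if (!LiveBBs.insert(BB).second)
    return;                      // already marked
  if (BB == DefBlock)
    return;                      // liveness stops at the definition
  for (Block *Pred : BB->Preds)
    MarkBlocksLiveIn(Pred, DefBlock, LiveBBs);
}

int main() {
  Block Def, Mid, Use;
  Mid.Preds = {&Def};
  Use.Preds = {&Mid};
  std::set<Block *> LiveBBs{&Def};
  MarkBlocksLiveIn(&Use, &Def, LiveBBs); // marks Use and Mid, stops at Def
  return LiveBBs.size() == 3 ? 0 : 1;
}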
Example no. 19
/// parseGenericWhereClause - Parse a 'where' clause, which places additional
/// constraints on generic parameters or types based on them.
///
///   where-clause:
///     'where' requirement (',' requirement) *
///
///   requirement:
///     conformance-requirement
///     same-type-requirement
///
///   conformance-requirement:
///     type-identifier ':' type-identifier
///     type-identifier ':' type-composition
///
///   same-type-requirement:
///     type-identifier '==' type
bool Parser::parseGenericWhereClause(
               SourceLoc &WhereLoc,
               SmallVectorImpl<RequirementRepr> &Requirements) {
  // Parse the 'where'.
  WhereLoc = consumeToken(tok::kw_where);
  bool Invalid = false;
  do {
    // Parse the leading type-identifier.
    ParserResult<TypeRepr> FirstType = parseTypeIdentifier();
    if (FirstType.isNull() || FirstType.hasCodeCompletion()) {
      Invalid = true;
      break;
    }

    if (Tok.is(tok::colon)) {
      // A conformance-requirement.
      SourceLoc ColonLoc = consumeToken();

      // Parse the protocol or composition.
      ParserResult<TypeRepr> Protocol;
      if (Tok.is(tok::kw_protocol)) {
        Protocol = parseTypeComposition();
      } else {
        Protocol = parseTypeIdentifier();
      }
      if (Protocol.isNull() || Protocol.hasCodeCompletion()) {
        Invalid = true;
        break;
      }

      // Add the requirement.
      Requirements.push_back(RequirementRepr::getConformance(FirstType.get(),
                                                         ColonLoc,
                                                         Protocol.get()));
    } else if ((Tok.isAnyOperator() && Tok.getText() == "==") ||
               Tok.is(tok::equal)) {
      // A same-type-requirement
      if (Tok.is(tok::equal)) {
        diagnose(Tok, diag::requires_single_equal)
          .fixItReplace(SourceRange(Tok.getLoc()), "==");
      }
      SourceLoc EqualLoc = consumeToken();

      // Parse the second type.
      ParserResult<TypeRepr> SecondType = parseType();
      if (SecondType.isNull() || SecondType.hasCodeCompletion()) {
        Invalid = true;
        break;
      }

      // Add the requirement
      Requirements.push_back(RequirementRepr::getSameType(FirstType.get(),
                                                      EqualLoc,
                                                      SecondType.get()));
    } else {
      diagnose(Tok, diag::expected_requirement_delim);
      Invalid = true;
      break;
    }
    // If there's a comma, keep parsing the list.
  } while (consumeIf(tok::comma));

  return Invalid;
}
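The grammar in the comment above can be exercised with a small standalone recursive-descent sketch over pre-split tokens; Requirement and parseWhereClause are hypothetical names and deliberately ignore diagnostics, code completion, and protocol compositions.

#include <cstddef>
#include <string>
#include <vector>

struct Requirement {
  enum Kind { Conformance, SameType } K;
  std::string First, Second;
};

// Parse "where T : P , U == V" from whitespace-split tokens, mirroring the
// conformance-requirement / same-type-requirement alternatives above.
// Returns true on error, matching the convention of parseGenericWhereClause.
static bool parseWhereClause(const std::vector<std::string> &Toks,
                             std::vector<Requirement> &Reqs) {
  std::size_t I = 0;
  if (I == Toks.size() || Toks[I++] != "where")
    return true; // invalid: missing 'where'
  for (;;) {
    if (I + 2 > Toks.size())
      return true; // invalid: need "<type> <op> <type>"
    std::string First = Toks[I++];
    std::string Op = Toks[I++];
    if (I == Toks.size())
      return true;
    std::string Second = Toks[I++];
    if (Op == ":")
      Reqs.push_back({Requirement::Conformance, First, Second});
    else if (Op == "==")
      Reqs.push_back({Requirement::SameType, First, Second});
    else
      return true; // expected ':' or '=='
    if (I == Toks.size())
      return false; // end of clause, no error
    if (Toks[I++] != ",")
      return true; // expected ',' between requirements
  }
}

int main() {
  std::vector<Requirement> Reqs;
  // Parses one conformance requirement and one same-type requirement.
  return parseWhereClause({"where", "T", ":", "Hashable", ",", "U", "==", "V"},
                          Reqs) ? 1 : 0;
}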
Example no. 20
bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (static_cast<MachineInstr *>(I)->getOpcode() != AMDGPU::JUMP) {
    return false;
  }

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      static_cast<MachineInstr *>(--I)->getOpcode() != AMDGPU::JUMP) {
    if (LastOpc == AMDGPU::JUMP) {
      if(!isPredicated(LastInst)) {
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        MachineInstr *predSet = I;
        while (!isPredicateSetter(predSet->getOpcode())) {
          predSet = --I;
        }
        TBB = LastInst->getOperand(0).getMBB();
        Cond.push_back(predSet->getOperand(1));
        Cond.push_back(predSet->getOperand(2));
        Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
        return false;
      }
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP &&
      isPredicated(SecondLastInst) &&
      LastOpc == AMDGPU::JUMP &&
      !isPredicated(LastInst)) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}
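AnalyzeBranch above classifies a block by looking only at its last one or two terminators: a lone unconditional jump, a lone predicated jump, or a predicated jump followed by an unconditional one. Below is a standalone sketch of that classification over toy instructions; Inst, BranchShape, and classify are hypothetical names.

#include <vector>

struct Inst {
  bool IsJump;       // terminator?
  bool IsPredicated; // conditional?
  int Target;        // hypothetical branch-target id
};

enum class BranchShape {
  FallThrough, Unconditional, Conditional, CondThenUncond, Unknown
};

// Classify the block by looking at (at most) its last two terminators,
// mirroring the structure of AnalyzeBranch above.
static BranchShape classify(const std::vector<Inst> &Block, int &TBB, int &FBB) {
  if (Block.empty() || !Block.back().IsJump)
    return BranchShape::FallThrough;
  const Inst &Last = Block.back();
  bool HasSecond = Block.size() >= 2 && Block[Block.size() - 2].IsJump;
  if (!HasSecond) {
    TBB = Last.Target;
    return Last.IsPredicated ? BranchShape::Conditional
                             : BranchShape::Unconditional;
  }
  const Inst &SecondLast = Block[Block.size() - 2];
  if (SecondLast.IsPredicated && !Last.IsPredicated) {
    TBB = SecondLast.Target; // taken edge of the conditional jump
    FBB = Last.Target;       // fall-back unconditional jump
    return BranchShape::CondThenUncond;
  }
  return BranchShape::Unknown; // e.g. two unconditional jumps
}

int main() {
  int TBB = -1, FBB = -1;
  std::vector<Inst> BB = {{false, false, 0}, {true, true, 1}, {true, false, 2}};
  return classify(BB, TBB, FBB) == BranchShape::CondThenUncond ? 0 : 1;
}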
Example no. 21
void Diagnostic::
FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
                 SmallVectorImpl<char> &OutStr) const {

  /// FormattedArgs - Keep track of all of the arguments formatted by
  /// ConvertArgToString and pass them into subsequent calls to
  /// ConvertArgToString, allowing the implementation to avoid redundancies in
  /// obvious cases.
  SmallVector<DiagnosticsEngine::ArgumentValue, 8> FormattedArgs;

  /// QualTypeVals - Pass a vector of arrays so that QualType names can be
  /// compared to see if more information is needed to be printed.
  SmallVector<intptr_t, 2> QualTypeVals;
  SmallVector<char, 64> Tree;

  for (unsigned i = 0, e = getNumArgs(); i < e; ++i)
    if (getArgKind(i) == DiagnosticsEngine::ak_qualtype)
      QualTypeVals.push_back(getRawArg(i));

  while (DiagStr != DiagEnd) {
    if (DiagStr[0] != '%') {
      // Append non-%0 substrings to Str if we have one.
      const char *StrEnd = std::find(DiagStr, DiagEnd, '%');
      OutStr.append(DiagStr, StrEnd);
      DiagStr = StrEnd;
      continue;
    } else if (isPunctuation(DiagStr[1])) {
      OutStr.push_back(DiagStr[1]);  // %% -> %.
      DiagStr += 2;
      continue;
    }

    // Skip the %.
    ++DiagStr;

    // This must be a placeholder for a diagnostic argument.  The format for a
    // placeholder is one of "%0", "%modifier0", or "%modifier{arguments}0".
    // The digit is a number from 0-9 indicating which argument this comes from.
    // The modifier is a string of characters from the set [-a-z]+, and the
    // arguments are a brace-enclosed string.
    const char *Modifier = 0, *Argument = 0;
    unsigned ModifierLen = 0, ArgumentLen = 0;

    // Check to see if we have a modifier.  If so eat it.
    if (!isDigit(DiagStr[0])) {
      Modifier = DiagStr;
      while (DiagStr[0] == '-' ||
             (DiagStr[0] >= 'a' && DiagStr[0] <= 'z'))
        ++DiagStr;
      ModifierLen = DiagStr-Modifier;

      // If we have an argument, get it next.
      if (DiagStr[0] == '{') {
        ++DiagStr; // Skip {.
        Argument = DiagStr;

        DiagStr = ScanFormat(DiagStr, DiagEnd, '}');
        assert(DiagStr != DiagEnd && "Mismatched {}'s in diagnostic string!");
        ArgumentLen = DiagStr-Argument;
        ++DiagStr;  // Skip }.
      }
    }

    assert(isDigit(*DiagStr) && "Invalid format for argument in diagnostic");
    unsigned ArgNo = *DiagStr++ - '0';

    // Only used for type diffing.
    unsigned ArgNo2 = ArgNo;

    DiagnosticsEngine::ArgumentKind Kind = getArgKind(ArgNo);
    if (ModifierIs(Modifier, ModifierLen, "diff")) {
      assert(*DiagStr == ',' && isDigit(*(DiagStr + 1)) &&
             "Invalid format for diff modifier");
      ++DiagStr;  // Comma.
      ArgNo2 = *DiagStr++ - '0';
      DiagnosticsEngine::ArgumentKind Kind2 = getArgKind(ArgNo2);
      if (Kind == DiagnosticsEngine::ak_qualtype &&
          Kind2 == DiagnosticsEngine::ak_qualtype)
        Kind = DiagnosticsEngine::ak_qualtype_pair;
      else {
        // %diff only supports QualTypes.  For other kinds of arguments,
        // use the default printing.  For example, if the modifier is:
        //   "%diff{compare $ to $|other text}1,2"
        // treat it as:
        //   "compare %1 to %2"
        const char *Pipe = ScanFormat(Argument, Argument + ArgumentLen, '|');
        const char *FirstDollar = ScanFormat(Argument, Pipe, '$');
        const char *SecondDollar = ScanFormat(FirstDollar + 1, Pipe, '$');
        const char ArgStr1[] = { '%', static_cast<char>('0' + ArgNo) };
        const char ArgStr2[] = { '%', static_cast<char>('0' + ArgNo2) };
        FormatDiagnostic(Argument, FirstDollar, OutStr);
        FormatDiagnostic(ArgStr1, ArgStr1 + 2, OutStr);
        FormatDiagnostic(FirstDollar + 1, SecondDollar, OutStr);
        FormatDiagnostic(ArgStr2, ArgStr2 + 2, OutStr);
        FormatDiagnostic(SecondDollar + 1, Pipe, OutStr);
        continue;
      }
    }
    
    switch (Kind) {
    // ---- STRINGS ----
    case DiagnosticsEngine::ak_std_string: {
      const std::string &S = getArgStdStr(ArgNo);
      assert(ModifierLen == 0 && "No modifiers for strings yet");
      OutStr.append(S.begin(), S.end());
      break;
    }
    case DiagnosticsEngine::ak_c_string: {
      const char *S = getArgCStr(ArgNo);
      assert(ModifierLen == 0 && "No modifiers for strings yet");

      // Don't crash if get passed a null pointer by accident.
      if (!S)
        S = "(null)";

      OutStr.append(S, S + strlen(S));
      break;
    }
    // ---- INTEGERS ----
    case DiagnosticsEngine::ak_sint: {
      int Val = getArgSInt(ArgNo);

      if (ModifierIs(Modifier, ModifierLen, "select")) {
        HandleSelectModifier(*this, (unsigned)Val, Argument, ArgumentLen,
                             OutStr);
      } else if (ModifierIs(Modifier, ModifierLen, "s")) {
        HandleIntegerSModifier(Val, OutStr);
      } else if (ModifierIs(Modifier, ModifierLen, "plural")) {
        HandlePluralModifier(*this, (unsigned)Val, Argument, ArgumentLen,
                             OutStr);
      } else if (ModifierIs(Modifier, ModifierLen, "ordinal")) {
        HandleOrdinalModifier((unsigned)Val, OutStr);
      } else {
        assert(ModifierLen == 0 && "Unknown integer modifier");
        llvm::raw_svector_ostream(OutStr) << Val;
      }
      break;
    }
    case DiagnosticsEngine::ak_uint: {
      unsigned Val = getArgUInt(ArgNo);

      if (ModifierIs(Modifier, ModifierLen, "select")) {
        HandleSelectModifier(*this, Val, Argument, ArgumentLen, OutStr);
      } else if (ModifierIs(Modifier, ModifierLen, "s")) {
        HandleIntegerSModifier(Val, OutStr);
      } else if (ModifierIs(Modifier, ModifierLen, "plural")) {
        HandlePluralModifier(*this, (unsigned)Val, Argument, ArgumentLen,
                             OutStr);
      } else if (ModifierIs(Modifier, ModifierLen, "ordinal")) {
        HandleOrdinalModifier(Val, OutStr);
      } else {
        assert(ModifierLen == 0 && "Unknown integer modifier");
        llvm::raw_svector_ostream(OutStr) << Val;
      }
      break;
    }
    // ---- NAMES and TYPES ----
    case DiagnosticsEngine::ak_identifierinfo: {
      const IdentifierInfo *II = getArgIdentifier(ArgNo);
      assert(ModifierLen == 0 && "No modifiers for strings yet");

      // Don't crash if get passed a null pointer by accident.
      if (!II) {
        const char *S = "(null)";
        OutStr.append(S, S + strlen(S));
        continue;
      }

      llvm::raw_svector_ostream(OutStr) << '\'' << II->getName() << '\'';
      break;
    }
    case DiagnosticsEngine::ak_qualtype:
    case DiagnosticsEngine::ak_declarationname:
    case DiagnosticsEngine::ak_nameddecl:
    case DiagnosticsEngine::ak_nestednamespec:
    case DiagnosticsEngine::ak_declcontext:
      getDiags()->ConvertArgToString(Kind, getRawArg(ArgNo),
                                     Modifier, ModifierLen,
                                     Argument, ArgumentLen,
                                     FormattedArgs.data(), FormattedArgs.size(),
                                     OutStr, QualTypeVals);
      break;
    case DiagnosticsEngine::ak_qualtype_pair:
      // Create a struct with all the info needed for printing.
      TemplateDiffTypes TDT;
      TDT.FromType = getRawArg(ArgNo);
      TDT.ToType = getRawArg(ArgNo2);
      TDT.ElideType = getDiags()->ElideType;
      TDT.ShowColors = getDiags()->ShowColors;
      TDT.TemplateDiffUsed = false;
      intptr_t val = reinterpret_cast<intptr_t>(&TDT);

      const char *ArgumentEnd = Argument + ArgumentLen;
      const char *Pipe = ScanFormat(Argument, ArgumentEnd, '|');

      // Print the tree.  If this diagnostic already has a tree, skip the
      // second tree.
      if (getDiags()->PrintTemplateTree && Tree.empty()) {
        TDT.PrintFromType = true;
        TDT.PrintTree = true;
        getDiags()->ConvertArgToString(Kind, val,
                                       Modifier, ModifierLen,
                                       Argument, ArgumentLen,
                                       FormattedArgs.data(),
                                       FormattedArgs.size(),
                                       Tree, QualTypeVals);
        // If there is no tree information, fall back to regular printing.
        if (!Tree.empty()) {
          FormatDiagnostic(Pipe + 1, ArgumentEnd, OutStr);
          break;
        }
      }

      // Non-tree printing, also the fall-back when tree printing fails.
      // The fall-back is triggered when the types compared are not templates.
      const char *FirstDollar = ScanFormat(Argument, ArgumentEnd, '$');
      const char *SecondDollar = ScanFormat(FirstDollar + 1, ArgumentEnd, '$');

      // Append before text
      FormatDiagnostic(Argument, FirstDollar, OutStr);

      // Append first type
      TDT.PrintTree = false;
      TDT.PrintFromType = true;
      getDiags()->ConvertArgToString(Kind, val,
                                     Modifier, ModifierLen,
                                     Argument, ArgumentLen,
                                     FormattedArgs.data(), FormattedArgs.size(),
                                     OutStr, QualTypeVals);
      if (!TDT.TemplateDiffUsed)
        FormattedArgs.push_back(std::make_pair(DiagnosticsEngine::ak_qualtype,
                                               TDT.FromType));

      // Append middle text
      FormatDiagnostic(FirstDollar + 1, SecondDollar, OutStr);

      // Append second type
      TDT.PrintFromType = false;
      getDiags()->ConvertArgToString(Kind, val,
                                     Modifier, ModifierLen,
                                     Argument, ArgumentLen,
                                     FormattedArgs.data(), FormattedArgs.size(),
                                     OutStr, QualTypeVals);
      if (!TDT.TemplateDiffUsed)
        FormattedArgs.push_back(std::make_pair(DiagnosticsEngine::ak_qualtype,
                                               TDT.ToType));

      // Append end text
      FormatDiagnostic(SecondDollar + 1, Pipe, OutStr);
      break;
    }
    
    // Remember this argument info for subsequent formatting operations.  Turn
    // std::strings into null-terminated strings so they are handled the same
    // way as the other argument kinds.
    if (Kind == DiagnosticsEngine::ak_qualtype_pair)
      continue;
    else if (Kind != DiagnosticsEngine::ak_std_string)
      FormattedArgs.push_back(std::make_pair(Kind, getRawArg(ArgNo)));
    else
      FormattedArgs.push_back(std::make_pair(DiagnosticsEngine::ak_c_string,
                                        (intptr_t)getArgStdStr(ArgNo).c_str()));
    
  }

  // Append the type tree to the end of the diagnostics.
  OutStr.append(Tree.begin(), Tree.end());
}
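The scanning loop above copies plain text verbatim, treats "%%" as an escaped percent, and reads the trailing digit of each placeholder as an argument index. Below is a standalone sketch of that structure for bare "%0".."%9" placeholders, without modifiers or argument kinds; formatSimple is a hypothetical name, not the Clang API.

#include <cstddef>
#include <string>
#include <vector>

// Expand "%0".."%9" against Args and turn "%%" into a literal '%',
// mirroring the scanning structure of FormatDiagnostic above.
static std::string formatSimple(const std::string &Fmt,
                                const std::vector<std::string> &Args) {
  std::string Out;
  for (std::size_t I = 0; I < Fmt.size();) {
    if (Fmt[I] != '%') {                           // copy plain text verbatim
      Out += Fmt[I++];
      continue;
    }
    if (I + 1 < Fmt.size() && Fmt[I + 1] == '%') { // "%%" -> "%"
      Out += '%';
      I += 2;
      continue;
    }
    if (I + 1 < Fmt.size() && Fmt[I + 1] >= '0' && Fmt[I + 1] <= '9') {
      std::size_t ArgNo = Fmt[I + 1] - '0';        // placeholder index
      if (ArgNo < Args.size())
        Out += Args[ArgNo];
      I += 2;
      continue;
    }
    Out += Fmt[I++];                               // stray '%', keep as-is
  }
  return Out;
}

int main() {
  std::string S =
      formatSimple("cannot convert %0 to %1 (100%%)", {"'int'", "'float'"});
  return S == "cannot convert 'int' to 'float' (100%)" ? 0 : 1;
}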
Example no. 22
static void foldOperand(MachineOperand &OpToFold, MachineInstr *UseMI,
                        unsigned UseOpIdx,
                        std::vector<FoldCandidate> &FoldList,
                        SmallVectorImpl<MachineInstr *> &CopiesToReplace,
                        const SIInstrInfo *TII, const SIRegisterInfo &TRI,
                        MachineRegisterInfo &MRI) {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(*UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands unless it is a full
    // copy, since a subregister use tied to a full register def doesn't really
    // make sense. E.g. don't fold:
    //
    // %vreg1 = COPY %vreg0:sub1
    // %vreg2<tied3> = V_MAC_F32 %vreg3, %vreg4, %vreg1<tied0>
    //
    //  into
    // %vreg2<tied3> = V_MAC_F32 %vreg3, %vreg4, %vreg0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  bool FoldingImm = OpToFold.isImm();
  APInt Imm;

  if (FoldingImm) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI.getRegClass(UseReg) :
      TRI.getPhysRegClass(UseReg);

    Imm = APInt(64, OpToFold.getImm());

    const MCInstrDesc &FoldDesc = TII->get(OpToFold.getParent()->getOpcode());
    const TargetRegisterClass *FoldRC =
        TRI.getRegClass(FoldDesc.OpInfo[0].RegClass);

    // Split 64-bit constants into 32-bits for folding.
    if (FoldRC->getSize() == 8 && UseOp.getSubReg()) {
      if (UseRC->getSize() != 8)
        return;

      if (UseOp.getSubReg() == AMDGPU::sub0) {
        Imm = Imm.getLoBits(32);
      } else {
        assert(UseOp.getSubReg() == AMDGPU::sub1);
        Imm = Imm.getHiBits(32);
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.
    if (UseMI->getOpcode() == AMDGPU::COPY) {
      unsigned DestReg = UseMI->getOperand(0).getReg();
      const TargetRegisterClass *DestRC
        = TargetRegisterInfo::isVirtualRegister(DestReg) ?
        MRI.getRegClass(DestReg) :
        TRI.getPhysRegClass(DestReg);

      unsigned MovOp = TII->getMovOpcode(DestRC);
      if (MovOp == AMDGPU::COPY)
        return;

      UseMI->setDesc(TII->get(MovOp));
      CopiesToReplace.push_back(UseMI);
    }
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->getOpcode() == AMDGPU::REG_SEQUENCE) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
         RSUse = MRI.use_begin(RegSeqDstReg),
         RSE = MRI.use_end(); RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace, TII, TRI, MRI);
    }
    return;
  }

  const MCInstrDesc &UseDesc = UseMI->getDesc();

  // Don't fold into target independent nodes.  Target independent opcodes
  // don't have defined register classes.
  if (UseDesc.isVariadic() ||
      UseDesc.OpInfo[UseOpIdx].RegClass == -1)
    return;

  if (FoldingImm) {
    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

  // FIXME: We could try to change the instruction from 64-bit to 32-bit
  // to enable more folding opportunities.  The shrink operands pass
  // already does this.
  return;
}
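When the folded operand is a 64-bit literal used through sub0 or sub1, the code above folds only the corresponding 32-bit half. Below is a standalone sketch of that split using plain integers instead of APInt; SubReg and foldHalf are hypothetical names.

#include <cstdint>

// Hypothetical stand-ins for AMDGPU::sub0 / AMDGPU::sub1.
enum class SubReg { Sub0, Sub1 };

// Extract the half of a 64-bit immediate selected by a subregister index,
// mirroring the sub0/sub1 handling in foldOperand above.
static uint32_t foldHalf(uint64_t Imm, SubReg Idx) {
  return Idx == SubReg::Sub0 ? static_cast<uint32_t>(Imm)        // low 32 bits
                             : static_cast<uint32_t>(Imm >> 32); // high 32 bits
}

int main() {
  uint64_t Imm = 0x123456789ABCDEF0ULL;
  return (foldHalf(Imm, SubReg::Sub0) == 0x9ABCDEF0u &&
          foldHalf(Imm, SubReg::Sub1) == 0x12345678u) ? 0 : 1;
}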
Example no. 23
/// SplitLandingPadPredecessors - This method transforms the landing pad,
/// OrigBB, by introducing two new basic blocks into the function. One of those
/// new basic blocks gets the predecessors listed in Preds. The other basic
/// block gets the remaining predecessors of OrigBB. The landingpad instruction
/// in OrigBB is cloned into both of the new basic blocks. The new blocks are given
/// the suffixes 'Suffix1' and 'Suffix2', and are returned in the NewBBs vector.
///
/// This currently updates the LLVM IR, AliasAnalysis, DominatorTree,
/// DominanceFrontier, LoopInfo, and LCSSA but no other analyses. In particular,
/// it does not preserve LoopSimplify (because it's complicated to handle the
/// case where one of the edges being split is an exit of a loop with other
/// exits).
///
void llvm::SplitLandingPadPredecessors(BasicBlock *OrigBB,
                                       ArrayRef<BasicBlock *> Preds,
                                       const char *Suffix1, const char *Suffix2,
                                       SmallVectorImpl<BasicBlock *> &NewBBs,
                                       DominatorTree *DT, LoopInfo *LI,
                                       bool PreserveLCSSA) {
  assert(OrigBB->isLandingPad() && "Trying to split a non-landing pad!");

  // Create a new basic block for OrigBB's predecessors listed in Preds. Insert
  // it right before the original block.
  BasicBlock *NewBB1 = BasicBlock::Create(OrigBB->getContext(),
                                          OrigBB->getName() + Suffix1,
                                          OrigBB->getParent(), OrigBB);
  NewBBs.push_back(NewBB1);

  // The new block unconditionally branches to the old block.
  BranchInst *BI1 = BranchInst::Create(OrigBB, NewBB1);
  BI1->setDebugLoc(OrigBB->getFirstNonPHI()->getDebugLoc());

  // Move the edges from Preds to point to NewBB1 instead of OrigBB.
  for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
    // This is slightly more strict than necessary; the minimum requirement
    // is that there be no more than one indirectbr branching to BB. And
    // all BlockAddress uses would need to be updated.
    assert(!isa<IndirectBrInst>(Preds[i]->getTerminator()) &&
           "Cannot split an edge from an IndirectBrInst");
    Preds[i]->getTerminator()->replaceUsesOfWith(OrigBB, NewBB1);
  }

  bool HasLoopExit = false;
  UpdateAnalysisInformation(OrigBB, NewBB1, Preds, DT, LI, PreserveLCSSA,
                            HasLoopExit);

  // Update the PHI nodes in OrigBB with the values coming from NewBB1.
  UpdatePHINodes(OrigBB, NewBB1, Preds, BI1, HasLoopExit);

  // Move the remaining edges from OrigBB to point to NewBB2.
  SmallVector<BasicBlock*, 8> NewBB2Preds;
  for (pred_iterator i = pred_begin(OrigBB), e = pred_end(OrigBB);
       i != e; ) {
    BasicBlock *Pred = *i++;
    if (Pred == NewBB1) continue;
    assert(!isa<IndirectBrInst>(Pred->getTerminator()) &&
           "Cannot split an edge from an IndirectBrInst");
    NewBB2Preds.push_back(Pred);
    e = pred_end(OrigBB);
  }

  BasicBlock *NewBB2 = nullptr;
  if (!NewBB2Preds.empty()) {
    // Create another basic block for the rest of OrigBB's predecessors.
    NewBB2 = BasicBlock::Create(OrigBB->getContext(),
                                OrigBB->getName() + Suffix2,
                                OrigBB->getParent(), OrigBB);
    NewBBs.push_back(NewBB2);

    // The new block unconditionally branches to the old block.
    BranchInst *BI2 = BranchInst::Create(OrigBB, NewBB2);
    BI2->setDebugLoc(OrigBB->getFirstNonPHI()->getDebugLoc());

    // Move the remaining edges from OrigBB to point to NewBB2.
    for (SmallVectorImpl<BasicBlock*>::iterator
           i = NewBB2Preds.begin(), e = NewBB2Preds.end(); i != e; ++i)
      (*i)->getTerminator()->replaceUsesOfWith(OrigBB, NewBB2);

    // Update DominatorTree, LoopInfo, and LCSSA analysis information.
    HasLoopExit = false;
    UpdateAnalysisInformation(OrigBB, NewBB2, NewBB2Preds, DT, LI,
                              PreserveLCSSA, HasLoopExit);

    // Update the PHI nodes in OrigBB with the values coming from NewBB2.
    UpdatePHINodes(OrigBB, NewBB2, NewBB2Preds, BI2, HasLoopExit);
  }

  LandingPadInst *LPad = OrigBB->getLandingPadInst();
  Instruction *Clone1 = LPad->clone();
  Clone1->setName(Twine("lpad") + Suffix1);
  NewBB1->getInstList().insert(NewBB1->getFirstInsertionPt(), Clone1);

  if (NewBB2) {
    Instruction *Clone2 = LPad->clone();
    Clone2->setName(Twine("lpad") + Suffix2);
    NewBB2->getInstList().insert(NewBB2->getFirstInsertionPt(), Clone2);

    // Create a PHI node for the two cloned landingpad instructions.
    PHINode *PN = PHINode::Create(LPad->getType(), 2, "lpad.phi", LPad);
    PN->addIncoming(Clone1, NewBB1);
    PN->addIncoming(Clone2, NewBB2);
    LPad->replaceAllUsesWith(PN);
    LPad->eraseFromParent();
  } else {
    // There is no second clone. Just replace the landing pad with the first
    // clone.
    LPad->replaceAllUsesWith(Clone1);
    LPad->eraseFromParent();
  }
}
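The function above retargets the predecessors listed in Preds to the first new block and sweeps every remaining predecessor into a second list for the optional second block. Below is a standalone sketch of that partition step with plain containers; partitionPreds is a hypothetical name.

#include <set>
#include <string>
#include <vector>

// Split OrigPreds into the explicitly listed group and "everything else",
// mirroring how SplitLandingPadPredecessors above builds the predecessor
// sets for NewBB1 and NewBB2.
static void partitionPreds(const std::vector<std::string> &OrigPreds,
                           const std::vector<std::string> &Listed,
                           std::vector<std::string> &Group1,
                           std::vector<std::string> &Group2) {
  std::set<std::string> ListedSet(Listed.begin(), Listed.end());
  for (const std::string &P : OrigPreds) {
    if (ListedSet.count(P))
      Group1.push_back(P);
    else
      Group2.push_back(P);
  }
}

int main() {
  std::vector<std::string> G1, G2;
  partitionPreds({"bb1", "bb2", "bb3"}, {"bb2"}, G1, G2);
  return (G1 == std::vector<std::string>{"bb2"} &&
          G2 == std::vector<std::string>{"bb1", "bb3"}) ? 0 : 1;
}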
Example no. 24
void Pattern::collectVariables(SmallVectorImpl<VarDecl *> &variables) const {
  forEachVariable([&](VarDecl *VD) { variables.push_back(VD); });
}
Example no. 25
bool SparcAsmParser::expandSET(MCInst &Inst, SMLoc IDLoc,
                               SmallVectorImpl<MCInst> &Instructions) {
  MCOperand MCRegOp = Inst.getOperand(0);
  MCOperand MCValOp = Inst.getOperand(1);
  assert(MCRegOp.isReg());
  assert(MCValOp.isImm() || MCValOp.isExpr());

  // the imm operand can be either an expression or an immediate.
  bool IsImm = Inst.getOperand(1).isImm();
  int64_t RawImmValue = IsImm ? MCValOp.getImm() : 0;

  // Allow either a signed or unsigned 32-bit immediate.
  if (RawImmValue < -2147483648LL || RawImmValue > 4294967295LL) {
    return Error(IDLoc,
                 "set: argument must be between -2147483648 and 4294967295");
  }

  // If the value was expressed as a large unsigned number, that's ok.
  // We want to see if it "looks like" a small signed number.
  int32_t ImmValue = RawImmValue;
  // For 'set' you can't use 'or' with a negative operand on V9 because
  // that would splat the sign bit across the upper half of the destination
  // register, whereas 'set' is defined to zero the high 32 bits.
  bool IsEffectivelyImm13 =
      IsImm && ((is64Bit() ? 0 : -4096) <= ImmValue && ImmValue < 4096);
  const MCExpr *ValExpr;
  if (IsImm)
    ValExpr = MCConstantExpr::create(ImmValue, getContext());
  else
    ValExpr = MCValOp.getExpr();

  MCOperand PrevReg = MCOperand::createReg(Sparc::G0);

  // If not just a signed imm13 value, then either we use a 'sethi' with a
  // following 'or', or a 'sethi' by itself if there are no more 1 bits.
  // In either case, start with the 'sethi'.
  if (!IsEffectivelyImm13) {
    MCInst TmpInst;
    const MCExpr *Expr = adjustPICRelocation(SparcMCExpr::VK_Sparc_HI, ValExpr);
    TmpInst.setLoc(IDLoc);
    TmpInst.setOpcode(SP::SETHIi);
    TmpInst.addOperand(MCRegOp);
    TmpInst.addOperand(MCOperand::createExpr(Expr));
    Instructions.push_back(TmpInst);
    PrevReg = MCRegOp;
  }

  // The low bits require touching in 3 cases:
  // * A non-immediate value will always require both instructions.
  // * An effectively imm13 value needs only an 'or' instruction.
  // * Otherwise, an immediate that is not effectively imm13 requires the
  //   'or' only if bits remain after clearing the 22 bits that 'sethi' set.
  // If the low bits are known zeros, there's nothing to do.
  // In the second case, and only in that case, must we NOT clear
  // bits of the immediate value via the %lo() assembler function.
  // Note also, the 'or' instruction doesn't mind a large value in the case
  // where the operand to 'set' was 0xFFFFFzzz - it does exactly what you mean.
  if (!IsImm || IsEffectivelyImm13 || (ImmValue & 0x3ff)) {
    MCInst TmpInst;
    const MCExpr *Expr;
    if (IsEffectivelyImm13)
      Expr = ValExpr;
    else
      Expr = adjustPICRelocation(SparcMCExpr::VK_Sparc_LO, ValExpr);
    TmpInst.setLoc(IDLoc);
    TmpInst.setOpcode(SP::ORri);
    TmpInst.addOperand(MCRegOp);
    TmpInst.addOperand(PrevReg);
    TmpInst.addOperand(MCOperand::createExpr(Expr));
    Instructions.push_back(TmpInst);
  }
  return false;
}
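The expansion above emits a 'sethi' for the upper bits and an 'or' for the low 10 bits, skipping whichever half is not needed. Below is a standalone sketch of that decision using plain arithmetic and simplified mnemonic strings; expandSet here is a hypothetical helper, not the SparcAsmParser method.

#include <cstdint>
#include <string>
#include <vector>

// Decide which of 'sethi' and 'or' the SPARC 'set' pseudo needs, mirroring
// the IsEffectivelyImm13 / (ImmValue & 0x3ff) logic above.
static std::vector<std::string> expandSet(int32_t Value, bool Is64Bit) {
  std::vector<std::string> Insts;
  bool IsImm13 = (Is64Bit ? 0 : -4096) <= Value && Value < 4096;
  if (!IsImm13)
    Insts.push_back("sethi %hi(" + std::to_string(Value) + "), %rd");
  if (IsImm13 || (Value & 0x3ff))
    Insts.push_back("or ..., %lo(" + std::to_string(Value) + "), %rd");
  return Insts;
}

int main() {
  // 0x10000: 'sethi' only (low 10 bits are zero); 42: 'or' only (fits in
  // a signed 13-bit immediate); 0x12345: needs both instructions.
  return (expandSet(0x10000, false).size() == 1 &&
          expandSet(42, false).size() == 1 &&
          expandSet(0x12345, false).size() == 2) ? 0 : 1;
}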
Example no. 26
static void ParseProgName(SmallVectorImpl<const char *> &ArgVector,
                          std::set<std::string> &SavedStrings,
                          Driver &TheDriver)
{
  // Try to infer frontend type and default target from the program name.

  // suffixes[] contains the list of known driver suffixes.
  // Suffixes are compared against the program name in order.
  // If there is a match, the frontend type is updated as necessary (CPP/C++).
  // If there is no match, a second round is done after stripping the last
  // hyphen and everything following it. This allows using something like
  // "clang++-2.9".

  // If there is a match in either the first or second round,
  // the function tries to identify a target as prefix. E.g.
  // "x86_64-linux-clang" as interpreted as suffix "clang" with
  // target prefix "x86_64-linux". If such a target prefix is found,
  // is gets added via -target as implicit first argument.
  static const struct {
    const char *Suffix;
    const char *ModeFlag;
  } suffixes [] = {
    { "clang",     nullptr },
    { "clang++",   "--driver-mode=g++" },
    { "clang-c++", "--driver-mode=g++" },
    { "clang-upc", "--driver-mode=gupc" },
    { "clang-cc",  nullptr },
    { "clang-cpp", "--driver-mode=cpp" },
    { "clang-g++", "--driver-mode=g++" },
    { "clang-gcc", nullptr },
    { "clang-cl",  "--driver-mode=cl"  },
    { "cc",        nullptr },
    { "cpp",       "--driver-mode=cpp" },
    { "cl" ,       "--driver-mode=cl"  },
    { "++",        "--driver-mode=g++" },
    { "upc",       "--driver-mode=gupc" }
  };
  std::string ProgName(llvm::sys::path::stem(ArgVector[0]));
#ifdef LLVM_ON_WIN32
  // Transform to lowercase for case insensitive file systems.
  std::transform(ProgName.begin(), ProgName.end(), ProgName.begin(),
                 toLowercase);
#endif
  StringRef ProgNameRef(ProgName);
  StringRef Prefix;

  for (int Components = 2; Components; --Components) {
    bool FoundMatch = false;
    size_t i;

    for (i = 0; i < sizeof(suffixes) / sizeof(suffixes[0]); ++i) {
      if (ProgNameRef.endswith(suffixes[i].Suffix)) {
        FoundMatch = true;
        SmallVectorImpl<const char *>::iterator it = ArgVector.begin();
        if (it != ArgVector.end())
          ++it;
        if (suffixes[i].ModeFlag)
          ArgVector.insert(it, suffixes[i].ModeFlag);
        break;
      }
    }

    if (FoundMatch) {
      StringRef::size_type LastComponent = ProgNameRef.rfind('-',
        ProgNameRef.size() - strlen(suffixes[i].Suffix));
      if (LastComponent != StringRef::npos)
        Prefix = ProgNameRef.slice(0, LastComponent);
      break;
    }

    StringRef::size_type LastComponent = ProgNameRef.rfind('-');
    if (LastComponent == StringRef::npos)
      break;
    ProgNameRef = ProgNameRef.slice(0, LastComponent);
  }

  if (Prefix.empty())
    return;

  std::string IgnoredError;
  if (llvm::TargetRegistry::lookupTarget(Prefix, IgnoredError)) {
    SmallVectorImpl<const char *>::iterator it = ArgVector.begin();
    if (it != ArgVector.end())
      ++it;
    const char* Strings[] =
      { SaveStringInSet(SavedStrings, std::string("-target")),
        SaveStringInSet(SavedStrings, Prefix) };
    ArgVector.insert(it, Strings, Strings + llvm::array_lengthof(Strings));
  }
}
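ParseProgName above matches the program-name stem against a suffix table, retries once after stripping a trailing "-<version>" component, and treats anything before the last '-' of the matched stem as a target prefix. Below is a standalone sketch of that matching with std::string only; the suffix table is abbreviated and matchProgName is a hypothetical name.

#include <cstddef>
#include <string>
#include <utility>
#include <vector>

// Return {mode-flag, target-prefix} for a program name stem, mirroring the
// suffix/prefix scan in ParseProgName above. Empty strings mean "none".
static std::pair<std::string, std::string> matchProgName(std::string Stem) {
  static const std::pair<const char *, const char *> Suffixes[] = {
      {"clang++", "--driver-mode=g++"},
      {"clang-cl", "--driver-mode=cl"},
      {"clang", ""},
  };
  for (int Round = 0; Round < 2; ++Round) {
    for (const auto &S : Suffixes) {
      std::string Suf = S.first;
      if (Stem.size() >= Suf.size() &&
          Stem.compare(Stem.size() - Suf.size(), Suf.size(), Suf) == 0) {
        // Everything before the last '-' in front of the suffix is a target.
        std::size_t Dash = Stem.rfind('-', Stem.size() - Suf.size());
        std::string Prefix =
            Dash == std::string::npos ? std::string() : Stem.substr(0, Dash);
        return {S.second, Prefix};
      }
    }
    // No match: strip "-<trailing>" (e.g. "-3.8") and try one more time.
    std::size_t Dash = Stem.rfind('-');
    if (Dash == std::string::npos)
      break;
    Stem = Stem.substr(0, Dash);
  }
  return {"", ""};
}

int main() {
  auto R = matchProgName("x86_64-linux-clang++-3.8");
  return (R.first == "--driver-mode=g++" && R.second == "x86_64-linux") ? 0 : 1;
}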
Example no. 27
static bool ParseFile(const std::string &Filename,
                      const std::vector<std::string> &IncludeDirs,
                      SmallVectorImpl<std::string> &OutputFiles) {
  ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr =
      MemoryBuffer::getFileOrSTDIN(Filename);
  if (std::error_code EC = MBOrErr.getError()) {
    llvm::errs() << "Could not open input file '" << Filename << "': " 
                 << EC.message() <<"\n";
    return true;
  }
  std::unique_ptr<llvm::MemoryBuffer> MB = std::move(MBOrErr.get());

  // Record the location of the include directory so that the lexer can find it
  // later.
  SourceMgr SrcMgr;
  SrcMgr.setIncludeDirs(IncludeDirs);

  // Tell SrcMgr about this buffer, which is what Parser will pick up.
  SrcMgr.AddNewSourceBuffer(std::move(MB), llvm::SMLoc());

  LangOptions Opts;
  Opts.DefaultReal8 = DefaultReal8;
  Opts.DefaultDouble8 = DefaultDouble8;
  Opts.DefaultInt8 = DefaultInt8;
  Opts.ReturnComments = ReturnComments;
  if(!FreeForm && !FixedForm) {
    llvm::StringRef Ext = llvm::sys::path::extension(Filename);
    if(Ext.equals_lower(".f")) {
      Opts.FixedForm = 1;
      Opts.FreeForm = 0;
    }
  } else if(FixedForm) {
    Opts.FixedForm = 1;
    Opts.FreeForm = 0;
  }

  TextDiagnosticPrinter TDP(SrcMgr);
  DiagnosticsEngine Diag(new DiagnosticIDs,&SrcMgr, &TDP, false);
  // Chain in -verify checker, if requested.
  if(RunVerifier)
    Diag.setClient(new VerifyDiagnosticConsumer(Diag));

  ASTContext Context(SrcMgr, Opts);
  Sema SA(Context, Diag);
  Parser P(SrcMgr, Opts, Diag, SA);
  Diag.getClient()->BeginSourceFile(Opts, &P.getLexer());
  P.ParseProgramUnits();
  Diag.getClient()->EndSourceFile();

  // Dump
  if(PrintAST || DumpAST) {
    auto Dumper = CreateASTDumper("");
    Dumper->HandleTranslationUnit(Context);
    delete Dumper;
  }

  // Emit
  if(!SyntaxOnly && !Diag.hadErrors()) {    
    flang::TargetOptions TargetOptions;
    TargetOptions.Triple = TargetTriple.empty()? llvm::sys::getDefaultTargetTriple() :
                                                 TargetTriple;
    TargetOptions.CPU = llvm::sys::getHostCPUName();

    auto CG = CreateLLVMCodeGen(Diag, Filename == ""? std::string("module") : Filename,
                                CodeGenOptions(), TargetOptions, llvm::getGlobalContext());
    CG->Initialize(Context);
    CG->HandleTranslationUnit(Context);

    BackendAction BA = Backend_EmitObj;
    if(EmitASM)   BA = Backend_EmitAssembly;
    if(EmitLLVM)  BA = Backend_EmitLL;

    const llvm::Target *TheTarget = 0;
    std::string Err;
    TheTarget = llvm::TargetRegistry::lookupTarget(TargetOptions.Triple, Err);

    CodeGenOpt::Level TMOptLevel = CodeGenOpt::Default;
    switch(OptLevel) {
    case 0:  TMOptLevel = CodeGenOpt::None; break;
    case 3:  TMOptLevel = CodeGenOpt::Aggressive; break;
    }

    llvm::TargetOptions Options;

    auto TM = TheTarget->createTargetMachine(TargetOptions.Triple, TargetOptions.CPU, "", Options,
                                             Reloc::Default, CodeModel::Default,
                                             TMOptLevel);

    if(!(EmitLLVM && OptLevel == 0)) {
      auto TheModule = CG->GetModule();
      auto PM = new llvm::legacy::PassManager();
      //llvm::legacy::FunctionPassManager *FPM = new llvm::legacy::FunctionPassManager(TheModule);
      //FPM->add(new DataLayoutPass());
      //PM->add(new llvm::DataLayoutPass());
      //TM->addAnalysisPasses(*PM);
      PM->add(createPromoteMemoryToRegisterPass());

      PassManagerBuilder PMBuilder;
      PMBuilder.OptLevel = OptLevel;
      PMBuilder.SizeLevel = 0;
      PMBuilder.LoopVectorize = true;
      PMBuilder.SLPVectorize = true;
      unsigned Threshold = 225;
      if (OptLevel > 2)
        Threshold = 275;
      PMBuilder.Inliner = createFunctionInliningPass(Threshold);


      PMBuilder.populateModulePassManager(*PM);
      //llvm::legacy::PassManager *MPM = new llvm::legacy::PassManager();
      //PMBuilder.populateModulePassManager(*MPM);

      PM->run(*TheModule);
      //MPM->run(*TheModule);
      delete PM;
      //delete MPM;
    }

    if(Interpret) {
      //const char *Env[] = { "", nullptr };
      //Execute(CG->ReleaseModule(), Env);
    } else if(OutputFile == "-"){
      // FIXME: outputting to stdout is broken 
      //EmitFile(llvm::outs(), CG->GetModule(), TM, BA);
      OutputFiles.push_back(GetOutputName("stdout",BA));
      EmitOutputFile(OutputFiles.back(), CG->GetModule(), TM, BA);
    }else {
      OutputFiles.push_back(GetOutputName(Filename, BA));
      EmitOutputFile(OutputFiles.back(), CG->GetModule(), TM, BA);
    }
    delete CG;
  }

  return Diag.hadErrors();
}
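The driver pipeline above maps the -O level onto both a backend optimization level and an inliner threshold (225 by default, 275 above -O2). Below is a standalone sketch of just that mapping; OptSpeed, PipelineConfig, and configureFor are hypothetical names, not the flang or LLVM API.

// Mirror the OptLevel -> (CodeGenOpt level, inline threshold) decisions made
// in ParseFile above.
enum class OptSpeed { None, Less, Default, Aggressive };

struct PipelineConfig {
  OptSpeed TMOptLevel;
  unsigned InlineThreshold;
};

static PipelineConfig configureFor(unsigned OptLevel) {
  PipelineConfig C{OptSpeed::Default, 225};
  if (OptLevel == 0)
    C.TMOptLevel = OptSpeed::None;
  if (OptLevel == 3)
    C.TMOptLevel = OptSpeed::Aggressive;
  if (OptLevel > 2)
    C.InlineThreshold = 275;
  return C;
}

int main() {
  return (configureFor(3).InlineThreshold == 275 &&
          configureFor(1).InlineThreshold == 225) ? 0 : 1;
}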
Example no. 28
/// ApplyQAOverride - Apply a list of edits to the input argument lists.
///
/// The input string is a space-separated list of edits to perform;
/// they are applied in order to the input argument lists. Edits
/// should be one of the following forms:
///
///  '#': Silence information about the changes to the command line arguments.
///
///  '^': Add FOO as a new argument at the beginning of the command line.
///
///  '+': Add FOO as a new argument at the end of the command line.
///
///  's/XXX/YYY/': Substitute the regular expression XXX with YYY in the command
///  line.
///
///  'xOPTION': Removes all instances of the literal argument OPTION.
///
///  'XOPTION': Removes all instances of the literal argument OPTION,
///  and the following argument.
///
///  'Ox': Removes all flags matching 'O' or 'O[sz0-9]' and adds 'Ox'
///  at the end of the command line.
///
/// \param OS - The stream to write edit information to.
/// \param Args - The vector of command line arguments.
/// \param Edit - The override command to perform.
/// \param SavedStrings - Set to use for storing string representations.
static void ApplyOneQAOverride(raw_ostream &OS,
                               SmallVectorImpl<const char*> &Args,
                               StringRef Edit,
                               std::set<std::string> &SavedStrings) {
  // This does not need to be efficient.

  if (Edit[0] == '^') {
    const char *Str =
      SaveStringInSet(SavedStrings, Edit.substr(1));
    OS << "### Adding argument " << Str << " at beginning\n";
    Args.insert(Args.begin() + 1, Str);
  } else if (Edit[0] == '+') {
    const char *Str =
      SaveStringInSet(SavedStrings, Edit.substr(1));
    OS << "### Adding argument " << Str << " at end\n";
    Args.push_back(Str);
  } else if (Edit[0] == 's' && Edit[1] == '/' && Edit.endswith("/") &&
             Edit.slice(2, Edit.size()-1).find('/') != StringRef::npos) {
    StringRef MatchPattern = Edit.substr(2).split('/').first;
    StringRef ReplPattern = Edit.substr(2).split('/').second;
    ReplPattern = ReplPattern.slice(0, ReplPattern.size()-1);

    for (unsigned i = 1, e = Args.size(); i != e; ++i) {
      std::string Repl = llvm::Regex(MatchPattern).sub(ReplPattern, Args[i]);

      if (Repl != Args[i]) {
        OS << "### Replacing '" << Args[i] << "' with '" << Repl << "'\n";
        Args[i] = SaveStringInSet(SavedStrings, Repl);
      }
    }
  } else if (Edit[0] == 'x' || Edit[0] == 'X') {
    std::string Option = Edit.substr(1, std::string::npos);
    for (unsigned i = 1; i < Args.size();) {
      if (Option == Args[i]) {
        OS << "### Deleting argument " << Args[i] << '\n';
        Args.erase(Args.begin() + i);
        if (Edit[0] == 'X') {
          if (i < Args.size()) {
            OS << "### Deleting argument " << Args[i] << '\n';
            Args.erase(Args.begin() + i);
          } else
            OS << "### Invalid X edit, end of command line!\n";
        }
      } else
        ++i;
    }
  } else if (Edit[0] == 'O') {
    for (unsigned i = 1; i < Args.size();) {
      const char *A = Args[i];
      if (A[0] == '-' && A[1] == 'O' &&
          (A[2] == '\0' ||
           (A[3] == '\0' && (A[2] == 's' || A[2] == 'z' ||
                             ('0' <= A[2] && A[2] <= '9'))))) {
        OS << "### Deleting argument " << Args[i] << '\n';
        Args.erase(Args.begin() + i);
      } else
        ++i;
    }
    OS << "### Adding argument " << Edit << " at end\n";
    Args.push_back(SaveStringInSet(SavedStrings, '-' + Edit.str()));
  } else {
    OS << "### Unrecognized edit: " << Edit << "\n";
  }
}
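The edit language documented above is easy to exercise in isolation. Below is a standalone sketch covering only the '+' (append) and 'x' (delete literal matches) forms, leaving argv[0] untouched as the real code does; applyEdit is a hypothetical name, not the driver's API.

#include <cstddef>
#include <string>
#include <vector>

// Apply one "+FOO" or "xFOO" edit to Args, mirroring the corresponding
// branches of ApplyOneQAOverride above (argv[0] is never touched).
static void applyEdit(std::vector<std::string> &Args, const std::string &Edit) {
  if (Edit.empty())
    return;
  if (Edit[0] == '+') {
    Args.push_back(Edit.substr(1));             // add at end
  } else if (Edit[0] == 'x') {
    std::string Option = Edit.substr(1);
    for (std::size_t I = 1; I < Args.size();) { // delete every literal match
      if (Args[I] == Option)
        Args.erase(Args.begin() + I);
      else
        ++I;
    }
  }
}

int main() {
  std::vector<std::string> Args = {"clang", "-g", "-O2", "foo.c"};
  applyEdit(Args, "x-g");    // remove every "-g"
  applyEdit(Args, "+-Wall"); // append "-Wall"
  return (Args ==
          std::vector<std::string>{"clang", "-O2", "foo.c", "-Wall"}) ? 0 : 1;
}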
Example no. 29
/// Optimize access to the let property, which is known
/// to have a constant value. Replace all loads from the
/// property by its constant value.
void LetPropertiesOpt::optimizeLetPropertyAccess(VarDecl *Property,
    SmallVectorImpl<SILInstruction *> &Init) {

  if (SkipProcessing.count(Property))
    return;

  if (Init.empty())
    return;

  auto *Ty = dyn_cast<NominalTypeDecl>(Property->getDeclContext());
  if (SkipTypeProcessing.count(Ty))
    return;

  DEBUG(llvm::dbgs() << "Replacing access to property '" << *Property
                     << "' by its constant initializer\n");

  auto PropertyAccess = Property->getEffectiveAccess();
  auto TypeAccess = Ty->getEffectiveAccess();
  auto CanRemove = false;

  // Check if a given let property can be removed, because it
  // is not accessible elsewhere. This can happen if this property
  // is private or if it is internal and WMO mode is used.
  if (TypeAccess == Accessibility::Private ||
      PropertyAccess == Accessibility::Private
      || ((TypeAccess == Accessibility::Internal ||
          PropertyAccess == Accessibility::Internal) &&
          Module->isWholeModule())) {
    CanRemove = true;
    DEBUG(llvm::dbgs() << "Storage for property '" << *Property
                       << "' can be eliminated\n");
  }

  if (CannotRemove.count(Property))
    CanRemove = false;

  if (!AccessMap.count(Property)) {
    DEBUG(llvm::dbgs() << "Property '" << *Property << "' is never read\n");
    if (CanRemove) {
      // TODO: Remove the let property, because it is never accessed.
    }
    return;
  }

  auto &Loads = AccessMap[Property];

  unsigned NumReplaced = 0;

  for (auto Load: Loads) {
    // Look for any instructions accessing let properties.
    if (isa<RefElementAddrInst>(Load)) {
      // Copy the initializer into the function
      // Replace the access to a let property by the value
      // computed by this initializer.
      InstructionsCloner Cloner(*Load->getFunction(), Init, Load);
      Cloner.clone();
      SILInstruction *I = &*std::prev(Load->getIterator());
      SILBuilderWithScope B(Load);
      for (auto UI = Load->use_begin(), E = Load->use_end(); UI != E;) {
        auto *User = UI->getUser();
        ++UI;
        if (isa<StoreInst>(User))
          continue;

        replaceLoadSequence(User, I, B);
        eraseUsesOfInstruction(User);
        User->eraseFromParent();
        ++NumReplaced;
      }
      HasChanged = true;
    } else if (isa<StructExtractInst>(Load)) {
      // Copy the initializer into the function
      // Replace the access to a let property by the value
      // computed by this initializer.
      InstructionsCloner Cloner(*Load->getFunction(), Init, Load);
      Cloner.clone();
      SILInstruction *I = &*std::prev(Load->getIterator());
      Load->replaceAllUsesWith(I);
      DEBUG(llvm::dbgs() << "Access to " << *Property << " was replaced:\n";
            I->dumpInContext());

      Load->eraseFromParent();
      ++NumReplaced;
      HasChanged = true;
    }  else if (isa<StructElementAddrInst>(Load)) {
      // Copy the initializer into the function
      // Replace the access to a let property by the value
      // computed by this initializer.
      InstructionsCloner Cloner(*Load->getFunction(), Init, Load);
      Cloner.clone();
      SILInstruction *I = &*std::prev(Load->getIterator());
      SILBuilderWithScope B(Load);
      for (auto UI = Load->use_begin(), E = Load->use_end(); UI != E;) {
        auto *User = UI->getUser();
        ++UI;
        if (isa<StoreInst>(User))
          continue;
        replaceLoadSequence(User, I, B);
        eraseUsesOfInstruction(User);
        User->eraseFromParent();
        ++NumReplaced;
      }
      HasChanged = true;
    }
  }
}
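Whether the property's backing storage can also be removed depends only on its visibility: private, or internal when the whole module is being optimized. Below is a standalone sketch of that check; Access and canRemoveStorage are hypothetical names.

enum class Access { Private, Internal, Public };

// Decide whether a 'let' property's storage can be dropped, mirroring the
// accessibility check in optimizeLetPropertyAccess above: the property must
// not be visible outside the code being optimized.
static bool canRemoveStorage(Access TypeAccess, Access PropertyAccess,
                             bool WholeModule) {
  if (TypeAccess == Access::Private || PropertyAccess == Access::Private)
    return true;
  if ((TypeAccess == Access::Internal || PropertyAccess == Access::Internal) &&
      WholeModule)
    return true;
  return false;
}

int main() {
  return (canRemoveStorage(Access::Internal, Access::Internal, true) &&
          !canRemoveStorage(Access::Public, Access::Public, false)) ? 0 : 1;
}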
Example no. 30
int EDDisassembler::parseInst(SmallVectorImpl<MCParsedAsmOperand*> &operands,
                              SmallVectorImpl<AsmToken> &tokens,
                              const std::string &str) {
  int ret = 0;
  
  switch (Key.Arch) {
  default:
    return -1;
  case Triple::x86:
  case Triple::x86_64:
  case Triple::arm:
  case Triple::thumb:
    break;
  }
  
  const char *cStr = str.c_str();
  MemoryBuffer *buf = MemoryBuffer::getMemBuffer(cStr, cStr + strlen(cStr));
  
  StringRef instName;
  SMLoc instLoc;
  
  SourceMgr sourceMgr;
  sourceMgr.AddNewSourceBuffer(buf, SMLoc()); // ownership of buf handed over
  MCContext context(*AsmInfo, NULL);
  OwningPtr<MCStreamer> streamer(createNullStreamer(context));
  OwningPtr<MCAsmParser> genericParser(createMCAsmParser(*Tgt, sourceMgr,
                                                         context, *streamer,
                                                         *AsmInfo));
  OwningPtr<TargetAsmParser> TargetParser(Tgt->createAsmParser(*genericParser,
                                                               *TargetMachine));
  
  AsmToken OpcodeToken = genericParser->Lex();
  AsmToken NextToken = genericParser->Lex();  // consume next token, because specificParser expects us to
    
  if (OpcodeToken.is(AsmToken::Identifier)) {
    instName = OpcodeToken.getString();
    instLoc = OpcodeToken.getLoc();
    
    if (NextToken.isNot(AsmToken::Eof) &&
        TargetParser->ParseInstruction(instName, instLoc, operands))
      ret = -1;
  } else {
    ret = -1;
  }
  
  ParserMutex.acquire();
  
  if (!ret) {
    GenericAsmLexer->setBuffer(buf);
  
    while (SpecificAsmLexer->Lex(),
           SpecificAsmLexer->isNot(AsmToken::Eof) &&
           SpecificAsmLexer->isNot(AsmToken::EndOfStatement)) {
      if (SpecificAsmLexer->is(AsmToken::Error)) {
        ret = -1;
        break;
      }
      tokens.push_back(SpecificAsmLexer->getTok());
    }
  }

  ParserMutex.release();
  
  return ret;
}
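A pattern common to every example in this listing: the functions accept SmallVectorImpl<T>& so that callers can pass a SmallVector with any inline capacity without templating the callee. A minimal sketch, assuming the LLVM ADT headers are available on the include path:

#include "llvm/ADT/SmallVector.h"

// Taking SmallVectorImpl<int>& lets callers choose the inline capacity of the
// vector they pass in, exactly as the out-parameters in the examples above do.
static void collectSquares(int N, llvm::SmallVectorImpl<int> &Out) {
  for (int I = 0; I < N; ++I)
    Out.push_back(I * I);
}

int main() {
  llvm::SmallVector<int, 4> Small;   // 4 elements stored inline
  llvm::SmallVector<int, 128> Large; // 128 elements stored inline
  collectSquares(3, Small);          // both calls use the same function
  collectSquares(100, Large);
  return (Small.size() == 3 && Large.size() == 100) ? 0 : 1;
}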