void SwiftLangSupport::findInterfaceDocument(StringRef ModuleName,
                                             ArrayRef<const char *> Args,
                       std::function<void(const InterfaceDocInfo &)> Receiver) {
  InterfaceDocInfo Info;

  CompilerInstance CI;
  // Display diagnostics to stderr.
  PrintingDiagnosticConsumer PrintDiags;
  CI.addDiagnosticConsumer(&PrintDiags);

  CompilerInvocation Invocation;
  std::string Error;
  if (getASTManager().initCompilerInvocation(Invocation, Args, CI.getDiags(),
                                             StringRef(), Error)) {
    Info.Error = Error;
    return Receiver(Info);
  }

  if (auto IFaceGenRef = IFaceGenContexts.find(ModuleName, Invocation))
    Info.ModuleInterfaceName = IFaceGenRef->getDocumentName();

  SmallString<128> Buf;
  SmallVector<std::pair<unsigned, unsigned>, 16> ArgOffs;
  auto addArgPair = [&](StringRef Arg, StringRef Val) {
    assert(!Arg.empty());
    if (Val.empty())
      return;
    unsigned ArgBegin = Buf.size();
    Buf += Arg;
    unsigned ArgEnd = Buf.size();
    unsigned ValBegin = Buf.size();
    Buf += Val;
    unsigned ValEnd = Buf.size();
    ArgOffs.push_back(std::make_pair(ArgBegin, ArgEnd));
    ArgOffs.push_back(std::make_pair(ValBegin, ValEnd));
  };
  auto addSingleArg = [&](StringRef Arg) {
    assert(!Arg.empty());
    unsigned ArgBegin = Buf.size();
    Buf += Arg;
    unsigned ArgEnd = Buf.size();
    ArgOffs.push_back(std::make_pair(ArgBegin, ArgEnd));
  };

  addArgPair("-target", Invocation.getTargetTriple());

  const auto &SPOpts = Invocation.getSearchPathOptions();
  addArgPair("-sdk", SPOpts.SDKPath);
  for (auto &FramePath : SPOpts.FrameworkSearchPaths) {
    if (FramePath.IsSystem)
      addArgPair("-Fsystem", FramePath.Path);
    else
      addArgPair("-F", FramePath.Path);
  }
  for (auto &Path : SPOpts.ImportSearchPaths)
    addArgPair("-I", Path);

  const auto &ClangOpts = Invocation.getClangImporterOptions();
  addArgPair("-module-cache-path", ClangOpts.ModuleCachePath);
  for (auto &ExtraArg : ClangOpts.ExtraArgs)
    addArgPair("-Xcc", ExtraArg);

  if (Invocation.getFrontendOptions().ImportUnderlyingModule)
    addSingleArg("-import-underlying-module");
  addArgPair("-import-objc-header",
             Invocation.getFrontendOptions().ImplicitObjCHeaderPath);

  SmallVector<StringRef, 16> NewArgs;
  for (auto Pair : ArgOffs) {
    NewArgs.push_back(StringRef(Buf.begin()+Pair.first, Pair.second-Pair.first));
  }
  Info.CompilerArgs = NewArgs;

  return Receiver(Info);
}
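// A standalone sketch of the offset-recording pattern used by addArgPair
// above: growing the buffer may reallocate it, so the code records
// (begin, end) offsets and only materializes string views once the buffer is
// final. Names here are illustrative, not part of the surrounding code.
#include <cstddef>
#include <string>
#include <string_view>
#include <utility>
#include <vector>

std::vector<std::string_view> buildArgs(std::string &Buf) {
  std::vector<std::pair<std::size_t, std::size_t>> Offs;
  auto add = [&](std::string_view S) {
    std::size_t Begin = Buf.size();
    Buf += S;                             // may reallocate the buffer
    Offs.emplace_back(Begin, Buf.size()); // offsets survive reallocation
  };
  add("-target");
  add("x86_64-apple-macosx");
  std::vector<std::string_view> Args;
  for (const auto &O : Offs)
    Args.emplace_back(Buf.data() + O.first, O.second - O.first);
  return Args;
}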
/// ClusterNeighboringLoads - Force nearby loads together by "gluing" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target specific), it adds MVT::Glue inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
  SDNode *Chain = 0;
  unsigned NumOps = Node->getNumOperands();
  if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
    Chain = Node->getOperand(NumOps-1).getNode();
  if (!Chain)
    return;

  // Look for other loads of the same chain. Find loads that are loading from
  // the same base pointer and different offsets.
  SmallPtrSet<SDNode*, 16> Visited;
  SmallVector<int64_t, 4> Offsets;
  DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
  bool Cluster = false;
  SDNode *Base = Node;
  for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
       I != E; ++I) {
    SDNode *User = *I;
    if (User == Node || !Visited.insert(User))
      continue;
    int64_t Offset1, Offset2;
    if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
        Offset1 == Offset2)
      // FIXME: Should be ok if the addresses are identical. But earlier
      // optimizations really should have eliminated one of the loads.
      continue;
    if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
      Offsets.push_back(Offset1);
    O2SMap.insert(std::make_pair(Offset2, User));
    Offsets.push_back(Offset2);
    if (Offset2 < Offset1)
      Base = User;
    Cluster = true;
  }

  if (!Cluster)
    return;

  // Sort them in increasing order.
  std::sort(Offsets.begin(), Offsets.end());

  // Check if the loads are close enough.
  SmallVector<SDNode*, 4> Loads;
  unsigned NumLoads = 0;
  int64_t BaseOff = Offsets[0];
  SDNode *BaseLoad = O2SMap[BaseOff];
  Loads.push_back(BaseLoad);
  for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
    int64_t Offset = Offsets[i];
    SDNode *Load = O2SMap[Offset];
    if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,
                                      NumLoads))
      break; // Stop right here. Ignore loads that are further away.
    Loads.push_back(Load);
    ++NumLoads;
  }

  if (NumLoads == 0)
    return;

  // Cluster loads by adding MVT::Glue outputs and inputs. This also
  // ensures they are scheduled in order of increasing addresses.
  SDNode *Lead = Loads[0];
  AddGlue(Lead, SDValue(0, 0), true, DAG);

  SDValue InGlue = SDValue(Lead, Lead->getNumValues() - 1);
  for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
    bool OutGlue = I < E - 1;
    SDNode *Load = Loads[I];

    AddGlue(Load, InGlue, OutGlue, DAG);

    if (OutGlue)
      InGlue = SDValue(Load, Load->getNumValues() - 1);

    ++LoadsClustered;
  }
}
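// A standalone sketch of the clustering decision above: loads sharing a base
// are keyed by offset, the offsets are sorted, and only a prefix that stays
// "close enough" to the base is kept. MaxDist stands in for the
// target-specific shouldScheduleLoadsNear query; all names are illustrative.
#include <cstdint>
#include <map>
#include <vector>

std::vector<int> clusterByOffset(const std::map<int64_t, int> &OffsetToLoad,
                                 int64_t MaxDist) {
  std::vector<int> Cluster;
  if (OffsetToLoad.empty())
    return Cluster;
  int64_t BaseOff = OffsetToLoad.begin()->first; // map keys are already sorted
  for (const auto &KV : OffsetToLoad) {
    if (KV.first - BaseOff > MaxDist)
      break; // stop right here; ignore loads that are further away
    Cluster.push_back(KV.second);
  }
  return Cluster;
}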
/// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;
  DenseMap<SUnit*, unsigned> CopyVRBaseMap;
  SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
  SmallSet<unsigned, 8> Seen;
  bool HasDbg = DAG->hasDebugValues();

  // If this is the first BB, emit byval parameter dbg_value's.
  if (HasDbg && BB->getParent()->begin() == MachineFunction::iterator(BB)) {
    SDDbgInfo::DbgIterator PDI = DAG->ByvalParmDbgBegin();
    SDDbgInfo::DbgIterator PDE = DAG->ByvalParmDbgEnd();
    for (; PDI != PDE; ++PDI) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(*PDI, VRBaseMap);
      if (DbgMI)
        BB->insert(InsertPos, DbgMI);
    }
  }

  for (unsigned i = 0, e = Sequence.size(); i != e; ++i) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    // For pre-regalloc scheduling, create instructions corresponding to the
    // SDNode and any glued SDNodes and append them to the block.
    if (!SU->getNode()) {
      // Emit a copy.
      EmitPhysRegCopy(SU, CopyVRBaseMap);
      continue;
    }

    SmallVector<SDNode *, 4> GluedNodes;
    for (SDNode *N = SU->getNode()->getGluedNode(); N;
         N = N->getGluedNode())
      GluedNodes.push_back(N);
    while (!GluedNodes.empty()) {
      SDNode *N = GluedNodes.back();
      Emitter.EmitNode(N, SU->OrigNode != SU, SU->isCloned, VRBaseMap);
      // Remember the source order of the inserted instruction.
      if (HasDbg)
        ProcessSourceNode(N, DAG, Emitter, VRBaseMap, Orders, Seen);
      GluedNodes.pop_back();
    }
    Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
                     VRBaseMap);
    // Remember the source order of the inserted instruction.
    if (HasDbg)
      ProcessSourceNode(SU->getNode(), DAG, Emitter, VRBaseMap, Orders,
                        Seen);
  }

  // Insert all the dbg_values which have not already been inserted in source
  // order sequence.
  if (HasDbg) {
    MachineBasicBlock::iterator BBBegin = BB->getFirstNonPHI();

    // Sort the source order instructions and use the order to insert debug
    // values.
    std::sort(Orders.begin(), Orders.end(), OrderSorter());

    SDDbgInfo::DbgIterator DI = DAG->DbgBegin();
    SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
    // Now emit the rest according to source order.
    unsigned LastOrder = 0;
    for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
      unsigned Order = Orders[i].first;
      MachineInstr *MI = Orders[i].second;
      // Insert all SDDbgValue's whose order(s) are before "Order".
      if (!MI)
        continue;
      for (; DI != DE &&
             (*DI)->getOrder() >= LastOrder && (*DI)->getOrder() < Order; ++DI) {
        if ((*DI)->isInvalidated())
          continue;
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI) {
          if (!LastOrder)
            // Insert to start of the BB (after PHIs).
            BB->insert(BBBegin, DbgMI);
          else {
            // Insert at the instruction, which may be in a different
            // block, if the block was split by a custom inserter.
            MachineBasicBlock::iterator Pos = MI;
            MI->getParent()->insert(llvm::next(Pos), DbgMI);
          }
        }
      }
      LastOrder = Order;
    }
    // Add trailing DbgValue's before the terminator. FIXME: May want to add
    // some of them before one or more conditional branches?
    while (DI != DE) {
      MachineBasicBlock *InsertBB = Emitter.getBlock();
      MachineBasicBlock::iterator Pos =
          Emitter.getBlock()->getFirstTerminator();
      if (!(*DI)->isInvalidated()) {
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI)
          InsertBB->insert(Pos, DbgMI);
      }
      ++DI;
    }
  }

  BB = Emitter.getBlock();
  InsertPos = Emitter.getInsertPos();
  return BB;
}
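// A standalone sketch of the merge that EmitSchedule performs above: two
// sequences keyed by the same source order (emitted instructions and pending
// debug values) are interleaved, and leftover debug values are flushed at the
// end. Plain ints stand in for instructions; names are illustrative.
#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

void mergeByOrder(const std::vector<std::pair<unsigned, int>> &Insts,
                  const std::vector<std::pair<unsigned, int>> &DbgVals) {
  std::size_t DI = 0;
  unsigned LastOrder = 0;
  for (const auto &Inst : Insts) {
    // Emit every debug value whose order falls in [LastOrder, Inst order).
    while (DI != DbgVals.size() && DbgVals[DI].first >= LastOrder &&
           DbgVals[DI].first < Inst.first)
      std::printf("dbg %d before inst %d\n", DbgVals[DI++].second, Inst.second);
    LastOrder = Inst.first;
  }
  while (DI != DbgVals.size()) // trailing debug values go after everything
    std::printf("trailing dbg %d\n", DbgVals[DI++].second);
}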
void
RegisterInfoEmitter::emitComposeSubRegIndices(raw_ostream &OS,
                                              CodeGenRegBank &RegBank,
                                              const std::string &ClName) {
  ArrayRef<CodeGenSubRegIndex*> SubRegIndices = RegBank.getSubRegIndices();
  OS << "unsigned " << ClName
     << "::composeSubRegIndicesImpl(unsigned IdxA, unsigned IdxB) const {\n";

  // Many sub-register indexes are composition-compatible, meaning that
  //
  //   compose(IdxA, IdxB) == compose(IdxA', IdxB)
  //
  // for many IdxA, IdxA' pairs. Not all sub-register indexes can be composed.
  // The illegal entries can be used as wildcards to compress the table
  // further.

  // Map each sub-register index to a compatible table row.
  SmallVector<unsigned, 4> RowMap;
  SmallVector<SmallVector<CodeGenSubRegIndex*, 4>, 4> Rows;

  for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
    unsigned Found = ~0u;
    for (unsigned r = 0, re = Rows.size(); r != re; ++r) {
      if (combine(SubRegIndices[i], Rows[r])) {
        Found = r;
        break;
      }
    }
    if (Found == ~0u) {
      Found = Rows.size();
      Rows.resize(Found + 1);
      Rows.back().resize(SubRegIndices.size());
      combine(SubRegIndices[i], Rows.back());
    }
    RowMap.push_back(Found);
  }

  // Output the row map if there are multiple rows.
  if (Rows.size() > 1) {
    OS << "  static const " << getMinimalTypeForRange(Rows.size())
       << " RowMap[" << SubRegIndices.size() << "] = {\n    ";
    for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i)
      OS << RowMap[i] << ", ";
    OS << "\n  };\n";
  }

  // Output the rows.
  OS << "  static const " << getMinimalTypeForRange(SubRegIndices.size()+1)
     << " Rows[" << Rows.size() << "][" << SubRegIndices.size() << "] = {\n";
  for (unsigned r = 0, re = Rows.size(); r != re; ++r) {
    OS << "    { ";
    for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i)
      if (Rows[r][i])
        OS << Rows[r][i]->EnumValue << ", ";
      else
        OS << "0, ";
    OS << "},\n";
  }
  OS << "  };\n\n";

  OS << "  --IdxA; assert(IdxA < " << SubRegIndices.size() << ");\n"
     << "  --IdxB; assert(IdxB < " << SubRegIndices.size() << ");\n";
  if (Rows.size() > 1)
    OS << "  return Rows[RowMap[IdxA]][IdxB];\n";
  else
    OS << "  return Rows[0][IdxB];\n";
  OS << "}\n\n";
}
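// A standalone sketch of the table compression emitted above: identical rows
// are stored once, and RowMap sends each index to its canonical row, so the
// lookup becomes Rows[RowMap[IdxA]][IdxB]. The real code additionally merges
// rows through wildcard entries; this sketch only deduplicates exact matches.
// Names are illustrative.
#include <cstddef>
#include <vector>

void compressRows(const std::vector<std::vector<unsigned>> &Table,
                  std::vector<std::size_t> &RowMap,
                  std::vector<std::vector<unsigned>> &Rows) {
  for (const auto &Row : Table) {
    std::size_t Found = Rows.size();
    for (std::size_t r = 0; r != Rows.size(); ++r)
      if (Rows[r] == Row) { // exact-match reuse of an existing row
        Found = r;
        break;
      }
    if (Found == Rows.size())
      Rows.push_back(Row); // first occurrence becomes the canonical row
    RowMap.push_back(Found);
  }
}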
void AArch64FrameLowering::processFunctionBeforeCalleeSavedScan(
    MachineFunction &MF, RegScavenger *RS) const {
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  SmallVector<unsigned, 4> UnspilledCSGPRs;
  SmallVector<unsigned, 4> UnspilledCSFPRs;

  // The frame record needs to be created by saving the appropriate registers.
  if (hasFP(MF)) {
    MRI->setPhysRegUsed(AArch64::FP);
    MRI->setPhysRegUsed(AArch64::LR);
  }

  // Spill the BasePtr if it's used. Do this first thing so that the
  // getCalleeSavedRegs() below will get the right answer.
  if (RegInfo->hasBasePointer(MF))
    MRI->setPhysRegUsed(RegInfo->getBaseRegister());

  // If any callee-saved registers are used, the frame cannot be eliminated.
  unsigned NumGPRSpilled = 0;
  unsigned NumFPRSpilled = 0;
  bool ExtraCSSpill = false;
  bool CanEliminateFrame = true;
  DEBUG(dbgs() << "*** processFunctionBeforeCalleeSavedScan\nUsed CSRs:");
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);

  // Check pairs of consecutive callee-saved registers.
  for (unsigned i = 0; CSRegs[i]; i += 2) {
    assert(CSRegs[i + 1] && "Odd number of callee-saved registers!");

    const unsigned OddReg = CSRegs[i];
    const unsigned EvenReg = CSRegs[i + 1];
    assert((AArch64::GPR64RegClass.contains(OddReg) &&
            AArch64::GPR64RegClass.contains(EvenReg)) ^
               (AArch64::FPR64RegClass.contains(OddReg) &&
                AArch64::FPR64RegClass.contains(EvenReg)) &&
           "Register class mismatch!");

    const bool OddRegUsed = MRI->isPhysRegUsed(OddReg);
    const bool EvenRegUsed = MRI->isPhysRegUsed(EvenReg);

    // Early exit if none of the registers in the register pair is actually
    // used.
    if (!OddRegUsed && !EvenRegUsed) {
      if (AArch64::GPR64RegClass.contains(OddReg)) {
        UnspilledCSGPRs.push_back(OddReg);
        UnspilledCSGPRs.push_back(EvenReg);
      } else {
        UnspilledCSFPRs.push_back(OddReg);
        UnspilledCSFPRs.push_back(EvenReg);
      }
      continue;
    }

    unsigned Reg = AArch64::NoRegister;
    // If only one of the registers of the register pair is used, make sure to
    // mark the other one as used as well.
    if (OddRegUsed ^ EvenRegUsed) {
      // Find out which register is the additional spill.
      Reg = OddRegUsed ? EvenReg : OddReg;
      MRI->setPhysRegUsed(Reg);
    }

    DEBUG(dbgs() << ' ' << PrintReg(OddReg, RegInfo));
    DEBUG(dbgs() << ' ' << PrintReg(EvenReg, RegInfo));

    assert(((OddReg == AArch64::LR && EvenReg == AArch64::FP) ||
            (RegInfo->getEncodingValue(OddReg) + 1 ==
             RegInfo->getEncodingValue(EvenReg))) &&
           "Register pair of non-adjacent registers!");
    if (AArch64::GPR64RegClass.contains(OddReg)) {
      NumGPRSpilled += 2;
      // If it's not a reserved register, we can use it in lieu of an
      // emergency spill slot for the register scavenger.
      // FIXME: It would be better to instead keep looking and choose another
      // unspilled register that isn't reserved, if there is one.
      if (Reg != AArch64::NoRegister && !RegInfo->isReservedReg(MF, Reg))
        ExtraCSSpill = true;
    } else
      NumFPRSpilled += 2;

    CanEliminateFrame = false;
  }

  // FIXME: Set BigStack if any stack slot references may be out of range.
  // For now, just conservatively guesstimate based on unscaled indexing
  // range. We'll end up allocating an unnecessary spill slot a lot, but
  // realistically that's not a big deal at this stage of the game.
  // The CSR spill slots have not been allocated yet, so estimateStackSize
  // won't include them.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned CFSize = estimateStackSize(MF) + 8 * (NumGPRSpilled + NumFPRSpilled);
  DEBUG(dbgs() << "Estimated stack frame size: " << CFSize << " bytes.\n");
  bool BigStack = (CFSize >= 256);
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    AFI->setHasStackFrame(true);

  // Estimate if we might need to scavenge a register at some point in order
  // to materialize a stack offset. If so, either spill one additional
  // callee-saved register or reserve a special spill slot to facilitate
  // register scavenging. If we already spilled an extra callee-saved register
  // above to keep the number of spills even, we don't need to do anything else
  // here.
  if (BigStack && !ExtraCSSpill) {

    // If we're adding a register to spill here, we have to add two of them
    // to keep the number of regs to spill even.
    assert(((UnspilledCSGPRs.size() & 1) == 0) && "Odd number of registers!");
    unsigned Count = 0;
    while (!UnspilledCSGPRs.empty() && Count < 2) {
      unsigned Reg = UnspilledCSGPRs.back();
      UnspilledCSGPRs.pop_back();
      DEBUG(dbgs() << "Spilling " << PrintReg(Reg, RegInfo)
                   << " to get a scratch register.\n");
      MRI->setPhysRegUsed(Reg);
      ExtraCSSpill = true;
      ++Count;
    }

    // If we didn't find an extra callee-saved register to spill, create
    // an emergency spill slot.
    if (!ExtraCSSpill) {
      const TargetRegisterClass *RC = &AArch64::GPR64RegClass;
      int FI = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), false);
      RS->addScavengingFrameIndex(FI);
      DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                   << " as the emergency spill slot.\n");
    }
  }
}
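// A standalone sketch of the pairing invariant above: callee-saved registers
// are spilled two at a time so paired save/restore instructions remain
// possible, and an emergency slot is needed only when no pair is available.
// Names are illustrative.
#include <vector>

bool spillScratchPair(std::vector<unsigned> &UnspilledCSGPRs,
                      std::vector<unsigned> &Spilled) {
  unsigned Count = 0;
  while (!UnspilledCSGPRs.empty() && Count < 2) {
    Spilled.push_back(UnspilledCSGPRs.back()); // extra CSR for the scavenger
    UnspilledCSGPRs.pop_back();
    ++Count; // take two to keep the total number of spills even
  }
  return Count != 0; // false -> caller must allocate an emergency spill slot
}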
/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  unsigned ImpReg = 0;

  bool SpillSubRegs = (MI->getOpcode() == TargetOpcode::STATEPOINT ||
                       MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                       MI->getOpcode() == TargetOpcode::STACKMAP);

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i].second;
    assert(MI == Ops[i].first && "Instruction conflict during operand folding");
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }
    // FIXME: Teach targets to deal with subregs.
    if (!SpillSubRegs && MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstrSpan MIS(MI);

  MachineInstr *FoldMI =
                LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
                       : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    unsigned Reg = MO->getReg();
    if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
        MRI.isReserved(Reg)) {
      continue;
    }
    // Skip non-Defs, including undef uses and internal reads.
    if (MO->isUse())
      continue;
    MIBundleOperands::PhysRegInfo RI =
      MIBundleOperands(FoldMI).analyzePhysReg(Reg, &TRI);
    if (RI.Defines)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
    LIS.removePhysRegDefAt(Reg, Idx);
  }

  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineBasicBlock::iterator MII = MIS.begin(), End = MIS.end();
       MII != End; ++MII)
    if (&*MII != FoldMI)
      LIS.InsertMachineInstrInMaps(&*MII);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction.  Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
                                           "folded"));

  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0)
    ++NumSpills;
  else
    ++NumReloads;
  return true;
}
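// A standalone sketch of the trailing-operand strip at the end of
// foldMemoryOperand: walking backward lets the loop erase entries without
// disturbing the indices it has yet to visit, and it stops at the first
// element that no longer qualifies. Names are illustrative.
#include <cstddef>
#include <vector>

void stripTrailing(std::vector<int> &Ops, int Victim) {
  for (std::size_t i = Ops.size(); i; --i) {
    if (Ops[i - 1] < 0) // stand-in for "not an implicit register operand"
      break;            // first non-candidate ends the trailing run
    if (Ops[i - 1] == Victim)
      Ops.erase(Ops.begin() + (i - 1)); // safe: only indices >= i-1 shift
  }
}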
void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating.  This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't get
  // invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);
  
  // Add all nodes in depth first order.
  SmallVector<SDNode*, 64> Worklist;
  SmallPtrSet<SDNode*, 64> Visited;
  Worklist.push_back(DAG->getRoot().getNode());
  Visited.insert(DAG->getRoot().getNode());
  
  while (!Worklist.empty()) {
    SDNode *NI = Worklist.pop_back_val();
    
    // Add all operands to the worklist unless they've already been added.
    for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
      if (Visited.insert(NI->getOperand(i).getNode()))
        Worklist.push_back(NI->getOperand(i).getNode());
  
    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;
    
    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;
    
    SUnit *NodeSUnit = NewSUnit(NI);
    
    // See if anything is flagged to this node; if so, add it to the flagged
    // nodes.  Nodes can have at most one flag input and one flag output.  Flags
    // are required to be the last operand and result of a node.
    
    // Scan up to find flagged preds.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
      N = N->getOperand(N->getNumOperands()-1).getNode();
      assert(N->getNodeId() == -1 && "Node already inserted!");
      N->setNodeId(NodeSUnit->NodeNum);
    }
    
    // Scan down to find any flagged succs.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
      SDValue FlagVal(N, N->getNumValues()-1);
      
      // There are either zero or one users of the Flag result.
      bool HasFlagUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end(); 
           UI != E; ++UI)
        if (FlagVal.isOperandOf(*UI)) {
          HasFlagUse = true;
          assert(N->getNodeId() == -1 && "Node already inserted!");
          N->setNodeId(NodeSUnit->NodeNum);
          N = *UI;
          break;
        }
      if (!HasFlagUse) break;
    }
    
    // If there are flag operands involved, N is now the bottom-most node
    // of the sequence of nodes that are flagged together.
    // Update the SUnit.
    NodeSUnit->setNode(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NodeSUnit->NodeNum);

    // Assign the Latency field of NodeSUnit using target-provided information.
    ComputeLatency(NodeSUnit);
  }
}
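// A standalone sketch of the traversal above: an explicit worklist plus a
// visited set replaces recursion, so stack depth stays bounded on large
// graphs and each node is pushed exactly once. Names are illustrative.
#include <unordered_set>
#include <vector>

void visitAll(const std::vector<std::vector<int>> &Operands, int Root) {
  std::vector<int> Worklist{Root};
  std::unordered_set<int> Visited{Root};
  while (!Worklist.empty()) {
    int N = Worklist.back();
    Worklist.pop_back();
    for (int Op : Operands[N])
      if (Visited.insert(Op).second) // insert() reports whether Op was new
        Worklist.push_back(Op);
    // ... process N here; it is reached exactly once ...
  }
}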
/// PromoteAliasSet - Try to promote memory values to scalars by sinking
/// stores out of the loop and moving loads to before the loop.  We do this by
/// looping over the stores in the loop, looking for stores to Must pointers
/// which are loop invariant.
///
void LICM::PromoteAliasSet(AliasSet &AS) {
  // We can promote this alias set if it has a store, if it is a "Must" alias
  // set, if the pointer is loop invariant, and if we are not eliminating any
  // volatile loads or stores.
  if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
      AS.isVolatile() || !CurLoop->isLoopInvariant(AS.begin()->getValue()))
    return;
  
  assert(!AS.empty() &&
         "Must alias set should have at least one pointer element in it!");
  Value *SomePtr = AS.begin()->getValue();

  // It isn't safe to promote a load/store from the loop if the load/store is
  // conditional.  For example, turning:
  //
  //    for () { if (c) *P += 1; }
  //
  // into:
  //
  //    tmp = *P;  for () { if (c) tmp +=1; } *P = tmp;
  //
  // is not safe, because *P may only be valid to access if 'c' is true.
  // 
  // It is safe to promote P if all uses are direct load/stores and if at
  // least one is guaranteed to be executed.
  bool GuaranteedToExecute = false;
  
  SmallVector<Instruction*, 64> LoopUses;
  SmallPtrSet<Value*, 4> PointerMustAliases;

  // Check that all of the pointers in the alias set have the same type.  We
  // cannot (yet) promote a memory location that is loaded and stored in
  // different sizes.
  for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI) {
    Value *ASIV = ASI->getValue();
    PointerMustAliases.insert(ASIV);
    
    if (SomePtr->getType() != ASIV->getType())
      return;
    
    for (Value::use_iterator UI = ASIV->use_begin(), UE = ASIV->use_end();
         UI != UE; ++UI) {
      // Ignore instructions that are outside the loop.
      Instruction *Use = dyn_cast<Instruction>(*UI);
      if (!Use || !CurLoop->contains(Use))
        continue;
      
      // If there is a non-load/store instruction in the loop, we can't
      // promote it.
      if (isa<LoadInst>(Use))
        assert(!cast<LoadInst>(Use)->isVolatile() && "AST broken");
      else if (isa<StoreInst>(Use)) {
        assert(!cast<StoreInst>(Use)->isVolatile() && "AST broken");
        if (Use->getOperand(0) == ASIV) return;
      } else
        return; // Not a load or store.
      
      if (!GuaranteedToExecute)
        GuaranteedToExecute = isSafeToExecuteUnconditionally(*Use);
      
      LoopUses.push_back(Use);
    }
  }
  
  // If there isn't a guaranteed-to-execute instruction, we can't promote.
  if (!GuaranteedToExecute)
    return;
  
  // Otherwise, this is safe to promote, let's do it!
  DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
               << '\n');
  Changed = true;
  ++NumPromoted;

  // We use the SSAUpdater interface to insert phi nodes as required.
  SmallVector<PHINode*, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);
  
  // It wants to know some value of the same type as what we'll be inserting.
  Value *SomeValue;
  if (isa<LoadInst>(LoopUses[0]))
    SomeValue = LoopUses[0];
  else
    SomeValue = cast<StoreInst>(LoopUses[0])->getOperand(0);
  SSA.Initialize(SomeValue->getType(), SomeValue->getName());

  // First step: bucket up uses of the pointers by the block they occur in.
  // This is important because we have to handle multiple defs/uses in a block
  // ourselves: SSAUpdater is purely for cross-block references.
  // FIXME: Want a TinyVector<Instruction*> since there is usually 0/1 element.
  DenseMap<BasicBlock*, std::vector<Instruction*> > UsesByBlock;
  for (unsigned i = 0, e = LoopUses.size(); i != e; ++i) {
    Instruction *User = LoopUses[i];
    UsesByBlock[User->getParent()].push_back(User);
  }
  
  // Okay, now we can iterate over all the blocks in the loop with uses,
  // processing them.  Keep track of which loads are loading a live-in value.
  SmallVector<LoadInst*, 32> LiveInLoads;
  DenseMap<Value*, Value*> ReplacedLoads;
  
  for (unsigned LoopUse = 0, e = LoopUses.size(); LoopUse != e; ++LoopUse) {
    Instruction *User = LoopUses[LoopUse];
    std::vector<Instruction*> &BlockUses = UsesByBlock[User->getParent()];
    
    // If this block has already been processed, ignore this repeat use.
    if (BlockUses.empty()) continue;
    
    // Okay, this is the first use in the block.  If this block just has a
    // single user in it, we can rewrite it trivially.
    if (BlockUses.size() == 1) {
      // If it is a store, it is a trivial def of the value in the block.
      if (isa<StoreInst>(User)) {
        SSA.AddAvailableValue(User->getParent(),
                              cast<StoreInst>(User)->getOperand(0));
      } else {
        // Otherwise it is a load, queue it to rewrite as a live-in load.
        LiveInLoads.push_back(cast<LoadInst>(User));
      }
      BlockUses.clear();
      continue;
    }
    
    // Otherwise, check to see if this block is all loads.  If so, we can queue
    // them all as live in loads.
    bool HasStore = false;
    for (unsigned i = 0, e = BlockUses.size(); i != e; ++i) {
      if (isa<StoreInst>(BlockUses[i])) {
        HasStore = true;
        break;
      }
    }
    
    if (!HasStore) {
      for (unsigned i = 0, e = BlockUses.size(); i != e; ++i)
        LiveInLoads.push_back(cast<LoadInst>(BlockUses[i]));
      BlockUses.clear();
      continue;
    }

    // Otherwise, we have mixed loads and stores (or just a bunch of stores).
    // Since SSAUpdater is purely for cross-block values, we need to determine
    // the order of these instructions in the block.  If the first use in the
    // block is a load, then it uses the live in value.  The last store defines
    // the live out value.  We handle this by doing a linear scan of the block.
    BasicBlock *BB = User->getParent();
    Value *StoredValue = 0;
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      if (LoadInst *L = dyn_cast<LoadInst>(II)) {
        // If this is a load from an unrelated pointer, ignore it.
        if (!PointerMustAliases.count(L->getOperand(0))) continue;

        // If we haven't seen a store yet, this is a live in use, otherwise
        // use the stored value.
        if (StoredValue) {
          L->replaceAllUsesWith(StoredValue);
          ReplacedLoads[L] = StoredValue;
        } else {
          LiveInLoads.push_back(L);
        }
        continue;
      }
      
      if (StoreInst *S = dyn_cast<StoreInst>(II)) {
        // If this is a store to an unrelated pointer, ignore it.
        if (!PointerMustAliases.count(S->getOperand(1))) continue;

        // Remember that this is the active value in the block.
        StoredValue = S->getOperand(0);
      }
    }
    
    // The last stored value that happened is the live-out for the block.
    assert(StoredValue && "Already checked that there is a store in block");
    SSA.AddAvailableValue(BB, StoredValue);
    BlockUses.clear();
  }
  
  // Now that all the intra-loop values are classified, set up the preheader.
  // It gets a load of the pointer we're promoting, and it is the live-out value
  // from the preheader.
  LoadInst *PreheaderLoad =
      new LoadInst(SomePtr, SomePtr->getName() + ".promoted",
                   Preheader->getTerminator());
  SSA.AddAvailableValue(Preheader, PreheaderLoad);

  // Now that the preheader is good to go, set up the exit blocks.  Each exit
  // block gets a store of the live-out values that feed them.  Since we've
  // already told the SSA updater about the defs in the loop and the preheader
  // definition, it is all set and we can start using it.
  SmallVector<BasicBlock*, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBlock = ExitBlocks[i];
    Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
    Instruction *InsertPos = ExitBlock->getFirstNonPHI();
    new StoreInst(LiveInValue, SomePtr, InsertPos);
  }

  // Okay, now we rewrite all loads that use live-in values in the loop,
  // inserting PHI nodes as necessary.
  for (unsigned i = 0, e = LiveInLoads.size(); i != e; ++i) {
    LoadInst *ALoad = LiveInLoads[i];
    Value *NewVal = SSA.GetValueInMiddleOfBlock(ALoad->getParent());
    ALoad->replaceAllUsesWith(NewVal);
    CurAST->copyValue(ALoad, NewVal);
    ReplacedLoads[ALoad] = NewVal;
  }
  
  // If the preheader load is itself a pointer, we need to tell alias analysis
  // about the new pointer we created in the preheader block and about any PHI
  // nodes that just got inserted.
  if (PreheaderLoad->getType()->isPointerTy()) {
    // Copy any value stored to or loaded from a must-alias of the pointer.
    CurAST->copyValue(SomeValue, PreheaderLoad);
    
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      CurAST->copyValue(SomeValue, NewPHIs[i]);
  }
  
  // Now that everything is rewritten, delete the old instructions from the body
  // of the loop.  They should all be dead now.
  for (unsigned i = 0, e = LoopUses.size(); i != e; ++i) {
    Instruction *User = LoopUses[i];
    
    // If this is a load that still has uses, then the load must have been added
    // as a live value in the SSAUpdate data structure for a block (e.g. because
    // the loaded value was stored later).  In this case, we need to recursively
    // propagate the updates until we get to the real value.
    if (!User->use_empty()) {
      Value *NewVal = ReplacedLoads[User];
      assert(NewVal && "not a replaced load?");
      
      // Propagate down to the ultimate replacee.  The intermediate loads
      // could theoretically already have been deleted, so we don't want to
      // dereference the Value*'s.
      DenseMap<Value*, Value*>::iterator RLI = ReplacedLoads.find(NewVal);
      while (RLI != ReplacedLoads.end()) {
        NewVal = RLI->second;
        RLI = ReplacedLoads.find(NewVal);
      }
      
      User->replaceAllUsesWith(NewVal);
      CurAST->copyValue(User, NewVal);
    }
    
    CurAST->deleteValue(User);
    User->eraseFromParent();
  }
  
  // Whew, we're done!
}
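// A standalone sketch of the bucketing step above: SSAUpdater only resolves
// cross-block values, so uses are first grouped by their block and each block
// is then scanned linearly to order its loads and stores. Ints stand in for
// instructions and blocks; names are illustrative.
#include <map>
#include <utility>
#include <vector>

std::map<int, std::vector<int>>
bucketByBlock(const std::vector<std::pair<int, int>> &Uses) {
  std::map<int, std::vector<int>> UsesByBlock; // block id -> uses in order
  for (const auto &U : Uses)
    UsesByBlock[U.first].push_back(U.second); // preserves within-block order
  return UsesByBlock;
}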
static CXCodeCompleteResults *
clang_codeCompleteAt_Impl(CXTranslationUnit TU, const char *complete_filename,
                          unsigned complete_line, unsigned complete_column,
                          ArrayRef<CXUnsavedFile> unsaved_files,
                          unsigned options) {
  bool IncludeBriefComments = options & CXCodeComplete_IncludeBriefComments;
  bool SkipPreamble = options & CXCodeComplete_SkipPreamble;
  bool IncludeFixIts = options & CXCodeComplete_IncludeCompletionsWithFixIts;

#ifdef UDP_CODE_COMPLETION_LOGGER
#ifdef UDP_CODE_COMPLETION_LOGGER_PORT
  const llvm::TimeRecord &StartTime =  llvm::TimeRecord::getCurrentTime();
#endif
#endif
  bool EnableLogging = getenv("LIBCLANG_CODE_COMPLETION_LOGGING") != nullptr;

  if (cxtu::isNotUsableTU(TU)) {
    LOG_BAD_TU(TU);
    return nullptr;
  }

  ASTUnit *AST = cxtu::getASTUnit(TU);
  if (!AST)
    return nullptr;

  CIndexer *CXXIdx = TU->CIdx;
  if (CXXIdx->isOptEnabled(CXGlobalOpt_ThreadBackgroundPriorityForEditing))
    setThreadBackgroundPriority();

  ASTUnit::ConcurrencyCheck Check(*AST);

  // Perform the remapping of source files.
  SmallVector<ASTUnit::RemappedFile, 4> RemappedFiles;

  for (auto &UF : unsaved_files) {
    std::unique_ptr<llvm::MemoryBuffer> MB =
        llvm::MemoryBuffer::getMemBufferCopy(getContents(UF), UF.Filename);
    RemappedFiles.push_back(std::make_pair(UF.Filename, MB.release()));
  }

  if (EnableLogging) {
    // FIXME: Add logging.
  }

  // Parse the resulting source file to find code-completion results.
  AllocatedCXCodeCompleteResults *Results = new AllocatedCXCodeCompleteResults(
      &AST->getFileManager());
  Results->Results = nullptr;
  Results->NumResults = 0;
  
  // Create a code-completion consumer to capture the results.
  CodeCompleteOptions Opts;
  Opts.IncludeBriefComments = IncludeBriefComments;
  Opts.LoadExternal = !SkipPreamble;
  Opts.IncludeFixIts = IncludeFixIts;
  CaptureCompletionResults Capture(Opts, *Results, &TU);

  // Perform completion.
  std::vector<const char *> CArgs;
  for (const auto &Arg : TU->Arguments)
    CArgs.push_back(Arg.c_str());
  std::string CompletionInvocation =
      llvm::formatv("-code-completion-at={0}:{1}:{2}", complete_filename,
                    complete_line, complete_column)
          .str();
  LibclangInvocationReporter InvocationReporter(
      *CXXIdx, LibclangInvocationReporter::OperationKind::CompletionOperation,
      TU->ParsingOptions, CArgs, CompletionInvocation, unsaved_files);
  AST->CodeComplete(complete_filename, complete_line, complete_column,
                    RemappedFiles, (options & CXCodeComplete_IncludeMacros),
                    (options & CXCodeComplete_IncludeCodePatterns),
                    IncludeBriefComments, Capture,
                    CXXIdx->getPCHContainerOperations(), *Results->Diag,
                    Results->LangOpts, *Results->SourceMgr, *Results->FileMgr,
                    Results->Diagnostics, Results->TemporaryBuffers);

  Results->DiagnosticsWrappers.resize(Results->Diagnostics.size());

  // Keep a reference to the allocator used for cached global completions, so
  // that we can be sure that the memory used by our code completion strings
  // doesn't get freed due to subsequent reparses (while the code completion
  // results are still active).
  Results->CachedCompletionAllocator = AST->getCachedCompletionAllocator();

#ifdef UDP_CODE_COMPLETION_LOGGER
#ifdef UDP_CODE_COMPLETION_LOGGER_PORT
  const llvm::TimeRecord &EndTime =  llvm::TimeRecord::getCurrentTime();
  SmallString<256> LogResult;
  llvm::raw_svector_ostream os(LogResult);

  // Figure out the language and whether or not it uses PCH.
  const char *lang = 0;
  bool usesPCH = false;

  for (std::vector<const char *>::iterator I = CArgs.begin(), E = CArgs.end();
       I != E; ++I) {
    if (*I == 0)
      continue;
    if (strcmp(*I, "-x") == 0) {
      if (I + 1 != E) {
        lang = *(++I);
        continue;
      }
    }
    else if (strcmp(*I, "-include") == 0) {
      if (I+1 != E) {
        const char *arg = *(++I);
        SmallString<512> pchName;
        {
          llvm::raw_svector_ostream os(pchName);
          os << arg << ".pth";
        }
        pchName.push_back('\0');
        struct stat stat_results;
        if (stat(pchName.str().c_str(), &stat_results) == 0)
          usesPCH = true;
        continue;
      }
    }
  }

  os << "{ ";
  os << "\"wall\": " << (EndTime.getWallTime() - StartTime.getWallTime());
  os << ", \"numRes\": " << Results->NumResults;
  os << ", \"diags\": " << Results->Diagnostics.size();
  os << ", \"pch\": " << (usesPCH ? "true" : "false");
  os << ", \"lang\": \"" << (lang ? lang : "<unknown>") << '"';
  const char *name = getlogin();
  os << ", \"user\": \"" << (name ? name : "unknown") << '"';
  os << ", \"clangVer\": \"" << getClangFullVersion() << '"';
  os << " }";

  StringRef res = os.str();
  if (!res.empty()) {
    do {
      // Setup the UDP socket.
      struct sockaddr_in servaddr;
      bzero(&servaddr, sizeof(servaddr));
      servaddr.sin_family = AF_INET;
      servaddr.sin_port = htons(UDP_CODE_COMPLETION_LOGGER_PORT);
      if (inet_pton(AF_INET, UDP_CODE_COMPLETION_LOGGER,
                    &servaddr.sin_addr) <= 0)
        break;

      int sockfd = socket(AF_INET, SOCK_DGRAM, 0);
      if (sockfd < 0)
        break;

      sendto(sockfd, res.data(), res.size(), 0,
             (struct sockaddr *)&servaddr, sizeof(servaddr));
      close(sockfd);
    }
    while (false);
  }
#endif
#endif
  return Results;
}
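// A standalone sketch of the do { ... } while (false) idiom in the logging
// code above: 'break' abandons the whole block at any failed step, giving
// early-exit cleanup without goto or deep nesting. Names are illustrative.
#include <cstdio>

void sendBestEffort(bool HaveAddr, bool HaveSocket) {
  do {
    if (!HaveAddr)
      break; // any failure jumps straight past the rest of the block
    if (!HaveSocket)
      break;
    std::puts("sent");
  } while (false);
  // Execution always resumes here, whether or not the send happened.
}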
bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  MRI = &mf.getRegInfo();
  TRI = MF->getTarget().getRegisterInfo();

  ReservedRegisters = TRI->getReservedRegs(mf);

  unsigned NumRegs = TRI->getNumRegs();
  PhysRegDef  = new MachineInstr*[NumRegs];
  PhysRegUse  = new MachineInstr*[NumRegs];
  PHIVarInfo = new SmallVector<unsigned, 4>[MF->getNumBlockIDs()];
  std::fill(PhysRegDef,  PhysRegDef  + NumRegs, (MachineInstr*)0);
  std::fill(PhysRegUse,  PhysRegUse  + NumRegs, (MachineInstr*)0);
  PHIJoins.clear();

  // Get some space for a respectable number of registers.
  VirtRegInfo.resize(64);

  analyzePHINodes(mf);

  // Calculate live variable information in depth first order on the CFG of the
  // function.  This guarantees that we will see the definition of a virtual
  // register before its uses due to dominance properties of SSA (except for PHI
  // nodes, which are treated as a special case).
  MachineBasicBlock *Entry = MF->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;

  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;

    // Mark live-in registers as live-in.
    SmallVector<unsigned, 4> Defs;
    for (MachineBasicBlock::const_livein_iterator II = MBB->livein_begin(),
           EE = MBB->livein_end(); II != EE; ++II) {
      assert(TargetRegisterInfo::isPhysicalRegister(*II) &&
             "Cannot have a live-in virtual register!");
      HandlePhysRegDef(*II, 0, Defs);
    }

    // Loop over all of the instructions, processing them.
    DistanceMap.clear();
    unsigned Dist = 0;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      MachineInstr *MI = I;
      if (MI->isDebugValue())
        continue;
      DistanceMap.insert(std::make_pair(MI, Dist++));

      // Process all of the operands of the instruction...
      unsigned NumOperandsToProcess = MI->getNumOperands();

      // Unless it is a PHI node.  In this case, ONLY process the DEF, not any
      // of the uses.  They will be handled in other basic blocks.
      if (MI->isPHI())
        NumOperandsToProcess = 1;

      SmallVector<unsigned, 4> UseRegs;
      SmallVector<unsigned, 4> DefRegs;
      for (unsigned i = 0; i != NumOperandsToProcess; ++i) {
        const MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || MO.getReg() == 0)
          continue;
        unsigned MOReg = MO.getReg();
        if (MO.isUse())
          UseRegs.push_back(MOReg);
        if (MO.isDef())
          DefRegs.push_back(MOReg);
      }

      // Process all uses.
      for (unsigned i = 0, e = UseRegs.size(); i != e; ++i) {
        unsigned MOReg = UseRegs[i];
        if (TargetRegisterInfo::isVirtualRegister(MOReg))
          HandleVirtRegUse(MOReg, MBB, MI);
        else if (!ReservedRegisters[MOReg])
          HandlePhysRegUse(MOReg, MI);
      }

      // Process all defs.
      for (unsigned i = 0, e = DefRegs.size(); i != e; ++i) {
        unsigned MOReg = DefRegs[i];
        if (TargetRegisterInfo::isVirtualRegister(MOReg))
          HandleVirtRegDef(MOReg, MI);
        else if (!ReservedRegisters[MOReg])
          HandlePhysRegDef(MOReg, MI, Defs);
      }
      UpdatePhysRegDefs(MI, Defs);
    }

    // Handle any virtual assignments from PHI nodes which might be at the
    // bottom of this basic block.  We check all of our successor blocks to see
    // if they have PHI nodes, and if so, we simulate an assignment at the end
    // of the current block.
    if (!PHIVarInfo[MBB->getNumber()].empty()) {
      SmallVector<unsigned, 4>& VarInfoVec = PHIVarInfo[MBB->getNumber()];

      for (SmallVector<unsigned, 4>::iterator I = VarInfoVec.begin(),
             E = VarInfoVec.end(); I != E; ++I)
        // Mark it alive only in the block we are representing.
        MarkVirtRegAliveInBlock(getVarInfo(*I),MRI->getVRegDef(*I)->getParent(),
                                MBB);
    }

    // Finally, if the last instruction in the block is a return, make sure to
    // mark it as using all of the live-out values in the function.
    if (!MBB->empty() && MBB->back().getDesc().isReturn()) {
      MachineInstr *Ret = &MBB->back();

      for (MachineRegisterInfo::liveout_iterator
           I = MF->getRegInfo().liveout_begin(),
           E = MF->getRegInfo().liveout_end(); I != E; ++I) {
        assert(TargetRegisterInfo::isPhysicalRegister(*I) &&
               "Cannot have a live-out virtual register!");
        HandlePhysRegUse(*I, Ret);

        // Add live-out registers as implicit uses.
        if (!Ret->readsRegister(*I))
          Ret->addOperand(MachineOperand::CreateReg(*I, false, true));
      }
    }

    // Loop over PhysRegDef / PhysRegUse, killing any registers that are
    // available at the end of the basic block.
    for (unsigned i = 0; i != NumRegs; ++i)
      if (PhysRegDef[i] || PhysRegUse[i])
        HandlePhysRegDef(i, 0, Defs);

    std::fill(PhysRegDef,  PhysRegDef  + NumRegs, (MachineInstr*)0);
    std::fill(PhysRegUse,  PhysRegUse  + NumRegs, (MachineInstr*)0);
  }

  // Convert and transfer the dead / killed information we have gathered into
  // VirtRegInfo onto MI's.
  for (unsigned i = 0, e1 = VirtRegInfo.size(); i != e1; ++i)
    for (unsigned j = 0, e2 = VirtRegInfo[i].Kills.size(); j != e2; ++j)
      if (VirtRegInfo[i].Kills[j] ==
          MRI->getVRegDef(i + TargetRegisterInfo::FirstVirtualRegister))
        VirtRegInfo[i]
          .Kills[j]->addRegisterDead(i +
                                     TargetRegisterInfo::FirstVirtualRegister,
                                     TRI);
      else
        VirtRegInfo[i]
          .Kills[j]->addRegisterKilled(i +
                                       TargetRegisterInfo::FirstVirtualRegister,
                                       TRI);

  // Check to make sure there are no unreachable blocks in the MC CFG for the
  // function.  If any are found, it is due to a bug in the instruction
  // selector or some other part of the code generator.
#ifndef NDEBUG
  for(MachineFunction::iterator i = MF->begin(), e = MF->end(); i != e; ++i)
    assert(Visited.count(&*i) != 0 && "unreachable basic block found");
#endif

  delete[] PhysRegDef;
  delete[] PhysRegUse;
  delete[] PHIVarInfo;

  return false;
}
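// A standalone sketch of the final classification above: a recorded kill that
// coincides with the register's own def means the value is dead at its def;
// any other kill marks the last use. Ints stand in for instructions; names
// are illustrative.
#include <vector>

enum class Mark { Dead, Killed };

std::vector<Mark> classifyKills(const std::vector<int> &Kills, int DefInst) {
  std::vector<Mark> Marks;
  Marks.reserve(Kills.size());
  for (int K : Kills)
    Marks.push_back(K == DefInst ? Mark::Dead : Mark::Killed);
  return Marks;
}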
static bool mergeConstants(Module &M) {
  // Find all the globals that are marked "used".  These cannot be merged.
  SmallPtrSet<const GlobalValue*, 8> UsedGlobals;
  FindUsedValues(M.getGlobalVariable("llvm.used"), UsedGlobals);
  FindUsedValues(M.getGlobalVariable("llvm.compiler.used"), UsedGlobals);

  // Map unique constants to globals.
  DenseMap<Constant *, GlobalVariable *> CMap;

  // Replacements - This vector contains a list of replacements to perform.
  SmallVector<std::pair<GlobalVariable*, GlobalVariable*>, 32> Replacements;

  bool MadeChange = false;

  // Iterate constant merging while we are still making progress.  Merging two
  // constants together may allow us to merge other constants together if the
  // second level constants have initializers which point to the globals that
  // were just merged.
  while (true) {
    // First: Find the canonical constants others will be merged with.
    for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
         GVI != E; ) {
      GlobalVariable *GV = &*GVI++;

      // If this GV is dead, remove it.
      GV->removeDeadConstantUsers();
      if (GV->use_empty() && GV->hasLocalLinkage()) {
        GV->eraseFromParent();
        continue;
      }

      // Only process constants with initializers in the default address space.
      if (!GV->isConstant() || !GV->hasDefinitiveInitializer() ||
          GV->getType()->getAddressSpace() != 0 || GV->hasSection() ||
          // Don't touch values marked with attribute(used).
          UsedGlobals.count(GV))
        continue;

      // This transformation is legal for weak ODR globals in the sense it
      // doesn't change semantics, but we really don't want to perform it
      // anyway; it's likely to pessimize code generation, and some tools
      // (like the Darwin linker in cases involving CFString) don't expect it.
      if (GV->isWeakForLinker())
        continue;

      // Don't touch globals with metadata other than !dbg.
      if (hasMetadataOtherThanDebugLoc(GV))
        continue;

      Constant *Init = GV->getInitializer();

      // Check to see if the initializer is already known.
      GlobalVariable *&Slot = CMap[Init];

      // If this is the first constant we find or if the old one is local,
      // replace it with the current one. If the current one is externally
      // visible it cannot be replaced, but it can be the canonical constant
      // we merge with.
      if (!Slot || IsBetterCanonical(*GV, *Slot))
        Slot = GV;
    }

    // Second: identify all globals that can be merged together, filling in
    // the Replacements vector.  We cannot do the replacement in this pass
    // because doing so may cause initializers of other globals to be rewritten,
    // invalidating the Constant* pointers in CMap.
    for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
         GVI != E; ) {
      GlobalVariable *GV = &*GVI++;

      // Only process constants with initializers in the default address space.
      if (!GV->isConstant() || !GV->hasDefinitiveInitializer() ||
          GV->getType()->getAddressSpace() != 0 || GV->hasSection() ||
          // Don't touch values marked with attribute(used).
          UsedGlobals.count(GV))
        continue;

      // We can only replace constant with local linkage.
      if (!GV->hasLocalLinkage())
        continue;

      Constant *Init = GV->getInitializer();

      // Check to see if the initializer is already known.
      GlobalVariable *Slot = CMap[Init];

      if (!Slot || Slot == GV)
        continue;

      if (!Slot->hasGlobalUnnamedAddr() && !GV->hasGlobalUnnamedAddr())
        continue;

      if (hasMetadataOtherThanDebugLoc(GV))
        continue;

      if (!GV->hasGlobalUnnamedAddr())
        Slot->setUnnamedAddr(GlobalValue::UnnamedAddr::None);

      // Make all uses of the duplicate constant use the canonical version.
      Replacements.push_back(std::make_pair(GV, Slot));
    }

    if (Replacements.empty())
      return MadeChange;
    CMap.clear();

    // Now that we have figured out which replacements must be made, do them all
    // now.  This avoids invalidating the pointers in CMap, which are unneeded
    // now.
    for (unsigned i = 0, e = Replacements.size(); i != e; ++i) {
      // Bump the alignment if necessary.
      if (Replacements[i].first->getAlignment() ||
          Replacements[i].second->getAlignment()) {
        Replacements[i].second->setAlignment(
            std::max(getAlignment(Replacements[i].first),
                     getAlignment(Replacements[i].second)));
      }

      copyDebugLocMetadata(Replacements[i].first, Replacements[i].second);

      // Eliminate any uses of the dead global.
      Replacements[i].first->replaceAllUsesWith(Replacements[i].second);

      // Delete the global value from the module.
      assert(Replacements[i].first->hasLocalLinkage() &&
             "Refusing to delete an externally visible global variable.");
      Replacements[i].first->eraseFromParent();
    }

    NumMerged += Replacements.size();
    Replacements.clear();
  }
}
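// A standalone sketch of the two-phase structure above: first choose a
// canonical representative per initializer, then record replacements and
// apply them only after the scan, so rewriting never invalidates the map
// entries mid-walk. Strings stand in for initializers; names are
// illustrative.
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

std::vector<std::pair<int, int>>
planMerges(const std::vector<std::pair<int, std::string>> &Globals) {
  std::unordered_map<std::string, int> Canon; // initializer -> canonical id
  for (const auto &G : Globals)
    Canon.emplace(G.second, G.first); // first occurrence wins in this sketch
  std::vector<std::pair<int, int>> Replacements;
  for (const auto &G : Globals) {
    int Slot = Canon[G.second];
    if (Slot != G.first)
      Replacements.emplace_back(G.first, Slot); // applied after the scan
  }
  return Replacements;
}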
/// Given an instruction in the loop, check to see if it has any uses that are
/// outside the current loop.  If so, insert LCSSA PHI nodes and rewrite the
/// uses.
static bool processInstruction(Loop &L, Instruction &Inst, DominatorTree &DT,
                               const SmallVectorImpl<BasicBlock *> &ExitBlocks,
                               PredIteratorCache &PredCache, LoopInfo *LI) {
  SmallVector<Use *, 16> UsesToRewrite;

  // Tokens cannot be used in PHI nodes, so we skip over them.
  // We can run into tokens which are live out of a loop with catchswitch
  // instructions in Windows EH if the catchswitch has one catchpad which
  // is inside the loop and another which is not.
  if (Inst.getType()->isTokenTy())
    return false;

  BasicBlock *InstBB = Inst.getParent();

  for (Use &U : Inst.uses()) {
    Instruction *User = cast<Instruction>(U.getUser());
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User))
      UserBB = PN->getIncomingBlock(U);

    if (InstBB != UserBB && !L.contains(UserBB))
      UsesToRewrite.push_back(&U);
  }

  // If there are no uses outside the loop, exit with no change.
  if (UsesToRewrite.empty())
    return false;

  ++NumLCSSA; // We are applying the transformation

  // Invoke instructions are special in that their result value is not available
  // along their unwind edge. The code below tests to see whether DomBB
  // dominates the value, so adjust DomBB to the normal destination block,
  // which is effectively where the value is first usable.
  BasicBlock *DomBB = Inst.getParent();
  if (InvokeInst *Inv = dyn_cast<InvokeInst>(&Inst))
    DomBB = Inv->getNormalDest();

  DomTreeNode *DomNode = DT.getNode(DomBB);

  SmallVector<PHINode *, 16> AddedPHIs;
  SmallVector<PHINode *, 8> PostProcessPHIs;

  SSAUpdater SSAUpdate;
  SSAUpdate.Initialize(Inst.getType(), Inst.getName());

  // Insert the LCSSA phi's into all of the exit blocks dominated by the
  // value, and add them to the Phi's map.
  for (BasicBlock *ExitBB : ExitBlocks) {
    if (!DT.dominates(DomNode, DT.getNode(ExitBB)))
      continue;

    // If we already inserted something for this BB, don't reprocess it.
    if (SSAUpdate.HasValueForBlock(ExitBB))
      continue;

    PHINode *PN = PHINode::Create(Inst.getType(), PredCache.size(ExitBB),
                                  Inst.getName() + ".lcssa", &ExitBB->front());

    // Add inputs from inside the loop for this PHI.
    for (BasicBlock *Pred : PredCache.get(ExitBB)) {
      PN->addIncoming(&Inst, Pred);

      // If the exit block has a predecessor not within the loop, arrange for
      // the incoming value use corresponding to that predecessor to be
      // rewritten in terms of a different LCSSA PHI.
      if (!L.contains(Pred))
        UsesToRewrite.push_back(
            &PN->getOperandUse(PN->getOperandNumForIncomingValue(
                 PN->getNumIncomingValues() - 1)));
    }

    AddedPHIs.push_back(PN);

    // Remember that this phi makes the value alive in this block.
    SSAUpdate.AddAvailableValue(ExitBB, PN);

    // LoopSimplify might fail to simplify some loops (e.g. when indirect
    // branches are involved). In such situations, it might happen that an exit
    // for Loop L1 is the header of a disjoint Loop L2. Thus, when we create
    // PHIs in such an exit block, we are also inserting PHIs into L2's header.
    // This could break LCSSA form for L2 because these inserted PHIs can also
    // have uses outside of L2. Remember all PHIs in such situations so we can
    // revisit them later on. FIXME: Remove this once indirectbr support in
    // LoopSimplify is improved.
    if (auto *OtherLoop = LI->getLoopFor(ExitBB))
      if (!L.contains(OtherLoop))
        PostProcessPHIs.push_back(PN);
  }

  // Rewrite all uses outside the loop in terms of the new PHIs we just
  // inserted.
  for (Use *UseToRewrite : UsesToRewrite) {
    // If this use is in an exit block, rewrite to use the newly inserted PHI.
    // This is required for correctness because SSAUpdate doesn't handle uses in
    // the same block.  It assumes the PHI we inserted is at the end of the
    // block.
    Instruction *User = cast<Instruction>(UseToRewrite->getUser());
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User))
      UserBB = PN->getIncomingBlock(*UseToRewrite);

    if (isa<PHINode>(UserBB->begin()) && isExitBlock(UserBB, ExitBlocks)) {
      // Tell the VHs that the uses changed. This updates SCEV's caches.
      if (UseToRewrite->get()->hasValueHandle())
        ValueHandleBase::ValueIsRAUWd(*UseToRewrite, &UserBB->front());
      UseToRewrite->set(&UserBB->front());
      continue;
    }

    // Otherwise, do full PHI insertion.
    SSAUpdate.RewriteUse(*UseToRewrite);
  }

  // Post process PHI instructions that were inserted into another disjoint loop
  // and update their exits properly.
  for (auto *I : PostProcessPHIs) {
    if (I->use_empty())
      continue;

    BasicBlock *PHIBB = I->getParent();
    Loop *OtherLoop = LI->getLoopFor(PHIBB);
    SmallVector<BasicBlock *, 8> EBs;
    OtherLoop->getExitBlocks(EBs);
    if (EBs.empty())
      continue;

    // Recurse and re-process each PHI instruction. FIXME: we should really
    // convert this entire thing to a worklist approach where we process a
    // vector of instructions...
    processInstruction(*OtherLoop, *I, DT, EBs, PredCache, LI);
  }

  // Remove PHI nodes that did not have any uses rewritten.
  for (PHINode *PN : AddedPHIs)
    if (PN->use_empty())
      PN->eraseFromParent();

  return true;
}
Example #13
0
void WinException::emitCXXFrameHandler3Table(const MachineFunction *MF) {
  const Function *F = MF->getFunction();
  auto &OS = *Asm->OutStreamer;
  const WinEHFuncInfo &FuncInfo = *MF->getWinEHFuncInfo();

  StringRef FuncLinkageName = GlobalValue::getRealLinkageName(F->getName());

  SmallVector<std::pair<const MCExpr *, int>, 4> IPToStateTable;
  MCSymbol *FuncInfoXData = nullptr;
  if (shouldEmitPersonality) {
    // If we're 64-bit, emit a pointer to the C++ EH data, and build a map from
    // IPs to state numbers.
    FuncInfoXData =
        Asm->OutContext.getOrCreateSymbol(Twine("$cppxdata$", FuncLinkageName));
    computeIP2StateTable(MF, FuncInfo, IPToStateTable);
  } else {
    FuncInfoXData = Asm->OutContext.getOrCreateLSDASymbol(FuncLinkageName);
  }

  int UnwindHelpOffset = 0;
  if (Asm->MAI->usesWindowsCFI())
    UnwindHelpOffset =
        getFrameIndexOffset(FuncInfo.UnwindHelpFrameIdx, FuncInfo);
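  // On targets using Windows CFI, the runtime tracks unwind progress in the
  // UnwindHelp stack slot, so its frame offset is recorded in the xdata below.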

  MCSymbol *UnwindMapXData = nullptr;
  MCSymbol *TryBlockMapXData = nullptr;
  MCSymbol *IPToStateXData = nullptr;
  if (!FuncInfo.CxxUnwindMap.empty())
    UnwindMapXData = Asm->OutContext.getOrCreateSymbol(
        Twine("$stateUnwindMap$", FuncLinkageName));
  if (!FuncInfo.TryBlockMap.empty())
    TryBlockMapXData =
        Asm->OutContext.getOrCreateSymbol(Twine("$tryMap$", FuncLinkageName));
  if (!IPToStateTable.empty())
    IPToStateXData =
        Asm->OutContext.getOrCreateSymbol(Twine("$ip2state$", FuncLinkageName));

  bool VerboseAsm = OS.isVerboseAsm();
  auto AddComment = [&](const Twine &Comment) {
    if (VerboseAsm)
      OS.AddComment(Comment);
  };

  // FuncInfo {
  //   uint32_t           MagicNumber
  //   int32_t            MaxState;
  //   UnwindMapEntry    *UnwindMap;
  //   uint32_t           NumTryBlocks;
  //   TryBlockMapEntry  *TryBlockMap;
  //   uint32_t           IPMapEntries; // always 0 for x86
  //   IPToStateMapEntry *IPToStateMap; // always 0 for x86
  //   uint32_t           UnwindHelp;   // non-x86 only
  //   ESTypeList        *ESTypeList;
  //   int32_t            EHFlags;
  // }
  // EHFlags & 1 -> Synchronous exceptions only, no async exceptions.
  // EHFlags & 2 -> ???
  // EHFlags & 4 -> The function is noexcept(true), unwinding can't continue.
  OS.EmitValueToAlignment(4);
  OS.EmitLabel(FuncInfoXData);

  AddComment("MagicNumber");
  OS.EmitIntValue(0x19930522, 4);

  AddComment("MaxState");
  OS.EmitIntValue(FuncInfo.CxxUnwindMap.size(), 4);

  AddComment("UnwindMap");
  OS.EmitValue(create32bitRef(UnwindMapXData), 4);

  AddComment("NumTryBlocks");
  OS.EmitIntValue(FuncInfo.TryBlockMap.size(), 4);

  AddComment("TryBlockMap");
  OS.EmitValue(create32bitRef(TryBlockMapXData), 4);

  AddComment("IPMapEntries");
  OS.EmitIntValue(IPToStateTable.size(), 4);

  AddComment("IPToStateXData");
  OS.EmitValue(create32bitRef(IPToStateXData), 4);

  if (Asm->MAI->usesWindowsCFI()) {
    AddComment("UnwindHelp");
    OS.EmitIntValue(UnwindHelpOffset, 4);
  }

  AddComment("ESTypeList");
  OS.EmitIntValue(0, 4);

  AddComment("EHFlags");
  OS.EmitIntValue(1, 4);

  // UnwindMapEntry {
  //   int32_t ToState;
  //   void  (*Action)();
  // };
  if (UnwindMapXData) {
    OS.EmitLabel(UnwindMapXData);
    for (const CxxUnwindMapEntry &UME : FuncInfo.CxxUnwindMap) {
      MCSymbol *CleanupSym =
          getMCSymbolForMBB(Asm, UME.Cleanup.dyn_cast<MachineBasicBlock *>());
      AddComment("ToState");
      OS.EmitIntValue(UME.ToState, 4);

      AddComment("Action");
      OS.EmitValue(create32bitRef(CleanupSym), 4);
    }
  }

  // TryBlockMap {
  //   int32_t      TryLow;
  //   int32_t      TryHigh;
  //   int32_t      CatchHigh;
  //   int32_t      NumCatches;
  //   HandlerType *HandlerArray;
  // };
  if (TryBlockMapXData) {
    OS.EmitLabel(TryBlockMapXData);
    SmallVector<MCSymbol *, 1> HandlerMaps;
    for (size_t I = 0, E = FuncInfo.TryBlockMap.size(); I != E; ++I) {
      const WinEHTryBlockMapEntry &TBME = FuncInfo.TryBlockMap[I];

      MCSymbol *HandlerMapXData = nullptr;
      if (!TBME.HandlerArray.empty())
        HandlerMapXData =
            Asm->OutContext.getOrCreateSymbol(Twine("$handlerMap$")
                                                  .concat(Twine(I))
                                                  .concat("$")
                                                  .concat(FuncLinkageName));
      HandlerMaps.push_back(HandlerMapXData);

      // TBMEs should form intervals.
      assert(0 <= TBME.TryLow && "bad trymap interval");
      assert(TBME.TryLow <= TBME.TryHigh && "bad trymap interval");
      assert(TBME.TryHigh < TBME.CatchHigh && "bad trymap interval");
      assert(TBME.CatchHigh < int(FuncInfo.CxxUnwindMap.size()) &&
             "bad trymap interval");

      AddComment("TryLow");
      OS.EmitIntValue(TBME.TryLow, 4);

      AddComment("TryHigh");
      OS.EmitIntValue(TBME.TryHigh, 4);

      AddComment("CatchHigh");
      OS.EmitIntValue(TBME.CatchHigh, 4);

      AddComment("NumCatches");
      OS.EmitIntValue(TBME.HandlerArray.size(), 4);

      AddComment("HandlerArray");
      OS.EmitValue(create32bitRef(HandlerMapXData), 4);
    }

    // All funclets use the same parent frame offset currently.
    unsigned ParentFrameOffset = 0;
    if (shouldEmitPersonality) {
      const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
      ParentFrameOffset = TFI->getWinEHParentFrameOffset(*MF);
    }

    for (size_t I = 0, E = FuncInfo.TryBlockMap.size(); I != E; ++I) {
      const WinEHTryBlockMapEntry &TBME = FuncInfo.TryBlockMap[I];
      MCSymbol *HandlerMapXData = HandlerMaps[I];
      if (!HandlerMapXData)
        continue;
      // HandlerType {
      //   int32_t         Adjectives;
      //   TypeDescriptor *Type;
      //   int32_t         CatchObjOffset;
      //   void          (*Handler)();
      //   int32_t         ParentFrameOffset; // x64 only
      // };
      OS.EmitLabel(HandlerMapXData);
      for (const WinEHHandlerType &HT : TBME.HandlerArray) {
        // Get the frame escape label with the offset of the catch object. If
        // the index is INT_MAX, then there is no catch object, and we should
        // emit an offset of zero, indicating that no copy will occur.
        const MCExpr *FrameAllocOffsetRef = nullptr;
        if (HT.CatchObj.FrameIndex != INT_MAX) {
          int Offset = getFrameIndexOffset(HT.CatchObj.FrameIndex, FuncInfo);
          FrameAllocOffsetRef = MCConstantExpr::create(Offset, Asm->OutContext);
        } else {
          FrameAllocOffsetRef = MCConstantExpr::create(0, Asm->OutContext);
        }

        MCSymbol *HandlerSym =
            getMCSymbolForMBB(Asm, HT.Handler.dyn_cast<MachineBasicBlock *>());

        AddComment("Adjectives");
        OS.EmitIntValue(HT.Adjectives, 4);

        AddComment("Type");
        OS.EmitValue(create32bitRef(HT.TypeDescriptor), 4);

        AddComment("CatchObjOffset");
        OS.EmitValue(FrameAllocOffsetRef, 4);

        AddComment("Handler");
        OS.EmitValue(create32bitRef(HandlerSym), 4);

        if (shouldEmitPersonality) {
          AddComment("ParentFrameOffset");
          OS.EmitIntValue(ParentFrameOffset, 4);
        }
      }
    }
  }

  // IPToStateMapEntry {
  //   void   *IP;
  //   int32_t State;
  // };
  if (IPToStateXData) {
    OS.EmitLabel(IPToStateXData);
    for (auto &IPStatePair : IPToStateTable) {
      AddComment("IP");
      OS.EmitValue(IPStatePair.first, 4);
      AddComment("ToState");
      OS.EmitIntValue(IPStatePair.second, 4);
    }
  }
}
Example #14
0
void WinException::emitCLRExceptionTable(const MachineFunction *MF) {
  // CLR EH "states" are really just IDs that identify handlers/funclets;
  // states, handlers, and funclets all have 1:1 mappings between them, and a
  // handler/funclet's "state" is its index in the ClrEHUnwindMap.
  MCStreamer &OS = *Asm->OutStreamer;
  const WinEHFuncInfo &FuncInfo = *MF->getWinEHFuncInfo();
  MCSymbol *FuncBeginSym = Asm->getFunctionBegin();
  MCSymbol *FuncEndSym = Asm->getFunctionEnd();

  // A ClrClause describes a protected region.
  struct ClrClause {
    const MCSymbol *StartLabel; // Start of protected region
    const MCSymbol *EndLabel;   // End of protected region
    int State;          // Index of handler protecting the protected region
    int EnclosingState; // Index of funclet enclosing the protected region
  };
  SmallVector<ClrClause, 8> Clauses;

  // Build a map from handler MBBs to their corresponding states (i.e. their
  // indices in the ClrEHUnwindMap).
  int NumStates = FuncInfo.ClrEHUnwindMap.size();
  assert(NumStates > 0 && "Don't need exception table!");
  DenseMap<const MachineBasicBlock *, int> HandlerStates;
  for (int State = 0; State < NumStates; ++State) {
    MachineBasicBlock *HandlerBlock =
        FuncInfo.ClrEHUnwindMap[State].Handler.get<MachineBasicBlock *>();
    HandlerStates[HandlerBlock] = State;
    // Use this loop through all handlers to verify our assumption (used in
    // the MinEnclosingState computation) that enclosing funclets have lower
    // state numbers than their enclosed funclets.
    assert(FuncInfo.ClrEHUnwindMap[State].HandlerParentState < State &&
           "ill-formed state numbering");
  }
  // Map the main function to the NullState.
  HandlerStates[&MF->front()] = NullState;

  // Write out a sentinel indicating the end of the standard (Windows) xdata
  // and the start of the additional (CLR) info.
  OS.EmitIntValue(0xffffffff, 4);
  // Write out the number of funclets
  OS.EmitIntValue(NumStates, 4);

  // Walk the machine blocks/instrs, computing and emitting a few things:
  // 1. Emit a list of the offsets to each handler entry, in lexical order.
  // 2. Compute a map (EndSymbolMap) from each funclet to the symbol at its end.
  // 3. Compute the list of ClrClauses, in the required order (inner before
  //    outer, earlier before later; the order by which a forward scan with
  //    early termination will find the innermost enclosing clause covering
  //    a given address).
  // 4. A map (MinClauseMap) from each handler index to the index of the
  //    outermost funclet/function which contains a try clause targeting the
  //    key handler.  This will be used to determine IsDuplicate-ness when
  //    emitting ClrClauses.  The NullState value is used to indicate that the
  //    top-level function contains a try clause targeting the key handler.
  // HandlerStack is a stack of (PendingStartLabel, PendingState) pairs for
  // try regions we entered before entering the PendingState try but which
  // we haven't yet exited.
  SmallVector<std::pair<const MCSymbol *, int>, 4> HandlerStack;
  // EndSymbolMap and MinClauseMap are maps described above.
  std::unique_ptr<MCSymbol *[]> EndSymbolMap(new MCSymbol *[NumStates]);
  SmallVector<int, 4> MinClauseMap((size_t)NumStates, NumStates);
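  // MinClauseMap entries start at NumStates, which is greater than any real
  // funclet state, so the first enclosing funclet recorded for a handler
  // always lowers the minimum.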

  // Visit the root function and each funclet.
  for (MachineFunction::const_iterator FuncletStart = MF->begin(),
                                       FuncletEnd = MF->begin(),
                                       End = MF->end();
       FuncletStart != End; FuncletStart = FuncletEnd) {
    int FuncletState = HandlerStates[&*FuncletStart];
    // Find the end of the funclet
    MCSymbol *EndSymbol = FuncEndSym;
    while (++FuncletEnd != End) {
      if (FuncletEnd->isEHFuncletEntry()) {
        EndSymbol = getMCSymbolForMBB(Asm, &*FuncletEnd);
        break;
      }
    }
    // Emit the function/funclet end and, if this is a funclet (and not the
    // root function), record it in the EndSymbolMap.
    OS.EmitValue(getOffset(EndSymbol, FuncBeginSym), 4);
    if (FuncletState != NullState) {
      // Record the end of the handler.
      EndSymbolMap[FuncletState] = EndSymbol;
    }

    // Walk the state changes in this function/funclet and compute its clauses.
    // Funclets always start in the null state.
    const MCSymbol *CurrentStartLabel = nullptr;
    int CurrentState = NullState;
    assert(HandlerStack.empty());
    for (const auto &StateChange :
         InvokeStateChangeIterator::range(FuncInfo, FuncletStart, FuncletEnd)) {
      // Close any try regions we're not still under
      int StillPendingState =
          getTryAncestor(FuncInfo, CurrentState, StateChange.NewState);
      while (CurrentState != StillPendingState) {
        assert(CurrentState != NullState &&
               "Failed to find still-pending state!");
        // Close the pending clause
        Clauses.push_back({CurrentStartLabel, StateChange.PreviousEndLabel,
                           CurrentState, FuncletState});
        // Now the next-outer try region is current
        CurrentState = FuncInfo.ClrEHUnwindMap[CurrentState].TryParentState;
        // Pop the new start label from the handler stack if we've exited all
        // inner try regions of the corresponding try region.
        if (HandlerStack.back().second == CurrentState)
          CurrentStartLabel = HandlerStack.pop_back_val().first;
      }

      if (StateChange.NewState != CurrentState) {
        // For each clause we're starting, update the MinClauseMap so we can
        // know which is the topmost funclet containing a clause targeting
        // it.
        for (int EnteredState = StateChange.NewState;
             EnteredState != CurrentState;
             EnteredState =
                 FuncInfo.ClrEHUnwindMap[EnteredState].TryParentState) {
          int &MinEnclosingState = MinClauseMap[EnteredState];
          if (FuncletState < MinEnclosingState)
            MinEnclosingState = FuncletState;
        }
        // Save the previous start label/state on the stack and update to
        // the newly-current start label/state.
        HandlerStack.emplace_back(CurrentStartLabel, CurrentState);
        CurrentStartLabel = StateChange.NewStartLabel;
        CurrentState = StateChange.NewState;
      }
    }
    assert(HandlerStack.empty());
  }

  // Now emit the clause info, starting with the number of clauses.
  OS.EmitIntValue(Clauses.size(), 4);
  for (ClrClause &Clause : Clauses) {
    // Emit a CORINFO_EH_CLAUSE :
    /*
      struct CORINFO_EH_CLAUSE
      {
          CORINFO_EH_CLAUSE_FLAGS Flags;         // actually a CorExceptionFlag
          DWORD                   TryOffset;
          DWORD                   TryLength;     // actually TryEndOffset
          DWORD                   HandlerOffset;
          DWORD                   HandlerLength; // actually HandlerEndOffset
          union
          {
              DWORD               ClassToken;   // use for catch clauses
              DWORD               FilterOffset; // use for filter clauses
          };
      };

      enum CORINFO_EH_CLAUSE_FLAGS
      {
          CORINFO_EH_CLAUSE_NONE    = 0,
          CORINFO_EH_CLAUSE_FILTER  = 0x0001, // This clause is for a filter
          CORINFO_EH_CLAUSE_FINALLY = 0x0002, // This clause is a finally clause
          CORINFO_EH_CLAUSE_FAULT   = 0x0004, // This clause is a fault clause
      };
      typedef enum CorExceptionFlag
      {
          COR_ILEXCEPTION_CLAUSE_NONE,
          COR_ILEXCEPTION_CLAUSE_FILTER  = 0x0001, // This is a filter clause
          COR_ILEXCEPTION_CLAUSE_FINALLY = 0x0002, // This is a finally clause
          COR_ILEXCEPTION_CLAUSE_FAULT = 0x0004,   // This is a fault clause
          COR_ILEXCEPTION_CLAUSE_DUPLICATED = 0x0008, // duplicated clause. This
                                                      // clause was duplicated
                                                      // to a funclet which was
                                                      // pulled out of line
      } CorExceptionFlag;
    */
    // Add 1 to the start/end of the EH clause; the IP associated with a
    // call when the runtime does its scan is the IP of the next instruction
    // (the one to which control will return after the call), so we need
    // to add 1 to the end of the clause to cover that offset.  We also add
    // 1 to the start of the clause to make sure that the ranges reported
    // for all clauses are disjoint.  Note that we'll need some additional
    // logic when machine traps are supported, since in that case the IP
    // that the runtime uses is the offset of the faulting instruction
    // itself; if such an instruction immediately follows a call but the
    // two belong to different clauses, we'll need to insert a nop between
    // them so the runtime can distinguish the point to which the call will
    // return from the point at which the fault occurs.

    const MCExpr *ClauseBegin =
        getOffsetPlusOne(Clause.StartLabel, FuncBeginSym);
    const MCExpr *ClauseEnd = getOffsetPlusOne(Clause.EndLabel, FuncBeginSym);

    const ClrEHUnwindMapEntry &Entry = FuncInfo.ClrEHUnwindMap[Clause.State];
    MachineBasicBlock *HandlerBlock = Entry.Handler.get<MachineBasicBlock *>();
    MCSymbol *BeginSym = getMCSymbolForMBB(Asm, HandlerBlock);
    const MCExpr *HandlerBegin = getOffset(BeginSym, FuncBeginSym);
    MCSymbol *EndSym = EndSymbolMap[Clause.State];
    const MCExpr *HandlerEnd = getOffset(EndSym, FuncBeginSym);

    uint32_t Flags = 0;
    switch (Entry.HandlerType) {
    case ClrHandlerType::Catch:
      // Leaving bits 0-2 clear indicates catch.
      break;
    case ClrHandlerType::Filter:
      Flags |= 1;
      break;
    case ClrHandlerType::Finally:
      Flags |= 2;
      break;
    case ClrHandlerType::Fault:
      Flags |= 4;
      break;
    }
    if (Clause.EnclosingState != MinClauseMap[Clause.State]) {
      // This is a "duplicate" clause; the handler needs to be entered from a
      // frame above the one holding the invoke.
      assert(Clause.EnclosingState > MinClauseMap[Clause.State]);
      Flags |= 8;
    }
    OS.EmitIntValue(Flags, 4);

    // Write the clause start/end
    OS.EmitValue(ClauseBegin, 4);
    OS.EmitValue(ClauseEnd, 4);

    // Write out the handler start/end
    OS.EmitValue(HandlerBegin, 4);
    OS.EmitValue(HandlerEnd, 4);

    // Write out the type token or filter offset
    assert(Entry.HandlerType != ClrHandlerType::Filter && "NYI: filters");
    OS.EmitIntValue(Entry.TypeToken, 4);
  }
}
Example #15
0
Module* makeLLVMModule() {
 // Module Construction
 Module* mod = new Module("back.ll", getGlobalContext());
 mod->setDataLayout("0x1076710");
 mod->setTargetTriple("x86_64-unknown-linux-gnu");

 // Type Definitions
 ArrayType* ArrayTy_0 = ArrayType::get(IntegerType::get(mod->getContext(), 8), 17);

 PointerType* PointerTy_1 = PointerType::get(ArrayTy_0, 0);

 std::vector<Type*>FuncTy_2_args;
 PointerType* PointerTy_3 = PointerType::get(IntegerType::get(mod->getContext(), 8), 0);

 FuncTy_2_args.push_back(PointerTy_3);
 FunctionType* FuncTy_2 = FunctionType::get(
  /*Result=*/Type::getVoidTy(mod->getContext()),
  /*Params=*/FuncTy_2_args,
  /*isVarArg=*/false);

 PointerType* PointerTy_4 = PointerType::get(PointerTy_3, 0);

 std::vector<Type*>FuncTy_5_args;
 FunctionType* FuncTy_5 = FunctionType::get(
  /*Result=*/IntegerType::get(mod->getContext(), 32),
  /*Params=*/FuncTy_5_args,
  /*isVarArg=*/false);

 PointerType* PointerTy_6 = PointerType::get(FuncTy_2, 0);
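 // Note: PointerTy_1, PointerTy_4 and PointerTy_6 are emitted by the
 // generator but never used below.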


 // Function Declarations

 Function* func__Z15DoStuffWithCStrPKc = mod->getFunction("_Z15DoStuffWithCStrPKc");
 if (!func__Z15DoStuffWithCStrPKc) {
 func__Z15DoStuffWithCStrPKc = Function::Create(
  /*Type=*/FuncTy_2,
  /*Linkage=*/GlobalValue::ExternalLinkage,
  /*Name=*/"_Z15DoStuffWithCStrPKc", mod);
 func__Z15DoStuffWithCStrPKc->setCallingConv(CallingConv::C);
 }
 AttributeSet func__Z15DoStuffWithCStrPKc_PAL;
 {
  SmallVector<AttributeSet, 4> Attrs;
  AttributeSet PAS;
   {
    AttrBuilder B;
    B.addAttribute(Attribute::NoUnwind);
    B.addAttribute(Attribute::UWTable);
    PAS = AttributeSet::get(mod->getContext(), ~0U, B);
   }

  Attrs.push_back(PAS);
  func__Z15DoStuffWithCStrPKc_PAL = AttributeSet::get(mod->getContext(), Attrs);

 }
 func__Z15DoStuffWithCStrPKc->setAttributes(func__Z15DoStuffWithCStrPKc_PAL);

 Function* func_main = mod->getFunction("main");
 if (!func_main) {
 func_main = Function::Create(
  /*Type=*/FuncTy_5,
  /*Linkage=*/GlobalValue::ExternalLinkage,
  /*Name=*/"main", mod);
 func_main->setCallingConv(CallingConv::C);
 }
 AttributeSet func_main_PAL;
 {
  SmallVector<AttributeSet, 4> Attrs;
  AttributeSet PAS;
   {
    AttrBuilder B;
    B.addAttribute(Attribute::NoUnwind);
    B.addAttribute(Attribute::UWTable);
    PAS = AttributeSet::get(mod->getContext(), ~0U, B);
   }

  Attrs.push_back(PAS);
  func_main_PAL = AttributeSet::get(mod->getContext(), Attrs);

 }
 func_main->setAttributes(func_main_PAL);

 // Global Variable Declarations


 GlobalVariable* gvar_array__str = new GlobalVariable(/*Module=*/*mod,
 /*Type=*/ArrayTy_0,
 /*isConstant=*/true,
 /*Linkage=*/GlobalValue::PrivateLinkage,
 /*Initializer=*/0, // has initializer, specified below
 /*Name=*/".str");
 gvar_array__str->setAlignment(1);

 // Constant Definitions
 Constant *const_array_7 = ConstantDataArray::getString(mod->getContext(), "This is a string", true);
 ConstantInt* const_int32_8 = ConstantInt::get(mod->getContext(), APInt(32, StringRef("1"), 10));
 std::vector<Constant*> const_ptr_9_indices;
 ConstantInt* const_int32_10 = ConstantInt::get(mod->getContext(), APInt(32, StringRef("0"), 10));
 const_ptr_9_indices.push_back(const_int32_10);
 const_ptr_9_indices.push_back(const_int32_10);
 Constant* const_ptr_9 = ConstantExpr::getGetElementPtr(gvar_array__str, const_ptr_9_indices);
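 // const_ptr_9 is the i8* address of the first character of @.str,
 // equivalent to getelementptr ([17 x i8]* @.str, i32 0, i32 0).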

 // Global Variable Definitions
 gvar_array__str->setInitializer(const_array_7);

 // Function Definitions

 // Function: _Z15DoStuffWithCStrPKc (func__Z15DoStuffWithCStrPKc)
 {
  Function::arg_iterator args = func__Z15DoStuffWithCStrPKc->arg_begin();
  Value* ptr_msg = args++;
  ptr_msg->setName("msg");

  BasicBlock* label_11 = BasicBlock::Create(mod->getContext(), "",func__Z15DoStuffWithCStrPKc,0);

  // Block  (label_11)
  AllocaInst* ptr_12 = new AllocaInst(PointerTy_3, "", label_11);
  ptr_12->setAlignment(8);
  StoreInst* void_13 = new StoreInst(ptr_msg, ptr_12, false, label_11);
  void_13->setAlignment(8);
  ReturnInst::Create(mod->getContext(), label_11);

 }

 // Function: main (func_main)
 {

  BasicBlock* label_15 = BasicBlock::Create(mod->getContext(), "",func_main,0);

  // Block  (label_15)
  CallInst* void_16 = CallInst::Create(func__Z15DoStuffWithCStrPKc, const_ptr_9, "", label_15);
  void_16->setCallingConv(CallingConv::C);
  void_16->setTailCall(false);
  AttributeSet void_16_PAL;
  void_16->setAttributes(void_16_PAL);

  ReturnInst::Create(mod->getContext(), const_int32_10, label_15);

 }

 return mod;
}
Example #16
0
bool SwiftStackPromotion::runOnFunction(Function &F) {
  bool Changed = false;
  Constant *allocFunc = nullptr;
  Constant *initFunc = nullptr;
  int maxSize = LimitOpt;
  Module *M = F.getParent();
  const DataLayout &DL = M->getDataLayout();
  IntegerType *AllocType = nullptr;
  IntegerType *IntType = nullptr;

  SmallVector<CallInst *, 8> BufferAllocs;
  SmallPtrSet<CallInst *, 8> PromotedAllocs;
  SmallVector<CallInst *, 8> BufferDeallocs;
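  // BufferAllocs and BufferDeallocs collect the runtime calls found by the
  // scan below; PromotedAllocs records which allocations became stack
  // allocations so that matching deallocations can be lowered accordingly.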

  // Search for allocation- and deallocation-calls in the function.
  for (BasicBlock &BB : F) {
    for (auto Iter = BB.begin(); Iter != BB.end(); ) {
      Instruction *I = &*Iter;
      Iter++;

      if (auto *AI = dyn_cast<AllocaInst>(I)) {
        int Size = 1;
        if (auto *SizeConst = dyn_cast<ConstantInt>(AI->getArraySize()))
          Size = SizeConst->getValue().getSExtValue();

        // Count the existing alloca sizes against the limit.
        maxSize -= DL.getTypeAllocSize(AI->getAllocatedType()) * Size;
      }

      auto *CI = dyn_cast<CallInst>(I);
      if (!CI)
        continue;

      Function *Callee = CI->getCalledFunction();
      if (!Callee)
        continue;

      if (Callee->getName() == "swift_bufferAllocateOnStack") {
        BufferAllocs.push_back(CI);
      } else if (Callee->getName() == "swift_bufferDeallocateFromStack") {
        BufferDeallocs.push_back(CI);
      }
    }
  }

  // First handle allocations.
  for (CallInst *CI : BufferAllocs) {
    Function *Callee = CI->getCalledFunction();
    assert(Callee);
    unsigned align = 0;
    if (int size = canPromote(CI, align, maxSize)) {
      maxSize -= size;
      if (!AllocType) {
        // Create the swift_initStackObject function and all required types.
        AllocType = IntegerType::get(M->getContext(), 8);
        IntType = IntegerType::get(M->getContext(), 32);
        auto *OrigFT = Callee->getFunctionType();
        auto *HeapObjTy = OrigFT->getReturnType();
        auto *MetaDataTy = OrigFT->getParamType(0);
        auto *NewFTy = FunctionType::get(HeapObjTy,
                                         {MetaDataTy, HeapObjTy},
                                         false);
        initFunc = M->getOrInsertFunction("swift_initStackObject", NewFTy);
      }
      // Replace the allocation call with an alloca.
      Value *AllocA = new AllocaInst(AllocType, ConstantInt::get(IntType, size),
                                     align, "buffer", &*F.front().begin());
      // And initialize it with a call to swift_initStackObject.
      IRBuilder<> B(CI);
      Value *casted = B.CreateBitCast(AllocA, CI->getType());
      CallInst *initCall = B.CreateCall(initFunc,
                                        {CI->getArgOperand(0), casted});
      CI->replaceAllUsesWith(initCall);
      CI->eraseFromParent();
      PromotedAllocs.insert(initCall);
      ++NumBufferAllocsPromoted;
    } else {
      // We don't do stack promotion. Replace the call with a call to the
      // regular swift_bufferAllocate.
      if (!allocFunc) {
        allocFunc = M->getOrInsertFunction("swift_bufferAllocate",
                                           Callee->getFunctionType());
      }
      CI->setCalledFunction(allocFunc);
    }
    Changed = true;
  }

  // After we made the decision for all allocations we can handle the
  // deallocations.
  for (CallInst *CI : BufferDeallocs) {
    CallInst *Alloc = dyn_cast<CallInst>(CI->getArgOperand(0));
    assert(Alloc && "alloc buffer obfuscated");
    if (PromotedAllocs.count(Alloc)) {

      removeRedundantRTCalls(CI);

      IRBuilder<> B(CI);
      // This has two purposes:
      // 1. Tell LLVM the lifetime of the allocated stack memory.
      // 2. Avoid tail-call optimization, which may convert the call to the
      //    final release into a jump; that jump would happen only after the
      //    stack frame has been destroyed.
      B.CreateLifetimeEnd(CI->getArgOperand(0));
    }
    // Other than inserting the end-of-lifetime, the deallocation is a no-op.
    CI->eraseFromParent();
    Changed = true;
  }
  return Changed;
}
Example #17
0
// The CC users in CCUsers are testing the result of a comparison of some
// value X against zero and we know that any CC value produced by MI
// would also reflect the value of X.  Try to adjust CCUsers so that
// they test the result of MI directly, returning true on success.
// Leave everything unchanged on failure.
bool SystemZElimCompare::adjustCCMasksForInstr(
    MachineInstr &MI, MachineInstr &Compare,
    SmallVectorImpl<MachineInstr *> &CCUsers) {
  int Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = TII->get(Opcode);
  unsigned MIFlags = Desc.TSFlags;

  // See which compare-style condition codes are available.
  unsigned ReusableCCMask = SystemZII::getCompareZeroCCMask(MIFlags);

  // For unsigned comparisons with zero, only equality makes sense.
  unsigned CompareFlags = Compare.getDesc().TSFlags;
  if (CompareFlags & SystemZII::IsLogical)
    ReusableCCMask &= SystemZ::CCMASK_CMP_EQ;

  if (ReusableCCMask == 0)
    return false;

  unsigned CCValues = SystemZII::getCCValues(MIFlags);
  assert((ReusableCCMask & ~CCValues) == 0 && "Invalid CCValues");

  // Now check whether these flags are enough for all users.
  SmallVector<MachineOperand *, 4> AlterMasks;
  for (unsigned int I = 0, E = CCUsers.size(); I != E; ++I) {
    MachineInstr *MI = CCUsers[I];

    // Fail if this isn't a use of CC that we understand.
    unsigned Flags = MI->getDesc().TSFlags;
    unsigned FirstOpNum;
    if (Flags & SystemZII::CCMaskFirst)
      FirstOpNum = 0;
    else if (Flags & SystemZII::CCMaskLast)
      FirstOpNum = MI->getNumExplicitOperands() - 2;
    else
      return false;

    // Check whether the instruction predicate treats all CC values
    // outside of ReusableCCMask in the same way.  In that case it
    // doesn't matter what those CC values mean.
    unsigned CCValid = MI->getOperand(FirstOpNum).getImm();
    unsigned CCMask = MI->getOperand(FirstOpNum + 1).getImm();
    unsigned OutValid = ~ReusableCCMask & CCValid;
    unsigned OutMask = ~ReusableCCMask & CCMask;
    if (OutMask != 0 && OutMask != OutValid)
      return false;

    AlterMasks.push_back(&MI->getOperand(FirstOpNum));
    AlterMasks.push_back(&MI->getOperand(FirstOpNum + 1));
  }

  // All users are OK.  Adjust the masks for MI.
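  // Widen each CC-valid mask to all values MI can produce.  If a user also
  // accepted CC values outside ReusableCCMask (the check above proved it then
  // accepted all of them), let it accept all newly-valid outside values too.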
  for (unsigned I = 0, E = AlterMasks.size(); I != E; I += 2) {
    AlterMasks[I]->setImm(CCValues);
    unsigned CCMask = AlterMasks[I + 1]->getImm();
    if (CCMask & ~ReusableCCMask)
      AlterMasks[I + 1]->setImm((CCMask & ReusableCCMask) |
                                (CCValues & ~ReusableCCMask));
  }

  // CC is now live after MI.
  int CCDef = MI.findRegisterDefOperandIdx(SystemZ::CC, false, true, TRI);
  assert(CCDef >= 0 && "Couldn't find CC set");
  MI.getOperand(CCDef).setIsDead(false);

  // Clear any intervening kills of CC.
  MachineBasicBlock::iterator MBBI = MI, MBBE = Compare;
  for (++MBBI; MBBI != MBBE; ++MBBI)
    MBBI->clearRegisterKills(SystemZ::CC, TRI);

  return true;
}
Example #18
0
void GlobalModuleIndexBuilder::writeIndex(llvm::BitstreamWriter &Stream) {
  using namespace llvm;
  
  // Emit the file header.
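  // The four bytes spell 'BCGI', the magic number identifying a global
  // module index file.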
  Stream.Emit((unsigned)'B', 8);
  Stream.Emit((unsigned)'C', 8);
  Stream.Emit((unsigned)'G', 8);
  Stream.Emit((unsigned)'I', 8);

  // Write the block-info block, which describes the records in this bitcode
  // file.
  emitBlockInfoBlock(Stream);

  Stream.EnterSubblock(GLOBAL_INDEX_BLOCK_ID, 3);

  // Write the metadata.
  SmallVector<uint64_t, 2> Record;
  Record.push_back(CurrentVersion);
  Stream.EmitRecord(INDEX_METADATA, Record);

  // Write the set of known module files.
  for (ModuleFilesMap::iterator M = ModuleFiles.begin(),
                                MEnd = ModuleFiles.end();
       M != MEnd; ++M) {
    Record.clear();
    Record.push_back(M->second.ID);
    Record.push_back(M->first->getSize());
    Record.push_back(M->first->getModificationTime());

    // File name
    StringRef Name(M->first->getName());
    Record.push_back(Name.size());
    Record.append(Name.begin(), Name.end());

    // Dependencies
    Record.push_back(M->second.Dependencies.size());
    Record.append(M->second.Dependencies.begin(), M->second.Dependencies.end());
    Stream.EmitRecord(MODULE, Record);
  }

  // Write the identifier -> module file mapping.
  {
    llvm::OnDiskChainedHashTableGenerator<IdentifierIndexWriterTrait> Generator;
    IdentifierIndexWriterTrait Trait;

    // Populate the hash table.
    for (InterestingIdentifierMap::iterator I = InterestingIdentifiers.begin(),
                                            IEnd = InterestingIdentifiers.end();
         I != IEnd; ++I) {
      Generator.insert(I->first(), I->second, Trait);
    }
    
    // Create the on-disk hash table in a buffer.
    SmallString<4096> IdentifierTable;
    uint32_t BucketOffset;
    {
      using namespace llvm::support;
      llvm::raw_svector_ostream Out(IdentifierTable);
      // Make sure that no bucket is at offset 0
      endian::Writer<little>(Out).write<uint32_t>(0);
      BucketOffset = Generator.Emit(Out, Trait);
    }

    // Create a blob abbreviation
    BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
    Abbrev->Add(BitCodeAbbrevOp(IDENTIFIER_INDEX));
    Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
    Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
    unsigned IDTableAbbrev = Stream.EmitAbbrev(Abbrev);

    // Write the identifier table
    Record.clear();
    Record.push_back(IDENTIFIER_INDEX);
    Record.push_back(BucketOffset);
    Stream.EmitRecordWithBlob(IDTableAbbrev, Record, IdentifierTable.str());
  }

  Stream.ExitBlock();
}
Example #19
0
/// traceSiblingValue - Trace a value that is about to be spilled back to the
/// real defining instructions by looking through sibling copies. Always stay
/// within the range of OrigVNI so the registers are known to carry the same
/// value.
///
/// Determine if the value is defined by all reloads, so spilling isn't
/// necessary - the value is already in the stack slot.
///
/// Return a defining instruction that may be a candidate for rematerialization.
///
MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
                                               VNInfo *OrigVNI) {
  // Check if a cached value already exists.
  SibValueMap::iterator SVI;
  bool Inserted;
  std::tie(SVI, Inserted) =
    SibValues.insert(std::make_pair(UseVNI, SibValueInfo(UseReg, UseVNI)));
  if (!Inserted) {
    DEBUG(dbgs() << "Cached value " << PrintReg(UseReg) << ':'
                 << UseVNI->id << '@' << UseVNI->def << ' ' << SVI->second);
    return SVI->second.DefMI;
  }

  DEBUG(dbgs() << "Tracing value " << PrintReg(UseReg) << ':'
               << UseVNI->id << '@' << UseVNI->def << '\n');

  // List of (Reg, VNI) that have been inserted into SibValues, but need to be
  // processed.
  SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(UseReg, UseVNI));

  LiveInterval &OrigLI = LIS.getInterval(Original);
  do {
    unsigned Reg;
    VNInfo *VNI;
    std::tie(Reg, VNI) = WorkList.pop_back_val();
    DEBUG(dbgs() << "  " << PrintReg(Reg) << ':' << VNI->id << '@' << VNI->def
                 << ":\t");

    // First check if this value has already been computed.
    SVI = SibValues.find(VNI);
    assert(SVI != SibValues.end() && "Missing SibValues entry");

    // Trace through PHI-defs created by live range splitting.
    if (VNI->isPHIDef()) {
      // Stop at original PHIs.  We don't know the value at the
      // predecessors. Look up the VNInfo for the current definition
      // in OrigLI, to properly determine whether or not this phi was
      // added by splitting.
      if (VNI->def == OrigLI.getVNInfoAt(VNI->def)->def) {
        DEBUG(dbgs() << "orig phi value\n");
        SVI->second.DefByOrigPHI = true;
        SVI->second.AllDefsAreReloads = false;
        propagateSiblingValue(SVI);
        continue;
      }

      // This is a PHI inserted by live range splitting.  We could trace the
      // live-out value from predecessor blocks, but that search can be very
      // expensive if there are many predecessors and many more PHIs as
      // generated by tail-dup when it sees an indirectbr.  Instead, look at
      // all the non-PHI defs that have the same value as OrigVNI.  They must
      // jointly dominate VNI->def.  This is not optimal since VNI may actually
      // be jointly dominated by a smaller subset of defs, so there is a chance
      // we will miss an AllDefsAreReloads optimization.

      // Separate all values dominated by OrigVNI into PHIs and non-PHIs.
      SmallVector<VNInfo*, 8> PHIs, NonPHIs;
      LiveInterval &LI = LIS.getInterval(Reg);

      for (LiveInterval::vni_iterator VI = LI.vni_begin(), VE = LI.vni_end();
           VI != VE; ++VI) {
        VNInfo *VNI2 = *VI;
        if (VNI2->isUnused())
          continue;
        if (!OrigLI.containsOneValue() &&
            OrigLI.getVNInfoAt(VNI2->def) != OrigVNI)
          continue;
        if (VNI2->isPHIDef() && VNI2->def != OrigVNI->def)
          PHIs.push_back(VNI2);
        else
          NonPHIs.push_back(VNI2);
      }
      DEBUG(dbgs() << "split phi value, checking " << PHIs.size()
                   << " phi-defs, and " << NonPHIs.size()
                   << " non-phi/orig defs\n");

      // Create entries for all the PHIs.  Don't add them to the worklist, we
      // are processing all of them in one go here.
      for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
        SibValues.insert(std::make_pair(PHIs[i], SibValueInfo(Reg, PHIs[i])));

      // Add every PHI as a dependent of all the non-PHIs.
      for (unsigned i = 0, e = NonPHIs.size(); i != e; ++i) {
        VNInfo *NonPHI = NonPHIs[i];
        // Known value? Try an insertion.
        std::tie(SVI, Inserted) =
          SibValues.insert(std::make_pair(NonPHI, SibValueInfo(Reg, NonPHI)));
        // Add all the PHIs as dependents of NonPHI.
        SVI->second.Deps.insert(SVI->second.Deps.end(), PHIs.begin(),
                                PHIs.end());
        // This is the first time we see NonPHI, add it to the worklist.
        if (Inserted)
          WorkList.push_back(std::make_pair(Reg, NonPHI));
        else
          // Propagate to all inserted PHIs, not just VNI.
          propagateSiblingValue(SVI);
      }

      // Next work list item.
      continue;
    }

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Missing def");

    // Trace through sibling copies.
    if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
      if (isSibling(SrcReg)) {
        LiveInterval &SrcLI = LIS.getInterval(SrcReg);
        LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
        assert(SrcQ.valueIn() && "Copy from non-existing value");
        // Check if this COPY kills its source.
        SVI->second.KillsSource = SrcQ.isKill();
        VNInfo *SrcVNI = SrcQ.valueIn();
        DEBUG(dbgs() << "copy of " << PrintReg(SrcReg) << ':'
                     << SrcVNI->id << '@' << SrcVNI->def
                     << " kill=" << unsigned(SVI->second.KillsSource) << '\n');
        // Known sibling source value? Try an insertion.
        std::tie(SVI, Inserted) = SibValues.insert(
            std::make_pair(SrcVNI, SibValueInfo(SrcReg, SrcVNI)));
        // This is the first time we see Src, add it to the worklist.
        if (Inserted)
          WorkList.push_back(std::make_pair(SrcReg, SrcVNI));
        propagateSiblingValue(SVI, VNI);
        // Next work list item.
        continue;
      }
    }

    // Track reachable reloads.
    SVI->second.DefMI = MI;
    SVI->second.SpillMBB = MI->getParent();
    int FI;
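    // isLoadFromStackSlot returns the destination register (and sets FI), so
    // this matches only a plain reload of our own stack slot into Reg.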
    if (Reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) {
      DEBUG(dbgs() << "reload\n");
      propagateSiblingValue(SVI);
      // Next work list item.
      continue;
    }

    // Potential remat candidate.
    DEBUG(dbgs() << "def " << *MI);
    SVI->second.AllDefsAreReloads = false;
    propagateSiblingValue(SVI);
  } while (!WorkList.empty());

  // Look up the value we were looking for.  We already did this lookup at the
  // top of the function, but SibValues may have been invalidated.
  SVI = SibValues.find(UseVNI);
  assert(SVI != SibValues.end() && "Didn't compute requested info");
  DEBUG(dbgs() << "  traced to:\t" << SVI->second);
  return SVI->second.DefMI;
}
Example #20
0
/// RemoveBlockIfDead - If the specified block is dead, remove it, update loop
/// information, and remove any dead successors it has.
///
void LoopUnswitch::RemoveBlockIfDead(BasicBlock *BB,
                                     std::vector<Instruction*> &Worklist,
                                     Loop *L) {
  if (pred_begin(BB) != pred_end(BB)) {
    // This block isn't dead, since an edge to BB was just removed; see if
    // there are any easy simplifications we can do now.
    if (BasicBlock *Pred = BB->getSinglePredecessor()) {
      // If it has one pred, fold phi nodes in BB.
      while (isa<PHINode>(BB->begin()))
        ReplaceUsesOfWith(BB->begin(), 
                          cast<PHINode>(BB->begin())->getIncomingValue(0), 
                          Worklist, L, LPM);
      
      // If this is the header of a loop and the only pred is the latch, we now
      // have an unreachable loop.
      if (Loop *L = LI->getLoopFor(BB))
        if (loopHeader == BB && L->contains(Pred)) {
          // Remove the branch from the latch to the header block, this makes
          // the header dead, which will make the latch dead (because the header
          // dominates the latch).
          LPM->deleteSimpleAnalysisValue(Pred->getTerminator(), L);
          Pred->getTerminator()->eraseFromParent();
          new UnreachableInst(BB->getContext(), Pred);
          
          // The loop is now broken, remove it from LI.
          RemoveLoopFromHierarchy(L);
          
          // Reprocess the header, which now IS dead.
          RemoveBlockIfDead(BB, Worklist, L);
          return;
        }
      
      // If the pred ends in an unconditional branch, add that branch to the
      // worklist so that the two blocks will get merged.
      if (BranchInst *BI = dyn_cast<BranchInst>(Pred->getTerminator()))
        if (BI->isUnconditional())
          Worklist.push_back(BI);
    }
    return;
  }

  DEBUG(dbgs() << "Nuking dead block: " << *BB);
  
  // Remove the instructions in the basic block from the worklist.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    RemoveFromWorklist(I, Worklist);
    
    // Anything that uses the instructions in this basic block should have its
    // uses replaced with undef. If I is not of void type, replace all of its
    // uses with undef; this allows value handles and custom metadata to
    // adjust themselves.
    if (!I->getType()->isVoidTy())
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
  }
  
  // If this is the edge to the header block for a loop, remove the loop and
  // promote all subloops.
  if (Loop *BBLoop = LI->getLoopFor(BB)) {
    if (BBLoop->getLoopLatch() == BB) {
      RemoveLoopFromHierarchy(BBLoop);
      if (currentLoop == BBLoop) {
        currentLoop = 0;
        redoLoop = false;
      }
    }
  }

  // Remove the block from the loop info, which removes it from any loops it
  // was in.
  LI->removeBlock(BB);
  
  
  // Remove phi node entries in successors for this block.
  TerminatorInst *TI = BB->getTerminator();
  SmallVector<BasicBlock*, 4> Succs;
  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
    Succs.push_back(TI->getSuccessor(i));
    TI->getSuccessor(i)->removePredecessor(BB);
  }
  
  // Unique the successors: sort them and remove duplicate entries.
  array_pod_sort(Succs.begin(), Succs.end());
  Succs.erase(std::unique(Succs.begin(), Succs.end()), Succs.end());
  
  // Remove the basic block, including all of the instructions contained in it.
  LPM->deleteSimpleAnalysisValue(BB, L);  
  BB->eraseFromParent();
  // Remove successor blocks here that are not dead, so that we know we only
  // have dead blocks in this list.  Nondead blocks have a way of becoming dead,
  // then getting removed before we revisit them, which is badness.
  //
  for (unsigned i = 0; i != Succs.size(); ++i)
    if (pred_begin(Succs[i]) != pred_end(Succs[i])) {
      // One exception is loop headers.  If this block was the preheader for a
      // loop, then we DO want to visit the loop so the loop gets deleted.
      // We know that if the successor is a loop header, that this loop had to
      // be the preheader: the case where this was the latch block was handled
      // above and headers can only have two predecessors.
      if (!LI->isLoopHeader(Succs[i])) {
        Succs.erase(Succs.begin()+i);
        --i;
      }
    }
  
  for (unsigned i = 0, e = Succs.size(); i != e; ++i)
    RemoveBlockIfDead(Succs[i], Worklist, L);
}
Example #21
0
unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
                                         unsigned char* StartFunction,
                                         unsigned char* EndFunction) const {
  assert(MMI && "MachineModuleInfo not registered!");

  // Map all labels and get rid of any dead landing pads.
  MMI->TidyLandingPads(JCE->getLabelLocations());

  const std::vector<const GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
  const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
  const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
  if (PadInfos.empty()) return 0;

  // Sort the landing pads in order of their type ids.  This is used to fold
  // duplicate actions.
  SmallVector<const LandingPadInfo *, 64> LandingPads;
  LandingPads.reserve(PadInfos.size());
  for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
    LandingPads.push_back(&PadInfos[i]);
  std::sort(LandingPads.begin(), LandingPads.end(), PadLT);

  // Negative type ids index into FilterIds, positive type ids index into
  // TypeInfos.  The value written for a positive type id is just the type
  // id itself.  For a negative type id, however, the value written is the
  // (negative) byte offset of the corresponding FilterIds entry.  The byte
  // offset is usually equal to the type id, because the FilterIds entries
  // are written using a variable width encoding which outputs one byte per
  // entry as long as the value written is not too large, but can differ.
  // This kind of complication does not occur for positive type ids because
  // type infos are output using a fixed width encoding.
  // FilterOffsets[i] holds the byte offset corresponding to FilterIds[i].
  SmallVector<int, 16> FilterOffsets;
  FilterOffsets.reserve(FilterIds.size());
  int Offset = -1;
  for(std::vector<unsigned>::const_iterator I = FilterIds.begin(),
    E = FilterIds.end(); I != E; ++I) {
    FilterOffsets.push_back(Offset);
    Offset -= MCAsmInfo::getULEB128Size(*I);
  }

  // Compute the actions table and gather the first action index for each
  // landing pad site.
  SmallVector<ActionEntry, 32> Actions;
  SmallVector<unsigned, 64> FirstActions;
  FirstActions.reserve(LandingPads.size());

  int FirstAction = 0;
  unsigned SizeActions = 0;
  for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
    const LandingPadInfo *LP = LandingPads[i];
    const std::vector<int> &TypeIds = LP->TypeIds;
    const unsigned NumShared = i ? SharedTypeIds(LP, LandingPads[i-1]) : 0;
    unsigned SizeSiteActions = 0;

    if (NumShared < TypeIds.size()) {
      unsigned SizeAction = 0;
      ActionEntry *PrevAction = 0;

      if (NumShared) {
        const unsigned SizePrevIds = LandingPads[i-1]->TypeIds.size();
        assert(Actions.size());
        PrevAction = &Actions.back();
        SizeAction = MCAsmInfo::getSLEB128Size(PrevAction->NextAction) +
          MCAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
        for (unsigned j = NumShared; j != SizePrevIds; ++j) {
          SizeAction -= MCAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
          SizeAction += -PrevAction->NextAction;
          PrevAction = PrevAction->Previous;
        }
      }

      // Compute the actions.
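      // Each action record is a (type id value, next-action link) pair of
      // SLEB128s; the link is a self-relative byte offset back to the record
      // pushed just before it for this site, with 0 terminating the chain.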
      for (unsigned I = NumShared, M = TypeIds.size(); I != M; ++I) {
        int TypeID = TypeIds[I];
        assert(-1-TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
        int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
        unsigned SizeTypeID = MCAsmInfo::getSLEB128Size(ValueForTypeID);

        int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
        SizeAction = SizeTypeID + MCAsmInfo::getSLEB128Size(NextAction);
        SizeSiteActions += SizeAction;

        ActionEntry Action = {ValueForTypeID, NextAction, PrevAction};
        Actions.push_back(Action);

        PrevAction = &Actions.back();
      }

      // Record the first action of the landing pad site.
      FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
    } // else identical - re-use previous FirstAction

    FirstActions.push_back(FirstAction);

    // Compute this site's contribution to the total size.
    SizeActions += SizeSiteActions;
  }

  // Compute the call-site table.  Entries must be ordered by address.
  SmallVector<CallSiteEntry, 64> CallSites;

  RangeMapType PadMap;
  for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
    const LandingPadInfo *LandingPad = LandingPads[i];
    for (unsigned j=0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
      MCSymbol *BeginLabel = LandingPad->BeginLabels[j];
      assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!");
      PadRange P = { i, j };
      PadMap[BeginLabel] = P;
    }
  }

  bool MayThrow = false;
  MCSymbol *LastLabel = 0;
  for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
        I != E; ++I) {
    for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
          MI != E; ++MI) {
      if (!MI->isLabel()) {
        MayThrow |= MI->isCall();
        continue;
      }

      MCSymbol *BeginLabel = MI->getOperand(0).getMCSymbol();
      assert(BeginLabel && "Invalid label!");

      if (BeginLabel == LastLabel)
        MayThrow = false;

      RangeMapType::iterator L = PadMap.find(BeginLabel);

      if (L == PadMap.end())
        continue;

      PadRange P = L->second;
      const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];

      assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
              "Inconsistent landing pad map!");

      // If some instruction between the previous try-range and this one may
      // throw, create a call-site entry with no landing pad for the region
      // between the try-ranges.
      if (MayThrow) {
        CallSiteEntry Site = {LastLabel, BeginLabel, 0, 0};
        CallSites.push_back(Site);
      }

      LastLabel = LandingPad->EndLabels[P.RangeIndex];
      CallSiteEntry Site = {BeginLabel, LastLabel,
        LandingPad->LandingPadLabel, FirstActions[P.PadIndex]};

      assert(Site.BeginLabel && Site.EndLabel && Site.PadLabel &&
              "Invalid landing pad!");

      // Try to merge with the previous call-site.
      if (CallSites.size()) {
        CallSiteEntry &Prev = CallSites.back();
        if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
          // Extend the range of the previous entry.
          Prev.EndLabel = Site.EndLabel;
          continue;
        }
      }

      // Otherwise, create a new call-site.
      CallSites.push_back(Site);
    }
  }
  // If some instruction between the previous try-range and the end of the
  // function may throw, create a call-site entry with no landing pad for the
  // region following the try-range.
  if (MayThrow) {
    CallSiteEntry Site = {LastLabel, 0, 0, 0};
    CallSites.push_back(Site);
  }

  // Final tallies.
  unsigned SizeSites = CallSites.size() * (sizeof(int32_t) + // Site start.
                                            sizeof(int32_t) + // Site length.
                                            sizeof(int32_t)); // Landing pad.
  for (unsigned i = 0, e = CallSites.size(); i < e; ++i)
    SizeSites += MCAsmInfo::getULEB128Size(CallSites[i].Action);

  unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize();

  unsigned TypeOffset = sizeof(int8_t) + // Call site format
                        // Call-site table length
                        MCAsmInfo::getULEB128Size(SizeSites) +
                        SizeSites + SizeActions + SizeTypes;

  // Begin the exception table.
  JCE->emitAlignmentWithFill(4, 0);
  // Asm->EOL("Padding");

  unsigned char* DwarfExceptionTable = (unsigned char*)JCE->getCurrentPCValue();

  // Emit the header.
  JCE->emitByte(dwarf::DW_EH_PE_omit);
  // Asm->EOL("LPStart format (DW_EH_PE_omit)");
  JCE->emitByte(dwarf::DW_EH_PE_absptr);
  // Asm->EOL("TType format (DW_EH_PE_absptr)");
  JCE->emitULEB128Bytes(TypeOffset);
  // Asm->EOL("TType base offset");
  JCE->emitByte(dwarf::DW_EH_PE_udata4);
  // Asm->EOL("Call site format (DW_EH_PE_udata4)");
  JCE->emitULEB128Bytes(SizeSites);
  // Asm->EOL("Call-site table length");

  // Emit the landing pad site information.
  for (unsigned i = 0; i < CallSites.size(); ++i) {
    CallSiteEntry &S = CallSites[i];
    intptr_t BeginLabelPtr = 0;
    intptr_t EndLabelPtr = 0;

    if (!S.BeginLabel) {
      BeginLabelPtr = (intptr_t)StartFunction;
      JCE->emitInt32(0);
    } else {
      BeginLabelPtr = JCE->getLabelAddress(S.BeginLabel);
      JCE->emitInt32(BeginLabelPtr - (intptr_t)StartFunction);
    }

    // Asm->EOL("Region start");

    if (!S.EndLabel)
      EndLabelPtr = (intptr_t)EndFunction;
    else
      EndLabelPtr = JCE->getLabelAddress(S.EndLabel);

    JCE->emitInt32(EndLabelPtr - BeginLabelPtr);
    //Asm->EOL("Region length");

    if (!S.PadLabel) {
      JCE->emitInt32(0);
    } else {
      unsigned PadLabelPtr = JCE->getLabelAddress(S.PadLabel);
      JCE->emitInt32(PadLabelPtr - (intptr_t)StartFunction);
    }
    // Asm->EOL("Landing pad");

    JCE->emitULEB128Bytes(S.Action);
    // Asm->EOL("Action");
  }

  // Emit the actions.
  for (unsigned I = 0, N = Actions.size(); I != N; ++I) {
    ActionEntry &Action = Actions[I];

    JCE->emitSLEB128Bytes(Action.ValueForTypeID);
    //Asm->EOL("TypeInfo index");
    JCE->emitSLEB128Bytes(Action.NextAction);
    //Asm->EOL("Next action");
  }

  // Emit the type ids.
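  // The table is emitted in reverse so that type id 1 ends up last, closest
  // to the filter table; positive type ids index backwards from its end.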
  for (unsigned M = TypeInfos.size(); M; --M) {
    const GlobalVariable *GV = TypeInfos[M - 1];

    if (GV) {
      if (TD->getPointerSize() == sizeof(int32_t))
        JCE->emitInt32((intptr_t)Jit.getOrEmitGlobalVariable(GV));
      else
        JCE->emitInt64((intptr_t)Jit.getOrEmitGlobalVariable(GV));
    } else {
      if (TD->getPointerSize() == sizeof(int32_t))
        JCE->emitInt32(0);
      else
        JCE->emitInt64(0);
    }
    // Asm->EOL("TypeInfo");
  }

  // Emit the filter typeids.
  for (unsigned j = 0, M = FilterIds.size(); j < M; ++j) {
    unsigned TypeID = FilterIds[j];
    JCE->emitULEB128Bytes(TypeID);
    //Asm->EOL("Filter TypeInfo index");
  }

  JCE->emitAlignmentWithFill(4, 0);

  return DwarfExceptionTable;
}
Example #22
0
/// ParseDirectiveInsn
/// ::= .insn [ format, encoding, (operands (, operands)*) ]
bool SystemZAsmParser::ParseDirectiveInsn(SMLoc L) {
  MCAsmParser &Parser = getParser();

  // Expect instruction format as identifier.
  StringRef Format;
  SMLoc ErrorLoc = Parser.getTok().getLoc();
  if (Parser.parseIdentifier(Format))
    return Error(ErrorLoc, "expected instruction format");

  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 8> Operands;

  // Find entry for this format in InsnMatchTable.
  auto EntryRange =
    std::equal_range(std::begin(InsnMatchTable), std::end(InsnMatchTable),
                     Format, CompareInsn());

  // If first == second, couldn't find a match in the table.
  if (EntryRange.first == EntryRange.second)
    return Error(ErrorLoc, "unrecognized format");

  struct InsnMatchEntry *Entry = EntryRange.first;

  // Format should match from equal_range.
  assert(Entry->Format == Format);

  // Parse the following operands using the table's information.
  for (int i = 0; i < Entry->NumOperands; i++) {
    MatchClassKind Kind = Entry->OperandKinds[i];

    SMLoc StartLoc = Parser.getTok().getLoc();

    // Always expect commas as separators for operands.
    if (getLexer().isNot(AsmToken::Comma))
      return Error(StartLoc, "unexpected token in directive");
    Lex();

    // Parse operands.
    OperandMatchResultTy ResTy;
    if (Kind == MCK_AnyReg)
      ResTy = parseAnyReg(Operands);
    else if (Kind == MCK_BDXAddr64Disp12 || Kind == MCK_BDXAddr64Disp20)
      ResTy = parseBDXAddr64(Operands);
    else if (Kind == MCK_BDAddr64Disp12 || Kind == MCK_BDAddr64Disp20)
      ResTy = parseBDAddr64(Operands);
    else if (Kind == MCK_PCRel32)
      ResTy = parsePCRel32(Operands);
    else if (Kind == MCK_PCRel16)
      ResTy = parsePCRel16(Operands);
    else {
      // The only remaining operand kind is an immediate.
      const MCExpr *Expr;
      SMLoc StartLoc = Parser.getTok().getLoc();

      // Expect immediate expression.
      if (Parser.parseExpression(Expr))
        return Error(StartLoc, "unexpected token in directive");

      SMLoc EndLoc =
        SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);

      Operands.push_back(SystemZOperand::createImm(Expr, StartLoc, EndLoc));
      ResTy = MatchOperand_Success;
    }

    if (ResTy != MatchOperand_Success)
      return true;
  }

  // Build the instruction with the parsed operands.
  MCInst Inst = MCInstBuilder(Entry->Opcode);

  for (size_t i = 0; i < Operands.size(); i++) {
    MCParsedAsmOperand &Operand = *Operands[i];
    MatchClassKind Kind = Entry->OperandKinds[i];

    // Verify operand.
    unsigned Res = validateOperandClass(Operand, Kind);
    if (Res != Match_Success)
      return Error(Operand.getStartLoc(), "unexpected operand type");

    // Add operands to instruction.
    SystemZOperand &ZOperand = static_cast<SystemZOperand &>(Operand);
    if (ZOperand.isReg())
      ZOperand.addRegOperands(Inst, 1);
    else if (ZOperand.isMem(BDMem))
      ZOperand.addBDAddrOperands(Inst, 2);
    else if (ZOperand.isMem(BDXMem))
      ZOperand.addBDXAddrOperands(Inst, 3);
    else if (ZOperand.isImm())
      ZOperand.addImmOperands(Inst, 1);
    else
      llvm_unreachable("unexpected operand type");
  }

  // Emit as a regular instruction.
  Parser.getStreamer().EmitInstruction(Inst, getSTI());

  return false;
}
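// Editor's sketch: ParseDirectiveInsn above assumes InsnMatchTable is sorted
// by format name and that CompareInsn works with std::equal_range. A minimal
// illustration of that comparator pattern follows; the member names are
// assumptions for exposition, not the actual SystemZ definitions.
struct InsnMatchEntrySketch {
  StringRef Format; // Sort key: the instruction format mnemonic.
};
struct CompareInsnSketch {
  // Heterogeneous overloads let equal_range compare entries against a key.
  bool operator()(const InsnMatchEntrySketch &LHS, StringRef RHS) const {
    return LHS.Format < RHS;
  }
  bool operator()(StringRef LHS, const InsnMatchEntrySketch &RHS) const {
    return LHS < RHS.Format;
  }
};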
bool ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
  assert(Pointer->getType().isAddress() &&
         "Walked through the pointer to the value?");
  SILType PointeeType = Pointer->getType().getObjectType();

  /// This keeps track of instructions in the use list that touch multiple tuple
  /// elements and should be scalarized.  This is done as a second phase to
  /// avoid invalidating the use iterator.
  ///
  SmallVector<SILInstruction*, 4> UsesToScalarize;

  for (auto *UI : Pointer->getUses()) {
    auto *User = UI->getUser();

    // struct_element_addr P, #field indexes into the current element.
    if (auto *SEAI = dyn_cast<StructElementAddrInst>(User)) {
      if (!collectStructElementUses(SEAI, BaseEltNo))
        return false;
      continue;
    }

    // Instructions that compute a subelement are handled by a helper.
    if (auto *TEAI = dyn_cast<TupleElementAddrInst>(User)) {
      if (!collectTupleElementUses(TEAI, BaseEltNo))
        return false;
      continue;
    }

    // Look through begin_access.
    if (auto I = dyn_cast<BeginAccessInst>(User)) {
      if (!collectUses(I, BaseEltNo))
        return false;
      continue;
    }

    // Ignore end_access.
    if (isa<EndAccessInst>(User)) {
      continue;
    }
    
    // Loads are a use of the value.
    if (isa<LoadInst>(User)) {
      if (PointeeType.is<TupleType>())
        UsesToScalarize.push_back(User);
      else
        addElementUses(BaseEltNo, PointeeType, User, DIUseKind::Load);
      continue;
    }

    if (isa<LoadWeakInst>(User)) {
      Uses.push_back(DIMemoryUse(User, DIUseKind::Load, BaseEltNo, 1));
      continue;
    }

    // Stores *to* the allocation are writes.
    if (isa<StoreInst>(User) && UI->getOperandNumber() == 1) {
      if (PointeeType.is<TupleType>()) {
        UsesToScalarize.push_back(User);
        continue;
      }
      
      // Coming out of SILGen, we assume that raw stores are initializations,
      // unless they have trivial type (which we classify as InitOrAssign).
      DIUseKind Kind;
      if (InStructSubElement)
        Kind = DIUseKind::PartialStore;
      else if (PointeeType.isTrivial(User->getModule()))
        Kind = DIUseKind::InitOrAssign;
      else
        Kind = DIUseKind::Initialization;
      
      addElementUses(BaseEltNo, PointeeType, User, Kind);
      continue;
    }

    if (auto *SWI = dyn_cast<StoreWeakInst>(User)) {
      if (UI->getOperandNumber() == 1) {
        DIUseKind Kind;
        if (InStructSubElement)
          Kind = DIUseKind::PartialStore;
        else if (SWI->isInitializationOfDest())
          Kind = DIUseKind::Initialization;
        else
          Kind = DIUseKind::Assign;
        Uses.push_back(DIMemoryUse(User, Kind, BaseEltNo, 1));
        continue;
      }
    }

    if (auto *SUI = dyn_cast<StoreUnownedInst>(User)) {
      if (UI->getOperandNumber() == 1) {
        DIUseKind Kind;
        if (InStructSubElement)
          Kind = DIUseKind::PartialStore;
        else if (SUI->isInitializationOfDest())
          Kind = DIUseKind::Initialization;
        else
          Kind = DIUseKind::Assign;
        Uses.push_back(DIMemoryUse(User, Kind, BaseEltNo, 1));
        continue;
      }
    }

    if (auto *CAI = dyn_cast<CopyAddrInst>(User)) {
      // If this is a copy of a tuple, we should scalarize it so that we don't
      // have an access that crosses elements.
      if (PointeeType.is<TupleType>()) {
        UsesToScalarize.push_back(CAI);
        continue;
      }
      
      // If this is the source of the copy_addr, then this is a load.  If it is
      // the destination, then this is an unknown assignment.  Note that we'll
      // revisit this instruction and add it to Uses twice if it is both a load
      // and store to the same aggregate.
      DIUseKind Kind;
      if (UI->getOperandNumber() == 0)
        Kind = DIUseKind::Load;
      else if (InStructSubElement)
        Kind = DIUseKind::PartialStore;
      else if (CAI->isInitializationOfDest())
        Kind = DIUseKind::Initialization;
      else
        Kind = DIUseKind::Assign;

      addElementUses(BaseEltNo, PointeeType, User, Kind);
      continue;
    }
    
    // The apply instruction does not capture the pointer when it is passed
    // through 'inout' arguments or for indirect returns.  InOut arguments are
    // treated as uses and may-store's, but an indirect return is treated as a
    // full store.
    //
    // Note that partial_apply instructions always close over their argument.
    //
    if (auto *Apply = dyn_cast<ApplyInst>(User)) {
      auto substConv = Apply->getSubstCalleeConv();
      unsigned ArgumentNumber = UI->getOperandNumber()-1;

      // If this is an out-parameter, it is like a store.
      unsigned NumIndirectResults = substConv.getNumIndirectSILResults();
      if (ArgumentNumber < NumIndirectResults) {
        // We do not support initializing sub members. This is an old
        // restriction from when this code was used by Definite
        // Initialization. With proper code review, we can remove this, but for
        // now, lets be conservative.
        if (InStructSubElement) {
          return false;
        }
        addElementUses(BaseEltNo, PointeeType, User,
                       DIUseKind::Initialization);
        continue;

      // Otherwise, adjust the argument index.      
      } else {
        ArgumentNumber -= NumIndirectResults;
      }

      auto ParamConvention =
          substConv.getParameters()[ArgumentNumber].getConvention();

      switch (ParamConvention) {
      case ParameterConvention::Direct_Owned:
      case ParameterConvention::Direct_Unowned:
      case ParameterConvention::Direct_Guaranteed:
        llvm_unreachable("address value passed to indirect parameter");

      // If this is an in-parameter, it is like a load.
      case ParameterConvention::Indirect_In:
      case ParameterConvention::Indirect_In_Constant:
      case ParameterConvention::Indirect_In_Guaranteed:
        addElementUses(BaseEltNo, PointeeType, User, DIUseKind::IndirectIn);
        continue;

      // If this is an @inout parameter, it is like both a load and store.
      case ParameterConvention::Indirect_Inout:
      case ParameterConvention::Indirect_InoutAliasable: {
        // If we're in the initializer for a struct, and this is a call to a
        // mutating method, we model that as an escape of self.  If an
        // individual sub-member is passed as inout, then we model that as an
        // inout use.
        addElementUses(BaseEltNo, PointeeType, User, DIUseKind::InOutUse);
        continue;
      }
      }
      llvm_unreachable("bad parameter convention");
    }
    
    // init_enum_data_addr is treated like a tuple_element_addr or another
    // instruction that looks into the memory object (i.e., the memory object
    // needs to be explicitly initialized by a copy_addr or some other use of
    // the projected address).
    if (auto I = dyn_cast<InitEnumDataAddrInst>(User)) {
      // If we are in a struct already, bail. With proper analysis, we should be
      // able to do this optimization.
      if (InStructSubElement) {
        return false;
      }

      // Keep track of the fact that we're inside of an enum.  This informs our
      // recursion that tuple stores are not scalarized outside, and that stores
      // should not be treated as partial stores.
      llvm::SaveAndRestore<bool> X(InEnumSubElement, true);
      if (!collectUses(I, BaseEltNo))
        return false;
      continue;
    }

    // init_existential_addr is modeled as an initialization store.
    if (isa<InitExistentialAddrInst>(User)) {
      // init_existential_addr should not apply to struct subelements.
      if (InStructSubElement) {
        return false;
      }
      Uses.push_back(DIMemoryUse(User, DIUseKind::Initialization,
                                 BaseEltNo, 1));
      continue;
    }
    
    // inject_enum_addr is modeled as an initialization store.
    if (isa<InjectEnumAddrInst>(User)) {
      // We do not support inject_enum_addr on the subelement of a struct.
      if (InStructSubElement) {
        return false;
      }
      Uses.push_back(DIMemoryUse(User, DIUseKind::Initialization,
                                 BaseEltNo, 1));
      continue;
    }

    // open_existential_addr is a use of the protocol value,
    // so it is modeled as a load.
    if (isa<OpenExistentialAddrInst>(User)) {
      Uses.push_back(DIMemoryUse(User, DIUseKind::Load, BaseEltNo, 1));
      // TODO: Is it safe to ignore all uses of the open_existential_addr?
      continue;
    }

    // We model destroy_addr as a release of the entire value.
    if (isa<DestroyAddrInst>(User)) {
      Releases.push_back(User);
      continue;
    }

    if (isa<DeallocStackInst>(User)) {
      continue;
    }

    // Sanitizer instrumentation is not user visible, so it should not
    // count as a use and must not affect compile-time diagnostics.
    if (isSanitizerInstrumentation(User, Module.getASTContext()))
      continue;

    // Otherwise, the use is something complicated, it escapes.
    addElementUses(BaseEltNo, PointeeType, User, DIUseKind::Escape);
  }

  // Now that we've walked all of the immediate uses, scalarize any operations
  // working on tuples if we need to for canonicalization or analysis reasons.
  if (!UsesToScalarize.empty()) {
    SILInstruction *PointerInst = Pointer->getDefiningInstruction();
    SmallVector<SILValue, 4> ElementAddrs;
    SILBuilderWithScope AddrBuilder(++SILBasicBlock::iterator(PointerInst),
                                    PointerInst);
    getScalarizedElementAddresses(Pointer, AddrBuilder, PointerInst->getLoc(),
                                  ElementAddrs);
    
    SmallVector<SILValue, 4> ElementTmps;
    for (auto *User : UsesToScalarize) {
      ElementTmps.clear();

      DEBUG(llvm::errs() << "  *** Scalarizing: " << *User << "\n");

      // Scalarize LoadInst
      if (auto *LI = dyn_cast<LoadInst>(User)) {
        SILValue Result = scalarizeLoad(LI, ElementAddrs);
        LI->replaceAllUsesWith(Result);
        LI->eraseFromParent();
        continue;
      }

      // Scalarize StoreInst
      if (auto *SI = dyn_cast<StoreInst>(User)) {
        SILBuilderWithScope B(User, SI);
        getScalarizedElements(SI->getOperand(0), ElementTmps, SI->getLoc(), B);
        
        for (unsigned i = 0, e = ElementAddrs.size(); i != e; ++i)
          B.createStore(SI->getLoc(), ElementTmps[i], ElementAddrs[i],
                        StoreOwnershipQualifier::Unqualified);
        SI->eraseFromParent();
        continue;
      }
      
      // Scalarize CopyAddrInst.
      auto *CAI = cast<CopyAddrInst>(User);
      SILBuilderWithScope B(User, CAI);

      // Determine if this is a copy *from* or *to* "Pointer".
      if (CAI->getSrc() == Pointer) {
        // Copy from pointer.
        getScalarizedElementAddresses(CAI->getDest(), B, CAI->getLoc(),
                                      ElementTmps);
        for (unsigned i = 0, e = ElementAddrs.size(); i != e; ++i)
          B.createCopyAddr(CAI->getLoc(), ElementAddrs[i], ElementTmps[i],
                           CAI->isTakeOfSrc(), CAI->isInitializationOfDest());
        
      } else {
        getScalarizedElementAddresses(CAI->getSrc(), B, CAI->getLoc(),
                                      ElementTmps);
        for (unsigned i = 0, e = ElementAddrs.size(); i != e; ++i)
          B.createCopyAddr(CAI->getLoc(), ElementTmps[i], ElementAddrs[i],
                           CAI->isTakeOfSrc(), CAI->isInitializationOfDest());
      }
      CAI->eraseFromParent();
    }
    
    // Now that we've scalarized some stuff, recurse down into the newly created
    // element address computations to recursively process it.  This can cause
    // further scalarization.
    if (llvm::any_of(ElementAddrs, [&](SILValue V) {
          return !collectTupleElementUses(cast<TupleElementAddrInst>(V),
                                          BaseEltNo);
        })) {
      return false;
    }
  }

  return true;
}
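// Editor's sketch: the scalarization step in collectUses fans a tuple address
// out into one address per element. A hedged sketch of what
// getScalarizedElementAddresses does conceptually (the real helper is defined
// elsewhere in this file and may differ in detail):
static void getScalarizedElementAddressesSketch(
    SILValue Pointer, SILBuilder &B, SILLocation Loc,
    SmallVectorImpl<SILValue> &ElementAddrs) {
  CanTupleType TT = Pointer->getType().castTo<TupleType>();
  for (unsigned i = 0, e = TT->getNumElements(); i != e; ++i)
    ElementAddrs.push_back(B.createTupleElementAddr(Loc, Pointer, i));
}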
/// runPasses - Run the specified passes on Program, outputting a bitcode file
/// and writing the filename into OutputFilename if successful.  If the
/// optimizations fail for some reason (optimizer crashes), return true,
/// otherwise return false.  If DeleteOutput is set to true, the bitcode is
/// deleted on success, and the filename string is undefined.  This prints to
/// outs() a single line message indicating whether compilation was successful
/// or failed.
///
bool BugDriver::runPasses(Module *Program,
                          const std::vector<std::string> &Passes,
                          std::string &OutputFilename, bool DeleteOutput,
                          bool Quiet, unsigned NumExtraArgs,
                          const char * const *ExtraArgs) const {
  // set up the output file name
  outs().flush();
  SmallString<128> UniqueFilename;
  error_code EC = sys::fs::createUniqueFile(
      OutputPrefix + "-output-%%%%%%%.bc", UniqueFilename);
  if (EC) {
    errs() << getToolName() << ": Error making unique filename: "
           << EC.message() << "\n";
    return true;
  }
  OutputFilename = UniqueFilename.str();

  // set up the input file name
  SmallString<128> InputFilename;
  int InputFD;
  EC = sys::fs::createUniqueFile(OutputPrefix + "-input-%%%%%%%.bc", InputFD,
                                 InputFilename);
  if (EC) {
    errs() << getToolName() << ": Error making unique filename: "
           << EC.message() << "\n";
    return true;
  }

  tool_output_file InFile(InputFilename.c_str(), InputFD);

  WriteBitcodeToFile(Program, InFile.os());
  InFile.os().close();
  if (InFile.os().has_error()) {
    errs() << "Error writing bitcode file: " << InputFilename << "\n";
    InFile.os().clear_error();
    return true;
  }

  std::string tool = OptCmd.empty() ? sys::FindProgramByName("opt") : OptCmd;
  if (tool.empty()) {
    errs() << "Cannot find `opt' in PATH!\n";
    return true;
  }

  // Ok, everything that could go wrong before running opt is done.
  InFile.keep();

  // set up the child process's arguments
  SmallVector<const char*, 8> Args;
  if (UseValgrind) {
    Args.push_back("valgrind");
    Args.push_back("--error-exitcode=1");
    Args.push_back("-q");
    Args.push_back(tool.c_str());
  } else
    Args.push_back(tool.c_str());

  Args.push_back("-o");
  Args.push_back(OutputFilename.c_str());
  for (unsigned i = 0, e = OptArgs.size(); i != e; ++i)
    Args.push_back(OptArgs[i].c_str());
  std::vector<std::string> pass_args;
  for (unsigned i = 0, e = PluginLoader::getNumPlugins(); i != e; ++i) {
    pass_args.push_back(std::string("-load"));
    pass_args.push_back(PluginLoader::getPlugin(i));
  }
  for (std::vector<std::string>::const_iterator I = Passes.begin(),
       E = Passes.end(); I != E; ++I)
    pass_args.push_back(std::string("-") + (*I));
  for (std::vector<std::string>::const_iterator I = pass_args.begin(),
       E = pass_args.end(); I != E; ++I)
    Args.push_back(I->c_str());
  Args.push_back(InputFilename.c_str());
  for (unsigned i = 0; i < NumExtraArgs; ++i)
    Args.push_back(ExtraArgs[i]);
  Args.push_back(nullptr);

  DEBUG(errs() << "\nAbout to run:\t";
        for (unsigned i = 0, e = Args.size()-1; i != e; ++i)
          errs() << " " << Args[i];
        errs() << "\n";
        );
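  // [Editor's completion, hedged: the listing is truncated here. A plausible
  // ending, modeled on bugpoint's optimizer driver of this era, runs the
  // child process, cleans up the temporary files, and reports the result;
  // Timeout and MemoryLimit are assumed to be BugDriver members.]
  std::string ErrMsg;
  int result = sys::ExecuteAndWait(Args[0], Args.data(), nullptr, nullptr,
                                   Timeout, MemoryLimit, &ErrMsg);

  // If we are supposed to delete the bitcode file or if the passes crashed,
  // remove it now.
  if (DeleteOutput || result != 0)
    sys::fs::remove(OutputFilename);

  // Remove the temporary input file as well.
  sys::fs::remove(InputFilename.c_str());

  if (!Quiet) {
    if (result == 0)
      outs() << "Success!\n";
    else if (result > 0)
      outs() << "Exited with error code '" << result << "'\n";
    else
      errs() << "Crashed: " << ErrMsg << "\n";
  }

  // Was the child successful?
  return result != 0;
}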
ParserResult<GenericParamList>
Parser::parseGenericParameters(SourceLoc LAngleLoc) {
  // Parse the generic parameter list.
  SmallVector<GenericTypeParamDecl *, 4> GenericParams;
  bool Invalid = false;
  do {
    // Parse the name of the parameter.
    Identifier Name;
    SourceLoc NameLoc;
    if (parseIdentifier(Name, NameLoc, diag::expected_generics_parameter_name)) {
      Invalid = true;
      break;
    }

    // Parse the ':' followed by a type.
    SmallVector<TypeLoc, 1> Inherited;
    if (Tok.is(tok::colon)) {
      (void)consumeToken();
      ParserResult<TypeRepr> Ty;
      if (Tok.is(tok::identifier) || Tok.is(tok::code_complete)) {
        Ty = parseTypeIdentifier();
      } else if (Tok.is(tok::kw_protocol)) {
        Ty = parseTypeComposition();
      } else {
        diagnose(Tok, diag::expected_generics_type_restriction, Name);
        Invalid = true;
      }

      if (Ty.hasCodeCompletion())
        return makeParserCodeCompletionStatus();

      if (Ty.isNonNull())
        Inherited.push_back(Ty.get());
    }

    // We always create generic type parameters with a depth of zero.
    // Semantic analysis fills in the depth when it processes the generic
    // parameter list.
    auto Param = new (Context) GenericTypeParamDecl(CurDeclContext, Name,
                                                    NameLoc, /*Depth=*/0,
                                                    GenericParams.size());
    if (!Inherited.empty())
      Param->setInherited(Context.AllocateCopy(Inherited));
    GenericParams.push_back(Param);

    // Add this parameter to the scope.
    addToScope(Param);

    // Parse the comma, if the list continues.
  } while (consumeIf(tok::comma));

  // Parse the optional where-clause.
  SourceLoc WhereLoc;
  SmallVector<RequirementRepr, 4> Requirements;
  if (Tok.is(tok::kw_where) &&
      parseGenericWhereClause(WhereLoc, Requirements)) {
    Invalid = true;
  }
  
  // Parse the closing '>'.
  SourceLoc RAngleLoc;
  if (!startsWithGreater(Tok)) {
    if (!Invalid) {
      diagnose(Tok, diag::expected_rangle_generics_param);
      diagnose(LAngleLoc, diag::opening_angle);
      
      Invalid = true;
    }
    
    // Skip until we hit the '>'.
    skipUntilGreaterInTypeList();
    if (startsWithGreater(Tok))
      RAngleLoc = consumeStartingGreater();
    else
      RAngleLoc = Tok.getLoc();
  } else {
    RAngleLoc = consumeStartingGreater();
  }

  if (GenericParams.empty())
    return nullptr;

  return makeParserResult(GenericParamList::create(Context, LAngleLoc,
                                                   GenericParams, WhereLoc,
                                                   Requirements, RAngleLoc));
}
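// Editor's note: parseGenericParameters consumes the region between '<' and
// '>' in declarations such as
//
//   func min<T : Comparable>(x: T, y: T) -> T { ... }
//
// Each "Name ':' Type" pair becomes a GenericTypeParamDecl at depth 0 (the
// real depth is filled in during semantic analysis), and an optional trailing
// where-clause is collected into Requirements before the closing '>'.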
void MatcherGen::
EmitResultInstructionAsOperand(const TreePatternNode *N,
                               SmallVectorImpl<unsigned> &OutputOps) {
  Record *Op = N->getOperator();
  const CodeGenTarget &CGT = CGP.getTargetInfo();
  CodeGenInstruction &II = CGT.getInstruction(Op);
  const DAGInstruction &Inst = CGP.getInstruction(Op);

  // If we can, get the pattern for the instruction we're generating. We derive
  // a variety of information from this pattern, such as whether it has a chain.
  //
  // FIXME2: This is extremely dubious for several reasons, not the least of
  // which it gives special status to instructions with patterns that Pat<>
  // nodes can't duplicate.
  const TreePatternNode *InstPatNode = GetInstPatternNode(Inst, N);

  // NodeHasChain - Whether the instruction node we're creating takes chains.
  bool NodeHasChain = InstPatNode &&
                      InstPatNode->TreeHasProperty(SDNPHasChain, CGP);

  // Instructions which load and store from memory should have a chain,
  // regardless of whether they happen to have an internal pattern saying so.
  if (Pattern.getSrcPattern()->TreeHasProperty(SDNPHasChain, CGP) &&
      (II.hasCtrlDep || II.mayLoad || II.mayStore || II.canFoldAsLoad ||
       II.hasSideEffects))
    NodeHasChain = true;

  bool isRoot = N == Pattern.getDstPattern();

  // TreeHasOutGlue - True if this tree has glue.
  bool TreeHasInGlue = false, TreeHasOutGlue = false;
  if (isRoot) {
    const TreePatternNode *SrcPat = Pattern.getSrcPattern();
    TreeHasInGlue = SrcPat->TreeHasProperty(SDNPOptInGlue, CGP) ||
                    SrcPat->TreeHasProperty(SDNPInGlue, CGP);

    // FIXME2: this is checking the entire pattern, not just the node in
    // question, doing this just for the root seems like a total hack.
    TreeHasOutGlue = SrcPat->TreeHasProperty(SDNPOutGlue, CGP);
  }

  // NumResults - This is the number of results produced by the instruction in
  // the "outs" list.
  unsigned NumResults = Inst.getNumResults();

  // Number of operands we know the output instruction must have. If it is
  // variadic, we could have more operands.
  unsigned NumFixedOperands = II.Operands.size();

  SmallVector<unsigned, 8> InstOps;

  // Loop over all of the fixed operands of the instruction pattern, emitting
  // code to fill them all in. The node 'N' usually has a number of children
  // equal to the number of input operands of the instruction.  However, in
  // cases where there are predicate operands for an instruction, we need to
  // fill in the 'execute always' values. Match up the node operands to the
  // instruction operands to do this.
  unsigned ChildNo = 0;
  for (unsigned InstOpNo = NumResults, e = NumFixedOperands;
       InstOpNo != e; ++InstOpNo) {
    // Determine what to emit for this operand.
    Record *OperandNode = II.Operands[InstOpNo].Rec;
    if (OperandNode->isSubClassOf("OperandWithDefaultOps") &&
        !CGP.getDefaultOperand(OperandNode).DefaultOps.empty()) {
      // This is a predicate or optional def operand; emit the
      // 'default ops' operands.
      const DAGDefaultOperand &DefaultOp
        = CGP.getDefaultOperand(OperandNode);
      for (unsigned i = 0, e = DefaultOp.DefaultOps.size(); i != e; ++i)
        EmitResultOperand(DefaultOp.DefaultOps[i], InstOps);
      continue;
    }

    // Otherwise this is a normal operand or a predicate operand without
    // 'execute always'; emit it.

    // For operands with multiple sub-operands we may need to emit
    // multiple child patterns to cover them all.  However, ComplexPattern
    // children may themselves emit multiple MI operands.
    unsigned NumSubOps = 1;
    if (OperandNode->isSubClassOf("Operand")) {
      DagInit *MIOpInfo = OperandNode->getValueAsDag("MIOperandInfo");
      if (unsigned NumArgs = MIOpInfo->getNumArgs())
        NumSubOps = NumArgs;
    }

    unsigned FinalNumOps = InstOps.size() + NumSubOps;
    while (InstOps.size() < FinalNumOps) {
      const TreePatternNode *Child = N->getChild(ChildNo);
      unsigned BeforeAddingNumOps = InstOps.size();
      EmitResultOperand(Child, InstOps);
      assert(InstOps.size() > BeforeAddingNumOps && "Didn't add any operands");

      // If the operand is an instruction and it produced multiple results, just
      // take the first one.
      if (!Child->isLeaf() && Child->getOperator()->isSubClassOf("Instruction"))
        InstOps.resize(BeforeAddingNumOps+1);

      ++ChildNo;
    }
  }

  // If this is a variadic output instruction (i.e. REG_SEQUENCE), we can't
  // expand suboperands, use default operands, or use other features determined
  // from the CodeGenInstruction after the fixed operands, which were handled
  // above. Emit the remaining operands that the use of variable_ops implicitly
  // added.
  if (II.Operands.isVariadic) {
    for (unsigned I = ChildNo, E = N->getNumChildren(); I < E; ++I)
      EmitResultOperand(N->getChild(I), InstOps);
  }

  // If this node has input glue or explicitly specified input physregs, we
  // need to add chained and glued copyfromreg nodes and materialize the glue
  // input.
  if (isRoot && !PhysRegInputs.empty()) {
    // Emit all of the CopyToReg nodes for the input physical registers.  These
    // occur in patterns like (mul:i8 AL:i8, GR8:i8:$src).
    for (unsigned i = 0, e = PhysRegInputs.size(); i != e; ++i)
      AddMatcher(new EmitCopyToRegMatcher(PhysRegInputs[i].second,
                                          PhysRegInputs[i].first));
    // Even if the node has no other glue inputs, the resultant node must be
    // glued to the CopyFromReg nodes we just generated.
    TreeHasInGlue = true;
  }

  // Result order: node results, chain, glue

  // Determine the result types.
  SmallVector<MVT::SimpleValueType, 4> ResultVTs;
  for (unsigned i = 0, e = N->getNumTypes(); i != e; ++i)
    ResultVTs.push_back(N->getType(i));

  // If this is the root instruction of a pattern that has physical registers in
  // its result pattern, add output VTs for them.  For example, X86 has:
  //   (set AL, (mul ...))
  // This also handles implicit results like:
  //   (implicit EFLAGS)
  if (isRoot && !Pattern.getDstRegs().empty()) {
    // If the root came from an implicit def in the instruction handling stuff,
    // don't re-add it.
    Record *HandledReg = nullptr;
    if (II.HasOneImplicitDefWithKnownVT(CGT) != MVT::Other)
      HandledReg = II.ImplicitDefs[0];

    for (unsigned i = 0; i != Pattern.getDstRegs().size(); ++i) {
      Record *Reg = Pattern.getDstRegs()[i];
      if (!Reg->isSubClassOf("Register") || Reg == HandledReg) continue;
      ResultVTs.push_back(getRegisterValueType(Reg, CGT));
    }
  }

  // If this is the root of the pattern and the pattern we're matching includes
  // a node that is variadic, mark the generated node as variadic so that it
  // gets the excess operands from the input DAG.
  int NumFixedArityOperands = -1;
  if (isRoot &&
      Pattern.getSrcPattern()->NodeHasProperty(SDNPVariadic, CGP))
    NumFixedArityOperands = Pattern.getSrcPattern()->getNumChildren();

  // If this is the root node and multiple matched nodes in the input pattern
  // have MemRefs in them, have the interpreter collect them and plop them onto
  // this node. If there is just one node with MemRefs, leave them on that node
  // even if it is not the root.
  //
  // FIXME3: This is actively incorrect for result patterns with multiple
  // memory-referencing instructions.
  bool PatternHasMemOperands =
    Pattern.getSrcPattern()->TreeHasProperty(SDNPMemOperand, CGP);

  bool NodeHasMemRefs = false;
  if (PatternHasMemOperands) {
    unsigned NumNodesThatLoadOrStore =
      numNodesThatMayLoadOrStore(Pattern.getDstPattern(), CGP);
    bool NodeIsUniqueLoadOrStore = mayInstNodeLoadOrStore(N, CGP) &&
                                   NumNodesThatLoadOrStore == 1;
    NodeHasMemRefs =
      NodeIsUniqueLoadOrStore || (isRoot && (mayInstNodeLoadOrStore(N, CGP) ||
                                             NumNodesThatLoadOrStore != 1));
  }

  assert((!ResultVTs.empty() || TreeHasOutGlue || NodeHasChain) &&
         "Node has no result");

  AddMatcher(new EmitNodeMatcher(II.Namespace+"::"+II.TheDef->getName(),
                                 ResultVTs, InstOps,
                                 NodeHasChain, TreeHasInGlue, TreeHasOutGlue,
                                 NodeHasMemRefs, NumFixedArityOperands,
                                 NextRecordedOperandNo));

  // The non-chain and non-glue results of the newly emitted node get recorded.
  for (unsigned i = 0, e = ResultVTs.size(); i != e; ++i) {
    if (ResultVTs[i] == MVT::Other || ResultVTs[i] == MVT::Glue) break;
    OutputOps.push_back(NextRecordedOperandNo++);
  }
}
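// Editor's sketch: EmitResultInstructionAsOperand relies on
// numNodesThatMayLoadOrStore to decide where MemRefs should live. A plausible
// shape for that helper, hedged (the real definition lives elsewhere in this
// file): count the instruction nodes in the tree for which
// mayInstNodeLoadOrStore is true.
static unsigned numNodesThatMayLoadOrStoreSketch(const TreePatternNode *N,
                                                 const CodeGenDAGPatterns &CGP) {
  if (N->isLeaf())
    return 0;
  unsigned Count = mayInstNodeLoadOrStore(N, CGP) ? 1 : 0;
  for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i)
    Count += numNodesThatMayLoadOrStoreSketch(N->getChild(i), CGP);
  return Count;
}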
void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating.  This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't get
  // invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);

  // Add all nodes in depth first order.
  SmallVector<SDNode*, 64> Worklist;
  SmallPtrSet<SDNode*, 64> Visited;
  Worklist.push_back(DAG->getRoot().getNode());
  Visited.insert(DAG->getRoot().getNode());

  SmallVector<SUnit*, 8> CallSUnits;
  while (!Worklist.empty()) {
    SDNode *NI = Worklist.pop_back_val();

    // Add all operands to the worklist unless they've already been added.
    for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
      if (Visited.insert(NI->getOperand(i).getNode()))
        Worklist.push_back(NI->getOperand(i).getNode());

    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;

    SUnit *NodeSUnit = NewSUnit(NI);

    // See if anything is glued to this node, if so, add them to glued
    // nodes.  Nodes can have at most one glue input and one glue output.  Glue
    // is required to be the last operand and result of a node.

    // Scan up to find glued preds.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue) {
      N = N->getOperand(N->getNumOperands()-1).getNode();
      assert(N->getNodeId() == -1 && "Node already inserted!");
      N->setNodeId(NodeSUnit->NodeNum);
      if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
        NodeSUnit->isCall = true;
    }

    // Scan down to find any glued succs.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Glue) {
      SDValue GlueVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Glue result.
      bool HasGlueUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
           UI != E; ++UI)
        if (GlueVal.isOperandOf(*UI)) {
          HasGlueUse = true;
          assert(N->getNodeId() == -1 && "Node already inserted!");
          N->setNodeId(NodeSUnit->NodeNum);
          N = *UI;
          if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
            NodeSUnit->isCall = true;
          break;
        }
      if (!HasGlueUse) break;
    }

    if (NodeSUnit->isCall)
      CallSUnits.push_back(NodeSUnit);

    // Schedule zero-latency TokenFactor below any nodes that may increase the
    // schedule height. Otherwise, ancestors of the TokenFactor may appear to
    // have false stalls.
    if (NI->getOpcode() == ISD::TokenFactor)
      NodeSUnit->isScheduleLow = true;

    // If there are glue operands involved, N is now the bottom-most node
    // of the sequence of nodes that are glued together.
    // Update the SUnit.
    NodeSUnit->setNode(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NodeSUnit->NodeNum);

    // Compute NumRegDefsLeft. This must be done before AddSchedEdges.
    InitNumRegDefsLeft(NodeSUnit);

    // Assign the Latency field of NodeSUnit using target-provided information.
    ComputeLatency(NodeSUnit);
  }

  // Find all call operands.
  while (!CallSUnits.empty()) {
    SUnit *SU = CallSUnits.pop_back_val();
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->getOpcode() != ISD::CopyToReg)
        continue;
      SDNode *SrcN = SUNode->getOperand(2).getNode();
      if (isPassiveNode(SrcN)) continue;   // Not scheduled.
      SUnit *SrcSU = &SUnits[SrcN->getNodeId()];
      SrcSU->isCallOp = true;
    }
  }
}
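// Editor's sketch: BuildSchedUnits skips "passive" nodes -- nodes that never
// need a SUnit of their own. A rough illustration of that predicate (hedged;
// the real isPassiveNode covers more node kinds than shown here):
static bool isPassiveNodeSketch(SDNode *Node) {
  if (isa<ConstantSDNode>(Node) || isa<ConstantFPSDNode>(Node))
    return true;
  if (isa<RegisterSDNode>(Node) || isa<GlobalAddressSDNode>(Node))
    return true;
  return Node->getOpcode() == ISD::EntryToken;
}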
// Specialize F by replacing the arguments (keys) in replacements with the
// constants (values).  Replace all calls to F that pass those constants with
// a call to the specialized function.  Returns the specialized function.
static Function*
SpecializeFunction(Function* F, 
                   ValueMap<const Value*, Value*>& replacements) {
  // arg numbers of deleted arguments
  DenseMap<unsigned, const Argument*> deleted;
  for (ValueMap<const Value*, Value*>::iterator 
         repb = replacements.begin(), repe = replacements.end();
       repb != repe; ++repb) {
    Argument const *arg = cast<const Argument>(repb->first);
    deleted[arg->getArgNo()] = arg;
  }

  Function* NF = CloneFunction(F, replacements);
  NF->setLinkage(GlobalValue::InternalLinkage);
  F->getParent()->getFunctionList().push_back(NF);

  for (Value::use_iterator ii = F->use_begin(), ee = F->use_end(); 
       ii != ee; ) {
    Value::use_iterator i = ii;
    ++ii;
    User *U = *i;
    CallSite CS(U);
    if (CS) {
      if (CS.getCalledFunction() == F) {
        SmallVector<Value*, 6> args;
        // Assemble the non-specialized arguments for the updated callsite.
        // In the process, make sure that the specialized arguments are
        // constant and match the specialization.  If that's not the case,
        // this callsite needs to call the original or some other
        // specialization; don't change it here.
        CallSite::arg_iterator as = CS.arg_begin(), ae = CS.arg_end();
        for (CallSite::arg_iterator ai = as; ai != ae; ++ai) {
          DenseMap<unsigned, const Argument*>::iterator delit = deleted.find(
            std::distance(as, ai));
          if (delit == deleted.end())
            args.push_back(cast<Value>(ai));
          else {
            Constant *ci = dyn_cast<Constant>(ai);
            if (!(ci && ci == replacements[delit->second]))
              goto next_use;
          }
        }
        Value* NCall;
        if (CallInst *CI = dyn_cast<CallInst>(U)) {
          NCall = CallInst::Create(NF, args.begin(), args.end(), 
                                   CI->getName(), CI);
          cast<CallInst>(NCall)->setTailCall(CI->isTailCall());
          cast<CallInst>(NCall)->setCallingConv(CI->getCallingConv());
        } else {
          InvokeInst *II = cast<InvokeInst>(U);
          NCall = InvokeInst::Create(NF, II->getNormalDest(),
                                     II->getUnwindDest(),
                                     args.begin(), args.end(), 
                                     II->getName(), II);
          cast<InvokeInst>(NCall)->setCallingConv(II->getCallingConv());
        }
        CS.getInstruction()->replaceAllUsesWith(NCall);
        CS.getInstruction()->eraseFromParent();
        ++numReplaced;
      }
    }
    next_use:;
  }
  return NF;
}
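// Editor's note: a caller populates `replacements` by mapping formal arguments
// of F to the constants to specialize on, e.g. (hypothetical values, assuming
// the surrounding pass has already picked the argument and constant):
//
//   ValueMap<const Value*, Value*> replacements;
//   replacements[&*F->arg_begin()] =
//       ConstantInt::get(Type::getInt32Ty(F->getContext()), 42);
//   Function *NF = SpecializeFunction(F, replacements);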
TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
  // Create the TargetMachine for generating code.
  std::string Error;
  std::string Triple = TheModule->getTargetTriple();
  const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
  if (!TheTarget) {
    if (MustCreateTM)
      Diags.Report(diag::err_fe_unable_to_create_target) << Error;
    return 0;
  }

  // FIXME: Expose these capabilities via actual APIs!!!! Aside from just
  // being gross, this is also totally broken if we ever care about
  // concurrency.

  TargetMachine::setAsmVerbosityDefault(CodeGenOpts.AsmVerbose);

  TargetMachine::setFunctionSections(CodeGenOpts.FunctionSections);
  TargetMachine::setDataSections    (CodeGenOpts.DataSections);

  // FIXME: Parse this earlier.
  llvm::CodeModel::Model CM;
  if (CodeGenOpts.CodeModel == "small") {
    CM = llvm::CodeModel::Small;
  } else if (CodeGenOpts.CodeModel == "kernel") {
    CM = llvm::CodeModel::Kernel;
  } else if (CodeGenOpts.CodeModel == "medium") {
    CM = llvm::CodeModel::Medium;
  } else if (CodeGenOpts.CodeModel == "large") {
    CM = llvm::CodeModel::Large;
  } else {
    assert(CodeGenOpts.CodeModel.empty() && "Invalid code model!");
    CM = llvm::CodeModel::Default;
  }

  SmallVector<const char *, 16> BackendArgs;
  BackendArgs.push_back("clang"); // Fake program name.
  if (!CodeGenOpts.DebugPass.empty()) {
    BackendArgs.push_back("-debug-pass");
    BackendArgs.push_back(CodeGenOpts.DebugPass.c_str());
  }
  if (!CodeGenOpts.LimitFloatPrecision.empty()) {
    BackendArgs.push_back("-limit-float-precision");
    BackendArgs.push_back(CodeGenOpts.LimitFloatPrecision.c_str());
  }
  if (llvm::TimePassesIsEnabled)
    BackendArgs.push_back("-time-passes");
  for (unsigned i = 0, e = CodeGenOpts.BackendOptions.size(); i != e; ++i)
    BackendArgs.push_back(CodeGenOpts.BackendOptions[i].c_str());
  if (CodeGenOpts.NoGlobalMerge)
    BackendArgs.push_back("-global-merge=false");
  BackendArgs.push_back(0);
  llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
                                    BackendArgs.data());

  std::string FeaturesStr;
  if (!TargetOpts.Features.empty()) {
    SubtargetFeatures Features;
    for (std::vector<std::string>::const_iterator
           it = TargetOpts.Features.begin(),
           ie = TargetOpts.Features.end(); it != ie; ++it)
      Features.AddFeature(*it);
    FeaturesStr = Features.getString();
  }

  llvm::Reloc::Model RM = llvm::Reloc::Default;
  if (CodeGenOpts.RelocationModel == "static") {
    RM = llvm::Reloc::Static;
  } else if (CodeGenOpts.RelocationModel == "pic") {
    RM = llvm::Reloc::PIC_;
  } else {
    assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" &&
           "Invalid PIC model!");
    RM = llvm::Reloc::DynamicNoPIC;
  }

  CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
  switch (CodeGenOpts.OptimizationLevel) {
  default: break;
  case 0: OptLevel = CodeGenOpt::None; break;
  case 3: OptLevel = CodeGenOpt::Aggressive; break;
  }

  llvm::TargetOptions Options;

  // Set frame pointer elimination mode.
  if (!CodeGenOpts.DisableFPElim) {
    Options.NoFramePointerElim = false;
  } else if (CodeGenOpts.OmitLeafFramePointer) {
    Options.NoFramePointerElim = false;
  } else {
    Options.NoFramePointerElim = true;
  }

  if (CodeGenOpts.UseInitArray)
    Options.UseInitArray = true;

  // Set float ABI type.
  if (CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp")
    Options.FloatABIType = llvm::FloatABI::Soft;
  else if (CodeGenOpts.FloatABI == "hard")
    Options.FloatABIType = llvm::FloatABI::Hard;
  else {
    assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!");
    Options.FloatABIType = llvm::FloatABI::Default;
  }

  // Set FP fusion mode.
  switch (CodeGenOpts.getFPContractMode()) {
  case CodeGenOptions::FPC_Off:
    Options.AllowFPOpFusion = llvm::FPOpFusion::Strict;
    break;
  case CodeGenOptions::FPC_On:
    Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
    break;
  case CodeGenOptions::FPC_Fast:
    Options.AllowFPOpFusion = llvm::FPOpFusion::Fast;
    break;
  }

  Options.LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
  Options.NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
  Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
  Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
  Options.UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
  Options.UseSoftFloat = CodeGenOpts.SoftFloat;
  Options.StackAlignmentOverride = CodeGenOpts.StackAlignment;
  Options.DisableTailCalls = CodeGenOpts.DisableTailCalls;
  Options.TrapFuncName = CodeGenOpts.TrapFuncName;
  Options.PositionIndependentExecutable = LangOpts.PIELevel != 0;
  Options.EnableSegmentedStacks = CodeGenOpts.EnableSegmentedStacks;

  TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU,
                                                     FeaturesStr, Options,
                                                     RM, CM, OptLevel);

  if (CodeGenOpts.RelaxAll)
    TM->setMCRelaxAll(true);
  if (CodeGenOpts.SaveTempLabels)
    TM->setMCSaveTempLabels(true);
  if (CodeGenOpts.NoDwarf2CFIAsm)
    TM->setMCUseCFI(false);
  if (!CodeGenOpts.NoDwarfDirectoryAsm)
    TM->setMCUseDwarfDirectory(true);
  if (CodeGenOpts.NoExecStack)
    TM->setMCNoExecStack(true);

  return TM;
}
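// Editor's aside: the string-to-enum chains above (code model, relocation
// model, float ABI) can be written more compactly with llvm::StringSwitch.
// A sketch for the code-model case (requires llvm/ADT/StringSwitch.h; note
// this drops the assert that rejects unknown values):
static llvm::CodeModel::Model parseCodeModelSketch(StringRef S) {
  return llvm::StringSwitch<llvm::CodeModel::Model>(S)
      .Case("small", llvm::CodeModel::Small)
      .Case("kernel", llvm::CodeModel::Kernel)
      .Case("medium", llvm::CodeModel::Medium)
      .Case("large", llvm::CodeModel::Large)
      .Default(llvm::CodeModel::Default);
}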
/// Create a fake body for std::call_once.
/// Emulates the following function body:
///
/// \code
/// typedef struct once_flag_s {
///   unsigned long __state = 0;
/// } once_flag;
/// template<class Callable>
/// void call_once(once_flag& o, Callable func) {
///   if (!o.__state) {
///     func();
///     o.__state = 1;
///   }
/// }
/// \endcode
static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
  LLVM_DEBUG(llvm::dbgs() << "Generating body for call_once\n");

  // We need at least two parameters.
  if (D->param_size() < 2)
    return nullptr;

  ASTMaker M(C);

  const ParmVarDecl *Flag = D->getParamDecl(0);
  const ParmVarDecl *Callback = D->getParamDecl(1);

  if (!Callback->getType()->isReferenceType()) {
    LLVM_DEBUG(llvm::dbgs()
               << "libcxx03 std::call_once implementation, skipping.\n");
    return nullptr;
  }
  if (!Flag->getType()->isReferenceType()) {
    LLVM_DEBUG(llvm::dbgs()
               << "unknown std::call_once implementation, skipping.\n");
    return nullptr;
  }

  QualType CallbackType = Callback->getType().getNonReferenceType();

  // Nullable pointer, non-null iff the callback type is a CXXRecordDecl.
  CXXRecordDecl *CallbackRecordDecl = CallbackType->getAsCXXRecordDecl();
  QualType FlagType = Flag->getType().getNonReferenceType();
  auto *FlagRecordDecl = dyn_cast_or_null<RecordDecl>(FlagType->getAsTagDecl());

  if (!FlagRecordDecl) {
    LLVM_DEBUG(llvm::dbgs() << "Flag field is not a record: "
                            << "unknown std::call_once implementation, "
                            << "ignoring the call.\n");
    return nullptr;
  }

  // We initially assume libc++ implementation of call_once,
  // where the once_flag struct has a field `__state_`.
  ValueDecl *FlagFieldDecl = M.findMemberField(FlagRecordDecl, "__state_");

  // Otherwise, try libstdc++ implementation, with a field
  // `_M_once`
  if (!FlagFieldDecl) {
    FlagFieldDecl = M.findMemberField(FlagRecordDecl, "_M_once");
  }

  if (!FlagFieldDecl) {
    LLVM_DEBUG(llvm::dbgs() << "No field _M_once or __state_ found on "
                            << "std::once_flag struct: unknown std::call_once "
                            << "implementation, ignoring the call.");
    return nullptr;
  }

  bool isLambdaCall = CallbackRecordDecl && CallbackRecordDecl->isLambda();
  if (CallbackRecordDecl && !isLambdaCall) {
    LLVM_DEBUG(llvm::dbgs()
               << "Not supported: synthesizing body for functors when "
               << "body farming std::call_once, ignoring the call.");
    return nullptr;
  }

  SmallVector<Expr *, 5> CallArgs;
  const FunctionProtoType *CallbackFunctionType;
  if (isLambdaCall) {

    // Lambda requires callback itself inserted as a first parameter.
    CallArgs.push_back(
        M.makeDeclRefExpr(Callback,
                          /* RefersToEnclosingVariableOrCapture=*/ true));
    CallbackFunctionType = CallbackRecordDecl->getLambdaCallOperator()
                               ->getType()
                               ->getAs<FunctionProtoType>();
  } else if (!CallbackType->getPointeeType().isNull()) {
    CallbackFunctionType =
        CallbackType->getPointeeType()->getAs<FunctionProtoType>();
  } else {
    CallbackFunctionType = CallbackType->getAs<FunctionProtoType>();
  }

  if (!CallbackFunctionType)
    return nullptr;

  // First two arguments are used for the flag and for the callback.
  if (D->getNumParams() != CallbackFunctionType->getNumParams() + 2) {
    LLVM_DEBUG(llvm::dbgs() << "Types of params of the callback do not match "
                            << "params passed to std::call_once, "
                            << "ignoring the call\n");
    return nullptr;
  }

  // All arguments past first two ones are passed to the callback,
  // and we turn lvalues into rvalues if the argument is not passed by
  // reference.
  for (unsigned int ParamIdx = 2; ParamIdx < D->getNumParams(); ParamIdx++) {
    const ParmVarDecl *PDecl = D->getParamDecl(ParamIdx);
    if (PDecl &&
        CallbackFunctionType->getParamType(ParamIdx - 2)
                .getNonReferenceType()
                .getCanonicalType() !=
            PDecl->getType().getNonReferenceType().getCanonicalType()) {
      LLVM_DEBUG(llvm::dbgs() << "Types of params of the callback do not match "
                              << "params passed to std::call_once, "
                              << "ignoring the call\n");
      return nullptr;
    }
    Expr *ParamExpr = M.makeDeclRefExpr(PDecl);
    if (!CallbackFunctionType->getParamType(ParamIdx - 2)->isReferenceType()) {
      QualType PTy = PDecl->getType().getNonReferenceType();
      ParamExpr = M.makeLvalueToRvalue(ParamExpr, PTy);
    }
    CallArgs.push_back(ParamExpr);
  }

  CallExpr *CallbackCall;
  if (isLambdaCall) {

    CallbackCall = create_call_once_lambda_call(C, M, Callback,
                                                CallbackRecordDecl, CallArgs);
  } else {

    // Function pointer case.
    CallbackCall = create_call_once_funcptr_call(C, M, Callback, CallArgs);
  }

  DeclRefExpr *FlagDecl =
      M.makeDeclRefExpr(Flag,
                        /* RefersToEnclosingVariableOrCapture=*/true);


  MemberExpr *Deref = M.makeMemberExpression(FlagDecl, FlagFieldDecl);
  assert(Deref->isLValue());
  QualType DerefType = Deref->getType();

  // Negation predicate.
  UnaryOperator *FlagCheck = new (C) UnaryOperator(
      /* input=*/
      M.makeImplicitCast(M.makeLvalueToRvalue(Deref, DerefType), DerefType,
                         CK_IntegralToBoolean),
      /* opc=*/ UO_LNot,
      /* QualType=*/ C.IntTy,
      /* ExprValueKind=*/ VK_RValue,
      /* ExprObjectKind=*/ OK_Ordinary, SourceLocation(),
      /* CanOverflow*/ false);

  // Create assignment.
  BinaryOperator *FlagAssignment = M.makeAssignment(
      Deref, M.makeIntegralCast(M.makeIntegerLiteral(1, C.IntTy), DerefType),
      DerefType);

  IfStmt *Out = new (C)
      IfStmt(C, SourceLocation(),
             /* IsConstexpr=*/ false,
             /* init=*/ nullptr,
             /* var=*/ nullptr,
             /* cond=*/ FlagCheck,
             /* then=*/ M.makeCompound({CallbackCall, FlagAssignment}));

  return Out;
}
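// Editor's note: returning the synthesized IfStmt lets the analyzer inline
// through std::call_once as if the \code body in the doc comment were its
// real definition; every nullptr return above leaves the call opaque, so the
// analyzer falls back to conservative evaluation.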