Example #1
0
static llvm::Optional<unsigned> 
mapOffsetToOlderSnapshot(unsigned Offset,
                         ImmutableTextSnapshotRef NewSnap,
                         ImmutableTextSnapshotRef OldSnap) {
  SmallVector<ReplaceImmutableTextUpdateRef, 16> Updates;
  OldSnap->foreachReplaceUntil(NewSnap,
    [&](ReplaceImmutableTextUpdateRef Upd)->bool {
      Updates.push_back(Upd);
      return true;
    });

  // Walk the updates backwards and "undo" them.
  for (auto I = Updates.rbegin(), E = Updates.rend(); I != E; ++I) {
    auto Upd = *I;
    if (Upd->getByteOffset() <= Offset &&
        Offset < Upd->getByteOffset() + Upd->getText().size())
      return None; // Offset is part of newly inserted text.

    if (Upd->getByteOffset() <= Offset) {
      Offset += Upd->getLength(); // "bring back" what was removed.
      Offset -= Upd->getText().size(); // "remove" what was added.
    }
  }
  return Offset;
}
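A minimal standalone sketch of the same reverse-undo idea follows, using std::vector and a made-up Edit record in place of the SourceKit snapshot types; it is illustrative only, not the SourceKit API.

#include <optional>
#include <vector>

// Hypothetical simplified edit record: at ByteOffset, OldLength bytes were
// replaced by NewLength bytes.
struct Edit {
  unsigned ByteOffset;
  unsigned OldLength;
  unsigned NewLength;
};

// Walk the edits newest-to-oldest and undo each one, mirroring the loop in
// mapOffsetToOlderSnapshot above.
std::optional<unsigned> mapBack(unsigned Offset,
                                const std::vector<Edit> &Edits) {
  for (auto I = Edits.rbegin(), E = Edits.rend(); I != E; ++I) {
    if (I->ByteOffset <= Offset && Offset < I->ByteOffset + I->NewLength)
      return std::nullopt; // Offset is part of newly inserted text.
    if (I->ByteOffset <= Offset) {
      Offset += I->OldLength; // "bring back" what was removed.
      Offset -= I->NewLength; // "remove" what was added.
    }
  }
  return Offset;
}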
Example #2
0
  /// stripOffsets - Strip the trailing offsets from this node.
  /// Walks the serialized, colon-separated form backwards, collecting the
  /// trailing offsets, and returns the serialized form without them.
  ///
  std::string stripOffsets() {
    std::vector<unsigned> offsets_reversed;
    SmallVector<StringRef,5> colonSeparated;
    StringRef serializedRef = serialized;
    serializedRef.split(colonSeparated,":");
    SmallVector<StringRef,5>::reverse_iterator I = colonSeparated.rbegin(),
      E = colonSeparated.rend();
    for(; I != E; ++I ) {
      unsigned offset;
      // If this isn't an integer (offset), then bail
      if (I->getAsInteger(0,offset))
        break;
      offsets_reversed.push_back(offset);
    }
    // We now have the offsets in reverse order; rebuild the rest of the
    // string without them.

    // If we have more than 2 values left, then we have something like:
    // name1:name2:name3[:offset]*, which is no good.
    // Also, if we have *nothing* left, something is similarly wrong.
    assert(((E - I) > 0) && "Node was entirely made of offsets?");
    assert(((E - I) <= 2) && "Too many colons! (Invalid node/offset given)");

    // Now rebuild the string, without the offsets.
    std::string rebuilt = I++->str();
    for(; I != E; ++I) {
      rebuilt = I->str() + ":" + rebuilt;
    }

    // Reverse the offsets (since we parsed backwards) and put the result
    // into the 'offsets' vector for use elsewhere.
    offsets.insert(offsets.begin(),
        offsets_reversed.rbegin(),offsets_reversed.rend());

    return rebuilt;
  }
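A self-contained sketch of the same split-and-strip idea, using std::string and std::vector instead of StringRef and SmallVector; the helper name stripTrailingOffsets and the splitting code are made up for illustration.

#include <cassert>
#include <string>
#include <vector>

// Split on ':', peel trailing integer components off the back, and rebuild
// the prefix. Offsets receives the stripped values in their original order.
std::string stripTrailingOffsets(const std::string &Serialized,
                                 std::vector<unsigned> &Offsets) {
  std::vector<std::string> Parts;
  size_t Start = 0;
  for (size_t Pos; (Pos = Serialized.find(':', Start)) != std::string::npos;
       Start = Pos + 1)
    Parts.push_back(Serialized.substr(Start, Pos - Start));
  Parts.push_back(Serialized.substr(Start));

  std::vector<unsigned> Reversed;
  auto I = Parts.rbegin(), E = Parts.rend();
  for (; I != E; ++I) {
    // Stop at the first component that is not a plain unsigned integer.
    if (I->empty() || I->find_first_not_of("0123456789") != std::string::npos)
      break;
    Reversed.push_back(static_cast<unsigned>(std::stoul(*I)));
  }
  assert(I != E && "Node was entirely made of offsets?");

  // Rebuild the remaining components in their original (forward) order.
  std::string Rebuilt = *I++;
  for (; I != E; ++I)
    Rebuilt = *I + ":" + Rebuilt;

  Offsets.assign(Reversed.rbegin(), Reversed.rend());
  return Rebuilt;
}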
Example #3
0
// \brief Update the first occurrence of the "switch statement" BB in the PHI
// node with the "new" BB. The other occurrences will:
//
// 1) Be updated by subsequent calls to this function. Switch statements may
// have more than one outgoing edge into the same BB if they all have the same
// value. When the switch statement is converted these incoming edges are now
// coming from multiple BBs.
// 2) Be removed if subsequent incoming values now share the same case, i.e.,
// multiple outgoing edges are condensed into one. This is necessary to keep
// the number of phi values equal to the number of branches to SuccBB.
static void fixPhis(BasicBlock *SuccBB, BasicBlock *OrigBB, BasicBlock *NewBB,
                    unsigned NumMergedCases) {
  for (BasicBlock::iterator I = SuccBB->begin(), IE = SuccBB->getFirstNonPHI();
       I != IE; ++I) {
    PHINode *PN = cast<PHINode>(I);

    // Only update the first occurrence.
    unsigned Idx = 0, E = PN->getNumIncomingValues();
    unsigned LocalNumMergedCases = NumMergedCases;
    for (; Idx != E; ++Idx) {
      if (PN->getIncomingBlock(Idx) == OrigBB) {
        PN->setIncomingBlock(Idx, NewBB);
        break;
      }
    }

    // Remove additional occurrences coming from condensed cases and keep the
    // number of incoming values equal to the number of branches to SuccBB.
    SmallVector<unsigned, 8> Indices;
    for (++Idx; LocalNumMergedCases > 0 && Idx < E; ++Idx)
      if (PN->getIncomingBlock(Idx) == OrigBB) {
        Indices.push_back(Idx);
        LocalNumMergedCases--;
      }
    // Remove the incoming values in reverse order so that erasing one entry
    // does not invalidate the indices that are still pending removal.
    for (auto III = Indices.rbegin(), IIE = Indices.rend(); III != IIE; ++III)
      PN->removeIncomingValue(*III);
  }
}
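The erase-in-reverse step at the end is the essential trick; here is a tiny self-contained sketch of it using std::vector and plain ints (illustrative types only).

#include <vector>

// Erase the recorded indices from back to front: removing the highest index
// first only moves elements that come after it, so the lower indices still
// pending remain valid.
void eraseIndices(std::vector<int> &Values,
                  const std::vector<unsigned> &Indices) {
  // Indices are assumed to have been collected in increasing order.
  for (auto I = Indices.rbegin(), E = Indices.rend(); I != E; ++I)
    Values.erase(Values.begin() + *I);
}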
Example #4
0
bool AArch64FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;

  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs);

  for (auto RPII = RegPairs.rbegin(), RPIE = RegPairs.rend(); RPII != RPIE;
       ++RPII) {
    RegPairInfo RPI = *RPII;
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    unsigned StrOpc;

    // Issue sequence of spills for cs regs.  The first spill may be converted
    // to a pre-decrement store later by emitPrologue if the callee-save stack
    // area allocation can't be combined with the local stack area allocation.
    // For example:
    //    stp     x22, x21, [sp, #0]     // addImm(+0)
    //    stp     x20, x19, [sp, #16]    // addImm(+2)
    //    stp     fp, lr, [sp, #32]      // addImm(+4)
    // Rationale: This sequence saves uop updates compared to a sequence of
    // pre-increment spills like stp xi,xj,[sp,#-16]!
    // Note: Similar rationale and sequence for restores in epilog.
    if (RPI.IsGPR)
      StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
    else
      StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
    DEBUG(dbgs() << "CSR spill: (" << TRI->getName(Reg1);
          if (RPI.isPaired())
            dbgs() << ", " << TRI->getName(Reg2);
          dbgs() << ") -> fi#(" << RPI.FrameIdx;
          if (RPI.isPaired())
            dbgs() << ", " << RPI.FrameIdx+1;
          dbgs() << ")\n");

    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
    MBB.addLiveIn(Reg1);
    if (RPI.isPaired()) {
      MBB.addLiveIn(Reg2);
      MIB.addReg(Reg2, getPrologueDeath(MF, Reg2));
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx + 1),
          MachineMemOperand::MOStore, 8, 8));
    }
    MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
        .addReg(AArch64::SP)
        .addImm(RPI.Offset) // [sp, #offset*8], where factor*8 is implicit
        .setMIFlag(MachineInstr::FrameSetup);
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx),
        MachineMemOperand::MOStore, 8, 8));
  }
  return true;
}
Example #5
0
void LowerIntrinsics::InsertGCRegisterRoots(Function &F, GCStrategy &S) {
  GCRootMapType GCRoots;
  FindGCRegisterRoots(F, GCRoots);

  // For each live root at a call site, insert a call to llvm.gcregroot.
  Function *GCRegRootFn = Intrinsic::getDeclaration(F.getParent(),
                                                    Intrinsic::gcregroot);
  Type *PtrTy = Type::getInt8PtrTy(F.getParent()->getContext());
  for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) {
    for (BasicBlock::iterator II = BBI->begin(),
                              IE = BBI->end(); II != IE; ++II) {
      if (!isa<CallInst>(&*II) && !isa<InvokeInst>(&*II))
        continue;

      // Skip intrinsic calls. This prevents infinite loops.
      if (isa<CallInst>(&*II)) {
        Function *Fn = cast<CallInst>(&*II)->getCalledFunction();
        if (Fn && Fn->isIntrinsic())
          continue;
      }

      // Find all the live GC roots, and create gcregroot intrinsics for them.
      SmallVector<Instruction *, 8> GeneratedInsts;
      for (GCRootMapType::iterator RI = GCRoots.begin(),
                                   RE = GCRoots.end(); RI != RE; ++RI) {
        Value *Origin = RI->first;
        if (!IsRootLiveAt(Origin, II, GCRoots))
          continue;

        // Ok, we need to root this value. First, cast the value to an i8* to
        // make a type-compatible intrinsic call.
        Instruction *GCRegRootArg = new BitCastInst(Origin, PtrTy);
        GeneratedInsts.push_back(GCRegRootArg);

        // Create the intrinsic call.
        Value *Args[1];
        Args[0] = GCRegRootArg;
        GeneratedInsts.push_back(CallInst::Create(GCRegRootFn, Args));
      }

      // Insert the gcregroot intrinsic calls after the call.
      if (isa<CallInst>(&*II)) {
        for (SmallVector<Instruction *, 8>::reverse_iterator CII =
             GeneratedInsts.rbegin(), CIE = GeneratedInsts.rend();
             CII != CIE; ++CII) {
          (*CII)->insertAfter(&*II);
        }
      } else {  // Invoke instruction
        BasicBlock *NormalDest = cast<InvokeInst>(&*II)->getNormalDest();
        Instruction &InsertPt = NormalDest->front();
        for (SmallVector<Instruction *, 8>::iterator CII =
             GeneratedInsts.begin(), CIE = GeneratedInsts.end();
             CII != CIE; ++CII) {
          (*CII)->insertBefore(&InsertPt);
        }
      }
    }
  }
}
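A small sketch of why the CallInst path inserts the generated instructions in reverse: repeatedly inserting "right after the call" places each new element directly behind the anchor, so issuing the inserts back-to-front leaves the sequence in its original order. std::list stands in for an instruction list here; this is not the LLVM API.

#include <iterator>
#include <list>
#include <string>
#include <vector>

// Insert each generated element immediately after Anchor, walking the
// generated sequence back-to-front so it ends up in forward order.
void insertAfterPreservingOrder(std::list<std::string> &Block,
                                std::list<std::string>::iterator Anchor,
                                const std::vector<std::string> &Generated) {
  for (auto I = Generated.rbegin(), E = Generated.rend(); I != E; ++I)
    Block.insert(std::next(Anchor), *I); // equivalent of insertAfter(Anchor)
}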
Example #6
0
    void redirect(llvm::StringRef file, bool apnd,
                  MetaProcessor::RedirectionScope scope) {
      if (file.empty()) {
        // Unredirection, remove last redirection state(s) for given scope(s)
        if (m_Stack.empty()) {
          cling::errs() << "No redirections left to remove\n";
          return;
        }

        MetaProcessor::RedirectionScope lScope = scope;
        SmallVector<RedirectStack::iterator, 2> Remove;
        for (auto it = m_Stack.rbegin(), e = m_Stack.rend(); it != e; ++it) {
          Redirect *R = (*it).get();
          const unsigned Match = R->Scope & lScope;
          if (Match) {
#ifdef LLVM_ON_WIN32
            // stdout back from stderr, fix up our console output on destruction
            if (m_TTY && R->FD == m_Bak[1] && scope & kSTDOUT)
              m_TTY = 2;
#endif
            // Clear the flag so restore below will ignore R for scope
            R->Scope = MetaProcessor::RedirectionScope(R->Scope & ~Match);
            // If no scope left, then R should be removed
            if (!R->Scope) {
              // standard [24.4.1/1] says &*(reverse_iterator(i)) == &*(i - 1)
              Remove.push_back(std::next(it).base());
            }
            // Clear match to reduce lScope (kSTDBOTH -> kSTDOUT or kSTDERR)
            lScope = MetaProcessor::RedirectionScope(lScope & ~Match);
            // If nothing to match anymore, then we're done
            if (!lScope)
              break;
          }
        }
        // Remove holds the stored iterators from the highest stack index down
        // (it was filled while walking m_Stack backwards). Erase in that
        // order: std::vector::erase invalidates iterators at or after the
        // erased position, so erasing a lower index first would invalidate
        // the iterators recorded after it.
        for (auto it = Remove.begin(), e = Remove.end(); it != e; ++it)
          m_Stack.erase(*it);
      } else {
        // Add new redirection state
        if (push(new Redirect(file.str(), apnd, scope, m_Bak)) != kInvalidFD) {
          // Save a backup for the scope(s), if not already done
          if (scope & MetaProcessor::kSTDOUT)
            dupOnce(STDOUT_FILENO, m_Bak[0]);
          if (scope & MetaProcessor::kSTDERR)
            dupOnce(STDERR_FILENO, m_Bak[1]);
        } else
          return; // Failure
      }

      if (scope & MetaProcessor::kSTDOUT)
        m_CurStdOut =
            restore(STDOUT_FILENO, stdout, MetaProcessor::kSTDOUT, m_Bak[0]);
      if (scope & MetaProcessor::kSTDERR)
        restore(STDERR_FILENO, stderr, MetaProcessor::kSTDERR, m_Bak[1]);
    }
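Two idioms from redirect() are worth isolating: converting a reverse iterator to the forward iterator for the same element via std::next(it).base(), and erasing the collected positions from the highest index down so no stored iterator is invalidated before its own erase. A self-contained sketch with std::vector<int> (illustrative only):

#include <iterator>
#include <vector>

// Remove every element equal to Value. The reverse scan records forward
// iterators via std::next(it).base(), which addresses the same element as
// the reverse iterator ([reverse.iterators]: &*rit == &*(rit.base() - 1)).
void eraseMatches(std::vector<int> &Stack, int Value) {
  std::vector<std::vector<int>::iterator> Remove;
  for (auto it = Stack.rbegin(), e = Stack.rend(); it != e; ++it)
    if (*it == Value)
      Remove.push_back(std::next(it).base()); // forward iterator to *it
  // The scan pushed positions from highest to lowest, so erasing in push
  // order removes the highest index first and keeps the rest valid.
  for (auto it = Remove.begin(), e = Remove.end(); it != e; ++it)
    Stack.erase(*it);
}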
Example #7
0
std::string Module::getFullModuleName(bool AllowStringLiterals) const {
  SmallVector<StringRef, 2> Names;
  
  // Build up the set of module names (from innermost to outermost).
  for (const Module *M = this; M; M = M->Parent)
    Names.push_back(M->Name);
  
  std::string Result;

  llvm::raw_string_ostream Out(Result);
  printModuleId(Out, Names.rbegin(), Names.rend(), AllowStringLiterals);
  Out.flush(); 

  return Result;
}
Example #8
0
std::string Module::getFullModuleName() const {
  SmallVector<StringRef, 2> Names;
  
  // Build up the set of module names (from innermost to outermost).
  for (const Module *M = this; M; M = M->Parent)
    Names.push_back(M->Name);
  
  std::string Result;
  for (SmallVectorImpl<StringRef>::reverse_iterator I = Names.rbegin(),
                                                 IEnd = Names.rend();
       I != IEnd; ++I) {
    if (!Result.empty())
      Result += '.';
    
    Result += *I;
  }
  
  return Result;
}
Example #9
0
void FunctionLoweringInfo::addSEHHandlersForLPads() {
  MachineModuleInfo &MMI = MF->getMMI();

  // Iterate over all landing pads with llvm.eh.actions calls.
  for (const BasicBlock &BB : *Fn) {
    const LandingPadInst *LP = BB.getLandingPadInst();
    if (!LP)
      continue;
    const IntrinsicInst *ActionsCall =
        dyn_cast<IntrinsicInst>(LP->getNextNode());
    if (!ActionsCall ||
        ActionsCall->getIntrinsicID() != Intrinsic::eh_actions)
      continue;

    // Parse the llvm.eh.actions call we found.
    MachineBasicBlock *LPadMBB = MBBMap[LP->getParent()];
    SmallVector<ActionHandler *, 4> Actions;
    parseEHActions(ActionsCall, Actions);

    // Iterate EH actions from most to least precedence, which means
    // iterating in reverse.
    for (auto I = Actions.rbegin(), E = Actions.rend(); I != E; ++I) {
      ActionHandler *Action = *I;
      if (auto *CH = dyn_cast<CatchHandler>(Action)) {
        const auto *Filter =
            dyn_cast<Function>(CH->getSelector()->stripPointerCasts());
        assert((Filter || CH->getSelector()->isNullValue()) &&
               "expected function or catch-all");
        const auto *RecoverBA =
            cast<BlockAddress>(CH->getHandlerBlockOrFunc());
        MMI.addSEHCatchHandler(LPadMBB, Filter, RecoverBA);
      } else {
        assert(isa<CleanupHandler>(Action));
        const auto *Fini = cast<Function>(Action->getHandlerBlockOrFunc());
        MMI.addSEHCleanupHandler(LPadMBB, Fini);
      }
    }
    DeleteContainerPointers(Actions);
  }
}
Example #10
0
SILValue ConstantTracker::getStoredValue(SILInstruction *loadInst,
                                         ProjectionPath &projStack) {
  SILInstruction *store = links[loadInst];
  if (!store && callerTracker)
    store = callerTracker->links[loadInst];
  if (!store) return SILValue();

  assert(isa<LoadInst>(loadInst) || isa<CopyAddrInst>(loadInst));

  // Push the address projections of the load onto the stack.
  SmallVector<Projection, 4> loadProjections;
  scanProjections(loadInst->getOperand(0), &loadProjections);
  for (const Projection &proj : loadProjections) {
    projStack.push_back(proj);
  }

  //  Pop the address projections of the store from the stack.
  SmallVector<Projection, 4> storeProjections;
  scanProjections(store->getOperand(1), &storeProjections);
  for (auto iter = storeProjections.rbegin(); iter != storeProjections.rend();
       ++iter) {
    const Projection &proj = *iter;
    // The corresponding load-projection must match the store-projection.
    if (projStack.empty() || projStack.back() != proj)
      return SILValue();
    projStack.pop_back();
  }

  if (isa<StoreInst>(store))
    return store->getOperand(0);

  // The copy_addr instruction is both a load and a store. So we follow the link
  // again.
  assert(isa<CopyAddrInst>(store));
  return getStoredValue(store, projStack);
}
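A minimal sketch of the stack discipline used here: the load-side projections are pushed in forward order, and the store-side projections are matched and popped while walking them in reverse, so the innermost projections are compared first. std::string stands in for Projection (illustrative types only).

#include <string>
#include <vector>

// Match the store-side projections (walked innermost-first, i.e. in reverse)
// against the top of the projection stack, popping on each match. Returns
// false on the first mismatch, mirroring the SILValue() return above.
bool matchAndPop(std::vector<std::string> &ProjStack,
                 const std::vector<std::string> &StoreProjs) {
  for (auto I = StoreProjs.rbegin(), E = StoreProjs.rend(); I != E; ++I) {
    if (ProjStack.empty() || ProjStack.back() != *I)
      return false;
    ProjStack.pop_back();
  }
  return true;
}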
Example #11
0
/// \brief Recursively emit notes for each macro expansion and caret
/// diagnostics where appropriate.
///
/// Walks up the macro expansion stack printing expansion notes, the code
/// snippet, caret, underlines and FixItHint display as appropriate at each
/// level.
///
/// \param Loc The location for this caret.
/// \param Level The diagnostic level currently being emitted.
/// \param Ranges The underlined ranges for this code snippet.
/// \param Hints The FixIt hints active for this diagnostic.
void DiagnosticRenderer::emitMacroExpansions(SourceLocation Loc,
                                             DiagnosticsEngine::Level Level,
                                             ArrayRef<CharSourceRange> Ranges,
                                             ArrayRef<FixItHint> Hints,
                                             const SourceManager &SM) {
  assert(!Loc.isInvalid() && "must have a valid source location here");

  // Produce a stack of macro backtraces.
  SmallVector<SourceLocation, 8> LocationStack;
  unsigned IgnoredEnd = 0;
  while (Loc.isMacroID()) {
    // If this is the expansion of a macro argument, point the caret at the
    // use of the argument in the definition of the macro, not the expansion.
    if (SM.isMacroArgExpansion(Loc))
      LocationStack.push_back(SM.getImmediateExpansionRange(Loc).first);
    else
      LocationStack.push_back(Loc);

    if (checkRangesForMacroArgExpansion(Loc, Ranges, SM))
      IgnoredEnd = LocationStack.size();

    Loc = SM.getImmediateMacroCallerLoc(Loc);

    // Once the location no longer points into a macro, try stepping through
    // the last found location.  This sometimes produces additional useful
    // backtraces.
    if (Loc.isFileID())
      Loc = SM.getImmediateMacroCallerLoc(LocationStack.back());
    assert(!Loc.isInvalid() && "must have a valid source location here");
  }

  LocationStack.erase(LocationStack.begin(),
                      LocationStack.begin() + IgnoredEnd);

  unsigned MacroDepth = LocationStack.size();
  unsigned MacroLimit = DiagOpts->MacroBacktraceLimit;
  if (MacroDepth <= MacroLimit || MacroLimit == 0) {
    for (auto I = LocationStack.rbegin(), E = LocationStack.rend();
         I != E; ++I)
      emitSingleMacroExpansion(*I, Level, Ranges, SM);
    return;
  }

  unsigned MacroStartMessages = MacroLimit / 2;
  unsigned MacroEndMessages = MacroLimit / 2 + MacroLimit % 2;

  for (auto I = LocationStack.rbegin(),
            E = LocationStack.rbegin() + MacroStartMessages;
       I != E; ++I)
    emitSingleMacroExpansion(*I, Level, Ranges, SM);

  SmallString<200> MessageStorage;
  llvm::raw_svector_ostream Message(MessageStorage);
  Message << "(skipping " << (MacroDepth - MacroLimit)
          << " expansions in backtrace; use -fmacro-backtrace-limit=0 to "
             "see all)";
  emitBasicNote(Message.str());

  for (auto I = LocationStack.rend() - MacroEndMessages,
            E = LocationStack.rend();
       I != E; ++I)
    emitSingleMacroExpansion(*I, Level, Ranges, SM);
}
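A self-contained sketch of the backtrace-limiting scheme: when the stack is deeper than the limit, emit the first Limit/2 entries, a skip marker, and the last Limit/2 + Limit%2 entries, walking the innermost-first stack in reverse so output runs outermost-first. Plain ints and printf stand in for the diagnostic machinery (illustrative only).

#include <cstdio>
#include <vector>

// Print a limited backtrace: all frames if within the limit, otherwise the
// first Limit/2 frames, a skip marker, then the last Limit/2 + Limit%2.
void printLimited(const std::vector<int> &Stack, unsigned Limit) {
  const unsigned Depth = static_cast<unsigned>(Stack.size());
  if (Limit == 0 || Depth <= Limit) {
    for (auto I = Stack.rbegin(), E = Stack.rend(); I != E; ++I)
      std::printf("frame %d\n", *I);
    return;
  }
  const unsigned Head = Limit / 2;
  const unsigned Tail = Limit / 2 + Limit % 2;
  for (auto I = Stack.rbegin(), E = Stack.rbegin() + Head; I != E; ++I)
    std::printf("frame %d\n", *I);
  std::printf("(skipping %u frames)\n", Depth - Limit);
  for (auto I = Stack.rend() - Tail, E = Stack.rend(); I != E; ++I)
    std::printf("frame %d\n", *I);
}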
Example #12
0
bool AArch64FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;

  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs);

  for (auto RPII = RegPairs.rbegin(), RPIE = RegPairs.rend(); RPII != RPIE;
       ++RPII) {
    RegPairInfo RPI = *RPII;
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    unsigned StrOpc;

    // Issue the spill sequence for the callee-saved regs. The first spill is
    // a pre-increment SP store that also allocates the callee-save stack
    // area; the remaining spills use plain SP-relative offsets.
    // For example:
    //    stp     x22, x21, [sp, #-48]!   // addImm(-6)
    //    stp     x20, x19, [sp, #16]    // addImm(+2)
    //    stp     fp, lr, [sp, #32]      // addImm(+4)
    // Rationale: This sequence saves uop updates compared to a sequence of
    // pre-increment spills like stp xi,xj,[sp,#-16]!
    // Note: Similar rationale and sequence for restores in epilog.
    bool BumpSP = RPII == RegPairs.rbegin();
    if (RPI.IsGPR) {
      // For first spill use pre-increment store.
      if (BumpSP)
        StrOpc = RPI.isPaired() ? AArch64::STPXpre : AArch64::STRXpre;
      else
        StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
    } else {
      // For first spill use pre-increment store.
      if (BumpSP)
        StrOpc = RPI.isPaired() ? AArch64::STPDpre : AArch64::STRDpre;
      else
        StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
    }
    DEBUG(dbgs() << "CSR spill: (" << TRI->getName(Reg1);
          if (RPI.isPaired())
            dbgs() << ", " << TRI->getName(Reg2);
          dbgs() << ") -> fi#(" << RPI.FrameIdx;
          if (RPI.isPaired())
            dbgs() << ", " << RPI.FrameIdx+1;
          dbgs() << ")\n");

    const int Offset = BumpSP ? -RPI.Offset : RPI.Offset;
    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
    if (BumpSP)
      MIB.addReg(AArch64::SP, RegState::Define);

    if (RPI.isPaired()) {
      MBB.addLiveIn(Reg1);
      MBB.addLiveIn(Reg2);
      MIB.addReg(Reg2, getPrologueDeath(MF, Reg2))
        .addReg(Reg1, getPrologueDeath(MF, Reg1))
        .addReg(AArch64::SP)
        .addImm(Offset) // [sp, #offset * 8], where factor * 8 is implicit
        .setMIFlag(MachineInstr::FrameSetup);
    } else {
      MBB.addLiveIn(Reg1);
      MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
        .addReg(AArch64::SP)
        .addImm(BumpSP ? Offset * 8 : Offset) // pre-inc version is unscaled
        .setMIFlag(MachineInstr::FrameSetup);
    }
  }
  return true;
}
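A minimal sketch of the first-iteration special case: only the pair seen first in the reverse walk (RPII == RegPairs.rbegin()) gets the SP-bumping pre-increment store; every later pair gets a plain offset store. Plain ints and opcode strings stand in for RegPairInfo (illustrative only).

#include <string>
#include <vector>

// Walk the pairs in reverse and give only the first one the SP-bumping
// pre-increment opcode; the rest get the plain offset form.
std::vector<std::string> chooseOpcodes(const std::vector<int> &RegPairs) {
  std::vector<std::string> Opcodes;
  for (auto I = RegPairs.rbegin(), E = RegPairs.rend(); I != E; ++I) {
    const bool BumpSP = (I == RegPairs.rbegin()); // first spill allocates stack
    Opcodes.push_back(BumpSP ? "STPXpre" : "STPXi");
  }
  return Opcodes;
}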
Example #13
0
/// \brief Collect the set of header includes needed to construct the given 
/// module and update the TopHeaders file set of the module.
///
/// \param Module The module we're collecting includes from.
///
/// \param Includes Will be augmented with the set of \#includes or \#imports
/// needed to load all of the named headers.
static std::error_code
collectModuleHeaderIncludes(const LangOptions &LangOpts, FileManager &FileMgr,
                            ModuleMap &ModMap, clang::Module *Module,
                            SmallVectorImpl<char> &Includes) {
  // Don't collect any headers for unavailable modules.
  if (!Module->isAvailable())
    return std::error_code();

  // Add includes for each of these headers.
  for (Module::Header &H : Module->Headers[Module::HK_Normal]) {
    Module->addTopHeader(H.Entry);
    // Use the path as specified in the module map file. We'll look for this
    // file relative to the module build directory (the directory containing
    // the module map file) so this will find the same file that we found
    // while parsing the module map.
    if (std::error_code Err = addHeaderInclude(H.NameAsWritten, Includes,
                                               LangOpts, Module->IsExternC))
      return Err;
  }
  // Note that Module->PrivateHeaders will not be a TopHeader.

  if (Module::Header UmbrellaHeader = Module->getUmbrellaHeader()) {
    Module->addTopHeader(UmbrellaHeader.Entry);
    if (Module->Parent) {
      // Include the umbrella header for submodules.
      if (std::error_code Err = addHeaderInclude(UmbrellaHeader.NameAsWritten,
                                                 Includes, LangOpts,
                                                 Module->IsExternC))
        return Err;
    }
  } else if (Module::DirectoryName UmbrellaDir = Module->getUmbrellaDir()) {
    // Add all of the headers we find in this subdirectory.
    std::error_code EC;
    SmallString<128> DirNative;
    llvm::sys::path::native(UmbrellaDir.Entry->getName(), DirNative);
    for (llvm::sys::fs::recursive_directory_iterator Dir(DirNative, EC), 
                                                     DirEnd;
         Dir != DirEnd && !EC; Dir.increment(EC)) {
      // Check whether this entry has an extension typically associated with 
      // headers.
      if (!llvm::StringSwitch<bool>(llvm::sys::path::extension(Dir->path()))
          .Cases(".h", ".H", ".hh", ".hpp", true)
          .Default(false))
        continue;

      const FileEntry *Header = FileMgr.getFile(Dir->path());
      // FIXME: This shouldn't happen unless there is a file system race. Is
      // that worth diagnosing?
      if (!Header)
        continue;

      // If this header is marked 'unavailable' in this module, don't include 
      // it.
      if (ModMap.isHeaderUnavailableInModule(Header, Module))
        continue;

      // Compute the relative path from the directory to this file.
      SmallVector<StringRef, 16> Components;
      auto PathIt = llvm::sys::path::rbegin(Dir->path());
      for (int I = 0; I != Dir.level() + 1; ++I, ++PathIt)
        Components.push_back(*PathIt);
      SmallString<128> RelativeHeader(UmbrellaDir.NameAsWritten);
      for (auto It = Components.rbegin(), End = Components.rend(); It != End;
           ++It)
        llvm::sys::path::append(RelativeHeader, *It);

      // Include this header as part of the umbrella directory.
      Module->addTopHeader(Header);
      if (std::error_code Err = addHeaderInclude(RelativeHeader, Includes,
                                                 LangOpts, Module->IsExternC))
        return Err;
    }

    if (EC)
      return EC;
  }

  // Recurse into submodules.
  for (clang::Module::submodule_iterator Sub = Module->submodule_begin(),
                                      SubEnd = Module->submodule_end();
       Sub != SubEnd; ++Sub)
    if (std::error_code Err = collectModuleHeaderIncludes(
            LangOpts, FileMgr, ModMap, *Sub, Includes))
      return Err;

  return std::error_code();
}
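A self-contained sketch of the relative-path rebuild: the last Depth+1 components are collected by walking the path components from the back, then appended in forward order under a new prefix. Plain strings stand in for the llvm::sys::path utilities, and the function assumes the input has at least Depth+1 components (illustrative only).

#include <string>
#include <vector>

// Collect the last Depth+1 components (walking from the back), then append
// them under Prefix in forward order.
std::string rebuildRelative(const std::vector<std::string> &PathComponents,
                            unsigned Depth, const std::string &Prefix) {
  std::vector<std::string> Components;
  auto PathIt = PathComponents.rbegin();
  for (unsigned I = 0; I != Depth + 1; ++I, ++PathIt)
    Components.push_back(*PathIt);

  std::string Relative = Prefix;
  for (auto It = Components.rbegin(), End = Components.rend(); It != End; ++It)
    Relative += "/" + *It;
  return Relative;
}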
Example #14
0
void TempScopInfo::buildCondition(BasicBlock *BB, Region &R) {
  BasicBlock *RegionEntry = R.getEntry();
  BBCond Cond;

  DomTreeNode *BBNode = DT->getNode(BB), *EntryNode = DT->getNode(RegionEntry);
  assert(BBNode && EntryNode && "Get null node while building condition!");

  // Walk up the dominance tree until reaching the entry node. Collect all
  // branching blocks on the path to BB except if BB postdominates the block
  // containing the condition.
  SmallVector<BasicBlock *, 4> DominatorBrBlocks;
  while (BBNode != EntryNode) {
    BasicBlock *CurBB = BBNode->getBlock();
    BBNode = BBNode->getIDom();
    assert(BBNode && "BBNode should not reach the root node!");

    if (PDT->dominates(CurBB, BBNode->getBlock()))
      continue;

    BranchInst *Br = dyn_cast<BranchInst>(BBNode->getBlock()->getTerminator());
    assert(Br && "A Valid Scop should only contain branch instruction");

    if (Br->isUnconditional())
      continue;

    DominatorBrBlocks.push_back(BBNode->getBlock());
  }

  RegionInfo *RI = R.getRegionInfo();
  // Iterate over the dominating blocks in reverse order and add the
  // conditions they impose, until a non-affine branch is encountered; from
  // that point on we overapproximate anyway, so stop there.
  for (auto BIt = DominatorBrBlocks.rbegin(), BEnd = DominatorBrBlocks.rend();
       BIt != BEnd; BIt++) {

    BasicBlock *BBNode = *BIt;
    BranchInst *Br = dyn_cast<BranchInst>(BBNode->getTerminator());
    assert(Br && "A Valid Scop should only contain branch instruction");
    assert(Br->isConditional() && "Assumed a conditional branch");

    if (SD->isNonAffineSubRegion(RI->getRegionFor(BBNode), &R))
      break;

    BasicBlock *TrueBB = Br->getSuccessor(0), *FalseBB = Br->getSuccessor(1);

    // Is BB on the ELSE side of the branch?
    bool inverted = DT->dominates(FalseBB, BB);

    // If both TrueBB and FalseBB dominate BB, one of them must be the target of
    // a back-edge, i.e. a loop header.
    if (inverted && DT->dominates(TrueBB, BB)) {
      assert(
          (DT->dominates(TrueBB, FalseBB) || DT->dominates(FalseBB, TrueBB)) &&
          "One of the successors should be the loop header and dominate the"
          "other!");

      // It is not an invert if the FalseBB is the header.
      if (DT->dominates(FalseBB, TrueBB))
        inverted = false;
    }

    Comparison *Cmp;
    buildAffineCondition(*(Br->getCondition()), inverted, &Cmp);
    Cond.push_back(*Cmp);
  }

  if (!Cond.empty())
    BBConds[BB] = Cond;
}
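A minimal sketch of the traversal order used here: blocks are collected while walking up the dominator chain from BB toward the region entry, then processed entry-first by reversing the collected vector. Node is a made-up parent-linked struct, not the LLVM DomTreeNode API.

#include <vector>

// Hypothetical parent-linked node; the entry's own parent is irrelevant here.
struct Node {
  Node *Parent = nullptr;
};

// Collect nodes while walking up from From toward Entry (exclusive), then
// return them reversed so the node closest to Entry comes first.
std::vector<Node *> topDownChain(Node *From, Node *Entry) {
  std::vector<Node *> Chain;
  for (Node *N = From; N != Entry; N = N->Parent)
    Chain.push_back(N);
  return std::vector<Node *>(Chain.rbegin(), Chain.rend());
}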
Example #15
0
/// Given \p BBs as input, find another set of BBs which collectively
/// dominates \p BBs and have the minimal sum of frequencies. Return the BB
/// set found in \p BBs.
static void findBestInsertionSet(DominatorTree &DT, BlockFrequencyInfo &BFI,
                                 BasicBlock *Entry,
                                 SmallPtrSet<BasicBlock *, 8> &BBs) {
  assert(!BBs.count(Entry) && "Assume Entry is not in BBs");
  // Nodes on the current path to the root.
  SmallPtrSet<BasicBlock *, 8> Path;
  // Candidates includes any block 'BB' in set 'BBs' that is not strictly
  // dominated by any other blocks in set 'BBs', and all nodes in the path
  // in the dominator tree from Entry to 'BB'.
  SmallPtrSet<BasicBlock *, 16> Candidates;
  for (auto BB : BBs) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(BB))
      continue;
    Path.clear();
    // Walk up the dominator tree until Entry or another BB in BBs
    // is reached. Insert the nodes on the way to the Path.
    BasicBlock *Node = BB;
    // The "Path" is a candidate path to be added into Candidates set.
    bool isCandidate = false;
    do {
      Path.insert(Node);
      if (Node == Entry || Candidates.count(Node)) {
        isCandidate = true;
        break;
      }
      assert(DT.getNode(Node)->getIDom() &&
             "Entry doens't dominate current Node");
      Node = DT.getNode(Node)->getIDom()->getBlock();
    } while (!BBs.count(Node));

    // If isCandidate is false, Node is another Block in BBs dominating
    // current 'BB'. Drop the nodes on the Path.
    if (!isCandidate)
      continue;

    // Add nodes on the Path into Candidates.
    Candidates.insert(Path.begin(), Path.end());
  }

  // Sort the nodes in Candidates in top-down order and save the nodes
  // in Orders.
  unsigned Idx = 0;
  SmallVector<BasicBlock *, 16> Orders;
  Orders.push_back(Entry);
  while (Idx != Orders.size()) {
    BasicBlock *Node = Orders[Idx++];
    for (auto ChildDomNode : DT.getNode(Node)->getChildren()) {
      if (Candidates.count(ChildDomNode->getBlock()))
        Orders.push_back(ChildDomNode->getBlock());
    }
  }

  // Visit Orders in bottom-up order.
  using InsertPtsCostPair =
      std::pair<SmallPtrSet<BasicBlock *, 16>, BlockFrequency>;

  // InsertPtsMap is a map from a BB to the best insertion points for the
  // subtree of BB (subtree not including the BB itself).
  DenseMap<BasicBlock *, InsertPtsCostPair> InsertPtsMap;
  InsertPtsMap.reserve(Orders.size() + 1);
  for (auto RIt = Orders.rbegin(); RIt != Orders.rend(); RIt++) {
    BasicBlock *Node = *RIt;
    bool NodeInBBs = BBs.count(Node);
    SmallPtrSet<BasicBlock *, 16> &InsertPts = InsertPtsMap[Node].first;
    BlockFrequency &InsertPtsFreq = InsertPtsMap[Node].second;

    // Return the optimal insert points in BBs.
    if (Node == Entry) {
      BBs.clear();
      if (InsertPtsFreq > BFI.getBlockFreq(Node) ||
          (InsertPtsFreq == BFI.getBlockFreq(Node) && InsertPts.size() > 1))
        BBs.insert(Entry);
      else
        BBs.insert(InsertPts.begin(), InsertPts.end());
      break;
    }

    BasicBlock *Parent = DT.getNode(Node)->getIDom()->getBlock();
    // Initially, ParentInsertPts is empty and ParentPtsFreq is 0. Every child
    // will update its parent's ParentInsertPts and ParentPtsFreq.
    SmallPtrSet<BasicBlock *, 16> &ParentInsertPts = InsertPtsMap[Parent].first;
    BlockFrequency &ParentPtsFreq = InsertPtsMap[Parent].second;
    // Choose to insert in Node or in subtree of Node.
    // Don't hoist to EHPad because we may not find a proper place to insert
    // in EHPad.
    // If the total frequency of InsertPts is the same as the frequency of the
    // target Node, and InsertPts contains more than one node, choose hoisting
    // to reduce code size.
    if (NodeInBBs ||
        (!Node->isEHPad() &&
         (InsertPtsFreq > BFI.getBlockFreq(Node) ||
          (InsertPtsFreq == BFI.getBlockFreq(Node) && InsertPts.size() > 1)))) {
      ParentInsertPts.insert(Node);
      ParentPtsFreq += BFI.getBlockFreq(Node);
    } else {
      ParentInsertPts.insert(InsertPts.begin(), InsertPts.end());
      ParentPtsFreq += InsertPtsFreq;
    }
  }
}
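A minimal sketch of the visitation order used above: build a top-down (parent-before-child) order with a growing worklist, then walk it in reverse so every node is visited after all of its children, which is what the bottom-up cost accumulation needs. The adjacency list and node ids are made up for illustration (node 0 is the root).

#include <vector>

// Children[N] lists the child ids of node N; node 0 is the root.
void visitBottomUp(const std::vector<std::vector<unsigned>> &Children) {
  std::vector<unsigned> Orders;
  Orders.push_back(0); // root
  for (size_t Idx = 0; Idx != Orders.size(); ++Idx)
    for (unsigned Child : Children[Orders[Idx]])
      Orders.push_back(Child);

  // Reverse walk: every node is seen after all of its descendants.
  for (auto RIt = Orders.rbegin(); RIt != Orders.rend(); ++RIt) {
    unsigned Node = *RIt;
    // ... accumulate this node's cost into its parent's entry here ...
    (void)Node;
  }
}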
Example #16
0
/// \brief Late parse a C++ function template in Microsoft mode.
void Parser::ParseLateTemplatedFuncDef(LateParsedTemplatedFunction &LMT) {
  if (!LMT.D)
    return;

  // Get the FunctionDecl.
  FunctionDecl *FD = 0;
  if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(LMT.D))
    FD = FunTmpl->getTemplatedDecl();
  else
    FD = cast<FunctionDecl>(LMT.D);

  // To restore the context after late parsing.
  Sema::ContextRAII GlobalSavedContext(Actions, Actions.CurContext);

  SmallVector<ParseScope*, 4> TemplateParamScopeStack;
  DeclaratorDecl* Declarator = dyn_cast<DeclaratorDecl>(FD);
  if (Declarator && Declarator->getNumTemplateParameterLists() != 0) {
    TemplateParamScopeStack.push_back(new ParseScope(this, Scope::TemplateParamScope));
    Actions.ActOnReenterDeclaratorTemplateScope(getCurScope(), Declarator);
    Actions.ActOnReenterTemplateScope(getCurScope(), LMT.D);
  } else {
    // Get the list of DeclContext to reenter.
    SmallVector<DeclContext*, 4> DeclContextToReenter;
    DeclContext *DD = FD->getLexicalParent();
    while (DD && !DD->isTranslationUnit()) {
      DeclContextToReenter.push_back(DD);
      DD = DD->getLexicalParent();
    }

    // Reenter template scopes from outermost to innermost.
    SmallVector<DeclContext*, 4>::reverse_iterator II =
        DeclContextToReenter.rbegin();
    for (; II != DeclContextToReenter.rend(); ++II) {
      if (ClassTemplatePartialSpecializationDecl* MD =
                dyn_cast_or_null<ClassTemplatePartialSpecializationDecl>(*II)) {
        TemplateParamScopeStack.push_back(new ParseScope(this,
                                                   Scope::TemplateParamScope));
        Actions.ActOnReenterTemplateScope(getCurScope(), MD);
      } else if (CXXRecordDecl* MD = dyn_cast_or_null<CXXRecordDecl>(*II)) {
        TemplateParamScopeStack.push_back(new ParseScope(this,
                                                    Scope::TemplateParamScope,
                                       MD->getDescribedClassTemplate() != 0 ));
        Actions.ActOnReenterTemplateScope(getCurScope(),
                                          MD->getDescribedClassTemplate());
      }
      TemplateParamScopeStack.push_back(new ParseScope(this, Scope::DeclScope));
      Actions.PushDeclContext(Actions.getCurScope(), *II);
    }
    TemplateParamScopeStack.push_back(new ParseScope(this,
                                      Scope::TemplateParamScope));
    Actions.ActOnReenterTemplateScope(getCurScope(), LMT.D);
  }

  assert(!LMT.Toks.empty() && "Empty body!");

  // Append the current token at the end of the new token stream so that it
  // doesn't get lost.
  LMT.Toks.push_back(Tok);
  PP.EnterTokenStream(LMT.Toks.data(), LMT.Toks.size(), true, false);

  // Consume the previously pushed token.
  ConsumeAnyToken();
  assert((Tok.is(tok::l_brace) || Tok.is(tok::colon) || Tok.is(tok::kw_try))
         && "Inline method not starting with '{', ':' or 'try'");

  // Parse the method body. Function body parsing code is similar enough
  // to be re-used for method bodies as well.
  ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope);

  // Recreate the containing function DeclContext.
  Sema::ContextRAII FunctionSavedContext(Actions, Actions.getContainingDC(FD));

  if (FunctionTemplateDecl *FunctionTemplate
        = dyn_cast_or_null<FunctionTemplateDecl>(LMT.D))
    Actions.ActOnStartOfFunctionDef(getCurScope(),
                                   FunctionTemplate->getTemplatedDecl());
  if (FunctionDecl *Function = dyn_cast_or_null<FunctionDecl>(LMT.D))
    Actions.ActOnStartOfFunctionDef(getCurScope(), Function);


  if (Tok.is(tok::kw_try)) {
    ParseFunctionTryBlock(LMT.D, FnScope);
  } else {
    if (Tok.is(tok::colon))
      ParseConstructorInitializer(LMT.D);
    else
      Actions.ActOnDefaultCtorInitializers(LMT.D);

    if (Tok.is(tok::l_brace)) {
      ParseFunctionStatementBody(LMT.D, FnScope);
      Actions.MarkAsLateParsedTemplate(FD, false);
    } else
      Actions.ActOnFinishFunctionBody(LMT.D, 0);
  }

  // Exit scopes.
  FnScope.Exit();
  SmallVector<ParseScope*, 4>::reverse_iterator I =
      TemplateParamScopeStack.rbegin();
  for (; I != TemplateParamScopeStack.rend(); ++I)
    delete *I;

  DeclGroupPtrTy grp = Actions.ConvertDeclToDeclGroup(LMT.D);
  if (grp)
    Actions.getASTConsumer().HandleTopLevelDecl(grp.get());
}
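A minimal sketch of the cleanup at the end: the scope objects are created outermost-first and must be destroyed innermost-first, so the pointer stack is deleted while iterating it in reverse. ScopeGuard is a made-up stand-in for ParseScope.

#include <vector>

// Stand-in for ParseScope; only its destructor matters for the sketch.
struct ScopeGuard {
  ~ScopeGuard() { /* leave the scope */ }
};

// Delete the scopes innermost-first by walking the stack in reverse, then
// drop the now-dangling pointers.
void exitScopes(std::vector<ScopeGuard *> &ScopeStack) {
  for (auto I = ScopeStack.rbegin(), E = ScopeStack.rend(); I != E; ++I)
    delete *I;
  ScopeStack.clear();
}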
Example #17
0
/// Collect the set of header includes needed to construct the given
/// module and update the TopHeaders file set of the module.
///
/// \param Module The module we're collecting includes from.
///
/// \param Includes Will be augmented with the set of \#includes or \#imports
/// needed to load all of the named headers.
static std::error_code collectModuleHeaderIncludes(
    const LangOptions &LangOpts, FileManager &FileMgr, DiagnosticsEngine &Diag,
    ModuleMap &ModMap, clang::Module *Module, SmallVectorImpl<char> &Includes) {
  // Don't collect any headers for unavailable modules.
  if (!Module->isAvailable())
    return std::error_code();

  // Resolve all lazy header directives to header files.
  ModMap.resolveHeaderDirectives(Module);

  // If any headers are missing, we can't build this module. In most cases,
  // diagnostics for this should have already been produced; we only get here
  // if explicit stat information was provided.
  // FIXME: If the name resolves to a file with different stat information,
  // produce a better diagnostic.
  if (!Module->MissingHeaders.empty()) {
    auto &MissingHeader = Module->MissingHeaders.front();
    Diag.Report(MissingHeader.FileNameLoc, diag::err_module_header_missing)
      << MissingHeader.IsUmbrella << MissingHeader.FileName;
    return std::error_code();
  }

  // Add includes for each of these headers.
  for (auto HK : {Module::HK_Normal, Module::HK_Private}) {
    for (Module::Header &H : Module->Headers[HK]) {
      Module->addTopHeader(H.Entry);
      // Use the path as specified in the module map file. We'll look for this
      // file relative to the module build directory (the directory containing
      // the module map file) so this will find the same file that we found
      // while parsing the module map.
      addHeaderInclude(H.NameAsWritten, Includes, LangOpts, Module->IsExternC);
    }
  }
  // Note that Module->PrivateHeaders will not be a TopHeader.

  if (Module::Header UmbrellaHeader = Module->getUmbrellaHeader()) {
    Module->addTopHeader(UmbrellaHeader.Entry);
    if (Module->Parent)
      // Include the umbrella header for submodules.
      addHeaderInclude(UmbrellaHeader.NameAsWritten, Includes, LangOpts,
                       Module->IsExternC);
  } else if (Module::DirectoryName UmbrellaDir = Module->getUmbrellaDir()) {
    // Add all of the headers we find in this subdirectory.
    std::error_code EC;
    SmallString<128> DirNative;
    llvm::sys::path::native(UmbrellaDir.Entry->getName(), DirNative);

    vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
    for (vfs::recursive_directory_iterator Dir(FS, DirNative, EC), End;
         Dir != End && !EC; Dir.increment(EC)) {
      // Check whether this entry has an extension typically associated with
      // headers.
      if (!llvm::StringSwitch<bool>(llvm::sys::path::extension(Dir->getName()))
          .Cases(".h", ".H", ".hh", ".hpp", true)
          .Default(false))
        continue;

      const FileEntry *Header = FileMgr.getFile(Dir->getName());
      // FIXME: This shouldn't happen unless there is a file system race. Is
      // that worth diagnosing?
      if (!Header)
        continue;

      // If this header is marked 'unavailable' in this module, don't include
      // it.
      if (ModMap.isHeaderUnavailableInModule(Header, Module))
        continue;

      // Compute the relative path from the directory to this file.
      SmallVector<StringRef, 16> Components;
      auto PathIt = llvm::sys::path::rbegin(Dir->getName());
      for (int I = 0; I != Dir.level() + 1; ++I, ++PathIt)
        Components.push_back(*PathIt);
      SmallString<128> RelativeHeader(UmbrellaDir.NameAsWritten);
      for (auto It = Components.rbegin(), End = Components.rend(); It != End;
           ++It)
        llvm::sys::path::append(RelativeHeader, *It);

      // Include this header as part of the umbrella directory.
      Module->addTopHeader(Header);
      addHeaderInclude(RelativeHeader, Includes, LangOpts, Module->IsExternC);
    }

    if (EC)
      return EC;
  }

  // Recurse into submodules.
  for (clang::Module::submodule_iterator Sub = Module->submodule_begin(),
                                      SubEnd = Module->submodule_end();
       Sub != SubEnd; ++Sub)
    if (std::error_code Err = collectModuleHeaderIncludes(
            LangOpts, FileMgr, Diag, ModMap, *Sub, Includes))
      return Err;

  return std::error_code();
}