Example no. 1
bool TokenLexer::MaybeRemoveCommaBeforeVaArgs(
    SmallVectorImpl<Token> &ResultToks, bool HasPasteOperator, MacroInfo *Macro,
    unsigned MacroArgNo, Preprocessor &PP) {
  // Is the macro argument __VA_ARGS__?
  if (!Macro->isVariadic() || MacroArgNo != Macro->getNumArgs()-1)
    return false;

  // In Microsoft-compatibility mode, a comma is removed in the expansion
  // of " ... , __VA_ARGS__ " if __VA_ARGS__ is empty.  This extension is
  // not supported by gcc.
  if (!HasPasteOperator && !PP.getLangOpts().MSVCCompat)
    return false;

  // GCC removes the comma in the expansion of " ... , ## __VA_ARGS__ " if
  // __VA_ARGS__ is empty, but not in strict C99 mode where there are no
  // named arguments, where it remains.  In all other modes, including C99
  // with GNU extensions, it is removed regardless of named arguments.
  // Microsoft also appears to support this extension, unofficially.
  if (PP.getLangOpts().C99 && !PP.getLangOpts().GNUMode
        && Macro->getNumArgs() < 2)
    return false;

  // Is a comma available to be removed?
  if (ResultToks.empty() || !ResultToks.back().is(tok::comma))
    return false;

  // Issue an extension diagnostic for the paste operator.
  if (HasPasteOperator)
    PP.Diag(ResultToks.back().getLocation(), diag::ext_paste_comma);

  // Remove the comma.
  ResultToks.pop_back();

  if (!ResultToks.empty()) {
    // If the comma was right after another paste (e.g. "X##,##__VA_ARGS__"),
    // then removal of the comma should produce a placemarker token (in C99
    // terms) which we model by popping off the previous ##, giving us a plain
    // "X" when __VA_ARGS__ is empty.
    if (ResultToks.back().is(tok::hashhash))
      ResultToks.pop_back();

    // Remember that this comma was elided.
    ResultToks.back().setFlag(Token::CommaAfterElided);
  }

  // Never add a space, even if the comma, ##, or arg had a space.
  NextTokGetsSpace = false;
  return true;
}
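A minimal sketch of the extension this function implements, assuming GCC or
Clang, where the GNU ", ## __VA_ARGS__" comma elision is available:

#include <cstdio>

// GNU extension: when no variadic arguments are passed, a ## placed after
// the comma deletes the comma instead of pasting tokens.
#define LOG(fmt, ...) std::printf(fmt, ## __VA_ARGS__)

int main() {
  LOG("plain\n");       // expands to std::printf("plain\n"); comma elided
  LOG("n = %d\n", 42);  // expands to std::printf("n = %d\n", 42)
  return 0;
}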
Example no. 2
static void addCommentToList(SmallVectorImpl<SingleRawComment> &Comments,
                             const SingleRawComment &SRC) {
  // TODO: consider producing warnings when we decide not to merge comments.

  if (SRC.isOrdinary()) {
    // Skip gyb comments that are line number markers.
    if (SRC.RawText.startswith("// ###"))
      return;

    Comments.clear();
    return;
  }

  // If this is the first documentation comment, save it (because there isn't
  // anything to merge it with).
  if (Comments.empty()) {
    Comments.push_back(SRC);
    return;
  }

  auto &Last = Comments.back();

  // Merge comments only if they are on the same or consecutive lines;
  // otherwise discard the previous group and start fresh with this comment.
  if (Last.EndLine + 1 < SRC.StartLine)
    Comments.clear();

  Comments.push_back(SRC);
}
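A standalone sketch of the grouping rule above, using a hypothetical
FakeComment stand-in for SingleRawComment:

#include <cassert>
#include <vector>

struct FakeComment { unsigned StartLine, EndLine; };

static void addToGroup(std::vector<FakeComment> &Comments,
                       const FakeComment &C) {
  // A gap of more than one line breaks the group; otherwise keep merging.
  if (!Comments.empty() && Comments.back().EndLine + 1 < C.StartLine)
    Comments.clear();
  Comments.push_back(C);
}

int main() {
  std::vector<FakeComment> Comments;
  addToGroup(Comments, {10, 10});
  addToGroup(Comments, {11, 11}); // consecutive line: merged into the group
  assert(Comments.size() == 2);
  addToGroup(Comments, {20, 20}); // gap: previous group is discarded
  assert(Comments.size() == 1 && Comments.back().StartLine == 20);
  return 0;
}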
Example no. 3
/// Parse function arguments.
///   func-arguments:
///     curried-arguments | selector-arguments
///   curried-arguments:
///     parameter-clause+
///   selector-arguments:
///     '(' selector-element ')' (identifier '(' selector-element ')')+
///   selector-element:
///      identifier '(' pattern-atom (':' type)? ('=' expr)? ')'
///
ParserStatus
Parser::parseFunctionArguments(SmallVectorImpl<Identifier> &NamePieces,
                               SmallVectorImpl<ParameterList*> &BodyParams,
                               ParameterContextKind paramContext,
                               DefaultArgumentInfo &DefaultArgs) {
  // Parse parameter-clauses.
  ParserStatus status;
  bool isFirstParameterClause = true;
  unsigned FirstBodyPatternIndex = BodyParams.size();
  while (Tok.is(tok::l_paren)) {
    SmallVector<ParsedParameter, 4> params;
    SourceLoc leftParenLoc, rightParenLoc;

    // Parse the parameter clause.
    status |= parseParameterClause(leftParenLoc, params, rightParenLoc,
                                   &DefaultArgs, paramContext);

    // Turn the parameter clause into argument and body patterns.
    auto pattern = mapParsedParameters(*this, leftParenLoc, params,
                                       rightParenLoc, 
                                       isFirstParameterClause,
                                       isFirstParameterClause ? &NamePieces
                                                              : nullptr,
                                       paramContext);
    BodyParams.push_back(pattern);
    isFirstParameterClause = false;
    paramContext = ParameterContextKind::Curried;
  }

  // If the decl uses currying syntax, complain that that syntax has gone away.
  if (BodyParams.size() - FirstBodyPatternIndex > 1) {
    SourceRange allPatternsRange(
      BodyParams[FirstBodyPatternIndex]->getStartLoc(),
      BodyParams.back()->getEndLoc());
    auto diag = diagnose(allPatternsRange.Start,
                         diag::parameter_curry_syntax_removed);
    diag.highlight(allPatternsRange);
    bool seenArg = false;
    for (unsigned i = FirstBodyPatternIndex; i < BodyParams.size() - 1; i++) {
      // Replace ")(" with ", ", so "(x: Int)(y: Int)" becomes
      // "(x: Int, y: Int)". But just delete them if they're not actually
      // separating any arguments, e.g. in "()(y: Int)".
      StringRef replacement(", ");
      auto *leftParamList = BodyParams[i];
      auto *rightParamList = BodyParams[i + 1];
      if (leftParamList->size() != 0)
        seenArg = true;
      if (!seenArg || rightParamList->size() == 0)
        replacement = "";
    
      diag.fixItReplace(SourceRange(leftParamList->getEndLoc(),
                                    rightParamList->getStartLoc()),
                        replacement);
    }
  }

  return status;
}
Example no. 4
bool RenameIndependentSubregs::findComponents(IntEqClasses &Classes,
    SmallVectorImpl<RenameIndependentSubregs::SubRangeInfo> &SubRangeInfos,
    LiveInterval &LI) const {
  // First step: Create connected components for the VNInfos inside the
  // subranges and count the global number of such components.
  unsigned NumComponents = 0;
  for (LiveInterval::SubRange &SR : LI.subranges()) {
    SubRangeInfos.push_back(SubRangeInfo(*LIS, SR, NumComponents));
    ConnectedVNInfoEqClasses &ConEQ = SubRangeInfos.back().ConEQ;

    unsigned NumSubComponents = ConEQ.Classify(SR);
    NumComponents += NumSubComponents;
  }
  // Shortcut: With only 1 subrange, the normal separate component tests are
  // enough and we do not need to perform the union-find on the subregister
  // segments.
  if (SubRangeInfos.size() < 2)
    return false;

  // Next step: Build union-find structure over all subranges and merge classes
  // across subranges when they are affected by the same MachineOperand.
  const TargetRegisterInfo &TRI = *MRI->getTargetRegisterInfo();
  Classes.grow(NumComponents);
  unsigned Reg = LI.reg;
  for (const MachineOperand &MO : MRI->reg_nodbg_operands(Reg)) {
    if (!MO.isDef() && !MO.readsReg())
      continue;
    unsigned SubRegIdx = MO.getSubReg();
    LaneBitmask LaneMask = TRI.getSubRegIndexLaneMask(SubRegIdx);
    unsigned MergedID = ~0u;
    for (RenameIndependentSubregs::SubRangeInfo &SRInfo : SubRangeInfos) {
      const LiveInterval::SubRange &SR = *SRInfo.SR;
      if ((SR.LaneMask & LaneMask) == 0)
        continue;
      SlotIndex Pos = LIS->getInstructionIndex(*MO.getParent());
      Pos = MO.isDef() ? Pos.getRegSlot(MO.isEarlyClobber())
                       : Pos.getBaseIndex();
      const VNInfo *VNI = SR.getVNInfoAt(Pos);
      if (VNI == nullptr)
        continue;

      // Map to local representant ID.
      unsigned LocalID = SRInfo.ConEQ.getEqClass(VNI);
      // Global ID
      unsigned ID = LocalID + SRInfo.Index;
      // Merge other sets
      MergedID = MergedID == ~0u ? ID : Classes.join(MergedID, ID);
    }
  }

  // Early exit if we ended up with a single equivalence class.
  Classes.compress();
  unsigned NumClasses = Classes.getNumClasses();
  return NumClasses > 1;
}
Example no. 5
/// This function takes a raw source line and produces a mapping from columns
///  to the byte of the source line that produced the character displaying at
///  that column. This is the inverse of the mapping produced by byteToColumn()
///
/// The last element in the array is the number of bytes in the source string
///
/// example: (given a tabstop of 8)
///
///    "a \t \u3042" -> {0,1,2,-1,-1,-1,-1,-1,3,4,-1,7}
///
///  (\\u3042 is represented in UTF-8 by three bytes and takes two columns to
///   display)
static void columnToByte(StringRef SourceLine, unsigned TabStop,
                         SmallVectorImpl<int> &out) {
  out.clear();

  if (SourceLine.empty()) {
    out.resize(1u, 0);
    return;
  }

  int columns = 0;
  size_t i = 0;
  while (i<SourceLine.size()) {
    out.resize(columns+1, -1);
    out.back() = i;
    std::pair<SmallString<16>,bool> res
      = printableTextForNextCharacter(SourceLine, &i, TabStop);
    columns += llvm::sys::locale::columnWidth(res.first);
  }
  out.resize(columns+1, -1);
  out.back() = i;
}
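A minimal ASCII-only sketch of the mapping described above; it ignores the
multi-byte and wide characters that printableTextForNextCharacter handles:

#include <cassert>
#include <string>
#include <vector>

static void columnToByteAscii(const std::string &Line, unsigned TabStop,
                              std::vector<int> &Out) {
  Out.clear();
  unsigned Col = 0;
  for (size_t I = 0; I < Line.size(); ++I) {
    // A tab advances to the next tab stop; everything else is one column.
    unsigned Width = (Line[I] == '\t') ? (TabStop - Col % TabStop) : 1;
    Out.resize(Col + 1, -1);
    Out.back() = static_cast<int>(I); // this column starts at byte I
    Col += Width;
  }
  Out.resize(Col + 1, -1);
  Out.back() = static_cast<int>(Line.size()); // last entry: total byte count
}

int main() {
  std::vector<int> Map;
  columnToByteAscii("a \t", 8, Map);
  // 'a' at column 0, ' ' at column 1, the tab spans columns 2..7.
  assert(Map.size() == 9 && Map[0] == 0 && Map[1] == 1 && Map[2] == 2 &&
         Map[3] == -1 && Map[8] == 3);
  return 0;
}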
Example no. 6
template <typename ArgT>
static void ParseFunctionArgs(const SmallVectorImpl<ArgT> &Args,
                              SmallVectorImpl<unsigned> &Out) {
  unsigned CurrentArgIndex = ~0U;
  for (unsigned i = 0, e = Args.size(); i != e; i++) {
    // Count how many pieces each original argument was split into:
    // consecutive entries sharing an OrigArgIndex belong to one argument.
    if (CurrentArgIndex == Args[i].OrigArgIndex) {
      Out.back()++;
    } else {
      Out.push_back(1);
      CurrentArgIndex = Args[i].OrigArgIndex;
    }
  }
}
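A standalone sketch of what this computes, using a hypothetical FakeArg
stand-in for ArgT: runs of entries sharing an OrigArgIndex collapse to a
piece count per original argument.

#include <cassert>
#include <vector>

struct FakeArg { unsigned OrigArgIndex; };

static void countPartsPerArg(const std::vector<FakeArg> &Args,
                             std::vector<unsigned> &Out) {
  unsigned CurrentArgIndex = ~0U;
  for (const FakeArg &A : Args) {
    if (CurrentArgIndex == A.OrigArgIndex) {
      Out.back()++;           // another piece of the same original argument
    } else {
      Out.push_back(1);       // first piece of the next original argument
      CurrentArgIndex = A.OrigArgIndex;
    }
  }
}

int main() {
  // One argument split into two pieces, followed by an unsplit argument.
  std::vector<FakeArg> Args = {{0}, {0}, {1}};
  std::vector<unsigned> Out;
  countPartsPerArg(Args, Out);
  assert(Out.size() == 2 && Out[0] == 2 && Out[1] == 1);
  return 0;
}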
Example no. 7
void TokenLexer::stringifyVAOPTContents(
    SmallVectorImpl<Token> &ResultToks, const VAOptExpansionContext &VCtx,
    const SourceLocation VAOPTClosingParenLoc) {
  const int NumToksPriorToVAOpt = VCtx.getNumberOfTokensPriorToVAOpt();
  const unsigned int NumVAOptTokens = ResultToks.size() - NumToksPriorToVAOpt;
  Token *const VAOPTTokens =
      NumVAOptTokens ? &ResultToks[NumToksPriorToVAOpt] : nullptr;

  SmallVector<Token, 64> ConcatenatedVAOPTResultToks;
  // FIXME: Should we keep track within VCtx that we did or did not
  // encounter pasting - and only then perform this loop.

  // Perform token pasting (concatenation) prior to stringization.
  for (unsigned int CurTokenIdx = 0; CurTokenIdx != NumVAOptTokens;
       ++CurTokenIdx) {
    if (VAOPTTokens[CurTokenIdx].is(tok::hashhash)) {
      assert(CurTokenIdx != 0 &&
             "Can not have __VAOPT__ contents begin with a ##");
      Token &LHS = VAOPTTokens[CurTokenIdx - 1];
      pasteTokens(LHS, llvm::makeArrayRef(VAOPTTokens, NumVAOptTokens),
                  CurTokenIdx);
      // Replace the token prior to the first ## in this iteration.
      ConcatenatedVAOPTResultToks.back() = LHS;
      if (CurTokenIdx == NumVAOptTokens)
        break;
    }
    ConcatenatedVAOPTResultToks.push_back(VAOPTTokens[CurTokenIdx]);
  }

  ConcatenatedVAOPTResultToks.push_back(VCtx.getEOFTok());
  // Get the SourceLocation that represents the start location within
  // the macro definition that marks where this string is substituted
  // into: i.e. the __VA_OPT__ and the ')' within the spelling of the
  // macro definition, and use it to indicate that the stringified token
  // was generated from that location.
  const SourceLocation ExpansionLocStartWithinMacro =
      getExpansionLocForMacroDefLoc(VCtx.getVAOptLoc());
  const SourceLocation ExpansionLocEndWithinMacro =
      getExpansionLocForMacroDefLoc(VAOPTClosingParenLoc);

  Token StringifiedVAOPT = MacroArgs::StringifyArgument(
      &ConcatenatedVAOPTResultToks[0], PP, VCtx.hasCharifyBefore() /*Charify*/,
      ExpansionLocStartWithinMacro, ExpansionLocEndWithinMacro);

  if (VCtx.getLeadingSpaceForStringifiedToken())
    StringifiedVAOPT.setFlag(Token::LeadingSpace);

  StringifiedVAOPT.setFlag(Token::StringifiedInMacro);
  // Resize (shrink) the token stream to just capture this stringified token.
  ResultToks.resize(NumToksPriorToVAOpt + 1);
  ResultToks.back() = StringifiedVAOPT;
}
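A hedged sketch of the effect, using C++2a __VA_OPT__ as Clang implements it
(paste inside the __VA_OPT__ first, then stringify the contents into a single
string token):

// Given:
//   #define S(x, ...) #__VA_OPT__(x ## __VA_ARGS__)
// the contents of __VA_OPT__ are concatenated first and then stringified:
//   S(1)      // no variadic arguments: contents vanish, yields ""
//   S(1, 2)   // pastes 1 ## 2, then stringifies, yields "12"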
Example no. 8
void AArch64CallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
    const DataLayout &DL, MachineRegisterInfo &MRI, CallingConv::ID CallConv,
    const SplitArgTy &PerformArgSplit) const {
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  if (OrigArg.Ty->isVoidTy())
    return;

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);

  if (SplitVTs.size() == 1) {
    // No splitting to do, but we want to replace the original type (e.g. [1 x
    // double] -> double).
    SplitArgs.emplace_back(OrigArg.Reg, SplitVTs[0].getTypeForEVT(Ctx),
                           OrigArg.Flags, OrigArg.IsFixed);
    return;
  }

  unsigned FirstRegIdx = SplitArgs.size();
  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
      OrigArg.Ty, CallConv, false);
  for (auto SplitVT : SplitVTs) {
    Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
    SplitArgs.push_back(
        ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*SplitTy, DL)),
                SplitTy, OrigArg.Flags, OrigArg.IsFixed});
    if (NeedsRegBlock)
      SplitArgs.back().Flags.setInConsecutiveRegs();
  }

  SplitArgs.back().Flags.setInConsecutiveRegsLast();

  for (unsigned i = 0; i < Offsets.size(); ++i)
    PerformArgSplit(SplitArgs[FirstRegIdx + i].Reg, Offsets[i] * 8);
}
Example no. 9
void LiveVariables::UpdatePhysRegDefs(MachineInstr *MI,
                                      SmallVectorImpl<unsigned> &Defs) {
  while (!Defs.empty()) {
    unsigned Reg = Defs.back();
    Defs.pop_back();
    for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
         SubRegs.isValid(); ++SubRegs) {
      unsigned SubReg = *SubRegs;
      PhysRegDef[SubReg]  = MI;
      PhysRegUse[SubReg]  = nullptr;
    }
  }
}
Example no. 10
// \brief Update the register that describes the location of @Var in the
// @RegVars map.
static void
updateRegForVariable(RegDescribedVarsMap &RegVars, const MDNode *Var,
                     const SmallVectorImpl<const MachineInstr *> &VarHistory,
                     const MachineInstr &MI) {
  if (!VarHistory.empty()) {
    const MachineInstr &Prev = *VarHistory.back();
    // Check if Var is currently described by a register via an instruction
    // in the same basic block.
    if (Prev.isDebugValue() && Prev.getDebugVariable() == Var &&
        Prev.getParent() == MI.getParent()) {
      if (unsigned PrevReg = isDescribedByReg(Prev))
        dropRegDescribedVar(RegVars, PrevReg, Var);
    }
  }

  assert(MI.getDebugVariable() == Var);
  if (unsigned MIReg = isDescribedByReg(MI))
    addRegDescribedVar(RegVars, MIReg, Var);
}
Example no. 11
/// \brief Find ambiguity among base classes.
static void
detectAmbiguousBases(SmallVectorImpl<MSRTTIClass> &Classes) {
  llvm::SmallPtrSet<const CXXRecordDecl *, 8> VirtualBases;
  llvm::SmallPtrSet<const CXXRecordDecl *, 8> UniqueBases;
  llvm::SmallPtrSet<const CXXRecordDecl *, 8> AmbiguousBases;
  for (MSRTTIClass *Class = &Classes.front(); Class <= &Classes.back();) {
    if ((Class->Flags & MSRTTIClass::IsVirtual) &&
        !VirtualBases.insert(Class->RD)) {
      Class = MSRTTIClass::getNextChild(Class);
      continue;
    }
    if (!UniqueBases.insert(Class->RD))
      AmbiguousBases.insert(Class->RD);
    Class++;
  }
  if (AmbiguousBases.empty())
    return;
  for (MSRTTIClass &Class : Classes)
    if (AmbiguousBases.count(Class.RD))
      Class.Flags |= MSRTTIClass::IsAmbiguous;
}
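A minimal sketch of the hierarchy this flags: in a non-virtual diamond the
root class is reachable twice, so its entry would be marked IsAmbiguous.

struct A { int x; };
struct B : A {};
struct C : A {};
struct D : B, C {};     // two distinct A subobjects: A is an ambiguous base

// With virtual inheritance the duplicate subtree is skipped (VirtualBases
// above) and the ambiguity disappears:
struct VB : virtual A {};
struct VC : virtual A {};
struct VD : VB, VC {};  // exactly one A subobject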
Example no. 12
/// ComputeCallSiteTable - Compute the call-site table.  The entry for an invoke
/// has a try-range containing the call, a non-zero landing pad, and an
/// appropriate action.  The entry for an ordinary call has a try-range
/// containing the call and zero for the landing pad and the action.  Calls
/// marked 'nounwind' have no entry and must not be contained in the try-range
/// of any entry - they form gaps in the table.  Entries must be ordered by
/// try-range address.
void DwarfException::
ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
                     const RangeMapType &PadMap,
                     const SmallVectorImpl<const LandingPadInfo *> &LandingPads,
                     const SmallVectorImpl<unsigned> &FirstActions) {
  // The end label of the previous invoke or nounwind try-range.
  MCSymbol *LastLabel = 0;

  // Whether there is a potentially throwing instruction (currently this means
  // an ordinary call) between the end of the previous try-range and now.
  bool SawPotentiallyThrowing = false;

  // Whether the last CallSite entry was for an invoke.
  bool PreviousIsInvoke = false;

  // Visit all instructions in order of address.
  for (MachineFunction::const_iterator I = Asm->MF->begin(), E = Asm->MF->end();
       I != E; ++I) {
    for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
         MI != E; ++MI) {
      if (!MI->isLabel()) {
        if (MI->isCall())
          SawPotentiallyThrowing |= !CallToNoUnwindFunction(MI);
        continue;
      }

      // End of the previous try-range?
      MCSymbol *BeginLabel = MI->getOperand(0).getMCSymbol();
      if (BeginLabel == LastLabel)
        SawPotentiallyThrowing = false;

      // Beginning of a new try-range?
      RangeMapType::const_iterator L = PadMap.find(BeginLabel);
      if (L == PadMap.end())
        // Nope, it was just some random label.
        continue;

      const PadRange &P = L->second;
      const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];
      assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
             "Inconsistent landing pad map!");

      // For Dwarf exception handling (SjLj handling doesn't use this). If some
      // instruction between the previous try-range and this one may throw,
      // create a call-site entry with no landing pad for the region between the
      // try-ranges.
      if (SawPotentiallyThrowing && Asm->MAI->isExceptionHandlingDwarf()) {
        CallSiteEntry Site = { LastLabel, BeginLabel, 0, 0 };
        CallSites.push_back(Site);
        PreviousIsInvoke = false;
      }

      LastLabel = LandingPad->EndLabels[P.RangeIndex];
      assert(BeginLabel && LastLabel && "Invalid landing pad!");

      if (!LandingPad->LandingPadLabel) {
        // Create a gap.
        PreviousIsInvoke = false;
      } else {
        // This try-range is for an invoke.
        CallSiteEntry Site = {
          BeginLabel,
          LastLabel,
          LandingPad->LandingPadLabel,
          FirstActions[P.PadIndex]
        };

        // Try to merge with the previous call-site. SJLJ doesn't do this.
        if (PreviousIsInvoke && Asm->MAI->isExceptionHandlingDwarf()) {
          CallSiteEntry &Prev = CallSites.back();
          if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
            // Extend the range of the previous entry.
            Prev.EndLabel = Site.EndLabel;
            continue;
          }
        }

        // Otherwise, create a new call-site.
        if (Asm->MAI->isExceptionHandlingDwarf())
          CallSites.push_back(Site);
        else {
          // SjLj EH must maintain the call sites in the order assigned
          // to them by the SjLjPrepare pass.
          unsigned SiteNo = MMI->getCallSiteBeginLabel(BeginLabel);
          if (CallSites.size() < SiteNo)
            CallSites.resize(SiteNo);
          CallSites[SiteNo - 1] = Site;
        }
        PreviousIsInvoke = true;
      }
    }
  }

  // If some instruction between the previous try-range and the end of the
  // function may throw, create a call-site entry with no landing pad for the
  // region following the try-range.
  if (SawPotentiallyThrowing && Asm->MAI->isExceptionHandlingDwarf()) {
    CallSiteEntry Site = { LastLabel, 0, 0, 0 };
    CallSites.push_back(Site);
  }
}
Example no. 13
static bool ParseFile(const std::string &Filename,
                      const std::vector<std::string> &IncludeDirs,
                      SmallVectorImpl<std::string> &OutputFiles) {
  ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr =
      MemoryBuffer::getFileOrSTDIN(Filename);
  if (std::error_code EC = MBOrErr.getError()) {
    llvm::errs() << "Could not open input file '" << Filename << "': "
                 << EC.message() << "\n";
    return true;
  }
  std::unique_ptr<llvm::MemoryBuffer> MB = std::move(MBOrErr.get());

  // Record the location of the include directory so that the lexer can find it
  // later.
  SourceMgr SrcMgr;
  SrcMgr.setIncludeDirs(IncludeDirs);

  // Tell SrcMgr about this buffer, which is what Parser will pick up.
  SrcMgr.AddNewSourceBuffer(std::move(MB), llvm::SMLoc());

  LangOptions Opts;
  Opts.DefaultReal8 = DefaultReal8;
  Opts.DefaultDouble8 = DefaultDouble8;
  Opts.DefaultInt8 = DefaultInt8;
  Opts.ReturnComments = ReturnComments;
  if(!FreeForm && !FixedForm) {
    llvm::StringRef Ext = llvm::sys::path::extension(Filename);
    if(Ext.equals_lower(".f")) {
      Opts.FixedForm = 1;
      Opts.FreeForm = 0;
    }
  } else if(FixedForm) {
    Opts.FixedForm = 1;
    Opts.FreeForm = 0;
  }

  TextDiagnosticPrinter TDP(SrcMgr);
  DiagnosticsEngine Diag(new DiagnosticIDs,&SrcMgr, &TDP, false);
  // Chain in -verify checker, if requested.
  if(RunVerifier)
    Diag.setClient(new VerifyDiagnosticConsumer(Diag));

  ASTContext Context(SrcMgr, Opts);
  Sema SA(Context, Diag);
  Parser P(SrcMgr, Opts, Diag, SA);
  Diag.getClient()->BeginSourceFile(Opts, &P.getLexer());
  P.ParseProgramUnits();
  Diag.getClient()->EndSourceFile();

  // Dump
  if(PrintAST || DumpAST) {
    auto Dumper = CreateASTDumper("");
    Dumper->HandleTranslationUnit(Context);
    delete Dumper;
  }

  // Emit
  if(!SyntaxOnly && !Diag.hadErrors()) {    
    flang::TargetOptions TargetOptions;
    TargetOptions.Triple = TargetTriple.empty()? llvm::sys::getDefaultTargetTriple() :
                                                 TargetTriple;
    TargetOptions.CPU = llvm::sys::getHostCPUName();

    auto CG = CreateLLVMCodeGen(Diag, Filename == ""? std::string("module") : Filename,
                                CodeGenOptions(), TargetOptions, llvm::getGlobalContext());
    CG->Initialize(Context);
    CG->HandleTranslationUnit(Context);

    BackendAction BA = Backend_EmitObj;
    if(EmitASM)   BA = Backend_EmitAssembly;
    if(EmitLLVM)  BA = Backend_EmitLL;

    const llvm::Target *TheTarget = 0;
    std::string Err;
    TheTarget = llvm::TargetRegistry::lookupTarget(TargetOptions.Triple, Err);

    CodeGenOpt::Level TMOptLevel = CodeGenOpt::Default;
    switch(OptLevel) {
    case 0:  TMOptLevel = CodeGenOpt::None; break;
    case 3:  TMOptLevel = CodeGenOpt::Aggressive; break;
    }

    llvm::TargetOptions Options;

    auto TM = TheTarget->createTargetMachine(TargetOptions.Triple, TargetOptions.CPU, "", Options,
                                             Reloc::Default, CodeModel::Default,
                                             TMOptLevel);

    if(!(EmitLLVM && OptLevel == 0)) {
      auto TheModule = CG->GetModule();
      auto PM = new llvm::legacy::PassManager();
      //llvm::legacy::FunctionPassManager *FPM = new llvm::legacy::FunctionPassManager(TheModule);
      //FPM->add(new DataLayoutPass());
      //PM->add(new llvm::DataLayoutPass());
      //TM->addAnalysisPasses(*PM);
      PM->add(createPromoteMemoryToRegisterPass());

      PassManagerBuilder PMBuilder;
      PMBuilder.OptLevel = OptLevel;
      PMBuilder.SizeLevel = 0;
      PMBuilder.LoopVectorize = true;
      PMBuilder.SLPVectorize = true;
      unsigned Threshold = 225;
      if (OptLevel > 2)
        Threshold = 275;
      PMBuilder.Inliner = createFunctionInliningPass(Threshold);


      PMBuilder.populateModulePassManager(*PM);
      //llvm::legacy::PassManager *MPM = new llvm::legacy::PassManager();
      //PMBuilder.populateModulePassManager(*MPM);

      PM->run(*TheModule);
      //MPM->run(*TheModule);
      delete PM;
      //delete MPM;
    }

    if(Interpret) {
      //const char *Env[] = { "", nullptr };
      //Execute(CG->ReleaseModule(), Env);
    } else if(OutputFile == "-") {
      // FIXME: outputting to stdout is broken 
      //EmitFile(llvm::outs(), CG->GetModule(), TM, BA);
      OutputFiles.push_back(GetOutputName("stdout",BA));
      EmitOutputFile(OutputFiles.back(), CG->GetModule(), TM, BA);
    } else {
      OutputFiles.push_back(GetOutputName(Filename, BA));
      EmitOutputFile(OutputFiles.back(), CG->GetModule(), TM, BA);
    }
    delete CG;
  }

  return Diag.hadErrors();
}
Example no. 14
void X86CmovConverterPass::convertCmovInstsToBranches(
    SmallVectorImpl<MachineInstr *> &Group) const {
  assert(!Group.empty() && "No CMOV instructions to convert");
  ++NumOfOptimizedCmovGroups;

  // If the CMOV group is not packed, e.g., there are debug instructions between
  // the first CMOV and the last CMOV, then pack the group and make the CMOV
  // instructions consecutive by moving the debug instructions to after the last
  // CMOV.
  packCmovGroup(Group.front(), Group.back());

  // To convert a CMOVcc instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.

  // Before
  // -----
  // MBB:
  //   cond = cmp ...
  //   v1 = CMOVge t1, f1, cond
  //   v2 = CMOVlt t2, f2, cond
  //   v3 = CMOVge v1, f3, cond
  //
  // After
  // -----
  // MBB:
  //   cond = cmp ...
  //   jge %SinkMBB
  //
  // FalseMBB:
  //   jmp %SinkMBB
  //
  // SinkMBB:
  //   %v1 = phi[%f1, %FalseMBB], [%t1, %MBB]
  //   %v2 = phi[%t2, %FalseMBB], [%f2, %MBB] ; For CMOV with OppCC switch
  //                                          ; true-value with false-value
  //   %v3 = phi[%f3, %FalseMBB], [%t1, %MBB] ; Phi instruction cannot use
  //                                          ; previous Phi instruction result

  MachineInstr &MI = *Group.front();
  MachineInstr *LastCMOV = Group.back();
  DebugLoc DL = MI.getDebugLoc();

  X86::CondCode CC = X86::CondCode(X86::getCondFromCMovOpc(MI.getOpcode()));
  X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
  // Potentially swap the condition codes so that any memory operand to a CMOV
  // is in the *false* position instead of the *true* position. We can invert
  // any non-memory operand CMOV instructions to cope with this and we ensure
  // memory operand CMOVs are only included with a single condition code.
  if (llvm::any_of(Group, [&](MachineInstr *I) {
        return I->mayLoad() && X86::getCondFromCMovOpc(I->getOpcode()) == CC;
      }))
    std::swap(CC, OppCC);

  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction::iterator It = ++MBB->getIterator();
  MachineFunction *F = MBB->getParent();
  const BasicBlock *BB = MBB->getBasicBlock();

  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(BB);
  F->insert(It, FalseMBB);
  F->insert(It, SinkMBB);

  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  if (checkEFLAGSLive(LastCMOV)) {
    FalseMBB->addLiveIn(X86::EFLAGS);
    SinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer the remainder of BB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(LastCMOV)), MBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Add the false and sink blocks as its successors.
  MBB->addSuccessor(FalseMBB);
  MBB->addSuccessor(SinkMBB);

  // Create the conditional branch instruction.
  BuildMI(MBB, DL, TII->get(X86::GetCondBranchFromCond(CC))).addMBB(SinkMBB);

  // Add the sink block to the false block successors.
  FalseMBB->addSuccessor(SinkMBB);

  MachineInstrBuilder MIB;
  MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
  MachineBasicBlock::iterator MIItEnd =
      std::next(MachineBasicBlock::iterator(LastCMOV));
  MachineBasicBlock::iterator FalseInsertionPoint = FalseMBB->begin();
  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();

  // First we need to insert an explicit load on the false path for any memory
  // operand. We also need to potentially do register rewriting here, but it is
  // simpler as the memory operands are always on the false path so we can
  // simply take that input, whatever it is.
  DenseMap<unsigned, unsigned> FalseBBRegRewriteTable;
  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;) {
    auto &MI = *MIIt++;
    // Skip any CMOVs in this group which don't load from memory.
    if (!MI.mayLoad()) {
      // Remember the false-side register input.
      unsigned FalseReg =
          MI.getOperand(X86::getCondFromCMovOpc(MI.getOpcode()) == CC ? 1 : 2)
              .getReg();
      // Walk back through any intermediate cmovs referenced.
      while (true) {
        auto FRIt = FalseBBRegRewriteTable.find(FalseReg);
        if (FRIt == FalseBBRegRewriteTable.end())
          break;
        FalseReg = FRIt->second;
      }
      FalseBBRegRewriteTable[MI.getOperand(0).getReg()] = FalseReg;
      continue;
    }

    // The condition must be the *opposite* of the one we've decided to branch
    // on as the branch will go *around* the load and the load should happen
    // when the CMOV condition is false.
    assert(X86::getCondFromCMovOpc(MI.getOpcode()) == OppCC &&
           "Can only handle memory-operand cmov instructions with a condition "
           "opposite to the selected branch direction.");

    // The goal is to rewrite the cmov from:
    //
    //   MBB:
    //     %A = CMOVcc %B (tied), (mem)
    //
    // to
    //
    //   MBB:
    //     %A = CMOVcc %B (tied), %C
    //   FalseMBB:
    //     %C = MOV (mem)
    //
    // Which will allow the next loop to rewrite the CMOV in terms of a PHI:
    //
    //   MBB:
    //     JMP!cc SinkMBB
    //   FalseMBB:
    //     %C = MOV (mem)
    //   SinkMBB:
    //     %A = PHI [ %C, FalseMBB ], [ %B, MBB]

    // Get a fresh register to use as the destination of the MOV.
    const TargetRegisterClass *RC = MRI->getRegClass(MI.getOperand(0).getReg());
    unsigned TmpReg = MRI->createVirtualRegister(RC);

    SmallVector<MachineInstr *, 4> NewMIs;
    bool Unfolded = TII->unfoldMemoryOperand(*MBB->getParent(), MI, TmpReg,
                                             /*UnfoldLoad*/ true,
                                             /*UnfoldStore*/ false, NewMIs);
    (void)Unfolded;
    assert(Unfolded && "Should never fail to unfold a loading cmov!");

    // Move the new CMOV to just before the old one and reset any impacted
    // iterator.
    auto *NewCMOV = NewMIs.pop_back_val();
    assert(X86::getCondFromCMovOpc(NewCMOV->getOpcode()) == OppCC &&
           "Last new instruction isn't the expected CMOV!");
    LLVM_DEBUG(dbgs() << "\tRewritten cmov: "; NewCMOV->dump());
    MBB->insert(MachineBasicBlock::iterator(MI), NewCMOV);
    if (&*MIItBegin == &MI)
      MIItBegin = MachineBasicBlock::iterator(NewCMOV);

    // Sink whatever instructions were needed to produce the unfolded operand
    // into the false block.
    for (auto *NewMI : NewMIs) {
      LLVM_DEBUG(dbgs() << "\tRewritten load instr: "; NewMI->dump());
      FalseMBB->insert(FalseInsertionPoint, NewMI);
      // Re-map any operands that are from other cmovs to the inputs for this block.
      for (auto &MOp : NewMI->uses()) {
        if (!MOp.isReg())
          continue;
        auto It = FalseBBRegRewriteTable.find(MOp.getReg());
        if (It == FalseBBRegRewriteTable.end())
          continue;

        MOp.setReg(It->second);
        // This might have been a kill when it referenced the cmov result, but
        // it won't necessarily be once rewritten.
        // FIXME: We could potentially improve this by tracking whether the
        // operand to the cmov was also a kill, and then skipping the PHI node
        // construction below.
        MOp.setIsKill(false);
      }
    }
    MBB->erase(MachineBasicBlock::iterator(MI),
               std::next(MachineBasicBlock::iterator(MI)));

    // Add this PHI to the rewrite table.
    FalseBBRegRewriteTable[NewCMOV->getOperand(0).getReg()] = TmpReg;
  }

  // As we are creating the PHIs, we have to be careful if there is more than
  // one.  Later CMOVs may reference the results of earlier CMOVs, but later
  // PHIs have to reference the individual true/false inputs from earlier PHIs.
  // That also means that PHI construction must work forward from earlier to
  // later, and that the code must maintain a mapping from earlier PHI's
  // destination registers, and the registers that went into the PHI.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;

  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
    unsigned DestReg = MIIt->getOperand(0).getReg();
    unsigned Op1Reg = MIIt->getOperand(1).getReg();
    unsigned Op2Reg = MIIt->getOperand(2).getReg();

    // If this CMOV we are processing is the opposite condition from the jump we
    // generated, then we have to swap the operands for the PHI that is going to
    // be generated.
    if (X86::getCondFromCMovOpc(MIIt->getOpcode()) == OppCC)
      std::swap(Op1Reg, Op2Reg);

    auto Op1Itr = RegRewriteTable.find(Op1Reg);
    if (Op1Itr != RegRewriteTable.end())
      Op1Reg = Op1Itr->second.first;

    auto Op2Itr = RegRewriteTable.find(Op2Reg);
    if (Op2Itr != RegRewriteTable.end())
      Op2Reg = Op2Itr->second.second;

    //  SinkMBB:
    //   %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, MBB ]
    //  ...
    MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
              .addReg(Op1Reg)
              .addMBB(FalseMBB)
              .addReg(Op2Reg)
              .addMBB(MBB);
    (void)MIB;
    LLVM_DEBUG(dbgs() << "\tFrom: "; MIIt->dump());
    LLVM_DEBUG(dbgs() << "\tTo: "; MIB->dump());

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
  }

  // Now remove the CMOV(s).
  MBB->erase(MIItBegin, MIItEnd);
}
Example no. 15
/// Parse a function definition signature.
///   func-signature:
///     func-arguments func-throws? func-signature-result?
///   func-signature-result:
///     '->' type
///
/// Note that this leaves retType as null if unspecified.
ParserStatus
Parser::parseFunctionSignature(Identifier SimpleName,
                               DeclName &FullName,
                               SmallVectorImpl<ParameterList*> &bodyParams,
                               DefaultArgumentInfo &defaultArgs,
                               SourceLoc &throwsLoc,
                               bool &rethrows,
                               TypeRepr *&retType) {
  SmallVector<Identifier, 4> NamePieces;
  NamePieces.push_back(SimpleName);
  FullName = SimpleName;
  
  ParserStatus Status;
  // We force first type of a func declaration to be a tuple for consistency.
  if (Tok.is(tok::l_paren)) {
    ParameterContextKind paramContext;
    if (SimpleName.isOperator())
      paramContext = ParameterContextKind::Operator;
    else
      paramContext = ParameterContextKind::Function;

    Status = parseFunctionArguments(NamePieces, bodyParams, paramContext,
                                    defaultArgs);
    FullName = DeclName(Context, SimpleName, 
                        llvm::makeArrayRef(NamePieces.begin() + 1,
                                           NamePieces.end()));

    if (bodyParams.empty()) {
      // If we didn't get anything, add a () pattern to avoid breaking
      // invariants.
      assert(Status.hasCodeCompletion() || Status.isError());
      bodyParams.push_back(ParameterList::createEmpty(Context));
    }
  } else {
    diagnose(Tok, diag::func_decl_without_paren);
    Status = makeParserError();

    // Recover by creating a '() -> ?' signature.
    bodyParams.push_back(ParameterList::createEmpty(Context, PreviousLoc,
                                                    PreviousLoc));
    FullName = DeclName(Context, SimpleName, bodyParams.back());
  }
  
  // Check for the 'throws' keyword.
  rethrows = false;
  if (Tok.is(tok::kw_throws)) {
    throwsLoc = consumeToken();
  } else if (Tok.is(tok::kw_rethrows)) {
    throwsLoc = consumeToken();
    rethrows = true;
  } else if (Tok.is(tok::kw_throw)) {
    throwsLoc = consumeToken();
    diagnose(throwsLoc, diag::throw_in_function_type)
      .fixItReplace(throwsLoc, "throws");
  }

  SourceLoc arrowLoc;
  // If there's a trailing arrow, parse the rest as the result type.
  if (Tok.isAny(tok::arrow, tok::colon)) {
    if (!consumeIf(tok::arrow, arrowLoc)) {
      // FixIt ':' to '->'.
      diagnose(Tok, diag::func_decl_expected_arrow)
          .fixItReplace(SourceRange(Tok.getLoc()), "->");
      arrowLoc = consumeToken(tok::colon);
    }

    ParserResult<TypeRepr> ResultType =
      parseType(diag::expected_type_function_result);
    if (ResultType.hasCodeCompletion())
      return ResultType;
    retType = ResultType.getPtrOrNull();
    if (!retType) {
      Status.setIsParseError();
      return Status;
    }
  } else {
    // Otherwise, we leave retType null.
    retType = nullptr;
  }

  // Check for 'throws' and 'rethrows' after the type and correct it.
  if (!throwsLoc.isValid()) {
    if (Tok.is(tok::kw_throws)) {
      throwsLoc = consumeToken();
    } else if (Tok.is(tok::kw_rethrows)) {
      throwsLoc = consumeToken();
      rethrows = true;
    }

    if (throwsLoc.isValid()) {
      assert(arrowLoc.isValid());
      assert(retType);
      auto diag = rethrows ? diag::rethrows_after_function_result
                           : diag::throws_after_function_result;
      SourceLoc typeEndLoc = Lexer::getLocForEndOfToken(SourceMgr,
                                                        retType->getEndLoc());
      SourceLoc throwsEndLoc = Lexer::getLocForEndOfToken(SourceMgr, throwsLoc);
      diagnose(Tok, diag)
        .fixItInsert(arrowLoc, rethrows ? "rethrows " : "throws ")
        .fixItRemoveChars(typeEndLoc, throwsEndLoc);
    }
  }

  return Status;
}
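A sketch of the two recoveries above on Swift input, as the fix-its describe:

// ':' written instead of '->' before the result type:
//   func f(x: Int): Int      // fix-it replaces ':' with '->', giving
//   func f(x: Int) -> Int
//
// 'throws' written after the result type:
//   func g() -> Int throws   // fix-it moves 'throws' before the '->', giving
//   func g() throws -> Int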
Example no. 16
bool X86ATTAsmParser::
ParseInstruction(StringRef Name, SMLoc NameLoc,
                 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  StringRef PatchedName = Name;

  // FIXME: Hack to recognize setneb as setne.
  if (PatchedName.startswith("set") && PatchedName.endswith("b") &&
      PatchedName != "setb" && PatchedName != "setnb")
    PatchedName = PatchedName.substr(0, Name.size()-1);
  
  // FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.
  const MCExpr *ExtraImmOp = 0;
  if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
      (PatchedName.endswith("ss") || PatchedName.endswith("sd") ||
       PatchedName.endswith("ps") || PatchedName.endswith("pd"))) {
    bool IsVCMP = PatchedName.startswith("vcmp");
    unsigned SSECCIdx = IsVCMP ? 4 : 3;
    unsigned SSEComparisonCode = StringSwitch<unsigned>(
      PatchedName.slice(SSECCIdx, PatchedName.size() - 2))
      .Case("eq",          0)
      .Case("lt",          1)
      .Case("le",          2)
      .Case("unord",       3)
      .Case("neq",         4)
      .Case("nlt",         5)
      .Case("nle",         6)
      .Case("ord",         7)
      .Case("eq_uq",       8)
      .Case("nge",         9)
      .Case("ngt",      0x0A)
      .Case("false",    0x0B)
      .Case("neq_oq",   0x0C)
      .Case("ge",       0x0D)
      .Case("gt",       0x0E)
      .Case("true",     0x0F)
      .Case("eq_os",    0x10)
      .Case("lt_oq",    0x11)
      .Case("le_oq",    0x12)
      .Case("unord_s",  0x13)
      .Case("neq_us",   0x14)
      .Case("nlt_uq",   0x15)
      .Case("nle_uq",   0x16)
      .Case("ord_s",    0x17)
      .Case("eq_us",    0x18)
      .Case("nge_uq",   0x19)
      .Case("ngt_uq",   0x1A)
      .Case("false_os", 0x1B)
      .Case("neq_os",   0x1C)
      .Case("ge_oq",    0x1D)
      .Case("gt_oq",    0x1E)
      .Case("true_us",  0x1F)
      .Default(~0U);
    if (SSEComparisonCode != ~0U) {
      ExtraImmOp = MCConstantExpr::Create(SSEComparisonCode,
                                          getParser().getContext());
      if (PatchedName.endswith("ss")) {
        PatchedName = IsVCMP ? "vcmpss" : "cmpss";
      } else if (PatchedName.endswith("sd")) {
        PatchedName = IsVCMP ? "vcmpsd" : "cmpsd";
      } else if (PatchedName.endswith("ps")) {
        PatchedName = IsVCMP ? "vcmpps" : "cmpps";
      } else {
        assert(PatchedName.endswith("pd") && "Unexpected mnemonic!");
        PatchedName = IsVCMP ? "vcmppd" : "cmppd";
      }
    }
  }

  // FIXME: Hack to recognize vpclmul<src1_quadword, src2_quadword>dq
  if (PatchedName.startswith("vpclmul")) {
    unsigned CLMULQuadWordSelect = StringSwitch<unsigned>(
      PatchedName.slice(7, PatchedName.size() - 2))
      .Case("lqlq", 0x00) // src1[63:0],   src2[63:0]
      .Case("hqlq", 0x01) // src1[127:64], src2[63:0]
      .Case("lqhq", 0x10) // src1[63:0],   src2[127:64]
      .Case("hqhq", 0x11) // src1[127:64], src2[127:64]
      .Default(~0U);
    if (CLMULQuadWordSelect != ~0U) {
      ExtraImmOp = MCConstantExpr::Create(CLMULQuadWordSelect,
                                          getParser().getContext());
      assert(PatchedName.endswith("dq") && "Unexpected mnemonic!");
      PatchedName = "vpclmulqdq";
    }
  }

  Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));

  if (ExtraImmOp)
    Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc));


  // Determine whether this is an instruction prefix.
  bool isPrefix =
    Name == "lock" || Name == "rep" ||
    Name == "repe" || Name == "repz" ||
    Name == "repne" || Name == "repnz" ||
    Name == "rex64" || Name == "data16";


  // This does the actual operand parsing.  Don't parse any more if we have a
  // prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
  // just want to parse the "lock" as the first instruction and the "incl" as
  // the next one.
  if (getLexer().isNot(AsmToken::EndOfStatement) && !isPrefix) {

    // Parse '*' modifier.
    if (getLexer().is(AsmToken::Star)) {
      SMLoc Loc = Parser.getTok().getLoc();
      Operands.push_back(X86Operand::CreateToken("*", Loc));
      Parser.Lex(); // Eat the star.
    }

    // Read the first operand.
    if (X86Operand *Op = ParseOperand())
      Operands.push_back(Op);
    else {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (X86Operand *Op = ParseOperand())
        Operands.push_back(Op);
      else {
        Parser.EatToEndOfStatement();
        return true;
      }
    }

    if (getLexer().isNot(AsmToken::EndOfStatement)) {
      SMLoc Loc = getLexer().getLoc();
      Parser.EatToEndOfStatement();
      return Error(Loc, "unexpected token in argument list");
    }
  }

  if (getLexer().is(AsmToken::EndOfStatement))
    Parser.Lex(); // Consume the EndOfStatement
  else if (isPrefix && getLexer().is(AsmToken::Slash))
    Parser.Lex(); // Consume the prefix separator Slash

  // This is a terrible hack to handle "out[bwl]? %al, (%dx)" ->
  // "outb %al, %dx".  Out doesn't take a memory form, but this is a widely
  // documented form in various unofficial manuals, so a lot of code uses it.
  if ((Name == "outb" || Name == "outw" || Name == "outl" || Name == "out") &&
      Operands.size() == 3) {
    X86Operand &Op = *(X86Operand*)Operands.back();
    if (Op.isMem() && Op.Mem.SegReg == 0 &&
        isa<MCConstantExpr>(Op.Mem.Disp) &&
        cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
        Op.Mem.BaseReg == MatchRegisterName("dx") && Op.Mem.IndexReg == 0) {
      SMLoc Loc = Op.getEndLoc();
      Operands.back() = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc);
      delete &Op;
    }
  }
  
  // FIXME: Hack to recognize s{hr,ar,hl} $1, <op>.  Canonicalize to
  // "shift <op>".
  if ((Name.startswith("shr") || Name.startswith("sar") ||
       Name.startswith("shl") || Name.startswith("sal") ||
       Name.startswith("rcl") || Name.startswith("rcr") ||
       Name.startswith("rol") || Name.startswith("ror")) &&
      Operands.size() == 3) {
    X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
    if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
        cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
      delete Operands[1];
      Operands.erase(Operands.begin() + 1);
    }
  }

  return false;
}
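A brief illustration of the mnemonic aliases handled above (AT&T syntax);
each alias is split into the base mnemonic plus an immediate condition code:

// cmpltss   %xmm1, %xmm0            is accepted as  cmpss      $1, %xmm1, %xmm0
// vcmpnltps %xmm1, %xmm2, %xmm0     is accepted as  vcmpps     $5, %xmm1, %xmm2, %xmm0
// vpclmulhqlqdq %xmm1, %xmm2, %xmm0 is accepted as  vpclmulqdq $1, %xmm1, %xmm2, %xmm0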
Example no. 17
/// ComputeActionsTable - Compute the actions table and gather the first action
/// index for each landing pad site.
unsigned DwarfException::
ComputeActionsTable(const SmallVectorImpl<const LandingPadInfo*> &LandingPads,
                    SmallVectorImpl<ActionEntry> &Actions,
                    SmallVectorImpl<unsigned> &FirstActions) {

    // The action table follows the call-site table in the LSDA. The individual
    // records are of two types:
    //
    //   * Catch clause
    //   * Exception specification
    //
    // The two record kinds have the same format, with only small differences.
    // They are distinguished by the "switch value" field: Catch clauses
    // (TypeInfos) have strictly positive switch values, and exception
    // specifications (FilterIds) have strictly negative switch values. Value 0
    // indicates a catch-all clause.
    //
    // Negative type IDs index into FilterIds. Positive type IDs index into
    // TypeInfos.  The value written for a positive type ID is just the type ID
    // itself.  For a negative type ID, however, the value written is the
    // (negative) byte offset of the corresponding FilterIds entry.  The byte
    // offset is usually equal to the type ID (because the FilterIds entries are
    // written using a variable width encoding, which outputs one byte per entry
    // as long as the value written is not too large) but can differ.  This kind
    // of complication does not occur for positive type IDs because type infos are
    // output using a fixed width encoding.  FilterOffsets[i] holds the byte
    // offset corresponding to FilterIds[i].

    const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
    SmallVector<int, 16> FilterOffsets;
    FilterOffsets.reserve(FilterIds.size());
    int Offset = -1;

    for (std::vector<unsigned>::const_iterator
            I = FilterIds.begin(), E = FilterIds.end(); I != E; ++I) {
        FilterOffsets.push_back(Offset);
        Offset -= TargetAsmInfo::getULEB128Size(*I);
    }

    FirstActions.reserve(LandingPads.size());

    int FirstAction = 0;
    unsigned SizeActions = 0;
    const LandingPadInfo *PrevLPI = 0;

    for (SmallVectorImpl<const LandingPadInfo *>::const_iterator
            I = LandingPads.begin(), E = LandingPads.end(); I != E; ++I) {
        const LandingPadInfo *LPI = *I;
        const std::vector<int> &TypeIds = LPI->TypeIds;
        const unsigned NumShared = PrevLPI ? SharedTypeIds(LPI, PrevLPI) : 0;
        unsigned SizeSiteActions = 0;

        if (NumShared < TypeIds.size()) {
            unsigned SizeAction = 0;
            ActionEntry *PrevAction = 0;

            if (NumShared) {
                const unsigned SizePrevIds = PrevLPI->TypeIds.size();
                assert(Actions.size());
                PrevAction = &Actions.back();
                SizeAction = TargetAsmInfo::getSLEB128Size(PrevAction->NextAction) +
                             TargetAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);

                for (unsigned j = NumShared; j != SizePrevIds; ++j) {
                    SizeAction -=
                        TargetAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
                    SizeAction += -PrevAction->NextAction;
                    PrevAction = PrevAction->Previous;
                }
            }

            // Compute the actions.
            for (unsigned J = NumShared, M = TypeIds.size(); J != M; ++J) {
                int TypeID = TypeIds[J];
                assert(-1 - TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
                int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
                unsigned SizeTypeID = TargetAsmInfo::getSLEB128Size(ValueForTypeID);

                int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
                SizeAction = SizeTypeID + TargetAsmInfo::getSLEB128Size(NextAction);
                SizeSiteActions += SizeAction;

                ActionEntry Action = { ValueForTypeID, NextAction, PrevAction };
                Actions.push_back(Action);
                PrevAction = &Actions.back();
            }

            // Record the first action of the landing pad site.
            FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
        } // else identical - re-use previous FirstAction

        // Information used when creating the call-site table. The action record
        // field of the call site record is the offset of the first associated
        // action record, relative to the start of the actions table. This value
        // is biased by 1 (1 indicating the start of the actions table), and 0
        // indicates that there are no actions.
        FirstActions.push_back(FirstAction);

        // Compute this site's contribution to size.
        SizeActions += SizeSiteActions;

        PrevLPI = LPI;
    }

    return SizeActions;
}
Example no. 18
// First thing we need to do is scan the whole function for values that are
// live across unwind edges.  Each value that is live across an unwind edge
// we spill into a stack location, guaranteeing that there is nothing live
// across the unwind edge.  This process also splits all critical edges
// coming out of invokes.
void LowerInvoke::
splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes) {
  // First step, split all critical edges from invoke instructions.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    InvokeInst *II = Invokes[i];
    SplitCriticalEdge(II, 0, this);
    SplitCriticalEdge(II, 1, this);
    assert(!isa<PHINode>(II->getNormalDest()) &&
           !isa<PHINode>(II->getUnwindDest()) &&
           "critical edge splitting left single entry phi nodes?");
  }

  Function *F = Invokes.back()->getParent()->getParent();

  // To avoid having to handle incoming arguments specially, we lower each arg
  // to a copy instruction in the entry block.  This ensures that the argument
  // value itself cannot be live across the entry block.
  BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
  while (isa<AllocaInst>(AfterAllocaInsertPt) &&
        isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
    ++AfterAllocaInsertPt;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI) {
    Type *Ty = AI->getType();
    // Aggregate types can't be cast, but are legal argument types, so we have
    // to handle them differently. We use an extract/insert pair as a
    // lightweight method to achieve the same goal.
    if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
      Instruction *EI = ExtractValueInst::Create(AI, 0, "",AfterAllocaInsertPt);
      Instruction *NI = InsertValueInst::Create(AI, EI, 0);
      NI->insertAfter(EI);
      AI->replaceAllUsesWith(NI);
      // Set the operand of the instructions back to the AllocaInst.
      EI->setOperand(0, AI);
      NI->setOperand(0, AI);
    } else {
      // This is always a no-op cast because we're casting AI to AI->getType()
      // so src and destination types are identical. BitCast is the only
      // possibility.
      CastInst *NC = new BitCastInst(
        AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
      AI->replaceAllUsesWith(NC);
      // Set the operand of the cast instruction back to the AllocaInst.
      // Normally it's forbidden to replace a CastInst's operand because it
      // could cause the opcode to reflect an illegal conversion. However,
      // we're replacing it here with the same value it was constructed with.
      // We do this because the above replaceAllUsesWith() clobbered the
      // operand, but we want this one to remain.
      NC->setOperand(0, AI);
    }
  }

  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      // Ignore obvious cases we don't have to handle.  In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block.  Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      SmallVector<Instruction*,16> Users;
      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
           UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Scan all of the uses and see if the live range is live across an unwind
      // edge.  If we find a use live across an invoke edge, create an alloca
      // and spill the value.
      std::set<InvokeInst*> InvokesWithStoreInserted;

      // Find all of the blocks that this value is live in.
      std::set<BasicBlock*> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          NeedsSpill = true;
        }
      }

      // If we decided we need a spill, do it.
      if (NeedsSpill) {
        ++NumSpilled;
        DemoteRegToStack(*Inst, true);
      }
    }
}
Example no. 19
static void computeCalleeSaveRegisterPairs(
    MachineFunction &MF, const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI, SmallVectorImpl<RegPairInfo> &RegPairs) {

  if (CSI.empty())
    return;

  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  CallingConv::ID CC = MF.getFunction()->getCallingConv();
  unsigned Count = CSI.size();
  (void)CC;
  // MachO's compact unwind format relies on all registers being stored in
  // pairs.
  assert((!MF.getSubtarget<AArch64Subtarget>().isTargetMachO() ||
          CC == CallingConv::PreserveMost ||
          (Count & 1) == 0) &&
         "Odd number of callee-saved regs to spill!");
  unsigned Offset = AFI->getCalleeSavedStackSize();

  for (unsigned i = 0; i < Count; ++i) {
    RegPairInfo RPI;
    RPI.Reg1 = CSI[i].getReg();

    assert(AArch64::GPR64RegClass.contains(RPI.Reg1) ||
           AArch64::FPR64RegClass.contains(RPI.Reg1));
    RPI.IsGPR = AArch64::GPR64RegClass.contains(RPI.Reg1);

    // Add the next reg to the pair if it is in the same register class.
    if (i + 1 < Count) {
      unsigned NextReg = CSI[i + 1].getReg();
      if ((RPI.IsGPR && AArch64::GPR64RegClass.contains(NextReg)) ||
          (!RPI.IsGPR && AArch64::FPR64RegClass.contains(NextReg)))
        RPI.Reg2 = NextReg;
    }

    // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
    // list to come in sorted by frame index so that we can issue the store
    // pair instructions directly. Assert if we see anything otherwise.
    //
    // The order of the registers in the list is controlled by
    // getCalleeSavedRegs(), so they will always be in-order, as well.
    assert((!RPI.isPaired() ||
            (CSI[i].getFrameIdx() + 1 == CSI[i + 1].getFrameIdx())) &&
           "Out of order callee saved regs!");

    // MachO's compact unwind format relies on all registers being stored in
    // adjacent register pairs.
    assert((!MF.getSubtarget<AArch64Subtarget>().isTargetMachO() ||
            CC == CallingConv::PreserveMost ||
            (RPI.isPaired() &&
             ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
              RPI.Reg1 + 1 == RPI.Reg2))) &&
           "Callee-save registers not saved as adjacent register pair!");

    RPI.FrameIdx = CSI[i].getFrameIdx();

    if (Count * 8 != AFI->getCalleeSavedStackSize() && !RPI.isPaired()) {
      // Round up size of non-pair to pair size if we need to pad the
      // callee-save area to ensure 16-byte alignment.
      Offset -= 16;
      assert(MFI->getObjectAlignment(RPI.FrameIdx) <= 16);
      MFI->setObjectSize(RPI.FrameIdx, 16);
    } else
      Offset -= RPI.isPaired() ? 16 : 8;
    assert(Offset % 8 == 0);
    RPI.Offset = Offset / 8;
    assert((RPI.Offset >= -64 && RPI.Offset <= 63) &&
           "Offset out of bounds for LDP/STP immediate");

    RegPairs.push_back(RPI);
    if (RPI.isPaired())
      ++i;
  }

  // Align first offset to even 16-byte boundary to avoid additional SP
  // adjustment instructions.
  // Last pair offset is size of whole callee-save region for SP
  // pre-dec/post-inc.
  RegPairInfo &LastPair = RegPairs.back();
  assert(AFI->getCalleeSavedStackSize() % 8 == 0);
  LastPair.Offset = AFI->getCalleeSavedStackSize() / 8;
}
Example no. 20
/// Compute the call-site table.  The entry for an invoke has a try-range
/// containing the call, a non-zero landing pad, and an appropriate action.  The
/// entry for an ordinary call has a try-range containing the call and zero for
/// the landing pad and the action.  Calls marked 'nounwind' have no entry and
/// must not be contained in the try-range of any entry - they form gaps in the
/// table.  Entries must be ordered by try-range address.
void EHStreamer::
computeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
                     const SmallVectorImpl<const LandingPadInfo *> &LandingPads,
                     const SmallVectorImpl<unsigned> &FirstActions) {
  // Invokes and nounwind calls have entries in PadMap (due to being bracketed
  // by try-range labels when lowered).  Ordinary calls do not, so appropriate
  // try-ranges for them need to be deduced so we can put them in the LSDA.
  RangeMapType PadMap;
  for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
    const LandingPadInfo *LandingPad = LandingPads[i];
    for (unsigned j = 0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
      MCSymbol *BeginLabel = LandingPad->BeginLabels[j];
      assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!");
      PadRange P = { i, j };
      PadMap[BeginLabel] = P;
    }
  }

  // The end label of the previous invoke or nounwind try-range.
  MCSymbol *LastLabel = nullptr;

  // Whether there is a potentially throwing instruction (currently this means
  // an ordinary call) between the end of the previous try-range and now.
  bool SawPotentiallyThrowing = false;

  // Whether the last CallSite entry was for an invoke.
  bool PreviousIsInvoke = false;

  bool IsSJLJ = Asm->MAI->getExceptionHandlingType() == ExceptionHandling::SjLj;

  // Visit all instructions in order of address.
  for (const auto &MBB : *Asm->MF) {
    for (const auto &MI : MBB) {
      if (!MI.isEHLabel()) {
        if (MI.isCall())
          SawPotentiallyThrowing |= !callToNoUnwindFunction(&MI);
        continue;
      }

      // End of the previous try-range?
      MCSymbol *BeginLabel = MI.getOperand(0).getMCSymbol();
      if (BeginLabel == LastLabel)
        SawPotentiallyThrowing = false;

      // Beginning of a new try-range?
      RangeMapType::const_iterator L = PadMap.find(BeginLabel);
      if (L == PadMap.end())
        // Nope, it was just some random label.
        continue;

      const PadRange &P = L->second;
      const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];
      assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
             "Inconsistent landing pad map!");

      // For Dwarf exception handling (SjLj handling doesn't use this). If some
      // instruction between the previous try-range and this one may throw,
      // create a call-site entry with no landing pad for the region between the
      // try-ranges.
      if (SawPotentiallyThrowing && !IsSJLJ) {
        CallSiteEntry Site = { LastLabel, BeginLabel, nullptr, 0 };
        CallSites.push_back(Site);
        PreviousIsInvoke = false;
      }

      LastLabel = LandingPad->EndLabels[P.RangeIndex];
      assert(BeginLabel && LastLabel && "Invalid landing pad!");

      if (!LandingPad->LandingPadLabel) {
        // Create a gap.
        PreviousIsInvoke = false;
      } else {
        // This try-range is for an invoke.
        CallSiteEntry Site = {
          BeginLabel,
          LastLabel,
          LandingPad->LandingPadLabel,
          FirstActions[P.PadIndex]
        };

        // Try to merge with the previous call-site. SJLJ doesn't do this.
        if (PreviousIsInvoke && !IsSJLJ) {
          CallSiteEntry &Prev = CallSites.back();
          if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
            // Extend the range of the previous entry.
            Prev.EndLabel = Site.EndLabel;
            continue;
          }
        }

        // Otherwise, create a new call-site.
        if (!IsSJLJ)
          CallSites.push_back(Site);
        else {
          // SjLj EH must maintain the call sites in the order assigned
          // to them by the SjLjPrepare pass.
          unsigned SiteNo = MMI->getCallSiteBeginLabel(BeginLabel);
          if (CallSites.size() < SiteNo)
            CallSites.resize(SiteNo);
          CallSites[SiteNo - 1] = Site;
        }
        PreviousIsInvoke = true;
      }
    }
  }

  // If some instruction between the previous try-range and the end of the
  // function may throw, create a call-site entry with no landing pad for the
  // region following the try-range.
  if (SawPotentiallyThrowing && !IsSJLJ) {
    CallSiteEntry Site = { LastLabel, nullptr, nullptr, 0 };
    CallSites.push_back(Site);
  }
}