Example 1
// Compare compares the result of MI against zero.  If MI is a suitable load
// instruction and if CCUsers is a single conditional trap on zero, eliminate
// the load and convert the branch to a load-and-trap.  Return true on success.
bool SystemZElimCompare::convertToLoadAndTrap(
    MachineInstr &MI, MachineInstr &Compare,
    SmallVectorImpl<MachineInstr *> &CCUsers) {
  unsigned LATOpcode = TII->getLoadAndTrap(MI.getOpcode());
  if (!LATOpcode)
    return false;

  // Check whether we have a single CondTrap that traps on zero.
  if (CCUsers.size() != 1)
    return false;
  MachineInstr *Branch = CCUsers[0];
  if (Branch->getOpcode() != SystemZ::CondTrap ||
      Branch->getOperand(0).getImm() != SystemZ::CCMASK_ICMP ||
      Branch->getOperand(1).getImm() != SystemZ::CCMASK_CMP_EQ)
    return false;

  // We already know that there are no references to the register between
  // MI and Compare.  Make sure that there are also no references between
  // Compare and Branch.
  unsigned SrcReg = getCompareSourceReg(Compare);
  MachineBasicBlock::iterator MBBI = Compare, MBBE = Branch;
  for (++MBBI; MBBI != MBBE; ++MBBI)
    if (getRegReferences(*MBBI, SrcReg))
      return false;

  // The transformation is OK.  Rebuild Branch as a load-and-trap.
  while (Branch->getNumOperands())
    Branch->RemoveOperand(0);
  Branch->setDesc(TII->get(LATOpcode));
  MachineInstrBuilder(*Branch->getParent()->getParent(), Branch)
      .add(MI.getOperand(0))
      .add(MI.getOperand(1))
      .add(MI.getOperand(2))
      .add(MI.getOperand(3));
  MI.eraseFromParent();
  return true;
}
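
A sketch of the rewrite (illustrative operands; on SystemZ, LGAT is the
load-and-trap form of the LG load):

//   %r1 = LG <mem>                          ; load
//   CGHI %r1, 0                             ; Compare against zero
//   CondTrap CCMASK_ICMP, CCMASK_CMP_EQ     ; trap if equal
// becomes:
//   %r1 = LGAT <mem>                        ; load, trapping if the value is 0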
Example 2
bool convertUTF8ToUTF16String(StringRef SrcUTF8,
                              SmallVectorImpl<UTF16> &DstUTF16) {
  assert(DstUTF16.empty());

  // Avoid OOB by returning early on empty input.
  if (SrcUTF8.empty()) {
    DstUTF16.push_back(0);
    DstUTF16.pop_back();
    return true;
  }

  const UTF8 *Src = reinterpret_cast<const UTF8 *>(SrcUTF8.begin());
  const UTF8 *SrcEnd = reinterpret_cast<const UTF8 *>(SrcUTF8.end());

  // Allocate the same number of UTF-16 code units as UTF-8 code units.  Encoding
  // as UTF-16 should always require the same number of code units or fewer than
  // the UTF-8 encoding.  Allocate one extra code unit for the null terminator
  // though, so that someone calling DstUTF16.data() gets a null terminated
  // string.  We resize down later so we don't have to worry that this
  // over-allocates.
  DstUTF16.resize(SrcUTF8.size()+1);
  UTF16 *Dst = &DstUTF16[0];
  UTF16 *DstEnd = Dst + DstUTF16.size();

  ConversionResult CR =
      ConvertUTF8toUTF16(&Src, SrcEnd, &Dst, DstEnd, strictConversion);
  assert(CR != targetExhausted);

  if (CR != conversionOK) {
    DstUTF16.clear();
    return false;
  }

  DstUTF16.resize(Dst - &DstUTF16[0]);
  DstUTF16.push_back(0);
  DstUTF16.pop_back();
  return true;
}
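
A minimal usage sketch (illustrative; the function requires an empty output
vector):

  SmallVector<UTF16, 16> Units;
  if (convertUTF8ToUTF16String("caf\xc3\xa9", Units)) {
    // Units holds the UTF-16 code units; Units.data() is additionally
    // null-terminated one element past Units.size().
  }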
Example 3
void append(SmallVectorImpl<char> &path, Style style, const Twine &a,
            const Twine &b, const Twine &c, const Twine &d) {
  SmallString<32> a_storage;
  SmallString<32> b_storage;
  SmallString<32> c_storage;
  SmallString<32> d_storage;

  SmallVector<StringRef, 4> components;
  if (!a.isTriviallyEmpty()) components.push_back(a.toStringRef(a_storage));
  if (!b.isTriviallyEmpty()) components.push_back(b.toStringRef(b_storage));
  if (!c.isTriviallyEmpty()) components.push_back(c.toStringRef(c_storage));
  if (!d.isTriviallyEmpty()) components.push_back(d.toStringRef(d_storage));

  for (auto &component : components) {
    bool path_has_sep =
        !path.empty() && is_separator(path[path.size() - 1], style);
    if (path_has_sep) {
      // Strip separators from beginning of component.
      size_t loc = component.find_first_not_of(separators(style));
      StringRef c = component.substr(loc);

      // Append it.
      path.append(c.begin(), c.end());
      continue;
    }

    bool component_has_sep =
        !component.empty() && is_separator(component[0], style);
    if (!component_has_sep &&
        !(path.empty() || has_root_name(component, style))) {
      // Add a separator.
      path.push_back(preferred_separator(style));
    }

    path.append(component.begin(), component.end());
  }
}
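
A usage sketch, assuming the variant above is the usual llvm::sys::path::append
overload that defaults the trailing Twine components to "":

  SmallString<64> P("usr");
  sys::path::append(P, Style::posix, "lib", "clang");
  // P == "usr/lib/clang"; a separator is inserted only where one is missing.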
Example 4
/*!
  \note We are really pessimistic here about what kind of a load we're doing.
 */
void SPUInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                   SmallVectorImpl<MachineOperand> &Addr,
                                   const TargetRegisterClass *RC,
                                   SmallVectorImpl<MachineInstr*> &NewMIs)
    const {
  cerr << "loadRegToAddr() invoked!\n";
  abort();

  if (Addr[0].isFI()) {
    /* do what loadRegFromStackSlot does here... */
  } else {
    unsigned Opc = 0;
    if (RC == SPU::R8CRegisterClass) {
      /* do brilliance here */
    } else if (RC == SPU::R16CRegisterClass) {
      /* Opc = PPC::LWZ; */
    } else if (RC == SPU::R32CRegisterClass) {
      /* Opc = PPC::LD; */
    } else if (RC == SPU::R32FPRegisterClass) {
      /* Opc = PPC::LFD; */
    } else if (RC == SPU::R64FPRegisterClass) {
      /* Opc = PPC::LFS; */
    } else if (RC == SPU::VECREGRegisterClass) {
      /* Opc = PPC::LVX; */
    } else if (RC == SPU::GPRCRegisterClass) {
      /* Opc = something else! */
    } else {
      assert(0 && "Unknown regclass!");
      abort();
    }
    DebugLoc DL = DebugLoc::getUnknownLoc();
    MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
    for (unsigned i = 0, e = Addr.size(); i != e; ++i)
      MIB.addOperand(Addr[i]);
    NewMIs.push_back(MIB);
  }
}
Example 5
static bool isAlternateVectorMask(SmallVectorImpl<int> &Mask) {
  bool isAlternate = true;
  unsigned MaskSize = Mask.size();

  // Example: shufflevector A, B, <0,5,2,7>
  for (unsigned i = 0; i < MaskSize && isAlternate; ++i) {
    if (Mask[i] < 0)
      continue;
    isAlternate = Mask[i] == (int)((i & 1) ? MaskSize + i : i);
  }

  if (isAlternate)
    return true;

  isAlternate = true;
  // Example: shufflevector A, B, <4,1,6,3>
  for (unsigned i = 0; i < MaskSize && isAlternate; ++i) {
    if (Mask[i] < 0)
      continue;
    isAlternate = Mask[i] == (int)((i & 1) ? i : MaskSize + i);
  }

  return isAlternate;
}
Example 6
void Sema::EraseUnwantedCUDAMatches(
    const FunctionDecl *Caller,
    SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches) {
  if (Matches.size() <= 1)
    return;

  using Pair = std::pair<DeclAccessPair, FunctionDecl*>;

  // Gets the CUDA function preference for a call from Caller to Match.
  auto GetCFP = [&](const Pair &Match) {
    return IdentifyCUDAPreference(Caller, Match.second);
  };

  // Find the best call preference among the functions in Matches.
  CUDAFunctionPreference BestCFP = GetCFP(*std::max_element(
      Matches.begin(), Matches.end(),
      [&](const Pair &M1, const Pair &M2) { return GetCFP(M1) < GetCFP(M2); }));

  // Erase all functions with lower priority.
  Matches.erase(
      llvm::remove_if(
          Matches, [&](const Pair &Match) { return GetCFP(Match) < BestCFP; }),
      Matches.end());
}
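
The Matches.erase(llvm::remove_if(...)) call above is the standard erase-remove
idiom; a self-contained sketch of the same pattern in plain C++:

#include <algorithm>
#include <vector>

// Keep only the elements of V that are >= Cutoff (illustrative helper).
void dropBelow(std::vector<int> &V, int Cutoff) {
  V.erase(std::remove_if(V.begin(), V.end(),
                         [Cutoff](int X) { return X < Cutoff; }),
          V.end());
}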
Example 7
StringRef
camel_case::toLowercaseInitialisms(StringRef string,
                                   SmallVectorImpl<char> &scratch) {

  if (string.empty())
    return string;

  // Already lowercase.
  if (!clang::isUppercase(string[0]))
    return string;

  // Lowercase until we hit an uppercase letter followed by a
  // non-uppercase letter.
  llvm::SmallString<32> scratchStr;
  for (unsigned i = 0, n = string.size(); i != n; ++i) {
    // If the next character is not uppercase, stop.
    if (i < n - 1 && !clang::isUppercase(string[i+1])) {
      // If we're at the beginning, or the next non-uppercase character
      // is not a letter, or it starts a plural suffix, we should still
      // lowercase the character we're on.
      if (i == 0 || !clang::isLetter(string[i+1]) ||
          isPluralSuffix(camel_case::getFirstWord(string.substr(i+1)))) {
        scratchStr.push_back(clang::toLowercase(string[i]));
        ++i;
      }

      scratchStr.append(string.substr(i));
      break;
    }

    scratchStr.push_back(clang::toLowercase(string[i]));
  }

  scratch = scratchStr;
  return {scratch.begin(), scratch.size()};
}
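
A usage sketch (input invented; the behavior follows the loop above):

  llvm::SmallString<32> scratch;
  StringRef out = camel_case::toLowercaseInitialisms("URLSession", scratch);
  // out == "urlSession": the "URL" initialism is lowercased, and "Session",
  // the first word after it, is preserved.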
Example 8
/// \brief Diagnose all of the unexpanded parameter packs in the given
/// vector.
static void 
DiagnoseUnexpandedParameterPacks(Sema &S, SourceLocation Loc,
                                 Sema::UnexpandedParameterPackContext UPPC,
             const SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
  SmallVector<SourceLocation, 4> Locations;
  SmallVector<IdentifierInfo *, 4> Names;
  llvm::SmallPtrSet<IdentifierInfo *, 4> NamesKnown;

  for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
    IdentifierInfo *Name = 0;
    if (const TemplateTypeParmType *TTP
          = Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>())
      Name = TTP->getIdentifier();
    else
      Name = Unexpanded[I].first.get<NamedDecl *>()->getIdentifier();

    if (Name && NamesKnown.insert(Name))
      Names.push_back(Name);

    if (Unexpanded[I].second.isValid())
      Locations.push_back(Unexpanded[I].second);
  }

  DiagnosticBuilder DB
    = Names.size() == 0? S.Diag(Loc, diag::err_unexpanded_parameter_pack_0)
                           << (int)UPPC
    : Names.size() == 1? S.Diag(Loc, diag::err_unexpanded_parameter_pack_1)
                           << (int)UPPC << Names[0]
    : Names.size() == 2? S.Diag(Loc, diag::err_unexpanded_parameter_pack_2)
                           << (int)UPPC << Names[0] << Names[1]
    : S.Diag(Loc, diag::err_unexpanded_parameter_pack_3_or_more)
        << (int)UPPC << Names[0] << Names[1];

  for (unsigned I = 0, N = Locations.size(); I != N; ++I)
    DB << SourceRange(Locations[I]);
}
Example 9
void AlphaInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                        SmallVectorImpl<MachineOperand> &Addr,
                                        const TargetRegisterClass *RC,
                                 SmallVectorImpl<MachineInstr*> &NewMIs) const {
  unsigned Opc = 0;
  if (RC == Alpha::F4RCRegisterClass)
    Opc = Alpha::LDS;
  else if (RC == Alpha::F8RCRegisterClass)
    Opc = Alpha::LDT;
  else if (RC == Alpha::GPRCRegisterClass)
    Opc = Alpha::LDQ;
  else
    abort();
  MachineInstrBuilder MIB = 
    BuildMI(MF, get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
    MachineOperand &MO = Addr[i];
    if (MO.isReg())
      MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit());
    else
      MIB.addImm(MO.getImm());
  }
  NewMIs.push_back(MIB);
}
Example 10
void Instruction::getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned,
                                       MDNode*> > &Result) const {
  Result.clear();
  
  // Handle 'dbg' as a special case since it is not stored in the hash table.
  if (!DbgLoc.isUnknown()) {
    Result.push_back(std::make_pair((unsigned)LLVMContext::MD_dbg,
                                    DbgLoc.getAsMDNode(getContext())));
    if (!hasMetadataHashEntry()) return;
  }
  
  assert(hasMetadataHashEntry() &&
         getContext().pImpl->MetadataStore.count(this) &&
         "Shouldn't have called this");
  const LLVMContextImpl::MDMapTy &Info =
    getContext().pImpl->MetadataStore.find(this)->second;
  assert(!Info.empty() && "Shouldn't have called this");

  Result.append(Info.begin(), Info.end());

  // Sort the resulting array so it is stable.
  if (Result.size() > 1)
    array_pod_sort(Result.begin(), Result.end());
}
Example 11
void SPUInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                  bool isKill,
                                  SmallVectorImpl<MachineOperand> &Addr,
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
  cerr << "storeRegToAddr() invoked!\n";
  abort();

  if (Addr[0].isFI()) {
    /* do what storeRegToStackSlot does here */
  } else {
    unsigned Opc = 0;
    if (RC == SPU::GPRCRegisterClass) {
      /* Opc = PPC::STW; */
    } else if (RC == SPU::R16CRegisterClass) {
      /* Opc = PPC::STD; */
    } else if (RC == SPU::R32CRegisterClass) {
      /* Opc = PPC::STFD; */
    } else if (RC == SPU::R32FPRegisterClass) {
      /* Opc = PPC::STFD; */
    } else if (RC == SPU::R64FPRegisterClass) {
      /* Opc = PPC::STFS; */
    } else if (RC == SPU::VECREGRegisterClass) {
      /* Opc = PPC::STVX; */
    } else {
      assert(0 && "Unknown regclass!");
      abort();
    }
    DebugLoc DL = DebugLoc::getUnknownLoc();
    MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc))
      .addReg(SrcReg, false, false, isKill);
    for (unsigned i = 0, e = Addr.size(); i != e; ++i)
      MIB.addOperand(Addr[i]);
    NewMIs.push_back(MIB);
  }
}
Example 12
bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
                          Module &M, bool isConst, unsigned AddrSpace) const {
  auto &DL = M.getDataLayout();
  // FIXME: Find better heuristics
  std::stable_sort(Globals.begin(), Globals.end(),
                   [&DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
                     return DL.getTypeAllocSize(GV1->getValueType()) <
                            DL.getTypeAllocSize(GV2->getValueType());
                   });

  // If we want to just blindly group all globals together, do so.
  if (!GlobalMergeGroupByUse) {
    BitVector AllGlobals(Globals.size());
    AllGlobals.set();
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // If we want to be smarter, look at all uses of each global, to try to
  // discover all sets of globals used together, and how many times each of
  // these sets occurred.
  //
  // Keep this reasonably efficient, by having an append-only list of all sets
  // discovered so far (UsedGlobalSet), and mapping each "together-ness" unit of
  // code (currently, a Function) to the set of globals seen so far that are
  // used together in that unit (GlobalUsesByFunction).
  //
  // When we look at the Nth global, we know that any new set is either:
  // - the singleton set {N}, containing this global only, or
  // - the union of {N} and a previously-discovered set, containing some
  //   combination of the previous N-1 globals.
  // Using that knowledge, when looking at the Nth global, we can keep:
  // - a reference to the singleton set {N} (CurGVOnlySetIdx)
  // - a list mapping each previous set to its union with {N} (EncounteredUGS),
  //   if it actually occurs.
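
  // Illustrative walk-through (globals invented): with globals g0, g1 and g2,
  // where f() uses g0 and g1 while h() uses all three, the loop below discovers
  // the sets {g0}, {g0,g1} and {g0,g1,g2}; a set's UsageCount stays positive
  // only while some function still maps to exactly that set.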

  // We keep track of the sets of globals used together "close enough".
  struct UsedGlobalSet {
    BitVector Globals;
    unsigned UsageCount = 1;

    UsedGlobalSet(size_t Size) : Globals(Size) {}
  };

  // Each set is unique in UsedGlobalSets.
  std::vector<UsedGlobalSet> UsedGlobalSets;

  // Avoid repeating the create-global-set pattern.
  auto CreateGlobalSet = [&]() -> UsedGlobalSet & {
    UsedGlobalSets.emplace_back(Globals.size());
    return UsedGlobalSets.back();
  };

  // The first set is the empty set.
  CreateGlobalSet().UsageCount = 0;

  // We define "close enough" to be "in the same function".
  // FIXME: Grouping uses by function is way too aggressive, so we should have
  // a better metric for distance between uses.
  // The obvious alternative would be to group by BasicBlock, but that's in
  // turn too conservative.
  // Anything in between wouldn't be trivial to compute, so just stick with
  // per-function grouping.

  // The value type is an index into UsedGlobalSets.
  // The default (0) conveniently points to the empty set.
  DenseMap<Function *, size_t /*UsedGlobalSetIdx*/> GlobalUsesByFunction;

  // Now, look at each merge-eligible global in turn.

  // Keep track of the sets we already encountered to which we added the
  // current global.
  // Each element matches the same-index element in UsedGlobalSets.
  // This lets us efficiently tell whether a set has already been expanded to
  // include the current global.
  std::vector<size_t> EncounteredUGS;

  for (size_t GI = 0, GE = Globals.size(); GI != GE; ++GI) {
    GlobalVariable *GV = Globals[GI];

    // Reset the encountered sets for this global...
    std::fill(EncounteredUGS.begin(), EncounteredUGS.end(), 0);
    // ...and grow it in case we created new sets for the previous global.
    EncounteredUGS.resize(UsedGlobalSets.size());

    // We might need to create a set that only consists of the current global.
    // Keep track of its index into UsedGlobalSets.
    size_t CurGVOnlySetIdx = 0;

    // For each global, look at all its Uses.
    for (auto &U : GV->uses()) {
      // This Use might be a ConstantExpr.  We're interested in Instruction
      // users, so look through ConstantExpr...
      Use *UI, *UE;
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
        if (CE->use_empty())
          continue;
        UI = &*CE->use_begin();
        UE = nullptr;
      } else if (isa<Instruction>(U.getUser())) {
        UI = &U;
        UE = UI->getNext();
      } else {
        continue;
      }

      // ...to iterate on all the instruction users of the global.
      // Note that we iterate on Uses and not on Users to be able to getNext().
      for (; UI != UE; UI = UI->getNext()) {
        Instruction *I = dyn_cast<Instruction>(UI->getUser());
        if (!I)
          continue;

        Function *ParentFn = I->getParent()->getParent();

        // If we're only optimizing for size, ignore non-minsize functions.
        if (OnlyOptimizeForSize && !ParentFn->optForMinSize())
          continue;

        size_t UGSIdx = GlobalUsesByFunction[ParentFn];

        // If this is the first global this function uses, map the function to
        // the set consisting of this global only.
        if (!UGSIdx) {
          // If that set doesn't exist yet, create it.
          if (!CurGVOnlySetIdx) {
            CurGVOnlySetIdx = UsedGlobalSets.size();
            CreateGlobalSet().Globals.set(GI);
          } else {
            ++UsedGlobalSets[CurGVOnlySetIdx].UsageCount;
          }

          GlobalUsesByFunction[ParentFn] = CurGVOnlySetIdx;
          continue;
        }

        // If the function's current set already contains this global, just
        // increment the counter.
        if (UsedGlobalSets[UGSIdx].Globals.test(GI)) {
          ++UsedGlobalSets[UGSIdx].UsageCount;
          continue;
        }

        // If not, the previous set wasn't actually used in this function.
        --UsedGlobalSets[UGSIdx].UsageCount;

        // If we already expanded the previous set to include this global, just
        // reuse that expanded set.
        if (size_t ExpandedIdx = EncounteredUGS[UGSIdx]) {
          ++UsedGlobalSets[ExpandedIdx].UsageCount;
          GlobalUsesByFunction[ParentFn] = ExpandedIdx;
          continue;
        }

        // If not, create a new set consisting of the union of the previous set
        // and this global.  Mark it as encountered, so we can reuse it later.
        GlobalUsesByFunction[ParentFn] = EncounteredUGS[UGSIdx] =
            UsedGlobalSets.size();

        UsedGlobalSet &NewUGS = CreateGlobalSet();
        NewUGS.Globals.set(GI);
        NewUGS.Globals |= UsedGlobalSets[UGSIdx].Globals;
      }
    }
  }

  // Now we have found a bunch of sets of globals used together.  We
  // accumulated the number of times we encountered each set (i.e., the number
  // of functions that use that exact set of globals).
  //
  // Multiply that by the size of the set to give us a crude profitability
  // metric.
  std::stable_sort(UsedGlobalSets.begin(), UsedGlobalSets.end(),
            [](const UsedGlobalSet &UGS1, const UsedGlobalSet &UGS2) {
              return UGS1.Globals.count() * UGS1.UsageCount <
                     UGS2.Globals.count() * UGS2.UsageCount;
            });

  // We can choose to merge all globals together, but ignore globals never used
  // with another global.  This catches the obviously non-profitable cases of
  // having a single global, but is aggressive enough for any other case.
  if (GlobalMergeIgnoreSingleUse) {
    BitVector AllGlobals(Globals.size());
    for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
      const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
      if (UGS.UsageCount == 0)
        continue;
      if (UGS.Globals.count() > 1)
        AllGlobals |= UGS.Globals;
    }
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // Starting from the sets with the best (=biggest) profitability, find a
  // good combination.
  // The ideal (and expensive) solution can only be found by trying all
  // combinations, looking for the one with the best profitability.
  // Don't be smart about it, and just pick the first compatible combination,
  // starting with the sets with the best profitability.
  BitVector PickedGlobals(Globals.size());
  bool Changed = false;

  for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
    const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
    if (UGS.UsageCount == 0)
      continue;
    if (PickedGlobals.anyCommon(UGS.Globals))
      continue;
    PickedGlobals |= UGS.Globals;
    // If the set only contains one global, there's no point in merging.
    // Ignore the global for inclusion in other sets though, so keep it in
    // PickedGlobals.
    if (UGS.Globals.count() < 2)
      continue;
    Changed |= doMerge(Globals, UGS.Globals, M, isConst, AddrSpace);
  }

  return Changed;
}
Example 13
bool GlobalMerge::doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
                          const BitVector &GlobalSet, Module &M, bool isConst,
                          unsigned AddrSpace) const {
  assert(Globals.size() > 1);

  Type *Int32Ty = Type::getInt32Ty(M.getContext());
  auto &DL = M.getDataLayout();

  LLVM_DEBUG(dbgs() << " Trying to merge set, starts with #"
                    << GlobalSet.find_first() << "\n");

  bool Changed = false;
  ssize_t i = GlobalSet.find_first();
  while (i != -1) {
    ssize_t j = 0;
    uint64_t MergedSize = 0;
    std::vector<Type*> Tys;
    std::vector<Constant*> Inits;

    bool HasExternal = false;
    StringRef FirstExternalName;
    for (j = i; j != -1; j = GlobalSet.find_next(j)) {
      Type *Ty = Globals[j]->getValueType();
      MergedSize += DL.getTypeAllocSize(Ty);
      if (MergedSize > MaxOffset) {
        break;
      }
      Tys.push_back(Ty);
      Inits.push_back(Globals[j]->getInitializer());

      if (Globals[j]->hasExternalLinkage() && !HasExternal) {
        HasExternal = true;
        FirstExternalName = Globals[j]->getName();
      }
    }

    // Exit early if there is only one global to merge.
    if (Tys.size() < 2) {
      i = j;
      continue;
    }

    // If the merged variables don't have external linkage, we don't need to
    // expose the symbol after merging.
    GlobalValue::LinkageTypes Linkage = HasExternal
                                            ? GlobalValue::ExternalLinkage
                                            : GlobalValue::InternalLinkage;
    StructType *MergedTy = StructType::get(M.getContext(), Tys);
    Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);

    // On Darwin external linkage needs to be preserved, otherwise
    // dsymutil cannot preserve the debug info for the merged
    // variables.  If they have external linkage, use the symbol name
    // of the first variable merged as the suffix of global symbol
    // name.  This avoids a link-time naming conflict for the
    // _MergedGlobals symbols.
    Twine MergedName =
        (IsMachO && HasExternal)
            ? "_MergedGlobals_" + FirstExternalName
            : "_MergedGlobals";
    auto MergedLinkage = IsMachO ? Linkage : GlobalValue::PrivateLinkage;
    auto *MergedGV = new GlobalVariable(
        M, MergedTy, isConst, MergedLinkage, MergedInit, MergedName, nullptr,
        GlobalVariable::NotThreadLocal, AddrSpace);

    const StructLayout *MergedLayout = DL.getStructLayout(MergedTy);

    for (ssize_t k = i, idx = 0; k != j; k = GlobalSet.find_next(k), ++idx) {
      GlobalValue::LinkageTypes Linkage = Globals[k]->getLinkage();
      std::string Name = Globals[k]->getName();
      GlobalValue::DLLStorageClassTypes DLLStorage =
          Globals[k]->getDLLStorageClass();

      // Copy metadata while adjusting any debug info metadata by the original
      // global's offset within the merged global.
      MergedGV->copyMetadata(Globals[k], MergedLayout->getElementOffset(idx));

      Constant *Idx[2] = {
        ConstantInt::get(Int32Ty, 0),
        ConstantInt::get(Int32Ty, idx),
      };
      Constant *GEP =
          ConstantExpr::getInBoundsGetElementPtr(MergedTy, MergedGV, Idx);
      Globals[k]->replaceAllUsesWith(GEP);
      Globals[k]->eraseFromParent();

      // When the linkage is not internal we must emit an alias for the original
      // variable name as it may be accessed from another object. On non-Mach-O
      // we can also emit an alias for internal linkage as it's safe to do so.
      // It's not safe on Mach-O as the alias (and thus the portion of the
      // MergedGlobals variable) may be dead stripped at link time.
      if (Linkage != GlobalValue::InternalLinkage || !IsMachO) {
        GlobalAlias *GA =
            GlobalAlias::create(Tys[idx], AddrSpace, Linkage, Name, GEP, &M);
        GA->setDLLStorageClass(DLLStorage);
      }

      NumMerged++;
    }
    Changed = true;
    i = j;
  }

  return Changed;
}
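
Illustratively (hypothetical IR shape, not emitted verbatim by this code),
merging two external i32 globals @a and @b on Mach-O produces something like:

  @_MergedGlobals_a = global { i32, i32 } { i32 0, i32 0 }
  ; every use of @a becomes
  ;   getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @_MergedGlobals_a, i32 0, i32 0)
  ; and, because @a had external linkage, an alias named @a is emitted at that
  ; offset.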
Example 14
bool TargetInstrInfoImpl::
canFoldMemoryOperand(const MachineInstr *MI,
                     const SmallVectorImpl<unsigned> &Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}
Example 15
bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable *> &Globals,
                          const BitVector &GlobalSet, Module &M, bool isConst,
                          unsigned AddrSpace) const {

  Type *Int32Ty = Type::getInt32Ty(M.getContext());
  auto &DL = M.getDataLayout();

  assert(Globals.size() > 1);

  DEBUG(dbgs() << " Trying to merge set, starts with #"
               << GlobalSet.find_first() << "\n");

  ssize_t i = GlobalSet.find_first();
  while (i != -1) {
    ssize_t j = 0;
    uint64_t MergedSize = 0;
    std::vector<Type*> Tys;
    std::vector<Constant*> Inits;

    bool HasExternal = false;
    GlobalVariable *TheFirstExternal = 0;
    for (j = i; j != -1; j = GlobalSet.find_next(j)) {
      Type *Ty = Globals[j]->getType()->getElementType();
      MergedSize += DL.getTypeAllocSize(Ty);
      if (MergedSize > MaxOffset) {
        break;
      }
      Tys.push_back(Ty);
      Inits.push_back(Globals[j]->getInitializer());

      if (Globals[j]->hasExternalLinkage() && !HasExternal) {
        HasExternal = true;
        TheFirstExternal = Globals[j];
      }
    }

    // If the merged variables don't have external linkage, we don't need to
    // expose the symbol after merging.
    GlobalValue::LinkageTypes Linkage = HasExternal
                                            ? GlobalValue::ExternalLinkage
                                            : GlobalValue::InternalLinkage;

    StructType *MergedTy = StructType::get(M.getContext(), Tys);
    Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);

    // If the merged variables have external linkage, use the symbol name of
    // the first merged variable as the suffix of the merged global's name.
    // This avoids link-time naming conflicts for the merged global symbols.
    GlobalVariable *MergedGV = new GlobalVariable(
        M, MergedTy, isConst, Linkage, MergedInit,
        HasExternal ? "_MergedGlobals_" + TheFirstExternal->getName()
                    : "_MergedGlobals",
        nullptr, GlobalVariable::NotThreadLocal, AddrSpace);

    for (ssize_t k = i, idx = 0; k != j; k = GlobalSet.find_next(k)) {
      GlobalValue::LinkageTypes Linkage = Globals[k]->getLinkage();
      std::string Name = Globals[k]->getName();

      Constant *Idx[2] = {
        ConstantInt::get(Int32Ty, 0),
        ConstantInt::get(Int32Ty, idx++)
      };
      Constant *GEP =
          ConstantExpr::getInBoundsGetElementPtr(MergedTy, MergedGV, Idx);
      Globals[k]->replaceAllUsesWith(GEP);
      Globals[k]->eraseFromParent();

      if (Linkage != GlobalValue::InternalLinkage) {
        // Generate a new alias...
        auto *PTy = cast<PointerType>(GEP->getType());
        GlobalAlias::create(PTy, Linkage, Name, GEP, &M);
      }

      NumMerged++;
    }
    i = j;
  }

  return true;
}
Example 16
StringRef Twine::toStringRef(SmallVectorImpl<char> &Out) const {
  if (isSingleStringRef())
    return getSingleStringRef();
  toVector(Out);
  return StringRef(Out.data(), Out.size());
}
Example 17
void LiveRange::join(LiveRange &Other,
                     const int *LHSValNoAssignments,
                     const int *RHSValNoAssignments,
                     SmallVectorImpl<VNInfo *> &NewVNInfo) {
  verify();

  // Determine if any of our values are mapped.  This is uncommon, so we want
  // to avoid the range scan if not.
  bool MustMapCurValNos = false;
  unsigned NumVals = getNumValNums();
  unsigned NumNewVals = NewVNInfo.size();
  for (unsigned i = 0; i != NumVals; ++i) {
    unsigned LHSValID = LHSValNoAssignments[i];
    if (i != LHSValID ||
        (NewVNInfo[LHSValID] && NewVNInfo[LHSValID] != getValNumInfo(i))) {
      MustMapCurValNos = true;
      break;
    }
  }

  // If we have to apply a mapping to our base range assignment, rewrite it now.
  if (MustMapCurValNos && !empty()) {
    // Map the first live range.

    iterator OutIt = begin();
    OutIt->valno = NewVNInfo[LHSValNoAssignments[OutIt->valno->id]];
    for (iterator I = std::next(OutIt), E = end(); I != E; ++I) {
      VNInfo* nextValNo = NewVNInfo[LHSValNoAssignments[I->valno->id]];
      assert(nextValNo != 0 && "Huh?");

      // If this live range has the same value # as its immediate predecessor,
      // and if they are neighbors, remove one Segment.  This happens when we
      // have [0,4:0)[4,7:1) and map 0/1 onto the same value #.
      if (OutIt->valno == nextValNo && OutIt->end == I->start) {
        OutIt->end = I->end;
      } else {
        // Didn't merge. Move OutIt to the next segment.
        ++OutIt;
        OutIt->valno = nextValNo;
        if (OutIt != I) {
          OutIt->start = I->start;
          OutIt->end = I->end;
        }
      }
    }
    // If we merge some segments, chop off the end.
    ++OutIt;
    segments.erase(OutIt, end());
  }

  // Rewrite Other values before changing the VNInfo ids.
  // This can leave Other in an invalid state because we're not coalescing
  // touching segments that now have identical values. That's OK since Other is
  // not supposed to be valid after calling join();
  for (iterator I = Other.begin(), E = Other.end(); I != E; ++I)
    I->valno = NewVNInfo[RHSValNoAssignments[I->valno->id]];

  // Update val# info. Renumber them and make sure they all belong to this
  // LiveRange now. Also remove dead val#'s.
  unsigned NumValNos = 0;
  for (unsigned i = 0; i < NumNewVals; ++i) {
    VNInfo *VNI = NewVNInfo[i];
    if (VNI) {
      if (NumValNos >= NumVals)
        valnos.push_back(VNI);
      else
        valnos[NumValNos] = VNI;
      VNI->id = NumValNos++;  // Renumber val#.
    }
  }
  if (NumNewVals < NumVals)
    valnos.resize(NumNewVals);  // shrinkify

  // Okay, now insert the RHS live segments into the LHS.
  LiveRangeUpdater Updater(this);
  for (iterator I = Other.begin(), E = Other.end(); I != E; ++I)
    Updater.add(*I);
}
Example 18
// The CC users in CCUsers are testing the result of a comparison of some
// value X against zero and we know that any CC value produced by MI
// would also reflect the value of X.  Try to adjust CCUsers so that
// they test the result of MI directly, returning true on success.
// Leave everything unchanged on failure.
bool SystemZElimCompare::
adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare,
                      SmallVectorImpl<MachineInstr *> &CCUsers) {
  int Opcode = MI->getOpcode();
  const MCInstrDesc &Desc = TII->get(Opcode);
  unsigned MIFlags = Desc.TSFlags;

  // See which compare-style condition codes are available.
  unsigned ReusableCCMask = SystemZII::getCompareZeroCCMask(MIFlags);

  // For unsigned comparisons with zero, only equality makes sense.
  unsigned CompareFlags = Compare->getDesc().TSFlags;
  if (CompareFlags & SystemZII::IsLogical)
    ReusableCCMask &= SystemZ::CCMASK_CMP_EQ;

  if (ReusableCCMask == 0)
    return false;

  unsigned CCValues = SystemZII::getCCValues(MIFlags);
  assert((ReusableCCMask & ~CCValues) == 0 && "Invalid CCValues");

  // Now check whether these flags are enough for all users.
  SmallVector<MachineOperand *, 4> AlterMasks;
  for (unsigned int I = 0, E = CCUsers.size(); I != E; ++I) {
    MachineInstr *MI = CCUsers[I];

    // Fail if this isn't a use of CC that we understand.
    unsigned Flags = MI->getDesc().TSFlags;
    unsigned FirstOpNum;
    if (Flags & SystemZII::CCMaskFirst)
      FirstOpNum = 0;
    else if (Flags & SystemZII::CCMaskLast)
      FirstOpNum = MI->getNumExplicitOperands() - 2;
    else
      return false;

    // Check whether the instruction predicate treats all CC values
    // outside of ReusableCCMask in the same way.  In that case it
    // doesn't matter what those CC values mean.
    unsigned CCValid = MI->getOperand(FirstOpNum).getImm();
    unsigned CCMask = MI->getOperand(FirstOpNum + 1).getImm();
    unsigned OutValid = ~ReusableCCMask & CCValid;
    unsigned OutMask = ~ReusableCCMask & CCMask;
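    // Worked instance (values invented for illustration): with
    // ReusableCCMask = 0b1000, CCValid = 0b1110 and CCMask = 0b0110, we get
    // OutValid = 0b0110 and OutMask = 0b0110.  They match, so every valid CC
    // value outside ReusableCCMask behaves the same way and this user is fine;
    // had CCMask been 0b0100, OutMask (0b0100) != OutValid and we must bail.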
    if (OutMask != 0 && OutMask != OutValid)
      return false;

    AlterMasks.push_back(&MI->getOperand(FirstOpNum));
    AlterMasks.push_back(&MI->getOperand(FirstOpNum + 1));
  }

  // All users are OK.  Adjust the masks for MI.
  for (unsigned I = 0, E = AlterMasks.size(); I != E; I += 2) {
    AlterMasks[I]->setImm(CCValues);
    unsigned CCMask = AlterMasks[I + 1]->getImm();
    if (CCMask & ~ReusableCCMask)
      AlterMasks[I + 1]->setImm((CCMask & ReusableCCMask) |
                                (CCValues & ~ReusableCCMask));
  }

  // CC is now live after MI.
  int CCDef = MI->findRegisterDefOperandIdx(SystemZ::CC, false, true, TRI);
  assert(CCDef >= 0 && "Couldn't find CC set");
  MI->getOperand(CCDef).setIsDead(false);

  // Clear any intervening kills of CC.
  MachineBasicBlock::iterator MBBI = MI, MBBE = Compare;
  for (++MBBI; MBBI != MBBE; ++MBBI)
    MBBI->clearRegisterKills(SystemZ::CC, TRI);

  return true;
}
Example 19
/// ApplyOneQAOverride - Apply one edit to the input argument list.
///
/// The input is a single edit to perform; the caller applies a
/// space-separated list of such edits in order. Edits
/// should have one of the following forms:
///
///  '#': Silence information about the changes to the command line arguments.
///
///  '^FOO': Add FOO as a new argument at the beginning of the command line.
///
///  '+FOO': Add FOO as a new argument at the end of the command line.
///
///  's/XXX/YYY/': Substitute the regular expression XXX with YYY in the command
///  line.
///
///  'xOPTION': Removes all instances of the literal argument OPTION.
///
///  'XOPTION': Removes all instances of the literal argument OPTION,
///  and the following argument.
///
///  'Ox': Removes all flags matching 'O' or 'O[sz0-9]' and adds 'Ox'
///  at the end of the command line.
///
/// \param OS - The stream to write edit information to.
/// \param Args - The vector of command line arguments.
/// \param Edit - The override command to perform.
/// \param SavedStrings - Set to use for storing string representations.
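///
/// For example (edit strings invented for illustration): "x-Werror" deletes
/// every literal "-Werror" argument, "+-g" appends "-g" at the end, and
/// "s/-O2/-O3/" rewrites any argument matching the regular expression -O2.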
static void ApplyOneQAOverride(raw_ostream &OS,
                               SmallVectorImpl<const char*> &Args,
                               StringRef Edit,
                               std::set<std::string> &SavedStrings) {
  // This does not need to be efficient.

  if (Edit[0] == '^') {
    const char *Str =
      SaveStringInSet(SavedStrings, Edit.substr(1));
    OS << "### Adding argument " << Str << " at beginning\n";
    Args.insert(Args.begin() + 1, Str);
  } else if (Edit[0] == '+') {
    const char *Str =
      SaveStringInSet(SavedStrings, Edit.substr(1));
    OS << "### Adding argument " << Str << " at end\n";
    Args.push_back(Str);
  } else if (Edit[0] == 's' && Edit[1] == '/' && Edit.endswith("/") &&
             Edit.slice(2, Edit.size()-1).find('/') != StringRef::npos) {
    StringRef MatchPattern = Edit.substr(2).split('/').first;
    StringRef ReplPattern = Edit.substr(2).split('/').second;
    ReplPattern = ReplPattern.slice(0, ReplPattern.size()-1);

    for (unsigned i = 1, e = Args.size(); i != e; ++i) {
      std::string Repl = llvm::Regex(MatchPattern).sub(ReplPattern, Args[i]);

      if (Repl != Args[i]) {
        OS << "### Replacing '" << Args[i] << "' with '" << Repl << "'\n";
        Args[i] = SaveStringInSet(SavedStrings, Repl);
      }
    }
  } else if (Edit[0] == 'x' || Edit[0] == 'X') {
    std::string Option = Edit.substr(1, std::string::npos);
    for (unsigned i = 1; i < Args.size();) {
      if (Option == Args[i]) {
        OS << "### Deleting argument " << Args[i] << '\n';
        Args.erase(Args.begin() + i);
        if (Edit[0] == 'X') {
          if (i < Args.size()) {
            OS << "### Deleting argument " << Args[i] << '\n';
            Args.erase(Args.begin() + i);
          } else
            OS << "### Invalid X edit, end of command line!\n";
        }
      } else
        ++i;
    }
  } else if (Edit[0] == 'O') {
    for (unsigned i = 1; i < Args.size();) {
      const char *A = Args[i];
      if (A[0] == '-' && A[1] == 'O' &&
          (A[2] == '\0' ||
           (A[3] == '\0' && (A[2] == 's' || A[2] == 'z' ||
                             ('0' <= A[2] && A[2] <= '9'))))) {
        OS << "### Deleting argument " << Args[i] << '\n';
        Args.erase(Args.begin() + i);
      } else
        ++i;
    }
    OS << "### Adding argument " << Edit << " at end\n";
    Args.push_back(SaveStringInSet(SavedStrings, '-' + Edit.str()));
  } else {
    OS << "### Unrecognized edit: " << Edit << "\n";
  }
}
Example 20
void
UserValue::addDefsFromCopies(LiveInterval *LI, unsigned LocNo,
                      const SmallVectorImpl<SlotIndex> &Kills,
                      SmallVectorImpl<std::pair<SlotIndex, unsigned> > &NewDefs,
                      MachineRegisterInfo &MRI, LiveIntervals &LIS) {
  if (Kills.empty())
    return;
  // Don't track copies from physregs, there are too many uses.
  if (!TargetRegisterInfo::isVirtualRegister(LI->reg))
    return;

  // Collect all the (vreg, valno) pairs that are copies of LI.
  SmallVector<std::pair<LiveInterval*, const VNInfo*>, 8> CopyValues;
  for (MachineRegisterInfo::use_nodbg_iterator
         UI = MRI.use_nodbg_begin(LI->reg),
         UE = MRI.use_nodbg_end(); UI != UE; ++UI) {
    // Copies of the full value.
    if (UI.getOperand().getSubReg() || !UI->isCopy())
      continue;
    MachineInstr *MI = &*UI;
    unsigned DstReg = MI->getOperand(0).getReg();

    // Don't follow copies to physregs. These are usually setting up call
    // arguments, and the argument registers are always call clobbered. We are
    // better off in the source register which could be a callee-saved register,
    // or it could be spilled.
    if (!TargetRegisterInfo::isVirtualRegister(DstReg))
      continue;

    // Is LocNo extended to reach this copy? If not, another def may be blocking
    // it, or we are looking at a wrong value of LI.
    SlotIndex Idx = LIS.getInstructionIndex(MI);
    LocMap::iterator I = locInts.find(Idx.getRegSlot(true));
    if (!I.valid() || I.value() != LocNo)
      continue;

    if (!LIS.hasInterval(DstReg))
      continue;
    LiveInterval *DstLI = &LIS.getInterval(DstReg);
    const VNInfo *DstVNI = DstLI->getVNInfoAt(Idx.getRegSlot());
    assert(DstVNI && DstVNI->def == Idx.getRegSlot() && "Bad copy value");
    CopyValues.push_back(std::make_pair(DstLI, DstVNI));
  }

  if (CopyValues.empty())
    return;

  DEBUG(dbgs() << "Got " << CopyValues.size() << " copies of " << *LI << '\n');

  // Try to add defs of the copied values for each kill point.
  for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
    SlotIndex Idx = Kills[i];
    for (unsigned j = 0, e = CopyValues.size(); j != e; ++j) {
      LiveInterval *DstLI = CopyValues[j].first;
      const VNInfo *DstVNI = CopyValues[j].second;
      if (DstLI->getVNInfoAt(Idx) != DstVNI)
        continue;
      // Check that there isn't already a def at Idx
      LocMap::iterator I = locInts.find(Idx);
      if (I.valid() && I.start() <= Idx)
        continue;
      DEBUG(dbgs() << "Kill at " << Idx << " covered by valno #"
                   << DstVNI->id << " in " << *DstLI << '\n');
      MachineInstr *CopyMI = LIS.getInstructionFromIndex(DstVNI->def);
      assert(CopyMI && CopyMI->isCopy() && "Bad copy value");
      unsigned LocNo = getLocationNo(CopyMI->getOperand(0));
      I.insert(Idx, Idx.getNextSlot(), LocNo);
      NewDefs.push_back(std::make_pair(Idx, LocNo));
      break;
    }
  }
}
Example 21
bool
ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
                                          SmallVectorImpl<unsigned> &SubIndices,
                                          unsigned &NewSubIdx) const {

  unsigned Size = RC->getSize() * 8;
  if (Size < 6)
    return false;

  NewSubIdx = 0;  // Whole register.
  unsigned NumRegs = SubIndices.size();
  if (NumRegs == 8) {
    // 8 D registers -> 1 QQQQ register.
    return (Size == 512 &&
            SubIndices[0] == ARM::dsub_0 &&
            SubIndices[1] == ARM::dsub_1 &&
            SubIndices[2] == ARM::dsub_2 &&
            SubIndices[3] == ARM::dsub_3 &&
            SubIndices[4] == ARM::dsub_4 &&
            SubIndices[5] == ARM::dsub_5 &&
            SubIndices[6] == ARM::dsub_6 &&
            SubIndices[7] == ARM::dsub_7);
  } else if (NumRegs == 4) {
    if (SubIndices[0] == ARM::qsub_0) {
      // 4 Q registers -> 1 QQQQ register.
      return (Size == 512 &&
              SubIndices[1] == ARM::qsub_1 &&
              SubIndices[2] == ARM::qsub_2 &&
              SubIndices[3] == ARM::qsub_3);
    } else if (SubIndices[0] == ARM::dsub_0) {
      // 4 D registers -> 1 QQ register.
      if (Size >= 256 &&
          SubIndices[1] == ARM::dsub_1 &&
          SubIndices[2] == ARM::dsub_2 &&
          SubIndices[3] == ARM::dsub_3) {
        if (Size == 512)
          NewSubIdx = ARM::qqsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_4) {
      // 4 D registers -> 1 QQ register (2nd).
      if (Size == 512 &&
          SubIndices[1] == ARM::dsub_5 &&
          SubIndices[2] == ARM::dsub_6 &&
          SubIndices[3] == ARM::dsub_7) {
        NewSubIdx = ARM::qqsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_0) {
      // 4 S registers -> 1 Q register.
      if (Size >= 128 &&
          SubIndices[1] == ARM::ssub_1 &&
          SubIndices[2] == ARM::ssub_2 &&
          SubIndices[3] == ARM::ssub_3) {
        if (Size >= 256)
          NewSubIdx = ARM::qsub_0;
        return true;
      }
    }
  } else if (NumRegs == 2) {
    if (SubIndices[0] == ARM::qsub_0) {
      // 2 Q registers -> 1 QQ register.
      if (Size >= 256 && SubIndices[1] == ARM::qsub_1) {
        if (Size == 512)
          NewSubIdx = ARM::qqsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::qsub_2) {
      // 2 Q registers -> 1 QQ register (2nd).
      if (Size == 512 && SubIndices[1] == ARM::qsub_3) {
        NewSubIdx = ARM::qqsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_0) {
      // 2 D registers -> 1 Q register.
      if (Size >= 128 && SubIndices[1] == ARM::dsub_1) {
        if (Size >= 256)
          NewSubIdx = ARM::qsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_2) {
      // 2 D registers -> 1 Q register (2nd).
      if (Size >= 256 && SubIndices[1] == ARM::dsub_3) {
        NewSubIdx = ARM::qsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_4) {
      // 2 D registers -> 1 Q register (3rd).
      if (Size == 512 && SubIndices[1] == ARM::dsub_5) {
        NewSubIdx = ARM::qsub_2;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_6) {
      // 2 D registers -> 1 Q register (4th).
      if (Size == 512 && SubIndices[1] == ARM::dsub_7) {
        NewSubIdx = ARM::qsub_3;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_0) {
      // 2 S registers -> 1 D register.
      if (SubIndices[1] == ARM::ssub_1) {
        if (Size >= 128)
          NewSubIdx = ARM::dsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_2) {
      // 2 S registers -> 1 D register (2nd).
      if (Size >= 128 && SubIndices[1] == ARM::ssub_3) {
        NewSubIdx = ARM::dsub_1;
        return true;
      }
    }
  }
  return false;
}
Example 22
bool WebAssemblyInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 2 && "Expected a flag and a successor block");
  Cond.front() = MachineOperand::CreateImm(!Cond.front().getImm());
  return false;
}
Example 23
/// The specified instruction is a non-load access of the element being
/// promoted.  See if it provides a value or refines the demanded element mask
/// used for load promotion.
void AllocOptimize::
updateAvailableValues(SILInstruction *Inst, llvm::SmallBitVector &RequiredElts,
                      SmallVectorImpl<std::pair<SILValue, unsigned>> &Result,
                      llvm::SmallBitVector &ConflictingValues) {
  // Handle store and assign.
  if (isa<StoreInst>(Inst) || isa<AssignInst>(Inst)) {
    unsigned StartSubElt = computeSubelement(Inst->getOperand(1), TheMemory);
    assert(StartSubElt != ~0U && "Store within enum projection not handled");
    SILType ValTy = Inst->getOperand(0).getType();
    
    for (unsigned i = 0, e = getNumSubElements(ValTy, Module); i != e; ++i) {
      // If this element is not required, don't fill it in.
      if (!RequiredElts[StartSubElt+i]) continue;
      
      // If there is no result computed for this subelement, record it.  If
      // there already is a result, check it for conflict.  If there is no
      // conflict, then we're ok.
      auto &Entry = Result[StartSubElt+i];
      if (Entry.first == SILValue())
        Entry = { Inst->getOperand(0), i };
      else if (Entry.first != Inst->getOperand(0) || Entry.second != i)
        ConflictingValues[StartSubElt+i] = true;
      
      // This element is now provided.
      RequiredElts[StartSubElt+i] = false;
    }
    
    return;
  }
  
  // If we get here with a copy_addr, it must be storing into the element. Check
  // to see if any loaded subelements are being used, and if so, explode the
  // copy_addr to its individual pieces.
  if (auto *CAI = dyn_cast<CopyAddrInst>(Inst)) {
    unsigned StartSubElt = computeSubelement(Inst->getOperand(1), TheMemory);
    assert(StartSubElt != ~0U && "Store within enum projection not handled");
    SILType ValTy = Inst->getOperand(1).getType();
    
    bool AnyRequired = false;
    for (unsigned i = 0, e = getNumSubElements(ValTy, Module); i != e; ++i) {
      // If this element is not required, don't fill it in.
      AnyRequired = RequiredElts[StartSubElt+i];
      if (AnyRequired) break;
    }
    
    // If this is a copy addr that doesn't intersect the loaded subelements,
    // just continue with an unmodified load mask.
    if (!AnyRequired)
      return;
    
    // If the copy_addr is of a non-loadable type, we can't promote it.  Just
    // consider it to be a clobber.
    if (CAI->getOperand(0).getType().isLoadable(Module)) {
      // Otherwise, some part of the copy_addr's value is demanded by a load, so
      // we need to explode it to its component pieces.  This only expands one
      // level of the copyaddr.
      explodeCopyAddr(CAI);
      
      // The copy_addr doesn't provide any values, but we've arranged for our
      // iterators to visit the newly generated instructions, which do.
      return;
    }
  }

  // TODO: inout apply's should only clobber pieces passed in.
  
  // Otherwise, this is some unknown instruction, conservatively assume that all
  // values are clobbered.
  RequiredElts.clear();
  ConflictingValues = llvm::SmallBitVector(Result.size(), true);
  return;
}
Example 24
bool MSP430InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                    MachineBasicBlock *&TBB,
                                    MachineBasicBlock *&FBB,
                                    SmallVectorImpl<MachineOperand> &Cond,
                                    bool AllowModify) const {
  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;

    // Working from the bottom, when we see a non-terminator
    // instruction, we're done.
    if (!isUnpredicatedTerminator(I))
      break;

    // A terminator that isn't a branch can't easily be handled
    // by this analysis.
    if (!I->isBranch())
      return true;

    // Cannot handle indirect branches.
    if (I->getOpcode() == MSP430::Br ||
        I->getOpcode() == MSP430::Bm)
      return true;

    // Handle unconditional branches.
    if (I->getOpcode() == MSP430::JMP) {
      if (!AllowModify) {
        TBB = I->getOperand(0).getMBB();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      while (std::next(I) != MBB.end())
        std::next(I)->eraseFromParent();
      Cond.clear();
      FBB = nullptr;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
        TBB = nullptr;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = I->getOperand(0).getMBB();
      continue;
    }

    // Handle conditional branches.
    assert(I->getOpcode() == MSP430::JCC && "Invalid conditional branch");
    MSP430CC::CondCodes BranchCode =
      static_cast<MSP430CC::CondCodes>(I->getOperand(1).getImm());
    if (BranchCode == MSP430CC::COND_INVALID)
      return true;  // Can't handle weird stuff.

    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      FBB = TBB;
      TBB = I->getOperand(0).getMBB();
      Cond.push_back(MachineOperand::CreateImm(BranchCode));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 1);
    assert(TBB);

    // Only handle the case where all conditional branches branch to
    // the same destination.
    if (TBB != I->getOperand(0).getMBB())
      return true;

    MSP430CC::CondCodes OldBranchCode = (MSP430CC::CondCodes)Cond[0].getImm();
    // If the conditions are the same, we can leave them alone.
    if (OldBranchCode == BranchCode)
      continue;

    return true;
  }

  return false;
}
Example 25
bool GuardWideningImpl::combineRangeChecks(
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &RangeChecksOut) {
  unsigned OldCount = Checks.size();
  while (!Checks.empty()) {
    // Pick all of the range checks with a specific base and length, and try to
    // merge them.
    Value *CurrentBase = Checks.front().getBase();
    Value *CurrentLength = Checks.front().getLength();

    SmallVector<GuardWideningImpl::RangeCheck, 3> CurrentChecks;

    auto IsCurrentCheck = [&](GuardWideningImpl::RangeCheck &RC) {
      return RC.getBase() == CurrentBase && RC.getLength() == CurrentLength;
    };

    copy_if(Checks, std::back_inserter(CurrentChecks), IsCurrentCheck);
    Checks.erase(remove_if(Checks, IsCurrentCheck), Checks.end());

    assert(CurrentChecks.size() != 0 && "We know we have at least one!");

    if (CurrentChecks.size() < 3) {
      RangeChecksOut.insert(RangeChecksOut.end(), CurrentChecks.begin(),
                            CurrentChecks.end());
      continue;
    }

    // CurrentChecks.size() will typically be 3 here, but so far there has been
    // no need to hard-code that fact.

    std::sort(CurrentChecks.begin(), CurrentChecks.end(),
              [&](const GuardWideningImpl::RangeCheck &LHS,
                  const GuardWideningImpl::RangeCheck &RHS) {
      return LHS.getOffsetValue().slt(RHS.getOffsetValue());
    });

    // Note: std::sort does not invalidate iterators into CurrentChecks.

    ConstantInt *MinOffset = CurrentChecks.front().getOffset(),
                *MaxOffset = CurrentChecks.back().getOffset();

    unsigned BitWidth = MaxOffset->getValue().getBitWidth();
    if ((MaxOffset->getValue() - MinOffset->getValue())
            .ugt(APInt::getSignedMinValue(BitWidth)))
      return false;

    APInt MaxDiff = MaxOffset->getValue() - MinOffset->getValue();
    const APInt &HighOffset = MaxOffset->getValue();
    auto OffsetOK = [&](const GuardWideningImpl::RangeCheck &RC) {
      return (HighOffset - RC.getOffsetValue()).ult(MaxDiff);
    };

    if (MaxDiff.isMinValue() ||
        !std::all_of(std::next(CurrentChecks.begin()), CurrentChecks.end(),
                     OffsetOK))
      return false;

    // We have a series of f+1 checks as:
    //
    //   I+k_0 u< L   ... Chk_0
    //   I+k_1 u< L   ... Chk_1
    //   ...
    //   I+k_f u< L   ... Chk_f
    //
    //     with forall i in [1,f]: k_f-k_i u< k_f-k_0  ... Precond_0
    //          k_f-k_0 u< INT_MIN+k_f                 ... Precond_1
    //          k_f != k_0                             ... Precond_2
    //
    // Claim:
    //   Chk_0 AND Chk_f  implies all the other checks
    //
    // Informal proof sketch:
    //
    // We will show that the integer range [I+k_0,I+k_f] does not unsigned-wrap
    // (i.e. going from I+k_0 to I+k_f does not cross the -1,0 boundary) and
    // thus I+k_f is the greatest unsigned value in that range.
    //
    // This combined with Chk_f shows that everything in that range is u< L.
    // Via Precond_0 we know that all of the indices in Chk_0 through Chk_f
    // lie in [I+k_0,I+k_f], thus proving our claim.
    //
    // To see that [I+k_0,I+k_f] is not a wrapping range, note that there are
    // two possibilities: I+k_0 u< I+k_f or I+k_0 >u I+k_f (they can't be equal
    // since k_0 != k_f).  In the former case, [I+k_0,I+k_f] is not a wrapping
    // range by definition, and the latter case is impossible:
    //
    //   0-----I+k_f---I+k_0----L---INT_MAX,INT_MIN------------------(-1)
    //   xxxxxx             xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    //
    // For Chk_0 to succeed, we'd need k_f-k_0 (the range highlighted
    // with 'x' above) to be at least >u INT_MIN.
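    //
    // Concrete instance (numbers invented): with k_0 = 0 and k_f = 8, checking
    // I u< L (Chk_0) and I+8 u< L (Chk_f) covers every intermediate I+k_i with
    // 0 u< k_i u< 8, because the preconditions guarantee [I, I+8] does not
    // wrap.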

    RangeChecksOut.emplace_back(CurrentChecks.front());
    RangeChecksOut.emplace_back(CurrentChecks.back());
  }

  assert(RangeChecksOut.size() <= OldCount && "We pessimized!");
  return RangeChecksOut.size() != OldCount;
}
Example 26
static bool isReverseVectorMask(SmallVectorImpl<int> &Mask) {
  // Negative mask entries denote undef elements, which match any position.
  for (unsigned i = 0, MaskSize = Mask.size(); i < MaskSize; ++i)
    if (Mask[i] >= 0 && Mask[i] != (int)(MaskSize - 1 - i))
      return false;
  return true;
}
Example 27
static void lookupInModule(Module *module, Module::AccessPathTy accessPath,
                           SmallVectorImpl<ValueDecl *> &decls,
                           ResolutionKind resolutionKind, bool canReturnEarly,
                           LazyResolver *typeResolver,
                           ModuleLookupCache &cache,
                           const DeclContext *moduleScopeContext,
                           bool respectAccessControl,
                           ArrayRef<Module::ImportedModule> extraImports,
                           CallbackTy callback) {
  assert(module);
  assert(std::none_of(extraImports.begin(), extraImports.end(),
                      [](Module::ImportedModule import) -> bool {
    return !import.second;
  }));

  ModuleLookupCache::iterator iter;
  bool isNew;
  std::tie(iter, isNew) = cache.insert({{accessPath, module}, {}});
  if (!isNew) {
    decls.append(iter->second.begin(), iter->second.end());
    return;
  }

  size_t initialCount = decls.size();

  SmallVector<ValueDecl *, 4> localDecls;
  callback(module, accessPath, localDecls);
  if (respectAccessControl) {
    auto newEndIter = std::remove_if(localDecls.begin(), localDecls.end(),
                                    [=](ValueDecl *VD) {
      if (typeResolver) {
        typeResolver->resolveAccessibility(VD);
      }
      if (!VD->hasAccessibility())
        return false;
      return !VD->isAccessibleFrom(moduleScopeContext);
    });
    localDecls.erase(newEndIter, localDecls.end());

    // This only applies to immediate imports of the top-level module.
    if (moduleScopeContext && moduleScopeContext->getParentModule() != module)
      moduleScopeContext = nullptr;
  }

  OverloadSetTy overloads;
  resolutionKind = recordImportDecls(typeResolver, decls, localDecls,
                                     overloads, resolutionKind);

  bool foundDecls = decls.size() > initialCount;
  if (!foundDecls || !canReturnEarly ||
      resolutionKind == ResolutionKind::Overloadable) {
    SmallVector<Module::ImportedModule, 8> reexports;
    module->getImportedModulesForLookup(reexports);
    assert(std::none_of(reexports.begin(), reexports.end(),
                        [](Module::ImportedModule import) -> bool {
      return !import.second;
    }));
    reexports.append(extraImports.begin(), extraImports.end());

    // Prefer scoped imports (import func Swift.max) to whole-module imports.
    SmallVector<ValueDecl *, 8> unscopedValues;
    SmallVector<ValueDecl *, 8> scopedValues;
    for (auto next : reexports) {
      // Filter any whole-module imports, and skip specific-decl imports if the
      // import path doesn't match exactly.
      Module::AccessPathTy combinedAccessPath;
      if (accessPath.empty()) {
        combinedAccessPath = next.first;
      } else if (!next.first.empty() &&
                 !Module::isSameAccessPath(next.first, accessPath)) {
        // If we ever allow importing non-top-level decls, it's possible the
        // rule above isn't what we want.
        assert(next.first.size() == 1 && "import of non-top-level decl");
        continue;
      } else {
        combinedAccessPath = accessPath;
      }

      auto &resultSet = next.first.empty() ? unscopedValues : scopedValues;
      lookupInModule<OverloadSetTy>(next.second, combinedAccessPath,
                                    resultSet, resolutionKind, canReturnEarly,
                                    typeResolver, cache, moduleScopeContext,
                                    respectAccessControl, {}, callback);
    }

    // Add the results from scoped imports.
    resolutionKind = recordImportDecls(typeResolver, decls, scopedValues,
                                       overloads, resolutionKind);

    // Add the results from unscoped imports.
    foundDecls = decls.size() > initialCount;
    if (!foundDecls || !canReturnEarly ||
        resolutionKind == ResolutionKind::Overloadable) {
      resolutionKind = recordImportDecls(typeResolver, decls, unscopedValues,
                                         overloads, resolutionKind);
    }
  }

  // Remove duplicated declarations.
  llvm::SmallPtrSet<ValueDecl *, 4> knownDecls;
  decls.erase(std::remove_if(decls.begin() + initialCount, decls.end(),
                             [&](ValueDecl *d) -> bool { 
                               return !knownDecls.insert(d).second;
                             }),
              decls.end());

  auto &cachedValues = cache[{accessPath, module}];
  cachedValues.insert(cachedValues.end(),
                      decls.begin() + initialCount,
                      decls.end());
}
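
The duplicate-removal step near the end of lookupInModule leans on insert() reporting whether the element was newly added, so remove_if drops every later occurrence while the first stays in place. The same idiom, sketched standalone with standard-library types:

#include <algorithm>
#include <cassert>
#include <set>
#include <vector>

int main() {
  std::vector<int> Vals = {1, 2, 1, 3, 2};
  std::set<int> Seen;
  // insert().second is true only for the first occurrence, so later
  // duplicates are removed and the original order is preserved.
  Vals.erase(std::remove_if(Vals.begin(), Vals.end(),
                            [&](int V) { return !Seen.insert(V).second; }),
             Vals.end());
  assert((Vals == std::vector<int>{1, 2, 3}));
  return 0;
}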
Esempio n. 28
0
bool WebAssemblyTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  // WebAssembly can't currently handle returning tuples.
  return Outs.size() <= 1;
}
Esempio n. 29
0
SDValue
NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                          DebugLoc dl, SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const DataLayout *TD = getDataLayout();

  const Function *F = MF.getFunction();
  const AttrListPtr &PAL = F->getAttributes();

  SDValue Root = DAG.getRoot();
  std::vector<SDValue> OutChains;

  bool isKernel = llvm::isKernelFunction(*F);
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);

  std::vector<Type *> argTypes;
  std::vector<const Argument *> theArgs;
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
      I != E; ++I) {
    theArgs.push_back(I);
    argTypes.push_back(I->getType());
  }
  assert(argTypes.size() == Ins.size() &&
         "Ins types and function types did not match");

  int idx = 0;
  for (unsigned i=0, e=Ins.size(); i!=e; ++i, ++idx) {
    Type *Ty = argTypes[i];
    EVT ObjectVT = getValueType(Ty);
    assert(ObjectVT == Ins[i].VT &&
           "Ins type did not match function type");

    // If the kernel argument is image*_t or sampler_t, convert it to
    // an i32 constant holding the parameter position. This can later be
    // matched in the AsmPrinter to output the correct mangled name.
    if (isImageOrSamplerVal(theArgs[i],
                           (theArgs[i]->getParent() ?
                               theArgs[i]->getParent()->getParent() : 0))) {
      assert(isKernel && "Only kernels can have image/sampler params");
      InVals.push_back(DAG.getConstant(i+1, MVT::i32));
      continue;
    }

    if (theArgs[i]->use_empty()) {
      // argument is dead
      InVals.push_back(DAG.getNode(ISD::UNDEF, dl, ObjectVT));
      continue;
    }

    // In the following cases, assign a node order of "idx+1"
    // to newly created nodes. The SDNodes for params have to
    // appear in the same order as their order of appearance
    // in the original function. "idx+1" holds that order.
    if (!PAL.getParamAttributes(i+1).hasAttribute(Attributes::ByVal)) {
      // A plain scalar.
      if (isABI || isKernel) {
        // If ABI, load from the param symbol
        SDValue Arg = getParamSymbol(DAG, idx);
        Value *srcValue = new Argument(PointerType::get(ObjectVT.getTypeForEVT(
            F->getContext()),
            llvm::ADDRESS_SPACE_PARAM));
        SDValue p = DAG.getLoad(ObjectVT, dl, Root, Arg,
                                MachinePointerInfo(srcValue), false, false,
                                false,
                                TD->getABITypeAlignment(ObjectVT.getTypeForEVT(
                                  F->getContext())));
        if (p.getNode())
          DAG.AssignOrdering(p.getNode(), idx+1);
        InVals.push_back(p);
      }
      else {
        // If no ABI, just move the param symbol
        SDValue Arg = getParamSymbol(DAG, idx, ObjectVT);
        SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
        if (p.getNode())
          DAG.AssignOrdering(p.getNode(), idx+1);
        InVals.push_back(p);
      }
      continue;
    }

    // Param has ByVal attribute
    if (isABI || isKernel) {
      // Return MoveParam(param symbol).
      // Ideally, the param symbol can be returned directly,
      // but when SDNode builder decides to use it in a CopyToReg(),
      // machine instruction fails because TargetExternalSymbol
      // (not lowered) is target dependent, and CopyToReg assumes
      // the source is lowered.
      SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
      SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
      if (p.getNode())
        DAG.AssignOrdering(p.getNode(), idx+1);
      if (isKernel)
        InVals.push_back(p);
      else {
        SDValue p2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
                    DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32),
                                 p);
        InVals.push_back(p2);
      }
    } else {
      // Have to move a set of param symbols to registers, store them
      // locally, and return the local pointer in InVals.
      const PointerType *elemPtrType = dyn_cast<PointerType>(argTypes[i]);
      assert(elemPtrType &&
             "Byval parameter should be a pointer type");
      Type *elemType = elemPtrType->getElementType();
      // Compute the constituent parts
      SmallVector<EVT, 16> vtparts;
      SmallVector<uint64_t, 16> offsets;
      ComputeValueVTs(*this, elemType, vtparts, &offsets, 0);
      unsigned totalsize = 0;
      for (unsigned j=0, je=vtparts.size(); j!=je; ++j)
        totalsize += vtparts[j].getStoreSizeInBits();
      SDValue localcopy =  DAG.getFrameIndex(MF.getFrameInfo()->
                                      CreateStackObject(totalsize/8, 16, false),
                                             getPointerTy());
      unsigned sizesofar = 0;
      std::vector<SDValue> theChains;
      for (unsigned j=0, je=vtparts.size(); j!=je; ++j) {
        unsigned numElems = 1;
        if (vtparts[j].isVector()) numElems = vtparts[j].getVectorNumElements();
        for (unsigned k=0, ke=numElems; k!=ke; ++k) {
          EVT tmpvt = vtparts[j];
          if (tmpvt.isVector()) tmpvt = tmpvt.getVectorElementType();
          SDValue arg = DAG.getNode(NVPTXISD::MoveParam, dl, tmpvt,
                                    getParamSymbol(DAG, idx, tmpvt));
          SDValue addr = DAG.getNode(ISD::ADD, dl, getPointerTy(), localcopy,
                                    DAG.getConstant(sizesofar, getPointerTy()));
          theChains.push_back(DAG.getStore(Chain, dl, arg, addr,
                                        MachinePointerInfo(), false, false, 0));
          sizesofar += tmpvt.getStoreSizeInBits()/8;
          ++idx;
        }
      }
      // The loop header's ++idx will also advance past the last constituent
      // part, so step back one here.
      --idx;
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &theChains[0],
                          theChains.size());
      InVals.push_back(localcopy);
    }
  }

  // Clang will check explicit vararg declarations and issue an error if any
  // are present.  However, Clang lets code with an implicit vararg list like
  // f() pass, so we treat that case as if the arg list were empty.
  //if (F.isVarArg()) {
  // assert(0 && "VarArg not supported yet!");
  //}

  if (!OutChains.empty())
    DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            &OutChains[0], OutChains.size()));

  return Chain;
}
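
The byval fallback above flattens the pointee type into scalar parts and accumulates a running byte offset ("sizesofar") as each part is stored into the local stack copy. A standalone sketch of that bookkeeping, with invented part sizes standing in for what ComputeValueVTs would produce:

#include <vector>

int main() {
  // Hypothetical flattened parts of a byval struct: store sizes in bits,
  // in declaration order.
  std::vector<unsigned> PartBits = {32, 32, 16, 16};

  unsigned TotalBits = 0;
  std::vector<unsigned> ByteOffsets; // per-part "sizesofar"
  for (unsigned Bits : PartBits) {
    ByteOffsets.push_back(TotalBits / 8);
    TotalBits += Bits;
  }
  unsigned StackBytes = TotalBits / 8; // 12 bytes in a 16-byte-aligned slot
  (void)StackBytes;
  return 0;
}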
Esempio n. 30
0
bool swift::ArraySemanticsCall::replaceByAppendingValues(
    SILModule &M, SILFunction *AppendFn, SILFunction *ReserveFn,
    const SmallVectorImpl<SILValue> &Vals, SubstitutionMap Subs) {
  assert(getKind() == ArrayCallKind::kAppendContentsOf &&
         "Must be an append_contentsOf call");
  assert(AppendFn && "Must provide an append SILFunction");

  // We only handle loadable types.
  if (any_of(Vals, [&M](SILValue V) -> bool {
        return !V->getType().isLoadable(M);
      }))
    return false;
  
  CanSILFunctionType AppendFnTy = AppendFn->getLoweredFunctionType();
  SILValue ArrRef = SemanticsCall->getArgument(1);
  SILBuilderWithScope Builder(SemanticsCall);
  auto Loc = SemanticsCall->getLoc();
  auto *FnRef = Builder.createFunctionRefFor(Loc, AppendFn);

  if (Vals.size() > 1) {
    // Create a call to reserveCapacityForAppend() to reserve space for multiple
    // elements.
    FunctionRefBaseInst *ReserveFnRef =
        Builder.createFunctionRefFor(Loc, ReserveFn);
    SILFunctionType *ReserveFnTy =
      ReserveFnRef->getType().castTo<SILFunctionType>();
    assert(ReserveFnTy->getNumParameters() == 2);
    // A Swift Int is a struct wrapping a builtin integer: dig out the
    // builtin type of its stored property, build the literal, and wrap
    // it back into the struct.
    StructType *IntType =
      ReserveFnTy->getParameters()[0].getType()->castTo<StructType>();
    StructDecl *IntDecl = IntType->getDecl();
    VarDecl *field = *IntDecl->getStoredProperties().begin();
    SILType BuiltinIntTy = SILType::getPrimitiveObjectType(
        field->getInterfaceType()->getCanonicalType());
    IntegerLiteralInst *CapacityLiteral =
      Builder.createIntegerLiteral(Loc, BuiltinIntTy, Vals.size());
    StructInst *Capacity = Builder.createStruct(Loc,
        SILType::getPrimitiveObjectType(CanType(IntType)), {CapacityLiteral});
    Builder.createApply(Loc, ReserveFnRef, Subs, {Capacity, ArrRef}, false);
  }

  for (SILValue V : Vals) {
    auto SubTy = V->getType();
    auto &ValLowering = Builder.getModule().getTypeLowering(SubTy);
    auto CopiedVal = ValLowering.emitCopyValue(Builder, Loc, V);
    auto *AllocStackInst = Builder.createAllocStack(Loc, SubTy);

    ValLowering.emitStoreOfCopy(Builder, Loc, CopiedVal, AllocStackInst,
                                IsInitialization_t::IsInitialization);

    SILValue Args[] = {AllocStackInst, ArrRef};
    Builder.createApply(Loc, FnRef, Subs, Args, false);
    Builder.createDeallocStack(Loc, AllocStackInst);
    if (!isConsumedParameter(AppendFnTy->getParameters()[0].getConvention())) {
      ValLowering.emitDestroyValue(Builder, Loc, CopiedVal);
    }
  }
  CanSILFunctionType AppendContentsOfFnTy =
    SemanticsCall->getReferencedFunction()->getLoweredFunctionType();
  if (AppendContentsOfFnTy->getParameters()[0].getConvention() ==
        ParameterConvention::Direct_Owned) {
    SILValue SrcArray = SemanticsCall->getArgument(0);
    Builder.createReleaseValue(SemanticsCall->getLoc(), SrcArray,
                               Builder.getDefaultAtomicity());
  }

  removeCall();

  return true;
}