Example #1
// At each release point, release the reaching values that have been stored to
// this address.
//
// The caller has already determined that all Stores are to the same element
// within an otherwise dead object.
static void insertReleases(ArrayRef<StoreInst*> Stores,
                           ArrayRef<SILInstruction*> ReleasePoints,
                           SILSSAUpdater &SSAUp) {
  assert(!Stores.empty());
  SILValue StVal = Stores.front()->getSrc();

  SSAUp.Initialize(StVal->getType());

  for (auto *Store : Stores)
    SSAUp.AddAvailableValue(Store->getParent(), Store->getSrc());

  SILLocation Loc = Stores[0]->getLoc();
  for (auto *RelPoint : ReleasePoints) {
    SILBuilder B(RelPoint);
    // This does not use the SSAUpdater::RewriteUse API because it does not do
    // the right thing for local uses. We have already ensured a single store
    // per block, and all release points occur after all stores. Therefore we
    // can simply ask SSAUpdater for the reaching store.
    SILValue RelVal = SSAUp.GetValueAtEndOfBlock(RelPoint->getParent());
    if (StVal->getType().isReferenceCounted(RelPoint->getModule()))
      B.createStrongRelease(Loc, RelVal, Atomicity::Atomic);
    else
      B.createReleaseValue(Loc, RelVal, Atomicity::Atomic);
  }
}
Example #2
DelayedDiagnostic
DelayedDiagnostic::makeAvailability(AvailabilityResult AR,
                                    ArrayRef<SourceLocation> Locs,
                                    const NamedDecl *ReferringDecl,
                                    const NamedDecl *OffendingDecl,
                                    const ObjCInterfaceDecl *UnknownObjCClass,
                                    const ObjCPropertyDecl  *ObjCProperty,
                                    StringRef Msg,
                                    bool ObjCPropertyAccess) {
  assert(!Locs.empty());
  DelayedDiagnostic DD;
  DD.Kind = Availability;
  DD.Triggered = false;
  DD.Loc = Locs.front();
  DD.AvailabilityData.ReferringDecl = ReferringDecl;
  DD.AvailabilityData.OffendingDecl = OffendingDecl;
  DD.AvailabilityData.UnknownObjCClass = UnknownObjCClass;
  DD.AvailabilityData.ObjCProperty = ObjCProperty;
  char *MessageData = nullptr;
  if (!Msg.empty()) {
    MessageData = new char [Msg.size()];
    memcpy(MessageData, Msg.data(), Msg.size());
  }
  DD.AvailabilityData.Message = MessageData;
  DD.AvailabilityData.MessageLen = Msg.size();

  DD.AvailabilityData.SelectorLocs = new SourceLocation[Locs.size()];
  memcpy(DD.AvailabilityData.SelectorLocs, Locs.data(),
         sizeof(SourceLocation) * Locs.size());
  DD.AvailabilityData.NumSelectorLocs = Locs.size();

  DD.AvailabilityData.AR = AR;
  DD.AvailabilityData.ObjCPropertyAccess = ObjCPropertyAccess;
  return DD;
}
Example #3
TypeSubstitutionMap
GenericSignature::getSubstitutionMap(ArrayRef<Substitution> args) const {
  TypeSubstitutionMap subs;
  
  // An empty parameter list gives an empty map.
  if (getGenericParams().empty()) {
    assert(args.empty() && "substitutions but no generic params?!");
    return subs;
  }
  
  // Seed the type map with pre-existing substitutions.
  for (auto depTy : getAllDependentTypes()) {
    auto replacement = args.front().getReplacement();
    args = args.slice(1);
    
    if (auto subTy = depTy->getAs<SubstitutableType>()) {
      subs[subTy->getCanonicalType().getPointer()] = replacement;
    }
    else if (auto dTy = depTy->getAs<DependentMemberType>()) {
      subs[dTy->getCanonicalType().getPointer()] = replacement;
    }
  }
  
  assert(args.empty() && "did not use all substitutions?!");
  return subs;
}
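Several of the examples on this page share one idiom: walk a list of keys while peeling entries off a parallel ArrayRef of values, then assert that both lists ran out together. A minimal sketch of that idiom as a hypothetical helper (zipConsume is illustrative, not taken from any of these projects):
#include "llvm/ADT/ArrayRef.h"
#include <cassert>

template <typename T, typename U, typename Fn>
static void zipConsume(llvm::ArrayRef<T> Keys, llvm::ArrayRef<U> Vals, Fn F) {
  for (const T &Key : Keys) {
    assert(!Vals.empty() && "ran out of values");
    F(Key, Vals.front()); // pair the current key with the front value
    Vals = Vals.slice(1); // advance the local view; the caller's copy is untouched
  }
  assert(Vals.empty() && "did not use all values?!");
}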
Example #4
static void addParameters(ArrayRef<Identifier> &ArgNames,
                          const ParameterList *paramList,
                          TextEntity &Ent,
                          SourceManager &SM,
                          unsigned BufferID) {
  for (auto &param : *paramList) {
    StringRef Arg;
    if (!ArgNames.empty()) {
      Identifier Id = ArgNames.front();
      Arg = Id.empty() ? "_" : Id.str();
      ArgNames = ArgNames.slice(1);
    }

    if (auto typeRepr = param->getTypeLoc().getTypeRepr()) {
      SourceRange TypeRange = param->getTypeLoc().getSourceRange();
      if (auto InOutTyR = dyn_cast_or_null<InOutTypeRepr>(typeRepr))
        TypeRange = InOutTyR->getBase()->getSourceRange();
      if (TypeRange.isInvalid())
        continue;
      
      unsigned StartOffs = SM.getLocOffsetInBuffer(TypeRange.Start, BufferID);
      unsigned EndOffs =
        SM.getLocOffsetInBuffer(Lexer::getLocForEndOfToken(SM, TypeRange.End),
                                BufferID);
      TextRange TR{ StartOffs, EndOffs-StartOffs };
      TextEntity Param(param, Arg, TR, StartOffs);
      Ent.SubEntities.push_back(std::move(Param));
    }
  }
}
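Because ArgNames is an ArrayRef taken by reference, each call consumes exactly the names it used and leaves the remainder for the next parameter list. A hypothetical caller-side sketch (the declaration and accessor names here are assumptions, not taken from the project):
static void addFunctionParameters(const AbstractFunctionDecl *FD,
                                  TextEntity &Ent, SourceManager &SM,
                                  unsigned BufferID) {
  ArrayRef<Identifier> ArgNames = FD->getFullName().getArgumentNames();
  for (auto *ParamList : FD->getParameterLists())
    addParameters(ArgNames, ParamList, Ent, SM, BufferID); // ArgNames shrinks
}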
Example #5
File: EhFrame.cpp Project: envytools/lld
// Read a byte and advance D by one byte.
static uint8_t readByte(ArrayRef<uint8_t> &D) {
  if (D.empty())
    fatal("corrupted or unsupported CIE information");
  uint8_t B = D.front();
  D = D.slice(1);
  return B;
}
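Callers can chain such readers because each one advances the shared view. Together with skipLeb128 from Example #7 below, a CIE-parsing fragment might look like this (the field order is illustrative only, not lld's actual parser):
static void parseCieFragment(llvm::ArrayRef<uint8_t> CieData) {
  llvm::ArrayRef<uint8_t> D = CieData;
  uint8_t Version = readByte(D); // consumes one byte
  skipLeb128(D);                 // consumes a variable-length field
  uint8_t Encoding = readByte(D);
  (void)Version; (void)Encoding;
}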
Example #6
SMLoc CTagsEmitter::locate(const Record *R) {
  ArrayRef<SMLoc> Locs = R->getLoc();
  if (Locs.empty()) {
    SMLoc NullLoc;
    return NullLoc;
  }
  return Locs.front();
}
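Example #27 below condenses the same lookup into a single conditional expression; a value-initialized SMLoc already means "unknown location", so the named temporary adds nothing:
SMLoc CTagsEmitter::locate(const Record *R) {
  ArrayRef<SMLoc> Locs = R->getLoc();
  return Locs.empty() ? SMLoc() : Locs.front();
}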
Example #7
File: EhFrame.cpp Project: envytools/lld
// Skip an integer encoded in the LEB128 format.
// The actual value is not of interest because only the runtime needs it,
// but we need at least to be able to skip it so that we can read
// the field that follows a LEB128 number.
static void skipLeb128(ArrayRef<uint8_t> &D) {
  while (!D.empty()) {
    uint8_t Val = D.front();
    D = D.slice(1);
    if ((Val & 0x80) == 0)
      return;
  }
  fatal("corrupted or unsupported CIE information");
}
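The same cursor style extends naturally to decoding the number instead of skipping it. A minimal sketch, assuming LLVM's ArrayRef is available (readUleb128 is illustrative, not lld's actual reader):
#include "llvm/ADT/ArrayRef.h"
#include <cstdint>

static uint64_t readUleb128(llvm::ArrayRef<uint8_t> &D) {
  uint64_t Result = 0;
  unsigned Shift = 0;
  while (!D.empty()) {
    uint8_t Val = D.front();
    D = D.slice(1);                          // advance the caller's view
    Result |= uint64_t(Val & 0x7f) << Shift; // low 7 bits carry payload
    if ((Val & 0x80) == 0)                   // high bit clear: last byte
      break;
    Shift += 7;
  }
  return Result;
}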
Example #8
static void handleFieldList(ArrayRef<uint8_t> Content,
                            SmallVectorImpl<TiReference> &Refs) {
  uint32_t Offset = 0;
  uint32_t ThisLen = 0;
  while (!Content.empty()) {
    TypeLeafKind Kind =
        static_cast<TypeLeafKind>(support::endian::read16le(Content.data()));
    switch (Kind) {
    case LF_BCLASS:
      ThisLen = handleBaseClass(Content, Offset, Refs);
      break;
    case LF_ENUMERATE:
      ThisLen = handleEnumerator(Content, Offset, Refs);
      break;
    case LF_MEMBER:
      ThisLen = handleDataMember(Content, Offset, Refs);
      break;
    case LF_METHOD:
      ThisLen = handleOverloadedMethod(Content, Offset, Refs);
      break;
    case LF_ONEMETHOD:
      ThisLen = handleOneMethod(Content, Offset, Refs);
      break;
    case LF_NESTTYPE:
      ThisLen = handleNestedType(Content, Offset, Refs);
      break;
    case LF_STMEMBER:
      ThisLen = handleStaticDataMember(Content, Offset, Refs);
      break;
    case LF_VBCLASS:
    case LF_IVBCLASS:
      ThisLen =
          handleVirtualBaseClass(Content, Offset, Kind == LF_VBCLASS, Refs);
      break;
    case LF_VFUNCTAB:
      ThisLen = handleVFPtr(Content, Offset, Refs);
      break;
    case LF_INDEX:
      ThisLen = handleListContinuation(Content, Offset, Refs);
      break;
    default:
      return;
    }
    Content = Content.drop_front(ThisLen);
    Offset += ThisLen;
    if (!Content.empty()) {
      uint8_t Pad = Content.front();
      if (Pad >= LF_PAD0) {
        uint32_t Skip = Pad & 0x0F;
        Content = Content.drop_front(Skip);
        Offset += Skip;
      }
    }
  }
}
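The padding step at the bottom of the loop relies on the CodeView rule that list members are aligned using LF_PAD bytes whose low nibble gives the distance to the next member. Isolated as a sketch (LF_PAD0 is 0xF0 in the CodeView leaf enum; skipPadding itself is illustrative):
static llvm::ArrayRef<uint8_t> skipPadding(llvm::ArrayRef<uint8_t> Content) {
  if (Content.empty())
    return Content;
  uint8_t Pad = Content.front();
  if (Pad >= 0xF0)                            // LF_PAD0 .. LF_PAD15
    Content = Content.drop_front(Pad & 0x0F); // low nibble = bytes to skip
  return Content;
}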
Example #9
void GenericEnvironment::
getSubstitutionMap(ModuleDecl *mod,
                   GenericSignature *sig,
                   ArrayRef<Substitution> subs,
                   SubstitutionMap &result) const {
  for (auto depTy : sig->getAllDependentTypes()) {

    // Map the interface type to a context type.
    auto contextTy = depTy.subst(mod, InterfaceToArchetypeMap, SubstOptions());
    auto *archetype = contextTy->castTo<ArchetypeType>();

    auto sub = subs.front();
    subs = subs.slice(1);

    // Record the replacement type and its conformances.
    result.addSubstitution(CanType(archetype), sub.getReplacement());
    result.addConformances(CanType(archetype), sub.getConformances());
  }

  for (auto reqt : sig->getRequirements()) {
    if (reqt.getKind() != RequirementKind::SameType)
      continue;

    auto first = reqt.getFirstType()->getAs<DependentMemberType>();
    auto second = reqt.getSecondType()->getAs<DependentMemberType>();

    if (!first || !second)
      continue;

    auto archetype = mapTypeIntoContext(mod, first)->getAs<ArchetypeType>();
    if (!archetype)
      continue;

    auto firstBase = first->getBase();
    auto secondBase = second->getBase();

    auto firstBaseArchetype = mapTypeIntoContext(mod, firstBase)->getAs<ArchetypeType>();
    auto secondBaseArchetype = mapTypeIntoContext(mod, secondBase)->getAs<ArchetypeType>();

    if (!firstBaseArchetype || !secondBaseArchetype)
      continue;

    if (archetype->getParent() != firstBaseArchetype)
      result.addParent(CanType(archetype),
                       CanType(firstBaseArchetype),
                       first->getAssocType());
    if (archetype->getParent() != secondBaseArchetype)
      result.addParent(CanType(archetype),
                       CanType(secondBaseArchetype),
                       second->getAssocType());
  }

  assert(subs.empty() && "did not use all substitutions?!");
}
Example #10
static void getWitnessMethodSubstitutions(ApplySite AI, SILFunction *F,
                                          ArrayRef<Substitution> Subs,
                                          SmallVectorImpl<Substitution> &NewSubs) {
  auto &Module = AI.getModule();

  auto CalleeCanType = F->getLoweredFunctionType();

  ProtocolDecl *proto = nullptr;
  if (CalleeCanType->getRepresentation() ==
      SILFunctionTypeRepresentation::WitnessMethod) {
    proto = CalleeCanType->getDefaultWitnessMethodProtocol(
        *Module.getSwiftModule());
  }

  ArrayRef<Substitution> origSubs = AI.getSubstitutions();

  if (proto != nullptr) {
    // If the callee is a default witness method thunk, preserve substitutions
    // from the call site.
    NewSubs.append(origSubs.begin(), origSubs.end());
    return;
  }

  // If the callee is a concrete witness method thunk, apply substitutions
  // from the conformance, and drop any substitutions derived from the Self
  // type.
  NewSubs.append(Subs.begin(), Subs.end());

  if (auto generics = AI.getOrigCalleeType()->getGenericSignature()) {
    for (auto genericParam : generics->getAllDependentTypes()) {
      auto origSub = origSubs.front();
      origSubs = origSubs.slice(1);

      // If the callee is a concrete witness method thunk, we ignore
      // generic parameters derived from 'self', the generic parameter at
      // depth 0, index 0.
      auto type = genericParam->getCanonicalType();
      while (auto memberType = dyn_cast<DependentMemberType>(type)) {
        type = memberType.getBase();
      }
      auto paramType = cast<GenericTypeParamType>(type);
      if (paramType->getDepth() == 0) {
        // There shouldn't be any other parameters at this depth.
        assert(paramType->getIndex() == 0);
        continue;
      }

      // Okay, remember this substitution.
      NewSubs.push_back(origSub);
    }
  }

  assert(origSubs.empty() && "subs not parallel to dependent types");
}
Example #11
void FrontendInputsAndOutputs::setMainAndSupplementaryOutputs(
    ArrayRef<std::string> outputFiles,
    ArrayRef<SupplementaryOutputPaths> supplementaryOutputs) {
  if (AllInputs.empty()) {
    assert(outputFiles.empty() && "Cannot have outputs without inputs");
    assert(supplementaryOutputs.empty() &&
           "Cannot have supplementary outputs without inputs");
    return;
  }
  if (hasPrimaryInputs()) {
    const auto N = primaryInputCount();
    assert(outputFiles.size() == N && "Must have one main output per primary");
    assert(supplementaryOutputs.size() == N &&
           "Must have one set of supplementary outputs per primary");
    (void)N;

    unsigned i = 0;
    for (auto &input : AllInputs) {
      if (input.isPrimary()) {
        input.setPrimarySpecificPaths(PrimarySpecificPaths(
            outputFiles[i], input.file(), supplementaryOutputs[i]));
        ++i;
      }
    }
    return;
  }
  assert(supplementaryOutputs.size() == 1 &&
         "WMO only ever produces one set of supplementary outputs");
  if (outputFiles.size() == 1) {
    AllInputs.front().setPrimarySpecificPaths(PrimarySpecificPaths(
        outputFiles.front(), firstInputProducingOutput().file(),
        supplementaryOutputs.front()));
    return;
  }
  assert(outputFiles.size() == AllInputs.size() &&
         "Multi-threaded WMO requires one main output per input");
  for (auto i : indices(AllInputs))
    AllInputs[i].setPrimarySpecificPaths(PrimarySpecificPaths(
        outputFiles[i], outputFiles[i],
        i == 0 ? supplementaryOutputs.front() : SupplementaryOutputPaths()));
}
Example #12
static InitializationPtr
prepareIndirectResultInit(SILGenFunction &gen, CanType resultType,
                          ArrayRef<SILResultInfo> &allResults,
                          MutableArrayRef<SILValue> &directResults,
                          ArrayRef<SILArgument*> &indirectResultAddrs,
                          SmallVectorImpl<CleanupHandle> &cleanups) {
  // Recursively decompose tuple types.
  if (auto resultTupleType = dyn_cast<TupleType>(resultType)) {
    auto tupleInit = new TupleInitialization();
    tupleInit->SubInitializations.reserve(resultTupleType->getNumElements());

    for (auto resultEltType : resultTupleType.getElementTypes()) {
      auto eltInit = prepareIndirectResultInit(gen, resultEltType, allResults,
                                               directResults,
                                               indirectResultAddrs, cleanups);
      tupleInit->SubInitializations.push_back(std::move(eltInit));
    }

    return InitializationPtr(tupleInit);
  }

  // Okay, pull the next result off the list of results.
  auto result = allResults[0];
  allResults = allResults.slice(1);

  // If it's indirect, we should be emitting into an argument.
  if (result.isIndirect()) {
    // Pull off the next indirect result argument.
    SILValue addr = indirectResultAddrs.front();
    indirectResultAddrs = indirectResultAddrs.slice(1);

    // Create an initialization which will initialize it.
    auto &resultTL = gen.getTypeLowering(addr->getType());
    auto temporary = gen.useBufferAsTemporary(addr, resultTL);

    // Remember the cleanup that will be activated.
    auto cleanup = temporary->getInitializedCleanup();
    if (cleanup.isValid())
      cleanups.push_back(cleanup);

    return InitializationPtr(temporary.release());
  }

  // Otherwise, make an Initialization that stores the value in the
  // next element of the directResults array.
  auto init = new StoreResultInitialization(directResults[0], cleanups);
  directResults = directResults.slice(1);
  return InitializationPtr(init);
}
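Note that directResults is a MutableArrayRef, consumed with exactly the same front()/slice() moves as the read-only views; the only difference is that writes go through to the backing storage. A minimal illustration (not project code):
#include "llvm/ADT/ArrayRef.h"

static void writeAndAdvance(llvm::MutableArrayRef<int> &Vals, int NewV) {
  Vals[0] = NewV;       // writes through the view into the backing storage
  Vals = Vals.slice(1); // then advances the caller's cursor
}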
Example #13
static void PrintMessage(ArrayRef<SMLoc> Loc, SourceMgr::DiagKind Kind,
                         const Twine &Msg) {
  // Count the total number of errors printed.
  // This is used to exit with an error code if there were any errors.
  if (Kind == SourceMgr::DK_Error)
    ++ErrorsPrinted;

  SMLoc NullLoc;
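  // ArrayRef is implicitly constructible from a single element, so the
  // assignment below re-points Loc at the local NullLoc when no locations
  // were supplied.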
  if (Loc.empty())
    Loc = NullLoc;
  SrcMgr.PrintMessage(Loc.front(), Kind, Msg);
  for (unsigned i = 1; i < Loc.size(); ++i)
    SrcMgr.PrintMessage(Loc[i], SourceMgr::DK_Note,
                        "instantiated from multiclass");
}
Example #14
static void
getSubstitutionMaps(GenericParamList *context,
                    ArrayRef<Substitution> subs,
                    TypeSubstitutionMap &typeMap,
                    ArchetypeConformanceMap &conformanceMap) {
  for (auto arch : context->getAllNestedArchetypes()) {
    auto sub = subs.front();
    subs = subs.slice(1);

    // Save the conformances from the substitution so that we can substitute
    // them into substitutions that map between archetypes.
    conformanceMap[arch] = sub.getConformances();
    typeMap[arch] = sub.getReplacement();
  }
  assert(subs.empty() && "did not use all substitutions?!");
}
Example #15
static void addParameters(ArrayRef<Identifier> &ArgNames,
                          const Pattern *Pat,
                          TextEntity &Ent,
                          SourceManager &SM,
                          unsigned BufferID) {
  if (auto ParenPat = dyn_cast<ParenPattern>(Pat)) {
    addParameters(ArgNames, ParenPat->getSubPattern(), Ent, SM, BufferID);
    return;
  }

  if (auto Tuple = dyn_cast<TuplePattern>(Pat)) {
    for (const auto &Elt : Tuple->getElements())
      addParameters(ArgNames, Elt.getPattern(), Ent, SM, BufferID);

    return;
  }

  StringRef Arg;
  if (!ArgNames.empty()) {
    Identifier Id = ArgNames.front();
    Arg = Id.empty() ? "_" : Id.str();
    ArgNames = ArgNames.slice(1);
  }

  if (auto Typed = dyn_cast<TypedPattern>(Pat)) {
    VarDecl *VD = nullptr;
    if (auto Named = dyn_cast<NamedPattern>(Typed->getSubPattern())) {
      VD = Named->getDecl();
    }
    SourceRange TypeRange = Typed->getTypeLoc().getSourceRange();
    if (auto InOutTyR =
        dyn_cast_or_null<InOutTypeRepr>(Typed->getTypeLoc().getTypeRepr())) {
      TypeRange = InOutTyR->getBase()->getSourceRange();
    }
    if (TypeRange.isInvalid())
      return;
    unsigned StartOffs = SM.getLocOffsetInBuffer(TypeRange.Start, BufferID);
    unsigned EndOffs =
      SM.getLocOffsetInBuffer(Lexer::getLocForEndOfToken(SM, TypeRange.End),
                              BufferID);
    TextRange TR{ StartOffs, EndOffs-StartOffs };
    TextEntity Param(VD, Arg, TR, StartOffs);
    Ent.SubEntities.push_back(std::move(Param));
  }
}
Example #16
Size ClassLayout::getInstanceStart() const {
  ArrayRef<ElementLayout> elements = AllElements;
  while (!elements.empty()) {
    auto element = elements.front();
    elements = elements.drop_front();

    // Ignore empty elements.
    if (element.isEmpty()) {
      continue;
    } else if (element.hasByteOffset()) {
      // FIXME: assumes layout is always sequential!
      return element.getByteOffset();
    } else {
      return Size(0);
    }
  }

  // If there are no non-empty elements, just return the computed size.
  return getSize();
}
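The drop_front() walk above is the generic "scan a shrinking view" pattern. Reduced to a hypothetical helper for illustration:
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"

template <typename T, typename Pred>
static llvm::Optional<T> findFirst(llvm::ArrayRef<T> Elems, Pred P) {
  while (!Elems.empty()) {
    T Elem = Elems.front();
    Elems = Elems.drop_front(); // shrink the local view by one element
    if (P(Elem))
      return Elem;
  }
  return llvm::None;
}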
Example #17
static void assignRecursive(SILGenFunction &gen, SILLocation loc,
                            CanType type, ArrayRef<ManagedValue> &srcValues,
                            SILValue destAddr) {
  // Recurse into tuples.
  if (auto srcTupleType = dyn_cast<TupleType>(type)) {
    assert(destAddr->getType().castTo<TupleType>()->getNumElements()
             == srcTupleType->getNumElements());
    for (auto eltIndex : indices(srcTupleType.getElementTypes())) {
      auto eltDestAddr = gen.B.createTupleElementAddr(loc, destAddr, eltIndex);
      assignRecursive(gen, loc, srcTupleType.getElementType(eltIndex),
                      srcValues, eltDestAddr);
    }
    return;
  }

  // Otherwise, pull the front value off the list.
  auto srcValue = srcValues.front();
  srcValues = srcValues.slice(1);

  srcValue.assignInto(gen, loc, destAddr);
}
Example #18
/// Recursively walk into the given formal index type, expanding tuples,
/// in order to form the arguments to a subscript accessor.
static void translateIndices(SILGenFunction &gen, SILLocation loc,
                             AbstractionPattern pattern, CanType formalType,
                             ArrayRef<ManagedValue> &sourceIndices,
                             RValue &result) {
  // Expand if the pattern was a tuple.
  if (pattern.isTuple()) {
    auto formalTupleType = cast<TupleType>(formalType);
    for (auto i : indices(formalTupleType.getElementTypes())) {
      translateIndices(gen, loc, pattern.getTupleElementType(i),
                       formalTupleType.getElementType(i),
                       sourceIndices, result);
    }
    return;
  }

  assert(!sourceIndices.empty() && "ran out of elements in index!");
  ManagedValue value = sourceIndices.front();
  sourceIndices = sourceIndices.slice(1);

  // We're going to build an RValue here, so make sure we translate
  // indirect arguments to be scalar if we have a loadable type.
  if (value.getType().isAddress()) {
    auto &valueTL = gen.getTypeLowering(value.getType());
    if (!valueTL.isAddressOnly()) {
      value = gen.emitLoad(loc, value.forward(gen), valueTL,
                           SGFContext(), IsTake);
    }
  }

  // Reabstract the subscripts from the requirement pattern to the
  // formal type.
  value = gen.emitOrigToSubstValue(loc, value, pattern, formalType);

  // Invoking the accessor will expect a value of the formal type, so
  // don't reabstract to that here.

  // Add that to the result, further expanding if necessary.
  result.addElement(gen, value, formalType, loc);
}
Example #19
void Mapper::mapAppendingVariable(GlobalVariable &GV, Constant *InitPrefix,
                                  bool IsOldCtorDtor,
                                  ArrayRef<Constant *> NewMembers) {
  SmallVector<Constant *, 16> Elements;
  if (InitPrefix) {
    unsigned NumElements =
        cast<ArrayType>(InitPrefix->getType())->getNumElements();
    for (unsigned I = 0; I != NumElements; ++I)
      Elements.push_back(InitPrefix->getAggregateElement(I));
  }

  PointerType *VoidPtrTy;
  Type *EltTy;
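  // Set and used only on the IsOldCtorDtor path below; otherwise these stay
  // uninitialized and untouched.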
  if (IsOldCtorDtor) {
    // FIXME: This upgrade is done during linking to support the C API.  See
    // also IRLinker::linkAppendingVarProto() in IRMover.cpp.
    VoidPtrTy = Type::getInt8Ty(GV.getContext())->getPointerTo();
    auto &ST = *cast<StructType>(NewMembers.front()->getType());
    Type *Tys[3] = {ST.getElementType(0), ST.getElementType(1), VoidPtrTy};
    EltTy = StructType::get(GV.getContext(), Tys, false);
  }

  for (auto *V : NewMembers) {
    Constant *NewV;
    if (IsOldCtorDtor) {
      auto *S = cast<ConstantStruct>(V);
      auto *E1 = cast<Constant>(mapValue(S->getOperand(0)));
      auto *E2 = cast<Constant>(mapValue(S->getOperand(1)));
      Constant *Null = Constant::getNullValue(VoidPtrTy);
      NewV = ConstantStruct::get(cast<StructType>(EltTy), E1, E2, Null);
    } else {
      NewV = cast_or_null<Constant>(mapValue(V));
    }
    Elements.push_back(NewV);
  }

  GV.setInitializer(ConstantArray::get(
      cast<ArrayType>(GV.getType()->getElementType()), Elements));
}
Example #20
File: Attr.cpp Project: aisobe/swift
/// Print the short-form @available() attribute for an array of long-form
/// AvailableAttrs that can be represented in the short form.
/// For example, for:
///   @available(OSX, introduced: 10.10)
///   @available(iOS, introduced: 8.0)
/// this will print:
///   @available(OSX 10.10, iOS 8.0, *)
static void printShortFormAvailable(ArrayRef<const DeclAttribute *> Attrs,
                                    ASTPrinter &Printer,
                                    const PrintOptions &Options) {
  assert(!Attrs.empty());

  Printer << "@available(";
  auto FirstAvail = cast<AvailableAttr>(Attrs.front());
  if (Attrs.size() == 1 &&
      FirstAvail->isLanguageVersionSpecific()) {
    assert(FirstAvail->Introduced.hasValue());
    Printer << "swift "
            << FirstAvail->Introduced.getValue().getAsString()
            << ")";
  } else {
    for (auto *DA : Attrs) {
      auto *AvailAttr = cast<AvailableAttr>(DA);
      assert(AvailAttr->Introduced.hasValue());
      Printer << platformString(AvailAttr->Platform) << " "
              << AvailAttr->Introduced.getValue().getAsString() << ", ";
    }
    Printer << "*)";
  }
  Printer.printNewline();
}
Example #21
RValueTy CodeGenFunction::EmitArrayIntrinsic(intrinsic::FunctionKind Func,
        ArrayRef<Expr*> Arguments) {
    using namespace intrinsic;

    switch(Func) {
    case MAXLOC:
    case MINLOC:
        if(Arguments.size() == 2 &&
                Arguments[0]->getType()->asArrayType()->getDimensionCount() == 1 &&
                Arguments[1]->getType()->isIntegerType()) {
            // Vector, dim -> return scalar
            return EmitVectorDimReturningScalarArrayIntrinsic(Func,
                    Arguments.front());
        }
        llvm_unreachable("FIXME: add codegen for the rest");
        break;

    default:
        llvm_unreachable("invalid intrinsic");
        break;
    }

    return RValueTy();
}
Example #22
/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  unsigned ImpReg = 0;

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i].second;
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI =
                LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
                       : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;
  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  MI->eraseFromParent();

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction.  Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  DEBUG(dbgs() << "\tfolded:  " << LIS.getInstructionIndex(FoldMI) << '\t'
               << *FoldMI);
  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0)
    ++NumSpills;
  else
    ++NumReloads;
  return true;
}
Example #23
/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  unsigned ImpReg = 0;

  bool SpillSubRegs = (MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                       MI->getOpcode() == TargetOpcode::STACKMAP);

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i].second;
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }
    // FIXME: Teach targets to deal with subregs.
    if (!SpillSubRegs && MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstrSpan MIS(MI);

  MachineInstr *FoldMI =
                LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
                       : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    unsigned Reg = MO->getReg();
    if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
        MRI.isReserved(Reg)) {
      continue;
    }
    // Skip non-Defs, including undef uses and internal reads.
    if (MO->isUse())
      continue;
    MIBundleOperands::PhysRegInfo RI =
      MIBundleOperands(FoldMI).analyzePhysReg(Reg, &TRI);
    if (RI.Defines)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) {
      if (LiveRange *LR = LIS.getCachedRegUnit(*Units)) {
        SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
        if (VNInfo *VNI = LR->getVNInfoAt(Idx))
          LR->removeValNo(VNI);
      }
    }
  }

  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineBasicBlock::iterator MII = MIS.begin(), End = MIS.end();
       MII != End; ++MII)
    if (&*MII != FoldMI)
      LIS.InsertMachineInstrInMaps(&*MII);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction.  Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
                                           "folded"));

  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0)
    ++NumSpills;
  else
    ++NumReloads;
  return true;
}
Example #24
/// Compute substitutions for making a direct call to a SIL function with
/// @convention(witness_method) convention.
///
/// Such functions have a substituted generic signature where the
/// abstract `Self` parameter from the original type of the protocol
/// requirement is replaced by a concrete type.
///
/// Thus, the original substitutions of the apply instruction that
/// are written in terms of the requirement's generic signature need
/// to be remapped to substitutions suitable for the witness signature.
///
/// \param conformanceRef The (possibly-specialized) conformance
/// \param requirementSig The generic signature of the requirement
/// \param witnessThunkSig The generic signature of the witness method
/// \param origSubs The substitutions from the call instruction
/// \param newSubs New substitutions are stored here
static void getWitnessMethodSubstitutions(
    SILModule &M,
    ProtocolConformanceRef conformanceRef,
    GenericSignature *requirementSig,
    GenericSignature *witnessThunkSig,
    ArrayRef<Substitution> origSubs,
    bool isDefaultWitness,
    SmallVectorImpl<Substitution> &newSubs) {

  if (witnessThunkSig == nullptr)
    return;

  assert(!conformanceRef.isAbstract());

  auto conformance = conformanceRef.getConcrete();

  // Otherwise, we need to build new caller-side substitutions
  // written in terms of the witness thunk's generic signature,
  // mapping to the archetypes of the caller.
  SubstitutionMap subMap;

  // Take apart substitutions from the conforming type.
  ArrayRef<Substitution> witnessSubs;
  auto *rootConformance = conformance->getRootNormalConformance();
  auto *witnessSig = rootConformance->getGenericSignature();
  unsigned depth = 0;
  if (isDefaultWitness) {
    // For default witnesses, we substitute all of Self.
    auto gp = witnessThunkSig->getGenericParams().front()->getCanonicalType();
    subMap.addSubstitution(gp, origSubs.front().getReplacement());
    subMap.addConformances(gp, origSubs.front().getConformances());

    // For default witnesses, innermost generic parameters are always at
    // depth 1.
    depth = 1;
  } else {
    // If `Self` maps to a bound generic type, this gives us the
    // substitutions for the concrete type's generic parameters.
    witnessSubs = getSubstitutionsForProtocolConformance(conformanceRef);

    if (!witnessSubs.empty()) {
      witnessSig->getSubstitutionMap(witnessSubs, subMap);
      depth = witnessSig->getGenericParams().back()->getDepth() + 1;
    }
  }

  // Next, take apart caller-side substitutions.
  //
  // Note that the Self-derived dependent types appearing on the left
  // hand side of the map are dropped.
  // FIXME: This won't be correct if the requirement itself adds 'Self'
  // requirements. We should be working from the substitutions in the witness.
  //
  // Also note that we rebuild the generic parameters in the requirement
  // to provide them with the required depth for the thunk itself.
  if (requirementSig->getGenericParams().back()->getDepth() > 0) {
    // Local function to replace generic parameters within the requirement
    // signature with the generic parameter we want to use in the substitution
    // map:
    //   - If the generic parameter is 'Self', return a null type so we don't
    //     add any substitution.
    //   - Otherwise, reset the generic parameter's depth one level deeper than
    //     the deepest generic parameter in the conformance.
    //
    // This local function is meant to be used with Type::transform();
    auto replaceGenericParameter = [&](Type type) -> Type {
      if (auto gp = type->getAs<GenericTypeParamType>()) {
        if (gp->getDepth() == 0) return Type();
        return GenericTypeParamType::get(depth, gp->getIndex(),
                                         M.getASTContext());
      }

      return type;
    };

    // Walk through the substitutions and dependent types.
    ArrayRef<Substitution> subs = origSubs;
    for (auto origDepTy : requirementSig->getAllDependentTypes()) {
      // Grab the next substitution.
      auto sub = subs.front();
      subs = subs.slice(1);

      // Map the generic parameters in the dependent type into the witness
      // thunk's depth.
      auto mappedDepTy = origDepTy.transform(replaceGenericParameter);

      // If the dependent type was rooted in 'Self', it will come out null;
      // skip it.
      if (!mappedDepTy) continue;

      // Otherwise, record the replacement and conformances for the mapped
      // type.
      auto canTy = mappedDepTy->getCanonicalType();
      subMap.addSubstitution(canTy, sub.getReplacement());
      subMap.addConformances(canTy, sub.getConformances());
    }
    assert(subs.empty() && "Did not consume all substitutions");
  }

  // Now, apply both sets of substitutions computed above to the
  // forwarding substitutions of the witness thunk.
  witnessThunkSig->getSubstitutions(subMap, newSubs);
}
Example #25
//
// runMCDesc - Print out MC register descriptions.
//
void
RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
                               CodeGenRegBank &RegBank) {
  EmitSourceFileHeader("MC Register Information", OS);

  OS << "\n#ifdef GET_REGINFO_MC_DESC\n";
  OS << "#undef GET_REGINFO_MC_DESC\n";

  const std::vector<CodeGenRegister*> &Regs = RegBank.getRegisters();

  // The lists of sub-registers, super-registers, and overlaps all go in the
  // same array. That allows us to share suffixes.
  typedef std::vector<const CodeGenRegister*> RegVec;
  SmallVector<RegVec, 4> SubRegLists(Regs.size());
  SmallVector<RegVec, 4> OverlapLists(Regs.size());
  SequenceToOffsetTable<RegVec, CodeGenRegister::Less> RegSeqs;

  // Differentially encoded lists.
  SequenceToOffsetTable<DiffVec> DiffSeqs;
  SmallVector<DiffVec, 4> RegUnitLists(Regs.size());
  SmallVector<unsigned, 4> RegUnitInitScale(Regs.size());

  SequenceToOffsetTable<std::string> RegStrings;

  // Precompute register lists for the SequenceToOffsetTable.
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
    const CodeGenRegister *Reg = Regs[i];

    RegStrings.add(Reg->getName());

    // Compute the ordered sub-register list.
    SetVector<const CodeGenRegister*> SR;
    Reg->addSubRegsPreOrder(SR, RegBank);
    RegVec &SubRegList = SubRegLists[i];
    SubRegList.assign(SR.begin(), SR.end());
    RegSeqs.add(SubRegList);

    // Super-registers are already computed.
    const RegVec &SuperRegList = Reg->getSuperRegs();
    RegSeqs.add(SuperRegList);

    // The list of overlaps doesn't need to have any particular order, except
    // Reg itself must be the first element. Pick an ordering that has one of
    // the other lists as a suffix.
    RegVec &OverlapList = OverlapLists[i];
    const RegVec &Suffix = SubRegList.size() > SuperRegList.size() ?
                           SubRegList : SuperRegList;
    CodeGenRegister::Set Omit(Suffix.begin(), Suffix.end());

    // First element is Reg itself.
    OverlapList.push_back(Reg);
    Omit.insert(Reg);

    // Any elements not in Suffix.
    CodeGenRegister::Set OSet;
    Reg->computeOverlaps(OSet, RegBank);
    std::set_difference(OSet.begin(), OSet.end(),
                        Omit.begin(), Omit.end(),
                        std::back_inserter(OverlapList),
                        CodeGenRegister::Less());

    // Finally, Suffix itself.
    OverlapList.insert(OverlapList.end(), Suffix.begin(), Suffix.end());
    RegSeqs.add(OverlapList);

    // Differentially encode the register unit list, seeded by register number.
    // First compute a scale factor that allows more diff-lists to be reused:
    //
    //   D0 -> (S0, S1)
    //   D1 -> (S2, S3)
    //
    // A scale factor of 2 allows D0 and D1 to share a diff-list. The initial
    // value for the differential decoder is the register number multiplied by
    // the scale.
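    //
    // Worked example (illustrative numbering): if D0 is register 10 with
    // units (20, 21) and D1 is register 11 with units (22, 23), the delta
    // between neighbors is 22 - 20 = 2, so Scale = 2. Decoding then seeds
    // at 10 * 2 = 20 for D0 and 11 * 2 = 22 for D1, and both registers can
    // share one diff-list.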
    //
    // Check the neighboring registers for arithmetic progressions.
    unsigned ScaleA = ~0u, ScaleB = ~0u;
    ArrayRef<unsigned> RUs = Reg->getNativeRegUnits();
    if (i > 0 && Regs[i-1]->getNativeRegUnits().size() == RUs.size())
      ScaleB = RUs.front() - Regs[i-1]->getNativeRegUnits().front();
    if (i+1 != Regs.size() &&
        Regs[i+1]->getNativeRegUnits().size() == RUs.size())
      ScaleA = Regs[i+1]->getNativeRegUnits().front() - RUs.front();
    unsigned Scale = std::min(ScaleB, ScaleA);
    // Default the scale to 0 if it can't be encoded in 4 bits.
    if (Scale >= 16)
      Scale = 0;
    RegUnitInitScale[i] = Scale;
    DiffSeqs.add(diffEncode(RegUnitLists[i], Scale * Reg->EnumValue, RUs));
  }

  // Compute the final layout of the sequence table.
  RegSeqs.layout();
  DiffSeqs.layout();

  OS << "namespace llvm {\n\n";

  const std::string &TargetName = Target.getName();

  // Emit the shared table of register lists.
  OS << "extern const uint16_t " << TargetName << "RegLists[] = {\n";
  RegSeqs.emit(OS, printRegister);
  OS << "};\n\n";

  // Emit the shared table of differential lists.
  OS << "extern const uint16_t " << TargetName << "RegDiffLists[] = {\n";
  DiffSeqs.emit(OS, printDiff16);
  OS << "};\n\n";

  // Emit the string table.
  RegStrings.layout();
  OS << "extern const char " << TargetName << "RegStrings[] = {\n";
  RegStrings.emit(OS, printChar);
  OS << "};\n\n";

  OS << "extern const MCRegisterDesc " << TargetName
     << "RegDesc[] = { // Descriptors\n";
  OS << "  { " << RegStrings.get("") << ", 0, 0, 0, 0 },\n";

  // Emit the register descriptors now.
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
    const CodeGenRegister *Reg = Regs[i];
    OS << "  { " << RegStrings.get(Reg->getName()) << ", "
       << RegSeqs.get(OverlapLists[i]) << ", "
       << RegSeqs.get(SubRegLists[i]) << ", "
       << RegSeqs.get(Reg->getSuperRegs()) << ", "
       << (DiffSeqs.get(RegUnitLists[i])*16 + RegUnitInitScale[i]) << " },\n";
  }
  OS << "};\n\n";      // End of register descriptors...

  // Emit the table of register unit roots. Each regunit has one or two root
  // registers.
  OS << "extern const uint16_t " << TargetName << "RegUnitRoots[][2] = {\n";
  for (unsigned i = 0, e = RegBank.getNumNativeRegUnits(); i != e; ++i) {
    ArrayRef<const CodeGenRegister*> Roots = RegBank.getRegUnit(i).getRoots();
    assert(!Roots.empty() && "All regunits must have a root register.");
    assert(Roots.size() <= 2 && "More than two roots not supported yet.");
    OS << "  { " << getQualifiedName(Roots.front()->TheDef);
    for (unsigned r = 1; r != Roots.size(); ++r)
      OS << ", " << getQualifiedName(Roots[r]->TheDef);
    OS << " },\n";
  }
  OS << "};\n\n";

  ArrayRef<CodeGenRegisterClass*> RegisterClasses = RegBank.getRegClasses();

  // Loop over all of the register classes... emitting each one.
  OS << "namespace {     // Register classes...\n";

  // Emit the register enum value arrays for each RegisterClass
  for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
    const CodeGenRegisterClass &RC = *RegisterClasses[rc];
    ArrayRef<Record*> Order = RC.getOrder();

    // Give the register class a legal C name if it's anonymous.
    std::string Name = RC.getName();

    // Emit the register list now.
    OS << "  // " << Name << " Register Class...\n"
       << "  const uint16_t " << Name
       << "[] = {\n    ";
    for (unsigned i = 0, e = Order.size(); i != e; ++i) {
      Record *Reg = Order[i];
      OS << getQualifiedName(Reg) << ", ";
    }
    OS << "\n  };\n\n";

    OS << "  // " << Name << " Bit set.\n"
       << "  const uint8_t " << Name
       << "Bits[] = {\n    ";
    BitVectorEmitter BVE;
    for (unsigned i = 0, e = Order.size(); i != e; ++i) {
      Record *Reg = Order[i];
      BVE.add(Target.getRegBank().getReg(Reg)->EnumValue);
    }
    BVE.print(OS);
    OS << "\n  };\n\n";

  }
  OS << "}\n\n";

  OS << "extern const MCRegisterClass " << TargetName
     << "MCRegisterClasses[] = {\n";

  for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
    const CodeGenRegisterClass &RC = *RegisterClasses[rc];

    // Asserts to make sure values will fit in table assuming types from
    // MCRegisterInfo.h
    assert((RC.SpillSize/8) <= 0xffff && "SpillSize too large.");
    assert((RC.SpillAlignment/8) <= 0xffff && "SpillAlignment too large.");
    assert(RC.CopyCost >= -128 && RC.CopyCost <= 127 && "Copy cost too large.");

    OS << "  { " << '\"' << RC.getName() << "\", "
       << RC.getName() << ", " << RC.getName() << "Bits, "
       << RC.getOrder().size() << ", sizeof(" << RC.getName() << "Bits), "
       << RC.getQualifiedName() + "RegClassID" << ", "
       << RC.SpillSize/8 << ", "
       << RC.SpillAlignment/8 << ", "
       << RC.CopyCost << ", "
       << RC.Allocatable << " },\n";
  }

  OS << "};\n\n";

  // Emit the data table for getSubReg().
  ArrayRef<CodeGenSubRegIndex*> SubRegIndices = RegBank.getSubRegIndices();
  if (SubRegIndices.size()) {
    OS << "const uint16_t " << TargetName << "SubRegTable[]["
       << SubRegIndices.size() << "] = {\n";
    for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
      const CodeGenRegister::SubRegMap &SRM = Regs[i]->getSubRegs();
      OS << "  /* " << Regs[i]->TheDef->getName() << " */\n";
      if (SRM.empty()) {
        OS << "  {0},\n";
        continue;
      }
      OS << "  {";
      for (unsigned j = 0, je = SubRegIndices.size(); j != je; ++j) {
        // FIXME: We really should keep this to 80 columns...
        CodeGenRegister::SubRegMap::const_iterator SubReg =
          SRM.find(SubRegIndices[j]);
        if (SubReg != SRM.end())
          OS << getQualifiedName(SubReg->second->TheDef);
        else
          OS << "0";
        if (j != je - 1)
          OS << ", ";
      }
      OS << "}" << (i != e ? "," : "") << "\n";
    }
    OS << "};\n\n";
    OS << "const uint16_t *get" << TargetName
       << "SubRegTable() {\n  return (const uint16_t *)" << TargetName
       << "SubRegTable;\n}\n\n";
  }

  EmitRegMappingTables(OS, Regs, false);

  // Emit Reg encoding table
  OS << "extern const uint16_t " << TargetName;
  OS << "RegEncodingTable[] = {\n";
  // Add entry for NoRegister
  OS << "  0,\n";
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
    Record *Reg = Regs[i]->TheDef;
    BitsInit *BI = Reg->getValueAsBitsInit("HWEncoding");
    uint64_t Value = 0;
    for (unsigned b = 0, be = BI->getNumBits(); b != be; ++b) {
      if (BitInit *B = dynamic_cast<BitInit*>(BI->getBit(b)))
      Value |= (uint64_t)B->getValue() << b;
    }
    OS << "  " << Value << ",\n";
  }
  OS << "};\n";       // End of HW encoding table

  // MCRegisterInfo initialization routine.
  OS << "static inline void Init" << TargetName
     << "MCRegisterInfo(MCRegisterInfo *RI, unsigned RA, "
     << "unsigned DwarfFlavour = 0, unsigned EHFlavour = 0) {\n";
  OS << "  RI->InitMCRegisterInfo(" << TargetName << "RegDesc, "
     << Regs.size()+1 << ", RA, " << TargetName << "MCRegisterClasses, "
     << RegisterClasses.size() << ", "
     << TargetName << "RegUnitRoots, "
     << RegBank.getNumNativeRegUnits() << ", "
     << TargetName << "RegLists, "
     << TargetName << "RegDiffLists, "
     << TargetName << "RegStrings, ";
  if (SubRegIndices.size() != 0)
    OS << "(uint16_t*)" << TargetName << "SubRegTable, "
       << SubRegIndices.size() << ",\n";
  else
    OS << "NULL, 0,\n";

  OS << "  " << TargetName << "RegEncodingTable);\n\n";

  EmitRegMapping(OS, Regs, false);

  OS << "}\n\n";

  OS << "} // End llvm namespace \n";
  OS << "#endif // GET_REGINFO_MC_DESC\n\n";
}
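For orientation, the descriptor loop above emits one row per register in roughly the following shape; the numbers here are illustrative, not from a real target:
extern const MCRegisterDesc XYZRegDesc[] = { // Descriptors
  { 0, 0, 0, 0, 0 },    // NoRegister sentinel
  { 3, 16, 0, 24, 33 }, // name index; overlap, sub-reg, and super-reg list
                        // offsets; reg-unit list offset * 16 + init scale
};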
Example #26
/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  unsigned ImpReg = 0;

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i].second;
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI =
                LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
                       : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    unsigned Reg = MO->getReg();
    if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
        MRI.isReserved(Reg)) {
      continue;
    }
    MIBundleOperands::PhysRegInfo RI =
      MIBundleOperands(FoldMI).analyzePhysReg(Reg, &TRI);
    if (MO->readsReg()) {
      assert(RI.Reads && "Cannot fold physreg reader");
      continue;
    }
    if (RI.Defines)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) {
      if (LiveInterval *LI = LIS.getCachedRegUnit(*Units)) {
        SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
        if (VNInfo *VNI = LI->getVNInfoAt(Idx))
          LI->removeValNo(VNI);
      }
    }
  }
  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  MI->eraseFromParent();

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction.  Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  DEBUG(dbgs() << "\tfolded:  " << LIS.getInstructionIndex(FoldMI) << '\t'
               << *FoldMI);
  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0)
    ++NumSpills;
  else
    ++NumReloads;
  return true;
}
Example #27
SMLoc CTagsEmitter::locate(const Record *R) {
  ArrayRef<SMLoc> Locs = R->getLoc();
  return !Locs.empty() ? Locs.front() : SMLoc();
}
Example #28
//
// runMCDesc - Print out MC register descriptions.
//
void
RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
                               CodeGenRegBank &RegBank) {
  emitSourceFileHeader("MC Register Information", OS);

  OS << "\n#ifdef GET_REGINFO_MC_DESC\n";
  OS << "#undef GET_REGINFO_MC_DESC\n";

  const std::vector<CodeGenRegister*> &Regs = RegBank.getRegisters();

  ArrayRef<CodeGenSubRegIndex*> SubRegIndices = RegBank.getSubRegIndices();
  // The lists of sub-registers and super-registers go in the same array.  That
  // allows us to share suffixes.
  typedef std::vector<const CodeGenRegister*> RegVec;

  // Differentially encoded lists.
  SequenceToOffsetTable<DiffVec> DiffSeqs;
  SmallVector<DiffVec, 4> SubRegLists(Regs.size());
  SmallVector<DiffVec, 4> SuperRegLists(Regs.size());
  SmallVector<DiffVec, 4> RegUnitLists(Regs.size());
  SmallVector<unsigned, 4> RegUnitInitScale(Regs.size());

  // Keep track of sub-register names as well. These are not differentially
  // encoded.
  typedef SmallVector<const CodeGenSubRegIndex*, 4> SubRegIdxVec;
  SequenceToOffsetTable<SubRegIdxVec> SubRegIdxSeqs;
  SmallVector<SubRegIdxVec, 4> SubRegIdxLists(Regs.size());

  SequenceToOffsetTable<std::string> RegStrings;

  // Precompute register lists for the SequenceToOffsetTable.
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
    const CodeGenRegister *Reg = Regs[i];

    RegStrings.add(Reg->getName());

    // Compute the ordered sub-register list.
    SetVector<const CodeGenRegister*> SR;
    Reg->addSubRegsPreOrder(SR, RegBank);
    diffEncode(SubRegLists[i], Reg->EnumValue, SR.begin(), SR.end());
    DiffSeqs.add(SubRegLists[i]);

    // Compute the corresponding sub-register indexes.
    SubRegIdxVec &SRIs = SubRegIdxLists[i];
    for (unsigned j = 0, je = SR.size(); j != je; ++j)
      SRIs.push_back(Reg->getSubRegIndex(SR[j]));
    SubRegIdxSeqs.add(SRIs);

    // Super-registers are already computed.
    const RegVec &SuperRegList = Reg->getSuperRegs();
    diffEncode(SuperRegLists[i], Reg->EnumValue,
               SuperRegList.begin(), SuperRegList.end());
    DiffSeqs.add(SuperRegLists[i]);

    // Differentially encode the register unit list, seeded by register number.
    // First compute a scale factor that allows more diff-lists to be reused:
    //
    //   D0 -> (S0, S1)
    //   D1 -> (S2, S3)
    //
    // A scale factor of 2 allows D0 and D1 to share a diff-list. The initial
    // value for the differential decoder is the register number multiplied by
    // the scale.
    //
    // Check the neighboring registers for arithmetic progressions.
    unsigned ScaleA = ~0u, ScaleB = ~0u;
    ArrayRef<unsigned> RUs = Reg->getNativeRegUnits();
    if (i > 0 && Regs[i-1]->getNativeRegUnits().size() == RUs.size())
      ScaleB = RUs.front() - Regs[i-1]->getNativeRegUnits().front();
    if (i+1 != Regs.size() &&
        Regs[i+1]->getNativeRegUnits().size() == RUs.size())
      ScaleA = Regs[i+1]->getNativeRegUnits().front() - RUs.front();
    unsigned Scale = std::min(ScaleB, ScaleA);
    // Default the scale to 0 if it can't be encoded in 4 bits.
    if (Scale >= 16)
      Scale = 0;
    RegUnitInitScale[i] = Scale;
    DiffSeqs.add(diffEncode(RegUnitLists[i], Scale * Reg->EnumValue, RUs));
  }

  // Compute the final layout of the sequence table.
  DiffSeqs.layout();
  SubRegIdxSeqs.layout();

  OS << "namespace llvm {\n\n";

  const std::string &TargetName = Target.getName();

  // Emit the shared table of differential lists.
  OS << "extern const MCPhysReg " << TargetName << "RegDiffLists[] = {\n";
  DiffSeqs.emit(OS, printDiff16);
  OS << "};\n\n";

  // Emit the table of sub-register indexes.
  OS << "extern const uint16_t " << TargetName << "SubRegIdxLists[] = {\n";
  SubRegIdxSeqs.emit(OS, printSubRegIndex);
  OS << "};\n\n";

  // Emit the table of sub-register index sizes.
  OS << "extern const MCRegisterInfo::SubRegCoveredBits "
     << TargetName << "SubRegIdxRanges[] = {\n";
  OS << "  { " << (uint16_t)-1 << ", " << (uint16_t)-1 << " },\n";
  for (ArrayRef<CodeGenSubRegIndex*>::const_iterator
         SRI = SubRegIndices.begin(), SRE = SubRegIndices.end();
         SRI != SRE; ++SRI) {
    OS << "  { " << (*SRI)->Offset << ", "
                 << (*SRI)->Size
       << " },\t// " << (*SRI)->getName() << "\n";
  }
  OS << "};\n\n";

  // Emit the string table.
  RegStrings.layout();
  OS << "extern const char " << TargetName << "RegStrings[] = {\n";
  RegStrings.emit(OS, printChar);
  OS << "};\n\n";

  OS << "extern const MCRegisterDesc " << TargetName
     << "RegDesc[] = { // Descriptors\n";
  OS << "  { " << RegStrings.get("") << ", 0, 0, 0, 0 },\n";

  // Emit the register descriptors now.
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
    const CodeGenRegister *Reg = Regs[i];
    OS << "  { " << RegStrings.get(Reg->getName()) << ", "
       << DiffSeqs.get(SubRegLists[i]) << ", "
       << DiffSeqs.get(SuperRegLists[i]) << ", "
       << SubRegIdxSeqs.get(SubRegIdxLists[i]) << ", "
       << (DiffSeqs.get(RegUnitLists[i])*16 + RegUnitInitScale[i]) << " },\n";
  }
  OS << "};\n\n";      // End of register descriptors...

  // Emit the table of register unit roots. Each regunit has one or two root
  // registers.
  OS << "extern const uint16_t " << TargetName << "RegUnitRoots[][2] = {\n";
  for (unsigned i = 0, e = RegBank.getNumNativeRegUnits(); i != e; ++i) {
    ArrayRef<const CodeGenRegister*> Roots = RegBank.getRegUnit(i).getRoots();
    assert(!Roots.empty() && "All regunits must have a root register.");
    assert(Roots.size() <= 2 && "More than two roots not supported yet.");
    OS << "  { " << getQualifiedName(Roots.front()->TheDef);
    for (unsigned r = 1; r != Roots.size(); ++r)
      OS << ", " << getQualifiedName(Roots[r]->TheDef);
    OS << " },\n";
  }
  OS << "};\n\n";

  ArrayRef<CodeGenRegisterClass*> RegisterClasses = RegBank.getRegClasses();

  // Loop over all of the register classes... emitting each one.
  OS << "namespace {     // Register classes...\n";

  // Emit the register enum value arrays for each RegisterClass
  for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
    const CodeGenRegisterClass &RC = *RegisterClasses[rc];
    ArrayRef<Record*> Order = RC.getOrder();

    // Give the register class a legal C name if it's anonymous.
    std::string Name = RC.getName();

    // Emit the register list now.
    OS << "  // " << Name << " Register Class...\n"
       << "  const uint16_t " << Name
       << "[] = {\n    ";
    for (unsigned i = 0, e = Order.size(); i != e; ++i) {
      Record *Reg = Order[i];
      OS << getQualifiedName(Reg) << ", ";
    }
    OS << "\n  };\n\n";

    OS << "  // " << Name << " Bit set.\n"
       << "  const uint8_t " << Name
       << "Bits[] = {\n    ";
    BitVectorEmitter BVE;
    for (unsigned i = 0, e = Order.size(); i != e; ++i) {
      Record *Reg = Order[i];
      BVE.add(Target.getRegBank().getReg(Reg)->EnumValue);
    }
    BVE.print(OS);
    OS << "\n  };\n\n";

  }
  OS << "}\n\n";

  OS << "extern const MCRegisterClass " << TargetName
     << "MCRegisterClasses[] = {\n";

  for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
    const CodeGenRegisterClass &RC = *RegisterClasses[rc];

    // Asserts to make sure values will fit in table assuming types from
    // MCRegisterInfo.h
    assert((RC.SpillSize/8) <= 0xffff && "SpillSize too large.");
    assert((RC.SpillAlignment/8) <= 0xffff && "SpillAlignment too large.");
    assert(RC.CopyCost >= -128 && RC.CopyCost <= 127 && "Copy cost too large.");

    OS << "  { " << '\"' << RC.getName() << "\", "
       << RC.getName() << ", " << RC.getName() << "Bits, "
       << RC.getOrder().size() << ", sizeof(" << RC.getName() << "Bits), "
       << RC.getQualifiedName() + "RegClassID" << ", "
       << RC.SpillSize/8 << ", "
       << RC.SpillAlignment/8 << ", "
       << RC.CopyCost << ", "
       << RC.Allocatable << " },\n";
  }

  OS << "};\n\n";

  EmitRegMappingTables(OS, Regs, false);

  // Emit Reg encoding table
  OS << "extern const uint16_t " << TargetName;
  OS << "RegEncodingTable[] = {\n";
  // Add entry for NoRegister
  OS << "  0,\n";
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
    Record *Reg = Regs[i]->TheDef;
    BitsInit *BI = Reg->getValueAsBitsInit("HWEncoding");
    uint64_t Value = 0;
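    // Fold the HWEncoding bits into a single value, least significant bit
    // first; bits that are unset or not fully resolved contribute zero.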
    for (unsigned b = 0, be = BI->getNumBits(); b != be; ++b) {
      if (BitInit *B = dyn_cast<BitInit>(BI->getBit(b)))
        Value |= (uint64_t)B->getValue() << b;
    }
    OS << "  " << Value << ",\n";
  }
  OS << "};\n";       // End of HW encoding table

  // MCRegisterInfo initialization routine.
  OS << "static inline void Init" << TargetName
     << "MCRegisterInfo(MCRegisterInfo *RI, unsigned RA, "
     << "unsigned DwarfFlavour = 0, unsigned EHFlavour = 0, unsigned PC = 0) {\n"
     << "  RI->InitMCRegisterInfo(" << TargetName << "RegDesc, "
     << Regs.size()+1 << ", RA, PC, " << TargetName << "MCRegisterClasses, "
     << RegisterClasses.size() << ", "
     << TargetName << "RegUnitRoots, "
     << RegBank.getNumNativeRegUnits() << ", "
     << TargetName << "RegDiffLists, "
     << TargetName << "RegStrings, "
     << TargetName << "SubRegIdxLists, "
     << (SubRegIndices.size() + 1) << ",\n"
     << TargetName << "SubRegIdxRanges, "
     << "  " << TargetName << "RegEncodingTable);\n\n";

  EmitRegMapping(OS, Regs, false);

  OS << "}\n\n";

  OS << "} // End llvm namespace \n";
  OS << "#endif // GET_REGINFO_MC_DESC\n\n";
}
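To make the emitted tables concrete, here is a rough sketch of what runMCDesc prints for a hypothetical target XYZ with two registers, one register class, and no sub-registers. The target name, register names, and every numeric offset below are illustrative assumptions; only the shape mirrors the OS << statements above. The RegDiffLists, RegStrings, SubRegIdxLists, and SubRegIdxRanges tables referenced in the init call are emitted alongside, earlier in the same file.

extern const MCRegisterDesc XYZRegDesc[] = { // Descriptors
  { 0, 0, 0, 0, 0 },    // the NoRegister sentinel row
  { 1, 0, 0, 0, 16 },   // XYZ::R0 (made-up string/diff/unit indices)
  { 4, 0, 0, 0, 32 },   // XYZ::R1
};

extern const uint16_t XYZRegUnitRoots[][2] = {
  { XYZ::R0 },
  { XYZ::R1 },
};

namespace {     // Register classes...
  // GPR Register Class...
  const uint16_t GPR[] = {
    XYZ::R0, XYZ::R1,
  };

  // GPR Bit set.
  const uint8_t GPRBits[] = {
    0x06,   // registers with enum values 1 and 2
  };
}

extern const MCRegisterClass XYZMCRegisterClasses[] = {
  { "GPR", GPR, GPRBits, 2, sizeof(GPRBits), XYZ::GPRRegClassID, 4, 4, 1, 1 },
};

extern const uint16_t XYZRegEncodingTable[] = {
  0,   // NoRegister
  0,   // XYZ::R0's HWEncoding
  1,   // XYZ::R1's HWEncoding
};

static inline void InitXYZMCRegisterInfo(MCRegisterInfo *RI, unsigned RA,
    unsigned DwarfFlavour = 0, unsigned EHFlavour = 0, unsigned PC = 0) {
  RI->InitMCRegisterInfo(XYZRegDesc, 3, RA, PC, XYZMCRegisterClasses, 1,
                         XYZRegUnitRoots, 2, XYZRegDiffLists, XYZRegStrings,
                         XYZSubRegIdxLists, 1,
                         XYZSubRegIdxRanges,   XYZRegEncodingTable);
}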
Example #29
0
/// Generate a new apply of a function_ref to replace an apply of a
/// witness_method when we've determined the actual function we'll end
/// up calling.
static ApplySite devirtualizeWitnessMethod(ApplySite AI, SILFunction *F,
                                           ArrayRef<Substitution> Subs) {
  // We know the witness thunk and the corresponding set of substitutions
  // required to invoke the protocol method at this point.
  auto &Module = AI.getModule();

  // Collect all the required substitutions.
  //
  // The complete set of substitutions may be different, e.g. because the found
  // witness thunk F may have been created by a specialization pass and have
  // additional generic parameters.
  SmallVector<Substitution, 16> NewSubstList(Subs.begin(), Subs.end());
  if (auto generics = AI.getOrigCalleeType()->getGenericSignature()) {
    ArrayRef<Substitution> origSubs = AI.getSubstitutions();
    for (auto genericParam : generics->getAllDependentTypes()) {
      auto origSub = origSubs.front();
      origSubs = origSubs.slice(1);

      // Ignore generic parameters derived from 'self', the generic
      // parameter at depth 0, index 0.
      auto type = genericParam->getCanonicalType();
      while (auto memberType = dyn_cast<DependentMemberType>(type)) {
        type = memberType.getBase();
      }
      auto paramType = cast<GenericTypeParamType>(type);
      if (paramType->getDepth() == 0) {
        // There shouldn't be any other parameters at this depth.
        assert(paramType->getIndex() == 0);
        continue;
      }

      // Okay, remember this substitution.
      NewSubstList.push_back(origSub);
    }

    assert(origSubs.empty() && "subs not parallel to dependent types");
  }

  // Figure out the exact bound type of the function to be called by
  // applying all substitutions.
  auto CalleeCanType = F->getLoweredFunctionType();
  auto SubstCalleeCanType = CalleeCanType->substGenericArgs(
    Module, Module.getSwiftModule(), NewSubstList);

  // Collect arguments from the apply instruction.
  SmallVector<SILValue, 4> Arguments;

  // Iterate over the arguments, adding each one to the new argument list
  // and upcasting where required.
  SILBuilderWithScope B(AI.getInstruction());
  for (unsigned ArgN = 0, ArgE = AI.getNumArguments(); ArgN != ArgE; ++ArgN) {
    SILValue A = AI.getArgument(ArgN);
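    // Index the parameter from the end of the callee's argument list: a
    // partial_apply supplies only a trailing subset of the arguments, so
    // aligning from the back keeps argument and parameter types matched.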
    auto ParamType = SubstCalleeCanType->getSILArgumentType(
      SubstCalleeCanType->getNumSILArguments() - AI.getNumArguments() + ArgN);
    if (A->getType() != ParamType)
      A = B.createUpcast(AI.getLoc(), A, ParamType);

    Arguments.push_back(A);
  }

  // Replace old apply instruction by a new apply instruction that invokes
  // the witness thunk.
  SILBuilderWithScope Builder(AI.getInstruction());
  SILLocation Loc = AI.getLoc();
  FunctionRefInst *FRI = Builder.createFunctionRef(Loc, F);

  auto SubstCalleeSILType = SILType::getPrimitiveObjectType(SubstCalleeCanType);
  auto ResultSILType = SubstCalleeCanType->getSILResult();
  ApplySite SAI;

  if (auto *A = dyn_cast<ApplyInst>(AI))
    SAI = Builder.createApply(Loc, FRI, SubstCalleeSILType,
                              ResultSILType, NewSubstList, Arguments,
                              A->isNonThrowing());
  else if (auto *TAI = dyn_cast<TryApplyInst>(AI))
    SAI = Builder.createTryApply(Loc, FRI, SubstCalleeSILType,
                                 NewSubstList, Arguments,
                                 TAI->getNormalBB(), TAI->getErrorBB());
  else if (auto *PAI = dyn_cast<PartialApplyInst>(AI))
    SAI = Builder.createPartialApply(Loc, FRI, SubstCalleeSILType,
                                     NewSubstList, Arguments, PAI->getType());

  NumWitnessDevirt++;
  return SAI;
}
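At the SIL level, the rewrite above amounts to replacing the dynamic witness_method callee with a direct function_ref, roughly as follows for the plain apply case (try_apply and partial_apply are analogous). The instruction shapes are abbreviated and the names are illustrative only:

  // Before devirtualization:
  //   %m = witness_method $ConcreteType, #Proto.method!1 : ...
  //   %r = apply %m<Subs>(%args...) : ...
  //
  // After devirtualization:
  //   %f = function_ref @concrete_witness_thunk : ...
  //   %r = apply %f<NewSubs>(%args...) : ...   // arguments upcast as needed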
Example #30
0
void GenericEnvironment::
getSubstitutionMap(ModuleDecl *mod,
                   ArrayRef<Substitution> subs,
                   SubstitutionMap &result) const {
  for (auto depTy : getGenericSignature()->getAllDependentTypes()) {

    // Map the interface type to a context type.
    auto contextTy = depTy.subst(QueryInterfaceTypeSubstitutions(this),
                                 LookUpConformanceInModule(mod),
                                 SubstOptions());

    auto sub = subs.front();
    subs = subs.slice(1);

    // Record the replacement type and its conformances.
    if (auto *archetype = contextTy->getAs<ArchetypeType>()) {
      result.addSubstitution(CanArchetypeType(archetype), sub.getReplacement());
      for (auto conformance : sub.getConformances())
        result.addConformance(CanType(archetype), conformance);
      continue;
    }

    assert(contextTy->is<ErrorType>());
  }

  for (auto reqt : getGenericSignature()->getRequirements()) {
    if (reqt.getKind() != RequirementKind::SameType)
      continue;

    auto first = reqt.getFirstType();
    auto second = reqt.getSecondType();

    auto archetype = mapTypeIntoContext(mod, first)->getAs<ArchetypeType>();
    if (!archetype)
      continue;

#ifndef NDEBUG
    auto secondArchetype = mapTypeIntoContext(mod, second)->getAs<ArchetypeType>();
    assert(secondArchetype == archetype);
#endif

    if (auto *firstMemTy = first->getAs<DependentMemberType>()) {
      auto parent = mapTypeIntoContext(mod, firstMemTy->getBase())
          ->getAs<ArchetypeType>();
      if (parent && archetype->getParent() != parent) {
        result.addParent(CanType(archetype),
                         CanType(parent),
                         firstMemTy->getAssocType());
      }
    }

    if (auto *secondMemTy = second->getAs<DependentMemberType>()) {
      auto parent = mapTypeIntoContext(mod, secondMemTy->getBase())
          ->getAs<ArchetypeType>();
      if (parent && archetype->getParent() != parent) {
        result.addParent(CanType(archetype),
                         CanType(parent),
                         secondMemTy->getAssocType());
      }
    }
  }

  assert(subs.empty() && "did not use all substitutions?!");
}
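The front()/slice(1) walk above is a general idiom for consuming one ArrayRef in lockstep with another sequence, ending with an assert that the two lengths matched. A minimal, self-contained sketch of the same idiom follows; the names are our own invention, not compiler code:

#include <cassert>
#include <map>
#include <string>

#include "llvm/ADT/ArrayRef.h"

// Pair each key with the next value, shrinking the value slice in lockstep.
static std::map<std::string, int> zipIntoMap(llvm::ArrayRef<std::string> keys,
                                             llvm::ArrayRef<int> values) {
  std::map<std::string, int> result;
  for (const auto &key : keys) {
    int value = values.front(); // take the next value...
    values = values.slice(1);   // ...and advance the slice
    result[key] = value;
  }
  assert(values.empty() && "did not use all values?!");
  return result;
}

Consuming the slice rather than indexing it makes the final values.empty() assert a cheap proof that the two sequences had the same length, which is exactly what the subs.empty() assert above checks.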