Example #1
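/// Emit runtime calls that check the compiler's fixed layout for each type
/// (size, alignment mask, stride, and value-witness flags) against the
/// layout reported by the runtime's type metadata.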
void
irgen::emitTypeLayoutVerifier(IRGenFunction &IGF,
                              ArrayRef<CanType> formalTypes) {
  llvm::Type *verifierArgTys[] = {
    IGF.IGM.TypeMetadataPtrTy,
    IGF.IGM.Int8PtrTy,
    IGF.IGM.Int8PtrTy,
    IGF.IGM.SizeTy,
    IGF.IGM.Int8PtrTy,
  };
  auto verifierFnTy = llvm::FunctionType::get(IGF.IGM.VoidTy,
                                              verifierArgTys,
                                              /*var arg*/ false);
  auto verifierFn = IGF.IGM.Module.getOrInsertFunction(
                                       "_swift_debug_verifyTypeLayoutAttribute",
                                       verifierFnTy);
  struct VerifierArgumentBuffers {
    Address runtimeBuf, staticBuf;
  };
  llvm::DenseMap<llvm::Type *, VerifierArgumentBuffers>
    verifierArgBufs;
  
  auto getSizeConstant = [&](Size sz) -> llvm::Constant * {
    return llvm::ConstantInt::get(IGF.IGM.SizeTy, sz.getValue());
  };
  auto getAlignmentMaskConstant = [&](Alignment a) -> llvm::Constant * {
    return llvm::ConstantInt::get(IGF.IGM.SizeTy, a.getValue() - 1);
  };
  auto getBoolConstant = [&](bool b) -> llvm::Constant * {
    return llvm::ConstantInt::get(IGF.IGM.Int1Ty, b);
  };

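  // Scratch buffer for rendering extra-inhabitant indices as text in the
  // (currently disabled) extra-inhabitant checks below.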
  SmallString<20> numberBuf;

  for (auto formalType : formalTypes) {
    // Runtime type metadata always represents the maximal abstraction level of
    // the type.
    auto anyTy = ProtocolCompositionType::get(IGF.IGM.Context, {});
    auto openedAnyTy = ArchetypeType::getOpened(anyTy);
    auto maxAbstraction = AbstractionPattern(openedAnyTy);
    auto &ti = IGF.getTypeInfoForUnlowered(maxAbstraction, formalType);
    
    // If there's no fixed type info, we rely on the runtime anyway, so there's
    // nothing to verify.
    // TODO: There are some traits of partially-fixed layouts we could check too.
    auto *fixedTI = dyn_cast<FixedTypeInfo>(&ti);
    if (!fixedTI)
      continue;
    
    auto metadata = IGF.emitTypeMetadataRef(formalType);
    
    auto verify = [&](llvm::Value *runtimeVal,
                      llvm::Value *staticVal,
                      const llvm::Twine &description) {
      assert(runtimeVal->getType() == staticVal->getType());
      // Get or create buffers for the arguments.
      VerifierArgumentBuffers bufs;
      auto foundBufs = verifierArgBufs.find(runtimeVal->getType());
      if (foundBufs != verifierArgBufs.end()) {
        bufs = foundBufs->second;
      } else {
        Address runtimeBuf = IGF.createAlloca(runtimeVal->getType(),
                                              IGF.IGM.getPointerAlignment(),
                                              "runtime");
        Address staticBuf = IGF.createAlloca(staticVal->getType(),
                                             IGF.IGM.getPointerAlignment(),
                                             "static");
        bufs = {runtimeBuf, staticBuf};
        verifierArgBufs[runtimeVal->getType()] = bufs;
      }
      
      IGF.Builder.CreateStore(runtimeVal, bufs.runtimeBuf);
      IGF.Builder.CreateStore(staticVal, bufs.staticBuf);
      
      auto runtimePtr = IGF.Builder.CreateBitCast(bufs.runtimeBuf.getAddress(),
                                                  IGF.IGM.Int8PtrTy);
      auto staticPtr = IGF.Builder.CreateBitCast(bufs.staticBuf.getAddress(),
                                                 IGF.IGM.Int8PtrTy);
      auto count = llvm::ConstantInt::get(IGF.IGM.SizeTy,
                    IGF.IGM.DataLayout.getTypeStoreSize(runtimeVal->getType()));
      auto msg
        = IGF.IGM.getAddrOfGlobalString(description.str());
      
      IGF.Builder.CreateCall(
          verifierFn, {metadata, runtimePtr, staticPtr, count, msg});
    };

    // Check that the fixed layout matches the runtime layout.
    SILType layoutType = SILType::getPrimitiveObjectType(formalType);
    verify(emitLoadOfSize(IGF, layoutType),
           getSizeConstant(fixedTI->getFixedSize()),
           "size");
    verify(emitLoadOfAlignmentMask(IGF, layoutType),
           getAlignmentMaskConstant(fixedTI->getFixedAlignment()),
           "alignment mask");
    verify(emitLoadOfStride(IGF, layoutType),
           getSizeConstant(fixedTI->getFixedStride()),
           "stride");
    verify(emitLoadOfIsInline(IGF, layoutType),
           getBoolConstant(fixedTI->getFixedPacking(IGF.IGM)
                             == FixedPacking::OffsetZero),
           "is-inline bit");
    verify(emitLoadOfIsPOD(IGF, layoutType),
           getBoolConstant(fixedTI->isPOD(ResilienceScope::Component)),
           "is-POD bit");
    verify(emitLoadOfIsBitwiseTakable(IGF, layoutType),
           getBoolConstant(fixedTI->isBitwiseTakable(ResilienceScope::Component)),
           "is-bitwise-takable bit");
    unsigned xiCount = fixedTI->getFixedExtraInhabitantCount(IGF.IGM);
    verify(emitLoadOfHasExtraInhabitants(IGF, layoutType),
           getBoolConstant(xiCount != 0),
           "has-extra-inhabitants bit");

    // Check extra inhabitants.
    if (xiCount > 0) {
      verify(emitLoadOfExtraInhabitantCount(IGF, layoutType),
             getSizeConstant(Size(xiCount)),
             "extra inhabitant count");
      
      // Verify that the extra inhabitant representations are consistent.
      
      /* TODO: Update for EnumPayload implementation changes.
      
      auto xiBuf = IGF.createAlloca(fixedTI->getStorageType(),
                                    fixedTI->getFixedAlignment(),
                                    "extra-inhabitant");
      auto xiOpaque = IGF.Builder.CreateBitCast(xiBuf, IGF.IGM.OpaquePtrTy);
      
      // TODO: Randomize the set of extra inhabitants we check.
      unsigned bits = fixedTI->getFixedSize().getValueInBits();
      for (unsigned i = 0, e = std::min(xiCount, 1024u);
           i < e; ++i) {
        // Initialize the buffer with junk, to help ensure we're insensitive to
        // insignificant bits.
        // TODO: Randomize the filler.
        IGF.Builder.CreateMemSet(xiBuf.getAddress(),
                                 llvm::ConstantInt::get(IGF.IGM.Int8Ty, 0x5A),
                                 fixedTI->getFixedSize().getValue(),
                                 fixedTI->getFixedAlignment().getValue());
        
        // Ask the runtime to store an extra inhabitant.
        auto index = llvm::ConstantInt::get(IGF.IGM.Int32Ty, i);
        emitStoreExtraInhabitantCall(IGF, layoutType, index,
                                     xiOpaque.getAddress());
        
        // Compare the stored extra inhabitant against the fixed extra
        // inhabitant pattern.
        auto fixedXI = fixedTI->getFixedExtraInhabitantValue(IGF.IGM, bits, i);
        auto xiBuf2 = IGF.Builder.CreateBitCast(xiBuf,
                                            fixedXI->getType()->getPointerTo());
        llvm::Value *runtimeXI = IGF.Builder.CreateLoad(xiBuf2);
        runtimeXI = fixedTI->maskFixedExtraInhabitant(IGF, runtimeXI);
        
        numberBuf.clear();
        {
          llvm::raw_svector_ostream os(numberBuf);
          os << i;
          os.flush();
        }
        
        verify(runtimeXI, fixedXI,
               llvm::Twine("stored extra inhabitant ") + numberBuf.str());
        
        // Now store the fixed extra inhabitant and ask the runtime to identify
        // it.
        // Mask in junk to make sure the runtime correctly ignores it.
        auto xiMask = fixedTI->getFixedExtraInhabitantMask(IGF.IGM).asAPInt();
        auto maskVal = llvm::ConstantInt::get(IGF.IGM.getLLVMContext(), xiMask);
        auto notMaskVal
          = llvm::ConstantInt::get(IGF.IGM.getLLVMContext(), ~xiMask);
        // TODO: Randomize the filler.
        auto xiFill = llvm::ConstantInt::getAllOnesValue(fixedXI->getType());
        llvm::Value *xiFillMask = IGF.Builder.CreateAnd(notMaskVal, xiFill);
        llvm::Value *xiValMask = IGF.Builder.CreateAnd(maskVal, fixedXI);
        llvm::Value *filledXI = IGF.Builder.CreateOr(xiFillMask, xiValMask);
        
        IGF.Builder.CreateStore(filledXI, xiBuf2);
        
        auto runtimeIndex = emitGetExtraInhabitantIndexCall(IGF, layoutType,
                                                        xiOpaque.getAddress());
        verify(runtimeIndex, index,
               llvm::Twine("extra inhabitant index calculation ")
                 + numberBuf.str());
      }
       */
    }

    // TODO: Verify interesting layout properties specific to the kind of type,
    // such as struct or class field offsets, enum case tags, vtable entries,
    // etc.
  }
}
Example #2
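/// Record abstract (path-based) local type data entries for the given
/// fulfillments, creating the shared abstract source lazily the first time
/// an entry actually needs it.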
void LocalTypeDataCache::
addAbstractForFulfillments(IRGenFunction &IGF, FulfillmentMap &&fulfillments,
                           llvm::function_ref<AbstractSource()> createSource) {
  // Add the source lazily.
  Optional<unsigned> sourceIndex;
  auto getSourceIndex = [&]() -> unsigned {
    if (!sourceIndex) {
      AbstractSources.emplace_back(createSource());
      sourceIndex = AbstractSources.size() - 1;
    }
    return *sourceIndex;
  };

  for (auto &fulfillment : fulfillments) {
    CanType type = CanType(fulfillment.first.first);
    LocalTypeDataKind localDataKind;

    // For now, ignore witness-table fulfillments when they're not for
    // archetypes.
    if (ProtocolDecl *protocol = fulfillment.first.second) {
      if (auto archetype = dyn_cast<ArchetypeType>(type)) {
        auto conformsTo = archetype->getConformsTo();
        auto it = std::find(conformsTo.begin(), conformsTo.end(), protocol);
        if (it == conformsTo.end()) continue;
        localDataKind = LocalTypeDataKind::forAbstractProtocolWitnessTable(*it);
      } else {
        continue;
      }

    } else {
      // Ignore type metadata fulfillments for non-dependent types that
      // we can produce very cheaply.  We don't want to end up emitting
      // the type metadata for Int by chasing through N layers of metadata
      // just because that path happens to be in the cache.
      if (!type->hasArchetype() &&
          getTypeMetadataAccessStrategy(IGF.IGM, type, /*preferDirect*/ true)
            == MetadataAccessStrategy::Direct) {
        continue;
      }

      localDataKind = LocalTypeDataKind::forTypeMetadata();
    }

    // Find the chain for the key.
    auto key = getKey(type, localDataKind);
    auto &chain = Map[key];

    // Check whether there's already an entry that's at least as good as the
    // fulfillment.
    Optional<unsigned> fulfillmentCost;
    auto getFulfillmentCost = [&]() -> unsigned {
      if (!fulfillmentCost)
        fulfillmentCost = fulfillment.second.Path.cost();
      return *fulfillmentCost;
    };

    bool isConditional = IGF.isConditionalDominancePoint();

    bool foundBetter = false;
    for (CacheEntry *cur = chain.Root, *last = nullptr; cur;
         last = cur, cur = cur->getNext()) {
      // Ensure the entry is acceptable.
      if (!IGF.isActiveDominancePointDominatedBy(cur->DefinitionPoint))
        continue;

      // Ensure that the entry isn't better than the fulfillment.
      auto curCost = cur->cost();
      if (curCost == 0 || curCost <= getFulfillmentCost()) {
        foundBetter = true;
        break;
      }

      // If the entry is defined at the current point, (1) we know there
      // won't be a better entry and (2) we should remove it.
      if (cur->DefinitionPoint == IGF.getActiveDominancePoint() &&
          !isConditional) {
        // Splice it out of the chain.
        assert(!cur->isConditional());
        chain.eraseEntry(last, cur);
        break;
      }
    }
    if (foundBetter) continue;

    // Okay, make a new entry.

    // Register with the conditional dominance scope if necessary.
    if (isConditional) {
      IGF.registerConditionalLocalTypeDataKey(key);
    }

    // Allocate the new entry.
    auto newEntry = new AbstractCacheEntry(IGF.getActiveDominancePoint(),
                                           isConditional,
                                           getSourceIndex(),
                                           std::move(fulfillment.second.Path));

    // Add it to the front of the chain.
    chain.push_front(newEntry);
  }
}
Example #3
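/// Look up cached local type data for a key, returning the best entry that
/// dominates the active point: abstract entries are skipped unless
/// allowAbstract is set, and lower-cost entries win on collision.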
MetadataResponse
LocalTypeDataCache::tryGet(IRGenFunction &IGF, LocalTypeDataKey key,
                           bool allowAbstract, DynamicMetadataRequest request) {
  // Use the caching key.
  key = key.getCachingKey();

  auto it = Map.find(key);
  if (it == Map.end()) return MetadataResponse();
  auto &chain = it->second;

  CacheEntry *best = nullptr;
  Optional<OperationCost> bestCost;

  CacheEntry *next = chain.Root;
  while (next) {
    CacheEntry *cur = next;
    next = cur->getNext();

    // Ignore abstract entries if so requested.
    if (!allowAbstract && !isa<ConcreteCacheEntry>(cur))
      continue;

    // Ignore unacceptable entries.
    if (!IGF.isActiveDominancePointDominatedBy(cur->DefinitionPoint))
      continue;

    // If there's a collision, compare by cost, ignoring higher-cost entries.
    if (best) {
      // Compute the cost of the best entry if we haven't done so already.
      // If that's zero, go ahead and short-circuit out.
      if (!bestCost) {
        bestCost = best->costForRequest(key, request);
        if (*bestCost == OperationCost::Free) break;
      }

      auto curCost = cur->costForRequest(key, request);
      if (curCost >= *bestCost) continue;

      // Replace the best cost and fall through.
      bestCost = curCost;
    }
    best = cur;
  }

  // If we didn't find anything, we're done.
  if (!best) return MetadataResponse();

  // Okay, we've found the best entry available.
  switch (best->getKind()) {

  // For concrete caches, this is easy.
  case CacheEntry::Kind::Concrete: {
    auto entry = cast<ConcreteCacheEntry>(best);

    if (entry->immediatelySatisfies(key, request))
      return entry->Value;

    assert(key.Kind.isAnyTypeMetadata());

    // Emit a dynamic check that the type metadata matches the request.
    // TODO: we could potentially end up calling this redundantly with a
    //   dynamic request.  Fortunately, those are used only in very narrow
    //   circumstances.
    auto response = emitCheckTypeMetadataState(IGF, request, entry->Value);

    // Add a concrete entry for the checked result.
    IGF.setScopedLocalTypeData(key, response);

    return response;
  }

  // For abstract caches, we need to follow a path.
  case CacheEntry::Kind::Abstract: {
    auto entry = cast<AbstractCacheEntry>(best);

    // Follow the path.
    auto &source = AbstractSources[entry->SourceIndex];
    auto response = entry->follow(IGF, source, request);

    // Following the path automatically caches at every point along it,
    // including the end.
    assert(chain.Root->DefinitionPoint == IGF.getActiveDominancePoint());
    assert(isa<ConcreteCacheEntry>(chain.Root));

    return response;
  }

  }
  llvm_unreachable("bad cache entry kind");
}
Example #4
/// Emit a checked cast to a protocol or protocol composition.
void irgen::emitScalarExistentialDowncast(IRGenFunction &IGF,
                                  llvm::Value *value,
                                  SILType srcType,
                                  SILType destType,
                                  CheckedCastMode mode,
                                  Optional<MetatypeRepresentation> metatypeKind,
                                  Explosion &ex) {
  SmallVector<ProtocolDecl*, 4> allProtos;
  destType.getSwiftRValueType().getAnyExistentialTypeProtocols(allProtos);

  // Look up witness tables for the protocols that need them and get
  // references to the ObjC Protocol* values for the objc protocols.
  SmallVector<llvm::Value*, 4> objcProtos;
  SmallVector<llvm::Value*, 4> witnessTableProtos;

  bool hasClassConstraint = false;
  bool hasClassConstraintByProtocol = false;

  for (auto proto : allProtos) {
    // If the protocol introduces a class constraint, track whether we need
    // to check for it independent of protocol witnesses.
    if (proto->requiresClass()) {
      hasClassConstraint = true;
      if (proto->getKnownProtocolKind()
          && *proto->getKnownProtocolKind() == KnownProtocolKind::AnyObject) {
        // AnyObject only requires that the type be a class.
        continue;
      }
      
      // If this protocol is class-constrained but not AnyObject, checking its
      // conformance will check the class constraint too.
      hasClassConstraintByProtocol = true;
    }

    if (Lowering::TypeConverter::protocolRequiresWitnessTable(proto)) {
      auto descriptor = emitProtocolDescriptorRef(IGF, proto);
      witnessTableProtos.push_back(descriptor);
    }

    if (!proto->isObjC())
      continue;

    objcProtos.push_back(emitReferenceToObjCProtocol(IGF, proto));
  }
  
  llvm::Type *resultType;
  if (metatypeKind) {
    switch (*metatypeKind) {
    case MetatypeRepresentation::Thin:
      llvm_unreachable("can't cast to thin metatype");
    case MetatypeRepresentation::Thick:
      resultType = IGF.IGM.TypeMetadataPtrTy;
      break;
    case MetatypeRepresentation::ObjC:
      resultType = IGF.IGM.ObjCClassPtrTy;
      break;
    }
  } else {
    auto schema = IGF.getTypeInfo(destType).getSchema();
    resultType = schema[0].getScalarType();
  }
  // We only need to check the class constraint for metatype casts where
  // no protocol conformance indirectly requires the constraint for us.
  bool checkClassConstraint =
    (bool)metatypeKind && hasClassConstraint && !hasClassConstraintByProtocol;

  llvm::Value *resultValue = value;

  // If we don't have anything we really need to check, then trivially succeed.
  if (objcProtos.empty() && witnessTableProtos.empty() &&
      !checkClassConstraint) {
    resultValue = IGF.Builder.CreateBitCast(value, resultType);
    ex.add(resultValue);
    return;
  }

  // Check the ObjC protocol conformances if there were any.
  llvm::Value *objcCast = nullptr;
  if (!objcProtos.empty()) {
    // Get the ObjC instance or class object to check for these conformances.
    llvm::Value *objcObject;
    if (metatypeKind) {
      switch (*metatypeKind) {
      case MetatypeRepresentation::Thin:
        llvm_unreachable("can't cast to thin metatype");
      case MetatypeRepresentation::Thick: {
        // The metadata might be for a non-class type, which wouldn't have
        // an ObjC class object.
        objcObject = nullptr;
        break;
      }
      case MetatypeRepresentation::ObjC:
        // Metatype is already an ObjC object.
        objcObject = value;
        break;
      }
    } else {
      // Class instance is already an ObjC object.
      objcObject = value;
    }
    if (objcObject)
      objcObject = IGF.Builder.CreateBitCast(objcObject,
                                             IGF.IGM.UnknownRefCountedPtrTy);
    
    // Pick the cast function based on the cast mode and on whether we're
    // casting a Swift metatype or ObjC object.
    llvm::Value *castFn;
    switch (mode) {
    case CheckedCastMode::Unconditional:
      castFn = objcObject
        ? IGF.IGM.getDynamicCastObjCProtocolUnconditionalFn()
        : IGF.IGM.getDynamicCastTypeToObjCProtocolUnconditionalFn();
      break;
    case CheckedCastMode::Conditional:
      castFn = objcObject
        ? IGF.IGM.getDynamicCastObjCProtocolConditionalFn()
        : IGF.IGM.getDynamicCastTypeToObjCProtocolConditionalFn();
      break;
    }
    llvm::Value *objcCastObject = objcObject ? objcObject : value;
    
    Address protoRefsBuf = IGF.createAlloca(
                                        llvm::ArrayType::get(IGF.IGM.Int8PtrTy,
                                                             objcProtos.size()),
                                        IGF.IGM.getPointerAlignment(),
                                        "objc_protocols");
    protoRefsBuf = IGF.Builder.CreateBitCast(protoRefsBuf,
                                             IGF.IGM.Int8PtrPtrTy);

    for (unsigned index : indices(objcProtos)) {
      Address protoRefSlot = IGF.Builder.CreateConstArrayGEP(
                                                     protoRefsBuf, index,
                                                     IGF.IGM.getPointerSize());
      IGF.Builder.CreateStore(objcProtos[index], protoRefSlot);
    }

    
    objcCast = IGF.Builder.CreateCall(
        castFn,
        {objcCastObject, IGF.IGM.getSize(Size(objcProtos.size())),
         protoRefsBuf.getAddress()});
    resultValue = IGF.Builder.CreateBitCast(objcCast, resultType);
  }

  // If we don't need to look up any witness tables, we're done.
  if (witnessTableProtos.empty() && !checkClassConstraint) {
    ex.add(resultValue);
    return;
  }

  // If we're doing a conditional cast, and the ObjC protocol checks failed,
  // then the cast is done.
  Optional<ConditionalDominanceScope> condition;
  llvm::BasicBlock *origBB = nullptr, *successBB = nullptr, *contBB = nullptr;
  if (!objcProtos.empty()) {
    switch (mode) {
    case CheckedCastMode::Unconditional:
      break;
    case CheckedCastMode::Conditional: {
      origBB = IGF.Builder.GetInsertBlock();
      successBB = IGF.createBasicBlock("success");
      contBB = IGF.createBasicBlock("cont");
      auto isNull = IGF.Builder.CreateICmpEQ(objcCast,
                               llvm::ConstantPointerNull::get(
                                 cast<llvm::PointerType>(objcCast->getType())));
      IGF.Builder.CreateCondBr(isNull, contBB, successBB);
      IGF.Builder.emitBlock(successBB);
      condition.emplace(IGF);
    }
    }
  }

  // Get the Swift type metadata for the type.
  llvm::Value *metadataValue;
  if (metatypeKind) {
    switch (*metatypeKind) {
    case MetatypeRepresentation::Thin:
      llvm_unreachable("can't cast to thin metatype");
    case MetatypeRepresentation::Thick:
      // The value is already a native metatype.
      metadataValue = value;
      break;
    case MetatypeRepresentation::ObjC:
      // Get the type metadata from the ObjC class, which may be a wrapper.
      metadataValue = emitObjCMetadataRefForMetadata(IGF, value);
    }
  } else {
    // Get the type metadata for the instance.
    metadataValue = emitDynamicTypeOfHeapObject(IGF, value, srcType);
  }

  // Look up witness tables for the protocols that need them.
  auto fn = emitExistentialScalarCastFn(IGF.IGM, witnessTableProtos.size(),
                                        mode, checkClassConstraint);

  llvm::SmallVector<llvm::Value *, 4> args;

  if (resultValue->getType() != IGF.IGM.Int8PtrTy)
    resultValue = IGF.Builder.CreateBitCast(resultValue, IGF.IGM.Int8PtrTy);
  args.push_back(resultValue);

  args.push_back(metadataValue);
  for (auto proto : witnessTableProtos)
    args.push_back(proto);

  auto valueAndWitnessTables = IGF.Builder.CreateCall(fn, args);

  resultValue = IGF.Builder.CreateExtractValue(valueAndWitnessTables, 0);
  if (resultValue->getType() != resultType)
    resultValue = IGF.Builder.CreateBitCast(resultValue, resultType);
  ex.add(resultValue);

  for (unsigned i = 0, e = witnessTableProtos.size(); i < e; ++i) {
    auto wt = IGF.Builder.CreateExtractValue(valueAndWitnessTables, i + 1);
    ex.add(wt);
  }

  // If we had conditional ObjC checks, join the failure paths.
  if (contBB) {
    condition.reset();
    IGF.Builder.CreateBr(contBB);
    IGF.Builder.emitBlock(contBB);
    
    // Return null on the failure path.
    Explosion successEx = std::move(ex);
    ex.reset();
    
    while (!successEx.empty()) {
      auto successVal = successEx.claimNext();
      auto failureVal = llvm::Constant::getNullValue(successVal->getType());
      auto phi = IGF.Builder.CreatePHI(successVal->getType(), 2);
      phi->addIncoming(successVal, successBB);
      phi->addIncoming(failureVal, origBB);
      ex.add(phi);
    }
  }
}
Example #5
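/// Emit a switch over one chunk of a multi-value enum payload. Each level
/// of recursion consumes the next payload value, masks off and switches on
/// the corresponding slice of the case discriminators, and recurses in
/// fresh blocks for any remaining chunks.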
static void emitSubSwitch(IRGenFunction &IGF,
                    MutableArrayRef<EnumPayload::LazyValue> values,
                    APInt mask,
                    MutableArrayRef<std::pair<APInt, llvm::BasicBlock *>> cases,
                    SwitchDefaultDest dflt) {
recur:
  assert(!values.empty() && "didn't exit out when exhausting all values?!");
  
  assert(!cases.empty() && "switching with no cases?!");
  
  auto &DL = IGF.IGM.DataLayout;
  auto &pv = values.front();
  values = values.slice(1);
  auto payloadTy = getPayloadType(pv);
  unsigned size = DL.getTypeSizeInBits(payloadTy);
  
  // Grab a chunk of the mask.
  auto maskPiece = mask.zextOrTrunc(size);
  mask = mask.lshr(size);
  
  // If the piece is zero, this doesn't affect the switch. We can just move
  // forward and recur.
  if (maskPiece == 0) {
    for (auto &casePair : cases)
      casePair.first = casePair.first.lshr(size);
    goto recur;
  }
  
  // Force the value we will test.
  auto v = forcePayloadValue(pv);
  auto payloadIntTy = llvm::IntegerType::get(IGF.IGM.getLLVMContext(), size);
  
  // Need to coerce to integer for 'icmp eq' if it's not already an integer
  // or pointer. (Switching or masking will also require a cast to integer.)
  if (!isa<llvm::IntegerType>(v->getType())
      && !isa<llvm::PointerType>(v->getType()))
    v = IGF.Builder.CreateBitOrPointerCast(v, payloadIntTy);
  
  // Apply the mask if it's interesting.
  if (!maskPiece.isAllOnesValue()) {
    v = IGF.Builder.CreateBitOrPointerCast(v, payloadIntTy);
    auto maskConstant = llvm::ConstantInt::get(payloadIntTy, maskPiece);
    v = IGF.Builder.CreateAnd(v, maskConstant);
  }
  
  // Gather the values we will switch over for this payload chunk.
  // FIXME: std::map is lame. Should hash APInts.
  std::map<APInt, SmallVector<std::pair<APInt, llvm::BasicBlock*>, 2>, ult>
    subCases;
  
  for (auto casePair : cases) {
    // Grab a chunk of the value.
    auto valuePiece = casePair.first.zextOrTrunc(size);
    // Index the case according to this chunk.
    subCases[valuePiece].push_back({std::move(casePair.first).lshr(size),
                                    casePair.second});
  }
  
  bool needsAdditionalCases = !values.empty() && mask != 0;
  SmallVector<std::pair<llvm::BasicBlock *, decltype(cases)>, 2> recursiveCases;
  
  auto blockForCases
    = [&](MutableArrayRef<std::pair<APInt, llvm::BasicBlock*>> cases)
        -> llvm::BasicBlock *
    {
      // If we need to recur, emit a new block.
      if (needsAdditionalCases) {
        auto newBB = IGF.createBasicBlock("");
        recursiveCases.push_back({newBB, cases});
        return newBB;
      }
      // Otherwise, we can jump directly to the ultimate destination.
      assert(cases.size() == 1 && "more than one case for final destination?!");
      return cases.front().second;
    };
  
  // If there's only one case, do a cond_br.
  if (subCases.size() == 1) {
    auto &subCase = *subCases.begin();
    llvm::BasicBlock *block = blockForCases(subCase.second);
    // If the default case is unreachable, we don't need to conditionally
    // branch.
    if (dflt.getInt()) {
      IGF.Builder.CreateBr(block);
      goto next;
    }
  
    auto &valuePiece = subCase.first;
    llvm::Value *valueConstant = llvm::ConstantInt::get(payloadIntTy,
                                                        valuePiece);
    valueConstant = IGF.Builder.CreateBitOrPointerCast(valueConstant,
                                                       v->getType());
    auto cmp = IGF.Builder.CreateICmpEQ(v, valueConstant);
    IGF.Builder.CreateCondBr(cmp, block, dflt.getPointer());
    goto next;
  }
  
  // Otherwise, do a switch.
  {
    v = IGF.Builder.CreateBitOrPointerCast(v, payloadIntTy);
    auto swi = IGF.Builder.CreateSwitch(v, dflt.getPointer(), subCases.size());
    
    for (auto &subCase : subCases) {
      auto &valuePiece = subCase.first;
      auto valueConstant = llvm::ConstantInt::get(IGF.IGM.getLLVMContext(),
                                                  valuePiece);

      swi->addCase(valueConstant, blockForCases(subCase.second));
    }
  }
  
next:
  // Emit the recursive cases.
  for (auto &recursive : recursiveCases) {
    IGF.Builder.emitBlock(recursive.first);
    emitSubSwitch(IGF, values, mask, recursive.second, dflt);
  }
}
Example #6
/// Emit a checked cast to a protocol or protocol composition.
void irgen::emitScalarExistentialDowncast(IRGenFunction &IGF,
                                  llvm::Value *value,
                                  SILType srcType,
                                  SILType destType,
                                  CheckedCastMode mode,
                                  Optional<MetatypeRepresentation> metatypeKind,
                                  Explosion &ex) {
  auto srcInstanceType = srcType.getSwiftRValueType();
  auto destInstanceType = destType.getSwiftRValueType();
  while (auto metatypeType = dyn_cast<ExistentialMetatypeType>(
           destInstanceType)) {
    destInstanceType = metatypeType.getInstanceType();
    srcInstanceType = cast<AnyMetatypeType>(srcInstanceType).getInstanceType();
  }

  auto layout = destInstanceType.getExistentialLayout();

  // Look up witness tables for the protocols that need them and get
  // references to the ObjC Protocol* values for the objc protocols.
  SmallVector<llvm::Value*, 4> objcProtos;
  SmallVector<llvm::Value*, 4> witnessTableProtos;

  bool hasClassConstraint = layout.requiresClass();
  bool hasClassConstraintByProtocol = false;

  bool hasSuperclassConstraint = bool(layout.superclass);

  for (auto protoTy : layout.getProtocols()) {
    auto *protoDecl = protoTy->getDecl();

    // If the protocol introduces a class constraint, track whether we need
    // to check for it independent of protocol witnesses.
    if (protoDecl->requiresClass()) {
      assert(hasClassConstraint);
      hasClassConstraintByProtocol = true;
    }

    if (Lowering::TypeConverter::protocolRequiresWitnessTable(protoDecl)) {
      auto descriptor = emitProtocolDescriptorRef(IGF, protoDecl);
      witnessTableProtos.push_back(descriptor);
    }

    if (protoDecl->isObjC())
      objcProtos.push_back(emitReferenceToObjCProtocol(IGF, protoDecl));
  }
  
  llvm::Type *resultType;
  if (metatypeKind) {
    switch (*metatypeKind) {
    case MetatypeRepresentation::Thin:
      llvm_unreachable("can't cast to thin metatype");
    case MetatypeRepresentation::Thick:
      resultType = IGF.IGM.TypeMetadataPtrTy;
      break;
    case MetatypeRepresentation::ObjC:
      resultType = IGF.IGM.ObjCClassPtrTy;
      break;
    }
  } else {
    auto schema = IGF.getTypeInfo(destType).getSchema();
    resultType = schema[0].getScalarType();
  }

  // The source of a scalar cast is statically known to be a class or a
  // metatype, so we only have to check the class constraint in two cases:
  //
  // 1) The destination type has an explicit superclass constraint that is
  //    more derived than what the source type is known to be.
  //
  // 2) We are casting between metatypes, in which case the source might
  //    be a non-class metatype.
  bool checkClassConstraint = false;
  if ((bool)metatypeKind &&
      hasClassConstraint &&
      !hasClassConstraintByProtocol &&
      !srcInstanceType->mayHaveSuperclass())
    checkClassConstraint = true;

  // If the source has an equal or more derived superclass constraint than
  // the destination, we can elide the superclass check.
  //
  // Note that destInstanceType is always an existential type, so calling
  // getSuperclass() returns the superclass constraint of the existential,
  // not the superclass of some concrete class.
  bool checkSuperclassConstraint =
    hasSuperclassConstraint &&
    !destInstanceType->getSuperclass()->isExactSuperclassOf(srcInstanceType);

  if (checkSuperclassConstraint)
    checkClassConstraint = true;

  llvm::Value *resultValue = value;

  // If we don't have anything we really need to check, then trivially succeed.
  if (objcProtos.empty() && witnessTableProtos.empty() &&
      !checkClassConstraint) {
    resultValue = IGF.Builder.CreateBitCast(value, resultType);
    ex.add(resultValue);
    return;
  }

  // Check the ObjC protocol conformances if there were any.
  llvm::Value *objcCast = nullptr;
  if (!objcProtos.empty()) {
    // Get the ObjC instance or class object to check for these conformances.
    llvm::Value *objcObject;
    if (metatypeKind) {
      switch (*metatypeKind) {
      case MetatypeRepresentation::Thin:
        llvm_unreachable("can't cast to thin metatype");
      case MetatypeRepresentation::Thick: {
        // The metadata might be for a non-class type, which wouldn't have
        // an ObjC class object.
        objcObject = nullptr;
        break;
      }
      case MetatypeRepresentation::ObjC:
        // Metatype is already an ObjC object.
        objcObject = value;
        break;
      }
    } else {
      // Class instance is already an ObjC object.
      objcObject = value;
    }
    if (objcObject)
      objcObject = IGF.Builder.CreateBitCast(objcObject,
                                             IGF.IGM.UnknownRefCountedPtrTy);
    
    // Pick the cast function based on the cast mode and on whether we're
    // casting a Swift metatype or ObjC object.
    llvm::Constant *castFn;
    switch (mode) {
    case CheckedCastMode::Unconditional:
      castFn = objcObject
        ? IGF.IGM.getDynamicCastObjCProtocolUnconditionalFn()
        : IGF.IGM.getDynamicCastTypeToObjCProtocolUnconditionalFn();
      break;
    case CheckedCastMode::Conditional:
      castFn = objcObject
        ? IGF.IGM.getDynamicCastObjCProtocolConditionalFn()
        : IGF.IGM.getDynamicCastTypeToObjCProtocolConditionalFn();
      break;
    }
    llvm::Value *objcCastObject = objcObject ? objcObject : value;
    
    Address protoRefsBuf = IGF.createAlloca(
                                        llvm::ArrayType::get(IGF.IGM.Int8PtrTy,
                                                             objcProtos.size()),
                                        IGF.IGM.getPointerAlignment(),
                                        "objc_protocols");
    protoRefsBuf = IGF.Builder.CreateBitCast(protoRefsBuf,
                                             IGF.IGM.Int8PtrPtrTy);

    for (unsigned index : indices(objcProtos)) {
      Address protoRefSlot = IGF.Builder.CreateConstArrayGEP(
                                                     protoRefsBuf, index,
                                                     IGF.IGM.getPointerSize());
      IGF.Builder.CreateStore(objcProtos[index], protoRefSlot);
    }

    
    auto cc = IGF.IGM.DefaultCC;
    if (auto fun = dyn_cast<llvm::Function>(castFn))
      cc = fun->getCallingConv();


    auto call = IGF.Builder.CreateCall(
        castFn,
        {objcCastObject, IGF.IGM.getSize(Size(objcProtos.size())),
         protoRefsBuf.getAddress()});
    call->setCallingConv(cc);
    objcCast = call;
    resultValue = IGF.Builder.CreateBitCast(objcCast, resultType);
  }

  // If we don't need to look up any witness tables, we're done.
  if (witnessTableProtos.empty() && !checkClassConstraint) {
    ex.add(resultValue);
    return;
  }

  // If we're doing a conditional cast, and the ObjC protocol checks failed,
  // then the cast is done.
  Optional<ConditionalDominanceScope> condition;
  llvm::BasicBlock *origBB = nullptr, *successBB = nullptr, *contBB = nullptr;
  if (!objcProtos.empty()) {
    switch (mode) {
    case CheckedCastMode::Unconditional:
      break;
    case CheckedCastMode::Conditional: {
      origBB = IGF.Builder.GetInsertBlock();
      successBB = IGF.createBasicBlock("success");
      contBB = IGF.createBasicBlock("cont");
      auto isNull = IGF.Builder.CreateICmpEQ(objcCast,
                               llvm::ConstantPointerNull::get(
                                 cast<llvm::PointerType>(objcCast->getType())));
      IGF.Builder.CreateCondBr(isNull, contBB, successBB);
      IGF.Builder.emitBlock(successBB);
      condition.emplace(IGF);
    }
    }
  }

  // Get the Swift type metadata for the type.
  llvm::Value *metadataValue;
  if (metatypeKind) {
    switch (*metatypeKind) {
    case MetatypeRepresentation::Thin:
      llvm_unreachable("can't cast to thin metatype");
    case MetatypeRepresentation::Thick:
      // The value is already a native metatype.
      metadataValue = value;
      break;
    case MetatypeRepresentation::ObjC:
      // Get the type metadata from the ObjC class, which may be a wrapper.
      metadataValue = emitObjCMetadataRefForMetadata(IGF, value);
    }
  } else {
    // Get the type metadata for the instance.
    metadataValue = emitDynamicTypeOfHeapObject(IGF, value, srcType);
  }

  // Look up witness tables for the protocols that need them.
  auto fn = emitExistentialScalarCastFn(IGF.IGM,
                                        witnessTableProtos.size(),
                                        mode,
                                        checkClassConstraint,
                                        checkSuperclassConstraint);

  llvm::SmallVector<llvm::Value *, 4> args;

  if (resultValue->getType() != IGF.IGM.Int8PtrTy)
    resultValue = IGF.Builder.CreateBitCast(resultValue, IGF.IGM.Int8PtrTy);
  args.push_back(resultValue);

  args.push_back(metadataValue);

  if (checkSuperclassConstraint)
    args.push_back(IGF.emitTypeMetadataRef(CanType(layout.superclass)));

  for (auto proto : witnessTableProtos)
    args.push_back(proto);

  auto valueAndWitnessTables = IGF.Builder.CreateCall(fn, args);

  resultValue = IGF.Builder.CreateExtractValue(valueAndWitnessTables, 0);
  if (resultValue->getType() != resultType)
    resultValue = IGF.Builder.CreateBitCast(resultValue, resultType);
  ex.add(resultValue);

  for (unsigned i = 0, e = witnessTableProtos.size(); i < e; ++i) {
    auto wt = IGF.Builder.CreateExtractValue(valueAndWitnessTables, i + 1);
    ex.add(wt);
  }

  // If we had conditional ObjC checks, join the failure paths.
  if (contBB) {
    condition.reset();
    IGF.Builder.CreateBr(contBB);
    IGF.Builder.emitBlock(contBB);
    
    // Return null on the failure path.
    Explosion successEx = std::move(ex);
    ex.reset();
    
    while (!successEx.empty()) {
      auto successVal = successEx.claimNext();
      auto failureVal = llvm::Constant::getNullValue(successVal->getType());
      auto phi = IGF.Builder.CreatePHI(successVal->getType(), 2);
      phi->addIncoming(successVal, successBB);
      phi->addIncoming(failureVal, origBB);
      ex.add(phi);
    }
  }
}
Example #7
/// emitBuiltinCall - Emit a call to a builtin function.
void irgen::emitBuiltinCall(IRGenFunction &IGF, Identifier FnId,
                            SILType resultType,
                            Explosion &args, Explosion &out,
                            SubstitutionList substitutions) {
  // Decompose the function's name into a builtin name and type list.
  const BuiltinInfo &Builtin = IGF.getSILModule().getBuiltinInfo(FnId);

  if (Builtin.ID == BuiltinValueKind::UnsafeGuaranteedEnd) {
    // Just consume the incoming argument.
    assert(args.size() == 1 && "Expecting one incoming argument");
    (void)args.claimAll();
    return;
  }

  if (Builtin.ID == BuiltinValueKind::UnsafeGuaranteed) {
    // Just forward the incoming argument.
    assert(args.size() == 1 && "Expecting one incoming argument");
    out = std::move(args);
    // This is a token.
    out.add(llvm::ConstantInt::get(IGF.IGM.Int8Ty, 0));
    return;
  }

  if (Builtin.ID == BuiltinValueKind::OnFastPath) {
    // The onFastPath builtin has only an effect on SIL level, so we lower it
    // to a no-op.
    return;
  }

  // These builtins don't care about their argument:
  if (Builtin.ID == BuiltinValueKind::Sizeof) {
    (void)args.claimAll();
    auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                             substitutions[0].getReplacement());
    out.add(valueTy.second.getSize(IGF, valueTy.first));
    return;
  }

  if (Builtin.ID == BuiltinValueKind::Strideof) {
    (void)args.claimAll();
    auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                             substitutions[0].getReplacement());
    out.add(valueTy.second.getStride(IGF, valueTy.first));
    return;
  }

  if (Builtin.ID == BuiltinValueKind::Alignof) {
    (void)args.claimAll();
    auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                             substitutions[0].getReplacement());
    // The alignof value is one greater than the alignment mask.
    out.add(IGF.Builder.CreateAdd(
                           valueTy.second.getAlignmentMask(IGF, valueTy.first),
                           IGF.IGM.getSize(Size(1))));
    return;
  }

  if (Builtin.ID == BuiltinValueKind::IsPOD) {
    (void)args.claimAll();
    auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                             substitutions[0].getReplacement());
    out.add(valueTy.second.getIsPOD(IGF, valueTy.first));
    return;
  }


  // addressof expects an lvalue argument.
  if (Builtin.ID == BuiltinValueKind::AddressOf) {
    llvm::Value *address = args.claimNext();
    llvm::Value *value = IGF.Builder.CreateBitCast(address,
                                                   IGF.IGM.Int8PtrTy);
    out.add(value);
    return;
  }

  // Everything else cares about the (rvalue) argument.

  // If this is an LLVM IR intrinsic, lower it to an intrinsic call.
  const IntrinsicInfo &IInfo = IGF.getSILModule().getIntrinsicInfo(FnId);
  llvm::Intrinsic::ID IID = IInfo.ID;

  // Calls to the int_instrprof_increment intrinsic are emitted during SILGen.
  // At that stage, the function name GV used by the profiling pass is hidden.
  // Fix the intrinsic call here by pointing it to the correct GV.
  if (IID == llvm::Intrinsic::instrprof_increment) {
    // Extract the PGO function name.
    auto *NameGEP = cast<llvm::User>(args.claimNext());
    auto *NameGV = dyn_cast<llvm::GlobalVariable>(NameGEP->stripPointerCasts());
    if (NameGV) {
      auto *NameC = NameGV->getInitializer();
      StringRef Name = cast<llvm::ConstantDataArray>(NameC)->getRawDataValues();
      StringRef PGOFuncName = Name.rtrim(StringRef("\0", 1));

      // Point the increment call to the right function name variable.
      std::string PGOFuncNameVar = llvm::getPGOFuncNameVarName(
          PGOFuncName, llvm::GlobalValue::LinkOnceAnyLinkage);
      auto *FuncNamePtr = IGF.IGM.Module.getNamedGlobal(PGOFuncNameVar);

      if (FuncNamePtr) {
        llvm::SmallVector<llvm::Value *, 2> Indices(2, NameGEP->getOperand(1));
        NameGEP = llvm::ConstantExpr::getGetElementPtr(
            ((llvm::PointerType *)FuncNamePtr->getType())->getElementType(),
            FuncNamePtr, makeArrayRef(Indices));
      }
    }

    // Replace the placeholder value with the new GEP.
    Explosion replacement;
    replacement.add(NameGEP);
    replacement.add(args.claimAll());
    args = std::move(replacement);
  }

  if (IID != llvm::Intrinsic::not_intrinsic) {
    SmallVector<llvm::Type*, 4> ArgTys;
    for (auto T : IInfo.Types)
      ArgTys.push_back(IGF.IGM.getStorageTypeForLowered(T->getCanonicalType()));
      
    auto F = llvm::Intrinsic::getDeclaration(&IGF.IGM.Module,
                                             (llvm::Intrinsic::ID)IID, ArgTys);
    llvm::FunctionType *FT = F->getFunctionType();
    SmallVector<llvm::Value*, 8> IRArgs;
    for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i)
      IRArgs.push_back(args.claimNext());
    llvm::Value *TheCall = IGF.Builder.CreateCall(F, IRArgs);

    if (!TheCall->getType()->isVoidTy())
      extractScalarResults(IGF, TheCall->getType(), TheCall, out);

    return;
  }

  // TODO: A linear series of ifs is suboptimal.
#define BUILTIN_SIL_OPERATION(id, name, overload) \
  if (Builtin.ID == BuiltinValueKind::id) \
    llvm_unreachable(name " builtin should be lowered away by SILGen!");

#define BUILTIN_CAST_OPERATION(id, name, attrs) \
  if (Builtin.ID == BuiltinValueKind::id) \
    return emitCastBuiltin(IGF, resultType, out, args, \
                           llvm::Instruction::id);

#define BUILTIN_CAST_OR_BITCAST_OPERATION(id, name, attrs) \
  if (Builtin.ID == BuiltinValueKind::id) \
    return emitCastOrBitCastBuiltin(IGF, resultType, out, args, \
                                    BuiltinValueKind::id);
  
#define BUILTIN_BINARY_OPERATION(id, name, attrs, overload) \
  if (Builtin.ID == BuiltinValueKind::id) { \
    llvm::Value *lhs = args.claimNext(); \
    llvm::Value *rhs = args.claimNext(); \
    llvm::Value *v = IGF.Builder.Create##id(lhs, rhs); \
    return out.add(v); \
  }

#define BUILTIN_RUNTIME_CALL(id, name, attrs) \
  if (Builtin.ID == BuiltinValueKind::id) { \
    llvm::CallInst *call = IGF.Builder.CreateCall(IGF.IGM.get##id##Fn(),  \
                           args.claimNext()); \
    call->setCallingConv(IGF.IGM.DefaultCC); \
    call->setDoesNotThrow(); \
    return out.add(call); \
 }

#define BUILTIN_BINARY_OPERATION_WITH_OVERFLOW(id, name, uncheckedID, attrs, overload) \
if (Builtin.ID == BuiltinValueKind::id) { \
  SmallVector<llvm::Type*, 2> ArgTys; \
  auto opType = Builtin.Types[0]->getCanonicalType(); \
  ArgTys.push_back(IGF.IGM.getStorageTypeForLowered(opType)); \
  auto F = llvm::Intrinsic::getDeclaration(&IGF.IGM.Module, \
    getLLVMIntrinsicIDForBuiltinWithOverflow(Builtin.ID), ArgTys); \
  SmallVector<llvm::Value*, 2> IRArgs; \
  IRArgs.push_back(args.claimNext()); \
  IRArgs.push_back(args.claimNext()); \
  args.claimNext();\
  llvm::Value *TheCall = IGF.Builder.CreateCall(F, IRArgs); \
  extractScalarResults(IGF, TheCall->getType(), TheCall, out);  \
  return; \
}
  // FIXME: We could generate the code to dynamically report the overflow if the
  // third argument is true. Now, we just ignore it.

#define BUILTIN_BINARY_PREDICATE(id, name, attrs, overload) \
  if (Builtin.ID == BuiltinValueKind::id) \
    return emitCompareBuiltin(IGF, out, args, llvm::CmpInst::id);
  
#define BUILTIN_TYPE_TRAIT_OPERATION(id, name) \
  if (Builtin.ID == BuiltinValueKind::id) \
    return emitTypeTraitBuiltin(IGF, out, args, substitutions, &TypeBase::name);
  
#define BUILTIN(ID, Name, Attrs)  // Ignore the rest.
#include "swift/AST/Builtins.def"

  if (Builtin.ID == BuiltinValueKind::FNeg) {
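    // Negate by subtracting from -0.0, which handles signed zeros correctly.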
    llvm::Value *rhs = args.claimNext();
    llvm::Value *lhs = llvm::ConstantFP::get(rhs->getType(), "-0.0");
    llvm::Value *v = IGF.Builder.CreateFSub(lhs, rhs);
    return out.add(v);
  }
  
  if (Builtin.ID == BuiltinValueKind::AssumeNonNegative) {
    llvm::Value *v = args.claimNext();
    // Set a value range on the load instruction, which must be the argument of
    // the builtin.
    if (isa<llvm::LoadInst>(v) || isa<llvm::CallInst>(v)) {
      // The load must be post-dominated by the builtin. Otherwise we would get
      // a wrong assumption in the else-branch in this example:
      //    x = f()
      //    if condition {
      //      y = assumeNonNegative(x)
      //    } else {
      //      // x might be negative here!
      //    }
      // For simplicity we just enforce that both the load and the builtin must
      // be in the same block.
      llvm::Instruction *I = static_cast<llvm::Instruction *>(v);
      if (I->getParent() == IGF.Builder.GetInsertBlock()) {
        llvm::LLVMContext &ctx = IGF.IGM.Module.getContext();
        auto *intType = dyn_cast<llvm::IntegerType>(v->getType());
        llvm::Metadata *rangeElems[] = {
          llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(intType, 0)),
          llvm::ConstantAsMetadata::get(
              llvm::ConstantInt::get(intType,
                  APInt::getSignedMaxValue(intType->getBitWidth())))
        };
        llvm::MDNode *range = llvm::MDNode::get(ctx, rangeElems);
        I->setMetadata(llvm::LLVMContext::MD_range, range);
      }
    }
    // Don't generate any code for the builtin.
    return out.add(v);
  }
  
  if (Builtin.ID == BuiltinValueKind::AllocRaw) {
    auto size = args.claimNext();
    auto align = args.claimNext();
    // Translate the alignment to a mask.
    auto alignMask = IGF.Builder.CreateSub(align, IGF.IGM.getSize(Size(1)));
    auto alloc = IGF.emitAllocRawCall(size, alignMask, "builtin-allocRaw");
    out.add(alloc);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::DeallocRaw) {
    auto pointer = args.claimNext();
    auto size = args.claimNext();
    auto align = args.claimNext();
    // Translate the alignment to a mask.
    auto alignMask = IGF.Builder.CreateSub(align, IGF.IGM.getSize(Size(1)));
    IGF.emitDeallocRawCall(pointer, size, alignMask);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::Fence) {
    SmallVector<Type, 4> Types;
    StringRef BuiltinName =
      getBuiltinBaseName(IGF.IGM.Context, FnId.str(), Types);
    BuiltinName = BuiltinName.drop_front(strlen("fence_"));
    // Decode the ordering argument, which is required.
    auto underscore = BuiltinName.find('_');
    auto ordering = decodeLLVMAtomicOrdering(BuiltinName.substr(0, underscore));
    assert(ordering != llvm::AtomicOrdering::NotAtomic);
    BuiltinName = BuiltinName.substr(underscore);
    
    // Accept singlethread if present.
    bool isSingleThread = BuiltinName.startswith("_singlethread");
    if (isSingleThread)
      BuiltinName = BuiltinName.drop_front(strlen("_singlethread"));
    assert(BuiltinName.empty() && "Mismatch with sema");
    
    IGF.Builder.CreateFence(ordering, isSingleThread
                                          ? llvm::SyncScope::SingleThread
                                          : llvm::SyncScope::System);
    return;
  }

  
  if (Builtin.ID == BuiltinValueKind::CmpXChg) {
    SmallVector<Type, 4> Types;
    StringRef BuiltinName =
      getBuiltinBaseName(IGF.IGM.Context, FnId.str(), Types);
    BuiltinName = BuiltinName.drop_front(strlen("cmpxchg_"));

    // Decode the success- and failure-ordering arguments, which are required.
    SmallVector<StringRef, 4> Parts;
    BuiltinName.split(Parts, "_");
    assert(Parts.size() >= 2 && "Mismatch with sema");
    auto successOrdering = decodeLLVMAtomicOrdering(Parts[0]);
    auto failureOrdering = decodeLLVMAtomicOrdering(Parts[1]);
    assert(successOrdering != llvm::AtomicOrdering::NotAtomic);
    assert(failureOrdering != llvm::AtomicOrdering::NotAtomic);
    auto NextPart = Parts.begin() + 2;

    // Accept weak, volatile, and singlethread if present.
    bool isWeak = false, isVolatile = false, isSingleThread = false;
    if (NextPart != Parts.end() && *NextPart == "weak") {
      isWeak = true;
      NextPart++;
    }
    if (NextPart != Parts.end() && *NextPart == "volatile") {
      isVolatile = true;
      NextPart++;
    }
    if (NextPart != Parts.end() && *NextPart == "singlethread") {
      isSingleThread = true;
      NextPart++;
    }
    assert(NextPart == Parts.end() && "Mismatch with sema");

    auto pointer = args.claimNext();
    auto cmp = args.claimNext();
    auto newval = args.claimNext();

    llvm::Type *origTy = cmp->getType();
    if (origTy->isPointerTy()) {
      cmp = IGF.Builder.CreatePtrToInt(cmp, IGF.IGM.IntPtrTy);
      newval = IGF.Builder.CreatePtrToInt(newval, IGF.IGM.IntPtrTy);
    }

    pointer = IGF.Builder.CreateBitCast(pointer,
                                  llvm::PointerType::getUnqual(cmp->getType()));
    llvm::Value *value = IGF.Builder.CreateAtomicCmpXchg(
        pointer, cmp, newval, successOrdering, failureOrdering,
        isSingleThread ? llvm::SyncScope::SingleThread
                       : llvm::SyncScope::System);
    cast<llvm::AtomicCmpXchgInst>(value)->setVolatile(isVolatile);
    cast<llvm::AtomicCmpXchgInst>(value)->setWeak(isWeak);

    auto valueLoaded = IGF.Builder.CreateExtractValue(value, {0});
    auto loadSuccessful = IGF.Builder.CreateExtractValue(value, {1});

    if (origTy->isPointerTy())
      valueLoaded = IGF.Builder.CreateIntToPtr(valueLoaded, origTy);

    out.add(valueLoaded);
    out.add(loadSuccessful);

    return;
  }
  
  if (Builtin.ID == BuiltinValueKind::AtomicRMW) {
    using namespace llvm;

    SmallVector<Type, 4> Types;
    StringRef BuiltinName = getBuiltinBaseName(IGF.IGM.Context,
                                               FnId.str(), Types);
    BuiltinName = BuiltinName.drop_front(strlen("atomicrmw_"));
    auto underscore = BuiltinName.find('_');
    StringRef SubOp = BuiltinName.substr(0, underscore);
    
    AtomicRMWInst::BinOp SubOpcode = StringSwitch<AtomicRMWInst::BinOp>(SubOp)
      .Case("xchg", AtomicRMWInst::Xchg)
      .Case("add",  AtomicRMWInst::Add)
      .Case("sub",  AtomicRMWInst::Sub)
      .Case("and",  AtomicRMWInst::And)
      .Case("nand", AtomicRMWInst::Nand)
      .Case("or",   AtomicRMWInst::Or)
      .Case("xor",  AtomicRMWInst::Xor)
      .Case("max",  AtomicRMWInst::Max)
      .Case("min",  AtomicRMWInst::Min)
      .Case("umax", AtomicRMWInst::UMax)
      .Case("umin", AtomicRMWInst::UMin);
    BuiltinName = BuiltinName.drop_front(underscore+1);
    
    // Decode the ordering argument, which is required.
    underscore = BuiltinName.find('_');
    auto ordering = decodeLLVMAtomicOrdering(BuiltinName.substr(0, underscore));
    assert(ordering != llvm::AtomicOrdering::NotAtomic);
    BuiltinName = BuiltinName.substr(underscore);
    
    // Accept volatile and singlethread if present.
    bool isVolatile = BuiltinName.startswith("_volatile");
    if (isVolatile) BuiltinName = BuiltinName.drop_front(strlen("_volatile"));
    
    bool isSingleThread = BuiltinName.startswith("_singlethread");
    if (isSingleThread)
      BuiltinName = BuiltinName.drop_front(strlen("_singlethread"));
    assert(BuiltinName.empty() && "Mismatch with sema");
    
    auto pointer = args.claimNext();
    auto val = args.claimNext();

    // Handle atomic ops on pointers by casting to intptr_t.
    llvm::Type *origTy = val->getType();
    if (origTy->isPointerTy())
      val = IGF.Builder.CreatePtrToInt(val, IGF.IGM.IntPtrTy);

    pointer = IGF.Builder.CreateBitCast(pointer,
                                  llvm::PointerType::getUnqual(val->getType()));
    llvm::Value *value = IGF.Builder.CreateAtomicRMW(
        SubOpcode, pointer, val, ordering,
        isSingleThread ? llvm::SyncScope::SingleThread
                       : llvm::SyncScope::System);
    cast<AtomicRMWInst>(value)->setVolatile(isVolatile);

    if (origTy->isPointerTy())
      value = IGF.Builder.CreateIntToPtr(value, origTy);

    out.add(value);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::AtomicLoad
      || Builtin.ID == BuiltinValueKind::AtomicStore) {
    using namespace llvm;

    SmallVector<Type, 4> Types;
    StringRef BuiltinName = getBuiltinBaseName(IGF.IGM.Context,
                                               FnId.str(), Types);
    auto underscore = BuiltinName.find('_');
    BuiltinName = BuiltinName.substr(underscore+1);

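    // Decode the ordering argument, which is required.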
    underscore = BuiltinName.find('_');
    auto ordering = decodeLLVMAtomicOrdering(BuiltinName.substr(0, underscore));
    assert(ordering != llvm::AtomicOrdering::NotAtomic);
    BuiltinName = BuiltinName.substr(underscore);

    // Accept volatile and singlethread if present.
    bool isVolatile = BuiltinName.startswith("_volatile");
    if (isVolatile) BuiltinName = BuiltinName.drop_front(strlen("_volatile"));

    bool isSingleThread = BuiltinName.startswith("_singlethread");
    if (isSingleThread)
      BuiltinName = BuiltinName.drop_front(strlen("_singlethread"));
    assert(BuiltinName.empty() && "Mismatch with sema");

    auto pointer = args.claimNext();
    auto &valueTI = IGF.getTypeInfoForUnlowered(Types[0]);
    auto schema = valueTI.getSchema();
    assert(schema.size() == 1 && "not a scalar type?!");
    auto origValueTy = schema[0].getScalarType();

    // If the type is floating-point, then we need to bitcast to integer.
    auto valueTy = origValueTy;
    if (valueTy->isFloatingPointTy()) {
      valueTy = llvm::IntegerType::get(IGF.IGM.LLVMContext,
                                       valueTy->getPrimitiveSizeInBits());
    }

    pointer = IGF.Builder.CreateBitCast(pointer, valueTy->getPointerTo());

    if (Builtin.ID == BuiltinValueKind::AtomicLoad) {
      auto load = IGF.Builder.CreateLoad(pointer,
                                         valueTI.getBestKnownAlignment());
      load->setAtomic(ordering, isSingleThread ? llvm::SyncScope::SingleThread
                                               : llvm::SyncScope::System);
      load->setVolatile(isVolatile);

      llvm::Value *value = load;
      if (valueTy != origValueTy)
        value = IGF.Builder.CreateBitCast(value, origValueTy);
      out.add(value);
      return;
    } else if (Builtin.ID == BuiltinValueKind::AtomicStore) {
      llvm::Value *value = args.claimNext();
      if (valueTy != origValueTy)
        value = IGF.Builder.CreateBitCast(value, valueTy);
      auto store = IGF.Builder.CreateStore(value, pointer,
                                           valueTI.getBestKnownAlignment());
      store->setAtomic(ordering, isSingleThread ? llvm::SyncScope::SingleThread
                                                : llvm::SyncScope::System);
      store->setVolatile(isVolatile);
      return;
    } else {
      llvm_unreachable("out of sync with outer conditional");
    }
  }

  if (Builtin.ID == BuiltinValueKind::ExtractElement) {
    using namespace llvm;

    auto vector = args.claimNext();
    auto index = args.claimNext();
    out.add(IGF.Builder.CreateExtractElement(vector, index));
    return;
  }

  if (Builtin.ID == BuiltinValueKind::InsertElement) {
    using namespace llvm;

    auto vector = args.claimNext();
    auto newValue = args.claimNext();
    auto index = args.claimNext();
    out.add(IGF.Builder.CreateInsertElement(vector, newValue, index));
    return;
  }

  if (Builtin.ID == BuiltinValueKind::SToSCheckedTrunc ||
      Builtin.ID == BuiltinValueKind::UToUCheckedTrunc ||
      Builtin.ID == BuiltinValueKind::SToUCheckedTrunc) {
    auto FromTy =
      IGF.IGM.getStorageTypeForLowered(Builtin.Types[0]->getCanonicalType());
    auto ToTy =
      IGF.IGM.getStorageTypeForLowered(Builtin.Types[1]->getCanonicalType());

    // Compute the result for SToSCheckedTrunc_IntFrom_IntTo(Arg):
    //   Res = trunc_IntTo(Arg)
    //   Ext = sext_IntFrom(Res)
    //   OverflowFlag = (Arg == Ext) ? 0 : 1
    //   return (Res, OverflowFlag)
    //
    // Compute the result for UToUCheckedTrunc_IntFrom_IntTo(Arg)
    // and SToUCheckedTrunc_IntFrom_IntTo(Arg):
    //   Res = trunc_IntTo(Arg)
    //   Ext = zext_IntFrom(Res)
    //   OverflowFlag = (Arg == Ext) ? 0 : 1
    //   return (Res, OverflowFlag)
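    //
    // Worked example (illustrative): SToSCheckedTrunc_Int32_Int8 on Arg = 300:
    //   Res = trunc(300) = 44 (0x12C loses its high bits, leaving 0x2C)
    //   Ext = sext(44)   = 44
    //   44 != 300, so OverflowFlag = 1.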
    llvm::Value *Arg = args.claimNext();
    llvm::Value *Res = IGF.Builder.CreateTrunc(Arg, ToTy);
    bool Signed = (Builtin.ID == BuiltinValueKind::SToSCheckedTrunc);
    llvm::Value *Ext = Signed ? IGF.Builder.CreateSExt(Res, FromTy) :
                                IGF.Builder.CreateZExt(Res, FromTy);
    llvm::Value *OverflowCond = IGF.Builder.CreateICmpEQ(Arg, Ext);
    llvm::Value *OverflowFlag = IGF.Builder.CreateSelect(OverflowCond,
                                  llvm::ConstantInt::get(IGF.IGM.Int1Ty, 0),
                                  llvm::ConstantInt::get(IGF.IGM.Int1Ty, 1));
    // Return the tuple: (the result, the overflow flag).
    out.add(Res);
    return out.add(OverflowFlag);
  }

  if (Builtin.ID == BuiltinValueKind::UToSCheckedTrunc) {
    auto FromTy =
      IGF.IGM.getStorageTypeForLowered(Builtin.Types[0]->getCanonicalType());
    auto ToTy =
      IGF.IGM.getStorageTypeForLowered(Builtin.Types[1]->getCanonicalType());
    llvm::Type *ToMinusOneTy =
      llvm::Type::getIntNTy(ToTy->getContext(), ToTy->getIntegerBitWidth() - 1);

    // Compute the result for UToSCheckedTrunc_IntFrom_IntTo(Arg):
    //   Res = trunc_IntTo(Arg)
    //   Trunc = trunc_'IntTo-1bit'(Arg)
    //   Ext = zext_IntFrom(Trunc)
    //   OverflowFlag = (Arg == Ext) ? 0 : 1
    //   return (Res, OverflowFlag)
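    //
    // Worked example (illustrative): UToSCheckedTrunc_Int32_Int8 on Arg = 200:
    //   Res   = trunc to 8 bits = 0xC8 (-56 when read as a signed Int8)
    //   Trunc = trunc to 7 bits = 0x48
    //   Ext   = zext(0x48)      = 72
    //   72 != 200, so OverflowFlag = 1 (200 exceeds Int8.max = 127).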
    llvm::Value *Arg = args.claimNext();
    llvm::Value *Res = IGF.Builder.CreateTrunc(Arg, ToTy);
    llvm::Value *Trunc = IGF.Builder.CreateTrunc(Arg, ToMinusOneTy);
    llvm::Value *Ext = IGF.Builder.CreateZExt(Trunc, FromTy);
    llvm::Value *OverflowCond = IGF.Builder.CreateICmpEQ(Arg, Ext);
    llvm::Value *OverflowFlag = IGF.Builder.CreateSelect(OverflowCond,
                                  llvm::ConstantInt::get(IGF.IGM.Int1Ty, 0),
                                  llvm::ConstantInt::get(IGF.IGM.Int1Ty, 1));
    // Return the tuple: (the result, the overflow flag).
    out.add(Res);
    return out.add(OverflowFlag);
  }

  if (Builtin.ID == BuiltinValueKind::SUCheckedConversion ||
      Builtin.ID == BuiltinValueKind::USCheckedConversion) {
    auto Ty =
      IGF.IGM.getStorageTypeForLowered(Builtin.Types[0]->getCanonicalType());

    // Report a sign error if the input parameter is a negative number, when
    // interpreted as signed.
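    // E.g. (illustrative): an Int8 argument with bit pattern 0x80 is -128 when
    // read as signed, so the flag is set; such a value changes meaning when
    // its signedness is reinterpreted, in either direction.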
    llvm::Value *Arg = args.claimNext();
    llvm::Value *Zero = llvm::ConstantInt::get(Ty, 0);
    llvm::Value *OverflowFlag = IGF.Builder.CreateICmpSLT(Arg, Zero);

    // Return the tuple: (the result (same as input), the overflow flag).
    out.add(Arg);
    return out.add(OverflowFlag);
  }

  // We are currently emitting code for '_convertFromBuiltinIntegerLiteral',
  // which calls this builtin with a parameter that is not a compile-time
  // constant.
  if (Builtin.ID == BuiltinValueKind::IntToFPWithOverflow) {
    auto ToTy =
      IGF.IGM.getStorageTypeForLowered(Builtin.Types[1]->getCanonicalType());
    llvm::Value *Arg = args.claimNext();
    unsigned bitSize = Arg->getType()->getScalarSizeInBits();
    if (bitSize > 64) {
      // TODO: the integer literal bit size is 2048, but we only have a 64-bit
      // conversion function available (on all platforms).
      Arg = IGF.Builder.CreateTrunc(Arg, IGF.IGM.Int64Ty);
    } else if (bitSize < 64) {
      // Just for completeness; IntToFPWithOverflow is currently only used to
      // convert 2048-bit integer literals.
      Arg = IGF.Builder.CreateSExt(Arg, IGF.IGM.Int64Ty);
    }
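    // E.g. (illustrative): a 2048-bit literal holding the value 1 becomes the
    // i64 value 1 and converts exactly; a literal that does not fit in 64 bits
    // would lose its high bits in the trunc above (see the TODO).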
    llvm::Value *V = IGF.Builder.CreateSIToFP(Arg, ToTy);
    return out.add(V);
  }

  if (Builtin.ID == BuiltinValueKind::Once
      || Builtin.ID == BuiltinValueKind::OnceWithContext) {
    // The input type is statically (Builtin.RawPointer, @convention(thin) () -> ());
    // the OnceWithContext form passes an additional Builtin.RawPointer context.
    llvm::Value *PredPtr = args.claimNext();
    // Cast the predicate to a OnceTy pointer.
    PredPtr = IGF.Builder.CreateBitCast(PredPtr, IGF.IGM.OnceTy->getPointerTo());
    llvm::Value *FnCode = args.claimNext();
    // Get the context if any.
    llvm::Value *Context;
    if (Builtin.ID == BuiltinValueKind::OnceWithContext) {
      Context = args.claimNext();
    } else {
      Context = llvm::UndefValue::get(IGF.IGM.Int8PtrTy);
    }
    
    // If we know the platform runtime's "done" value, emit the check inline.
    llvm::BasicBlock *doneBB = nullptr;

    if (auto ExpectedPred = IGF.IGM.TargetInfo.OnceDonePredicateValue) {
      auto PredValue = IGF.Builder.CreateLoad(PredPtr,
                                              IGF.IGM.getPointerAlignment());
      auto ExpectedPredValue = llvm::ConstantInt::getSigned(IGF.IGM.OnceTy,
                                                            *ExpectedPred);
      auto PredIsDone = IGF.Builder.CreateICmpEQ(PredValue, ExpectedPredValue);
      
      auto notDoneBB = IGF.createBasicBlock("once_not_done");
      doneBB = IGF.createBasicBlock("once_done");
      
      IGF.Builder.CreateCondBr(PredIsDone, doneBB, notDoneBB);
      IGF.Builder.emitBlock(notDoneBB);
    }
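    // Overall shape when the "done" value is known (illustrative sketch):
    //   entry:    %pred = load %PredPtr; br (%pred == DONE), done, not_done
    //   not_done: call once-fn(%PredPtr, %FnCode, %Context); br done
    //   done:     llvm.assume(reloaded %pred == DONE)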
    
    // Emit the runtime "once" call.
    auto call
      = IGF.Builder.CreateCall(IGF.IGM.getOnceFn(), {PredPtr, FnCode, Context});
    call->setCallingConv(IGF.IGM.DefaultCC);
    
    // If we emitted the "done" check inline, join the branches.
    if (auto ExpectedPred = IGF.IGM.TargetInfo.OnceDonePredicateValue) {
      IGF.Builder.CreateBr(doneBB);
      IGF.Builder.emitBlock(doneBB);
      // We can assume the once predicate is in the "done" state now.
      auto PredValue = IGF.Builder.CreateLoad(PredPtr,
                                              IGF.IGM.getPointerAlignment());
      auto ExpectedPredValue = llvm::ConstantInt::getSigned(IGF.IGM.OnceTy,
                                                            *ExpectedPred);
      auto PredIsDone = IGF.Builder.CreateICmpEQ(PredValue, ExpectedPredValue);

      IGF.Builder.CreateAssumption(PredIsDone);
    }
    
    // No return value.
    return;
  }

  if (Builtin.ID == BuiltinValueKind::AssertConf) {
    // Replace the call to assert_configuration with the Debug configuration
    // value.
    // TODO: assert(IGF.IGM.getOptions().AssertConfig ==
    //              SILOptions::DisableReplacement);
    // Make sure this only happens in a mode where we build a library dylib.

    llvm::Value *DebugAssert = IGF.Builder.getInt32(SILOptions::Debug);
    out.add(DebugAssert);
    return;
  }
  
  if (Builtin.ID == BuiltinValueKind::DestroyArray) {
    // The input type is (T.Type, Builtin.RawPointer, Builtin.Word).
    // Claim the metatype argument if it was actually passed; a thin metatype
    // contributes no value to the explosion.
    if (args.size() == 3)
      args.claimNext();
    llvm::Value *ptr = args.claimNext();
    llvm::Value *count = args.claimNext();
    
    auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                             substitutions[0].getReplacement());
    
    ptr = IGF.Builder.CreateBitCast(ptr,
                              valueTy.second.getStorageType()->getPointerTo());
    Address array = valueTy.second.getAddressForPointer(ptr);
    valueTy.second.destroyArray(IGF, array, count, valueTy.first);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::CopyArray ||
      Builtin.ID == BuiltinValueKind::TakeArrayNoAlias ||
      Builtin.ID == BuiltinValueKind::TakeArrayFrontToBack ||
      Builtin.ID == BuiltinValueKind::TakeArrayBackToFront ||
      Builtin.ID == BuiltinValueKind::AssignCopyArrayNoAlias ||
      Builtin.ID == BuiltinValueKind::AssignCopyArrayFrontToBack ||
      Builtin.ID == BuiltinValueKind::AssignCopyArrayBackToFront ||
      Builtin.ID == BuiltinValueKind::AssignTakeArray) {
    // The input type is (T.Type, Builtin.RawPointer, Builtin.RawPointer, Builtin.Word).
    // Claim the metatype argument if it was actually passed; a thin metatype
    // contributes no value to the explosion.
    if (args.size() == 4)
      args.claimNext();
    llvm::Value *dest = args.claimNext();
    llvm::Value *src = args.claimNext();
    llvm::Value *count = args.claimNext();
    
    auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                             substitutions[0].getReplacement());
    
    dest = IGF.Builder.CreateBitCast(dest,
                               valueTy.second.getStorageType()->getPointerTo());
    src = IGF.Builder.CreateBitCast(src,
                               valueTy.second.getStorageType()->getPointerTo());
    Address destArray = valueTy.second.getAddressForPointer(dest);
    Address srcArray = valueTy.second.getAddressForPointer(src);
    
    switch (Builtin.ID) {
    case BuiltinValueKind::CopyArray:
      valueTy.second.initializeArrayWithCopy(IGF, destArray, srcArray, count,
                                             valueTy.first);
      break;
    case BuiltinValueKind::TakeArrayNoAlias:
      valueTy.second.initializeArrayWithTakeNoAlias(IGF, destArray, srcArray,
                                                    count, valueTy.first);
      break;
    case BuiltinValueKind::TakeArrayFrontToBack:
      valueTy.second.initializeArrayWithTakeFrontToBack(IGF, destArray, srcArray,
                                                        count, valueTy.first);
      break;
    case BuiltinValueKind::TakeArrayBackToFront:
      valueTy.second.initializeArrayWithTakeBackToFront(IGF, destArray, srcArray,
                                                        count, valueTy.first);
      break;
    case BuiltinValueKind::AssignCopyArrayNoAlias:
      valueTy.second.assignArrayWithCopyNoAlias(IGF, destArray, srcArray, count,
                                                valueTy.first);
      break;
    case BuiltinValueKind::AssignCopyArrayFrontToBack:
      valueTy.second.assignArrayWithCopyFrontToBack(IGF, destArray, srcArray,
                                                    count, valueTy.first);
      break;
    case BuiltinValueKind::AssignCopyArrayBackToFront:
      valueTy.second.assignArrayWithCopyBackToFront(IGF, destArray, srcArray,
                                                    count, valueTy.first);
      break;
    case BuiltinValueKind::AssignTakeArray:
      valueTy.second.assignArrayWithTake(IGF, destArray, srcArray, count,
                                         valueTy.first);
      break;
    default:
      llvm_unreachable("out of sync with if condition");
    }
    return;
  }
  
  if (Builtin.ID == BuiltinValueKind::CondUnreachable) {
    // conditionallyUnreachable is a no-op by itself. Since it's noreturn, there
    // should be a true unreachable terminator right after.
    return;
  }
  
  if (Builtin.ID == BuiltinValueKind::ZeroInitializer) {
    // Build a zero initializer of the result type.
    auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                             substitutions[0].getReplacement());
    auto schema = valueTy.second.getSchema();
    for (auto &elt : schema) {
      out.add(llvm::Constant::getNullValue(elt.getScalarType()));
    }
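    // E.g. (illustrative): a type that explodes to the schema (i64, float)
    // yields the constants i64 0 and float 0.0 here.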
    return;
  }
  
  if (Builtin.ID == BuiltinValueKind::GetObjCTypeEncoding) {
    (void)args.claimAll();
    Type valueTy = substitutions[0].getReplacement();
    // Get the type encoding for the associated clang type.
    auto clangTy = IGF.IGM.getClangType(valueTy->getCanonicalType());
    std::string encoding;
    IGF.IGM.getClangASTContext().getObjCEncodingForType(clangTy, encoding);
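    // E.g. (illustrative): a type whose Clang counterpart is a C 'int'
    // produces the @encode string "i", emitted below as a global C string.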
    
    auto globalString = IGF.IGM.getAddrOfGlobalString(encoding);
    out.add(globalString);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::TSanInoutAccess) {
    auto address = args.claimNext();
    IGF.emitTSanInoutAccessCall(address);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::Swift3ImplicitObjCEntrypoint) {
    llvm::Value *entrypointArgs[7];
    auto argIter = IGF.CurFn->arg_begin();

    // self
    entrypointArgs[0] = &*argIter++;
    if (entrypointArgs[0]->getType() != IGF.IGM.ObjCPtrTy)
      entrypointArgs[0] = IGF.Builder.CreateBitCast(entrypointArgs[0], IGF.IGM.ObjCPtrTy);

    // _cmd
    entrypointArgs[1] = &*argIter;
    if (entrypointArgs[1]->getType() != IGF.IGM.ObjCSELTy)
      entrypointArgs[1] = IGF.Builder.CreateBitCast(entrypointArgs[1], IGF.IGM.ObjCSELTy);
    
    // Filename pointer
    entrypointArgs[2] = args.claimNext();
    // Filename length
    entrypointArgs[3] = args.claimNext();
    // Line
    entrypointArgs[4] = args.claimNext();
    // Column
    entrypointArgs[5] = args.claimNext();
    
    // Create a flag variable so that this invocation logs only once.
    auto flagStorageTy = llvm::ArrayType::get(IGF.IGM.Int8Ty,
                                        IGF.IGM.getAtomicBoolSize().getValue());
    auto flag = new llvm::GlobalVariable(IGF.IGM.Module, flagStorageTy,
                               /*constant*/ false,
                               llvm::GlobalValue::PrivateLinkage,
                               llvm::ConstantAggregateZero::get(flagStorageTy));
    flag->setAlignment(IGF.IGM.getAtomicBoolAlignment().getValue());
    entrypointArgs[6] = llvm::ConstantExpr::getBitCast(flag, IGF.IGM.Int8PtrTy);

    IGF.Builder.CreateCall(IGF.IGM.getSwift3ImplicitObjCEntrypointFn(),
                           entrypointArgs);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::IsSameMetatype) {
    auto metatypeLHS = args.claimNext();
    auto metatypeRHS = args.claimNext();
    (void)args.claimAll();
    llvm::Value *metatypeLHSCasted =
        IGF.Builder.CreateBitCast(metatypeLHS, IGF.IGM.Int8PtrTy);
    llvm::Value *metatypeRHSCasted =
        IGF.Builder.CreateBitCast(metatypeRHS, IGF.IGM.Int8PtrTy);

    out.add(IGF.Builder.CreateICmpEQ(metatypeLHSCasted, metatypeRHSCasted));
    return;
  }

  llvm_unreachable("IRGen unimplemented for this builtin!");
}