/// Combine counts of regions which cover the same area. static ArrayRef<CountedRegion> combineRegions(MutableArrayRef<CountedRegion> Regions) { if (Regions.empty()) return Regions; auto Active = Regions.begin(); auto End = Regions.end(); for (auto I = Regions.begin() + 1; I != End; ++I) { if (Active->startLoc() != I->startLoc() || Active->endLoc() != I->endLoc()) { // Shift to the next region. ++Active; if (Active != I) *Active = *I; continue; } // Merge duplicate region. // If CodeRegions and ExpansionRegions cover the same area, it's probably // a macro which is fully expanded to another macro. In that case, we need // to accumulate counts only from CodeRegions, or else the area will be // counted twice. // On the other hand, a macro may have a nested macro in its body. If the // outer macro is used several times, the ExpansionRegion for the nested // macro will also be added several times. These ExpansionRegions cover // the same source locations and have to be combined to reach the correct // value for that area. // We add counts of the regions of the same kind as the active region // to handle the both situations. if (I->Kind == Active->Kind) Active->ExecutionCount += I->ExecutionCount; } return Regions.drop_back(std::distance(++Active, End)); }
/// Read Buffer.size() bytes starting at Offset from the underlying data.
///
/// \returns errc::bad_address when the requested range
/// [Offset, Offset + Buffer.size()) does not lie entirely inside Data.
std::error_code ByteStream::readBytes(uint32_t Offset,
                                      MutableArrayRef<uint8_t> Buffer) const {
  // Check the two bounds separately so the arithmetic cannot wrap: the
  // original single check computed `Buffer.size() + Offset`, which can
  // overflow for a pathologically large buffer and wrongly pass.
  if (Buffer.size() > Data.size())
    return std::make_error_code(std::errc::bad_address);
  if (Offset > Data.size() - Buffer.size())
    return std::make_error_code(std::errc::bad_address);
  ::memcpy(Buffer.data(), Data.data() + Offset, Buffer.size());
  return std::error_code();
}
Error MappedBlockStream::readBytes(uint32_t Offset, MutableArrayRef<uint8_t> Buffer) const { uint32_t BlockNum = Offset / Pdb.getBlockSize(); uint32_t OffsetInBlock = Offset % Pdb.getBlockSize(); // Make sure we aren't trying to read beyond the end of the stream. if (Buffer.size() > Data->getLength()) return make_error<RawError>(raw_error_code::insufficient_buffer); if (Offset > Data->getLength() - Buffer.size()) return make_error<RawError>(raw_error_code::insufficient_buffer); uint32_t BytesLeft = Buffer.size(); uint32_t BytesWritten = 0; uint8_t *WriteBuffer = Buffer.data(); auto BlockList = Data->getStreamBlocks(); while (BytesLeft > 0) { uint32_t StreamBlockAddr = BlockList[BlockNum]; auto Data = Pdb.getBlockData(StreamBlockAddr, Pdb.getBlockSize()); const uint8_t *ChunkStart = Data.data() + OffsetInBlock; uint32_t BytesInChunk = std::min(BytesLeft, Pdb.getBlockSize() - OffsetInBlock); ::memcpy(WriteBuffer + BytesWritten, ChunkStart, BytesInChunk); BytesWritten += BytesInChunk; BytesLeft -= BytesInChunk; ++BlockNum; OffsetInBlock = 0; } return Error::success(); }
Error MappedBlockStream::readBytes(uint32_t Offset, MutableArrayRef<uint8_t> Buffer) const { uint32_t BlockNum = Offset / BlockSize; uint32_t OffsetInBlock = Offset % BlockSize; // Make sure we aren't trying to read beyond the end of the stream. if (Buffer.size() > StreamLayout.Length) return make_error<MSFError>(msf_error_code::insufficient_buffer); if (Offset > StreamLayout.Length - Buffer.size()) return make_error<MSFError>(msf_error_code::insufficient_buffer); uint32_t BytesLeft = Buffer.size(); uint32_t BytesWritten = 0; uint8_t *WriteBuffer = Buffer.data(); while (BytesLeft > 0) { uint32_t StreamBlockAddr = StreamLayout.Blocks[BlockNum]; ArrayRef<uint8_t> BlockData; uint32_t Offset = blockToOffset(StreamBlockAddr, BlockSize); if (auto EC = MsfData.readBytes(Offset, BlockSize, BlockData)) return EC; const uint8_t *ChunkStart = BlockData.data() + OffsetInBlock; uint32_t BytesInChunk = std::min(BytesLeft, BlockSize - OffsetInBlock); ::memcpy(WriteBuffer + BytesWritten, ChunkStart, BytesInChunk); BytesWritten += BytesInChunk; BytesLeft -= BytesInChunk; ++BlockNum; OffsetInBlock = 0; } return Error::success(); }
Error ByteStream::readBytes(uint32_t Offset, MutableArrayRef<uint8_t> Buffer) const { if (Data.size() < Buffer.size() + Offset) return make_error<RawError>(raw_error_code::insufficient_buffer); ::memcpy(Buffer.data(), Data.data() + Offset, Buffer.size()); return Error::success(); }
static void mergeSymbolRecords(BumpPtrAllocator &Alloc, ObjectFile *File, ArrayRef<TypeIndex> TypeIndexMap, BinaryStreamRef SymData) { // FIXME: Improve error recovery by warning and skipping records when // possible. CVSymbolArray Syms; BinaryStreamReader Reader(SymData); ExitOnErr(Reader.readArray(Syms, Reader.getLength())); for (const CVSymbol &Sym : Syms) { // Discover type index references in the record. Skip it if we don't know // where they are. SmallVector<TiReference, 32> TypeRefs; if (!discoverTypeIndices(Sym, TypeRefs)) { log("ignoring unknown symbol record with kind 0x" + utohexstr(Sym.kind())); continue; } // Copy the symbol record so we can mutate it. MutableArrayRef<uint8_t> NewData = copySymbolForPdb(Sym, Alloc); // Re-map all the type index references. MutableArrayRef<uint8_t> Contents = NewData.drop_front(sizeof(RecordPrefix)); if (!remapTypesInSymbolRecord(File, Contents, TypeIndexMap, TypeRefs)) continue; // FIXME: Fill in "Parent" and "End" fields by maintaining a stack of // scopes. // Add the symbol to the module. File->ModuleDBI->addSymbol(CVSymbol(Sym.kind(), NewData)); } }
static Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Src, MutableArrayRef<uint8_t> Dest) { if (Dest.size() < Src.size()) return make_error<CodeViewError>(cv_error_code::insufficient_buffer); if (Offset > Src.size() - Dest.size()) return make_error<CodeViewError>(cv_error_code::insufficient_buffer); ::memcpy(Dest.data() + Offset, Src.data(), Src.size()); return Error::success(); }
// Encode the single code point C as UTF-8 into Storage and return a view of
// the bytes written.
static StringRef toUTF8(UTF32 C, MutableArrayRef<UTF8> Storage) {
  const UTF32 *SourceStart = &C;
  UTF8 *TargetStart = Storage.begin();

  // The case-folded output should always be a valid unicode character, so
  // request strict conversion and treat failure as a programmer error.
  ConversionResult CR =
      ConvertUTF32toUTF8(&SourceStart, &C + 1, &TargetStart, Storage.end(),
                         strictConversion);
  assert(CR == conversionOK && "Case folding produced invalid char?");
  (void)CR;

  // The converter advanced TargetStart past the last byte it wrote.
  return StringRef(reinterpret_cast<char *>(Storage.begin()),
                   TargetStart - Storage.begin());
}
void SILInstruction::dropAllReferences() { MutableArrayRef<Operand> PossiblyDeadOps = getAllOperands(); for (auto OpI = PossiblyDeadOps.begin(), OpE = PossiblyDeadOps.end(); OpI != OpE; ++OpI) { OpI->drop(); } // If we have a function ref inst, we need to especially drop its function // argument so that it gets a proper ref decrement. auto *FRI = dyn_cast<FunctionRefInst>(this); if (!FRI || !FRI->getReferencedFunction()) return; FRI->dropReferencedFunction(); }
void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, MutableArrayRef<char> Data, uint64_t Value, bool IsResolved) const { unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind()); if (!Value) return; // Doesn't change encoding. MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind()); MCContext &Ctx = Asm.getContext(); // Apply any target-specific value adjustments. Value = adjustFixupValue(Fixup, Value, Ctx); // Shift the value into position. Value <<= Info.TargetOffset; unsigned Offset = Fixup.getOffset(); assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!"); // Used to point to big endian bytes. unsigned FulleSizeInBytes = getFixupKindContainereSizeInBytes(Fixup.getKind()); // For each byte of the fragment that the fixup touches, mask in the // bits from the fixup value. if (FulleSizeInBytes == 0) { // Handle as little-endian for (unsigned i = 0; i != NumBytes; ++i) { Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff); } } else {
void RISCVAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, MutableArrayRef<char> Data, uint64_t Value, bool IsResolved, const MCSubtargetInfo *STI) const { MCContext &Ctx = Asm.getContext(); MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind()); if (!Value) return; // Doesn't change encoding. // Apply any target-specific value adjustments. Value = adjustFixupValue(Fixup, Value, Ctx); // Shift the value into position. Value <<= Info.TargetOffset; unsigned Offset = Fixup.getOffset(); unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8; assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!"); // For each byte of the fragment that the fixup touches, mask in the // bits from the fixup value. for (unsigned i = 0; i != NumBytes; ++i) { Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff); } }
// Lex up to Buf.size() tokens ahead without perturbing the lexer: all
// mutable state is snapshotted first and restored before returning.
// Returns the number of tokens stored, not counting a trailing Eof.
size_t AsmLexer::peekTokens(MutableArrayRef<AsmToken> Buf,
                            bool ShouldSkipSpace) {
  // Snapshot the complete lexer state.
  const char *PrevTokStart = TokStart;
  const char *PrevCurPtr = CurPtr;
  bool PrevAtStartOfLine = IsAtStartOfLine;
  bool PrevAtStartOfStatement = IsAtStartOfStatement;
  bool PrevSkipSpace = SkipSpace;
  std::string PrevErr = getErr();
  SMLoc PrevErrLoc = getErrLoc();

  SkipSpace = ShouldSkipSpace;

  // Lex ahead, filling Buf until it is full or we hit end of input. An Eof
  // token is stored but not counted (the increment is skipped on break).
  size_t TokensRead = 0;
  while (TokensRead < Buf.size()) {
    AsmToken Tok = LexToken();
    Buf[TokensRead] = Tok;
    if (Tok.is(AsmToken::Eof))
      break;
    ++TokensRead;
  }

  // Restore everything we touched.
  SetError(PrevErrLoc, PrevErr);
  SkipSpace = PrevSkipSpace;
  IsAtStartOfLine = PrevAtStartOfLine;
  IsAtStartOfStatement = PrevAtStartOfStatement;
  CurPtr = PrevCurPtr;
  TokStart = PrevTokStart;
  return TokensRead;
}
void ABISignature::expand(GenIR &Reader, ArrayRef<ABIArgInfo::Expansion> Expansions, Value *Source, MutableArrayRef<Value *> Values, MutableArrayRef<Type *> Types, bool IsResult) { assert(Source != nullptr); assert(Source->getType()->isPointerTy()); assert(Reader.doesValueRepresentStruct(Source)); assert(Expansions.size() > 0); assert((IsResult && Values.size() == 1) || (Values.size() == Expansions.size())); LLVMContext &LLVMContext = *Reader.JitContext->LLVMContext; IRBuilder<> &Builder = *Reader.LLVMBuilder; Type *ResultType = nullptr; Value *ResultValue = nullptr; if (IsResult) { ResultType = getExpandedResultType(LLVMContext, Expansions); ResultValue = Constant::getNullValue(ResultType); } Type *BytePtrTy = Type::getInt8PtrTy(LLVMContext, 0); Value *SourcePtr = Builder.CreatePointerCast(Source, BytePtrTy); for (int32_t I = 0; I < static_cast<int32_t>(Expansions.size()); I++) { const ABIArgInfo::Expansion &Exp = Expansions[I]; Value *LoadPtr = Builder.CreateConstGEP1_32(SourcePtr, Exp.Offset); LoadPtr = Builder.CreatePointerCast(LoadPtr, Exp.TheType->getPointerTo(0)); const bool IsVolatile = false; Value *Value = Builder.CreateLoad(LoadPtr, IsVolatile); if (IsResult) { ResultValue = Builder.CreateInsertValue(ResultValue, Value, I); } else { Values[I] = Value; } if (Types.size() > 0) { Types[I] = Exp.TheType; } } if (IsResult) { Values[0] = ResultValue; } }
// Apply `f` to each payload chunk that overlaps a value of type `valueType`
// located `payloadOffset` bits into the payload. A negative
// numBitsUsedInValue means "use valueType's full bit width".
// NOTE(review): the `Fn` template parameter is declared outside this
// excerpt — confirm the template header when editing.
static void withValueInPayload(IRGenFunction &IGF,
                               const EnumPayload &payload,
                               llvm::Type *valueType,
                               int numBitsUsedInValue,
                               unsigned payloadOffset,
                               Fn &&f) {
  auto &DataLayout = IGF.IGM.DataLayout;
  int valueTypeBitWidth = DataLayout.getTypeSizeInBits(valueType);
  int valueBitWidth =
      numBitsUsedInValue < 0 ? valueTypeBitWidth : numBitsUsedInValue;
  assert(numBitsUsedInValue <= valueTypeBitWidth);

  // Find the elements we need to touch.
  // TODO: Linear search through the payload elements is lame.
  MutableArrayRef<EnumPayload::LazyValue> payloads = payload.PayloadValues;
  llvm::Type *payloadType;
  int payloadBitWidth;
  // valueOffset: bits of the value already consumed.
  // payloadValueOffset: bit offset of the value within the current chunk.
  int valueOffset = 0, payloadValueOffset = payloadOffset;
  for (;;) {
    payloadType = getPayloadType(payloads.front());
    payloadBitWidth = IGF.IGM.DataLayout.getTypeSizeInBits(payloadType);

    // Does this element overlap the area we need to touch?
    if (payloadValueOffset < payloadBitWidth) {
      // See how much of the value we can fit here.
      int valueChunkWidth = payloadBitWidth - payloadValueOffset;
      valueChunkWidth = std::min(valueChunkWidth, valueBitWidth - valueOffset);

      f(payloads.front(),
        payloadBitWidth, payloadValueOffset,
        valueTypeBitWidth, valueOffset);

      // If we used the entire value, we're done.
      valueOffset += valueChunkWidth;
      if (valueOffset >= valueBitWidth)
        return;
    }

    // Advance to the next chunk; once the remaining offset reaches zero,
    // subsequent chunks overlap the value starting at their first bit.
    payloadValueOffset = std::max(payloadValueOffset - payloadBitWidth, 0);
    payloads = payloads.slice(1);
  }
}
static bool remapTypesInSymbolRecord(ObjectFile *File, MutableArrayRef<uint8_t> Contents, ArrayRef<TypeIndex> TypeIndexMap, ArrayRef<TiReference> TypeRefs) { for (const TiReference &Ref : TypeRefs) { unsigned ByteSize = Ref.Count * sizeof(TypeIndex); if (Contents.size() < Ref.Offset + ByteSize) { log("ignoring short symbol record"); return false; } MutableArrayRef<TypeIndex> TIs( reinterpret_cast<TypeIndex *>(Contents.data() + Ref.Offset), Ref.Count); for (TypeIndex &TI : TIs) if (!remapTypeIndex(TI, TypeIndexMap)) { log("ignoring symbol record in " + File->getName() + " with bad type index 0x" + utohexstr(TI.getIndex())); return false; } } return true; }
/// Sort a nested sequence of regions from a single file. static void sortNestedRegions(MutableArrayRef<CountedRegion> Regions) { std::sort(Regions.begin(), Regions.end(), [](const CountedRegion &LHS, const CountedRegion &RHS) { if (LHS.startLoc() != RHS.startLoc()) return LHS.startLoc() < RHS.startLoc(); if (LHS.endLoc() != RHS.endLoc()) // When LHS completely contains RHS, we sort LHS first. return RHS.endLoc() < LHS.endLoc(); // If LHS and RHS cover the same area, we need to sort them according // to their kinds so that the most suitable region will become "active" // in combineRegions(). Because we accumulate counter values only from // regions of the same kind as the first region of the area, prefer // CodeRegion to ExpansionRegion and ExpansionRegion to SkippedRegion. static_assert(CounterMappingRegion::CodeRegion < CounterMappingRegion::ExpansionRegion && CounterMappingRegion::ExpansionRegion < CounterMappingRegion::SkippedRegion, "Unexpected order of region kind values"); return LHS.Kind < RHS.Kind; }); }
/// Prepare an Initialization that will initialize the result of the /// current function. /// /// \param directResultsBuffer - will be filled with the direct /// components of the result /// \param cleanups - will be filled (after initialization completes) /// with all the active cleanups managing the result values static std::unique_ptr<Initialization> prepareIndirectResultInit(SILGenFunction &gen, CanType formalResultType, SmallVectorImpl<SILValue> &directResultsBuffer, SmallVectorImpl<CleanupHandle> &cleanups) { auto fnType = gen.F.getLoweredFunctionType(); // Make space in the direct-results array for all the entries we need. directResultsBuffer.append(fnType->getNumDirectResults(), SILValue()); ArrayRef<SILResultInfo> allResults = fnType->getAllResults(); MutableArrayRef<SILValue> directResults = directResultsBuffer; ArrayRef<SILArgument*> indirectResultAddrs = gen.F.getIndirectResults(); auto init = prepareIndirectResultInit(gen, formalResultType, allResults, directResults, indirectResultAddrs, cleanups); assert(allResults.empty()); assert(directResults.empty()); assert(indirectResultAddrs.empty()); return init; }
// Recursive worker that builds an Initialization for resultType.
//
// allResults, directResults, and indirectResultAddrs are reference
// parameters deliberately advanced in place: each recursive call consumes
// the entries it uses from the front of each list.
static InitializationPtr
prepareIndirectResultInit(SILGenFunction &gen, CanType resultType,
                          ArrayRef<SILResultInfo> &allResults,
                          MutableArrayRef<SILValue> &directResults,
                          ArrayRef<SILArgument*> &indirectResultAddrs,
                          SmallVectorImpl<CleanupHandle> &cleanups) {
  // Recursively decompose tuple types.
  if (auto resultTupleType = dyn_cast<TupleType>(resultType)) {
    auto tupleInit = new TupleInitialization();
    tupleInit->SubInitializations.reserve(resultTupleType->getNumElements());

    for (auto resultEltType : resultTupleType.getElementTypes()) {
      auto eltInit = prepareIndirectResultInit(gen, resultEltType, allResults,
                                               directResults,
                                               indirectResultAddrs, cleanups);
      tupleInit->SubInitializations.push_back(std::move(eltInit));
    }

    return InitializationPtr(tupleInit);
  }

  // Okay, pull the next result off the list of results.
  auto result = allResults[0];
  allResults = allResults.slice(1);

  // If it's indirect, we should be emitting into an argument.
  if (result.isIndirect()) {
    // Pull off the next indirect result argument.
    SILValue addr = indirectResultAddrs.front();
    indirectResultAddrs = indirectResultAddrs.slice(1);

    // Create an initialization which will initialize it.
    auto &resultTL = gen.getTypeLowering(addr->getType());
    auto temporary = gen.useBufferAsTemporary(addr, resultTL);

    // Remember the cleanup that will be activated.
    auto cleanup = temporary->getInitializedCleanup();
    if (cleanup.isValid())
      cleanups.push_back(cleanup);

    return InitializationPtr(temporary.release());
  }

  // Otherwise, make an Initialization that stores the value in the
  // next element of the directResults array.
  auto init = new StoreResultInitialization(directResults[0], cleanups);
  directResults = directResults.slice(1);
  return InitializationPtr(init);
}
void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, MutableArrayRef<char> Data, uint64_t Value, bool IsResolved, const MCSubtargetInfo *STI) const { Value = adjustFixupValue(Fixup, Value, &Asm.getContext()); if (!Value) return; // Doesn't change encoding. MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind()); // Shift the value into position. Value <<= Info.TargetOffset; unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind()); uint32_t Offset = Fixup.getOffset(); assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!"); // For each byte of the fragment that the fixup touches, mask in the bits from // the fixup value. for (unsigned i = 0; i != NumBytes; ++i) Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff); }
// Compute a CharRole for every character of Text and return the set of
// CharTypes that occurred.
CharTypeSet calculateRoles(StringRef Text, MutableArrayRef<CharRole> Roles) {
  assert(Text.size() == Roles.size());
  if (Text.size() == 0)
    return 0;

  CharType Cur = packedLookup<CharType>(CharTypes, Text[0]);
  CharTypeSet Seen = 1 << Cur;

  // Window holds a sliding (Prev, Curr, Next) triple of character types,
  // two bits each; initially (Empty, Empty, type of Text[0]).
  int Window = Cur;
  auto Shift = [&](CharType T) { Window = ((Window << 2) | T) & 0x3f; };

  // For each character, slide in the type of its successor, then look up
  // the role for the resulting triple.
  for (unsigned I = 0; I + 1 < Text.size(); ++I) {
    Cur = packedLookup<CharType>(CharTypes, Text[I + 1]);
    Seen |= 1 << Cur;
    Shift(Cur);
    Roles[I] = packedLookup<CharRole>(CharRoles, Window);
  }

  // The last character's "next" is Empty.
  Shift(Empty);
  Roles[Text.size() - 1] = packedLookup<CharRole>(CharRoles, Window);
  return Seen;
}
// Pop this scope while keeping innerValues alive: their cleanups are
// deactivated before the pop and re-created (cloned) afterwards into
// outerValues.
void Scope::popPreservingValues(ArrayRef<ManagedValue> innerValues,
                                MutableArrayRef<ManagedValue> outerValues) {
  auto &SGF = cleanups.SGF;
  assert(innerValues.size() == outerValues.size());

  // Record the cleanup information for each preserved value and deactivate
  // its cleanup. (Renamed from `cleanups`, which shadowed the member of the
  // same name read on the first line of this function.)
  SmallVector<CleanupCloner, 4> cloners;
  cloners.reserve(innerValues.size());
  for (auto &mv : innerValues) {
    cloners.emplace_back(SGF, mv);
    mv.forward(SGF);
  }

  // Pop any unpreserved cleanups.
  pop();

  // Create a managed value for each preserved value, cloning its cleanup.
  // Since the CleanupCloner does not remember its SILValue, grab it from the
  // original, now-deactivated managed value.
  for (auto index : indices(innerValues)) {
    outerValues[index] = cloners[index].clone(innerValues[index].getValue());
  }
}
// Lex up to Buf.size() tokens ahead without perturbing the lexer.
//
// Returns the number of tokens stored in Buf, not counting a trailing Eof
// (the loop breaks before the increment when Eof is stored).
size_t AsmLexer::peekTokens(MutableArrayRef<AsmToken> Buf,
                            bool ShouldSkipSpace) {
  // Each guard restores its field when this function returns; destruction
  // runs in reverse declaration order.
  SaveAndRestore<const char *> SavedTokenStart(TokStart);
  SaveAndRestore<const char *> SavedCurPtr(CurPtr);
  SaveAndRestore<bool> SavedAtStartOfLine(IsAtStartOfLine);
  SaveAndRestore<bool> SavedAtStartOfStatement(IsAtStartOfStatement);
  SaveAndRestore<bool> SavedSkipSpace(SkipSpace, ShouldSkipSpace);
  SaveAndRestore<bool> SavedIsPeeking(IsPeeking, true);
  // Error state is saved/restored by hand via SetError below, not a guard.
  std::string SavedErr = getErr();
  SMLoc SavedErrLoc = getErrLoc();

  size_t ReadCount;
  for (ReadCount = 0; ReadCount < Buf.size(); ++ReadCount) {
    AsmToken Token = LexToken();
    Buf[ReadCount] = Token;
    if (Token.is(AsmToken::Eof))
      break;
  }

  SetError(SavedErrLoc, SavedErr);
  return ReadCount;
}
// Stable-sort the given sections according to policy K. Default and None
// both mean "leave the input order alone".
static void sortSections(MutableArrayRef<InputSection *> Vec,
                         SortSectionPolicy K) {
  if (K == SortSectionPolicy::Default || K == SortSectionPolicy::None)
    return;
  std::stable_sort(Vec.begin(), Vec.end(), getComparator(K));
}
/// Omit needless words from a declaration's base name and argument names,
/// using the surrounding type information as context.
///
/// Mutates baseName and argNames in place; returns true if anything changed.
bool swift::omitNeedlessWords(StringRef &baseName,
                              MutableArrayRef<StringRef> argNames,
                              StringRef firstParamName,
                              OmissionTypeName resultType,
                              OmissionTypeName contextType,
                              ArrayRef<OmissionTypeName> paramTypes,
                              bool returnsSelf, bool isProperty,
                              const InheritedNameSet *allPropertyNames,
                              StringScratchSpace &scratch) {
  bool anyChanges = false;

  /// Local function that lowercases all of the base names and
  /// argument names before returning.
  auto lowercaseAcronymsForReturn = [&] {
    // Pointer (data()) comparison detects whether a fresh string was
    // produced; equal contents from a new buffer still count as a change.
    StringRef newBaseName = toLowercaseWordAndAcronym(baseName, scratch);
    if (baseName.data() != newBaseName.data()) {
      baseName = newBaseName;
      anyChanges = true;
    }

    for (StringRef &argName : argNames) {
      StringRef newArgName = toLowercaseWordAndAcronym(argName, scratch);
      if (argName.data() != newArgName.data()) {
        argName = newArgName;
        anyChanges = true;
      }
    }

    return anyChanges;
  };

  // If the result type matches the context, remove the context type from the
  // prefix of the name.
  bool resultTypeMatchesContext = returnsSelf || (resultType == contextType);
  if (resultTypeMatchesContext) {
    StringRef newBaseName =
        omitNeedlessWordsFromPrefix(baseName, contextType, scratch);
    if (newBaseName != baseName) {
      baseName = newBaseName;
      anyChanges = true;
    }
  }

  // Strip the context type from the base name of a method.
  if (!isProperty) {
    StringRef newBaseName = ::omitNeedlessWords(
        baseName, contextType, NameRole::BaseNameSelf, nullptr, scratch);
    if (newBaseName != baseName) {
      baseName = newBaseName;
      anyChanges = true;
    }
  }

  // With no parameters there is nothing further to trim except (possibly)
  // the result/context type from a property-like base name.
  if (paramTypes.empty()) {
    if (resultTypeMatchesContext) {
      StringRef newBaseName = ::omitNeedlessWords(
          baseName, returnsSelf ? contextType : resultType,
          NameRole::Property, allPropertyNames, scratch);
      if (newBaseName != baseName) {
        baseName = newBaseName;
        anyChanges = true;
      }
    }

    return lowercaseAcronymsForReturn();
  }

  // Omit needless words based on parameter types.
  for (unsigned i = 0, n = argNames.size(); i != n; ++i) {
    // If there is no corresponding parameter, there is nothing to
    // omit.
    if (i >= paramTypes.size())
      continue;

    // Omit needless words based on the type of the parameter.
    NameRole role = i > 0 ? NameRole::SubsequentParameter
                  : argNames[0].empty() ? NameRole::BaseName
                  : NameRole::FirstParameter;

    // Omit needless words from the name.
    StringRef name = role == NameRole::BaseName ? baseName : argNames[i];
    StringRef newName = ::omitNeedlessWords(
        name, paramTypes[i], role,
        role == NameRole::BaseName ? allPropertyNames : nullptr, scratch);

    // Did the name change?
    if (name != newName)
      anyChanges = true;

    // If the first parameter has a default argument, and there is a
    // preposition in the base name, split the base name at that preposition.
    if (role == NameRole::BaseName && argNames[0].empty() &&
        paramTypes[0].hasDefaultArgument()) {
      // Scan backwards for a preposition.
      auto nameWords = camel_case::getWords(newName);
      auto nameWordRevIter = nameWords.rbegin(),
           nameWordRevIterEnd = nameWords.rend();
      bool found = false, done = false;
      while (nameWordRevIter != nameWordRevIterEnd && !done) {
        switch (getPartOfSpeech(*nameWordRevIter)) {
        case PartOfSpeech::Preposition:
          found = true;
          done = true;
          break;

        case PartOfSpeech::Verb:
        case PartOfSpeech::Gerund:
          // Don't skip over verbs or gerunds.
          done = true;
          break;

        case PartOfSpeech::Unknown:
          ++nameWordRevIter;
          break;
        }
      }

      // If we found a split point that's not at the beginning of the
      // name, split there.
      if (found) {
        ++nameWordRevIter;
        unsigned splitPos = nameWordRevIter.base().getPosition();
        if (splitPos > 0) {
          unsigned afterSplitPos = splitPos;

          // Create a first argument name with the remainder of the base
          // name, lowercased. If we would end up with a vacuous name, go
          // back and get the original.
          StringRef newArgName = newName.substr(afterSplitPos);
          if (isVacuousName(newArgName)) {
            size_t pos = name.rfind(newArgName);
            newArgName = name.substr(pos);
          }

          // If there is a leading "with" on the first argument, drop it.
          if (newArgName.size() > 4 &&
              camel_case::sameWordIgnoreFirstCase(
                  camel_case::getFirstWord(newArgName), "with")) {
            newArgName = newArgName.substr(4);
          }

          argNames[0] = toLowercaseWord(newArgName, scratch);

          // Update the base name by splitting at the preposition.
          newName = newName.substr(0, splitPos);

          anyChanges = true;
        }
      }
    }

    if (name == newName)
      continue;

    // Record this change.
    if (role == NameRole::BaseName) {
      baseName = newName;
    } else {
      argNames[i] = newName;
    }
  }

  return lowercaseAcronymsForReturn();
}
// Emit a multi-way branch over a multi-chunk enum payload, one chunk at a
// time. Each level consumes the front chunk of `values` and the low `size`
// bits of `mask` and of every case value; cases that still differ in later
// chunks get a fresh block and are handled recursively.
static void emitSubSwitch(IRGenFunction &IGF,
                          MutableArrayRef<EnumPayload::LazyValue> values,
                          APInt mask,
                          MutableArrayRef<std::pair<APInt, llvm::BasicBlock *>> cases,
                          SwitchDefaultDest dflt) {
recur:
  assert(!values.empty() && "didn't exit out when exhausting all values?!");
  assert(!cases.empty() && "switching with no cases?!");

  auto &DL = IGF.IGM.DataLayout;
  auto &pv = values.front();
  values = values.slice(1);
  auto payloadTy = getPayloadType(pv);
  unsigned size = DL.getTypeSizeInBits(payloadTy);

  // Grab a chunk of the mask.
  auto maskPiece = mask.zextOrTrunc(size);
  mask = mask.lshr(size);

  // If the piece is zero, this doesn't affect the switch. We can just move
  // forward and recur.
  if (maskPiece == 0) {
    // Discard this chunk of every case value before retrying.
    for (auto &casePair : cases)
      casePair.first = casePair.first.lshr(size);
    goto recur;
  }

  // Force the value we will test.
  auto v = forcePayloadValue(pv);
  auto payloadIntTy = llvm::IntegerType::get(IGF.IGM.getLLVMContext(), size);

  // Need to coerce to integer for 'icmp eq' if it's not already an integer
  // or pointer. (Switching or masking will also require a cast to integer.)
  if (!isa<llvm::IntegerType>(v->getType())
      && !isa<llvm::PointerType>(v->getType()))
    v = IGF.Builder.CreateBitOrPointerCast(v, payloadIntTy);

  // Apply the mask if it's interesting.
  if (!maskPiece.isAllOnesValue()) {
    v = IGF.Builder.CreateBitOrPointerCast(v, payloadIntTy);
    auto maskConstant = llvm::ConstantInt::get(payloadIntTy, maskPiece);
    v = IGF.Builder.CreateAnd(v, maskConstant);
  }

  // Gather the values we will switch over for this payload chunk.
  // FIXME: std::map is lame. Should hash APInts.
  std::map<APInt, SmallVector<std::pair<APInt, llvm::BasicBlock*>, 2>, ult>
    subCases;
  for (auto casePair : cases) {
    // Grab a chunk of the value.
    auto valuePiece = casePair.first.zextOrTrunc(size);
    // Index the case according to this chunk.
    subCases[valuePiece].push_back({std::move(casePair.first).lshr(size),
                                    casePair.second});
  }

  // More dispatching is needed if there are chunks left AND any remaining
  // mask bits to test in them.
  bool needsAdditionalCases = !values.empty() && mask != 0;
  SmallVector<std::pair<llvm::BasicBlock *, decltype(cases)>, 2>
    recursiveCases;

  auto blockForCases
    = [&](MutableArrayRef<std::pair<APInt, llvm::BasicBlock*>> cases)
        -> llvm::BasicBlock * {
      // If we need to recur, emit a new block.
      if (needsAdditionalCases) {
        auto newBB = IGF.createBasicBlock("");
        recursiveCases.push_back({newBB, cases});
        return newBB;
      }

      // Otherwise, we can jump directly to the ultimate destination.
      assert(cases.size() == 1 && "more than one case for final destination?!");
      return cases.front().second;
    };

  // If there's only one case, do a cond_br.
  if (subCases.size() == 1) {
    auto &subCase = *subCases.begin();
    llvm::BasicBlock *block = blockForCases(subCase.second);
    // If the default case is unreachable, we don't need to conditionally
    // branch.
    if (dflt.getInt()) {
      IGF.Builder.CreateBr(block);
      goto next;
    }

    auto &valuePiece = subCase.first;
    llvm::Value *valueConstant = llvm::ConstantInt::get(payloadIntTy,
                                                        valuePiece);
    valueConstant = IGF.Builder.CreateBitOrPointerCast(valueConstant,
                                                       v->getType());
    auto cmp = IGF.Builder.CreateICmpEQ(v, valueConstant);
    IGF.Builder.CreateCondBr(cmp, block, dflt.getPointer());
    goto next;
  }

  // Otherwise, do a switch.
  {
    v = IGF.Builder.CreateBitOrPointerCast(v, payloadIntTy);
    auto swi = IGF.Builder.CreateSwitch(v, dflt.getPointer(), subCases.size());

    for (auto &subCase : subCases) {
      auto &valuePiece = subCase.first;
      auto valueConstant = llvm::ConstantInt::get(IGF.IGM.getLLVMContext(),
                                                  valuePiece);
      swi->addCase(valueConstant, blockForCases(subCase.second));
    }
  }

next:
  // Emit the recursive cases.
  for (auto &recursive : recursiveCases) {
    IGF.Builder.emitBlock(recursive.first);
    emitSubSwitch(IGF, values, mask, recursive.second, dflt);
  }
}
// Construct a writable stream ref over all of Data with the given
// endianness; the ref starts at offset 0 and spans Data.size() bytes.
WritableBinaryStreamRef::WritableBinaryStreamRef(MutableArrayRef<uint8_t> Data,
                                                 endianness Endian)
    : BinaryStreamRefBase(std::make_shared<MutableArrayRefImpl>(Data, Endian),
                          0, Data.size()) {}
// Validate an inline-asm input constraint string character by character,
// recording its properties (register/memory, tied operand) in Info.
// Returns false for empty, unknown, or inconsistent constraints.
bool TargetInfo::validateInputConstraint(
    MutableArrayRef<ConstraintInfo> OutputConstraints,
    ConstraintInfo &Info) const {
  const char *Name = Info.ConstraintStr.c_str();

  if (!*Name)
    return false;

  while (*Name) {
    switch (*Name) {
    default:
      // Check if we have a matching constraint
      if (*Name >= '0' && *Name <= '9') {
        // Consume the whole digit run as one operand number.
        const char *DigitStart = Name;
        while (Name[1] >= '0' && Name[1] <= '9')
          Name++;
        const char *DigitEnd = Name;
        unsigned i;
        if (StringRef(DigitStart, DigitEnd - DigitStart + 1)
                .getAsInteger(10, i))
          return false;

        // Check if matching constraint is out of bounds.
        if (i >= OutputConstraints.size())
          return false;

        // A number must refer to an output only operand.
        if (OutputConstraints[i].isReadWrite())
          return false;

        // If the constraint is already tied, it must be tied to the
        // same operand referenced to by the number.
        if (Info.hasTiedOperand() && Info.getTiedOperand() != i)
          return false;

        // The constraint should have the same info as the respective
        // output constraint.
        Info.setTiedOperand(i, OutputConstraints[i]);
      } else if (!validateAsmConstraint(Name, Info)) {
        // FIXME: This error return is in place temporarily so we can
        // add more constraints as we hit it. Eventually, an unknown
        // constraint should just be treated as 'g'.
        return false;
      }
      break;
    case '[': {
      // Symbolic operand name, e.g. "[foo]".
      unsigned Index = 0;
      if (!resolveSymbolicName(Name, OutputConstraints, Index))
        return false;

      // If the constraint is already tied, it must be tied to the
      // same operand referenced to by the number.
      if (Info.hasTiedOperand() && Info.getTiedOperand() != Index)
        return false;

      // A number must refer to an output only operand.
      if (OutputConstraints[Index].isReadWrite())
        return false;

      Info.setTiedOperand(Index, OutputConstraints[Index]);
      break;
    }
    case '%': // commutative
      // FIXME: Fail if % is used with the last operand.
      break;
    case 'i': // immediate integer.
    case 'n': // immediate integer with a known value.
      break;
    case 'I': // Various constant constraints with target-specific meanings.
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P':
      if (!validateAsmConstraint(Name, Info))
        return false;
      break;
    case 'r': // general register.
      Info.setAllowsRegister();
      break;
    case 'm': // memory operand.
    case 'o': // offsettable memory operand.
    case 'V': // non-offsettable memory operand.
    case '<': // autodecrement memory operand.
    case '>': // autoincrement memory operand.
      Info.setAllowsMemory();
      break;
    case 'g': // general register, memory operand or immediate integer.
    case 'X': // any operand.
      Info.setAllowsRegister();
      Info.setAllowsMemory();
      break;
    case 'E': // immediate floating point.
    case 'F': // immediate floating point.
    case 'p': // address operand.
      break;
    case ',': // multiple alternative constraint. Ignore comma.
      break;
    case '#': // Ignore as constraint.
      while (Name[1] && Name[1] != ',')
        Name++;
      break;
    case '?': // Disparage slightly code.
    case '!': // Disparage severely.
    case '*': // Ignore for choosing register preferences.
      break; // Pass them.
    }

    Name++;
  }

  return true;
}
/// \brief Invert the 1-[0/1] mapping of diags to group into a one to many
/// mapping of groups to diags in the group.
static void groupDiagnostics(const std::vector<Record*> &Diags,
                             const std::vector<Record*> &DiagGroups,
                             std::map<std::string, GroupInfo> &DiagsInGroup) {

  // Pass 1: bucket every diagnostic under the group named by its "Group"
  // field, when it has one.
  for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
    const Record *R = Diags[i];
    DefInit *DI = dyn_cast<DefInit>(R->getValueInit("Group"));
    if (!DI) continue;
    assert(R->getValueAsDef("Class")->getName() != "CLASS_NOTE" &&
           "Note can't be in a DiagGroup");
    std::string GroupName = DI->getDef()->getValueAsString("GroupName");
    DiagsInGroup[GroupName].DiagsInGroup.push_back(R);
  }

  typedef SmallPtrSet<GroupInfo *, 16> GroupSetTy;
  GroupSetTy ImplicitGroups;

  // Add all DiagGroup's to the DiagsInGroup list to make sure we pick up empty
  // groups (these are warnings that GCC supports that clang never produces).
  for (unsigned i = 0, e = DiagGroups.size(); i != e; ++i) {
    Record *Group = DiagGroups[i];
    GroupInfo &GI = DiagsInGroup[Group->getValueAsString("GroupName")];
    if (Group->isAnonymous()) {
      // Remember anonymous groups referenced from more than one diagnostic
      // so we can complain about them below.
      if (GI.DiagsInGroup.size() > 1)
        ImplicitGroups.insert(&GI);
    } else {
      // A named group: record its defining Record. (The assert is a no-op
      // in release builds; it checks a group name isn't defined twice.)
      if (GI.ExplicitDef)
        assert(GI.ExplicitDef == Group);
      else
        GI.ExplicitDef = Group;
    }

    std::vector<Record*> SubGroups = Group->getValueAsListOfDefs("SubGroups");
    for (unsigned j = 0, e = SubGroups.size(); j != e; ++j)
      GI.SubGroups.push_back(SubGroups[j]->getValueAsString("GroupName"));
  }

  // Assign unique ID numbers to the groups.
  unsigned IDNo = 0;
  for (std::map<std::string, GroupInfo>::iterator
       I = DiagsInGroup.begin(), E = DiagsInGroup.end(); I != E; ++I, ++IDNo)
    I->second.IDNo = IDNo;

  // Sort the implicit groups, so we can warn about them deterministically.
  SmallVector<GroupInfo *, 16> SortedGroups(ImplicitGroups.begin(),
                                            ImplicitGroups.end());
  for (SmallVectorImpl<GroupInfo *>::iterator I = SortedGroups.begin(),
                                              E = SortedGroups.end();
       I != E; ++I) {
    MutableArrayRef<const Record *> GroupDiags = (*I)->DiagsInGroup;
    std::sort(GroupDiags.begin(), GroupDiags.end(), beforeThanCompare);
  }
  std::sort(SortedGroups.begin(), SortedGroups.end(), beforeThanCompareGroups);

  // Warn about the same group being used anonymously in multiple places.
  for (SmallVectorImpl<GroupInfo *>::const_iterator I = SortedGroups.begin(),
                                                    E = SortedGroups.end();
       I != E; ++I) {
    ArrayRef<const Record *> GroupDiags = (*I)->DiagsInGroup;

    if ((*I)->ExplicitDef) {
      // The group also has a named definition: emit an error for each
      // anonymous reference and suggest an explicit InGroup<Name> fix-it.
      std::string Name = (*I)->ExplicitDef->getValueAsString("GroupName");
      for (ArrayRef<const Record *>::const_iterator DI = GroupDiags.begin(),
                                                    DE = GroupDiags.end();
           DI != DE; ++DI) {
        const DefInit *GroupInit = cast<DefInit>((*DI)->getValueInit("Group"));
        const Record *NextDiagGroup = GroupInit->getDef();
        // Skip diagnostics that reference the group by its real definition.
        if (NextDiagGroup == (*I)->ExplicitDef)
          continue;

        SMRange InGroupRange = findSuperClassRange(*DI, "InGroup");
        SmallString<64> Replacement;
        if (InGroupRange.isValid()) {
          Replacement += "InGroup<";
          Replacement += (*I)->ExplicitDef->getName();
          Replacement += ">";
        }
        // NOTE(review): FixIt is constructed even when InGroupRange is
        // invalid and only used when valid (see ternary below) — confirm
        // SMFixIt tolerates construction from an invalid range.
        SMFixIt FixIt(InGroupRange, Replacement.str());

        SrcMgr.PrintMessage(NextDiagGroup->getLoc().front(),
                            SourceMgr::DK_Error,
                            Twine("group '") + Name +
                              "' is referred to anonymously",
                            None,
                            InGroupRange.isValid() ? FixIt
                                                   : ArrayRef<SMFixIt>());
        SrcMgr.PrintMessage((*I)->ExplicitDef->getLoc().front(),
                            SourceMgr::DK_Note, "group defined here");
      }
    } else {
      // If there's no existing named group, we should just warn once and use
      // notes to list all the other cases.
      ArrayRef<const Record *>::const_iterator DI = GroupDiags.begin(),
                                               DE = GroupDiags.end();
      assert(DI != DE && "We only care about groups with multiple uses!");

      const DefInit *GroupInit = cast<DefInit>((*DI)->getValueInit("Group"));
      const Record *NextDiagGroup = GroupInit->getDef();
      std::string Name = NextDiagGroup->getValueAsString("GroupName");

      SMRange InGroupRange = findSuperClassRange(*DI, "InGroup");
      SrcMgr.PrintMessage(NextDiagGroup->getLoc().front(),
                          SourceMgr::DK_Error,
                          Twine("group '") + Name +
                            "' is referred to anonymously",
                          InGroupRange);

      // Point at every other anonymous use of the same group.
      for (++DI; DI != DE; ++DI) {
        GroupInit = cast<DefInit>((*DI)->getValueInit("Group"));
        InGroupRange = findSuperClassRange(*DI, "InGroup");
        SrcMgr.PrintMessage(GroupInit->getDef()->getLoc().front(),
                            SourceMgr::DK_Note, "also referenced here",
                            InGroupRange);
      }
    }
  }
}
/// Marks \p node and everything transitively dependent on it, appending each
/// newly visited node to \p visited. When \p tracer is non-null, the chain of
/// dependencies ("reason") that caused each mark is recorded in the tracer's
/// table and per-kind stats are counted.
void DependencyGraphImpl::markTransitive(SmallVectorImpl<const void *> &visited,
                                         const void *node,
                                         MarkTracerImpl *tracer) {
  assert(Provides.count(node) && "node is not in the graph");
  // Arena for the per-node reason chains; all entries are freed at once when
  // this function returns.
  llvm::SpecificBumpPtrAllocator<MarkTracerImpl::Entry> scratchAlloc;

  struct WorklistEntry {
    ArrayRef<MarkTracerImpl::Entry> Reason; // dependency chain that led here
    const void *Node;
    bool IsCascading;
  };

  SmallVector<WorklistEntry, 16> worklist;
  SmallPtrSet<const void *, 16> visitedSet;

  // Enqueue every not-yet-marked node that depends on something provided by
  // `next`, extending `reason` by one entry when tracing.
  auto addDependentsToWorklist = [&](const void *next,
                                     ArrayRef<MarkTracerImpl::Entry> reason) {
    auto allProvided = Provides.find(next);
    if (allProvided == Provides.end())
      return;

    for (const auto &provided : allProvided->second) {
      auto allDependents = Dependencies.find(provided.name);
      if (allDependents == Dependencies.end())
        continue;

      // Skip (name, kind-set) dependency edges we've already traversed.
      if (allDependents->second.second.contains(provided.kindMask))
        continue;

      // Record that we've traversed this dependency.
      allDependents->second.second |= provided.kindMask;

      for (const auto &dependent : allDependents->second.first) {
        // A node doesn't depend on itself through this edge.
        if (dependent.node == next)
          continue;
        // Only kinds both provided and depended upon matter.
        auto intersectingKinds = provided.kindMask & dependent.kindMask;
        if (!intersectingKinds)
          continue;
        if (isMarked(dependent.node))
          continue;
        bool isCascading{dependent.flags & DependencyFlags::IsCascading};

        MutableArrayRef<MarkTracerImpl::Entry> newReason;
        if (tracer) {
          tracer->countStatsForNodeMarking(intersectingKinds, isCascading);
          // Copy the existing chain into the arena and append one entry for
          // this hop (placement-new into uninitialized storage).
          newReason = {scratchAlloc.Allocate(reason.size()+1), reason.size()+1};
          std::uninitialized_copy(reason.begin(), reason.end(),
                                  newReason.begin());
          new (&newReason.back()) MarkTracerImpl::Entry({next,
                                                         provided.name,
                                                         intersectingKinds});
        }
        worklist.push_back({ newReason, dependent.node, isCascading });
      }
    }
  };

  // Record a node as visited at most once; when tracing, save its reason
  // chain (overwriting any previous one).
  auto record = [&](WorklistEntry next) {
    if (!visitedSet.insert(next.Node).second)
      return;
    visited.push_back(next.Node);
    if (tracer) {
      auto &savedReason = tracer->Table[next.Node];
      savedReason.clear();
      savedReason.append(next.Reason.begin(), next.Reason.end());
    }
  };

  // Always mark through the starting node, even if it's already marked.
  markIntransitive(node);
  addDependentsToWorklist(node, {});

  while (!worklist.empty()) {
    auto next = worklist.pop_back_val();

    // Is this a non-cascading dependency?
    if (!next.IsCascading) {
      // Non-cascading: record it but do not traverse through it.
      if (!isMarked(next.Node))
        record(next);
      continue;
    }

    addDependentsToWorklist(next.Node, next.Reason);
    // markIntransitive returning false means the node was already marked;
    // don't record it again.
    if (!markIntransitive(next.Node))
      continue;
    record(next);
  }
}
bool swift::omitNeedlessWords(StringRef &baseName, MutableArrayRef<StringRef> argNames, StringRef firstParamName, OmissionTypeName resultType, OmissionTypeName contextType, ArrayRef<OmissionTypeName> paramTypes, bool returnsSelf, bool isProperty, const InheritedNameSet *allPropertyNames, StringScratchSpace &scratch) { bool anyChanges = false; /// Local function that lowercases all of the base names and /// argument names before returning. auto lowercaseAcronymsForReturn = [&] { StringRef newBaseName = toLowercaseInitialisms(baseName, scratch); if (baseName.data() != newBaseName.data()) { baseName = newBaseName; anyChanges = true; } for (StringRef &argName : argNames) { StringRef newArgName = toLowercaseInitialisms(argName, scratch); if (argName.data() != newArgName.data()) { argName = newArgName; anyChanges = true; } } return anyChanges; }; // If the result type matches the context, remove the context type from the // prefix of the name. bool resultTypeMatchesContext = returnsSelf || (resultType == contextType); if (resultTypeMatchesContext) { StringRef newBaseName = omitNeedlessWordsFromPrefix(baseName, contextType, scratch); if (newBaseName != baseName) { baseName = newBaseName; anyChanges = true; } } // Strip the context type from the base name of a method. if (!isProperty) { StringRef newBaseName = ::omitNeedlessWords(baseName, contextType, NameRole::BaseNameSelf, allPropertyNames, scratch); if (newBaseName != baseName) { baseName = newBaseName; anyChanges = true; } } if (paramTypes.empty()) { if (resultTypeMatchesContext) { StringRef newBaseName = ::omitNeedlessWords( baseName, returnsSelf ? contextType : resultType, NameRole::Property, allPropertyNames, scratch); if (newBaseName != baseName) { baseName = newBaseName; anyChanges = true; } } return lowercaseAcronymsForReturn(); } // If needed, split the base name. 
if (!argNames.empty() && splitBaseName(baseName, argNames[0], paramTypes[0], firstParamName)) anyChanges = true; // Omit needless words based on parameter types. for (unsigned i = 0, n = argNames.size(); i != n; ++i) { // If there is no corresponding parameter, there is nothing to // omit. if (i >= paramTypes.size()) continue; // Omit needless words based on the type of the parameter. NameRole role = i > 0 ? NameRole::SubsequentParameter : argNames[0].empty() ? NameRole::BaseName : baseName == "init" ? NameRole::SubsequentParameter : NameRole::FirstParameter; // Omit needless words from the name. StringRef name = role == NameRole::BaseName ? baseName : argNames[i]; StringRef newName = ::omitNeedlessWords(name, paramTypes[i], role, role == NameRole::BaseName ? allPropertyNames : nullptr, scratch); if (name == newName) continue; // Record this change. anyChanges = true; if (role == NameRole::BaseName) { baseName = newName; } else { argNames[i] = newName; } } return lowercaseAcronymsForReturn(); }