/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a
/// particular scalar component. This records its address; the reverse of this
/// list gives a sequence of indices appropriate for an extractvalue to locate
/// the important value. This value is updated during the function and on exit
/// will indicate similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0)
      return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (isa<CallInst>(I)) {
      // Look through call (skipping callee)
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 1;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<CallInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (isa<InvokeInst>(I)) {
      // Look through invoke (skipping BB, BB, Callee)
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 3;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<InvokeInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (std::equal(InsertLoc.rbegin(), InsertLoc.rend(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in,
        // no change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of
      // the previous aggregate. Combine the two paths to obtain the true
      // address of our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      std::copy(ExtractLoc.rbegin(), ExtractLoc.rend(),
                std::back_inserter(ValLoc));
      NoopInput = Op;
    }

    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}
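// A standalone sketch (hypothetical names, not the LLVM API) of the ValLoc
// convention documented above: indices are accumulated innermost-first while
// looking through insertvalue/extractvalue, so the reverse of the list is the
// index sequence an extractvalue would use.
#include <cstdio>
#include <vector>

int main() {
  std::vector<unsigned> ValLoc;
  ValLoc.push_back(1); // index of the scalar within the inner struct
  ValLoc.push_back(0); // index of the inner struct within the outer one
  std::vector<unsigned> ExtractIdx(ValLoc.rbegin(), ValLoc.rend());
  for (unsigned Idx : ExtractIdx)
    std::printf("%u ", Idx); // prints "0 1", i.e. extractvalue %agg, 0, 1
  return 0;
}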
/// \brief Convenient wrapper for checking membership in RegisterOperands.
/// (std::count() doesn't have an early exit).
static bool containsReg(ArrayRef<unsigned> RegUnits, unsigned RegUnit) {
  return std::find(RegUnits.begin(), RegUnits.end(), RegUnit)
      != RegUnits.end();
}
/// Simply decrease the current pressure as impacted by these registers.
void RegPressureTracker::decreaseRegPressure(ArrayRef<unsigned> RegUnits) {
  for (unsigned I = 0, E = RegUnits.size(); I != E; ++I)
    decreaseSetPressure(CurrSetPressure, MRI->getPressureSets(RegUnits[I]));
}
/// emitModuleFlags - Perform code emission for module flags. void TargetLoweringObjectFileMachO:: emitModuleFlags(MCStreamer &Streamer, ArrayRef<Module::ModuleFlagEntry> ModuleFlags, Mangler &Mang, const TargetMachine &TM) const { unsigned VersionVal = 0; unsigned ImageInfoFlags = 0; MDNode *LinkerOptions = nullptr; StringRef SectionVal; for (ArrayRef<Module::ModuleFlagEntry>::iterator i = ModuleFlags.begin(), e = ModuleFlags.end(); i != e; ++i) { const Module::ModuleFlagEntry &MFE = *i; // Ignore flags with 'Require' behavior. if (MFE.Behavior == Module::Require) continue; StringRef Key = MFE.Key->getString(); Metadata *Val = MFE.Val; if (Key == "Objective-C Image Info Version") { VersionVal = mdconst::extract<ConstantInt>(Val)->getZExtValue(); } else if (Key == "Objective-C Garbage Collection" || Key == "Objective-C GC Only" || Key == "Objective-C Is Simulated" || Key == "Objective-C Image Swift Version") { ImageInfoFlags |= mdconst::extract<ConstantInt>(Val)->getZExtValue(); } else if (Key == "Objective-C Image Info Section") { SectionVal = cast<MDString>(Val)->getString(); } else if (Key == "Linker Options") { LinkerOptions = cast<MDNode>(Val); } } // Emit the linker options if present. if (LinkerOptions) { for (unsigned i = 0, e = LinkerOptions->getNumOperands(); i != e; ++i) { MDNode *MDOptions = cast<MDNode>(LinkerOptions->getOperand(i)); SmallVector<std::string, 4> StrOptions; // Convert to strings. for (unsigned ii = 0, ie = MDOptions->getNumOperands(); ii != ie; ++ii) { MDString *MDOption = cast<MDString>(MDOptions->getOperand(ii)); StrOptions.push_back(MDOption->getString()); } Streamer.EmitLinkerOptions(StrOptions); } } // The section is mandatory. If we don't have it, then we don't have GC info. if (SectionVal.empty()) return; StringRef Segment, Section; unsigned TAA = 0, StubSize = 0; bool TAAParsed; std::string ErrorCode = MCSectionMachO::ParseSectionSpecifier(SectionVal, Segment, Section, TAA, TAAParsed, StubSize); if (!ErrorCode.empty()) // If invalid, report the error with report_fatal_error. report_fatal_error("Invalid section specifier '" + Section + "': " + ErrorCode + "."); // Get the section. const MCSectionMachO *S = getContext().getMachOSection(Segment, Section, TAA, StubSize, SectionKind::getDataNoRel()); Streamer.SwitchSection(S); Streamer.EmitLabel(getContext(). GetOrCreateSymbol(StringRef("L_OBJC_IMAGE_INFO"))); Streamer.EmitIntValue(VersionVal, 4); Streamer.EmitIntValue(ImageInfoFlags, 4); Streamer.AddBlankLine(); }
void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{ switch (getKind()) { case APValue::Uninitialized: Out << "<uninitialized>"; return; case APValue::Int: if (Ty->isBooleanType()) Out << (getInt().getBoolValue() ? "true" : "false"); else Out << getInt(); return; case APValue::Float: Out << GetApproxValue(getFloat()); return; case APValue::Vector: { Out << '{'; QualType ElemTy = Ty->getAs<VectorType>()->getElementType(); getVectorElt(0).printPretty(Out, Ctx, ElemTy); for (unsigned i = 1; i != getVectorLength(); ++i) { Out << ", "; getVectorElt(i).printPretty(Out, Ctx, ElemTy); } Out << '}'; return; } case APValue::ComplexInt: Out << getComplexIntReal() << "+" << getComplexIntImag() << "i"; return; case APValue::ComplexFloat: Out << GetApproxValue(getComplexFloatReal()) << "+" << GetApproxValue(getComplexFloatImag()) << "i"; return; case APValue::LValue: { LValueBase Base = getLValueBase(); if (!Base) { Out << "0"; return; } bool IsReference = Ty->isReferenceType(); QualType InnerTy = IsReference ? Ty.getNonReferenceType() : Ty->getPointeeType(); if (!hasLValuePath()) { // No lvalue path: just print the offset. CharUnits O = getLValueOffset(); CharUnits S = Ctx.getTypeSizeInChars(InnerTy); if (!O.isZero()) { if (IsReference) Out << "*("; if (O % S) { Out << "(char*)"; S = CharUnits::One(); } Out << '&'; } else if (!IsReference) Out << '&'; if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) Out << *VD; else Base.get<const Expr*>()->printPretty(Out, 0, Ctx.getPrintingPolicy()); if (!O.isZero()) { Out << " + " << (O / S); if (IsReference) Out << ')'; } return; } // We have an lvalue path. Print it out nicely. if (!IsReference) Out << '&'; else if (isLValueOnePastTheEnd()) Out << "*(&"; QualType ElemTy; if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) { Out << *VD; ElemTy = VD->getType(); } else { const Expr *E = Base.get<const Expr*>(); E->printPretty(Out, 0, Ctx.getPrintingPolicy()); ElemTy = E->getType(); } ArrayRef<LValuePathEntry> Path = getLValuePath(); const CXXRecordDecl *CastToBase = 0; for (unsigned I = 0, N = Path.size(); I != N; ++I) { if (ElemTy->getAs<RecordType>()) { // The lvalue refers to a class type, so the next path entry is a base // or member. const Decl *BaseOrMember = BaseOrMemberType::getFromOpaqueValue(Path[I].BaseOrMember).getPointer(); if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(BaseOrMember)) { CastToBase = RD; ElemTy = Ctx.getRecordType(RD); } else { const ValueDecl *VD = cast<ValueDecl>(BaseOrMember); Out << "."; if (CastToBase) Out << *CastToBase << "::"; Out << *VD; ElemTy = VD->getType(); } } else { // The lvalue must refer to an array. Out << '[' << Path[I].ArrayIndex << ']'; ElemTy = Ctx.getAsArrayType(ElemTy)->getElementType(); } } // Handle formatting of one-past-the-end lvalues. if (isLValueOnePastTheEnd()) { // FIXME: If CastToBase is non-0, we should prefix the output with // "(CastToBase*)". Out << " + 1"; if (IsReference) Out << ')'; } return; } case APValue::Array: { const ArrayType *AT = Ctx.getAsArrayType(Ty); QualType ElemTy = AT->getElementType(); Out << '{'; if (unsigned N = getArrayInitializedElts()) { getArrayInitializedElt(0).printPretty(Out, Ctx, ElemTy); for (unsigned I = 1; I != N; ++I) { Out << ", "; if (I == 10) { // Avoid printing out the entire contents of large arrays. 
Out << "..."; break; } getArrayInitializedElt(I).printPretty(Out, Ctx, ElemTy); } } Out << '}'; return; } case APValue::Struct: { Out << '{'; const RecordDecl *RD = Ty->getAs<RecordType>()->getDecl(); bool First = true; if (unsigned N = getStructNumBases()) { const CXXRecordDecl *CD = cast<CXXRecordDecl>(RD); CXXRecordDecl::base_class_const_iterator BI = CD->bases_begin(); for (unsigned I = 0; I != N; ++I, ++BI) { assert(BI != CD->bases_end()); if (!First) Out << ", "; getStructBase(I).printPretty(Out, Ctx, BI->getType()); First = false; } } for (RecordDecl::field_iterator FI = RD->field_begin(); FI != RD->field_end(); ++FI) { if (!First) Out << ", "; if (FI->isUnnamedBitfield()) continue; getStructField(FI->getFieldIndex()). printPretty(Out, Ctx, FI->getType()); First = false; } Out << '}'; return; } case APValue::Union: Out << '{'; if (const FieldDecl *FD = getUnionField()) { Out << "." << *FD << " = "; getUnionValue().printPretty(Out, Ctx, FD->getType()); } Out << '}'; return; case APValue::MemberPointer: // FIXME: This is not enough to unambiguously identify the member in a // multiple-inheritance scenario. if (const ValueDecl *VD = getMemberPointerDecl()) { Out << '&' << *cast<CXXRecordDecl>(VD->getDeclContext()) << "::" << *VD; return; } Out << "0"; return; case APValue::AddrLabelDiff: Out << "&&" << getAddrLabelDiffLHS()->getLabel()->getName(); Out << " - "; Out << "&&" << getAddrLabelDiffRHS()->getLabel()->getName(); return; } llvm_unreachable("Unknown APValue kind!"); }
/// Performs the compile requested by the user. /// \param Instance Will be reset after performIRGeneration when the verifier /// mode is NoVerify and there were no errors. /// \returns true on error static bool performCompile(std::unique_ptr<CompilerInstance> &Instance, CompilerInvocation &Invocation, ArrayRef<const char *> Args, int &ReturnValue, FrontendObserver *observer) { FrontendOptions opts = Invocation.getFrontendOptions(); FrontendOptions::ActionType Action = opts.RequestedAction; // We've been asked to precompile a bridging header; we want to // avoid touching any other inputs and just parse, emit and exit. if (Action == FrontendOptions::EmitPCH) { auto clangImporter = static_cast<ClangImporter *>( Instance->getASTContext().getClangModuleLoader()); return clangImporter->emitBridgingPCH( Invocation.getInputFilenames()[0], opts.getSingleOutputFilename()); } IRGenOptions &IRGenOpts = Invocation.getIRGenOptions(); bool inputIsLLVMIr = Invocation.getInputKind() == InputFileKind::IFK_LLVM_IR; if (inputIsLLVMIr) { auto &LLVMContext = getGlobalLLVMContext(); // Load in bitcode file. assert(Invocation.getInputFilenames().size() == 1 && "We expect a single input for bitcode input!"); llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> FileBufOrErr = llvm::MemoryBuffer::getFileOrSTDIN(Invocation.getInputFilenames()[0]); if (!FileBufOrErr) { Instance->getASTContext().Diags.diagnose(SourceLoc(), diag::error_open_input_file, Invocation.getInputFilenames()[0], FileBufOrErr.getError().message()); return true; } llvm::MemoryBuffer *MainFile = FileBufOrErr.get().get(); llvm::SMDiagnostic Err; std::unique_ptr<llvm::Module> Module = llvm::parseIR( MainFile->getMemBufferRef(), Err, LLVMContext); if (!Module) { // TODO: Translate from the diagnostic info to the SourceManager location // if available. Instance->getASTContext().Diags.diagnose(SourceLoc(), diag::error_parse_input_file, Invocation.getInputFilenames()[0], Err.getMessage()); return true; } // TODO: remove once the frontend understands what action it should perform IRGenOpts.OutputKind = getOutputKind(Action); return performLLVM(IRGenOpts, Instance->getASTContext(), Module.get()); } ReferencedNameTracker nameTracker; bool shouldTrackReferences = !opts.ReferenceDependenciesFilePath.empty(); if (shouldTrackReferences) Instance->setReferencedNameTracker(&nameTracker); if (Action == FrontendOptions::Parse || Action == FrontendOptions::DumpParse || Action == FrontendOptions::DumpInterfaceHash) Instance->performParseOnly(); else Instance->performSema(); if (Action == FrontendOptions::Parse) return Instance->getASTContext().hadError(); if (observer) { observer->performedSemanticAnalysis(*Instance); } FrontendOptions::DebugCrashMode CrashMode = opts.CrashMode; if (CrashMode == FrontendOptions::DebugCrashMode::AssertAfterParse) debugFailWithAssertion(); else if (CrashMode == FrontendOptions::DebugCrashMode::CrashAfterParse) debugFailWithCrash(); ASTContext &Context = Instance->getASTContext(); if (Action == FrontendOptions::REPL) { runREPL(*Instance, ProcessCmdLine(Args.begin(), Args.end()), Invocation.getParseStdlib()); return Context.hadError(); } SourceFile *PrimarySourceFile = Instance->getPrimarySourceFile(); // We've been told to dump the AST (either after parsing or type-checking, // which is already differentiated in CompilerInstance::performSema()), // so dump or print the main source file and return. 
if (Action == FrontendOptions::DumpParse || Action == FrontendOptions::DumpAST || Action == FrontendOptions::PrintAST || Action == FrontendOptions::DumpScopeMaps || Action == FrontendOptions::DumpTypeRefinementContexts || Action == FrontendOptions::DumpInterfaceHash) { SourceFile *SF = PrimarySourceFile; if (!SF) { SourceFileKind Kind = Invocation.getSourceFileKind(); SF = &Instance->getMainModule()->getMainSourceFile(Kind); } if (Action == FrontendOptions::PrintAST) SF->print(llvm::outs(), PrintOptions::printEverything()); else if (Action == FrontendOptions::DumpScopeMaps) { ASTScope &scope = SF->getScope(); if (opts.DumpScopeMapLocations.empty()) { scope.expandAll(); } else if (auto bufferID = SF->getBufferID()) { SourceManager &sourceMgr = Instance->getSourceMgr(); // Probe each of the locations, and dump what we find. for (auto lineColumn : opts.DumpScopeMapLocations) { SourceLoc loc = sourceMgr.getLocForLineCol(*bufferID, lineColumn.first, lineColumn.second); if (loc.isInvalid()) continue; llvm::errs() << "***Scope at " << lineColumn.first << ":" << lineColumn.second << "***\n"; auto locScope = scope.findInnermostEnclosingScope(loc); locScope->print(llvm::errs(), 0, false, false); // Dump the AST context, too. if (auto dc = locScope->getDeclContext()) { dc->printContext(llvm::errs()); } // Grab the local bindings introduced by this scope. auto localBindings = locScope->getLocalBindings(); if (!localBindings.empty()) { llvm::errs() << "Local bindings: "; interleave(localBindings.begin(), localBindings.end(), [&](ValueDecl *value) { llvm::errs() << value->getFullName(); }, [&]() { llvm::errs() << " "; }); llvm::errs() << "\n"; } } llvm::errs() << "***Complete scope map***\n"; } // Print the resulting map. scope.print(llvm::errs()); } else if (Action == FrontendOptions::DumpTypeRefinementContexts) SF->getTypeRefinementContext()->dump(llvm::errs(), Context.SourceMgr); else if (Action == FrontendOptions::DumpInterfaceHash) SF->dumpInterfaceHash(llvm::errs()); else SF->dump(); return Context.hadError(); } // If we were asked to print Clang stats, do so. if (opts.PrintClangStats && Context.getClangModuleLoader()) Context.getClangModuleLoader()->printStatistics(); if (!opts.DependenciesFilePath.empty()) (void)emitMakeDependencies(Context.Diags, *Instance->getDependencyTracker(), opts); if (shouldTrackReferences) emitReferenceDependencies(Context.Diags, Instance->getPrimarySourceFile(), *Instance->getDependencyTracker(), opts); if (Context.hadError()) return true; // FIXME: This is still a lousy approximation of whether the module file will // be externally consumed. bool moduleIsPublic = !Instance->getMainModule()->hasEntryPoint() && opts.ImplicitObjCHeaderPath.empty() && !Context.LangOpts.EnableAppExtensionRestrictions; // We've just been told to perform a typecheck, so we can return now. 
if (Action == FrontendOptions::Typecheck) { if (!opts.ObjCHeaderOutputPath.empty()) return printAsObjC(opts.ObjCHeaderOutputPath, Instance->getMainModule(), opts.ImplicitObjCHeaderPath, moduleIsPublic); return Context.hadError(); } assert(Action >= FrontendOptions::EmitSILGen && "All actions not requiring SILGen must have been handled!"); std::unique_ptr<SILModule> SM = Instance->takeSILModule(); if (!SM) { if (opts.PrimaryInput.hasValue() && opts.PrimaryInput.getValue().isFilename()) { FileUnit *PrimaryFile = PrimarySourceFile; if (!PrimaryFile) { auto Index = opts.PrimaryInput.getValue().Index; PrimaryFile = Instance->getMainModule()->getFiles()[Index]; } SM = performSILGeneration(*PrimaryFile, Invocation.getSILOptions(), None, opts.SILSerializeAll); } else { SM = performSILGeneration(Instance->getMainModule(), Invocation.getSILOptions(), opts.SILSerializeAll, true); } } if (observer) { observer->performedSILGeneration(*SM); } // We've been told to emit SIL after SILGen, so write it now. if (Action == FrontendOptions::EmitSILGen) { // If we are asked to link all, link all. if (Invocation.getSILOptions().LinkMode == SILOptions::LinkAll) performSILLinking(SM.get(), true); return writeSIL(*SM, Instance->getMainModule(), opts.EmitVerboseSIL, opts.getSingleOutputFilename(), opts.EmitSortedSIL); } if (Action == FrontendOptions::EmitSIBGen) { // If we are asked to link all, link all. if (Invocation.getSILOptions().LinkMode == SILOptions::LinkAll) performSILLinking(SM.get(), true); auto DC = PrimarySourceFile ? ModuleOrSourceFile(PrimarySourceFile) : Instance->getMainModule(); if (!opts.ModuleOutputPath.empty()) { SerializationOptions serializationOpts; serializationOpts.OutputPath = opts.ModuleOutputPath.c_str(); serializationOpts.SerializeAllSIL = true; serializationOpts.IsSIB = true; serialize(DC, serializationOpts, SM.get()); } return Context.hadError(); } // Perform "stable" optimizations that are invariant across compiler versions. if (!Invocation.getDiagnosticOptions().SkipDiagnosticPasses) { if (runSILDiagnosticPasses(*SM)) return true; if (observer) { observer->performedSILDiagnostics(*SM); } } else { // Even if we are not supposed to run the diagnostic passes, we still need // to run the ownership evaluator. if (runSILOwnershipEliminatorPass(*SM)) return true; } // Now if we are asked to link all, link all. if (Invocation.getSILOptions().LinkMode == SILOptions::LinkAll) performSILLinking(SM.get(), true); { SharedTimer timer("SIL verification (pre-optimization)"); SM->verify(); } // Perform SIL optimization passes if optimizations haven't been disabled. // These may change across compiler versions. { SharedTimer timer("SIL optimization"); if (Invocation.getSILOptions().Optimization > SILOptions::SILOptMode::None) { StringRef CustomPipelinePath = Invocation.getSILOptions().ExternalPassPipelineFilename; if (!CustomPipelinePath.empty()) { runSILOptimizationPassesWithFileSpecification(*SM, CustomPipelinePath); } else { runSILOptimizationPasses(*SM); } } else { runSILPassesForOnone(*SM); } } if (observer) { observer->performedSILOptimization(*SM); } { SharedTimer timer("SIL verification (post-optimization)"); SM->verify(); } // Gather instruction counts if we are asked to do so. if (SM->getOptions().PrintInstCounts) { performSILInstCount(&*SM); } // Get the main source file's private discriminator and attach it to // the compile unit's flags. 
if (PrimarySourceFile) { Identifier PD = PrimarySourceFile->getPrivateDiscriminator(); if (!PD.empty()) IRGenOpts.DWARFDebugFlags += (" -private-discriminator "+PD.str()).str(); } if (!opts.ObjCHeaderOutputPath.empty()) { (void)printAsObjC(opts.ObjCHeaderOutputPath, Instance->getMainModule(), opts.ImplicitObjCHeaderPath, moduleIsPublic); } if (Action == FrontendOptions::EmitSIB) { auto DC = PrimarySourceFile ? ModuleOrSourceFile(PrimarySourceFile) : Instance->getMainModule(); if (!opts.ModuleOutputPath.empty()) { SerializationOptions serializationOpts; serializationOpts.OutputPath = opts.ModuleOutputPath.c_str(); serializationOpts.SerializeAllSIL = true; serializationOpts.IsSIB = true; serialize(DC, serializationOpts, SM.get()); } return Context.hadError(); } if (!opts.ModuleOutputPath.empty() || !opts.ModuleDocOutputPath.empty()) { auto DC = PrimarySourceFile ? ModuleOrSourceFile(PrimarySourceFile) : Instance->getMainModule(); if (!opts.ModuleOutputPath.empty()) { SerializationOptions serializationOpts; serializationOpts.OutputPath = opts.ModuleOutputPath.c_str(); serializationOpts.DocOutputPath = opts.ModuleDocOutputPath.c_str(); serializationOpts.GroupInfoPath = opts.GroupInfoPath.c_str(); serializationOpts.SerializeAllSIL = opts.SILSerializeAll; if (opts.SerializeBridgingHeader) serializationOpts.ImportedHeader = opts.ImplicitObjCHeaderPath; serializationOpts.ModuleLinkName = opts.ModuleLinkName; serializationOpts.ExtraClangOptions = Invocation.getClangImporterOptions().ExtraArgs; serializationOpts.EnableNestedTypeLookupTable = opts.EnableSerializationNestedTypeLookupTable; if (!IRGenOpts.ForceLoadSymbolName.empty()) serializationOpts.AutolinkForceLoad = true; // Options contain information about the developer's computer, // so only serialize them if the module isn't going to be shipped to // the public. serializationOpts.SerializeOptionsForDebugging = !moduleIsPublic || opts.AlwaysSerializeDebuggingOptions; serialize(DC, serializationOpts, SM.get()); } if (Action == FrontendOptions::EmitModuleOnly) return Context.hadError(); } assert(Action >= FrontendOptions::EmitSIL && "All actions not requiring SILPasses must have been handled!"); // We've been told to write canonical SIL, so write it now. if (Action == FrontendOptions::EmitSIL) { return writeSIL(*SM, Instance->getMainModule(), opts.EmitVerboseSIL, opts.getSingleOutputFilename(), opts.EmitSortedSIL); } assert(Action >= FrontendOptions::Immediate && "All actions not requiring IRGen must have been handled!"); assert(Action != FrontendOptions::REPL && "REPL mode must be handled immediately after Instance->performSema()"); // Check if we had any errors; if we did, don't proceed to IRGen. if (Context.hadError()) return true; // Cleanup instructions/builtin calls not suitable for IRGen. 
performSILCleanup(SM.get()); // TODO: remove once the frontend understands what action it should perform IRGenOpts.OutputKind = getOutputKind(Action); if (Action == FrontendOptions::Immediate) { assert(!PrimarySourceFile && "-i doesn't work in -primary-file mode"); IRGenOpts.UseJIT = true; IRGenOpts.DebugInfoKind = IRGenDebugInfoKind::Normal; const ProcessCmdLine &CmdLine = ProcessCmdLine(opts.ImmediateArgv.begin(), opts.ImmediateArgv.end()); Instance->setSILModule(std::move(SM)); if (observer) { observer->aboutToRunImmediately(*Instance); } ReturnValue = RunImmediately(*Instance, CmdLine, IRGenOpts, Invocation.getSILOptions()); return Context.hadError(); } // FIXME: We shouldn't need to use the global context here, but // something is persisting across calls to performIRGeneration. auto &LLVMContext = getGlobalLLVMContext(); std::unique_ptr<llvm::Module> IRModule; llvm::GlobalVariable *HashGlobal; if (PrimarySourceFile) { IRModule = performIRGeneration(IRGenOpts, *PrimarySourceFile, std::move(SM), opts.getSingleOutputFilename(), LLVMContext, 0, &HashGlobal); } else { IRModule = performIRGeneration(IRGenOpts, Instance->getMainModule(), std::move(SM), opts.getSingleOutputFilename(), LLVMContext, &HashGlobal); } // Just because we had an AST error it doesn't mean we can't performLLVM. bool HadError = Instance->getASTContext().hadError(); // If the AST Context has no errors but no IRModule is available, // parallelIRGen happened correctly, since parallel IRGen produces multiple // modules. if (!IRModule) { return HadError; } std::unique_ptr<llvm::TargetMachine> TargetMachine = createTargetMachine(IRGenOpts, Context); version::Version EffectiveLanguageVersion = Context.LangOpts.EffectiveLanguageVersion; DiagnosticEngine &Diags = Context.Diags; const DiagnosticOptions &DiagOpts = Invocation.getDiagnosticOptions(); // Delete the compiler instance now that we have an IRModule. if (DiagOpts.VerifyMode == DiagnosticOptions::NoVerify) { SM.reset(); Instance.reset(); } // Now that we have a single IR Module, hand it over to performLLVM. return performLLVM(IRGenOpts, &Diags, nullptr, HashGlobal, IRModule.get(), TargetMachine.get(), EffectiveLanguageVersion, opts.getSingleOutputFilename()) || HadError; }
void MaterializeForSetEmitter::emit(SILGenFunction &gen, ManagedValue self, SILValue resultBuffer, SILValue callbackBuffer, ArrayRef<ManagedValue> indices) { SILLocation loc = Witness; loc.markAutoGenerated(); // If there's an abstraction difference, we always need to use the // get/set pattern. AccessStrategy strategy; if (WitnessStorage->getType()->is<ReferenceStorageType>() || (Conformance && RequirementStorageType != WitnessStorageType)) { strategy = AccessStrategy::DispatchToAccessor; } else { strategy = WitnessStorage->getAccessStrategy(TheAccessSemantics, AccessKind::ReadWrite); } // Handle the indices. RValue indicesRV; if (isa<SubscriptDecl>(WitnessStorage)) { indicesRV = collectIndicesFromParameters(gen, loc, indices); } else { assert(indices.empty() && "indices for a non-subscript?"); } // As above, assume that we don't need to reabstract 'self'. // Choose the right implementation. SILValue address; SILFunction *callbackFn = nullptr; switch (strategy) { case AccessStrategy::Storage: address = emitUsingStorage(gen, loc, self, std::move(indicesRV)); break; case AccessStrategy::Addressor: address = emitUsingAddressor(gen, loc, self, std::move(indicesRV), callbackBuffer, callbackFn); break; case AccessStrategy::DirectToAccessor: case AccessStrategy::DispatchToAccessor: address = emitUsingGetterSetter(gen, loc, self, std::move(indicesRV), resultBuffer, callbackBuffer, callbackFn); break; } // Return the address as a Builtin.RawPointer. SILType rawPointerTy = SILType::getRawPointerType(gen.getASTContext()); address = gen.B.createAddressToPointer(loc, address, rawPointerTy); SILType resultTupleTy = gen.F.mapTypeIntoContext( gen.F.getLoweredFunctionType()->getSILResult()); SILType optCallbackTy = resultTupleTy.getTupleElementType(1); // Form the callback. SILValue callback; if (callbackFn) { // Make a reference to the function. callback = gen.B.createFunctionRef(loc, callbackFn); // If it's polymorphic, cast to RawPointer and then back to the // right monomorphic type. The safety of this cast relies on some // assumptions about what exactly IRGen can reconstruct from the // callback's thick type argument. if (callbackFn->getLoweredFunctionType()->isPolymorphic()) { callback = gen.B.createThinFunctionToPointer(loc, callback, rawPointerTy); OptionalTypeKind optKind; auto callbackTy = optCallbackTy.getAnyOptionalObjectType(SGM.M, optKind); callback = gen.B.createPointerToThinFunction(loc, callback, callbackTy); } callback = gen.B.createOptionalSome(loc, callback, optCallbackTy); } else { callback = gen.B.createOptionalNone(loc, optCallbackTy); } // Form the result and return. auto result = gen.B.createTuple(loc, resultTupleTy, { address, callback }); gen.Cleanups.emitCleanupsForReturn(CleanupLocation::get(loc)); gen.B.createReturn(loc, result); }
void OMPExecutableDirective::setClauses(ArrayRef<OMPClause *> Clauses) {
  assert(Clauses.size() == getNumClauses() &&
         "Number of clauses is not the same as the preallocated buffer");
  std::copy(Clauses.begin(), Clauses.end(), getClauses().begin());
}
/// This function goes through the arguments of F and sees if we have anything
/// to optimize in which case it returns true. If we have nothing to optimize,
/// it returns false.
bool FunctionAnalyzer::analyze() {
  // For now ignore functions with indirect results.
  if (F->getLoweredFunctionType()->hasIndirectResult())
    return false;

  ArrayRef<SILArgument *> Args = F->begin()->getBBArgs();

  // A map from consumed SILArguments to the release associated with an
  // argument.
  ConsumedArgToEpilogueReleaseMatcher ArgToReturnReleaseMap(RCIA, F);
  ConsumedArgToEpilogueReleaseMatcher ArgToThrowReleaseMap(
      RCIA, F, ConsumedArgToEpilogueReleaseMatcher::ExitKind::Throw);

  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    ArgumentDescriptor A(Allocator, Args[i]);
    bool HaveOptimizedArg = false;

    bool isABIRequired = isArgumentABIRequired(Args[i]);
    auto OnlyRelease = getNonTrivialNonDebugReleaseUse(Args[i]);

    // If this argument is not ABI required and has no uses except for debug
    // instructions, remove it.
    if (!isABIRequired && OnlyRelease && OnlyRelease.getValue().isNull()) {
      A.IsDead = true;
      HaveOptimizedArg = true;
      ++NumDeadArgsEliminated;
    }

    // See if we can find a ref count equivalent strong_release or
    // release_value at the end of this function if our argument is an @owned
    // parameter.
    if (A.hasConvention(ParameterConvention::Direct_Owned)) {
      if (auto *Release = ArgToReturnReleaseMap.releaseForArgument(A.Arg)) {
        SILInstruction *ReleaseInThrow = nullptr;

        // If the function has a throw block we must also find a matching
        // release in the throw block.
        if (!ArgToThrowReleaseMap.hasBlock() ||
            (ReleaseInThrow = ArgToThrowReleaseMap.releaseForArgument(A.Arg))) {

          // TODO: accept a second release in the throw block to let the
          // argument be dead.
          if (OnlyRelease && OnlyRelease.getValue().getPtrOrNull() == Release) {
            A.IsDead = true;
          }
          A.CalleeRelease = Release;
          A.CalleeReleaseInThrowBlock = ReleaseInThrow;
          HaveOptimizedArg = true;
          ++NumOwnedConvertedToGuaranteed;
        }
      }
    }

    if (A.shouldExplode()) {
      HaveOptimizedArg = true;
      ++NumSROAArguments;
    }

    if (HaveOptimizedArg) {
      ShouldOptimize = true;
      // Store that we have modified the self argument. We need to change the
      // calling convention later.
      if (Args[i]->isSelf())
        HaveModifiedSelfArgument = true;
    }

    // Add the argument to our list.
    ArgDescList.push_back(std::move(A));
  }

  return ShouldOptimize;
}
void NonNullParamChecker::checkPreCall(const CallEvent &Call,
                                       CheckerContext &C) const {
  const Decl *FD = Call.getDecl();
  if (!FD)
    return;

  // Merge all non-null attributes
  unsigned NumArgs = Call.getNumArgs();
  llvm::SmallBitVector AttrNonNull(NumArgs);
  for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
    if (!NonNull->args_size()) {
      AttrNonNull.set(0, NumArgs);
      break;
    }
    for (unsigned Val : NonNull->args()) {
      if (Val >= NumArgs)
        continue;
      AttrNonNull.set(Val);
    }
  }

  ProgramStateRef state = C.getState();

  CallEvent::param_type_iterator TyI = Call.param_type_begin(),
                                 TyE = Call.param_type_end();

  for (unsigned idx = 0; idx < NumArgs; ++idx) {
    // Check if the parameter is a reference. We want to report when a
    // reference to a null pointer is passed as a parameter.
    bool haveRefTypeParam = false;
    if (TyI != TyE) {
      haveRefTypeParam = (*TyI)->isReferenceType();
      TyI++;
    }

    bool haveAttrNonNull = AttrNonNull[idx];
    if (!haveAttrNonNull) {
      // Check if the parameter is also marked 'nonnull'.
      ArrayRef<ParmVarDecl*> parms = Call.parameters();
      if (idx < parms.size())
        haveAttrNonNull = parms[idx]->hasAttr<NonNullAttr>();
    }

    if (!haveRefTypeParam && !haveAttrNonNull)
      continue;

    // If the value is unknown or undefined, we can't perform this check.
    const Expr *ArgE = Call.getArgExpr(idx);
    SVal V = Call.getArgSVal(idx);
    Optional<DefinedSVal> DV = V.getAs<DefinedSVal>();
    if (!DV)
      continue;

    // Process the case when the argument is not a location.
    assert(!haveRefTypeParam || DV->getAs<Loc>());

    if (haveAttrNonNull && !DV->getAs<Loc>()) {
      // If the argument is a union type, we want to handle a potential
      // transparent_union GCC extension.
      if (!ArgE)
        continue;

      QualType T = ArgE->getType();
      const RecordType *UT = T->getAsUnionType();
      if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
        continue;

      if (Optional<nonloc::CompoundVal> CSV =
              DV->getAs<nonloc::CompoundVal>()) {
        nonloc::CompoundVal::iterator CSV_I = CSV->begin();
        assert(CSV_I != CSV->end());
        V = *CSV_I;
        DV = V.getAs<DefinedSVal>();
        assert(++CSV_I == CSV->end());
        // FIXME: Handle (some_union){ some_other_union_val }, which turns
        // into a LazyCompoundVal inside a CompoundVal.
        if (!V.getAs<Loc>())
          continue;
        // Retrieve the corresponding expression.
        if (const CompoundLiteralExpr *CE = dyn_cast<CompoundLiteralExpr>(ArgE))
          if (const InitListExpr *IE =
                  dyn_cast<InitListExpr>(CE->getInitializer()))
            ArgE = dyn_cast<Expr>(*(IE->begin()));
      } else {
        // FIXME: Handle LazyCompoundVals?
        continue;
      }
    }

    ConstraintManager &CM = C.getConstraintManager();
    ProgramStateRef stateNotNull, stateNull;
    std::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);

    if (stateNull) {
      if (!stateNotNull) {
        // Generate an error node. Check for a null node in case
        // we cache out.
        if (ExplodedNode *errorNode = C.generateErrorNode(stateNull)) {

          std::unique_ptr<BugReport> R;
          if (haveAttrNonNull)
            R = genReportNullAttrNonNull(errorNode, ArgE);
          else if (haveRefTypeParam)
            R = genReportReferenceToNullPointer(errorNode, ArgE);

          // Highlight the range of the argument that was null.
          R->addRange(Call.getArgSourceRange(idx));

          // Emit the bug report.
          C.emitReport(std::move(R));
        }

        // Always return. Either we cached out or we just emitted an error.
        return;
      }

      if (ExplodedNode *N = C.generateSink(stateNull, C.getPredecessor())) {
        ImplicitNullDerefEvent event = {
            V, false, N, &C.getBugReporter(),
            /*IsDirectDereference=*/haveRefTypeParam};
        dispatchEvent(event);
      }
    }

    // If a pointer value passed the check we should assume that it is
    // indeed not null from this point forward.
    assert(stateNotNull);
    state = stateNotNull;
  }

  // If we reach here all of the arguments passed the nonnull check.
  // If 'state' has been updated, generate a new node.
  C.addTransition(state);
}
/// Produce note diagnostics for a jump into a protected scope.
void JumpScopeChecker::NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes) {
  assert(!ToScopes.empty());
  for (unsigned I = 0, E = ToScopes.size(); I != E; ++I)
    if (Scopes[ToScopes[I]].InDiag)
      S.Diag(Scopes[ToScopes[I]].Loc, Scopes[ToScopes[I]].InDiag);
}
// Find the minimum offset that we may store a value of size Size bits at. If // IsAfter is set, look for an offset before the object, otherwise look for an // offset after the object. uint64_t wholeprogramdevirt::findLowestOffset(ArrayRef<VirtualCallTarget> Targets, bool IsAfter, uint64_t Size) { // Find a minimum offset taking into account only vtable sizes. uint64_t MinByte = 0; for (const VirtualCallTarget &Target : Targets) { if (IsAfter) MinByte = std::max(MinByte, Target.minAfterBytes()); else MinByte = std::max(MinByte, Target.minBeforeBytes()); } // Build a vector of arrays of bytes covering, for each target, a slice of the // used region (see AccumBitVector::BytesUsed in // llvm/Transforms/IPO/WholeProgramDevirt.h) starting at MinByte. Effectively, // this aligns the used regions to start at MinByte. // // In this example, A, B and C are vtables, # is a byte already allocated for // a virtual function pointer, AAAA... (etc.) are the used regions for the // vtables and Offset(X) is the value computed for the Offset variable below // for X. // // Offset(A) // | | // |MinByte // A: ################AAAAAAAA|AAAAAAAA // B: ########BBBBBBBBBBBBBBBB|BBBB // C: ########################|CCCCCCCCCCCCCCCC // | Offset(B) | // // This code produces the slices of A, B and C that appear after the divider // at MinByte. std::vector<ArrayRef<uint8_t>> Used; for (const VirtualCallTarget &Target : Targets) { ArrayRef<uint8_t> VTUsed = IsAfter ? Target.TM->Bits->After.BytesUsed : Target.TM->Bits->Before.BytesUsed; uint64_t Offset = IsAfter ? MinByte - Target.minAfterBytes() : MinByte - Target.minBeforeBytes(); // Disregard used regions that are smaller than Offset. These are // effectively all-free regions that do not need to be checked. if (VTUsed.size() > Offset) Used.push_back(VTUsed.slice(Offset)); } if (Size == 1) { // Find a free bit in each member of Used. for (unsigned I = 0;; ++I) { uint8_t BitsUsed = 0; for (auto &&B : Used) if (I < B.size()) BitsUsed |= B[I]; if (BitsUsed != 0xff) return (MinByte + I) * 8 + countTrailingZeros(uint8_t(~BitsUsed), ZB_Undefined); } } else { // Find a free (Size/8) byte region in each member of Used. // FIXME: see if alignment helps. for (unsigned I = 0;; ++I) { for (auto &&B : Used) { unsigned Byte = 0; while ((I + Byte) < B.size() && Byte < (Size / 8)) { if (B[I + Byte]) goto NextI; ++Byte; } } return (MinByte + I) * 8; NextI:; } } }
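// A standalone sketch (hypothetical names) of the Size == 1 search above:
// ORing the used-bits byte at position I across all targets yields 0xff only
// if every bit is taken; otherwise the lowest clear bit is free. This sketch
// uses the GCC/Clang __builtin_ctz intrinsic in place of countTrailingZeros.
#include <cstdint>
#include <vector>

uint64_t findFreeBit(const std::vector<std::vector<uint8_t>> &Used,
                     uint64_t MinByte) {
  for (unsigned I = 0;; ++I) {
    uint8_t BitsUsed = 0;
    for (const auto &B : Used)
      if (I < B.size())
        BitsUsed |= B[I];
    if (BitsUsed != 0xff)
      // ~BitsUsed is nonzero here, so ctz is well-defined.
      return (MinByte + I) * 8 +
             __builtin_ctz(static_cast<uint8_t>(~BitsUsed));
  }
}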
std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf, const LiveIntervals *lis, const MachineLoopInfo *loopInfo, const RegSet &vregs) { LiveIntervals *LIS = const_cast<LiveIntervals*>(lis); MachineRegisterInfo *mri = &mf->getRegInfo(); const TargetRegisterInfo *tri = mf->getTarget().getRegisterInfo(); std::auto_ptr<PBQPRAProblem> p(new PBQPRAProblem()); PBQP::Graph &g = p->getGraph(); RegSet pregs; // Collect the set of preg intervals, record that they're used in the MF. for (unsigned Reg = 1, e = tri->getNumRegs(); Reg != e; ++Reg) { if (mri->def_empty(Reg)) continue; pregs.insert(Reg); mri->setPhysRegUsed(Reg); } BitVector reservedRegs = tri->getReservedRegs(*mf); // Iterate over vregs. for (RegSet::const_iterator vregItr = vregs.begin(), vregEnd = vregs.end(); vregItr != vregEnd; ++vregItr) { unsigned vreg = *vregItr; const TargetRegisterClass *trc = mri->getRegClass(vreg); LiveInterval *vregLI = &LIS->getInterval(vreg); // Record any overlaps with regmask operands. BitVector regMaskOverlaps; LIS->checkRegMaskInterference(*vregLI, regMaskOverlaps); // Compute an initial allowed set for the current vreg. typedef std::vector<unsigned> VRAllowed; VRAllowed vrAllowed; ArrayRef<uint16_t> rawOrder = trc->getRawAllocationOrder(*mf); for (unsigned i = 0; i != rawOrder.size(); ++i) { unsigned preg = rawOrder[i]; if (reservedRegs.test(preg)) continue; // vregLI crosses a regmask operand that clobbers preg. if (!regMaskOverlaps.empty() && !regMaskOverlaps.test(preg)) continue; // vregLI overlaps fixed regunit interference. bool Interference = false; for (MCRegUnitIterator Units(preg, tri); Units.isValid(); ++Units) { if (vregLI->overlaps(LIS->getRegUnit(*Units))) { Interference = true; break; } } if (Interference) continue; // preg is usable for this virtual register. vrAllowed.push_back(preg); } // Construct the node. PBQP::Graph::NodeItr node = g.addNode(PBQP::Vector(vrAllowed.size() + 1, 0)); // Record the mapping and allowed set in the problem. p->recordVReg(vreg, node, vrAllowed.begin(), vrAllowed.end()); PBQP::PBQPNum spillCost = (vregLI->weight != 0.0) ? vregLI->weight : std::numeric_limits<PBQP::PBQPNum>::min(); addSpillCosts(g.getNodeCosts(node), spillCost); } for (RegSet::const_iterator vr1Itr = vregs.begin(), vrEnd = vregs.end(); vr1Itr != vrEnd; ++vr1Itr) { unsigned vr1 = *vr1Itr; const LiveInterval &l1 = lis->getInterval(vr1); const PBQPRAProblem::AllowedSet &vr1Allowed = p->getAllowedSet(vr1); for (RegSet::const_iterator vr2Itr = llvm::next(vr1Itr); vr2Itr != vrEnd; ++vr2Itr) { unsigned vr2 = *vr2Itr; const LiveInterval &l2 = lis->getInterval(vr2); const PBQPRAProblem::AllowedSet &vr2Allowed = p->getAllowedSet(vr2); assert(!l2.empty() && "Empty interval in vreg set?"); if (l1.overlaps(l2)) { PBQP::Graph::EdgeItr edge = g.addEdge(p->getNodeForVReg(vr1), p->getNodeForVReg(vr2), PBQP::Matrix(vr1Allowed.size()+1, vr2Allowed.size()+1, 0)); addInterferenceCosts(g.getEdgeCosts(edge), vr1Allowed, vr2Allowed, tri); } } } return p; }
MCDisassembler::DecodeStatus WebAssemblyDisassembler::getInstruction( MCInst &MI, uint64_t &Size, ArrayRef<uint8_t> Bytes, uint64_t /*Address*/, raw_ostream &OS, raw_ostream &CS) const { Size = 0; uint64_t Pos = 0; // Read the opcode. if (Pos + sizeof(uint64_t) > Bytes.size()) return MCDisassembler::Fail; uint64_t Opcode = support::endian::read64le(Bytes.data() + Pos); Pos += sizeof(uint64_t); if (Opcode >= WebAssembly::INSTRUCTION_LIST_END) return MCDisassembler::Fail; MI.setOpcode(Opcode); const MCInstrDesc &Desc = MCII->get(Opcode); unsigned NumFixedOperands = Desc.NumOperands; // If it's variadic, read the number of extra operands. unsigned NumExtraOperands = 0; if (Desc.isVariadic()) { if (Pos + sizeof(uint64_t) > Bytes.size()) return MCDisassembler::Fail; NumExtraOperands = support::endian::read64le(Bytes.data() + Pos); Pos += sizeof(uint64_t); } // Read the fixed operands. These are described by the MCInstrDesc. for (unsigned i = 0; i < NumFixedOperands; ++i) { const MCOperandInfo &Info = Desc.OpInfo[i]; switch (Info.OperandType) { case MCOI::OPERAND_IMMEDIATE: case WebAssembly::OPERAND_P2ALIGN: case WebAssembly::OPERAND_BASIC_BLOCK: { if (Pos + sizeof(uint64_t) > Bytes.size()) return MCDisassembler::Fail; uint64_t Imm = support::endian::read64le(Bytes.data() + Pos); Pos += sizeof(uint64_t); MI.addOperand(MCOperand::createImm(Imm)); break; } case MCOI::OPERAND_REGISTER: { if (Pos + sizeof(uint64_t) > Bytes.size()) return MCDisassembler::Fail; uint64_t Reg = support::endian::read64le(Bytes.data() + Pos); Pos += sizeof(uint64_t); MI.addOperand(MCOperand::createReg(Reg)); break; } case WebAssembly::OPERAND_FPIMM: { // TODO: MC converts all floating point immediate operands to double. // This is fine for numeric values, but may cause NaNs to change bits. if (Pos + sizeof(uint64_t) > Bytes.size()) return MCDisassembler::Fail; uint64_t Bits = support::endian::read64le(Bytes.data() + Pos); Pos += sizeof(uint64_t); double Imm; memcpy(&Imm, &Bits, sizeof(Imm)); MI.addOperand(MCOperand::createFPImm(Imm)); break; } default: llvm_unreachable("unimplemented operand kind"); } } // Read the extra operands. assert(NumExtraOperands == 0 || Desc.isVariadic()); for (unsigned i = 0; i < NumExtraOperands; ++i) { if (Pos + sizeof(uint64_t) > Bytes.size()) return MCDisassembler::Fail; if (Desc.TSFlags & WebAssemblyII::VariableOpIsImmediate) { // Decode extra immediate operands. uint64_t Imm = support::endian::read64le(Bytes.data() + Pos); MI.addOperand(MCOperand::createImm(Imm)); } else { // Decode extra register operands. uint64_t Reg = support::endian::read64le(Bytes.data() + Pos); MI.addOperand(MCOperand::createReg(Reg)); } Pos += sizeof(uint64_t); } Size = Pos; return MCDisassembler::Success; }
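// A standalone sketch (hypothetical names) of the bounds-checked read the
// decoder above repeats before every field. The real code uses
// support::endian::read64le; this sketch assumes a little-endian host and
// plain std::vector storage.
#include <cstdint>
#include <cstring>
#include <optional>
#include <vector>

std::optional<uint64_t> readU64LE(const std::vector<uint8_t> &Bytes,
                                  uint64_t &Pos) {
  if (Pos + sizeof(uint64_t) > Bytes.size())
    return std::nullopt; // mirrors returning MCDisassembler::Fail
  uint64_t V;
  std::memcpy(&V, Bytes.data() + Pos, sizeof V);
  Pos += sizeof(uint64_t);
  return V;
}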
void MachineFunction::setCallSiteLandingPad(MCSymbol *Sym,
                                            ArrayRef<unsigned> Sites) {
  LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
}
/// compute - Compute the preferred allocation order for RC with reserved
/// registers filtered out. Volatile registers come first followed by CSR
/// aliases ordered according to the CSR order specified by the target.
void RegisterClassInfo::compute(const TargetRegisterClass *RC) const {
  RCInfo &RCI = RegClass[RC->getID()];

  // Raw register count, including all reserved regs.
  unsigned NumRegs = RC->getNumRegs();

  if (!RCI.Order)
    RCI.Order.reset(new MCPhysReg[NumRegs]);

  unsigned N = 0;
  SmallVector<MCPhysReg, 16> CSRAlias;
  unsigned MinCost = 0xff;
  unsigned LastCost = ~0u;
  unsigned LastCostChange = 0;

  // FIXME: Once targets reserve registers instead of removing them from the
  // allocation order, we can simply use begin/end here.
  ArrayRef<MCPhysReg> RawOrder = RC->getRawAllocationOrder(*MF);
  for (unsigned i = 0; i != RawOrder.size(); ++i) {
    unsigned PhysReg = RawOrder[i];
    // Remove reserved registers from the allocation order.
    if (Reserved.test(PhysReg))
      continue;
    unsigned Cost = TRI->getCostPerUse(PhysReg);
    MinCost = std::min(MinCost, Cost);

    if (CSRNum[PhysReg])
      // PhysReg aliases a CSR, save it for later.
      CSRAlias.push_back(PhysReg);
    else {
      if (Cost != LastCost)
        LastCostChange = N;
      RCI.Order[N++] = PhysReg;
      LastCost = Cost;
    }
  }
  RCI.NumRegs = N + CSRAlias.size();
  assert(RCI.NumRegs <= NumRegs && "Allocation order larger than regclass");

  // CSR aliases go after the volatile registers, preserve the target's order.
  for (unsigned i = 0, e = CSRAlias.size(); i != e; ++i) {
    unsigned PhysReg = CSRAlias[i];
    unsigned Cost = TRI->getCostPerUse(PhysReg);
    if (Cost != LastCost)
      LastCostChange = N;
    RCI.Order[N++] = PhysReg;
    LastCost = Cost;
  }

  // Register allocator stress test. Clip register class to N registers.
  if (StressRA && RCI.NumRegs > StressRA)
    RCI.NumRegs = StressRA;

  // Check if RC is a proper sub-class.
  if (const TargetRegisterClass *Super = TRI->getLargestLegalSuperClass(RC))
    if (Super != RC && getNumAllocatableRegs(Super) > RCI.NumRegs)
      RCI.ProperSubClass = true;

  RCI.MinCost = uint8_t(MinCost);
  RCI.LastCostChange = LastCostChange;

  DEBUG({
    dbgs() << "AllocationOrder(" << RC->getName() << ") = [";
    for (unsigned I = 0; I != RCI.NumRegs; ++I)
      dbgs() << ' ' << PrintReg(RCI.Order[I], TRI);
    dbgs() << (RCI.ProperSubClass ? " ] (sub-class)\n" : " ]\n");
  });
}
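// A standalone sketch (hypothetical names) of just the ordering produced by
// the two loops above: non-CSR registers keep their raw relative order at the
// front, and CSR aliases keep theirs at the back. Cost tracking is omitted.
#include <algorithm>
#include <cstdint>
#include <vector>

std::vector<uint16_t> orderRegs(std::vector<uint16_t> RawOrder,
                                const std::vector<bool> &IsCSRAlias) {
  // stable_partition preserves relative order within both groups, matching
  // the "volatiles first, then CSR aliases" layout.
  std::stable_partition(RawOrder.begin(), RawOrder.end(),
                        [&](uint16_t R) { return !IsCSRAlias[R]; });
  return RawOrder;
}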
void llvm::printCOFFUnwindInfo(const COFFObjectFile *Obj) {
  const coff_file_header *Header;
  if (error(Obj->getHeader(Header)))
    return;

  if (Header->Machine != COFF::IMAGE_FILE_MACHINE_AMD64) {
    errs() << "Unsupported image machine type "
              "(currently only AMD64 is supported).\n";
    return;
  }

  const coff_section *Pdata = 0;
  error_code ec;
  for (section_iterator SI = Obj->begin_sections(), SE = Obj->end_sections();
       SI != SE; SI.increment(ec)) {
    if (error(ec))
      return;

    StringRef Name;
    if (error(SI->getName(Name)))
      continue;

    if (Name != ".pdata")
      continue;

    Pdata = Obj->getCOFFSection(SI);
    std::vector<RelocationRef> Rels;
    for (relocation_iterator RI = SI->begin_relocations(),
                             RE = SI->end_relocations();
         RI != RE; RI.increment(ec)) {
      if (error(ec))
        break;
      Rels.push_back(*RI);
    }

    // Sort relocations by address.
    std::sort(Rels.begin(), Rels.end(), RelocAddressLess);

    ArrayRef<uint8_t> Contents;
    if (error(Obj->getSectionContents(Pdata, Contents)))
      continue;
    if (Contents.empty())
      continue;

    ArrayRef<RuntimeFunction> RFs(
        reinterpret_cast<const RuntimeFunction *>(Contents.data()),
        Contents.size() / sizeof(RuntimeFunction));

    for (const RuntimeFunction *I = RFs.begin(), *E = RFs.end(); I < E; ++I) {
      const uint64_t SectionOffset =
          std::distance(RFs.begin(), I) * sizeof(RuntimeFunction);

      outs() << "Function Table:\n";

      outs() << "  Start Address: ";
      printCOFFSymbolAddress(outs(), Rels, SectionOffset +
                             /*offsetof(RuntimeFunction, StartAddress)*/ 0,
                             I->StartAddress);
      outs() << "\n";

      outs() << "  End Address: ";
      printCOFFSymbolAddress(outs(), Rels, SectionOffset +
                             /*offsetof(RuntimeFunction, EndAddress)*/ 4,
                             I->EndAddress);
      outs() << "\n";

      outs() << "  Unwind Info Address: ";
      printCOFFSymbolAddress(outs(), Rels, SectionOffset +
                             /*offsetof(RuntimeFunction, UnwindInfoOffset)*/ 8,
                             I->UnwindInfoOffset);
      outs() << "\n";

      ArrayRef<uint8_t> XContents;
      uint64_t UnwindInfoOffset = 0;
      if (error(getSectionContents(Obj, Rels, SectionOffset +
                              /*offsetof(RuntimeFunction, UnwindInfoOffset)*/ 8,
                                   XContents, UnwindInfoOffset)))
        continue;
      if (XContents.empty())
        continue;

      UnwindInfoOffset += I->UnwindInfoOffset;
      if (UnwindInfoOffset > XContents.size())
        continue;

      const Win64EH::UnwindInfo *UI =
          reinterpret_cast<const Win64EH::UnwindInfo *>
            (XContents.data() + UnwindInfoOffset);

      // The casts to int are required in order to output the value as a
      // number. Without the casts the value would be interpreted as char
      // data (which results in garbage output).
      outs() << "  Version: " << static_cast<int>(UI->getVersion()) << "\n";
      outs() << "  Flags: " << static_cast<int>(UI->getFlags());
      if (UI->getFlags()) {
        if (UI->getFlags() & UNW_ExceptionHandler)
          outs() << " UNW_ExceptionHandler";
        if (UI->getFlags() & UNW_TerminateHandler)
          outs() << " UNW_TerminateHandler";
        if (UI->getFlags() & UNW_ChainInfo)
          outs() << " UNW_ChainInfo";
      }
      outs() << "\n";
      outs() << "  Size of prolog: " << static_cast<int>(UI->PrologSize)
             << "\n";
      outs() << "  Number of Codes: " << static_cast<int>(UI->NumCodes)
             << "\n";

      // Maybe this should move to output of UOP_SetFPReg?
      if (UI->getFrameRegister()) {
        outs() << "  Frame register: "
               << getUnwindRegisterName(UI->getFrameRegister()) << "\n";
        outs() << "  Frame offset: " << 16 * UI->getFrameOffset() << "\n";
      } else {
        outs() << "  No frame pointer used\n";
      }

      if (UI->getFlags() & (UNW_ExceptionHandler | UNW_TerminateHandler)) {
        // FIXME: Output exception handler data
      } else if (UI->getFlags() & UNW_ChainInfo) {
        // FIXME: Output chained unwind info
      }

      if (UI->NumCodes)
        outs() << "  Unwind Codes:\n";

      printAllUnwindCodes(ArrayRef<UnwindCode>(&UI->UnwindCodes[0],
                                               UI->NumCodes));

      outs() << "\n\n";
      outs().flush();
    }
  }
}
ArrayRef<Ref<DataBlock> > DataBlock::getDataBlocks(ArrayRef<unsigned char> rawCodewords, Version *version, ErrorCorrectionLevel &ecLevel) { // Figure out the number and size of data blocks used by this version and // error correction level Version::ECBlocks &ecBlocks = version->getECBlocksForLevel(ecLevel); // First count the total number of data blocks int totalBlocks = 0; vector<Version::ECB*> ecBlockArray = ecBlocks.getECBlocks(); for (size_t i = 0; i < ecBlockArray.size(); i++) { totalBlocks += ecBlockArray[i]->getCount(); } // Now establish DataBlocks of the appropriate size and number of data codewords ArrayRef<Ref<DataBlock> > result(totalBlocks); int numResultBlocks = 0; for (size_t j = 0; j < ecBlockArray.size(); j++) { Version::ECB *ecBlock = ecBlockArray[j]; for (int i = 0; i < ecBlock->getCount(); i++) { int numDataCodewords = ecBlock->getDataCodewords(); int numBlockCodewords = ecBlocks.getECCodewords() + numDataCodewords; ArrayRef<unsigned char> buffer(numBlockCodewords); Ref<DataBlock> blockRef(new DataBlock(numDataCodewords, buffer)); result[numResultBlocks++] = blockRef; } } // All blocks have the same amount of data, except that the last n // (where n may be 0) have 1 more byte. Figure out where these start. int shorterBlocksTotalCodewords = result[0]->codewords_.size(); int longerBlocksStartAt = result->size() - 1; while (longerBlocksStartAt >= 0) { int numCodewords = result[longerBlocksStartAt]->codewords_.size(); if (numCodewords == shorterBlocksTotalCodewords) { break; } if (numCodewords != shorterBlocksTotalCodewords + 1) { throw IllegalArgumentException("Data block sizes differ by more than 1"); } longerBlocksStartAt--; } longerBlocksStartAt++; int shorterBlocksNumDataCodewords = shorterBlocksTotalCodewords - ecBlocks.getECCodewords(); // The last elements of result may be 1 element longer; // first fill out as many elements as all of them have int rawCodewordsOffset = 0; for (int i = 0; i < shorterBlocksNumDataCodewords; i++) { for (int j = 0; j < numResultBlocks; j++) { result[j]->codewords_[i] = rawCodewords[rawCodewordsOffset++]; } } // Fill out the last data block in the longer ones for (int j = longerBlocksStartAt; j < numResultBlocks; j++) { result[j]->codewords_[shorterBlocksNumDataCodewords] = rawCodewords[rawCodewordsOffset++]; } // Now add in error correction blocks int max = result[0]->codewords_.size(); for (int i = shorterBlocksNumDataCodewords; i < max; i++) { for (int j = 0; j < numResultBlocks; j++) { int iOffset = j < longerBlocksStartAt ? i : i + 1; result[j]->codewords_[iOffset] = rawCodewords[rawCodewordsOffset++]; } } if ((size_t) rawCodewordsOffset != rawCodewords.size()) { throw IllegalArgumentException("rawCodewordsOffset != rawCodewords.length"); } return result; }
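// A standalone sketch (hypothetical names) of the data-codeword portion of
// the de-interleaving above: codewords are distributed column by column
// across the blocks, and only the last numLonger blocks receive the extra
// final data codeword. The EC codewords follow the same column-major pattern
// with an index offset, as in the final loop above.
#include <cstdint>
#include <vector>

std::vector<std::vector<uint8_t>>
deinterleaveData(const std::vector<uint8_t> &raw, size_t numBlocks,
                 size_t shortLen, size_t numLonger) {
  std::vector<std::vector<uint8_t>> blocks(numBlocks);
  size_t pos = 0;
  for (size_t i = 0; i < shortLen; ++i)   // columns shared by every block
    for (size_t j = 0; j < numBlocks; ++j)
      blocks[j].push_back(raw[pos++]);
  for (size_t j = numBlocks - numLonger; j < numBlocks; ++j)
    blocks[j].push_back(raw[pos++]);      // extra column for longer blocks
  return blocks;
}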
int swift::performFrontend(ArrayRef<const char *> Args,
                           const char *Argv0, void *MainAddr,
                           FrontendObserver *observer) {
  llvm::InitializeAllTargets();
  llvm::InitializeAllTargetMCs();
  llvm::InitializeAllAsmPrinters();
  llvm::InitializeAllAsmParsers();

  std::unique_ptr<CompilerInstance> Instance =
    llvm::make_unique<CompilerInstance>();
  PrintingDiagnosticConsumer PDC;
  Instance->addDiagnosticConsumer(&PDC);

  if (Args.empty()) {
    Instance->getDiags().diagnose(SourceLoc(), diag::error_no_frontend_args);
    return 1;
  }

  CompilerInvocation Invocation;
  std::string MainExecutablePath = llvm::sys::fs::getMainExecutable(Argv0,
                                                                    MainAddr);
  Invocation.setMainExecutablePath(MainExecutablePath);

  SmallString<128> workingDirectory;
  llvm::sys::fs::current_path(workingDirectory);

  // Parse arguments.
  if (Invocation.parseArgs(Args, Instance->getDiags(), workingDirectory)) {
    return 1;
  }

  // Setting the DWARF version depends on the platform.
  IRGenOptions &IRGenOpts = Invocation.getIRGenOptions();
  IRGenOpts.DWARFVersion = swift::DWARFVersion;

  // The compiler invocation is now fully configured; notify our observer.
  if (observer) {
    observer->parsedArgs(Invocation);
  }

  if (Invocation.getFrontendOptions().PrintHelp ||
      Invocation.getFrontendOptions().PrintHelpHidden) {
    unsigned IncludedFlagsBitmask = options::FrontendOption;
    unsigned ExcludedFlagsBitmask =
      Invocation.getFrontendOptions().PrintHelpHidden ? 0
                                                      : llvm::opt::HelpHidden;
    std::unique_ptr<llvm::opt::OptTable> Options(createSwiftOptTable());
    Options->PrintHelp(llvm::outs(), displayName(MainExecutablePath).c_str(),
                       "Swift frontend", IncludedFlagsBitmask,
                       ExcludedFlagsBitmask);
    return 0;
  }

  if (Invocation.getFrontendOptions().RequestedAction ==
        FrontendOptions::NoneAction) {
    Instance->getDiags().diagnose(SourceLoc(),
                                  diag::error_missing_frontend_action);
    return 1;
  }

  // Because the serialized diagnostics consumer is initialized here,
  // diagnostics emitted above, within CompilerInvocation::parseArgs, are
  // never serialized. This is a non-issue because, in nearly all cases,
  // frontend arguments are generated by the driver, not directly by a user.
  // The driver is responsible for emitting diagnostics for its own errors.
  // See SR-2683 for details.
  std::unique_ptr<DiagnosticConsumer> SerializedConsumer;
  {
    const std::string &SerializedDiagnosticsPath =
      Invocation.getFrontendOptions().SerializedDiagnosticsPath;
    if (!SerializedDiagnosticsPath.empty()) {
      std::error_code EC;
      std::unique_ptr<llvm::raw_fd_ostream> OS;
      OS.reset(new llvm::raw_fd_ostream(SerializedDiagnosticsPath,
                                        EC,
                                        llvm::sys::fs::F_None));

      if (EC) {
        Instance->getDiags().diagnose(SourceLoc(),
                                      diag::cannot_open_serialized_file,
                                      SerializedDiagnosticsPath,
                                      EC.message());
        return 1;
      }

      SerializedConsumer.reset(
          serialized_diagnostics::createConsumer(std::move(OS)));
      Instance->addDiagnosticConsumer(SerializedConsumer.get());
    }
  }

  std::unique_ptr<DiagnosticConsumer> FixitsConsumer;
  {
    const std::string &FixitsOutputPath =
      Invocation.getFrontendOptions().FixitsOutputPath;
    if (!FixitsOutputPath.empty()) {
      std::error_code EC;
      std::unique_ptr<llvm::raw_fd_ostream> OS;
      OS.reset(new llvm::raw_fd_ostream(FixitsOutputPath,
                                        EC,
                                        llvm::sys::fs::F_None));

      if (EC) {
        Instance->getDiags().diagnose(SourceLoc(),
                                      diag::cannot_open_file,
                                      FixitsOutputPath,
                                      EC.message());
        return 1;
      }

      FixitsConsumer.reset(new JSONFixitWriter(std::move(OS),
                                          Invocation.getDiagnosticOptions()));
      Instance->addDiagnosticConsumer(FixitsConsumer.get());
    }
  }

  if (Invocation.getDiagnosticOptions().UseColor)
    PDC.forceColors();

  if (Invocation.getFrontendOptions().DebugTimeCompilation)
    SharedTimer::enableCompilationTimers();

  if (Invocation.getFrontendOptions().PrintStats) {
    llvm::EnableStatistics();
  }

  const DiagnosticOptions &diagOpts = Invocation.getDiagnosticOptions();
  if (diagOpts.VerifyMode != DiagnosticOptions::NoVerify) {
    enableDiagnosticVerifier(Instance->getSourceMgr());
  }

  DependencyTracker depTracker;
  if (!Invocation.getFrontendOptions().DependenciesFilePath.empty() ||
      !Invocation.getFrontendOptions().ReferenceDependenciesFilePath.empty()) {
    Instance->setDependencyTracker(&depTracker);
  }

  if (Instance->setup(Invocation)) {
    return 1;
  }

  // The compiler instance has been configured; notify our observer.
  if (observer) {
    observer->configuredCompiler(*Instance);
  }

  int ReturnValue = 0;
  bool HadError =
    performCompile(Instance, Invocation, Args, ReturnValue, observer);

  if (!HadError) {
    NewMangling::printManglingStats();
  }

  if (!HadError && !Invocation.getFrontendOptions().DumpAPIPath.empty()) {
    HadError = dumpAPI(Instance->getMainModule(),
                       Invocation.getFrontendOptions().DumpAPIPath);
  }

  if (diagOpts.VerifyMode != DiagnosticOptions::NoVerify) {
    HadError = verifyDiagnostics(
        Instance->getSourceMgr(),
        Instance->getInputBufferIDs(),
        diagOpts.VerifyMode == DiagnosticOptions::VerifyAndApplyFixes);

    DiagnosticEngine &diags = Instance->getDiags();
    if (diags.hasFatalErrorOccurred() &&
        !Invocation.getDiagnosticOptions().ShowDiagnosticsAfterFatalError) {
      diags.resetHadAnyError();
      diags.diagnose(SourceLoc(), diag::verify_encountered_fatal);
      HadError = true;
    }
  }

  return (HadError ? 1 : ReturnValue);
}
SILSpecializeAttr::SILSpecializeAttr(ArrayRef<Requirement> requirements,
                                     bool exported, SpecializationKind kind)
    : numRequirements(requirements.size()), kind(kind), exported(exported) {
  std::copy(requirements.begin(), requirements.end(), getRequirementsData());
}
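// A standalone sketch (simplified, not the SIL implementation) of the
// tail-allocated pattern the constructor above relies on: the attribute
// object and its requirement array live in a single allocation, and
// std::copy fills the trailing storage right after construction. The Attr
// type and its helpers here are hypothetical.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <new>

struct Attr {
  unsigned NumElems;
  explicit Attr(unsigned N) : NumElems(N) {}

  // Trailing storage begins immediately after the object itself.
  int *getData() { return reinterpret_cast<int *>(this + 1); }

  static Attr *create(const int *Begin, const int *End) {
    std::size_t N = static_cast<std::size_t>(End - Begin);
    void *Mem = ::operator new(sizeof(Attr) + N * sizeof(int));
    Attr *A = new (Mem) Attr(static_cast<unsigned>(N));
    std::copy(Begin, End, A->getData()); // same idiom as the constructor above
    return A;
  }

  static void destroy(Attr *A) {
    A->~Attr();
    ::operator delete(A);
  }
};

int main() {
  int Reqs[] = {1, 2, 3};
  Attr *A = Attr::create(Reqs, Reqs + 3);
  for (unsigned I = 0; I < A->NumElems; ++I)
    std::cout << A->getData()[I] << " "; // 1 2 3
  std::cout << "\n";
  Attr::destroy(A);
}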
static inline uint32_t eatB32(ArrayRef<uint8_t> &Bytes) {
  assert(Bytes.size() >= sizeof eatB32(Bytes));
  const auto Res = support::endian::read32le(Bytes.data());
  Bytes = Bytes.slice(sizeof Res);
  return Res;
}
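// A standalone sketch of the "eat" idiom above: read a little-endian 32-bit
// word from the front of a byte view, then advance the view past it. A plain
// pointer/size pair stands in for llvm::ArrayRef, and the bytes are assembled
// explicitly so the result is correct on any host endianness.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct Bytes {
  const uint8_t *Data;
  std::size_t Size;
};

static uint32_t eatB32(Bytes &B) {
  assert(B.Size >= sizeof(uint32_t) && "buffer too small");
  uint32_t Res = static_cast<uint32_t>(B.Data[0]) |
                 static_cast<uint32_t>(B.Data[1]) << 8 |
                 static_cast<uint32_t>(B.Data[2]) << 16 |
                 static_cast<uint32_t>(B.Data[3]) << 24;
  B.Data += sizeof(Res); // advance the view, like Bytes.slice above
  B.Size -= sizeof(Res);
  return Res;
}

int main() {
  const uint8_t Raw[] = {0x78, 0x56, 0x34, 0x12, 0x01, 0x00, 0x00, 0x00};
  Bytes B{Raw, sizeof(Raw)};
  std::printf("0x%08x\n", eatB32(B)); // 0x12345678
  std::printf("0x%08x\n", eatB32(B)); // 0x00000001
}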
error_code MachOObjectFile::getSectionName(DataRefImpl Sec,
                                           StringRef &Result) const {
  ArrayRef<char> Raw = getSectionRawName(Sec);
  Result = parseSegmentOrSectionName(Raw.data());
  return object_error::success;
}
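// A standalone sketch of why a parsing step is needed at all: Mach-O segment
// and section names are fixed 16-byte fields that are not guaranteed to be
// NUL-terminated, so the raw bytes must be trimmed to the first NUL (if any)
// before being used as a string. parseFixedName here is a hypothetical helper
// illustrating what parseSegmentOrSectionName has to do.
#include <cstddef>
#include <cstring>
#include <iostream>
#include <string>

static std::string parseFixedName(const char *Raw, std::size_t Width) {
  const void *Nul = std::memchr(Raw, '\0', Width);
  std::size_t Len = Nul ? static_cast<const char *>(Nul) - Raw : Width;
  return std::string(Raw, Len);
}

int main() {
  // A 16-byte name that uses all 16 bytes (no terminator), and a short one.
  const char Full[16] = {'_', '_', 'o', 'b', 'j', 'c', '_', 'm',
                         'e', 't', 'h', 'n', 'a', 'm', 'e', 's'};
  const char Short[16] = "__text"; // remainder is zero-filled
  std::cout << parseFixedName(Full, 16) << "\n";  // __objc_methnames
  std::cout << parseFixedName(Short, 16) << "\n"; // __text
}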
void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) {
  OwningPtr<PathDiagnostic> OwningD(D);

  if (!D || D->path.empty())
    return;

  // We need to flatten the locations (convert Stmt* to locations) because
  // the referenced statements may be freed by the time the diagnostics
  // are emitted.
  D->flattenLocations();

  // If the PathDiagnosticConsumer does not support diagnostics that
  // cross file boundaries, prune out such diagnostics now.
  if (!supportsCrossFileDiagnostics()) {
    // Verify that the entire path is from the same FileID.
    FileID FID;
    const SourceManager &SMgr = (*D->path.begin())->getLocation().getManager();
    SmallVector<const PathPieces *, 5> WorkList;
    WorkList.push_back(&D->path);

    while (!WorkList.empty()) {
      const PathPieces &path = *WorkList.pop_back_val();

      for (PathPieces::const_iterator I = path.begin(), E = path.end(); I != E;
           ++I) {
        const PathDiagnosticPiece *piece = I->getPtr();
        FullSourceLoc L = piece->getLocation().asLocation().getExpansionLoc();

        if (FID.isInvalid()) {
          FID = SMgr.getFileID(L);
        } else if (SMgr.getFileID(L) != FID)
          return; // FIXME: Emit a warning?

        // Check the source ranges.
        ArrayRef<SourceRange> Ranges = piece->getRanges();
        for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
                                             E = Ranges.end();
             I != E; ++I) {
          SourceLocation L = SMgr.getExpansionLoc(I->getBegin());
          if (!L.isFileID() || SMgr.getFileID(L) != FID)
            return; // FIXME: Emit a warning?
          L = SMgr.getExpansionLoc(I->getEnd());
          if (!L.isFileID() || SMgr.getFileID(L) != FID)
            return; // FIXME: Emit a warning?
        }

        if (const PathDiagnosticCallPiece *call =
                dyn_cast<PathDiagnosticCallPiece>(piece)) {
          WorkList.push_back(&call->path);
        } else if (const PathDiagnosticMacroPiece *macro =
                       dyn_cast<PathDiagnosticMacroPiece>(piece)) {
          WorkList.push_back(&macro->subPieces);
        }
      }
    }

    if (FID.isInvalid())
      return; // FIXME: Emit a warning?
  }

  // Profile the node to see if we already have something matching it.
  llvm::FoldingSetNodeID profile;
  D->Profile(profile);
  void *InsertPos = 0;

  if (PathDiagnostic *orig = Diags.FindNodeOrInsertPos(profile, InsertPos)) {
    // Keep the PathDiagnostic with the shorter path.
    // Note, the enclosing routine is called in deterministic order, so the
    // results will be consistent between runs (no reason to break ties if the
    // size is the same).
    const unsigned orig_size = orig->full_size();
    const unsigned new_size = D->full_size();
    if (orig_size <= new_size)
      return;

    assert(orig != D);
    Diags.RemoveNode(orig);
    delete orig;
  }

  Diags.InsertNode(OwningD.take());
}
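// A standalone sketch of the explicit-worklist traversal used above: nested
// containers are walked iteratively by pushing child sequences onto a stack,
// which avoids recursion over arbitrarily deep call/macro nesting. The Piece
// type here is illustrative, not the clang PathDiagnosticPiece hierarchy.
#include <iostream>
#include <memory>
#include <vector>

struct Piece {
  int Value;
  std::vector<std::unique_ptr<Piece>> SubPieces; // e.g. a call's inner path
};

static int sumAll(const std::vector<std::unique_ptr<Piece>> &Path) {
  int Total = 0;
  std::vector<const std::vector<std::unique_ptr<Piece>> *> WorkList;
  WorkList.push_back(&Path);
  while (!WorkList.empty()) {
    // Dereference first: the pointee outlives the worklist slot we pop.
    const auto &Pieces = *WorkList.back();
    WorkList.pop_back();
    for (const auto &P : Pieces) {
      Total += P->Value;
      if (!P->SubPieces.empty())
        WorkList.push_back(&P->SubPieces); // descend later, iteratively
    }
  }
  return Total;
}

int main() {
  std::vector<std::unique_ptr<Piece>> Path;
  Path.push_back(std::make_unique<Piece>(Piece{1, {}}));
  auto Call = std::make_unique<Piece>(Piece{2, {}});
  Call->SubPieces.push_back(std::make_unique<Piece>(Piece{3, {}}));
  Path.push_back(std::move(Call));
  std::cout << sumAll(Path) << "\n"; // 6
}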
void PlistDiagnostics::FlushDiagnosticsImpl(
    std::vector<const PathDiagnostic *> &Diags, FilesMade *filesMade) {
  // Build up a set of FIDs that we use by scanning the locations and
  // ranges of the diagnostics.
  FIDMap FM;
  SmallVector<FileID, 10> Fids;
  const SourceManager *SM = 0;

  if (!Diags.empty())
    SM = &(*(*Diags.begin())->path.begin())->getLocation().getManager();

  for (std::vector<const PathDiagnostic *>::iterator DI = Diags.begin(),
                                                     DE = Diags.end();
       DI != DE; ++DI) {
    const PathDiagnostic *D = *DI;

    SmallVector<const PathPieces *, 5> WorkList;
    WorkList.push_back(&D->path);

    while (!WorkList.empty()) {
      const PathPieces &path = *WorkList.pop_back_val();

      for (PathPieces::const_iterator I = path.begin(), E = path.end(); I != E;
           ++I) {
        const PathDiagnosticPiece *piece = I->getPtr();
        AddFID(FM, Fids, *SM, piece->getLocation().asLocation());
        ArrayRef<SourceRange> Ranges = piece->getRanges();
        for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
                                             E = Ranges.end();
             I != E; ++I) {
          AddFID(FM, Fids, *SM, I->getBegin());
          AddFID(FM, Fids, *SM, I->getEnd());
        }

        if (const PathDiagnosticCallPiece *call =
                dyn_cast<PathDiagnosticCallPiece>(piece)) {
          IntrusiveRefCntPtr<PathDiagnosticEventPiece> callEnterWithin =
              call->getCallEnterWithinCallerEvent();
          if (callEnterWithin)
            AddFID(FM, Fids, *SM, callEnterWithin->getLocation().asLocation());

          WorkList.push_back(&call->path);
        } else if (const PathDiagnosticMacroPiece *macro =
                       dyn_cast<PathDiagnosticMacroPiece>(piece)) {
          WorkList.push_back(&macro->subPieces);
        }
      }
    }
  }

  // Open the file.
  std::string ErrMsg;
  llvm::raw_fd_ostream o(OutputFile.c_str(), ErrMsg, llvm::sys::fs::F_Text);
  if (!ErrMsg.empty()) {
    llvm::errs() << "warning: could not create file: " << OutputFile << '\n';
    return;
  }

  // Write the plist header.
  o << PlistHeader;

  // Write the root object: a <dict> containing...
  //  - "clang_version", the string representation of clang version
  //  - "files", an <array> mapping from FIDs to file names
  //  - "diagnostics", an <array> containing the path diagnostics
  o << "<dict>\n"
    << " <key>clang_version</key>\n";
  EmitString(o, getClangFullVersion()) << '\n';
  o << " <key>files</key>\n"
       " <array>\n";

  for (SmallVectorImpl<FileID>::iterator I = Fids.begin(), E = Fids.end();
       I != E; ++I) {
    o << " ";
    EmitString(o, SM->getFileEntryForID(*I)->getName()) << '\n';
  }

  o << " </array>\n"
       " <key>diagnostics</key>\n"
       " <array>\n";

  for (std::vector<const PathDiagnostic *>::iterator DI = Diags.begin(),
                                                     DE = Diags.end();
       DI != DE; ++DI) {
    o << " <dict>\n"
         " <key>path</key>\n";

    const PathDiagnostic *D = *DI;

    o << " <array>\n";

    for (PathPieces::const_iterator I = D->path.begin(), E = D->path.end();
         I != E; ++I)
      ReportDiag(o, **I, FM, *SM, LangOpts);

    o << " </array>\n";

    // Output the bug type and bug category.
    o << " <key>description</key>";
    EmitString(o, D->getShortDescription()) << '\n';
    o << " <key>category</key>";
    EmitString(o, D->getCategory()) << '\n';
    o << " <key>type</key>";
    EmitString(o, D->getBugType()) << '\n';

    // Output information about the semantic context where
    // the issue occurred.
    if (const Decl *DeclWithIssue = D->getDeclWithIssue()) {
      // FIXME: handle blocks, which have no name.
      if (const NamedDecl *ND = dyn_cast<NamedDecl>(DeclWithIssue)) {
        StringRef declKind;
        switch (ND->getKind()) {
        case Decl::CXXRecord:
          declKind = "C++ class";
          break;
        case Decl::CXXMethod:
          declKind = "C++ method";
          break;
        case Decl::ObjCMethod:
          declKind = "Objective-C method";
          break;
        case Decl::Function:
          declKind = "function";
          break;
        default:
          break;
        }
        if (!declKind.empty()) {
          const std::string &declName = ND->getDeclName().getAsString();
          o << " <key>issue_context_kind</key>";
          EmitString(o, declKind) << '\n';
          o << " <key>issue_context</key>";
          EmitString(o, declName) << '\n';
        }

        // Output the bug hash for issue unique-ing. Currently, it's just an
        // offset from the beginning of the function.
        if (const Stmt *Body = DeclWithIssue->getBody()) {
          // If the bug uniqueing location exists, use it for the hash.
          // For example, this ensures that two leaks reported on the same line
          // will have different issue_hashes and that the hash will identify
          // the leak location even after code is added between the allocation
          // site and the end of scope (leak report location).
          PathDiagnosticLocation UPDLoc = D->getUniqueingLoc();
          if (UPDLoc.isValid()) {
            FullSourceLoc UL(SM->getExpansionLoc(UPDLoc.asLocation()), *SM);
            FullSourceLoc UFunL(
                SM->getExpansionLoc(
                    D->getUniqueingDecl()->getBody()->getLocStart()),
                *SM);
            o << " <key>issue_hash</key><string>"
              << UL.getExpansionLineNumber() - UFunL.getExpansionLineNumber()
              << "</string>\n";

            // Otherwise, use the location on which the bug is reported.
          } else {
            FullSourceLoc L(SM->getExpansionLoc(D->getLocation().asLocation()),
                            *SM);
            FullSourceLoc FunL(SM->getExpansionLoc(Body->getLocStart()), *SM);
            o << " <key>issue_hash</key><string>"
              << L.getExpansionLineNumber() - FunL.getExpansionLineNumber()
              << "</string>\n";
          }
        }
      }
    }

    // Output the location of the bug.
    o << " <key>location</key>\n";
    EmitLocation(o, *SM, LangOpts, D->getLocation().asLocation(), FM, 2);

    // Output the diagnostic to the sub-diagnostic client, if any.
    if (!filesMade->empty()) {
      StringRef lastName;
      PDFileEntry::ConsumerFiles *files = filesMade->getFiles(*D);
      if (files) {
        for (PDFileEntry::ConsumerFiles::const_iterator CI = files->begin(),
                                                        CE = files->end();
             CI != CE; ++CI) {
          StringRef newName = CI->first;
          if (newName != lastName) {
            if (!lastName.empty()) {
              o << " </array>\n";
            }
            lastName = newName;
            o << " <key>" << lastName << "_files</key>\n";
            o << " <array>\n";
          }
          o << " <string>" << CI->second << "</string>\n";
        }
        o << " </array>\n";
      }
    }

    // Close up the entry.
    o << " </dict>\n";
  }

  o << " </array>\n";

  // Finish.
  o << "</dict>\n</plist>";
}
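// A standalone sketch of the line-offset "issue hash" computed above: the
// hash is simply the reported line's distance from the first line of the
// enclosing function, so the value stays stable when code above the function
// is edited. The helper name is illustrative.
#include <iostream>

static unsigned issueHash(unsigned ReportLine, unsigned FunctionStartLine) {
  return ReportLine - FunctionStartLine;
}

int main() {
  // A leak reported on line 42 of a function that starts on line 37 hashes
  // to 5, regardless of where the function sits in the file.
  std::cout << issueHash(42, 37) << "\n"; // 5
}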
/// \brief Compares macro tokens with a specified token value sequence.
static bool MacroDefinitionEquals(const MacroInfo *MI,
                                  ArrayRef<TokenValue> Tokens) {
  return Tokens.size() == MI->getNumTokens() &&
         std::equal(Tokens.begin(), Tokens.end(), MI->tokens_begin());
}
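// A standalone sketch of the comparison idiom above: check the lengths first,
// then let std::equal walk both sequences. The length check is what makes
// std::equal safe here, since it guarantees the second range is long enough.
#include <algorithm>
#include <iostream>
#include <vector>

struct TokenValue { int Kind; }; // stand-in for the clang type
static bool operator==(TokenValue A, TokenValue B) { return A.Kind == B.Kind; }

static bool tokensEqual(const std::vector<TokenValue> &A,
                        const std::vector<TokenValue> &B) {
  return A.size() == B.size() && std::equal(A.begin(), A.end(), B.begin());
}

int main() {
  std::vector<TokenValue> M1{{1}, {2}, {3}}, M2{{1}, {2}, {3}}, M3{{1}, {2}};
  std::cout << tokensEqual(M1, M2) << " " << tokensEqual(M1, M3) << "\n"; // 1 0
}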
SolutionDiff::SolutionDiff(ArrayRef<Solution> solutions) {
  if (solutions.size() <= 1)
    return;

  // Populate the type bindings with the first solution.
  llvm::DenseMap<TypeVariableType *, SmallVector<Type, 2>> typeBindings;
  for (auto binding : solutions[0].typeBindings) {
    typeBindings[binding.first].push_back(binding.second);
  }

  // Populate the overload choices with the first solution.
  llvm::DenseMap<ConstraintLocator *, SmallVector<OverloadChoice, 2>>
      overloadChoices;
  for (auto choice : solutions[0].overloadChoices) {
    overloadChoices[choice.first].push_back(choice.second.choice);
  }

  // Find the type variables and overload locators common to all of the
  // solutions.
  for (auto &solution : solutions.slice(1)) {
    // For each type variable bound in all of the previous solutions, check
    // whether we have a binding for this type variable in this solution.
    SmallVector<TypeVariableType *, 4> removeTypeBindings;
    for (auto &binding : typeBindings) {
      auto known = solution.typeBindings.find(binding.first);
      if (known == solution.typeBindings.end()) {
        removeTypeBindings.push_back(binding.first);
        continue;
      }

      // Add this solution's binding to the results.
      binding.second.push_back(known->second);
    }

    // Remove those type variables for which this solution did not have a
    // binding.
    for (auto typeVar : removeTypeBindings) {
      typeBindings.erase(typeVar);
    }
    removeTypeBindings.clear();

    // For each overload locator for which we have an overload choice in all
    // of the previous solutions, check whether we have an overload choice in
    // this solution.
    SmallVector<ConstraintLocator *, 4> removeOverloadChoices;
    for (auto &overloadChoice : overloadChoices) {
      auto known = solution.overloadChoices.find(overloadChoice.first);
      if (known == solution.overloadChoices.end()) {
        removeOverloadChoices.push_back(overloadChoice.first);
        continue;
      }

      // Add this solution's overload choice to the results.
      overloadChoice.second.push_back(known->second.choice);
    }

    // Remove those overload locators for which this solution did not have
    // an overload choice.
    for (auto overloadChoice : removeOverloadChoices) {
      overloadChoices.erase(overloadChoice);
    }
  }

  // Look through the type variables that have bindings in all of the
  // solutions, and add those that have differences to the diff.
  for (auto &binding : typeBindings) {
    Type singleType;
    for (auto type : binding.second) {
      if (!singleType)
        singleType = type;
      else if (!singleType->isEqual(type)) {
        // We have a difference. Add this binding to the diff.
        this->typeBindings.push_back(SolutionDiff::TypeBindingDiff{
            binding.first, std::move(binding.second)});
        break;
      }
    }
  }

  // Look through the overload locators that have overload choices in all of
  // the solutions, and add those that have differences to the diff.
  for (auto &overloadChoice : overloadChoices) {
    OverloadChoice singleChoice = overloadChoice.second[0];
    for (auto choice : overloadChoice.second) {
      if (!sameOverloadChoice(singleChoice, choice)) {
        // We have a difference. Add this set of overload choices to the diff.
        this->overloads.push_back(SolutionDiff::OverloadDiff{
            overloadChoice.first, overloadChoice.second});
      }
    }
  }
}
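// A standalone sketch of the intersection pattern above: seed a map from the
// first solution, then for each later solution drop every key it does not
// also bind, leaving only the keys common to all solutions along with the
// value each solution gave them. Plain std containers stand in for the
// DenseMap/SmallVector pairs used above.
#include <iostream>
#include <map>
#include <string>
#include <vector>

using Solution = std::map<std::string, int>;

static std::map<std::string, std::vector<int>>
commonBindings(const std::vector<Solution> &Solutions) {
  std::map<std::string, std::vector<int>> Result;
  if (Solutions.empty())
    return Result;
  for (const auto &KV : Solutions.front())
    Result[KV.first].push_back(KV.second);
  for (std::size_t I = 1; I < Solutions.size(); ++I) {
    std::vector<std::string> Remove;
    for (auto &Entry : Result) {
      auto Known = Solutions[I].find(Entry.first);
      if (Known == Solutions[I].end())
        Remove.push_back(Entry.first); // not bound here; drop it afterwards
      else
        Entry.second.push_back(Known->second);
    }
    for (const auto &Key : Remove)
      Result.erase(Key);
  }
  return Result;
}

int main() {
  std::vector<Solution> S = {{{"T0", 1}, {"T1", 2}}, {{"T0", 3}}};
  for (const auto &Entry : commonBindings(S)) {
    std::cout << Entry.first << ":";
    for (int V : Entry.second)
      std::cout << " " << V;
    std::cout << "\n"; // T0: 1 3
  }
}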
/// Force liveness of registers.
void RegPressureTracker::addLiveRegs(ArrayRef<unsigned> Regs) {
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
    if (LiveRegs.insert(Regs[i]))
      increaseRegPressure(Regs[i]);
  }
}
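// A standalone sketch of the insert-guard idiom above: the set's insert
// reports whether the element was actually new, so the pressure counter is
// bumped only on the first sighting of each register. std::set's
// insert(...).second plays the role of the boolean LiveRegs.insert returns
// in the tracker above.
#include <iostream>
#include <set>
#include <vector>

int main() {
  std::set<unsigned> LiveRegs;
  unsigned Pressure = 0;
  for (unsigned Reg : std::vector<unsigned>{5, 7, 5, 9}) {
    if (LiveRegs.insert(Reg).second) // false for the duplicate 5
      ++Pressure;
  }
  std::cout << Pressure << "\n"; // 3
}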
void MachineFunction::addCatchTypeInfo(MachineBasicBlock *LandingPad,
                                       ArrayRef<const GlobalValue *> TyInfo) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  for (unsigned N = TyInfo.size(); N; --N)
    LP.TypeIds.push_back(getTypeIDFor(TyInfo[N - 1]));
}
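// A standalone sketch of the reverse-append loop above: counting N down to 1
// and indexing with N - 1 appends the entries in reverse source order without
// any signed/unsigned underflow, since the loop stops before N reaches 0.
#include <iostream>
#include <vector>

int main() {
  std::vector<int> TyInfo{10, 20, 30}, TypeIds;
  for (auto N = TyInfo.size(); N; --N)
    TypeIds.push_back(TyInfo[N - 1]);
  for (int Id : TypeIds)
    std::cout << Id << " "; // 30 20 10
  std::cout << "\n";
}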
/// Specialized emitter for Builtin.castReference.
static ManagedValue emitBuiltinCastReference(SILGenFunction &gen,
                                             SILLocation loc,
                                             SubstitutionList substitutions,
                                             ArrayRef<ManagedValue> args,
                                             CanFunctionType formalApplyType,
                                             SGFContext C) {
  assert(args.size() == 1 && "castReference should be given one argument");
  assert(substitutions.size() == 2 && "castReference should have two subs");

  auto fromTy = substitutions[0].getReplacement();
  auto toTy = substitutions[1].getReplacement();
  auto &fromTL = gen.getTypeLowering(fromTy);
  auto &toTL = gen.getTypeLowering(toTy);
  assert(!fromTL.isTrivial() && !toTL.isTrivial() && "expected ref type");

  if (fromTL.isLoadable() || toTL.isLoadable()) {
    if (auto refCast = gen.B.tryCreateUncheckedRefCast(
            loc, args[0].getValue(), toTL.getLoweredType())) {
      // Create a reference cast, forwarding the cleanup.
      // The cast takes the source reference.
      return ManagedValue(refCast, args[0].getCleanup());
    }
  }

  // We are either casting between address-only types, or cannot promote to a
  // cast of reference values.
  //
  // If the from/to types are invalid, then use a cast that will fail at
  // runtime. We cannot catch these errors with SIL verification because they
  // may legitimately occur during code specialization on dynamically
  // unreachable paths.
  //
  // TODO: For now, we leave invalid casts in address form so that the runtime
  // will trap. We could emit a noreturn call here instead which would provide
  // more information to the optimizer.
  SILValue srcVal = args[0].forward(gen);
  SILValue fromAddr;
  if (fromTL.isLoadable()) {
    // Move the loadable value into a "source temp". Since the source and
    // dest are RC identical, store the reference into the source temp without
    // a retain. The cast will load the reference from the source temp and
    // store it into a dest temp, effectively forwarding the cleanup.
    fromAddr = gen.emitTemporaryAllocation(loc, srcVal->getType());
    fromTL.emitStore(gen.B, loc, srcVal, fromAddr,
                     StoreOwnershipQualifier::Init);
  } else {
    // The cast loads directly from the source address.
    fromAddr = srcVal;
  }

  // Create a "dest temp" to hold the reference after casting it.
  SILValue toAddr = gen.emitTemporaryAllocation(loc, toTL.getLoweredType());
  gen.B.createUncheckedRefCastAddr(loc, fromAddr, fromTy->getCanonicalType(),
                                   toAddr, toTy->getCanonicalType());

  // Forward it along and register a cleanup.
  if (toTL.isAddressOnly())
    return gen.emitManagedBufferWithCleanup(toAddr);

  // Load the destination value.
  auto result = toTL.emitLoad(gen.B, loc, toAddr, LoadOwnershipQualifier::Take);
  return gen.emitManagedRValueWithCleanup(result);
}
template <class ELFT>
InputSectionBase *
elf::ObjectFile<ELFT>::createInputSection(const Elf_Shdr &Sec) {
  StringRef Name = getSectionName(Sec);

  switch (Sec.sh_type) {
  case SHT_ARM_ATTRIBUTES:
    // FIXME: ARM meta-data section. Retain the first attribute section
    // we see. The eglibc ARM dynamic loaders require the presence of an
    // attribute section for dlopen to work.
    // In a full implementation we would merge all attribute sections.
    if (InX::ARMAttributes == nullptr) {
      InX::ARMAttributes = make<InputSection>(this, &Sec, Name);
      return InX::ARMAttributes;
    }
    return &InputSection::Discarded;
  case SHT_RELA:
  case SHT_REL: {
    // Find the relocation target section and associate this section with it.
    // The target can be discarded, for example if it is a duplicated member
    // of a SHT_GROUP section; we do not create or process relocatable
    // sections then.
    InputSectionBase *Target = getRelocTarget(Sec);
    if (!Target)
      return nullptr;

    // This section contains relocation information.
    // If -r is given, we do not interpret or apply relocation
    // but just copy relocation sections to output.
    if (Config->Relocatable)
      return make<InputSection>(this, &Sec, Name);

    if (Target->FirstRelocation)
      fatal(toString(this) +
            ": multiple relocation sections to one section are not supported");

    // Mergeable sections with relocations are tricky because relocations
    // need to be taken into account when comparing section contents for
    // merging. It's not worth supporting such mergeable sections because
    // they are rare and it'd complicate the internal design (we usually
    // have to determine if two sections are mergeable early in the link
    // process, much before applying relocations). We simply handle mergeable
    // sections with relocations as non-mergeable.
    if (auto *MS = dyn_cast<MergeInputSection>(Target)) {
      Target = toRegularSection(MS);
      this->Sections[Sec.sh_info] = Target;
    }

    size_t NumRelocations;
    if (Sec.sh_type == SHT_RELA) {
      ArrayRef<Elf_Rela> Rels =
          check(this->getObj().relas(&Sec), toString(this));
      Target->FirstRelocation = Rels.begin();
      NumRelocations = Rels.size();
      Target->AreRelocsRela = true;
    } else {
      ArrayRef<Elf_Rel> Rels = check(this->getObj().rels(&Sec), toString(this));
      Target->FirstRelocation = Rels.begin();
      NumRelocations = Rels.size();
      Target->AreRelocsRela = false;
    }
    assert(isUInt<31>(NumRelocations));
    Target->NumRelocations = NumRelocations;

    // Relocation sections processed by the linker are usually removed
    // from the output, so we return `nullptr` in the normal case.
    // However, if -emit-relocs is given, we need to leave them in the output.
    // (Some post-link analysis tools need this information.)
    if (Config->EmitRelocs) {
      InputSection *RelocSec = make<InputSection>(this, &Sec, Name);
      // We will not emit the relocation section if the target was discarded.
      Target->DependentSections.push_back(RelocSec);
      return RelocSec;
    }
    return nullptr;
  }
  }

  // The GNU linker uses the .note.GNU-stack section as a marker indicating
  // that the code in the object file does not expect the stack to be
  // executable (in terms of the NX bit). If all input files have the marker,
  // the GNU linker adds a PT_GNU_STACK segment to tell the loader to make
  // the stack non-executable. Most object files have this section as of 2017.
  //
  // But making the stack non-executable is a norm today for security reasons.
  // Failure to do so may result in a serious security issue. Therefore, we
  // make LLD always add PT_GNU_STACK unless it is explicitly told to do
  // otherwise (by -z execstack). Because the stack executable-ness is
  // controlled solely by command line options, .note.GNU-stack sections are
  // simply ignored.
  if (Name == ".note.GNU-stack")
    return &InputSection::Discarded;

  // Split stacks is a feature to support a discontiguous stack. At least
  // as of 2017, it seems that the feature is not being used widely.
  // Only GNU gold supports it. We don't. For the details about that,
  // see https://gcc.gnu.org/wiki/SplitStacks
  if (Name == ".note.GNU-split-stack") {
    error(toString(this) +
          ": object file compiled with -fsplit-stack is not supported");
    return &InputSection::Discarded;
  }

  if (Config->Strip != StripPolicy::None && Name.startswith(".debug"))
    return &InputSection::Discarded;

  // If -gdb-index is given, LLD creates a .gdb_index section, and that
  // section serves the same purpose as the .debug_gnu_pub{names,types}
  // sections. If that's the case, we want to eliminate
  // .debug_gnu_pub{names,types} because they are redundant and can waste a
  // large amount of disk space (for example, they are about 400 MiB in total
  // for a clang debug build.)
  if (Config->GdbIndex &&
      (Name == ".debug_gnu_pubnames" || Name == ".debug_gnu_pubtypes"))
    return &InputSection::Discarded;

  // The linkonce feature is a sort of proto-comdat. Some glibc i386 object
  // files contain definitions of the symbol "__x86.get_pc_thunk.bx" in
  // linkonce sections. Drop those sections to avoid duplicate symbol errors.
  // FIXME: This is glibc PR20543; we should remove this hack once that has
  // been fixed for a while.
  if (Name.startswith(".gnu.linkonce."))
    return &InputSection::Discarded;

  // The linker merges EH (exception handling) frames and creates a
  // .eh_frame_hdr section for runtime. So we handle them with a special
  // class. For relocatable outputs, they are just passed through.
  if (Name == ".eh_frame" && !Config->Relocatable)
    return make<EhInputSection>(this, &Sec, Name);

  if (shouldMerge(Sec))
    return make<MergeInputSection>(this, &Sec, Name);
  return make<InputSection>(this, &Sec, Name);
}
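// A standalone sketch of the name-based classification at the end of the
// function above: special sections are recognized (and possibly dropped) by
// exact name or prefix before the generic fallback creates an ordinary
// section. The classify helper and SectionKind enum here are illustrative,
// not the LLD types.
#include <iostream>
#include <string>

enum class SectionKind { Discarded, EhFrame, Merge, Regular };

static SectionKind classify(const std::string &Name, bool Relocatable,
                            bool ShouldMerge) {
  // rfind(Prefix, 0) == 0 is the standard "starts with" idiom for std::string.
  if (Name == ".note.GNU-stack" || Name.rfind(".gnu.linkonce.", 0) == 0)
    return SectionKind::Discarded;
  if (Name == ".eh_frame" && !Relocatable)
    return SectionKind::EhFrame;
  if (ShouldMerge)
    return SectionKind::Merge;
  return SectionKind::Regular;
}

int main() {
  std::cout << (classify(".gnu.linkonce.t.foo", false, false) ==
                SectionKind::Discarded)
            << "\n"; // 1
  std::cout << (classify(".eh_frame", false, false) == SectionKind::EhFrame)
            << "\n"; // 1
  std::cout << (classify(".text", false, false) == SectionKind::Regular)
            << "\n"; // 1
}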