/// Create a disjunction constraint over the given list of alternatives.
///
/// Nested disjunctions are flattened into the top level, and a disjunction
/// with a single alternative collapses to that one constraint. The resulting
/// constraint is tail-allocated with the set of type variables it references.
Constraint *Constraint::createDisjunction(ConstraintSystem &cs,
                                          ArrayRef<Constraint *> constraints,
                                          ConstraintLocator *locator,
                                          RememberChoice_t rememberChoice) {
  // Unwrap any disjunctions inside the disjunction constraint; we only allow
  // disjunctions at the top level.
  SmallVector<TypeVariableType *, 4> typeVars;
  bool unwrappedAny = false;
  SmallVector<Constraint *, 1> unwrapped;
  unsigned index = 0;
  for (auto constraint : constraints) {
    // Gather type variables from this constraint.
    gatherReferencedTypeVars(constraint, typeVars);

    // If we have a nested disjunction, unwrap it.
    if (constraint->getKind() == ConstraintKind::Disjunction) {
      // If we haven't unwrapped anything before, copy all of the constraints
      // we skipped.
      if (!unwrappedAny) {
        unwrapped.append(constraints.begin(), constraints.begin() + index);
        unwrappedAny = true;
      }

      // Add all of the constraints in the disjunction.
      unwrapped.append(constraint->getNestedConstraints().begin(),
                       constraint->getNestedConstraints().end());
    } else if (unwrappedAny) {
      // Since we unwrapped constraints before, add this constraint.
      unwrapped.push_back(constraint);
    }

    ++index;
  }

  // If we unwrapped anything, our list of constraints is the unwrapped list.
  if (unwrappedAny)
    constraints = unwrapped;

  assert(!constraints.empty() && "Empty disjunction constraint");

  // If there is a single constraint, this isn't a disjunction at all.
  if (constraints.size() == 1) {
    assert(!rememberChoice && "simplified an important disjunction?");
    return constraints.front();
  }

  // Create the disjunction constraint.
  // The referenced type variables are stored inline after the Constraint
  // itself, so allocate room for both.
  uniqueTypeVariables(typeVars);
  unsigned size = sizeof(Constraint)
                + typeVars.size() * sizeof(TypeVariableType*);
  void *mem = cs.getAllocator().Allocate(size, alignof(Constraint));
  auto disjunction = new (mem) Constraint(ConstraintKind::Disjunction,
                                          cs.allocateCopy(constraints), locator,
                                          typeVars);
  disjunction->RememberChoice = (bool) rememberChoice;

  return disjunction;
}
/// Parse a GOTO statement, which may be one of three forms:
/// computed goto "GO TO (labels...) expr", assigned goto "GO TO var (labels)",
/// or the plain unconditional "GO TO label".
Parser::StmtResult Parser::ParseGotoStmt() {
  SourceLocation Loc = ConsumeToken();
  if(ConsumeIfPresent(tok::l_paren)) {
    // computed goto.
    SmallVector<Expr*, 4> Targets;
    do {
      auto E = ParseStatementLabelReference();
      if(E.isInvalid()) break;
      Targets.append(1, E.get());
    } while(ConsumeIfPresent(tok::comma));
    ExprResult Operand;
    bool ParseOperand = true;
    if(!ExpectAndConsume(tok::r_paren)) {
      // Missing ')': try to recover by skipping to it; if it never appears,
      // don't attempt to parse the selector expression.
      if(!SkipUntil(tok::r_paren)) ParseOperand = false;
    }
    if(ParseOperand) Operand = ParseExpectedExpression();
    return Actions.ActOnComputedGotoStmt(Context, Loc, Targets, Operand,
                                         StmtLabel);
  }

  auto Destination = ParseStatementLabelReference();
  if(Destination.isInvalid()) {
    // Not a statement label; the only remaining valid form is an assigned
    // goto, which starts with a variable reference.
    if(!IsPresent(tok::identifier)) {
      Diag.Report(getExpectedLoc(), diag::err_expected_stmt_label_after)
          << "GO TO";
      return StmtError();
    }
    auto IDInfo = Tok.getIdentifierInfo();
    auto IDLoc = ConsumeToken();
    auto VD = Actions.ExpectVarRef(IDLoc, IDInfo);
    if(!VD) return StmtError();
    auto Var = VarExpr::Create(Context, IDLoc, VD);

    // Assigned goto
    SmallVector<Expr*, 4> AllowedValues;
    if(ConsumeIfPresent(tok::l_paren)) {
      do {
        auto E = ParseStatementLabelReference();
        if(E.isInvalid()) {
          Diag.Report(getExpectedLoc(), diag::err_expected_stmt_label);
          // Recover: act on what was collected so far and resync.
          SkipUntilNextStatement();
          return Actions.ActOnAssignedGotoStmt(Context, Loc, Var,
                                               AllowedValues, StmtLabel);
        }
        AllowedValues.append(1, E.get());
      } while(ConsumeIfPresent(tok::comma));
      ExpectAndConsume(tok::r_paren);
    }

    return Actions.ActOnAssignedGotoStmt(Context, Loc, Var, AllowedValues,
                                         StmtLabel);
  }
  // Unconditional goto
  return Actions.ActOnGotoStmt(Context, Loc, Destination, StmtLabel);
}
/// Emit a collection downcast expression. /// /// \param conditional Whether to emit a conditional downcast; if /// false, this will emit a forced downcast. static RValue emitCollectionDowncastExpr(SILGenFunction &SGF, ManagedValue source, Type sourceType, SILLocation loc, Type destType, SGFContext C, bool conditional) { // Compute substitutions for the intrinsic call. auto fromCollection = cast<BoundGenericStructType>( sourceType->getCanonicalType()); auto toCollection = cast<BoundGenericStructType>( destType->getCanonicalType()); // Get the intrinsic function. auto &ctx = SGF.getASTContext(); FuncDecl *fn = nullptr; if (fromCollection->getDecl() == ctx.getArrayDecl()) { fn = conditional ? SGF.SGM.getArrayConditionalCast(loc) : SGF.SGM.getArrayForceCast(loc); } else if (fromCollection->getDecl() == ctx.getDictionaryDecl()) { fn = (conditional ? SGF.SGM.getDictionaryDownCastConditional(loc) : SGF.SGM.getDictionaryDownCast(loc)); } else if (fromCollection->getDecl() == ctx.getSetDecl()) { fn = (conditional ? SGF.SGM.getSetDownCastConditional(loc) : SGF.SGM.getSetDownCast(loc)); } else { llvm_unreachable("unsupported collection upcast kind"); } // This will have been diagnosed by the accessors above. if (!fn) return SGF.emitUndefRValue(loc, destType); auto fnGenericParams = fn->getGenericSignature()->getGenericParams(); auto fromSubsts = fromCollection->gatherAllSubstitutions( SGF.SGM.SwiftModule, nullptr); auto toSubsts = toCollection->gatherAllSubstitutions( SGF.SGM.SwiftModule, nullptr); assert(fnGenericParams.size() == fromSubsts.size() + toSubsts.size() && "wrong number of generic collection parameters"); (void) fnGenericParams; // Form type parameter substitutions. SmallVector<Substitution, 4> subs; subs.append(fromSubsts.begin(), fromSubsts.end()); subs.append(toSubsts.begin(), toSubsts.end()); return SGF.emitApplyOfLibraryIntrinsic(loc, fn, subs, {source}, C); }
/// Run the indexing action over a compiler invocation built from \p Args and
/// stream the resulting index records to stdout.
///
/// \returns true on failure.
static bool printSourceSymbols(ArrayRef<const char *> Args) {
  // Build an argv that looks like a normal clang driver invocation.
  SmallVector<const char *, 4> CmdArgs{"clang"};
  CmdArgs.append(Args.begin(), Args.end());

  IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
      CompilerInstance::createDiagnostics(new DiagnosticOptions));
  auto Invocation = createInvocationFromCommandLine(CmdArgs, Diags);
  if (!Invocation)
    return true;

  // The consumer prints index data as it is produced.
  auto Consumer = std::make_shared<PrintIndexDataConsumer>(outs());
  IndexingOptions Opts;
  std::unique_ptr<FrontendAction> Action =
      createIndexingAction(Consumer, Opts, /*WrappedAction=*/nullptr);

  auto ContainerOps = std::make_shared<PCHContainerOperations>();
  std::unique_ptr<ASTUnit> Unit(ASTUnit::LoadFromCompilerInvocationAction(
      std::move(Invocation), ContainerOps, Diags, Action.get()));

  // Failure to build the ASTUnit is the only error condition.
  return !Unit;
}
int Compilation::performSingleCommand(const Job *Cmd) { assert(Cmd->getInputs().empty() && "This can only be used to run a single command with no inputs"); switch (Cmd->getCondition()) { case Job::Condition::CheckDependencies: return 0; case Job::Condition::RunWithoutCascading: case Job::Condition::Always: case Job::Condition::NewlyAdded: break; } if (!writeFilelistIfNecessary(Cmd, Diags)) return 1; if (Level == OutputLevel::Verbose) Cmd->printCommandLine(llvm::errs()); SmallVector<const char *, 128> Argv; Argv.push_back(Cmd->getExecutable()); Argv.append(Cmd->getArguments().begin(), Cmd->getArguments().end()); Argv.push_back(0); const char *ExecPath = Cmd->getExecutable(); const char **argv = Argv.data(); for (auto &envPair : Cmd->getExtraEnvironment()) setenv(envPair.first, envPair.second, /*replacing=*/true); return ExecuteInPlace(ExecPath, argv); }
void CodeGenModule::EmitCXXGlobalInitFunc() { while (!CXXGlobalInits.empty() && !CXXGlobalInits.back()) CXXGlobalInits.pop_back(); if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty()) return; llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false); // Create our global initialization function. llvm::Function *Fn = CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__I_a"); if (!PrioritizedCXXGlobalInits.empty()) { SmallVector<llvm::Constant*, 8> LocalCXXGlobalInits; llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(), PrioritizedCXXGlobalInits.end()); for (unsigned i = 0; i < PrioritizedCXXGlobalInits.size(); i++) { llvm::Function *Fn = PrioritizedCXXGlobalInits[i].second; LocalCXXGlobalInits.push_back(Fn); } LocalCXXGlobalInits.append(CXXGlobalInits.begin(), CXXGlobalInits.end()); CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, &LocalCXXGlobalInits[0], LocalCXXGlobalInits.size()); } else CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, &CXXGlobalInits[0], CXXGlobalInits.size()); AddGlobalCtor(Fn); CXXGlobalInits.clear(); PrioritizedCXXGlobalInits.clear(); }
/// Parse a free-standing where clause attached to a declaration, adding it to /// a generic parameter list that may (or may not) already exist. ParserStatus Parser:: parseFreestandingGenericWhereClause(GenericParamList *&genericParams, WhereClauseKind kind) { assert(Tok.is(tok::kw_where) && "Shouldn't call this without a where"); // Push the generic arguments back into a local scope so that references will // find them. Scope S(this, ScopeKind::Generics); if (genericParams) for (auto pd : genericParams->getParams()) addToScope(pd); SmallVector<RequirementRepr, 4> Requirements; if (genericParams) Requirements.append(genericParams->getRequirements().begin(), genericParams->getRequirements().end()); SourceLoc WhereLoc; bool FirstTypeInComplete; auto result = parseGenericWhereClause(WhereLoc, Requirements, FirstTypeInComplete); if (result.shouldStopParsing() || Requirements.empty()) return result; if (!genericParams) diagnose(WhereLoc, diag::where_without_generic_params, unsigned(kind)); else genericParams = GenericParamList::create(Context, genericParams->getLAngleLoc(), genericParams->getParams(), WhereLoc, Requirements, genericParams->getRAngleLoc()); return ParserStatus(); }
/// Get or lazily create the JavaScript invoke wrapper declaration matching
/// the signature of \p CI's callee. Wrappers are cached in InvokeWrappers
/// keyed by signature string.
Function *WebAssemblyLowerEmscriptenEHSjLj::getInvokeWrapper(CallOrInvoke *CI) {
  Module *M = CI->getModule();
  SmallVector<Type *, 16> ArgTys;
  Value *Callee = CI->getCalledValue();
  FunctionType *CalleeFTy;
  if (auto *F = dyn_cast<Function>(Callee))
    CalleeFTy = F->getFunctionType();
  else {
    // Indirect call: recover the function type from the callee pointer.
    auto *CalleeTy = cast<PointerType>(Callee->getType())->getElementType();
    CalleeFTy = dyn_cast<FunctionType>(CalleeTy);
  }

  std::string Sig = getSignature(CalleeFTy);
  // Single map lookup instead of find() followed by operator[].
  auto It = InvokeWrappers.find(Sig);
  if (It != InvokeWrappers.end())
    return It->second;

  // Put the pointer to the callee as first argument
  ArgTys.push_back(PointerType::getUnqual(CalleeFTy));
  // Add argument types
  ArgTys.append(CalleeFTy->param_begin(), CalleeFTy->param_end());

  FunctionType *FTy = FunctionType::get(CalleeFTy->getReturnType(), ArgTys,
                                        CalleeFTy->isVarArg());
  Function *F = Function::Create(FTy, GlobalValue::ExternalLinkage,
                                 InvokePrefix + Sig, M);
  InvokeWrappers[Sig] = F;
  return F;
}
/// Concatenate a list of vectors into a single wide vector by repeatedly
/// concatenating adjacent pairs until one value remains.
///
/// All vectors must have the same type except possibly the last, which may
/// differ (it is carried forward unchanged on odd-sized rounds).
Value *llvm::concatenateVectors(IRBuilder<> &Builder, ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    // The halved list becomes the input to the next round.
    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}
/// Append \p Ops to \p Expr as part of a stack-value computation, inserting
/// DW_OP_deref and/or DW_OP_stack_value as needed to keep the resulting
/// expression well-formed. \p Ops itself must not contain
/// DW_OP_stack_value or DW_OP_LLVM_fragment.
DIExpression *DIExpression::appendToStack(const DIExpression *Expr,
                                          ArrayRef<uint64_t> Ops) {
  assert(Expr && !Ops.empty() && "Can't append ops to this expression");
  assert(none_of(Ops,
                 [](uint64_t Op) {
                   return Op == dwarf::DW_OP_stack_value ||
                          Op == dwarf::DW_OP_LLVM_fragment;
                 }) &&
         "Can't append this op");

  // Append a DW_OP_deref after Expr's current op list if it's non-empty and
  // has no DW_OP_stack_value.
  //
  // Match .* DW_OP_stack_value (DW_OP_LLVM_fragment A B)?.
  Optional<FragmentInfo> FI = Expr->getFragmentInfo();
  // A fragment suffix is exactly three elements: DW_OP_LLVM_fragment plus
  // its two operands; drop it before inspecting the tail.
  unsigned DropUntilStackValue = FI.hasValue() ? 3 : 0;
  ArrayRef<uint64_t> ExprOpsBeforeFragment =
      Expr->getElements().drop_back(DropUntilStackValue);
  bool NeedsDeref = (Expr->getNumElements() > DropUntilStackValue) &&
                    (ExprOpsBeforeFragment.back() != dwarf::DW_OP_stack_value);
  bool NeedsStackValue = NeedsDeref || ExprOpsBeforeFragment.empty();

  // Append a DW_OP_deref after Expr's current op list if needed, then append
  // the new ops, and finally ensure that a single DW_OP_stack_value is present.
  SmallVector<uint64_t, 16> NewOps;
  if (NeedsDeref)
    NewOps.push_back(dwarf::DW_OP_deref);
  NewOps.append(Ops.begin(), Ops.end());
  if (NeedsStackValue)
    NewOps.push_back(dwarf::DW_OP_stack_value);
  // append() folds the new ops in before any existing stack-value/fragment
  // suffix of Expr.
  return DIExpression::append(Expr, NewOps);
}
/// Build a DiagnosticsEngine configured from the command-line options in
/// argv, returning nullptr if a CompilerInvocation cannot be constructed.
static IntrusiveRefCntPtr<DiagnosticsEngine>
createDiagnostics(unsigned int argc, char **argv) {
  IntrusiveRefCntPtr<DiagnosticIDs> IDs(new DiagnosticIDs());

  // Capture diagnostics produced while parsing the command line so they can
  // be replayed later through a properly-configured engine.
  TextDiagnosticBuffer *Buffer = new TextDiagnosticBuffer;
  IntrusiveRefCntPtr<DiagnosticsEngine> ParseDiags(
      new DiagnosticsEngine(IDs, new DiagnosticOptions(), Buffer));

  // Try to build a CompilerInvocation from "diagtool" plus the user's args.
  SmallVector<const char *, 4> InvocationArgs;
  InvocationArgs.push_back("diagtool");
  for (unsigned i = 0; i != argc; ++i)
    InvocationArgs.push_back(argv[i]);

  std::unique_ptr<CompilerInvocation> Invocation(
      createInvocationFromCommandLine(InvocationArgs, ParseDiags));
  if (!Invocation)
    return nullptr;

  // Build the diagnostics engine the caller will actually use.
  IntrusiveRefCntPtr<DiagnosticsEngine> Result =
      CompilerInstance::createDiagnostics(&Invocation->getDiagnosticOpts());
  if (!Result)
    return nullptr;

  // Replay anything captured during argument parsing. Invalid command lines
  // surface their errors here.
  Buffer->FlushDiagnostics(*Result);

  return Result;
}
/// Escape RegNode so that we can access it from child handlers. Find the call /// to frameescape, if any, in the entry block and append RegNode to the list /// of arguments. int WinEHStatePass::escapeRegNode(Function &F) { // Find the call to frameescape and extract its arguments. IntrinsicInst *EscapeCall = nullptr; for (Instruction &I : F.getEntryBlock()) { IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I); if (II && II->getIntrinsicID() == Intrinsic::frameescape) { EscapeCall = II; break; } } SmallVector<Value *, 8> Args; if (EscapeCall) { auto Ops = EscapeCall->arg_operands(); Args.append(Ops.begin(), Ops.end()); } Args.push_back(RegNode); // Replace the call (if it exists) with new one. Otherwise, insert at the end // of the entry block. IRBuilder<> Builder(&F.getEntryBlock(), EscapeCall ? EscapeCall : F.getEntryBlock().end()); Builder.CreateCall(FrameEscape, Args); if (EscapeCall) EscapeCall->eraseFromParent(); return Args.size() - 1; }
/// Tear down the context: delete owned modules, free all uniqued constants
/// and attributes, and destroy metadata nodes and strings.
LLVMContextImpl::~LLVMContextImpl() {
  // NOTE: We need to delete the contents of OwnedModules, but we have to
  // duplicate it into a temporary vector, because the destructor of Module
  // will try to remove itself from OwnedModules set.  This would cause
  // iterator invalidation if we iterated on the set directly.
  std::vector<Module*> Modules(OwnedModules.begin(), OwnedModules.end());
  DeleteContainerPointers(Modules);

  // Free the constants.  This is important to do here to ensure that they are
  // freed before the LeakDetector is torn down.
  std::for_each(ExprConstants.map_begin(), ExprConstants.map_end(),
                DropReferences());
  std::for_each(ArrayConstants.map_begin(), ArrayConstants.map_end(),
                DropFirst());
  std::for_each(StructConstants.map_begin(), StructConstants.map_end(),
                DropFirst());
  std::for_each(VectorConstants.map_begin(), VectorConstants.map_end(),
                DropFirst());
  ExprConstants.freeConstants();
  ArrayConstants.freeConstants();
  StructConstants.freeConstants();
  VectorConstants.freeConstants();
  DeleteContainerSeconds(CAZConstants);
  DeleteContainerSeconds(CPNConstants);
  DeleteContainerSeconds(UVConstants);
  InlineAsms.freeConstants();
  DeleteContainerSeconds(IntConstants);
  DeleteContainerSeconds(FPConstants);

  for (StringMap<ConstantDataSequential*>::iterator I = CDSConstants.begin(),
       E = CDSConstants.end(); I != E; ++I)
    delete I->second;
  CDSConstants.clear();

  // Destroy attributes.
  for (FoldingSetIterator<AttributesImpl> I = AttrsSet.begin(),
       E = AttrsSet.end(); I != E;) {
    // Advance before deleting: erasure invalidates the current iterator.
    FoldingSetIterator<AttributesImpl> Elem = I++;
    delete &*Elem;
  }

  // Destroy MDNodes.  ~MDNode can move and remove nodes between the MDNodeSet
  // and the NonUniquedMDNodes sets, so copy the values out first.
  SmallVector<MDNode*, 8> MDNodes;
  MDNodes.reserve(MDNodeSet.size() + NonUniquedMDNodes.size());
  for (FoldingSetIterator<MDNode> I = MDNodeSet.begin(), E = MDNodeSet.end();
       I != E; ++I)
    MDNodes.push_back(&*I);
  MDNodes.append(NonUniquedMDNodes.begin(), NonUniquedMDNodes.end());
  for (SmallVectorImpl<MDNode *>::iterator I = MDNodes.begin(),
       E = MDNodes.end(); I != E; ++I)
    (*I)->destroy();
  assert(MDNodeSet.empty() && NonUniquedMDNodes.empty() &&
         "Destroying all MDNodes didn't empty the Context's sets.");

  // Destroy MDStrings.
  DeleteContainerSeconds(MDStringCache);
}
/// Rewrite the call/invoke \p CI to go through the JavaScript invoke wrapper
/// for its signature, and return the value of the __THREW__ global observed
/// immediately after the wrapped call.
Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallOrInvoke *CI) {
  LLVMContext &C = CI->getModule()->getContext();

  // If we are calling a function that is noreturn, we must remove that
  // attribute. The code we insert here does expect it to return, after we
  // catch the exception.
  if (CI->doesNotReturn()) {
    if (auto *F = dyn_cast<Function>(CI->getCalledValue()))
      F->removeFnAttr(Attribute::NoReturn);
    CI->removeAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
  }

  IRBuilder<> IRB(C);
  IRB.SetInsertPoint(CI);

  // Pre-invoke
  // __THREW__ = 0;
  IRB.CreateStore(IRB.getInt32(0), ThrewGV);

  // Invoke function wrapper in JavaScript
  SmallVector<Value *, 16> Args;
  // Put the pointer to the callee as first argument, so it can be called
  // within the invoke wrapper later
  Args.push_back(CI->getCalledValue());
  Args.append(CI->arg_begin(), CI->arg_end());
  CallInst *NewCall = IRB.CreateCall(getInvokeWrapper(CI), Args);
  NewCall->takeName(CI);
  NewCall->setCallingConv(CI->getCallingConv());
  NewCall->setDebugLoc(CI->getDebugLoc());

  // Because we added the pointer to the callee as first argument, all
  // argument attribute indices have to be incremented by one.
  SmallVector<AttributeSet, 8> ArgAttributes;
  const AttributeList &InvokeAL = CI->getAttributes();

  // No attributes for the callee pointer.
  ArgAttributes.push_back(AttributeSet());
  // Copy the argument attributes from the original
  for (unsigned i = 0, e = CI->getNumArgOperands(); i < e; ++i)
    ArgAttributes.push_back(InvokeAL.getParamAttributes(i));

  // Reconstruct the AttributesList based on the vector we constructed.
  AttributeList NewCallAL =
      AttributeList::get(C, InvokeAL.getFnAttributes(),
                         InvokeAL.getRetAttributes(), ArgAttributes);
  NewCall->setAttributes(NewCallAL);

  CI->replaceAllUsesWith(NewCall);

  // Post-invoke
  // %__THREW__.val = __THREW__; __THREW__ = 0;
  Value *Threw = IRB.CreateLoad(ThrewGV, ThrewGV->getName() + ".val");
  IRB.CreateStore(IRB.getInt32(0), ThrewGV);
  return Threw;
}
/// Tear down the context: release all uniqued constants, opaque types,
/// metadata nodes, and strings owned by this LLVMContextImpl.
LLVMContextImpl::~LLVMContextImpl() {
  // Drop operand references first so cross-references between uniqued
  // constants do not dangle while the tables are freed below.
  std::for_each(ExprConstants.map_begin(), ExprConstants.map_end(),
                DropReferences());
  std::for_each(ArrayConstants.map_begin(), ArrayConstants.map_end(),
                DropReferences());
  std::for_each(StructConstants.map_begin(), StructConstants.map_end(),
                DropReferences());
  std::for_each(UnionConstants.map_begin(), UnionConstants.map_end(),
                DropReferences());
  std::for_each(VectorConstants.map_begin(), VectorConstants.map_end(),
                DropReferences());
  ExprConstants.freeConstants();
  ArrayConstants.freeConstants();
  StructConstants.freeConstants();
  UnionConstants.freeConstants();
  VectorConstants.freeConstants();
  AggZeroConstants.freeConstants();
  NullPtrConstants.freeConstants();
  UndefValueConstants.freeConstants();
  InlineAsms.freeConstants();
  for (IntMapTy::iterator I = IntConstants.begin(), E = IntConstants.end();
       I != E; ++I) {
    delete I->second;
  }
  for (FPMapTy::iterator I = FPConstants.begin(), E = FPConstants.end();
       I != E; ++I) {
    delete I->second;
  }
  AlwaysOpaqueTy->dropRef();
  for (OpaqueTypesTy::iterator I = OpaqueTypes.begin(), E = OpaqueTypes.end();
       I != E; ++I) {
    // Clear abstract-type users before deleting so the type does not try to
    // notify them during its destruction.
    (*I)->AbstractTypeUsers.clear();
    delete *I;
  }

  // Destroy MDNodes.  ~MDNode can move and remove nodes between the MDNodeSet
  // and the NonUniquedMDNodes sets, so copy the values out first.
  SmallVector<MDNode*, 8> MDNodes;
  MDNodes.reserve(MDNodeSet.size() + NonUniquedMDNodes.size());
  for (FoldingSetIterator<MDNode> I = MDNodeSet.begin(), E = MDNodeSet.end();
       I != E; ++I) {
    MDNodes.push_back(&*I);
  }
  MDNodes.append(NonUniquedMDNodes.begin(), NonUniquedMDNodes.end());
  for (SmallVector<MDNode*, 8>::iterator I = MDNodes.begin(),
       E = MDNodes.end(); I != E; ++I) {
    (*I)->destroy();
  }
  assert(MDNodeSet.empty() && NonUniquedMDNodes.empty() &&
         "Destroying all MDNodes didn't empty the Context's sets.");
  // Destroy MDStrings.
  for (StringMap<MDString*>::iterator I = MDStringCache.begin(),
       E = MDStringCache.end(); I != E; ++I) {
    delete I->second;
  }
}
/// Append \p Ops to \p Expr, inserting the new ops immediately before any
/// trailing DW_OP_stack_value / DW_OP_LLVM_fragment so that suffix remains
/// last in the resulting expression.
DIExpression *DIExpression::append(const DIExpression *Expr,
                                   ArrayRef<uint64_t> Ops) {
  assert(Expr && !Ops.empty() && "Can't append ops to this expression");

  // Copy Expr's current op list.
  SmallVector<uint64_t, 16> NewOps;
  for (auto Op : Expr->expr_ops()) {
    // Append new opcodes before DW_OP_{stack_value, LLVM_fragment}.
    if (Op.getOp() == dwarf::DW_OP_stack_value ||
        Op.getOp() == dwarf::DW_OP_LLVM_fragment) {
      NewOps.append(Ops.begin(), Ops.end());

      // Ensure that the new opcodes are only appended once.
      Ops = None;
    }
    Op.appendToVector(NewOps);
  }

  // If no stack-value/fragment suffix was encountered, Ops is still
  // non-empty and the new ops simply go at the end.
  NewOps.append(Ops.begin(), Ops.end());
  return DIExpression::get(Expr->getContext(), NewOps);
}
/// Decide whether an existing AST built from \p Snapshots can be reused for
/// this request instead of waiting for an up-to-date AST.
///
/// \returns true if the request offset could be mapped back onto one of the
/// snapshots; in that case Offset is updated to the mapped offset and the
/// snapshots are recorded in PreviousASTSnaps.
bool canUseASTWithSnapshots(
    ArrayRef<ImmutableTextSnapshotRef> Snapshots) override {
  if (!TryExistingAST) {
    LOG_INFO_FUNC(High, "will resolve using up-to-date AST");
    return false;
  }

  // If there is an existing AST and the offset can be mapped back to the
  // document snapshot that was used to create it, then use that AST.
  // The downside is that we may return stale information, but we get the
  // benefit of increased responsiveness, since the request will not be
  // blocked waiting on the AST to be fully typechecked.

  ImmutableTextSnapshotRef InputSnap;
  if (auto EditorDoc = Lang.getEditorDocuments().findByPath(InputFile))
    InputSnap = EditorDoc->getLatestSnapshot();
  if (!InputSnap)
    return false;

  auto mappedBackOffset = [&]() -> llvm::Optional<unsigned> {
    for (auto &Snap : Snapshots) {
      if (Snap->isFromSameBuffer(InputSnap)) {
        // Identical stamps mean the buffer is unchanged; the offset maps
        // through directly.
        if (Snap->getStamp() == InputSnap->getStamp())
          return Offset;

        auto OptOffset = mapOffsetToOlderSnapshot(Offset, InputSnap, Snap);
        if (!OptOffset.hasValue())
          return None;

        // Check that the new and old offset still point to the same token.
        StringRef NewTok = getSourceToken(Offset, InputSnap);
        if (NewTok.empty())
          return None;
        if (NewTok == getSourceToken(OptOffset.getValue(), Snap))
          return OptOffset;

        return None;
      }
    }
    return None;
  };

  auto OldOffsetOpt = mappedBackOffset();
  if (OldOffsetOpt.hasValue()) {
    Offset = *OldOffsetOpt;
    PreviousASTSnaps.append(Snapshots.begin(), Snapshots.end());
    LOG_INFO_FUNC(High, "will try existing AST");
    return true;
  }

  LOG_INFO_FUNC(High, "will resolve using up-to-date AST");
  return false;
}
/// Determine whether instruction B is potentially reachable from instruction
/// A by following control flow; both must be in the same function.
bool llvm::isPotentiallyReachable(const Instruction *A, const Instruction *B,
                                  const DominatorTree *DT, const LoopInfo *LI) {
  assert(A->getParent()->getParent() == B->getParent()->getParent() &&
         "This analysis is function-local!");

  SmallVector<BasicBlock*, 32> Worklist;

  if (A->getParent() == B->getParent()) {
    // The same block case is special because it's the only time we're looking
    // within a single block to see which instruction comes first. Once we
    // start looking at multiple blocks, the first instruction of the block is
    // reachable, so we only need to determine reachability between whole
    // blocks.
    BasicBlock *BB = const_cast<BasicBlock *>(A->getParent());

    // If the block is in a loop then we can reach any instruction in the block
    // from any other instruction in the block by going around a backedge.
    if (LI && LI->getLoopFor(BB) != nullptr)
      return true;

    // Linear scan, start at 'A', see whether we hit 'B' or the end first.
    for (BasicBlock::const_iterator I = A->getIterator(), E = BB->end();
         I != E; ++I) {
      if (&*I == B)
        return true;
    }

    // Can't be in a loop if it's the entry block -- the entry block may not
    // have predecessors.
    if (BB == &BB->getParent()->getEntryBlock())
      return false;

    // Otherwise, continue doing the normal per-BB CFG walk.
    Worklist.append(succ_begin(BB), succ_end(BB));

    if (Worklist.empty()) {
      // We've proven that there's no path!
      return false;
    }
  } else {
    Worklist.push_back(const_cast<BasicBlock*>(A->getParent()));
  }

  // Everything below the entry block is reachable from it, and (other than
  // the same-block case handled above) nothing can reach back into it.
  if (A->getParent() == &A->getParent()->getParent()->getEntryBlock())
    return true;
  if (B->getParent() == &A->getParent()->getParent()->getEntryBlock())
    return false;

  return isPotentiallyReachableFromMany(
      Worklist, const_cast<BasicBlock *>(B->getParent()), DT, LI);
}
// Calls to setjmp(p) are lowered to _setjmp3(p, 0) by the frontend. // The idea behind _setjmp3 is that it takes an optional number of personality // specific parameters to indicate how to restore the personality-specific frame // state when longjmp is initiated. Typically, the current TryLevel is saved. void WinEHStatePass::rewriteSetJmpCallSite(IRBuilder<> &Builder, Function &F, CallSite CS, Value *State) { // Don't rewrite calls with a weird number of arguments. if (CS.getNumArgOperands() != 2) return; Instruction *Inst = CS.getInstruction(); SmallVector<OperandBundleDef, 1> OpBundles; CS.getOperandBundlesAsDefs(OpBundles); SmallVector<Value *, 3> OptionalArgs; if (Personality == EHPersonality::MSVC_CXX) { OptionalArgs.push_back(CxxLongjmpUnwind); OptionalArgs.push_back(State); OptionalArgs.push_back(emitEHLSDA(Builder, &F)); } else if (Personality == EHPersonality::MSVC_X86SEH) { OptionalArgs.push_back(SehLongjmpUnwind); OptionalArgs.push_back(State); if (UseStackGuard) OptionalArgs.push_back(Cookie); } else { llvm_unreachable("unhandled personality!"); } SmallVector<Value *, 5> Args; Args.push_back( Builder.CreateBitCast(CS.getArgOperand(0), Builder.getInt8PtrTy())); Args.push_back(Builder.getInt32(OptionalArgs.size())); Args.append(OptionalArgs.begin(), OptionalArgs.end()); CallSite NewCS; if (CS.isCall()) { auto *CI = cast<CallInst>(Inst); CallInst *NewCI = Builder.CreateCall(SetJmp3, Args, OpBundles); NewCI->setTailCallKind(CI->getTailCallKind()); NewCS = NewCI; } else { auto *II = cast<InvokeInst>(Inst); NewCS = Builder.CreateInvoke( SetJmp3, II->getNormalDest(), II->getUnwindDest(), Args, OpBundles); } NewCS.setCallingConv(CS.getCallingConv()); NewCS.setAttributes(CS.getAttributes()); NewCS->setDebugLoc(CS->getDebugLoc()); Instruction *NewInst = NewCS.getInstruction(); NewInst->takeName(Inst); Inst->replaceAllUsesWith(NewInst); Inst->eraseFromParent(); }
/// Asynchronously produce an AST unit for the given snapshots, delivering the
/// unit (or an error string) to \p Receiver from the AST build queue.
void ASTProducer::getASTUnitAsync(SwiftASTManager::Implementation &MgrImpl,
                                  ArrayRef<ImmutableTextSnapshotRef> Snaps,
       std::function<void(ASTUnitRef Unit, StringRef Error)> Receiver) {

  ASTProducerRef ThisProducer = this;
  // Copy the snapshots into an owning container; the ArrayRef's backing
  // storage need not outlive this call, but the dispatched lambda does.
  SmallVector<ImmutableTextSnapshotRef, 4> Snapshots;
  Snapshots.append(Snaps.begin(), Snaps.end());

  // NOTE(review): MgrImpl is captured by reference, which assumes the
  // manager outlives all queued work — confirm against SwiftASTManager's
  // lifetime guarantees.
  MgrImpl.ASTBuildQueue.dispatch([ThisProducer, &MgrImpl, Snapshots, Receiver] {
    std::string Error;
    ASTUnitRef Unit = ThisProducer->getASTUnitImpl(MgrImpl, Snapshots, Error);
    Receiver(Unit, Error);
  }, /*isStackDeep=*/true);
}
/// Lower every thread-local global in \p M to emulated-TLS form when the
/// target requests EmulatedTLS.
///
/// \returns true if the module was changed.
bool LowerEmuTLS::runOnModule(Module &M) {
  if (!TM || !TM->Options.EmulatedTLS)
    return false;

  bool Changed = false;

  // Collect the thread-local globals first: addEmuTlsVar adds new globals,
  // which must not happen while iterating M.globals().
  SmallVector<const GlobalVariable*, 8> TlsVars;
  for (const auto &G : M.globals()) {
    if (G.isThreadLocal())
      TlsVars.push_back(&G); // push_back is the idiom for a single element
  }
  for (const auto G : TlsVars)
    Changed |= addEmuTlsVar(M, G);
  return Changed;
}
/// Resolve the temporary variable-list placeholder of \p SP to the actual
/// array of preserved variables for that subprogram.
void DIBuilder::finalizeSubprogram(DISubprogram *SP) {
  // Only subprograms still holding a temporary placeholder need work.
  MDTuple *Placeholder = SP->getVariables().get();
  if (!Placeholder || !Placeholder->isTemporary())
    return;

  // Gather whatever variables were preserved for this subprogram, if any.
  SmallVector<Metadata *, 4> Vars;
  auto It = PreservedVariables.find(SP);
  if (It != PreservedVariables.end())
    Vars.append(It->second.begin(), It->second.end());

  // Swap the placeholder for the finished array.
  DINodeArray Resolved = getOrCreateArray(Vars);
  TempMDTuple(Placeholder)->replaceAllUsesWith(Resolved.get());
}
int main(int argc, const char *argv[]) { unsigned numForwardedArgs = argc - 1 // we drop argv[0] + 1; // -interpret SmallVector<const char *, 8> forwardedArgs; forwardedArgs.reserve(numForwardedArgs); forwardedArgs.append(&argv[1], &argv[argc]); forwardedArgs.push_back("-interpret"); assert(forwardedArgs.size() == numForwardedArgs); Observer observer; return performFrontend(forwardedArgs, argv[0], (void*) &printMetadataType, &observer); }
/// If there are any -verify errors (e.g. differences between expectations /// and actual diagnostics produced), apply fixits to the original source /// file and drop it back in place. void DiagnosticVerifier::autoApplyFixes(unsigned BufferID, ArrayRef<llvm::SMDiagnostic> diags) { // Walk the list of diagnostics, pulling out any fixits into an array of just // them. SmallVector<llvm::SMFixIt, 4> FixIts; for (auto &diag : diags) FixIts.append(diag.getFixIts().begin(), diag.getFixIts().end()); // If we have no fixits to apply, avoid touching the file. if (FixIts.empty()) return; // Sort the fixits by their start location. std::sort(FixIts.begin(), FixIts.end(), [&](const llvm::SMFixIt &lhs, const llvm::SMFixIt &rhs) -> bool { return lhs.getRange().Start.getPointer() < rhs.getRange().Start.getPointer(); }); // Get the contents of the original source file. auto memBuffer = SM.getLLVMSourceMgr().getMemoryBuffer(BufferID); auto bufferRange = memBuffer->getBuffer(); // Apply the fixes, building up a new buffer as an std::string. const char *LastPos = bufferRange.begin(); std::string Result; for (auto &fix : FixIts) { // We cannot handle overlapping fixits, so assert that they don't happen. assert(LastPos <= fix.getRange().Start.getPointer() && "Cannot handle overlapping fixits"); // Keep anything from the last spot we've checked to the start of the fixit. Result.append(LastPos, fix.getRange().Start.getPointer()); // Replace the content covered by the fixit with the replacement text. Result.append(fix.getText().begin(), fix.getText().end()); // Next character to consider is at the end of the fixit. LastPos = fix.getRange().End.getPointer(); } // Retain the end of the file. Result.append(LastPos, bufferRange.end()); std::ofstream outs(memBuffer->getBufferIdentifier()); outs << Result; }
/// Compute the SIL type of the closure produced by a partial_apply that
/// supplies the final \p argCount arguments of a function of type \p origTy.
SILType SILBuilder::getPartialApplyResultType(
    SILType origTy, unsigned argCount, SILModule &M, SubstitutionMap subs,
    ParameterConvention calleeConvention,
    PartialApplyInst::OnStackKind onStack) {
  CanSILFunctionType FTI = origTy.castTo<SILFunctionType>();
  if (!subs.empty())
    FTI = FTI->substGenericArgs(M, subs);

  assert(!FTI->isPolymorphic() &&
         "must provide substitutions for generic partial_apply");

  // The resulting closure keeps only the parameters that were not applied.
  auto params = FTI->getParameters();
  auto newParams = params.slice(0, params.size() - argCount);

  auto extInfo = FTI->getExtInfo()
                     .withRepresentation(SILFunctionType::Representation::Thick)
                     .withIsPseudogeneric(false);
  if (onStack)
    extInfo = extInfo.withNoEscape();

  // If the original method has an @unowned_inner_pointer return, the partial
  // application thunk will lifetime-extend 'self' for us, converting the
  // return value to @unowned.
  //
  // If the original method has an @autoreleased return, the partial
  // application thunk will retain it for us, converting the return value
  // to @owned.
  SmallVector<SILResultInfo, 4> results;
  results.append(FTI->getResults().begin(), FTI->getResults().end());
  for (auto &result : results) {
    if (result.getConvention() == ResultConvention::UnownedInnerPointer)
      result = SILResultInfo(result.getType(), ResultConvention::Unowned);
    else if (result.getConvention() == ResultConvention::Autoreleased)
      result = SILResultInfo(result.getType(), ResultConvention::Owned);
  }

  auto appliedFnType = SILFunctionType::get(nullptr, extInfo,
                                            FTI->getCoroutineKind(),
                                            calleeConvention,
                                            newParams,
                                            FTI->getYields(),
                                            results,
                                            FTI->getOptionalErrorResult(),
                                            M.getASTContext());

  return SILType::getPrimitiveObjectType(appliedFnType);
}
// Merge the given FunctionAccessedStorage in `other` into this
// FunctionAccessedStorage. Use the given `transformStorage` to map `other`
// AccessedStorage into this context. If `other` is from a callee, argument
// substitution will be performed if possible. However, there's no guarantee
// that the merged access values will belong to this function.
//
// Note that we may have `this` == `other` for self-recursion. We still need to
// propagate and merge in that case in case arguments are recursively dependent.
//
// Returns true if anything in this FunctionAccessedStorage changed as a
// result of the merge (new storage entries, widened access kinds, or an
// updated unidentified-access summary).
bool FunctionAccessedStorage::mergeAccesses(
    const FunctionAccessedStorage &other,
    std::function<StorageAccessInfo(const StorageAccessInfo &)>
        transformStorage) {
  // Insertion in DenseMap invalidates the iterator in the rare case of
  // self-recursion (`this` == `other`) that passes accessed storage though an
  // argument. Rather than complicate the code, make a temporary copy of the
  // AccessedStorage.
  //
  // Also note that the storageAccessIndex from otherStorage is relative to its
  // original context and should not be copied into this context.
  SmallVector<StorageAccessInfo, 8> otherStorageAccesses;
  otherStorageAccesses.reserve(other.storageAccessSet.size());
  otherStorageAccesses.append(other.storageAccessSet.begin(),
                              other.storageAccessSet.end());

  bool changed = false;
  for (auto &rawStorageInfo : otherStorageAccesses) {
    // Map the callee-relative storage into this function's context (e.g.
    // substituting arguments where possible).
    const StorageAccessInfo &otherStorageInfo =
        transformStorage(rawStorageInfo);
    // If transformStorage() returns invalid storage object for local storage,
    // that should not be merged with the caller.
    if (!otherStorageInfo)
      continue;

    // Unidentified storage is tracked separately as a single conservative
    // summary rather than as a map entry.
    if (otherStorageInfo.getKind() == AccessedStorage::Unidentified) {
      changed |= updateUnidentifiedAccess(otherStorageInfo.getAccessKind());
      continue;
    }
    // Attempt to add identified AccessedStorage to this map.
    auto result = insertStorageAccess(otherStorageInfo);
    if (result.second) {
      // A new AccessedStorage key was added to this map.
      changed = true;
      continue;
    }
    // Merge StorageAccessInfo into already-mapped AccessedStorage.
    changed |= result.first->mergeFrom(otherStorageInfo);
  }
  // Finally, propagate the other side's unidentified-access summary, if any.
  if (other.unidentifiedAccess != None)
    changed |= updateUnidentifiedAccess(other.unidentifiedAccess.getValue());

  return changed;
}
int Compilation::performSingleCommand(const Job *Cmd) { assert(Cmd->getInputs().empty() && "This can only be used to run a single command with no inputs"); switch (Cmd->getCondition()) { case Job::Condition::CheckDependencies: return 0; case Job::Condition::RunWithoutCascading: case Job::Condition::Always: case Job::Condition::NewlyAdded: break; } if (!writeFilelistIfNecessary(Cmd, Diags)) return 1; if (Level == OutputLevel::Verbose) Cmd->printCommandLine(llvm::errs()); SmallVector<const char *, 128> Argv; Argv.push_back(Cmd->getExecutable()); Argv.append(Cmd->getArguments().begin(), Cmd->getArguments().end()); Argv.push_back(nullptr); const char *ExecPath = Cmd->getExecutable(); const char **argv = Argv.data(); for (auto &envPair : Cmd->getExtraEnvironment()) { #if defined(_MSC_VER) int envResult =_putenv_s(envPair.first, envPair.second); #else int envResult = setenv(envPair.first, envPair.second, /*replacing=*/true); #endif assert(envResult == 0 && "expected environment variable to be set successfully"); // Bail out early in release builds. if (envResult != 0) { return envResult; } } return ExecuteInPlace(ExecPath, argv); }
/// createComplexVariable - Create a new descriptor for the specified variable /// which has a complex address expression for its address. DIVariable DIBuilder::createComplexVariable(unsigned Tag, DIDescriptor Scope, StringRef Name, DIFile F, unsigned LineNo, DIType Ty, ArrayRef<Value *> Addr, unsigned ArgNo) { SmallVector<Value *, 15> Elts; Elts.push_back(GetTagConstant(VMContext, Tag)); Elts.push_back(getNonCompileUnitScope(Scope)), Elts.push_back(MDString::get(VMContext, Name)); Elts.push_back(F); Elts.push_back(ConstantInt::get(Type::getInt32Ty(VMContext), (LineNo | (ArgNo << 24)))); Elts.push_back(Ty); Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext))); Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext))); Elts.append(Addr.begin(), Addr.end()); return DIVariable(MDNode::get(VMContext, Elts)); }
int clang_indexSourceFile(CXIndexAction idxAction, CXClientData client_data, IndexerCallbacks *index_callbacks, unsigned index_callbacks_size, unsigned index_options, const char *source_filename, const char * const *command_line_args, int num_command_line_args, struct CXUnsavedFile *unsaved_files, unsigned num_unsaved_files, CXTranslationUnit *out_TU, unsigned TU_options) { SmallVector<const char *, 4> Args; Args.push_back("clang"); Args.append(command_line_args, command_line_args + num_command_line_args); return clang_indexSourceFileFullArgv( idxAction, client_data, index_callbacks, index_callbacks_size, index_options, source_filename, Args.data(), Args.size(), unsaved_files, num_unsaved_files, out_TU, TU_options); }
// Each LexicalScope has first instruction and last instruction to mark // beginning and end of a scope respectively. Create an inverse map that list // scopes starts (and ends) with an instruction. One instruction may start (or // end) multiple scopes. Ignore scopes that are not reachable. void DebugHandlerBase::identifyScopeMarkers() { SmallVector<LexicalScope *, 4> WorkList; WorkList.push_back(LScopes.getCurrentFunctionScope()); while (!WorkList.empty()) { LexicalScope *S = WorkList.pop_back_val(); const SmallVectorImpl<LexicalScope *> &Children = S->getChildren(); if (!Children.empty()) WorkList.append(Children.begin(), Children.end()); if (S->isAbstractScope()) continue; for (const InsnRange &R : S->getRanges()) { assert(R.first && "InsnRange does not have first instruction!"); assert(R.second && "InsnRange does not have second instruction!"); requestLabelBeforeInsn(R.first); requestLabelAfterInsn(R.second); } } }