void FixedAddressChecker::checkPreStmt(const BinaryOperator *B, CheckerContext &C) const { // Using a fixed address is not portable because that address will probably // not be valid in all environments or platforms. if (B->getOpcode() != BO_Assign) return; QualType T = B->getType(); if (!T->isPointerType()) return; SVal RV = C.getSVal(B->getRHS()); if (!RV.isConstant() || RV.isZeroConstant()) return; if (ExplodedNode *N = C.generateNonFatalErrorNode()) { if (!BT) BT.reset( new BuiltinBug(this, "Use fixed address", "Using a fixed address is not portable because that " "address will probably not be valid in all " "environments or platforms.")); auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N); R->addRange(B->getRHS()->getSourceRange()); C.emitReport(std::move(R)); } }
void PointerArithChecker::checkPreStmt(const BinaryOperator *B, CheckerContext &C) const { if (B->getOpcode() != BO_Sub && B->getOpcode() != BO_Add) return; ProgramStateRef state = C.getState(); const LocationContext *LCtx = C.getLocationContext(); SVal LV = state->getSVal(B->getLHS(), LCtx); SVal RV = state->getSVal(B->getRHS(), LCtx); const MemRegion *LR = LV.getAsRegion(); if (!LR || !RV.isConstant()) return; // If pointer arithmetic is done on variables of non-array type, this often // means behavior rely on memory organization, which is dangerous. if (isa<VarRegion>(LR) || isa<CodeTextRegion>(LR) || isa<CompoundLiteralRegion>(LR)) { if (ExplodedNode *N = C.generateNonFatalErrorNode()) { if (!BT) BT.reset( new BuiltinBug(this, "Dangerous pointer arithmetic", "Pointer arithmetic done on non-array variables " "means reliance on memory layout, which is " "dangerous.")); auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N); R->addRange(B->getSourceRange()); C.emitReport(std::move(R)); } } }
// Warn on implicit casts that may lose the sign or precision of the converted
// value.  Which kinds of loss are checked depends on the statement the cast
// appears in (assignment, compound assignment, comparison, declaration).
void ConversionChecker::checkPreStmt(const ImplicitCastExpr *Cast,
                                     CheckerContext &C) const {
  // TODO: For now we only warn about DeclRefExpr, to avoid noise. Warn for
  // calculations also.
  if (!isa<DeclRefExpr>(Cast->IgnoreParenImpCasts()))
    return;

  // Don't warn for loss of sign/precision in macros.
  if (Cast->getExprLoc().isMacroID())
    return;

  // Get Parent.
  const ParentMap &PM = C.getLocationContext()->getParentMap();
  const Stmt *Parent = PM.getParent(Cast);
  if (!Parent)
    return;

  bool LossOfSign = false;
  bool LossOfPrecision = false;

  // Loss of sign/precision in binary operation.
  if (const auto *B = dyn_cast<BinaryOperator>(Parent)) {
    BinaryOperator::Opcode Opc = B->getOpcode();
    if (Opc == BO_Assign) {
      // Plain assignment: precision compared against the cast's result type.
      LossOfSign = isLossOfSign(Cast, C);
      LossOfPrecision = isLossOfPrecision(Cast, Cast->getType(), C);
    } else if (Opc == BO_AddAssign || Opc == BO_SubAssign) {
      // No loss of sign.
      LossOfPrecision = isLossOfPrecision(Cast, B->getLHS()->getType(), C);
    } else if (Opc == BO_MulAssign) {
      LossOfSign = isLossOfSign(Cast, C);
      LossOfPrecision = isLossOfPrecision(Cast, B->getLHS()->getType(), C);
    } else if (Opc == BO_DivAssign || Opc == BO_RemAssign) {
      LossOfSign = isLossOfSign(Cast, C);
      // No loss of precision.
    } else if (Opc == BO_AndAssign) {
      LossOfSign = isLossOfSign(Cast, C);
      // No loss of precision.
    } else if (Opc == BO_OrAssign || Opc == BO_XorAssign) {
      LossOfSign = isLossOfSign(Cast, C);
      LossOfPrecision = isLossOfPrecision(Cast, B->getLHS()->getType(), C);
    } else if (B->isRelationalOp() || B->isMultiplicativeOp()) {
      // Comparisons and multiplicative operators: only sign loss is checked.
      LossOfSign = isLossOfSign(Cast, C);
    }
  } else if (isa<DeclStmt>(Parent)) {
    // Initialization in a declaration: compare against the declared type.
    LossOfSign = isLossOfSign(Cast, C);
    LossOfPrecision = isLossOfPrecision(Cast, Cast->getType(), C);
  }

  if (LossOfSign || LossOfPrecision) {
    // Generate an error node.
    ExplodedNode *N = C.generateNonFatalErrorNode(C.getState());
    if (!N)
      return;
    if (LossOfSign)
      reportBug(N, C, "Loss of sign in implicit conversion");
    if (LossOfPrecision)
      reportBug(N, C, "Loss of precision in implicit conversion");
  }
}
void MPIChecker::checkDoubleNonblocking(const CallEvent &PreCallEvent, CheckerContext &Ctx) const { if (!FuncClassifier->isNonBlockingType(PreCallEvent.getCalleeIdentifier())) { return; } const MemRegion *const MR = PreCallEvent.getArgSVal(PreCallEvent.getNumArgs() - 1).getAsRegion(); if (!MR) return; const ElementRegion *const ER = dyn_cast<ElementRegion>(MR); // The region must be typed, in order to reason about it. if (!isa<TypedRegion>(MR) || (ER && !isa<TypedRegion>(ER->getSuperRegion()))) return; ProgramStateRef State = Ctx.getState(); const Request *const Req = State->get<RequestMap>(MR); // double nonblocking detected if (Req && Req->CurrentState == Request::State::Nonblocking) { ExplodedNode *ErrorNode = Ctx.generateNonFatalErrorNode(); BReporter.reportDoubleNonblocking(PreCallEvent, *Req, MR, ErrorNode, Ctx.getBugReporter()); Ctx.addTransition(ErrorNode->getState(), ErrorNode); } // no error else { State = State->set<RequestMap>(MR, Request::State::Nonblocking); Ctx.addTransition(State); } }
void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) const {
  ProgramStateRef State = C.getState();
  const LocationContext *LCtx = C.getLocationContext();

  // Bail out if the stream argument may be null.
  State = CheckNullStream(State->getSVal(CE->getArg(0), LCtx), State, C);
  if (!State)
    return;

  // Check the legality of the 'whence' argument of 'fseek'.
  SVal WhenceVal = State->getSVal(CE->getArg(2), LCtx);
  Optional<nonloc::ConcreteInt> WhenceCI =
      WhenceVal.getAs<nonloc::ConcreteInt>();
  if (!WhenceCI)
    return;

  // 0, 1 and 2 are the only accepted values for 'whence'.
  const int64_t Whence = WhenceCI->getValue().getSExtValue();
  if (Whence >= 0 && Whence <= 2)
    return;

  ExplodedNode *N = C.generateNonFatalErrorNode(State);
  if (!N)
    return;
  if (!BT_illegalwhence)
    BT_illegalwhence.reset(
        new BuiltinBug(this, "Illegal whence argument",
                       "The whence argument to fseek() should be "
                       "SEEK_SET, SEEK_END, or SEEK_CUR."));
  C.emitReport(llvm::make_unique<BugReport>(
      *BT_illegalwhence, BT_illegalwhence->getDescription(), N));
}
bool GenericTaintChecker::generateReportIfTainted(const Expr *E, const char Msg[], CheckerContext &C) const { assert(E); // Check for taint. ProgramStateRef State = C.getState(); Optional<SVal> PointedToSVal = getPointedToSVal(C, E); SVal TaintedSVal; if (PointedToSVal && State->isTainted(*PointedToSVal)) TaintedSVal = *PointedToSVal; else if (State->isTainted(E, C.getLocationContext())) TaintedSVal = C.getSVal(E); else return false; // Generate diagnostic. if (ExplodedNode *N = C.generateNonFatalErrorNode()) { initBugType(); auto report = llvm::make_unique<BugReport>(*BT, Msg, N); report->addRange(E->getSourceRange()); report->addVisitor(llvm::make_unique<TaintBugVisitor>(TaintedSVal)); C.emitReport(std::move(report)); return true; } return false; }
// Check the mutex expression of an @synchronized statement: an undefined
// value is a hard error (sink), a possibly-nil value gets a non-fatal
// warning, and the non-null assumption is propagated afterwards.
void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
                                     CheckerContext &C) const {
  const Expr *Ex = S->getSynchExpr();
  ProgramStateRef state = C.getState();
  SVal V = C.getSVal(Ex);

  // Uninitialized value used for the mutex?
  if (V.getAs<UndefinedVal>()) {
    // Fatal error node: continuing past an undefined mutex is meaningless.
    if (ExplodedNode *N = C.generateErrorNode()) {
      if (!BT_undef)
        BT_undef.reset(new BuiltinBug(this, "Uninitialized value used as mutex "
                                            "for @synchronized"));
      auto report = llvm::make_unique<BugReport>(
          *BT_undef, BT_undef->getDescription(), N);
      bugreporter::trackExpressionValue(N, Ex, *report);
      C.emitReport(std::move(report));
    }
    return;
  }

  if (V.isUnknown())
    return;

  // Check for null mutexes.
  ProgramStateRef notNullState, nullState;
  std::tie(notNullState, nullState) = state->assume(V.castAs<DefinedSVal>());

  if (nullState) {
    if (!notNullState) {
      // Generate an error node. This isn't a sink since
      // a null mutex just means no synchronization occurs.
      if (ExplodedNode *N = C.generateNonFatalErrorNode(nullState)) {
        if (!BT_null)
          BT_null.reset(
              new BuiltinBug(this, "Nil value used as mutex for @synchronized() "
                                   "(no synchronization will occur)"));
        auto report = llvm::make_unique<BugReport>(
            *BT_null, BT_null->getDescription(), N);
        bugreporter::trackExpressionValue(N, Ex, *report);
        C.emitReport(std::move(report));
        return;
      }
    }
    // Don't add a transition for 'nullState'. If the value is
    // under-constrained to be null or non-null, assume it is non-null
    // afterwards.
  }

  if (notNullState)
    C.addTransition(notNullState);
}
void DoubleFetchChecker::reportDoubleFetch(CheckerContext &Ctx,
                                           ProgramStateRef state) const {
  // Emit a non-fatal diagnostic; path exploration continues past this point.
  ExplodedNode *ErrNode = Ctx.generateNonFatalErrorNode(state);
  // If we've already reached this node on another path, return.
  if (!ErrNode)
    return;

  // Generate the report.
  auto Report = llvm::make_unique<BugReport>(
      *DoubleFetchType, "DF, use of untrusted data", ErrNode);
  Ctx.emitReport(std::move(Report));
}
void TaintTesterChecker::checkPostStmt(const Expr *E, CheckerContext &C) const { ProgramStateRef State = C.getState(); if (!State) return; if (State->isTainted(E, C.getLocationContext())) { if (ExplodedNode *N = C.generateNonFatalErrorNode()) { initBugType(); auto report = llvm::make_unique<BugReport>(*BT, "tainted",N); report->addRange(E->getSourceRange()); C.emitReport(std::move(report)); } } }
void BlockInCriticalSectionChecker::reportBlockInCritSection(
    SymbolRef BlockDescSym, const CallEvent &Call, CheckerContext &C) const {
  ExplodedNode *ErrNode = C.generateNonFatalErrorNode();
  if (!ErrNode)
    return;

  // Compose the diagnostic text naming the blocking callee.
  std::string Msg;
  llvm::raw_string_ostream OS(Msg);
  OS << "Call to blocking function '" << Call.getCalleeIdentifier()->getName()
     << "' inside of critical section";

  auto Report = llvm::make_unique<BugReport>(*BlockInCritSectionBugType,
                                             OS.str(), ErrNode);
  Report->addRange(Call.getSourceRange());
  Report->markInteresting(BlockDescSym);
  C.emitReport(std::move(Report));
}
void IteratorChecker::verifyDereference(CheckerContext &C,
                                        const SVal &Val) const {
  auto State = C.getState();
  const auto *Pos = getIteratorPosition(State, Val);
  if (!Pos || !isOutOfRange(State, *Pos))
    return;

  // A distinct program-point tag is required here; some range tests fail
  // without it.
  static CheckerProgramPointTag Tag("IteratorRangeChecker",
                                    "IteratorOutOfRange");
  ExplodedNode *N = C.generateNonFatalErrorNode(State, &Tag);
  if (!N)
    return;
  reportOutOfRangeBug("Iterator accessed outside of its range.", Val, C, N);
}
void MPIChecker::checkUnmatchedWaits(const CallEvent &PreCallEvent, CheckerContext &Ctx) const { if (!FuncClassifier->isWaitType(PreCallEvent.getCalleeIdentifier())) return; const MemRegion *const MR = topRegionUsedByWait(PreCallEvent); if (!MR) return; const ElementRegion *const ER = dyn_cast<ElementRegion>(MR); // The region must be typed, in order to reason about it. if (!isa<TypedRegion>(MR) || (ER && !isa<TypedRegion>(ER->getSuperRegion()))) return; llvm::SmallVector<const MemRegion *, 2> ReqRegions; allRegionsUsedByWait(ReqRegions, MR, PreCallEvent, Ctx); if (ReqRegions.empty()) return; ProgramStateRef State = Ctx.getState(); static CheckerProgramPointTag Tag("MPI-Checker", "UnmatchedWait"); ExplodedNode *ErrorNode{nullptr}; // Check all request regions used by the wait function. for (const auto &ReqRegion : ReqRegions) { const Request *const Req = State->get<RequestMap>(ReqRegion); State = State->set<RequestMap>(ReqRegion, Request::State::Wait); if (!Req) { if (!ErrorNode) { ErrorNode = Ctx.generateNonFatalErrorNode(State, &Tag); State = ErrorNode->getState(); } // A wait has no matching nonblocking call. BReporter.reportUnmatchedWait(PreCallEvent, ReqRegion, ErrorNode, Ctx.getBugReporter()); } } if (!ErrorNode) { Ctx.addTransition(State); } else { Ctx.addTransition(State, ErrorNode); } }
void StackAddrEscapeChecker::EmitStackError(CheckerContext &C,
                                            const MemRegion *R,
                                            const Expr *RetE) const {
  ExplodedNode *N = C.generateNonFatalErrorNode();
  if (!N)
    return;
  if (!BT_returnstack)
    BT_returnstack = llvm::make_unique<BuiltinBug>(
        this, "Return of address to stack-allocated memory");

  // Describe the escaping region; genName() also yields the source range of
  // the region's declaration when one is available.
  SmallString<128> Buf;
  llvm::raw_svector_ostream OS(Buf);
  SourceRange DeclRange = genName(OS, R, C.getASTContext());
  OS << " returned to caller";

  auto Report = llvm::make_unique<BugReport>(*BT_returnstack, OS.str(), N);
  Report->addRange(RetE->getSourceRange());
  if (DeclRange.isValid())
    Report->addRange(DeclRange);
  C.emitReport(std::move(Report));
}
void DeleteWithNonVirtualDtorChecker::checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const { const Expr *DeletedObj = DE->getArgument(); const MemRegion *MR = C.getSVal(DeletedObj).getAsRegion(); if (!MR) return; const auto *BaseClassRegion = MR->getAs<TypedValueRegion>(); const auto *DerivedClassRegion = MR->getBaseRegion()->getAs<SymbolicRegion>(); if (!BaseClassRegion || !DerivedClassRegion) return; const auto *BaseClass = BaseClassRegion->getValueType()->getAsCXXRecordDecl(); const auto *DerivedClass = DerivedClassRegion->getSymbol()->getType()->getPointeeCXXRecordDecl(); if (!BaseClass || !DerivedClass) return; if (!BaseClass->hasDefinition() || !DerivedClass->hasDefinition()) return; if (BaseClass->getDestructor()->isVirtual()) return; if (!DerivedClass->isDerivedFrom(BaseClass)) return; if (!BT) BT.reset(new BugType(this, "Destruction of a polymorphic object with no " "virtual destructor", "Logic error")); ExplodedNode *N = C.generateNonFatalErrorNode(); auto R = llvm::make_unique<BugReport>(*BT, BT->getName(), N); // Mark region of problematic base class for later use in the BugVisitor. R->markInteresting(BaseClassRegion); R->addVisitor(llvm::make_unique<DeleteBugVisitor>()); C.emitReport(std::move(R)); }
bool GenericTaintChecker::generateReportIfTainted(const Expr *E, const char Msg[], CheckerContext &C) const { assert(E); // Check for taint. ProgramStateRef State = C.getState(); if (!State->isTainted(getPointedToSymbol(C, E)) && !State->isTainted(E, C.getLocationContext())) return false; // Generate diagnostic. if (ExplodedNode *N = C.generateNonFatalErrorNode()) { initBugType(); auto report = llvm::make_unique<BugReport>(*BT, Msg, N); report->addRange(E->getSourceRange()); C.emitReport(std::move(report)); return true; } return false; }
void StackAddrEscapeChecker::checkReturnedBlockCaptures( const BlockDataRegion &B, CheckerContext &C) const { for (const MemRegion *Region : getCapturedStackRegions(B, C)) { if (isArcManagedBlock(Region, C) || isNotInCurrentFrame(Region, C)) continue; ExplodedNode *N = C.generateNonFatalErrorNode(); if (!N) continue; if (!BT_capturedstackret) BT_capturedstackret = llvm::make_unique<BuiltinBug>( this, "Address of stack-allocated memory is captured"); SmallString<128> Buf; llvm::raw_svector_ostream Out(Buf); SourceRange Range = genName(Out, Region, C.getASTContext()); Out << " is captured by a returned block"; auto Report = llvm::make_unique<BugReport>(*BT_capturedstackret, Out.str(), N); if (Range.isValid()) Report->addRange(Range); C.emitReport(std::move(Report)); } }
void ClassReleaseChecker::checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const { if (!BT) { BT.reset(new APIMisuse( this, "message incorrectly sent to class instead of class instance")); ASTContext &Ctx = C.getASTContext(); releaseS = GetNullarySelector("release", Ctx); retainS = GetNullarySelector("retain", Ctx); autoreleaseS = GetNullarySelector("autorelease", Ctx); drainS = GetNullarySelector("drain", Ctx); } if (msg.isInstanceMessage()) return; const ObjCInterfaceDecl *Class = msg.getReceiverInterface(); assert(Class); Selector S = msg.getSelector(); if (!(S == releaseS || S == retainS || S == autoreleaseS || S == drainS)) return; if (ExplodedNode *N = C.generateNonFatalErrorNode()) { SmallString<200> buf; llvm::raw_svector_ostream os(buf); os << "The '"; S.print(os); os << "' message should be sent to instances " "of class '" << Class->getName() << "' and not the class directly"; auto report = llvm::make_unique<BugReport>(*BT, os.str(), N); report->addRange(msg.getSourceRange()); C.emitReport(std::move(report)); } }
void StackAddrEscapeChecker::checkAsyncExecutedBlockCaptures( const BlockDataRegion &B, CheckerContext &C) const { // There is a not-too-uncommon idiom // where a block passed to dispatch_async captures a semaphore // and then the thread (which called dispatch_async) is blocked on waiting // for the completion of the execution of the block // via dispatch_semaphore_wait. To avoid false-positives (for now) // we ignore all the blocks which have captured // a variable of the type "dispatch_semaphore_t". if (isSemaphoreCaptured(*B.getDecl())) return; for (const MemRegion *Region : getCapturedStackRegions(B, C)) { // The block passed to dispatch_async may capture another block // created on the stack. However, there is no leak in this situaton, // no matter if ARC or no ARC is enabled: // dispatch_async copies the passed "outer" block (via Block_copy) // and if the block has captured another "inner" block, // the "inner" block will be copied as well. if (isa<BlockDataRegion>(Region)) continue; ExplodedNode *N = C.generateNonFatalErrorNode(); if (!N) continue; if (!BT_capturedstackasync) BT_capturedstackasync = llvm::make_unique<BuiltinBug>( this, "Address of stack-allocated memory is captured"); SmallString<128> Buf; llvm::raw_svector_ostream Out(Buf); SourceRange Range = genName(Out, Region, C.getASTContext()); Out << " is captured by an asynchronously-executed block"; auto Report = llvm::make_unique<BugReport>(*BT_capturedstackasync, Out.str(), N); if (Range.isValid()) Report->addRange(Range); C.emitReport(std::move(Report)); } }
void PointerSubChecker::checkPreStmt(const BinaryOperator *B, CheckerContext &C) const { // When doing pointer subtraction, if the two pointers do not point to the // same memory chunk, emit a warning. if (B->getOpcode() != BO_Sub) return; SVal LV = C.getSVal(B->getLHS()); SVal RV = C.getSVal(B->getRHS()); const MemRegion *LR = LV.getAsRegion(); const MemRegion *RR = RV.getAsRegion(); if (!(LR && RR)) return; const MemRegion *BaseLR = LR->getBaseRegion(); const MemRegion *BaseRR = RR->getBaseRegion(); if (BaseLR == BaseRR) return; // Allow arithmetic on different symbolic regions. if (isa<SymbolicRegion>(BaseLR) || isa<SymbolicRegion>(BaseRR)) return; if (ExplodedNode *N = C.generateNonFatalErrorNode()) { if (!BT) BT.reset( new BuiltinBug(this, "Pointer subtraction", "Subtraction of two pointers that do not point to " "the same memory chunk may cause incorrect result.")); auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N); R->addRange(B->getSourceRange()); C.emitReport(std::move(R)); } }
// When request symbols die, report nonblocking requests that were never
// matched by a wait, and purge all dead requests from the state.
void MPIChecker::checkMissingWaits(SymbolReaper &SymReaper,
                                   CheckerContext &Ctx) const {
  if (!SymReaper.hasDeadSymbols())
    return;
  ProgramStateRef State = Ctx.getState();
  const auto &Requests = State->get<RequestMap>();
  if (Requests.isEmpty())
    return;

  static CheckerProgramPointTag Tag("MPI-Checker", "MissingWait");
  ExplodedNode *ErrorNode{nullptr};

  auto ReqMap = State->get<RequestMap>();
  for (const auto &Req : ReqMap) {
    if (!SymReaper.isLiveRegion(Req.first)) {
      if (Req.second.CurrentState == Request::State::Nonblocking) {
        if (!ErrorNode) {
          ErrorNode = Ctx.generateNonFatalErrorNode(State, &Tag);
          // FIX: generateNonFatalErrorNode() returns null when this node has
          // already been reached on another path; previously ErrorNode was
          // dereferenced unconditionally.  Skip the duplicate report but
          // still remove the dead request below.
          if (ErrorNode)
            State = ErrorNode->getState();
        }
        if (ErrorNode)
          BReporter.reportMissingWait(Req.second, Req.first, ErrorNode,
                                      Ctx.getBugReporter());
      }
      State = State->remove<RequestMap>(Req.first);
    }
  }

  // Transition to update the state regarding removed requests.
  if (!ErrorNode) {
    Ctx.addTransition(State);
  } else {
    Ctx.addTransition(State, ErrorNode);
  }
}
void SimpleStreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
                                           CheckerContext &C) const {
  ProgramStateRef State = C.getState();
  SymbolVector LeakedStreams;

  // Walk the tracked streams: collect leaked symbols and drop dead entries.
  StreamMapTy TrackedStreams = State->get<StreamMap>();
  for (StreamMapTy::iterator It = TrackedStreams.begin(),
                             End = TrackedStreams.end();
       It != End; ++It) {
    SymbolRef Stream = It->first;
    const bool IsSymDead = SymReaper.isDead(Stream);

    // Collect leaked symbols.
    if (isLeaked(Stream, It->second, IsSymDead, State))
      LeakedStreams.push_back(Stream);

    // Remove the dead symbol from the streams map.
    if (IsSymDead)
      State = State->remove<StreamMap>(Stream);
  }

  ExplodedNode *N = C.generateNonFatalErrorNode(State);
  if (!N)
    return;
  reportLeaks(LeakedStreams, C, N);
}
// Check calls to CFNumberCreate/CFNumberGetValue for a mismatch between the
// bit-width of the requested CFNumber kind and the bit-width of the integer
// passed by reference as the value buffer.
void CFNumberChecker::checkPreStmt(const CallExpr *CE,
                                   CheckerContext &C) const {
  ProgramStateRef state = C.getState();
  const FunctionDecl *FD = C.getCalleeDecl(CE);
  if (!FD)
    return;

  ASTContext &Ctx = C.getASTContext();
  // Lazily cache the identifiers of the two functions being checked.
  if (!ICreate) {
    ICreate = &Ctx.Idents.get("CFNumberCreate");
    IGetValue = &Ctx.Idents.get("CFNumberGetValue");
  }
  if (!(FD->getIdentifier() == ICreate || FD->getIdentifier() == IGetValue) ||
      CE->getNumArgs() != 3)
    return;

  // Get the value of the "theType" argument.
  SVal TheTypeVal = C.getSVal(CE->getArg(1));

  // FIXME: We really should allow ranges of valid theType values, and
  // bifurcate the state appropriately.
  Optional<nonloc::ConcreteInt> V = TheTypeVal.getAs<nonloc::ConcreteInt>();
  if (!V)
    return;

  uint64_t NumberKind = V->getValue().getLimitedValue();
  Optional<uint64_t> OptCFNumberSize = GetCFNumberSize(Ctx, NumberKind);

  // FIXME: In some cases we can emit an error.
  if (!OptCFNumberSize)
    return;

  uint64_t CFNumberSize = *OptCFNumberSize;

  // Look at the value of the integer being passed by reference. Essentially
  // we want to catch cases where the value passed in is not equal to the
  // size of the type being created.
  SVal TheValueExpr = C.getSVal(CE->getArg(2));

  // FIXME: Eventually we should handle arbitrary locations. We can do this
  // by having an enhanced memory model that does low-level typing.
  Optional<loc::MemRegionVal> LV = TheValueExpr.getAs<loc::MemRegionVal>();
  if (!LV)
    return;

  const TypedValueRegion* R = dyn_cast<TypedValueRegion>(LV->stripCasts());
  if (!R)
    return;

  QualType T = Ctx.getCanonicalType(R->getValueType());

  // FIXME: If the pointee isn't an integer type, should we flag a warning?
  // People can do weird stuff with pointers.
  if (!T->isIntegralOrEnumerationType())
    return;

  uint64_t PrimitiveTypeSize = Ctx.getTypeSize(T);

  // Matching sizes: nothing to report.
  if (PrimitiveTypeSize == CFNumberSize)
    return;

  // FIXME: We can actually create an abstract "CFNumber" object that has
  // the bits initialized to the provided values.
  ExplodedNode *N = C.generateNonFatalErrorNode();
  if (N) {
    SmallString<128> sbuf;
    llvm::raw_svector_ostream os(sbuf);
    bool isCreate = (FD->getIdentifier() == ICreate);

    // The wording depends on the direction of the copy: Create reads from
    // the integer, GetValue writes into it.
    if (isCreate) {
      os << (PrimitiveTypeSize == 8 ? "An " : "A ")
         << PrimitiveTypeSize << "-bit integer is used to initialize a "
         << "CFNumber object that represents "
         << (CFNumberSize == 8 ? "an " : "a ")
         << CFNumberSize << "-bit integer; ";
    } else {
      os << "A CFNumber object that represents "
         << (CFNumberSize == 8 ? "an " : "a ")
         << CFNumberSize << "-bit integer is used to initialize "
         << (PrimitiveTypeSize == 8 ? "an " : "a ")
         << PrimitiveTypeSize << "-bit integer; ";
    }

    if (PrimitiveTypeSize < CFNumberSize)
      os << (CFNumberSize - PrimitiveTypeSize)
         << " bits of the CFNumber value will "
         << (isCreate ? "be garbage." : "overwrite adjacent storage.");
    else
      os << (PrimitiveTypeSize - CFNumberSize)
         << " bits of the integer value will be "
         << (isCreate ? "lost." : "garbage.");

    if (!BT)
      BT.reset(new APIMisuse(this, "Bad use of CFNumber APIs"));

    auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
    report->addRange(CE->getArg(2)->getSourceRange());
    C.emitReport(std::move(report));
  }
}
// At the end of a function, warn about global variables that still hold an
// address belonging to the function's (about to be destroyed) stack frame.
void StackAddrEscapeChecker::checkEndFunction(CheckerContext &Ctx) const {
  ProgramStateRef state = Ctx.getState();

  // Iterate over all bindings to global variables and see if it contains
  // a memory region in the stack space.
  class CallBack : public StoreManager::BindingsHandler {
  private:
    CheckerContext &Ctx;
    const StackFrameContext *CurSFC;
  public:
    // Pairs of (global region, stack region bound to it).
    SmallVector<std::pair<const MemRegion*, const MemRegion*>, 10> V;

    CallBack(CheckerContext &CC)
        : Ctx(CC),
          CurSFC(CC.getLocationContext()->getCurrentStackFrame()) {}

    bool HandleBinding(StoreManager &SMgr, Store store,
                       const MemRegion *region, SVal val) override {
      if (!isa<GlobalsSpaceRegion>(region->getMemorySpace()))
        return true;
      const MemRegion *vR = val.getAsRegion();
      if (!vR)
        return true;

      // Under automated retain release, it is okay to assign a block
      // directly to a global variable.
      if (Ctx.getASTContext().getLangOpts().ObjCAutoRefCount &&
          isa<BlockDataRegion>(vR))
        return true;

      if (const StackSpaceRegion *SSR =
              dyn_cast<StackSpaceRegion>(vR->getMemorySpace())) {
        // If the global variable holds a location in the current stack frame,
        // record the binding to emit a warning.
        if (SSR->getStackFrame() == CurSFC)
          V.push_back(std::make_pair(region, vR));
      }

      return true;
    }
  };

  CallBack cb(Ctx);
  state->getStateManager().getStoreManager().iterBindings(state->getStore(),
                                                          cb);

  if (cb.V.empty())
    return;

  // Generate an error node.
  ExplodedNode *N = Ctx.generateNonFatalErrorNode(state);
  if (!N)
    return;

  if (!BT_stackleak)
    BT_stackleak.reset(
        new BuiltinBug(this, "Stack address stored into global variable",
                       "Stack address was saved into a global variable. "
                       "This is dangerous because the address will become "
                       "invalid after returning from the function"));

  // One report per escaping binding, all attached to the same error node.
  for (unsigned i = 0, e = cb.V.size(); i != e; ++i) {
    // Generate a report for this bug.
    SmallString<512> buf;
    llvm::raw_svector_ostream os(buf);
    SourceRange range = genName(os, cb.V[i].second, Ctx.getASTContext());
    os << " is still referred to by the global variable '";
    const VarRegion *VR = cast<VarRegion>(cb.V[i].first->getBaseRegion());
    os << *VR->getDecl()
       << "' upon returning to the caller. This will be a dangling reference";
    auto report = llvm::make_unique<BugReport>(*BT_stackleak, os.str(), N);
    if (range.isValid())
      report->addRange(range);
    Ctx.emitReport(std::move(report));
  }
}
/// Report any unreleased instance variables for the current instance being
/// dealloced.  Removes diagnosed (and nil) ivar symbols from the
/// UnreleasedIvarMap so they are not reported again on this path.
void ObjCDeallocChecker::diagnoseMissingReleases(CheckerContext &C) const {
  ProgramStateRef State = C.getState();

  SVal SelfVal;
  if (!isInInstanceDealloc(C, SelfVal))
    return;

  const MemRegion *SelfRegion = SelfVal.castAs<loc::MemRegionVal>().getRegion();
  const LocationContext *LCtx = C.getLocationContext();

  ExplodedNode *ErrNode = nullptr;

  SymbolRef SelfSym = SelfVal.getAsSymbol();
  if (!SelfSym)
    return;

  // The ivar symbols for this instance that have not yet been released.
  const SymbolSet *OldUnreleased = State->get<UnreleasedIvarMap>(SelfSym);
  if (!OldUnreleased)
    return;

  SymbolSet NewUnreleased = *OldUnreleased;
  SymbolSet::Factory &F = State->getStateManager().get_context<SymbolSet>();

  ProgramStateRef InitialState = State;

  for (auto *IvarSymbol : *OldUnreleased) {
    const TypedValueRegion *TVR =
        cast<SymbolRegionValue>(IvarSymbol)->getRegion();
    const ObjCIvarRegion *IvarRegion = cast<ObjCIvarRegion>(TVR);

    // Don't warn if the ivar is not for this instance.
    if (SelfRegion != IvarRegion->getSuperRegion())
      continue;

    const ObjCIvarDecl *IvarDecl = IvarRegion->getDecl();
    // Prevent an inlined call to -dealloc in a super class from warning
    // about the values the subclass's -dealloc should release.
    if (IvarDecl->getContainingInterface() !=
        cast<ObjCMethodDecl>(LCtx->getDecl())->getClassInterface())
      continue;

    // Prevents diagnosing multiple times for the same instance variable
    // at, for example, both a return and at the end of the function.
    NewUnreleased = F.remove(NewUnreleased, IvarSymbol);

    // An ivar constrained to nil needs no release; skip it.
    if (State->getStateManager()
            .getConstraintManager()
            .isNull(State, IvarSymbol)
            .isConstrainedTrue()) {
      continue;
    }

    // A missing release manifests as a leak, so treat as a non-fatal error.
    if (!ErrNode)
      ErrNode = C.generateNonFatalErrorNode();
    // If we've already reached this node on another path, return without
    // diagnosing.
    if (!ErrNode)
      return;

    std::string Buf;
    llvm::raw_string_ostream OS(Buf);

    const ObjCInterfaceDecl *Interface = IvarDecl->getContainingInterface();
    // If the class is known to have a lifecycle with teardown that is
    // separate from -dealloc, do not warn about missing releases. We
    // suppress here (rather than not tracking for instance variables in
    // such classes) because these classes are rare.
    if (classHasSeparateTeardown(Interface))
      return;

    ObjCImplDecl *ImplDecl = Interface->getImplementation();

    const ObjCPropertyImplDecl *PropImpl =
        ImplDecl->FindPropertyImplIvarDecl(IvarDecl->getIdentifier());

    const ObjCPropertyDecl *PropDecl = PropImpl->getPropertyDecl();

    // Only retained or copied properties should have been tracked here.
    assert(PropDecl->getSetterKind() == ObjCPropertyDecl::Copy ||
           PropDecl->getSetterKind() == ObjCPropertyDecl::Retain);

    OS << "The '" << *IvarDecl << "' ivar in '" << *ImplDecl << "' was ";

    if (PropDecl->getSetterKind() == ObjCPropertyDecl::Retain)
      OS << "retained";
    else
      OS << "copied";

    OS << " by a synthesized property but not released"
          " before '[super dealloc]'";

    std::unique_ptr<BugReport> BR(
        new BugReport(*MissingReleaseBugType, OS.str(), ErrNode));

    C.emitReport(std::move(BR));
  }

  // Persist the shrunken set (or drop the entry entirely when empty).
  if (NewUnreleased.isEmpty()) {
    State = State->remove<UnreleasedIvarMap>(SelfSym);
  } else {
    State = State->set<UnreleasedIvarMap>(SelfSym, NewUnreleased);
  }

  if (ErrNode) {
    C.addTransition(State, ErrNode);
  } else if (State != InitialState) {
    C.addTransition(State);
  }

  // Make sure that after checking in the top-most frame the list of
  // tracked ivars is empty. This is intended to detect accidental leaks in
  // the UnreleasedIvarMap program state.
  assert(!LCtx->inTopFrame() || State->get<UnreleasedIvarMap>().isEmpty());
}
/// Emits a warning if the current context is -dealloc and ReleasedValue
/// must not be directly released in a -dealloc. Returns true if a diagnostic
/// was emitted.
bool ObjCDeallocChecker::diagnoseExtraRelease(SymbolRef ReleasedValue,
                                              const ObjCMethodCall &M,
                                              CheckerContext &C) const {
  // Try to get the region from which the released value was loaded.
  // Note that, unlike diagnosing for missing releases, here we don't track
  // values that must not be released in the state. This is because even if
  // these values escape, it is still an error under the rules of MRR to
  // release them in -dealloc.
  const ObjCPropertyImplDecl *PropImpl =
      findPropertyOnDeallocatingInstance(ReleasedValue, C);

  if (!PropImpl)
    return false;

  // If the ivar belongs to a property that must not be released directly
  // in dealloc, emit a warning.
  if (getDeallocReleaseRequirement(PropImpl) !=
      ReleaseRequirement::MustNotReleaseDirectly) {
    return false;
  }

  // If the property is readwrite but it shadows a read-only property in its
  // external interface, treat the property a read-only. If the outside
  // world cannot write to a property then the internal implementation is free
  // to make its own convention about whether the value is stored retained
  // or not. We look up the shadow here rather than in
  // getDeallocReleaseRequirement() because doing so can be expensive.
  const ObjCPropertyDecl *PropDecl = findShadowedPropertyDecl(PropImpl);
  if (PropDecl) {
    if (PropDecl->isReadOnly())
      return false;
  } else {
    PropDecl = PropImpl->getPropertyDecl();
  }

  // Non-fatal: the extra release is a bug but does not sink the path.
  ExplodedNode *ErrNode = C.generateNonFatalErrorNode();
  if (!ErrNode)
    return false;

  std::string Buf;
  llvm::raw_string_ostream OS(Buf);

  // Only weak / assign-readwrite / CIFilter-released properties should have
  // reached this point (MustNotReleaseDirectly requirement above).
  assert(PropDecl->getSetterKind() == ObjCPropertyDecl::Weak ||
         (PropDecl->getSetterKind() == ObjCPropertyDecl::Assign &&
          !PropDecl->isReadOnly()) ||
         isReleasedByCIFilterDealloc(PropImpl));

  const ObjCImplDecl *Container = getContainingObjCImpl(C.getLocationContext());
  OS << "The '" << *PropImpl->getPropertyIvarDecl() << "' ivar in '"
     << *Container;

  if (isReleasedByCIFilterDealloc(PropImpl)) {
    OS << "' will be released by '-[CIFilter dealloc]' but also released here";
  } else {
    OS << "' was synthesized for ";

    if (PropDecl->getSetterKind() == ObjCPropertyDecl::Weak)
      OS << "a weak";
    else
      OS << "an assign, readwrite";

    OS << " property but was released in 'dealloc'";
  }

  std::unique_ptr<BugReport> BR(
      new BugReport(*ExtraReleaseBugType, OS.str(), ErrNode));
  BR->addRange(M.getOriginExpr()->getSourceRange());

  C.emitReport(std::move(BR));

  return true;
}
// Check calls to CFNumberCreate for a mismatch between the bit-width of the
// requested CFNumber kind and the bit-width of the integer whose address is
// passed as the value argument.
void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
                                         CheckerContext &C) const {
  ProgramStateRef state = C.getState();
  const FunctionDecl *FD = C.getCalleeDecl(CE);
  if (!FD)
    return;

  ASTContext &Ctx = C.getASTContext();
  // Lazily cache the identifier for "CFNumberCreate".
  if (!II)
    II = &Ctx.Idents.get("CFNumberCreate");

  if (FD->getIdentifier() != II || CE->getNumArgs() != 3)
    return;

  // Get the value of the "theType" argument.
  const LocationContext *LCtx = C.getLocationContext();
  SVal TheTypeVal = state->getSVal(CE->getArg(1), LCtx);

  // FIXME: We really should allow ranges of valid theType values, and
  // bifurcate the state appropriately.
  Optional<nonloc::ConcreteInt> V = TheTypeVal.getAs<nonloc::ConcreteInt>();
  if (!V)
    return;

  uint64_t NumberKind = V->getValue().getLimitedValue();
  Optional<uint64_t> OptTargetSize = GetCFNumberSize(Ctx, NumberKind);

  // FIXME: In some cases we can emit an error.
  if (!OptTargetSize)
    return;

  uint64_t TargetSize = *OptTargetSize;

  // Look at the value of the integer being passed by reference. Essentially
  // we want to catch cases where the value passed in is not equal to the
  // size of the type being created.
  SVal TheValueExpr = state->getSVal(CE->getArg(2), LCtx);

  // FIXME: Eventually we should handle arbitrary locations. We can do this
  // by having an enhanced memory model that does low-level typing.
  Optional<loc::MemRegionVal> LV = TheValueExpr.getAs<loc::MemRegionVal>();
  if (!LV)
    return;

  const TypedValueRegion* R = dyn_cast<TypedValueRegion>(LV->stripCasts());
  if (!R)
    return;

  QualType T = Ctx.getCanonicalType(R->getValueType());

  // FIXME: If the pointee isn't an integer type, should we flag a warning?
  // People can do weird stuff with pointers.
  if (!T->isIntegralOrEnumerationType())
    return;

  uint64_t SourceSize = Ctx.getTypeSize(T);

  // CHECK: is SourceSize == TargetSize
  if (SourceSize == TargetSize)
    return;

  // Generate an error. Only generate a sink error node
  // if 'SourceSize < TargetSize'; otherwise generate a non-fatal error node.
  //
  // FIXME: We can actually create an abstract "CFNumber" object that has
  // the bits initialized to the provided values.
  //
  ExplodedNode *N = SourceSize < TargetSize ? C.generateErrorNode()
                                            : C.generateNonFatalErrorNode();
  if (N) {
    SmallString<128> sbuf;
    llvm::raw_svector_ostream os(sbuf);
    os << (SourceSize == 8 ? "An " : "A ")
       << SourceSize << " bit integer is used to initialize a CFNumber "
          "object that represents "
       << (TargetSize == 8 ? "an " : "a ")
       << TargetSize << " bit integer. ";

    if (SourceSize < TargetSize)
      os << (TargetSize - SourceSize)
         << " bits of the CFNumber value will be garbage." ;
    else
      os << (SourceSize - TargetSize)
         << " bits of the input integer will be lost.";

    if (!BT)
      BT.reset(new APIMisuse(this, "Bad use of CFNumberCreate"));

    auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
    report->addRange(CE->getArg(2)->getSourceRange());
    C.emitReport(std::move(report));
  }
}
void StackAddrEscapeChecker::checkEndFunction(CheckerContext &Ctx) const {
  // Reports stack addresses that are still stored in global (or static)
  // storage when the current function returns — at that point the stored
  // address becomes dangling.
  if (!ChecksEnabled[CK_StackAddrEscapeChecker])
    return;

  ProgramStateRef State = Ctx.getState();

  // Iterate over all bindings to global variables and see if it contains
  // a memory region in the stack space.
  class CallBack : public StoreManager::BindingsHandler {
  private:
    CheckerContext &Ctx;
    const StackFrameContext *CurSFC;

  public:
    // Pairs of (global region, escaping stack region) found in the store.
    SmallVector<std::pair<const MemRegion *, const MemRegion *>, 10> V;

    CallBack(CheckerContext &CC)
        : Ctx(CC), CurSFC(CC.getLocationContext()->getCurrentStackFrame()) {}

    bool HandleBinding(StoreManager &SMgr, Store S, const MemRegion *Region,
                       SVal Val) override {
      // Only bindings whose target lives in global storage are of interest.
      if (!isa<GlobalsSpaceRegion>(Region->getMemorySpace()))
        return true;
      const MemRegion *VR = Val.getAsRegion();
      // Record the pair when the stored value points into stack space.
      // ARC-managed blocks and regions not belonging to the current frame
      // are excluded (per the isArcManagedBlock/isNotInCurrentFrame helpers).
      if (VR && isa<StackSpaceRegion>(VR->getMemorySpace()) &&
          !isArcManagedBlock(VR, Ctx) && !isNotInCurrentFrame(VR, Ctx))
        V.emplace_back(Region, VR);
      return true; // Keep iterating over the remaining bindings.
    }
  };

  CallBack Cb(Ctx);
  State->getStateManager().getStoreManager().iterBindings(State->getStore(),
                                                          Cb);
  if (Cb.V.empty())
    return;

  // Generate an error node.
  ExplodedNode *N = Ctx.generateNonFatalErrorNode(State);
  if (!N)
    return;

  // The bug type is created lazily and reused for every report.
  if (!BT_stackleak)
    BT_stackleak = llvm::make_unique<BuiltinBug>(
        this, "Stack address stored into global variable",
        "Stack address was saved into a global variable. "
        "This is dangerous because the address will become "
        "invalid after returning from the function");

  for (const auto &P : Cb.V) {
    // Generate a report for this bug.
    SmallString<128> Buf;
    llvm::raw_svector_ostream Out(Buf);
    // genName streams a description of the escaping stack region and returns
    // the source range to highlight (may be invalid).
    SourceRange Range = genName(Out, P.second, Ctx.getASTContext());
    Out << " is still referred to by the ";
    if (isa<StaticGlobalSpaceRegion>(P.first->getMemorySpace()))
      Out << "static";
    else
      Out << "global";
    Out << " variable '";
    const VarRegion *VR = cast<VarRegion>(P.first->getBaseRegion());
    Out << *VR->getDecl()
        << "' upon returning to the caller. This will be a dangling reference";
    auto Report = llvm::make_unique<BugReport>(*BT_stackleak, Out.str(), N);
    if (Range.isValid())
      Report->addRange(Range);
    Ctx.emitReport(std::move(Report));
  }
}
void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
                                                    CheckerContext &C) const {
  // Warns when a recognized variadic Objective-C collection method is passed
  // an argument that is not an Objective-C pointer type.
  if (!BT) {
    // One-time setup: create the bug type and cache the selectors of the
    // variadic collection methods this checker recognizes.
    BT.reset(new APIMisuse(this,
                           "Arguments passed to variadic method aren't all "
                           "Objective-C pointer types"));

    ASTContext &Ctx = C.getASTContext();
    arrayWithObjectsS = GetUnarySelector("arrayWithObjects", Ctx);
    dictionaryWithObjectsAndKeysS =
        GetUnarySelector("dictionaryWithObjectsAndKeys", Ctx);
    setWithObjectsS = GetUnarySelector("setWithObjects", Ctx);
    orderedSetWithObjectsS = GetUnarySelector("orderedSetWithObjects", Ctx);
    initWithObjectsS = GetUnarySelector("initWithObjects", Ctx);
    initWithObjectsAndKeysS = GetUnarySelector("initWithObjectsAndKeys", Ctx);
  }

  if (!isVariadicMessage(msg))
    return;

  // We are not interested in the selector arguments since they have
  // well-defined types, so the compiler will issue a warning for them.
  unsigned variadicArgsBegin = msg.getSelector().getNumArgs();

  // We're not interested in the last argument since it has to be nil or the
  // compiler would have issued a warning for it elsewhere.
  unsigned variadicArgsEnd = msg.getNumArgs() - 1;

  if (variadicArgsEnd <= variadicArgsBegin)
    return;

  // Verify that all arguments have Objective-C types.
  Optional<ExplodedNode*> errorNode;

  for (unsigned I = variadicArgsBegin; I != variadicArgsEnd; ++I) {
    QualType ArgTy = msg.getArgExpr(I)->getType();
    if (ArgTy->isObjCObjectPointerType())
      continue;

    // Block pointers are treated as Objective-C pointers.
    if (ArgTy->isBlockPointerType())
      continue;

    // Ignore pointer constants.
    if (msg.getArgSVal(I).getAs<loc::ConcreteInt>())
      continue;

    // Ignore pointer types annotated with 'NSObject' attribute.
    if (C.getASTContext().isObjCNSObjectType(ArgTy))
      continue;

    // Ignore CF references, which can be toll-free bridged.
    if (coreFoundation::isCFObjectRef(ArgTy))
      continue;

    // Generate only one error node to use for all bug reports.
    if (!errorNode.hasValue())
      errorNode = C.generateNonFatalErrorNode();

    // A null node means no report can be emitted on this path; keep scanning
    // so later offending arguments are still handled consistently.
    if (!errorNode.getValue())
      continue;

    SmallString<128> sbuf;
    llvm::raw_svector_ostream os(sbuf);

    StringRef TypeName = GetReceiverInterfaceName(msg);
    if (!TypeName.empty())
      os << "Argument to '" << TypeName << "' method '";
    else
      os << "Argument to method '";

    msg.getSelector().print(os);
    os << "' should be an Objective-C pointer type, not '";
    ArgTy.print(os, C.getLangOpts());
    os << "'";

    auto R = llvm::make_unique<BugReport>(*BT, os.str(),
                                          errorNode.getValue());
    R->addRange(msg.getArgSourceRange(I));
    C.emitReport(std::move(R));
  }
}
void UninitializedObjectChecker::checkEndFunction(
    const ReturnStmt *RS, CheckerContext &Context) const {
  // At the end of a user-provided constructor, report any fields of the
  // constructed object that were left uninitialized.
  const auto *CtorDecl = dyn_cast_or_null<CXXConstructorDecl>(
      Context.getLocationContext()->getDecl());
  if (!CtorDecl)
    return;

  if (!CtorDecl->isUserProvided())
    return;

  if (CtorDecl->getParent()->isUnion())
    return;

  // This avoids essentially the same error being reported multiple times.
  if (willObjectBeAnalyzedLater(CtorDecl, Context))
    return;

  Optional<nonloc::LazyCompoundVal> Object = getObjectVal(CtorDecl, Context);
  if (!Object)
    return;

  // Walk the constructed object's region collecting uninitialized fields
  // (optionally chasing pointees, per CheckPointeeInitialization).
  FindUninitializedFields F(Context.getState(), Object->getRegion(),
                            CheckPointeeInitialization);

  const UninitFieldMap &UninitFields = F.getUninitFields();

  if (UninitFields.empty())
    return;

  // In non-pedantic mode, if Object's region doesn't contain a single
  // initialized field, we'll assume that Object was intentionally left
  // uninitialized.
  if (!IsPedantic && !F.isAnyFieldInitialized())
    return;

  // There are uninitialized fields in the record.
  ExplodedNode *Node = Context.generateNonFatalErrorNode(Context.getState());
  if (!Node)
    return;

  PathDiagnosticLocation LocUsedForUniqueing;
  const Stmt *CallSite = Context.getStackFrame()->getCallSite();
  // Unique reports on the constructor's call site so repeated analysis of the
  // same invocation collapses into one diagnostic.
  if (CallSite)
    LocUsedForUniqueing = PathDiagnosticLocation::createBegin(
        CallSite, Context.getSourceManager(), Node->getLocationContext());

  // For Plist consumers that don't support notes just yet, we'll convert notes
  // to warnings.
  if (ShouldConvertNotesToWarnings) {
    for (const auto &Pair : UninitFields) {
      auto Report = llvm::make_unique<BugReport>(
          *BT_uninitField, Pair.second, Node, LocUsedForUniqueing,
          Node->getLocationContext()->getDecl());
      Context.emitReport(std::move(Report));
    }
    return;
  }

  // Otherwise emit one warning that summarizes the count, with a note
  // attached per uninitialized field.
  SmallString<100> WarningBuf;
  llvm::raw_svector_ostream WarningOS(WarningBuf);
  WarningOS << UninitFields.size() << " uninitialized field"
            << (UninitFields.size() == 1 ? "" : "s")
            << " at the end of the constructor call";

  auto Report = llvm::make_unique<BugReport>(
      *BT_uninitField, WarningOS.str(), Node, LocUsedForUniqueing,
      Node->getLocationContext()->getDecl());

  for (const auto &Pair : UninitFields) {
    Report->addNote(Pair.second,
                    PathDiagnosticLocation::create(Pair.first->getDecl(),
                                                   Context.getSourceManager()));
  }

  Context.emitReport(std::move(Report));
}