// Infer assertions (checks) for every summary in summary_list, which holds the
// summaries for all outer/loop blocks of a single function. For each CFG edge
// this collects annotation checks, type/global invariant checks triggered by
// writes, callee precondition checks, buffer/GC-safety checks, prunes
// redundant ones, and stores the survivors in the summary. Finally it runs
// invariant inference over all summaries.
// NOTE(review): all summaries are assumed to belong to one function — the
// function Variable is taken from summary_list[0] only.
void InferSummaries(const Vector<BlockSummary*> &summary_list)
{
  static BaseTimer infer_timer("infer_summaries");
  Timer _timer(&infer_timer);

  if (summary_list.Empty())
    return;

  Variable *function = summary_list[0]->GetId()->BaseVar();
  // annotations on the function's body; released at the end of this function.
  Vector<BlockCFG*> *annot_list = BodyAnnotCache.Lookup(function->GetName());

  // all traces which might refer to the result of pointer arithmetic.
  Vector<Exp*> arithmetic_list;
  ArithmeticEscape escape(function, arithmetic_list);

  // initial pass over the CFGs to get traces used in pointer arithmetic.
  for (size_t ind = 0; ind < summary_list.Size(); ind++) {
    BlockSummary *sum = summary_list[ind];

    BlockCFG *cfg = sum->GetMemory()->GetCFG();
    for (size_t eind = 0; eind < cfg->GetEdgeCount(); eind++) {
      PEdge *edge = cfg->GetEdge(eind);

      if (PEdgeAssign *assign_edge = edge->IfAssign()) {
        Exp *left = assign_edge->GetLeftSide();
        Exp *right = assign_edge->GetRightSide();
        ProcessArithmeticAssign(&escape, cfg->GetId(), left, right);
      }
    }
  }

  // main pass: accumulate the assertions for each summary's CFG.
  for (size_t ind = 0; ind < summary_list.Size(); ind++) {
    BlockSummary *sum = summary_list[ind];
    BlockMemory *mcfg = sum->GetMemory();
    BlockCFG *cfg = mcfg->GetCFG();

    // accumulate all the assertions at points in the CFG.
    Vector<AssertInfo> asserts;

    // add assertions at function exit for any postconditions.
    // only done for the outer function block, not for loop blocks.
    if (cfg->GetId()->Kind() == B_Function) {
      for (size_t aind = 0; annot_list && aind < annot_list->Size(); aind++) {
        BlockCFG *annot_cfg = annot_list->At(aind);

        if (annot_cfg->GetAnnotationKind() != AK_Postcondition)
          continue;
        if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
          AssertInfo info;
          info.kind = ASK_Annotation;
          info.cls = ASC_Check;
          info.point = cfg->GetExitPoint();
          info.bit = bit;
          asserts.PushBack(info);
        }
      }
    }

    // add assertions for any point annotations within the CFG.
    for (size_t pind = 0; pind < cfg->GetPointAnnotationCount(); pind++) {
      PointAnnotation pann = cfg->GetPointAnnotation(pind);
      BlockCFG *annot_cfg = GetAnnotationCFG(pann.annot);
      if (!annot_cfg) continue;

      if (annot_cfg->GetAnnotationKind() != AK_Assert)
        continue;

      if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
        AssertInfo info;
        info.kind = ASK_Annotation;
        info.cls = ASC_Check;
        info.point = pann.point;
        info.bit = bit;
        asserts.PushBack(info);
      }
    }

    // scan every edge for annotation edges, invariant-affecting writes,
    // callee preconditions and lvalue accesses to check.
    for (size_t eind = 0; eind < cfg->GetEdgeCount(); eind++) {
      PEdge *edge = cfg->GetEdge(eind);
      PPoint point = edge->GetSource();

      if (PEdgeAnnotation *nedge = edge->IfAnnotation()) {
        // add an assertion for this annotation if it is not an assume.
        BlockCFG *annot_cfg = GetAnnotationCFG(nedge->GetAnnotationId());
        if (!annot_cfg) continue;

        if (annot_cfg->GetAnnotationKind() != AK_Assert &&
            annot_cfg->GetAnnotationKind() != AK_AssertRuntime) {
          continue;
        }

        if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
          AssertInfo info;
          // runtime asserts get their own kind so they can be reported
          // separately from static asserts.
          info.kind = (annot_cfg->GetAnnotationKind() == AK_Assert)
                        ? ASK_Annotation : ASK_AnnotationRuntime;
          info.cls = ASC_Check;
          info.point = point;
          info.bit = bit;
          asserts.PushBack(info);
        }
      }

      // add assertions for any invariants affected by a write.

      // the lvalue written by this edge, if any: the left side of an
      // assignment or the return-value lvalue of a call.
      Exp *left = NULL;
      if (PEdgeAssign *nedge = edge->IfAssign())
        left = nedge->GetLeftSide();
      if (PEdgeCall *nedge = edge->IfCall())
        left = nedge->GetReturnValue();

      // for now our detection of affected invariants is pretty crude;
      // writes to fields can affect type invariants on the field's type
      // which use that field, and writes to global variables can affect
      // invariants on that global. TODO: pin this down once we draw a
      // precise line between which invariants can and can't be checked.

      if (left && left->IsFld()) {
        // a field write: check every invariant on the enclosing CSU type
        // that mentions this field.
        ExpFld *nleft = left->AsFld();
        String *csu_name = nleft->GetField()->GetCSUType()->GetCSUName();
        Vector<BlockCFG*> *comp_annot_list = CompAnnotCache.Lookup(csu_name);

        for (size_t aind = 0;
             comp_annot_list && aind < comp_annot_list->Size();
             aind++) {
          BlockCFG *annot_cfg = comp_annot_list->At(aind);

          if (annot_cfg->GetAnnotationKind() != AK_Invariant)
            continue;
          Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);
          if (!bit) continue;

          // collect the lvalues mentioned by the invariant to see whether
          // it actually uses the written field.
          Vector<Exp*> lval_list;
          LvalListVisitor visitor(&lval_list);
          bit->DoVisit(&visitor);

          bool uses_field = false;
          for (size_t ind = 0; ind < lval_list.Size(); ind++) {
            if (ExpFld *lval = lval_list[ind]->IfFld()) {
              if (lval->GetField() == nleft->GetField())
                uses_field = true;
            }
          }

          if (uses_field) {
            // this is a type invariant which uses the field being written
            // as an lvalue. we need to assert this write preserves
            // the invariant.
            // rewrite the invariant's '*this' into the object actually
            // being written through, then check it at the write point.
            BlockId *id = annot_cfg->GetId();
            Variable *this_var = Variable::Make(id, VK_This, NULL, 0, NULL);
            Exp *this_exp = Exp::MakeVar(this_var);
            Exp *this_drf = Exp::MakeDrf(this_exp);
            Bit *new_bit = BitReplaceExp(bit, this_drf, nleft->GetTarget());

            AssertInfo info;
            info.kind = ASK_Invariant;
            info.cls = ASC_Check;
            info.point = point;
            info.bit = new_bit;
            asserts.PushBack(info);
          }
        }

        CompAnnotCache.Release(csu_name);
      }

      if (left && left->IsVar()) {
        // a write to a global variable: check all initializer/invariant
        // annotations attached to that global.
        Variable *var = left->AsVar()->GetVariable();
        if (var->Kind() == VK_Glob) {
          Vector<BlockCFG*> *glob_annot_list =
            InitAnnotCache.Lookup(var->GetName());

          for (size_t aind = 0;
               glob_annot_list && aind < glob_annot_list->Size();
               aind++) {
            BlockCFG *annot_cfg = glob_annot_list->At(aind);

            Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);
            if (!bit) continue;

            AssertInfo info;
            info.kind = ASK_Invariant;
            info.cls = ASC_Check;
            info.point = point;
            info.bit = bit;
            asserts.PushBack(info);
          }

          InitAnnotCache.Release(var->GetName());
        }
      }

      if (PEdgeCall *nedge = edge->IfCall()) {
        // add assertions for any callee preconditions.

        // pull preconditions from both direct and indirect calls.
        Vector<Variable*> callee_names;

        if (Variable *callee = nedge->GetDirectFunction()) {
          callee_names.PushBack(callee);
        } else {
          // indirect call: get the possible callees at this exact
          // block/point from the callee cache.
          CallEdgeSet *callees = CalleeCache.Lookup(function);

          if (callees) {
            for (size_t cind = 0; cind < callees->GetEdgeCount(); cind++) {
              const CallEdge &edge = callees->GetEdge(cind);
              if (edge.where.id == cfg->GetId() && edge.where.point == point)
                callee_names.PushBack(edge.callee);
            }
          }

          // CalleeCache release is below.
        }

        for (size_t cind = 0; cind < callee_names.Size(); cind++) {
          String *callee = callee_names[cind]->GetName();
          Vector<BlockCFG*> *call_annot_list = BodyAnnotCache.Lookup(callee);

          for (size_t aind = 0;
               call_annot_list && aind < call_annot_list->Size();
               aind++) {
            BlockCFG *annot_cfg = call_annot_list->At(aind);

            if (annot_cfg->GetAnnotationKind() != AK_Precondition)
              continue;
            if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
              // translate the precondition from the callee's frame into
              // this caller's frame at the call point; mapping may fail.
              ConvertCallsiteMapper mapper(cfg, point, false);
              Bit *caller_bit = bit->DoMap(&mapper);
              if (!caller_bit)
                continue;

              AssertInfo info;
              info.kind = ASK_Annotation;
              info.cls = ASC_Check;
              info.point = point;
              info.bit = caller_bit;
              asserts.PushBack(info);
            }
          }

          BodyAnnotCache.Release(callee);
        }

        // matching release for the indirect-call Lookup above.
        if (!nedge->GetDirectFunction())
          CalleeCache.Release(function);
      }

      // scan the edge's lvalue accesses for buffer, integer and GC checks.
      BufferScanVisitor write_visitor(asserts, arithmetic_list, point, true);
      BufferScanVisitor read_visitor(asserts, arithmetic_list, point, false);
      IntegerScanVisitor integer_visitor(asserts, point);
      GCScanVisitor gcsafe_visitor(asserts, point);

      // only look at the written lvalues for the write visitor.
      if (PEdgeAssign *assign = edge->IfAssign())
        write_visitor.Visit(assign->GetLeftSide());
      if (PEdgeCall *call = edge->IfCall()) {
        if (Exp *returned = call->GetReturnValue())
          write_visitor.Visit(returned);
      }

      edge->DoVisit(&read_visitor);

      // disable integer overflow visitor for now.
      // edge->DoVisit(&integer_visitor);

      edge->DoVisit(&gcsafe_visitor);
    }

    // for functions whose modset indicates they can GC, assert the
    // function's GC-safety bit at exit.
    if (cfg->GetId()->Kind() == B_Function) {
      BlockModset *modset = GetBlockModset(cfg->GetId());
      if (modset->CanGC()) {
        AssertInfo info;
        info.kind = ASK_CanGC;
        info.cls = ASC_Check;
        info.point = cfg->GetExitPoint();

        String *name = cfg->GetId()->BaseVar()->GetName();
        Variable *var = Variable::Make(NULL, VK_Glob, name, 0, name);
        Exp *varexp = Exp::MakeVar(var);
        Exp *gcsafe = Exp::MakeGCSafe(varexp, false);
        info.bit = Bit::MakeVar(gcsafe);
        asserts.PushBack(info);
      }
    }

    MarkRedundantAssertions(mcfg, asserts);

    // move the finished assertion list into the summary.
    for (size_t ind = 0; ind < asserts.Size(); ind++) {
      const AssertInfo &info = asserts[ind];
      sum->AddAssert(info.kind, info.cls, info.point, info.bit);
    }
  }

  // infer delta and termination invariants for all summaries.
  for (size_t ind = 0; ind < summary_list.Size(); ind++)
    InferInvariants(summary_list[ind], arithmetic_list);

  BodyAnnotCache.Release(function->GetName());
}
// Find the writes in the block indicated by heap_write that actually store to
// the trace heap_write describes, and append a HeapWriteInfo for each to
// *writes (skipping duplicates). Each stored entry holds references (with
// 'writes' as the ref cookie) on its memory, lvalues and exclude guards.
// NOTE(review): assumes heap_write.where.point has a single outgoing edge —
// GetSingleOutgoingEdge's result is dereferenced without a NULL check.
void GetMatchingHeapWrites(const EscapeAccess &heap_write,
                           Vector<HeapWriteInfo> *writes)
{
  BlockId *id = heap_write.where.id;
  BlockMemory *mcfg = GetBlockMemory(id);

  if (mcfg == NULL) {
    logout << "WARNING: Missing memory: '" << id << "'" << endl;
    return;
  }

  BlockCFG *cfg = mcfg->GetCFG();

  // for incremental analysis, make sure the write CFG uses the right version.
  // as for checking callers, if the CFG has changed but the new one still
  // has a matching write, we will see an escape access for the new CFG.
  if (cfg->GetVersion() != heap_write.where.version) {
    if (checker_verbose.IsSpecified())
      logout << "CHECK: Write is an older version: "
             << heap_write.where.id << ": "
             << heap_write.where.version << endl;
    // drop the reference acquired by GetBlockMemory before bailing.
    mcfg->DecRef();
    return;
  }

  PPoint point = heap_write.where.point;
  PPoint exit_point = mcfg->GetCFG()->GetExitPoint();

  // find a point-relative lvalue written at the write point with
  // the sanitized representation from the heap_write trace.
  // TODO: we only match against direct assignments in the CFG for now,
  // ignoring structural copies (which are simple recursive writes).

  PEdge *edge = cfg->GetSingleOutgoingEdge(point);
  Exp *point_lval = NULL;

  // the written lvalue is either an assignment's left side or a call's
  // return-value lvalue.
  if (PEdgeAssign *nedge = edge->IfAssign())
    point_lval = nedge->GetLeftSide();
  else if (PEdgeCall *nedge = edge->IfCall())
    point_lval = nedge->GetReturnValue();

  bool lval_matches = false;

  if (point_lval) {
    if (Exp *new_point_lval = Trace::SanitizeExp(point_lval)) {
      // sanitized expressions are hash-consed, so pointer equality
      // suffices here — TODO confirm.
      lval_matches = (new_point_lval == heap_write.target->GetValue());
      new_point_lval->DecRef();
    }
  }

  if (!lval_matches) {
    mcfg->DecRef();
    return;
  }

  // it would be nice to remove Val() expressions from this list, but we can't
  // do that as lvalues in memory assignments can contain Val and we want to
  // see the effects of those assignments. TODO: fix.
  GuardExpVector lval_res;
  mcfg->TranslateExp(TRK_Point, point, point_lval, &lval_res);

  // make one HeapWriteInfo per translated (guard, lvalue) pair.
  for (size_t ind = 0; ind < lval_res.Size(); ind++) {
    const GuardExp &lv = lval_res[ind];

    HeapWriteInfo info;
    info.mcfg = mcfg;
    info.lval = lv.exp;
    info.base_lval = point_lval;

    // look for a condition where the lvalue is not written.
    GuardExpVector exit_vals;
    info.mcfg->GetValComplete(info.lval, NULL, exit_point, &exit_vals);

    for (size_t ind = 0; ind < exit_vals.Size(); ind++) {
      const GuardExp &val = exit_vals[ind];

      // exclude cases where the lvalue refers to its value at block entry.
      if (ExpDrf *nval = val.exp->IfDrf()) {
        if (nval->GetTarget() == info.lval)
          info.exclude.PushBack(val.guard);
      }
    }

    if (!writes->Contains(info)) {
      // take references on everything stored in the output vector,
      // using 'writes' as the reference cookie.
      info.mcfg->IncRef(writes);
      info.lval->IncRef(writes);
      info.base_lval->IncRef(writes);
      IncRefVector<Bit>(info.exclude, writes);
      writes->PushBack(info);
    }
  }

  mcfg->DecRef();
}
// Decide where to propagate the checked condition 'bit' given that it depends
// on lvalue 'lval'. Tries, in order: a heap/type invariant, a caller
// precondition, and a callee postcondition; returns the chosen Where, a
// WhereNone when propagation should stop (RK_None / RK_Finished), or NULL
// when no propagation direction applies.
Where* CheckerPropagate::TryPropagate(Bit *bit, Exp *lval)
{
  BlockMemory *mcfg = m_frame->Memory();

  TypeCSU *csu = NULL;
  Exp *csu_lval = NULL;

  if (UseHeapExp(lval, &csu, &csu_lval)) {
    // do the heap propagation unless we are trying to push heap data
    // up into the caller.
    if (!m_prefer_precondition ||
        !UseCallerExp(lval, m_frame->Kind() == B_Function) ||
        (m_frame->Kind() == B_Loop && !mcfg->IsExpPreserved(lval))) {
      Where *res = WhereInvariant::Make(csu, csu_lval, bit);

      if (res)
        return res;

      // fall through, we might still be able to treat this as a precondition.
    }
  }

  // lvalue comes from the caller: propagate as a precondition.
  if (UseCallerExp(lval, m_frame->Kind() == B_Function))
    return WherePrecondition::Make(m_frame->Memory(), bit);

  // lvalue comes from a callee's side effects at 'point'.
  if (PPoint point = UseCalleeExp(lval)) {

    // fail propagation if this is from a later callee than the point
    // this propagation occurs at. this can come up when generating
    // sufficient conditions.
    if (point > m_point || (point == m_point && !m_allow_point))
      return NULL;

    // cutoff propagation if the buffer came from a primitive memory
    // allocator. if we find a sufficient condition that does not
    // mention the allocator we could continue propagation.
    PEdge *edge = mcfg->GetCFG()->GetSingleOutgoingEdge(point);
    PEdgeCall *edge_call = edge->IfCall();
    Variable *callee = edge_call ? edge_call->GetDirectFunction() : NULL;

    Exp *callee_base;
    Exp *callee_size;

    if (callee && GetAllocationFunction(callee, &callee_base, &callee_size)) {
      // only needed to know the callee is an allocator; drop the refs.
      callee_base->DecRef();
      callee_size->DecRef();

      if (lval->IsBound())
        lval = lval->GetLvalTarget();

      // ignore this if it refers to fields or other structures
      // in the result of the allocation. this data is either
      // uninitialized or zeroed, either way we don't care.
      if (ExpClobber *nlval = lval->IfClobber()) {
        Exp *callee_lval = nlval->GetCallee();
        Exp *callee_target = NULL;

        if (nlval->GetValueKind() == NULL) {
          callee_target = callee_lval;
        } else {
          // skip the first dereference. for terminators we still depend on
          // the initial contents of the allocated buffer.
          if (ExpExit *ncallee = callee_lval->IfExit())
            callee_target = ncallee->GetTarget();
        }

        if (callee_target) {
          // strip any field accesses; if we end at the allocator's exit
          // value itself, there is nothing to propagate.
          while (callee_target->IsFld())
            callee_target = callee_target->GetLvalTarget();
          if (callee_target->IsExit())
            return new WhereNone(RK_None);
        }
      }

      // watch for accessing indexes of a buffer returned via the allocator,
      // which currently aren't mapped back into the callee correctly.
      // TODO: fix hack.
      if (lval->IsDrf() && lval->GetLvalTarget()->IsIndex())
        return new WhereNone(RK_None);

      return new WhereNone(RK_Finished);
    }

    // calls to cutoff functions also terminate propagation.
    if (callee && IsCutoffFunction(callee))
      return new WhereNone(RK_Finished);

    return WherePostcondition::Make(m_frame, point, bit);
  }

  return NULL;
}