// Print the name(s) of the hook function(s) for this postcondition to out.
// For a loop edge this is the loop's name plus the enclosing function; for a
// direct call it is the single callee; for an indirect call it is every
// callee the callgraph records for this point, separated by '$'.
void WherePostcondition::PrintHook(OutStream &out) const
{
  BlockId *block_id = m_frame->CFG()->GetId();
  Variable *function_var = block_id->BaseVar();

  PEdge *out_edge = m_frame->CFG()->GetSingleOutgoingEdge(m_point);

  if (out_edge->IsLoop()) {
    // postcondition of a loop body: loop name, then the enclosing function.
    PEdgeLoop *loop_edge = out_edge->AsLoop();
    out << loop_edge->GetLoopId()->Loop()->Value()
        << " " << function_var->GetName()->Value();
    return;
  }

  PEdgeCall *call_edge = out_edge->AsCall();

  if (Variable *direct_callee = call_edge->GetDirectFunction()) {
    // direct call, just one hook function.
    out << "post " << direct_callee->GetName()->Value();
    return;
  }

  // indirect call, one hook function for each callee.
  CallEdgeSet *callee_edges = CalleeCache.Lookup(function_var);

  bool emitted_any = false;
  size_t edge_count = callee_edges ? callee_edges->GetEdgeCount() : 0;

  for (size_t ind = 0; ind < edge_count; ind++) {
    const CallEdge &cedge = callee_edges->GetEdge(ind);

    if (cedge.where.id != block_id || cedge.where.point != m_point)
      continue;

    if (emitted_any)
      out << "$"; // add the separator
    emitted_any = true;

    out << "post " << cedge.callee->GetName()->Value();
  }

  CalleeCache.Release(function_var);
}
// Explore a single frame in the checker's search. Depending on the attached
// propagation this either terminates the search (possibly filing a report on
// state), expands a callee at a postcondition point, or propagates the
// condition up to the frame's caller(s). Returns true when exploration is
// finished (a report kind was set on state), false to keep searching.
// NOTE(review): PushContext/PopContext and the CallerCache/CalleeCache
// Lookup/Release pairings below are order-sensitive; every early `return true`
// path deliberately skips PopContext and releases any held cache entry first.
bool CheckFrame(CheckerState *state, CheckerFrame *frame,
                CheckerPropagate *propagate)
{
  Assert(!state->GetReportKind());

  BlockMemory *mcfg = frame->Memory();
  BlockCFG *cfg = mcfg->GetCFG();
  BlockId *id = cfg->GetId();

  if (checker_verbose.IsSpecified()) {
    logout << "CHECK: " << frame << ": Entering " << id << endl;
    if (propagate)
      propagate->Print();
  }

  Where *where = propagate ? propagate->m_where : NULL;

  // check if we should terminate the search at this point (with or without
  // generating a report).
  if (where && where->IsNone()) {
    WhereNone *nwhere = where->AsNone();
    ReportKind kind = nwhere->GetReportKind();

    if (kind == RK_None) {
      // WhereNone with no report kind: dead end, silently drop this path.
      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame << ": Ignoring" << endl;
      return false;
    } else {
      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame << ": Propagation failed" << endl;
      state->SetReport(kind);
      return true;
    }
  }

  // check for other propagations on the stack with frames for the same block,
  // and block the recursion if we exceed the checker's depth. we assume that
  // if we're ever going to terminate in the presence of recursion, we will
  // do so quickly.
  if (propagate) {
    if (uint32_t depth = checker_depth.UIntValue()) {
      // count the distinct frames on the stack sharing this block's memory.
      Vector<CheckerFrame*> recurse_frames;

      for (size_t ind = 0; ind < state->m_stack.Size(); ind++) {
        CheckerFrame *other_frame = state->m_stack[ind]->m_frame;
        if (other_frame != frame && other_frame->Memory() == mcfg &&
            !recurse_frames.Contains(other_frame))
          recurse_frames.PushBack(other_frame);
      }

      if (recurse_frames.Size() >= depth) {
        state->SetReport(RK_Recursion);
        return true;
      }
    }
  }

  // check if we are propagating into some callee.
  if (where && where->IsPostcondition()) {
    WherePostcondition *nwhere = where->AsPostcondition();

    // expand the callee at the specified point.
    PPoint point = nwhere->GetPoint();
    PEdge *edge = cfg->GetSingleOutgoingEdge(point);

    if (edge->IsLoop()) {
      // expanding data from a loop. first try the case that the loop
      // does not execute at all.

      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame
               << ": Trying to skip loop at " << point << endl;

      state->PushContext();

      if (CheckSkipLoop(state, frame, point, nwhere))
        return true;

      state->PopContext();
    }

    if (BlockId *callee = edge->GetDirectCallee()) {
      // easy case, there is only a single callee.

      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame
               << ": Expanding single callee at " << point
               << ": " << callee << endl;

      state->PushContext();

      if (CheckSingleCallee(state, frame, point, nwhere, callee, true))
        return true;

      state->PopContext();
    } else {
      // iterate through all the possible callees

      Variable *function = id->BaseVar();
      CallEdgeSet *callees = CalleeCache.Lookup(function);

      // collect the callees recorded for this call point.
      Vector<Variable*> callee_vars;

      if (callees) {
        for (size_t eind = 0; eind < callees->GetEdgeCount(); eind++) {
          const CallEdge &edge = callees->GetEdge(eind);
          if (edge.where.id == id && edge.where.point == point)
            callee_vars.PushBack(edge.callee);
        }
      }

      // sort for a deterministic expansion order.
      SortVector<Variable*,Variable>(&callee_vars);

      for (size_t cind = 0; cind < callee_vars.Size(); cind++) {
        Variable *callee = callee_vars[cind];

        if (checker_verbose.IsSpecified())
          logout << "CHECK: " << frame
                 << ": Expanding indirect callee at " << point
                 << ": " << callee << endl;

        // BlockId::Make consumes a reference on callee.
        callee->IncRef();
        BlockId *callee_id = BlockId::Make(B_Function, callee);

        state->PushContext();

        if (CheckSingleCallee(state, frame, point, nwhere,
                              callee_id, false)) {
          // release the cache entry before abandoning the traversal.
          CalleeCache.Release(function);
          return true;
        }

        state->PopContext();
      }

      if (callee_vars.Empty()) {
        if (checker_verbose.IsSpecified())
          logout << "CHECK: " << frame
                 << ": No callees to expand at " << point << endl;
      }

      CalleeCache.Release(function);
    }

    return false;
  }

  // any precondition we have to propagate up to the callers.
  WherePrecondition *precondition = NULL;
  if (where)
    precondition = where->IfPrecondition();

  // whether we will be reconnecting to the caller without any
  // propagation information.
  bool reconnect_caller = false;

  if (precondition) {
    // remember this precondition for the duration of the caller expansion.
    Bit *bit = precondition->GetBit();
    WherePrecondition *dupe_precondition = new WherePrecondition(mcfg, bit);
    state->m_precondition_list.PushBack(dupe_precondition);
  } else {
    // we will propagate to the caller regardless if there is already a caller
    // hooked up or if we are inside a loop body.

    if (frame->GetCaller().id != NULL)
      reconnect_caller = true;

    if (frame->Kind() == B_Loop)
      reconnect_caller = true;
  }

  if (propagate && reconnect_caller) {
    // check to see if we are delaying any heap propagation.
    if (where->IsInvariant()) {
      Assert(state->m_delayed_propagate_heap == NULL);
      state->m_delayed_propagate_heap = propagate;
    }
  } else if (!precondition && !reconnect_caller) {
    // check to see if we are performing heap propagation.

    if (state->m_delayed_propagate_heap) {
      Assert(propagate == NULL);
      CheckerPropagate *heap_propagate = state->m_delayed_propagate_heap;
      state->m_delayed_propagate_heap = NULL;

      WhereInvariant *invariant = heap_propagate->m_where->AsInvariant();

      if (CheckHeapWrites(state, frame, heap_propagate->m_frame, invariant))
        return true;

      // restore the delayed propagation for the caller's benefit.
      state->m_delayed_propagate_heap = heap_propagate;
      return false;
    } else if (where && where->IsInvariant()) {
      return CheckHeapWrites(state, frame, frame, where->AsInvariant());
    }

    Assert(propagate);

    // don't need to expand the callers or anything else.
    // we can finally terminate propagation with an error report.

    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame
             << ": Nothing to expand, finishing" << endl;

    state->SetReport(RK_Finished);
    return true;
  }

  if (frame->GetCaller().id != NULL) {
    // just propagate to the existing caller.

    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame << ": Returning to caller" << endl;

    state->PushContext();

    if (CheckSingleCaller(state, frame, precondition, frame->GetCaller()))
      return true;

    state->PopContext();
  } else if (id->Kind() == B_Function) {
    // propagate to all callers to the function.

    Variable *function = id->BaseVar();
    CallEdgeSet *callers = CallerCache.Lookup(function);

    Vector<BlockPPoint> caller_points;
    for (size_t eind = 0; callers && eind < callers->GetEdgeCount(); eind++) {
      const CallEdge &edge = callers->GetEdge(eind);
      Assert(edge.callee == function);
      caller_points.PushBack(edge.where);
    }

    // sort for a deterministic caller-expansion order.
    SortVector<BlockPPoint,BlockPPoint>(&caller_points);

    for (size_t cind = 0; cind < caller_points.Size(); cind++) {
      BlockPPoint caller = caller_points[cind];

      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame
               << ": Checking caller: " << caller << endl;

      state->PushContext();

      if (CheckSingleCaller(state, frame, precondition, caller)) {
        CallerCache.Release(function);
        return true;
      }

      state->PopContext();
    }

    if (caller_points.Empty()) {
      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame << ": No callers to expand" << endl;
    }

    CallerCache.Release(function);
  } else if (id->Kind() == B_Loop) {
    // check all possible callers of the loop. unroll an iteration before
    // checking the parents so that if we can't figure out a sufficient
    // condition for the loop we will stop exploration quickly.

    // unroll another iteration of the loop.

    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame << ": Unrolling loop iteration" << endl;

    state->PushContext();

    // the loop invoking itself at its own exit point models one more
    // iteration of the loop.
    BlockPPoint recursive_caller(id, cfg->GetExitPoint());
    if (CheckSingleCaller(state, frame, precondition, recursive_caller))
      return true;

    state->PopContext();

    // check the parents which can initially invoke this loop.

    if (frame->GetLoopParent().id != NULL) {
      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame
               << ": Checking existing loop parent: "
               << frame->GetLoopParent() << endl;

      state->PushContext();

      if (CheckSingleCaller(state, frame, precondition,
                            frame->GetLoopParent()))
        return true;

      state->PopContext();
    } else {
      for (size_t pind = 0; pind < cfg->GetLoopParentCount(); pind++) {
        BlockPPoint where = cfg->GetLoopParent(pind);

        if (checker_verbose.IsSpecified())
          logout << "CHECK: " << frame
                 << ": Checking loop parent: " << where << endl;

        state->PushContext();

        if (CheckSingleCaller(state, frame, precondition, where))
          return true;

        state->PopContext();
      }
    }
  } else if (id->Kind() == B_Initializer) {
    // initializers don't have callers, can just ignore this.
    // TODO: should address why this code is being reached in the first place.

    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame << ": Initializer has no callers" << endl;

    return false;
  } else {
    // unknown type of block.
    Assert(false);
  }

  // if we set the state's delayed heap propagation then unset it.
  if (propagate && state->m_delayed_propagate_heap == propagate)
    state->m_delayed_propagate_heap = NULL;

  return false;
}
// Accumulate into assume_list every bit which may be assumed to hold within
// mcfg. Sources: the block's own summary assumes, precondition annotations at
// function entry, point/edge annotations (excluding AssertRuntime, which is
// checked rather than assumed), summaries of direct and indirect callees, and
// heap invariants over values mentioned by the added assumptions. Annotations
// at or after end_point are skipped when end_point is nonzero. References on
// the stored bits and annotation CFGs are held on behalf of assume_list.
void BlockSummary::GetAssumedBits(BlockMemory *mcfg, PPoint end_point,
                                  Vector<AssumeInfo> *assume_list)
{
  BlockId *id = mcfg->GetId();
  BlockCFG *cfg = mcfg->GetCFG();

  BlockSummary *sum = GetBlockSummary(id);

  const Vector<Bit*> *assumes = sum->GetAssumes();
  size_t assume_count = VectorSize<Bit*>(assumes);

  // pull in assumptions from the summary for mcfg. in some cases these
  // assumptions won't be useful, e.g. describing the state at exit
  // for functions. for now we're just adding all of them though. TODO: fix.
  for (size_t ind = 0; ind < assume_count; ind++) {
    Bit *bit = assumes->At(ind);
    bit->IncRef(assume_list);

    AssumeInfo info;
    info.bit = bit;
    assume_list->PushBack(info);
  }

  // drop the reference GetBlockSummary returned; the bits themselves are
  // kept alive by the IncRef(assume_list) calls above.
  sum->DecRef();

  Vector<BlockCFG*> *annot_list = BodyAnnotCache.Lookup(id->Function());

  // add assumes at function entry for any preconditions.

  if (id->Kind() == B_Function) {
    for (size_t ind = 0; annot_list && ind < annot_list->Size(); ind++) {
      BlockCFG *annot_cfg = annot_list->At(ind);

      if (annot_cfg->GetAnnotationKind() != AK_Precondition &&
          annot_cfg->GetAnnotationKind() != AK_PreconditionAssume)
        continue;

      Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);
      if (!bit) continue;

      annot_cfg->IncRef(assume_list);
      bit->IncRef(assume_list);

      AssumeInfo info;
      info.annot = annot_cfg;
      info.bit = bit;
      assume_list->PushBack(info);
    }
  }

  // add assumptions from points within the block.

  for (size_t pind = 0; pind < cfg->GetPointAnnotationCount(); pind++) {
    PointAnnotation pann = cfg->GetPointAnnotation(pind);
    if (end_point && pann.point >= end_point)
      continue;

    BlockCFG *annot_cfg = GetAnnotationCFG(pann.annot);
    if (!annot_cfg) continue;

    Assert(annot_cfg->GetAnnotationKind() != AK_AssertRuntime);

    if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
      // get the annotation bit in terms of block entry.
      Bit *point_bit = NULL;
      mcfg->TranslateBit(TRK_Point, pann.point, bit, &point_bit);
      // transfer the translated bit's reference to assume_list.
      point_bit->MoveRef(&point_bit, assume_list);

      annot_cfg->IncRef(assume_list);

      AssumeInfo info;
      info.annot = annot_cfg;
      info.point = pann.point;
      info.bit = point_bit;
      assume_list->PushBack(info);
    }

    annot_cfg->DecRef();
  }

  // add assumptions from annotation edges within the block, invariants
  // on values accessed by the block, and from the summaries of any callees.

  for (size_t eind = 0; eind < cfg->GetEdgeCount(); eind++) {
    PEdge *edge = cfg->GetEdge(eind);
    PPoint point = edge->GetSource();

    if (end_point && point >= end_point)
      continue;

    // pick up invariants on any lvalues this edge touches.
    InvariantAssumeVisitor visitor(mcfg, point, assume_list);
    edge->DoVisit(&visitor);

    if (PEdgeAnnotation *nedge = edge->IfAnnotation()) {
      // add an assumption for this annotation.
      BlockCFG *annot_cfg = GetAnnotationCFG(nedge->GetAnnotationId());
      if (!annot_cfg) continue;

      Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);

      // don't incorporate AssertRuntimes, these are not assumed.
      if (bit && annot_cfg->GetAnnotationKind() != AK_AssertRuntime) {
        // get the annotation bit in terms of block entry.
        Bit *point_bit = NULL;
        mcfg->TranslateBit(TRK_Point, point, bit, &point_bit);
        point_bit->MoveRef(&point_bit, assume_list);

        annot_cfg->IncRef(assume_list);

        AssumeInfo info;
        info.annot = annot_cfg;
        info.point = point;
        info.bit = point_bit;
        assume_list->PushBack(info);
      }

      annot_cfg->DecRef();
    }

    if (BlockId *callee = edge->GetDirectCallee()) {
      GetCallAssumedBits(mcfg, edge, callee, false, assume_list);
      callee->DecRef();
    }
    else if (edge->IsCall()) {
      // add conditional assumes for the indirect targets of the call.
      // this is most useful for baked information and annotations, where
      // we sometimes need to attach information at indirect calls.

      CallEdgeSet *callees = CalleeCache.Lookup(id->BaseVar());
      size_t old_count = assume_list->Size();

      if (callees) {
        for (size_t cind = 0; cind < callees->GetEdgeCount(); cind++) {
          const CallEdge &cedge = callees->GetEdge(cind);
          if (cedge.where.id == id && cedge.where.point == point) {
            // BlockId::Make consumes a reference on the callee variable.
            cedge.callee->IncRef();
            BlockId *callee = BlockId::Make(B_Function, cedge.callee);

            GetCallAssumedBits(mcfg, edge, callee, true, assume_list);
            callee->DecRef();
          }
        }
      }

      if (assume_list->Size() != old_count) {
        // we managed to do something at this indirect call site.
        // add another assumption restricting the possible callees to
        // only those identified by our callgraph.
        // NOTE: the list only grew inside the `if (callees)` block above,
        // so callees is necessarily non-NULL here.

        GuardExpVector receiver_list;
        mcfg->TranslateReceiver(point, &receiver_list);

        for (size_t rind = 0; rind < receiver_list.Size(); rind++) {
          const GuardExp &gs = receiver_list[rind];
          gs.guard->IncRef();

          // make a bit: !when || rcv == callee0 || rcv == callee1 || ...
          Bit *extra_bit = Bit::MakeNot(gs.guard);

          for (size_t cind = 0; cind < callees->GetEdgeCount(); cind++) {
            const CallEdge &cedge = callees->GetEdge(cind);
            if (cedge.where.id == id && cedge.where.point == point) {
              Variable *callee_var = cedge.callee;
              callee_var->IncRef();

              Exp *callee_exp = Exp::MakeVar(callee_var);
              gs.exp->IncRef();
              Bit *equal = Exp::MakeCompareBit(B_Equal, callee_exp, gs.exp);

              extra_bit = Bit::MakeOr(extra_bit, equal);
            }
          }

          extra_bit->MoveRef(NULL, assume_list);

          AssumeInfo info;
          info.bit = extra_bit;
          assume_list->PushBack(info);
        }
      }

      CalleeCache.Release(id->BaseVar());
    }
  }

  BodyAnnotCache.Release(id->Function());

  // add assumptions from heap invariants describing values mentioned
  // in added assumptions. we could keep doing this transitively but don't,
  // to ensure termination.
  size_t count = assume_list->Size();
  for (size_t ind = 0; ind < count; ind++) {
    InvariantAssumeVisitor visitor(NULL, 0, assume_list);
    assume_list->At(ind).bit->DoVisit(&visitor);
  }

  CombineAssumeList(assume_list);
}
// Infer assertions and invariants for each summary in summary_list.
// summary_list holds all the summaries for one function and its loops
// (they share the same base variable). For each block this gathers the
// assertions implied by annotations (postconditions, asserts, invariants
// affected by writes, callee preconditions) plus buffer/GC-safety scans,
// prunes redundant ones, and installs the result into the summary, then
// runs invariant inference over all the summaries.
void InferSummaries(const Vector<BlockSummary*> &summary_list)
{
  static BaseTimer infer_timer("infer_summaries");
  Timer _timer(&infer_timer);

  if (summary_list.Empty())
    return;

  // all summaries come from the same function, so its annotations and
  // callgraph entries are looked up once up front.
  Variable *function = summary_list[0]->GetId()->BaseVar();
  Vector<BlockCFG*> *annot_list = BodyAnnotCache.Lookup(function->GetName());

  // all traces which might refer to the result of pointer arithmetic.
  Vector<Exp*> arithmetic_list;
  ArithmeticEscape escape(function, arithmetic_list);

  // initial pass over the CFGs to get traces used in pointer arithmetic.
  for (size_t ind = 0; ind < summary_list.Size(); ind++) {
    BlockSummary *sum = summary_list[ind];

    BlockCFG *cfg = sum->GetMemory()->GetCFG();

    for (size_t eind = 0; eind < cfg->GetEdgeCount(); eind++) {
      PEdge *edge = cfg->GetEdge(eind);

      if (PEdgeAssign *assign_edge = edge->IfAssign()) {
        Exp *left = assign_edge->GetLeftSide();
        Exp *right = assign_edge->GetRightSide();
        ProcessArithmeticAssign(&escape, cfg->GetId(), left, right);
      }
    }
  }

  for (size_t ind = 0; ind < summary_list.Size(); ind++) {
    BlockSummary *sum = summary_list[ind];
    BlockMemory *mcfg = sum->GetMemory();
    BlockCFG *cfg = mcfg->GetCFG();

    // accumulate all the assertions at points in the CFG.
    Vector<AssertInfo> asserts;

    // add assertions at function exit for any postconditions.
    if (cfg->GetId()->Kind() == B_Function) {
      for (size_t aind = 0; annot_list && aind < annot_list->Size(); aind++) {
        BlockCFG *annot_cfg = annot_list->At(aind);

        if (annot_cfg->GetAnnotationKind() != AK_Postcondition)
          continue;
        if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
          AssertInfo info;
          info.kind = ASK_Annotation;
          info.cls = ASC_Check;
          info.point = cfg->GetExitPoint();
          info.bit = bit;
          asserts.PushBack(info);
        }
      }
    }

    // add assertions for any point annotations within the CFG.
    for (size_t pind = 0; pind < cfg->GetPointAnnotationCount(); pind++) {
      PointAnnotation pann = cfg->GetPointAnnotation(pind);
      BlockCFG *annot_cfg = GetAnnotationCFG(pann.annot);
      if (!annot_cfg) continue;

      if (annot_cfg->GetAnnotationKind() != AK_Assert)
        continue;

      if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
        AssertInfo info;
        info.kind = ASK_Annotation;
        info.cls = ASC_Check;
        info.point = pann.point;
        info.bit = bit;
        asserts.PushBack(info);
      }
    }

    for (size_t eind = 0; eind < cfg->GetEdgeCount(); eind++) {
      PEdge *edge = cfg->GetEdge(eind);
      PPoint point = edge->GetSource();

      if (PEdgeAnnotation *nedge = edge->IfAnnotation()) {
        // add an assertion for this annotation if it not an assume.
        BlockCFG *annot_cfg = GetAnnotationCFG(nedge->GetAnnotationId());
        if (!annot_cfg) continue;

        if (annot_cfg->GetAnnotationKind() != AK_Assert &&
            annot_cfg->GetAnnotationKind() != AK_AssertRuntime) {
          continue;
        }

        if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
          AssertInfo info;
          info.kind = (annot_cfg->GetAnnotationKind() == AK_Assert)
            ? ASK_Annotation : ASK_AnnotationRuntime;
          info.cls = ASC_Check;
          info.point = point;
          info.bit = bit;
          asserts.PushBack(info);
        }
      }

      // add assertions for any invariants affected by a write.

      // the lvalue written by this edge, if any: the assignment target or
      // the call's return-value destination.
      Exp *left = NULL;
      if (PEdgeAssign *nedge = edge->IfAssign())
        left = nedge->GetLeftSide();
      if (PEdgeCall *nedge = edge->IfCall())
        left = nedge->GetReturnValue();

      // for now our detection of affected invariants is pretty crude;
      // writes to fields can affect type invariants on the field's type
      // which use that field, and writes to global variables can affect
      // invariants on that global. TODO: pin this down once we draw a
      // precise line between which invariants can and can't be checked.

      if (left && left->IsFld()) {
        ExpFld *nleft = left->AsFld();
        String *csu_name = nleft->GetField()->GetCSUType()->GetCSUName();
        Vector<BlockCFG*> *comp_annot_list = CompAnnotCache.Lookup(csu_name);

        for (size_t aind = 0;
             comp_annot_list && aind < comp_annot_list->Size();
             aind++) {
          BlockCFG *annot_cfg = comp_annot_list->At(aind);

          if (annot_cfg->GetAnnotationKind() != AK_Invariant)
            continue;
          Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);
          if (!bit) continue;

          // does the invariant mention the field being written?
          Vector<Exp*> lval_list;
          LvalListVisitor visitor(&lval_list);
          bit->DoVisit(&visitor);

          bool uses_field = false;
          for (size_t ind = 0; ind < lval_list.Size(); ind++) {
            if (ExpFld *lval = lval_list[ind]->IfFld()) {
              if (lval->GetField() == nleft->GetField())
                uses_field = true;
            }
          }

          if (uses_field) {
            // this is a type invariant which uses the field being written
            // as an lvalue. we need to assert this write preserves
            // the invariant.
            BlockId *id = annot_cfg->GetId();
            Variable *this_var = Variable::Make(id, VK_This, NULL, 0, NULL);
            Exp *this_exp = Exp::MakeVar(this_var);
            Exp *this_drf = Exp::MakeDrf(this_exp);
            // instantiate the invariant's *this against the written object.
            Bit *new_bit = BitReplaceExp(bit, this_drf, nleft->GetTarget());

            AssertInfo info;
            info.kind = ASK_Invariant;
            info.cls = ASC_Check;
            info.point = point;
            info.bit = new_bit;
            asserts.PushBack(info);
          }
        }

        CompAnnotCache.Release(csu_name);
      }

      if (left && left->IsVar()) {
        Variable *var = left->AsVar()->GetVariable();
        if (var->Kind() == VK_Glob) {
          // a write to a global: re-check any invariants on that global.
          Vector<BlockCFG*> *glob_annot_list =
            InitAnnotCache.Lookup(var->GetName());

          for (size_t aind = 0;
               glob_annot_list && aind < glob_annot_list->Size();
               aind++) {
            BlockCFG *annot_cfg = glob_annot_list->At(aind);

            Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);
            if (!bit) continue;

            AssertInfo info;
            info.kind = ASK_Invariant;
            info.cls = ASC_Check;
            info.point = point;
            info.bit = bit;
            asserts.PushBack(info);
          }

          InitAnnotCache.Release(var->GetName());
        }
      }

      if (PEdgeCall *nedge = edge->IfCall()) {
        // add assertions for any callee preconditions.

        // pull preconditions from both direct and indirect calls.
        Vector<Variable*> callee_names;

        if (Variable *callee = nedge->GetDirectFunction()) {
          callee_names.PushBack(callee);
        } else {
          CallEdgeSet *callees = CalleeCache.Lookup(function);

          if (callees) {
            for (size_t cind = 0; cind < callees->GetEdgeCount(); cind++) {
              const CallEdge &edge = callees->GetEdge(cind);
              if (edge.where.id == cfg->GetId() && edge.where.point == point)
                callee_names.PushBack(edge.callee);
            }
          }

          // CalleeCache release is below.
        }

        for (size_t cind = 0; cind < callee_names.Size(); cind++) {
          String *callee = callee_names[cind]->GetName();

          Vector<BlockCFG*> *call_annot_list = BodyAnnotCache.Lookup(callee);

          for (size_t aind = 0;
               call_annot_list && aind < call_annot_list->Size();
               aind++) {
            BlockCFG *annot_cfg = call_annot_list->At(aind);

            if (annot_cfg->GetAnnotationKind() != AK_Precondition)
              continue;
            if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
              // rewrite the precondition in terms of the caller's state
              // at the call site.
              ConvertCallsiteMapper mapper(cfg, point, false);
              Bit *caller_bit = bit->DoMap(&mapper);
              if (!caller_bit)
                continue;

              AssertInfo info;
              info.kind = ASK_Annotation;
              info.cls = ASC_Check;
              info.point = point;
              info.bit = caller_bit;
              asserts.PushBack(info);
            }
          }

          BodyAnnotCache.Release(callee);
        }

        // release matches the Lookup done only on the indirect-call path.
        if (!nedge->GetDirectFunction())
          CalleeCache.Release(function);
      }

      BufferScanVisitor write_visitor(asserts, arithmetic_list, point, true);
      BufferScanVisitor read_visitor(asserts, arithmetic_list, point, false);
      IntegerScanVisitor integer_visitor(asserts, point);
      GCScanVisitor gcsafe_visitor(asserts, point);

      // only look at the written lvalues for the write visitor.
      if (PEdgeAssign *assign = edge->IfAssign())
        write_visitor.Visit(assign->GetLeftSide());
      if (PEdgeCall *call = edge->IfCall()) {
        if (Exp *returned = call->GetReturnValue())
          write_visitor.Visit(returned);
      }

      edge->DoVisit(&read_visitor);

      // disable integer overflow visitor for now.
      // edge->DoVisit(&integer_visitor);

      edge->DoVisit(&gcsafe_visitor);
    }

    if (cfg->GetId()->Kind() == B_Function) {
      BlockModset *modset = GetBlockModset(cfg->GetId());
      if (modset->CanGC()) {
        // the function may GC: assert GC safety of the function itself
        // at its exit point.
        AssertInfo info;
        info.kind = ASK_CanGC;
        info.cls = ASC_Check;
        info.point = cfg->GetExitPoint();

        String *name = cfg->GetId()->BaseVar()->GetName();
        Variable *var = Variable::Make(NULL, VK_Glob, name, 0, name);
        Exp *varexp = Exp::MakeVar(var);
        Exp *gcsafe = Exp::MakeGCSafe(varexp, false);
        info.bit = Bit::MakeVar(gcsafe);
        asserts.PushBack(info);
      }
    }

    MarkRedundantAssertions(mcfg, asserts);

    // move the finished assertion list into the summary.
    for (size_t ind = 0; ind < asserts.Size(); ind++) {
      const AssertInfo &info = asserts[ind];
      sum->AddAssert(info.kind, info.cls, info.point, info.bit);
    }
  }

  // infer delta and termination invariants for all summaries.
  for (size_t ind = 0; ind < summary_list.Size(); ind++)
    InferInvariants(summary_list[ind], arithmetic_list);

  BodyAnnotCache.Release(function->GetName());
}