static void PopulateNodeDef(const std::wstring& scope, const Variable& src, tensorflow::NodeDef& dst) { // Constant nodes in TensorBoard have special meaning, so need to set the expected name. std::wstring opName = (src.Kind() == VariableKind::Constant) ? L"Const" : VariableKindName(src.Kind()); PopulateNodeDef(GetScopedName(scope, &src), opName, src.GetDataType(), { src }, dst); // TODO: set attrs["value"] for Constant - how to get the value? }
// Incorporates an updated lvalue into this modset. 'mcfg' supplies the memory
// model for the block being processed, 'lval' is the location that was
// written, and 'kind' (possibly NULL) marks terminator updates as opposed to
// plain writes. When 'consider_assign' is set we additionally try to record
// exact exit assignments for the lval; 'from_call' is forwarded to the
// include visitor (set when the update was propagated from a call site).
// NOTE: reference counts are managed manually throughout; 'lval' is pinned on
// entry and released exactly once at the shared 'exit' label.
void BlockModset::ProcessUpdatedLval(BlockMemory *mcfg, Exp *lval, Exp *kind,
                                     bool consider_assign, bool from_call)
{
  // lazily allocate the backing storage for modset entries and assignments.
  if (!m_modset_list)
    m_modset_list = new Vector<PointValue>();
  if (!m_assign_list)
    m_assign_list = new Vector<GuardAssign>();

  // use the ID from the memory rather than the ID from this modset,
  // as this modset has a temporary ID.
  BlockId *use_id = mcfg->GetId();

  // hold a reference on the lvalue, drop it at exit.
  lval->IncRef();
  goto entry;

  // exit label up here to avoid goofy gcc 'crosses initialization' errors.
 exit:
  lval->DecRef();
  return;

 entry:
  ModsetIncludeVisitor visitor(use_id->Kind(), from_call);

  // use the base buffer if we are updating a terminator.
  if (kind) {
    if (ExpTerminate *nkind = kind->IfTerminate()) {
      // ignore field terminator modsets, these are pretty much useless.
      if (nkind->GetTerminateTest()->IsFld())
        goto exit;

      // swap 'lval' for the base buffer; the old reference is dropped and
      // the reference from GetBaseBuffer takes its place.
      Exp *new_lval = mcfg->GetBaseBuffer(lval, nkind->GetStrideType());
      lval->DecRef();
      lval = new_lval;
      visitor.buffer = true;
    }
  }

  // scan the lvalue itself for disqualifying subexpressions; afterwards the
  // visitor switches to rvalue rules for the assignment values/guards below.
  visitor.SetFoundLval(true);
  lval->DoVisit(&visitor);
  visitor.SetFoundLval(false);
  visitor.rvalue = true;

  if (visitor.excluded)
    goto exit;

  Variable *root = lval->Root();
  if (!root)
    goto exit;

  // argument lvals with zero dereferences are additionally excluded.
  // these updates are local to the current function. also look for updates
  // to 'this' which can come from frontend parse/tcheck errors.
  if (use_id->Kind() == B_Function &&
      (root->Kind() == VK_Arg || root->Kind() == VK_This)) {
    if (lval->DrfCount() == 0)
      goto exit;
  }

  // add to the modset if this is not the function's return value. we don't
  // need to explicitly add the return value as it is special cased by
  // BlockMemory and is always treated as modified.
  if (root->Kind() != VK_Return) {
    // AddModset consumes references on both lval and kind.
    lval->IncRef();
    if (kind)
      kind->IncRef();
    AddModset(lval, kind);
  }

  if (!consider_assign)
    goto exit;

  // should only be generating direct assignments for Drf() updates.
  Assert(kind == NULL);

  // don't generate assignments for loop iterations.
  if (use_id->Kind() != B_Function)
    goto exit;

  // don't generate assignments for global variables.
  if (root->IsGlobal())
    goto exit;

  // see if we already have assignments for this lval.
  for (size_t ind = 0; ind < GetAssignCount(); ind++) {
    if (m_assign_list->At(ind).left == lval)
      goto exit;
  }

  PPoint exit_point = mcfg->GetCFG()->GetExitPoint();
  if (!exit_point)
    goto exit;

  // temporary vector to hold assignments. if we find a problem with
  // the assigns (bad lvalue, etc.) we will bail out and clear this list.
  Vector<GuardAssign> assigns;

  GuardExpVector exit_values;
  mcfg->GetValComplete(lval, NULL, exit_point, &exit_values, true);

  // cases we will currently generate assignments for. in all cases
  // the rvalue and guard must be functionally determined from the arguments.
  // 1. one or two possible values for the lval.
  // 2. all values for the lval are constants.
  for (size_t ind = 0; ind < exit_values.Size(); ind++) {
    const GuardExp &val = exit_values[ind];

    if (val.guard->Size() >= ASSIGN_BIT_CUTOFF)
      goto exit;

    if (exit_values.Size() <= 2) {
      if (val.exp->TermCountExceeds(ASSIGN_EXP_CUTOFF))
        goto exit;
    }
    else {
      if (!val.exp->IsInt())
        goto exit;
    }
  }

  // tentatively record the assignments, scanning each rvalue and guard for
  // excluded subexpressions along the way.
  for (size_t ind = 0; ind < exit_values.Size(); ind++) {
    const GuardExp &val = exit_values[ind];
    val.exp->DoVisit(&visitor);
    val.guard->DoVisit(&visitor);

    lval->IncRef();
    val.IncRef();
    assigns.PushBack(GuardAssign(lval, val.exp, val.guard));
  }

  if (visitor.excluded) {
    // bail out: drop the references taken for the tentative assignments.
    for (size_t ind = 0; ind < assigns.Size(); ind++) {
      const GuardAssign &gasn = assigns[ind];
      gasn.left->DecRef();
      gasn.right->DecRef();
      gasn.guard->DecRef();
    }
  }
  else {
    // commit the assignments into this modset.
    for (size_t ind = 0; ind < assigns.Size(); ind++) {
      const GuardAssign &gasn = assigns[ind];
      AddAssign(gasn.left, gasn.right, gasn.guard);
    }
  }

  goto exit;
}
// Infers and attaches assertions and invariants for every summary in
// 'summary_list'. Only summary_list[0]'s ID is consulted for the base
// function, so this presumably assumes all summaries belong to the same
// function (TODO confirm against callers). For each block this collects
// assertion obligations from: postcondition/assert annotations, type and
// global invariants affected by writes, callee preconditions, buffer/GC
// safety scans; redundant assertions are then pruned and the survivors are
// stored on the summary. Finally delta/termination invariants are inferred.
void InferSummaries(const Vector<BlockSummary*> &summary_list)
{
  static BaseTimer infer_timer("infer_summaries");
  Timer _timer(&infer_timer);

  if (summary_list.Empty())
    return;

  Variable *function = summary_list[0]->GetId()->BaseVar();
  // annotation CFGs attached to the function body; released at the end.
  Vector<BlockCFG*> *annot_list = BodyAnnotCache.Lookup(function->GetName());

  // all traces which might refer to the result of pointer arithmetic.
  Vector<Exp*> arithmetic_list;
  ArithmeticEscape escape(function, arithmetic_list);

  // initial pass over the CFGs to get traces used in pointer arithmetic.
  for (size_t ind = 0; ind < summary_list.Size(); ind++) {
    BlockSummary *sum = summary_list[ind];
    BlockCFG *cfg = sum->GetMemory()->GetCFG();

    for (size_t eind = 0; eind < cfg->GetEdgeCount(); eind++) {
      PEdge *edge = cfg->GetEdge(eind);
      if (PEdgeAssign *assign_edge = edge->IfAssign()) {
        Exp *left = assign_edge->GetLeftSide();
        Exp *right = assign_edge->GetRightSide();
        ProcessArithmeticAssign(&escape, cfg->GetId(), left, right);
      }
    }
  }

  // main pass: gather assertion obligations per block.
  for (size_t ind = 0; ind < summary_list.Size(); ind++) {
    BlockSummary *sum = summary_list[ind];
    BlockMemory *mcfg = sum->GetMemory();
    BlockCFG *cfg = mcfg->GetCFG();

    // accumulate all the assertions at points in the CFG.
    Vector<AssertInfo> asserts;

    // add assertions at function exit for any postconditions.
    if (cfg->GetId()->Kind() == B_Function) {
      for (size_t aind = 0; annot_list && aind < annot_list->Size(); aind++) {
        BlockCFG *annot_cfg = annot_list->At(aind);

        if (annot_cfg->GetAnnotationKind() != AK_Postcondition)
          continue;
        if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
          AssertInfo info;
          info.kind = ASK_Annotation;
          info.cls = ASC_Check;
          info.point = cfg->GetExitPoint();
          info.bit = bit;
          asserts.PushBack(info);
        }
      }
    }

    // add assertions for any point annotations within the CFG.
    for (size_t pind = 0; pind < cfg->GetPointAnnotationCount(); pind++) {
      PointAnnotation pann = cfg->GetPointAnnotation(pind);
      BlockCFG *annot_cfg = GetAnnotationCFG(pann.annot);
      if (!annot_cfg) continue;

      if (annot_cfg->GetAnnotationKind() != AK_Assert)
        continue;

      if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
        AssertInfo info;
        info.kind = ASK_Annotation;
        info.cls = ASC_Check;
        info.point = pann.point;
        info.bit = bit;
        asserts.PushBack(info);
      }
    }

    for (size_t eind = 0; eind < cfg->GetEdgeCount(); eind++) {
      PEdge *edge = cfg->GetEdge(eind);
      PPoint point = edge->GetSource();

      if (PEdgeAnnotation *nedge = edge->IfAnnotation()) {
        // add an assertion for this annotation if it not an assume.
        BlockCFG *annot_cfg = GetAnnotationCFG(nedge->GetAnnotationId());
        if (!annot_cfg) continue;

        if (annot_cfg->GetAnnotationKind() != AK_Assert &&
            annot_cfg->GetAnnotationKind() != AK_AssertRuntime) {
          continue;
        }

        if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
          AssertInfo info;
          info.kind = (annot_cfg->GetAnnotationKind() == AK_Assert)
            ? ASK_Annotation : ASK_AnnotationRuntime;
          info.cls = ASC_Check;
          info.point = point;
          info.bit = bit;
          asserts.PushBack(info);
        }
      }

      // add assertions for any invariants affected by a write.
      // 'left' is the written lvalue, if this edge writes anything.
      Exp *left = NULL;
      if (PEdgeAssign *nedge = edge->IfAssign())
        left = nedge->GetLeftSide();
      if (PEdgeCall *nedge = edge->IfCall())
        left = nedge->GetReturnValue();

      // for now our detection of affected invariants is pretty crude;
      // writes to fields can affect type invariants on the field's type
      // which use that field, and writes to global variables can affect
      // invariants on that global. TODO: pin this down once we draw a
      // precise line between which invariants can and can't be checked.

      if (left && left->IsFld()) {
        // field write: check type invariants on the containing CSU type.
        ExpFld *nleft = left->AsFld();
        String *csu_name = nleft->GetField()->GetCSUType()->GetCSUName();
        Vector<BlockCFG*> *comp_annot_list = CompAnnotCache.Lookup(csu_name);

        for (size_t aind = 0; comp_annot_list && aind < comp_annot_list->Size(); aind++) {
          BlockCFG *annot_cfg = comp_annot_list->At(aind);

          if (annot_cfg->GetAnnotationKind() != AK_Invariant)
            continue;
          Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);
          if (!bit) continue;

          // collect the lvalues mentioned by the invariant bit.
          Vector<Exp*> lval_list;
          LvalListVisitor visitor(&lval_list);
          bit->DoVisit(&visitor);

          bool uses_field = false;
          for (size_t ind = 0; ind < lval_list.Size(); ind++) {
            if (ExpFld *lval = lval_list[ind]->IfFld()) {
              if (lval->GetField() == nleft->GetField())
                uses_field = true;
            }
          }

          if (uses_field) {
            // this is a type invariant which uses the field being written
            // as an lvalue. we need to assert this write preserves
            // the invariant.
            BlockId *id = annot_cfg->GetId();
            Variable *this_var = Variable::Make(id, VK_This, NULL, 0, NULL);
            Exp *this_exp = Exp::MakeVar(this_var);
            Exp *this_drf = Exp::MakeDrf(this_exp);
            // instantiate the invariant at the written object: *this -> target.
            Bit *new_bit = BitReplaceExp(bit, this_drf, nleft->GetTarget());

            AssertInfo info;
            info.kind = ASK_Invariant;
            info.cls = ASC_Check;
            info.point = point;
            info.bit = new_bit;
            asserts.PushBack(info);
          }
        }

        CompAnnotCache.Release(csu_name);
      }

      if (left && left->IsVar()) {
        // global write: check any initializer/invariant annotations on it.
        Variable *var = left->AsVar()->GetVariable();
        if (var->Kind() == VK_Glob) {
          Vector<BlockCFG*> *glob_annot_list = InitAnnotCache.Lookup(var->GetName());

          for (size_t aind = 0; glob_annot_list && aind < glob_annot_list->Size(); aind++) {
            BlockCFG *annot_cfg = glob_annot_list->At(aind);

            Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);
            if (!bit) continue;

            AssertInfo info;
            info.kind = ASK_Invariant;
            info.cls = ASC_Check;
            info.point = point;
            info.bit = bit;
            asserts.PushBack(info);
          }

          InitAnnotCache.Release(var->GetName());
        }
      }

      if (PEdgeCall *nedge = edge->IfCall()) {
        // add assertions for any callee preconditions.

        // pull preconditions from both direct and indirect calls.
        Vector<Variable*> callee_names;

        if (Variable *callee = nedge->GetDirectFunction()) {
          callee_names.PushBack(callee);
        }
        else {
          // indirect call: find the possible callees recorded at this point.
          CallEdgeSet *callees = CalleeCache.Lookup(function);

          if (callees) {
            for (size_t cind = 0; cind < callees->GetEdgeCount(); cind++) {
              const CallEdge &edge = callees->GetEdge(cind);
              if (edge.where.id == cfg->GetId() && edge.where.point == point)
                callee_names.PushBack(edge.callee);
            }
          }

          // CalleeCache release is below.
        }

        for (size_t cind = 0; cind < callee_names.Size(); cind++) {
          String *callee = callee_names[cind]->GetName();

          Vector<BlockCFG*> *call_annot_list = BodyAnnotCache.Lookup(callee);

          for (size_t aind = 0; call_annot_list && aind < call_annot_list->Size(); aind++) {
            BlockCFG *annot_cfg = call_annot_list->At(aind);

            if (annot_cfg->GetAnnotationKind() != AK_Precondition)
              continue;
            if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
              // translate the precondition into the caller's frame; skip it
              // if the mapping fails.
              ConvertCallsiteMapper mapper(cfg, point, false);
              Bit *caller_bit = bit->DoMap(&mapper);
              if (!caller_bit) continue;

              AssertInfo info;
              info.kind = ASK_Annotation;
              info.cls = ASC_Check;
              info.point = point;
              info.bit = caller_bit;
              asserts.PushBack(info);
            }
          }

          BodyAnnotCache.Release(callee);
        }

        // matches the CalleeCache.Lookup done for indirect calls above.
        if (!nedge->GetDirectFunction())
          CalleeCache.Release(function);
      }

      BufferScanVisitor write_visitor(asserts, arithmetic_list, point, true);
      BufferScanVisitor read_visitor(asserts, arithmetic_list, point, false);
      IntegerScanVisitor integer_visitor(asserts, point);
      GCScanVisitor gcsafe_visitor(asserts, point);

      // only look at the written lvalues for the write visitor.
      if (PEdgeAssign *assign = edge->IfAssign())
        write_visitor.Visit(assign->GetLeftSide());
      if (PEdgeCall *call = edge->IfCall()) {
        if (Exp *returned = call->GetReturnValue())
          write_visitor.Visit(returned);
      }

      edge->DoVisit(&read_visitor);

      // disable integer overflow visitor for now.
      // edge->DoVisit(&integer_visitor);

      edge->DoVisit(&gcsafe_visitor);
    }

    // functions whose modset indicates they can GC get a GC-safety check
    // at exit against a GCSafe bit on the function's own name.
    if (cfg->GetId()->Kind() == B_Function) {
      BlockModset *modset = GetBlockModset(cfg->GetId());
      if (modset->CanGC()) {
        AssertInfo info;
        info.kind = ASK_CanGC;
        info.cls = ASC_Check;
        info.point = cfg->GetExitPoint();

        String *name = cfg->GetId()->BaseVar()->GetName();
        Variable *var = Variable::Make(NULL, VK_Glob, name, 0, name);
        Exp *varexp = Exp::MakeVar(var);
        Exp *gcsafe = Exp::MakeGCSafe(varexp, false);
        info.bit = Bit::MakeVar(gcsafe);
        asserts.PushBack(info);
      }
    }

    MarkRedundantAssertions(mcfg, asserts);

    // move the finished assertion list into the summary.
    for (size_t ind = 0; ind < asserts.Size(); ind++) {
      const AssertInfo &info = asserts[ind];
      sum->AddAssert(info.kind, info.cls, info.point, info.bit);
    }
  }

  // infer delta and termination invariants for all summaries.
  for (size_t ind = 0; ind < summary_list.Size(); ind++)
    InferInvariants(summary_list[ind], arithmetic_list);

  BodyAnnotCache.Release(function->GetName());
}
// Examines one subexpression of a candidate modset lvalue (or of an
// assignment rvalue/guard, when 'rvalue' is set) and records it in
// 'excluded' whenever it disqualifies the enclosing expression.
void Visit(Exp *exp)
{
  if (exp->IsVar()) {
    Variable *var = exp->AsVar()->GetVariable();

    // allow global exps when the assign was not generated from a call.
    if (var->IsGlobal() && !(from_call && !rvalue))
      return;

    if (kind == B_Function) {
      // only consider exps derived from arguments, 'this' and the return
      // variable. note that we will special case the return var later in the
      // modset as it is automatically handled by BlockMemory, but we don't
      // exclude it here so that we can get exact side effects for it
      // if possible.
      bool allowed = var->Kind() == VK_Arg
                  || var->Kind() == VK_This
                  || var->Kind() == VK_Return;
      if (!allowed)
        excluded = exp;

      // watch for taking the address of function arguments and leaving them
      // accessible in the caller. weird!
      if (var->Kind() == VK_Arg && !FoundLval())
        excluded = exp;
    }
    else {
      // only consider exps derived from arguments, 'this' and locals.
      bool allowed = var->Kind() == VK_Arg
                  || var->Kind() == VK_This
                  || var->Kind() == VK_Local;
      if (!allowed)
        excluded = exp;
    }
    return;
  }

  if (exp->IsDrf()) {
    // dereference depth only matters below the updated lvalue itself, and
    // only on the lvalue (non-rvalue) side.
    if (FoundLval() && !rvalue) {
      size_t max_derefs = buffer ? 2 : 1;
      if (exp->DrfCount() > max_derefs)
        excluded = exp;
    }
    return;
  }

  if (exp->IsIndex()) {
    // indexes are allowed only for assignment rvalues.
    if (!rvalue) {
      excluded = exp;
      return;
    }
  }

  if (exp->IsFld()) {
    // limit on the number of fields in expressions. this cuts off infinite
    // recursion during modset computation when the program does funny casts.
    if (exp->FldCount() > 6) {
      excluded = exp;
      return;
    }
  }

  if (exp->IsRfld()) {
    // all rfld expressions are excluded. these are usually here because
    // of indirect calls which could operate on a variety of structures
    // (this happens in both C and C++).
    excluded = exp;
    return;
  }

  if (exp->IsClobber()) {
    excluded = exp;
    return;
  }
  if (exp->IsVal()) {
    excluded = exp;
    return;
  }
}