Example #1
// read a binary tag value from the specified buffer and get whatever
// hash object it represents from the buffer, returning a reference
// to that object. we just match against the types of values that
// can appear at the top level of a database entry.
HashObject* ReadSingleValue(Buffer *buf)
{
  switch (PeekOpenTag(buf)) {

  case TAG_BlockCFG:         return BlockCFG::Read(buf);
  case TAG_CompositeCSU:     return CompositeCSU::Read(buf);
  case TAG_EscapeEdgeSet:    return EscapeEdgeSet::Read(buf);
  case TAG_EscapeAccessSet:  return EscapeAccessSet::Read(buf);
  case TAG_CallEdgeSet:      return CallEdgeSet::Read(buf);
  case TAG_BlockModset:      return BlockModset::Read(buf);
  case TAG_BlockSummary:     return BlockSummary::Read(buf);

  // special case: get the CFG too.
  case TAG_BlockMemory: {
    BlockMemory *mcfg = BlockMemory::Read(buf);
    BlockCFG *cfg = GetBlockCFG(mcfg->GetId());
    if (cfg)
      mcfg->SetCFG(cfg);
    return mcfg;
  }

  default:
    logout << "ERROR: Unknown top-level tag in entry: "
           << PeekOpenTag(buf) << endl;
    Assert(false);
    return NULL;
  }
}
Example #2
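// get the condition to check for the case where the loop at m_point does
// not execute at all. *base_bit receives m_bit converted for exit and
// clobber values, and res receives the guarded translation of m_bit at
// the loop point.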
void WherePostcondition::GetSkipLoopBits(Bit **base_bit, GuardBitVector *res)
{
  BlockMemory *mcfg = m_frame->Memory();

  *base_bit = BitConvertExitClobber(m_bit);

  // TODO: is SkipClobber the right translation to use here?
  // there can't be any clobbers in m_bit, just exit expressions,
  // which will be handled correctly by TranslateBit. needs cleanup.

  GuardBitVector base_res;
  mcfg->TranslateBit(TRK_SkipClobber, m_point, m_bit, &base_res);
  RemoveValBit(m_frame->Id(), m_frame->Memory(), base_res, res);
}
Example #3
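// get the guarded conditions under which this invariant holds at exit
// from write_frame, relative to the written CSU write_csu. *base_bit
// receives the bit to display, with the invariant's 'this' replaced by
// base_csu when one is supplied.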
void WhereInvariant::GetHeapBits(CheckerFrame *write_frame,
                                 Exp *write_csu, Exp *base_csu,
                                 Bit **base_bit, GuardBitVector *res)
{
  BlockMemory *mcfg = write_frame->Memory();

  Exp *old_lval = NULL;
  if (m_csu) {
    Variable *this_var = Variable::Make(NULL, VK_This, NULL, 0, NULL);
    Exp *old_this = Exp::MakeVar(this_var);
    old_lval = Exp::MakeDrf(old_this);
  }

  Bit *exit_bit = TranslateHeapBit(old_lval, write_csu, true, m_bit);
  Assert(exit_bit);

  if (old_lval)
    old_lval->DecRef();

  // TODO: using this to get the base bit for an invariant is fairly
  // hacked up, but for now we can't do this correctly, as the base bit
  // needs to be relative to the CFG exit point, not the point where
  // the writes occur. for now, just get the displayable point for
  // the base CSU and hope it means the same thing at exit as at
  // the point of the write.

  Bit *new_bit = BitConvertExitClobber(m_bit);

  if (base_csu) {
    *base_bit = BitReplaceExp(new_bit, old_lval, base_csu);
    new_bit->DecRef();
  }
  else {
    *base_bit = new_bit;
  }

  GuardBitVector base_res;
  PPoint exit_point = mcfg->GetCFG()->GetExitPoint();
  mcfg->TranslateBit(TRK_Exit, exit_point, exit_bit, &base_res);

  exit_bit->DecRef();
  RemoveValBit(write_frame->Id(), write_frame->Memory(), base_res, res);
}
Example #4
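// check an individual frame in the exploration, either terminating the
// search at this frame or propagating to its callees and callers.
// returns true if a report was generated and checking should stop,
// false if the search from this frame is exhausted without a report.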
bool CheckFrame(CheckerState *state, CheckerFrame *frame,
                CheckerPropagate *propagate)
{
  Assert(!state->GetReportKind());

  BlockMemory *mcfg = frame->Memory();
  BlockCFG *cfg = mcfg->GetCFG();
  BlockId *id = cfg->GetId();

  if (checker_verbose.IsSpecified()) {
    logout << "CHECK: " << frame << ": Entering " << id << endl;
    if (propagate)
      propagate->Print();
  }

  Where *where = propagate ? propagate->m_where : NULL;

  // check if we should terminate the search at this point (with or without
  // generating a report).
  if (where && where->IsNone()) {
    WhereNone *nwhere = where->AsNone();
    ReportKind kind = nwhere->GetReportKind();

    if (kind == RK_None) {
      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame << ": Ignoring" << endl;
      return false;
    }
    else {
      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame << ": Propagation failed" << endl;
      state->SetReport(kind);
      return true;
    }
  }

  // check for other propagations on the stack with frames for the same block,
  // and block the recursion if we exceed the checker's depth. we assume that
  // if we're ever going to terminate in the presence of recursion, we will
  // do so quickly.

  if (propagate) {
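    // a checker depth of zero disables this recursion cutoff.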
    if (uint32_t depth = checker_depth.UIntValue()) {
      Vector<CheckerFrame*> recurse_frames;

      for (size_t ind = 0; ind < state->m_stack.Size(); ind++) {
        CheckerFrame *other_frame = state->m_stack[ind]->m_frame;
        if (other_frame != frame && other_frame->Memory() == mcfg &&
            !recurse_frames.Contains(other_frame))
          recurse_frames.PushBack(other_frame);
      }

      if (recurse_frames.Size() >= depth) {
        state->SetReport(RK_Recursion);
        return true;
      }
    }
  }

  // check if we are propagating into some callee.
  if (where && where->IsPostcondition()) {
    WherePostcondition *nwhere = where->AsPostcondition();

    // expand the callee at the specified point.
    PPoint point = nwhere->GetPoint();
    PEdge *edge = cfg->GetSingleOutgoingEdge(point);

    if (edge->IsLoop()) {
      // expanding data from a loop. first try the case that the loop
      // does not execute at all.

      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame
               << ": Trying to skip loop at " << point << endl;

      state->PushContext();

      if (CheckSkipLoop(state, frame, point, nwhere))
        return true;

      state->PopContext();
    }

    if (BlockId *callee = edge->GetDirectCallee()) {
      // easy case, there is only a single callee.

      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame
               << ": Expanding single callee at " << point
               << ": " << callee << endl;

      state->PushContext();

      if (CheckSingleCallee(state, frame, point, nwhere, callee, true))
        return true;

      state->PopContext();
    }
    else {
      // iterate through all the possible callees.

      Variable *function = id->BaseVar();
      CallEdgeSet *callees = CalleeCache.Lookup(function);

      Vector<Variable*> callee_vars;

      if (callees) {
        for (size_t eind = 0; eind < callees->GetEdgeCount(); eind++) {
          const CallEdge &cedge = callees->GetEdge(eind);
          if (cedge.where.id == id && cedge.where.point == point)
            callee_vars.PushBack(cedge.callee);
        }
      }

      SortVector<Variable*,Variable>(&callee_vars);

      for (size_t cind = 0; cind < callee_vars.Size(); cind++) {
        Variable *callee = callee_vars[cind];

        if (checker_verbose.IsSpecified())
          logout << "CHECK: " << frame
                 << ": Expanding indirect callee at " << point
                 << ": " << callee << endl;

        callee->IncRef();
        BlockId *callee_id = BlockId::Make(B_Function, callee);

        state->PushContext();

        if (CheckSingleCallee(state, frame, point,
                              nwhere, callee_id, false)) {
          CalleeCache.Release(function);
          return true;
        }

        state->PopContext();
      }

      if (callee_vars.Empty()) {
        if (checker_verbose.IsSpecified())
          logout << "CHECK: " << frame
                 << ": No callees to expand at " << point << endl;
      }

      CalleeCache.Release(function);
    }

    return false;
  }

  // any precondition must be propagated up to the callers.
  WherePrecondition *precondition = NULL;
  if (where)
    precondition = where->IfPrecondition();

  // whether we will be reconnecting to the caller without any
  // propagation information.
  bool reconnect_caller = false;

  if (precondition) {
    Bit *bit = precondition->GetBit();
    WherePrecondition *dupe_precondition = new WherePrecondition(mcfg, bit);
    state->m_precondition_list.PushBack(dupe_precondition);
  }
  else {
    // even without a precondition, we will propagate to the caller if there
    // is already a caller hooked up or if we are inside a loop body.

    if (frame->GetCaller().id != NULL)
      reconnect_caller = true;

    if (frame->Kind() == B_Loop)
      reconnect_caller = true;
  }

  if (propagate && reconnect_caller) {
    // check to see if we are delaying any heap propagation.
    if (where->IsInvariant()) {
      Assert(state->m_delayed_propagate_heap == NULL);
      state->m_delayed_propagate_heap = propagate;
    }
  }
  else if (!precondition && !reconnect_caller) {
    // check to see if we are performing heap propagation.

    if (state->m_delayed_propagate_heap) {
      Assert(propagate == NULL);
      CheckerPropagate *heap_propagate = state->m_delayed_propagate_heap;
      state->m_delayed_propagate_heap = NULL;

      WhereInvariant *invariant = heap_propagate->m_where->AsInvariant();

      if (CheckHeapWrites(state, frame, heap_propagate->m_frame, invariant))
        return true;

      state->m_delayed_propagate_heap = heap_propagate;
      return false;
    }
    else if (where && where->IsInvariant()) {
      return CheckHeapWrites(state, frame, frame, where->AsInvariant());
    }

    Assert(propagate);

    // don't need to expand the callers or anything else.
    // we can finally terminate propagation with an error report.

    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame
             << ": Nothing to expand, finishing" << endl;

    state->SetReport(RK_Finished);
    return true;
  }

  if (frame->GetCaller().id != NULL) {
    // just propagate to the existing caller.

    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame
             << ": Returning to caller" << endl;

    state->PushContext();

    if (CheckSingleCaller(state, frame, precondition, frame->GetCaller()))
      return true;

    state->PopContext();
  }
  else if (id->Kind() == B_Function) {
    // propagate to all callers to the function.

    Variable *function = id->BaseVar();
    CallEdgeSet *callers = CallerCache.Lookup(function);

    Vector<BlockPPoint> caller_points;

    for (size_t eind = 0; callers && eind < callers->GetEdgeCount(); eind++) {
      const CallEdge &edge = callers->GetEdge(eind);
      Assert(edge.callee == function);

      caller_points.PushBack(edge.where);
    }

    SortVector<BlockPPoint,BlockPPoint>(&caller_points);

    for (size_t cind = 0; cind < caller_points.Size(); cind++) {
      BlockPPoint caller = caller_points[cind];

      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame
               << ": Checking caller: " << caller << endl;

      state->PushContext();

      if (CheckSingleCaller(state, frame, precondition, caller)) {
        CallerCache.Release(function);
        return true;
      }

      state->PopContext();
    }

    if (caller_points.Empty()) {
      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame << ": No callers to expand" << endl;
    }

    CallerCache.Release(function);
  }
  else if (id->Kind() == B_Loop) {
    // check all possible callers of the loop. unroll an iteration before
    // checking the parents so that if we can't figure out a sufficient
    // condition for the loop we will stop exploration quickly.

    // unroll another iteration of the loop.

    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame
             << ": Unrolling loop iteration" << endl;

    state->PushContext();

    BlockPPoint recursive_caller(id, cfg->GetExitPoint());
    if (CheckSingleCaller(state, frame, precondition, recursive_caller))
      return true;

    state->PopContext();

    // check the parents which can initially invoke this loop.

    if (frame->GetLoopParent().id != NULL) {
      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame
               << ": Checking existing loop parent: "
               << frame->GetLoopParent() << endl;

      state->PushContext();

      if (CheckSingleCaller(state, frame, precondition,
                            frame->GetLoopParent()))
        return true;

      state->PopContext();
    }
    else {
      for (size_t pind = 0; pind < cfg->GetLoopParentCount(); pind++) {
        BlockPPoint where = cfg->GetLoopParent(pind);

        if (checker_verbose.IsSpecified())
          logout << "CHECK: " << frame
                 << ": Checking loop parent: " << where << endl;

        state->PushContext();

        if (CheckSingleCaller(state, frame, precondition, where))
          return true;

        state->PopContext();
      }
    }
  }
  else if (id->Kind() == B_Initializer) {
    // initializers don't have callers, so we can just ignore this.
    // TODO: should address why this code is reached in the first place.
    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame << ": Initializer has no callers" << endl;
    return false;
  }
  else {
    // unknown type of block.
    Assert(false);
  }

  // if we set the state's delayed heap propagation then unset it.
  if (propagate && state->m_delayed_propagate_heap == propagate)
    state->m_delayed_propagate_heap = NULL;

  return false;
}
Example #5
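// get the writes in the block for heap_write which assign to an lvalue
// whose sanitized form matches the heap_write target, appending the
// matches to writes.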
void GetMatchingHeapWrites(const EscapeAccess &heap_write,
                           Vector<HeapWriteInfo> *writes)
{
  BlockId *id = heap_write.where.id;
  BlockMemory *mcfg = GetBlockMemory(id);

  if (mcfg == NULL) {
    logout << "WARNING: Missing memory: '" << id << "'" << endl;
    return;
  }

  BlockCFG *cfg = mcfg->GetCFG();

  // for incremental analysis, make sure the write CFG uses the right version.
  // as with checking callers, if the CFG has changed but the new one still
  // has a matching write, we will see an escape access for the new CFG.
  if (cfg->GetVersion() != heap_write.where.version) {
    if (checker_verbose.IsSpecified())
      logout << "CHECK: Write is an older version: "
             << heap_write.where.id << ": "
             << heap_write.where.version << endl;
    mcfg->DecRef();
    return;
  }

  PPoint point = heap_write.where.point;
  PPoint exit_point = mcfg->GetCFG()->GetExitPoint();

  // find a point-relative lvalue written at the write point with
  // the sanitized representation from the heap_write trace.
  // TODO: we only match against direct assignments in the CFG for now,
  // ignoring structural copies (which are simple recursive writes).

  PEdge *edge = cfg->GetSingleOutgoingEdge(point);
  Exp *point_lval = NULL;

  if (PEdgeAssign *nedge = edge->IfAssign())
    point_lval = nedge->GetLeftSide();
  else if (PEdgeCall *nedge = edge->IfCall())
    point_lval = nedge->GetReturnValue();

  bool lval_matches = false;

  if (point_lval) {
    if (Exp *new_point_lval = Trace::SanitizeExp(point_lval)) {
      lval_matches = (new_point_lval == heap_write.target->GetValue());
      new_point_lval->DecRef();
    }
  }

  if (!lval_matches) {
    mcfg->DecRef();
    return;
  }

  // it would be nice to remove Val() expressions from this list, but we can't
  // do that as lvalues in memory assignments can contain Val and we want to
  // see the effects of those assignments. TODO: fix.
  GuardExpVector lval_res;
  mcfg->TranslateExp(TRK_Point, point, point_lval, &lval_res);

  for (size_t ind = 0; ind < lval_res.Size(); ind++) {
    const GuardExp &lv = lval_res[ind];

    HeapWriteInfo info;
    info.mcfg = mcfg;
    info.lval = lv.exp;
    info.base_lval = point_lval;

    // look for a condition where the lvalue is not written.
    GuardExpVector exit_vals;
    info.mcfg->GetValComplete(info.lval, NULL, exit_point, &exit_vals);

    for (size_t vind = 0; vind < exit_vals.Size(); vind++) {
      const GuardExp &val = exit_vals[vind];

      // exclude cases where the lvalue refers to its value at block entry.
      if (ExpDrf *nval = val.exp->IfDrf()) {
        if (nval->GetTarget() == info.lval)
          info.exclude.PushBack(val.guard);
      }
    }

    if (!writes->Contains(info)) {
      info.mcfg->IncRef(writes);
      info.lval->IncRef(writes);
      info.base_lval->IncRef(writes);
      IncRefVector<Bit>(info.exclude, writes);
      writes->PushBack(info);
    }
  }

  mcfg->DecRef();
}
Example #6
// returns whether the error condition is satisfiable within frame.
bool TestErrorSatisfiable(CheckerState *state, CheckerFrame *frame, Bit *bit)
{
  BlockMemory *mcfg = frame->Memory();
  Solver *solver = state->GetSolver();

  if (!solver->IsSatisfiable()) {
    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame << ": Guard unsatisfiable: " << bit
             << " [" << bit->Hash() << "]" << endl;
    return false;
  }

  state->PushContext();
  state->AssertBaseBits();

  if (!solver->IsSatisfiable()) {
    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame << ": Error unsatisfiable: " << bit
             << " [" << bit->Hash() << "]" << endl;
    state->PopContext();
    return false;
  }

  if (!frame->m_checked_assertions) {
    frame->m_checked_assertions = true;

    // check to see if the error is contradicted by previous assertions
    // in this frame. assert the previous assertions, but don't keep
    // them around past this function to avoid polluting the solver
    // with worthless extra checks.

    BlockSummary *sum = GetBlockSummary(mcfg->GetId());

    const Vector<AssertInfo> *asserts = sum->GetAsserts();
    size_t assert_count = VectorSize<AssertInfo>(asserts);

    for (size_t ind = 0; ind < assert_count; ind++) {
      const AssertInfo &info = asserts->At(ind);

      // only use the same kind of assertion to check for redundancy.
      if (info.kind != state->GetAssertKind())
        continue;

      if (info.cls != ASC_Check)
        continue;

      if (info.point < frame->EndPoint()) {
        // get the asserted condition relative to block entry.

        Bit *assert_value;
        mcfg->TranslateBit(TRK_Point, info.point, info.bit, &assert_value);
        assert_value->MoveRef(&assert_value, NULL);

        Bit *point_guard = mcfg->GetGuard(info.point);
        point_guard->IncRef();

        Bit *imply_assert =
          Bit::MakeImply(point_guard, assert_value);

        solver->AddConstraint(frame->Id(), imply_assert);
      }
    }

    sum->DecRef();

    if (!solver->IsSatisfiable()) {
      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame
               << ": Unsatisfiable from assertions" << endl;

      state->PopContext();
      return false;
    }
  }

  state->PopContext();
  return true;
}
Example #7
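  // add to assume_list the type invariants for the host type of any field
  // accessed by exp, and the initializer invariants for any global
  // variable appearing in exp.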
  void Visit(Exp *exp)
  {
    if (ExpFld *nexp = exp->IfFld()) {
      // pick up any type invariants from the host type.
      String *csu_name = nexp->GetField()->GetCSUType()->GetCSUName();
      Vector<BlockCFG*> *annot_list = CompAnnotCache.Lookup(csu_name);

      for (size_t ind = 0; annot_list && ind < annot_list->Size(); ind++) {
        BlockCFG *annot_cfg = annot_list->At(ind);
        Assert(annot_cfg->GetAnnotationKind() == AK_Invariant ||
               annot_cfg->GetAnnotationKind() == AK_InvariantAssume);
        BlockId *id = annot_cfg->GetId();

        Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);
        if (!bit) continue;

        // get the *this expression. we'll replace this with the actual CSU
        // lvalue to get the assumed bit.
        id->IncRef();
        Variable *this_var = Variable::Make(id, VK_This, NULL, 0, NULL);
        Exp *this_exp = Exp::MakeVar(this_var);
        Exp *this_drf = Exp::MakeDrf(this_exp);
        Exp *target = nexp->GetTarget();

        GuardExpVector lval_res;
        if (mcfg) {
          mcfg->TranslateExp(TRK_Point, point, target, &lval_res);
        }
        else {
          target->IncRef();
          lval_res.PushBack(GuardExp(target, Bit::MakeConstant(true)));
        }

        for (size_t lind = 0; lind < lval_res.Size(); lind++) {
          // ignore the guard component of the result here. this means that
          // accessing a field of a value implies the related invariants hold
          // for the value along all paths. that is normally right, except
          // when the value is the result of a cast and could have a
          // different type along other paths. TODO: sort this out.
          const GuardExp &gs = lval_res[lind];
          Bit *new_bit = BitReplaceExp(bit, this_drf, gs.exp);

          new_bit->MoveRef(NULL, assume_list);
          annot_cfg->IncRef(assume_list);

          AssumeInfo info;
          info.annot = annot_cfg;
          info.point = 0;
          info.bit = new_bit;
          assume_list->PushBack(info);
        }

        this_drf->DecRef();
      }

      CompAnnotCache.Release(csu_name);
    }

    if (ExpVar *nexp = exp->IfVar()) {
      if (nexp->GetVariable()->Kind() == VK_Glob) {
        String *var_name = nexp->GetVariable()->GetName();
        Vector<BlockCFG*> *annot_list = InitAnnotCache.Lookup(var_name);

        for (size_t ind = 0; annot_list && ind < annot_list->Size(); ind++) {
          BlockCFG *annot_cfg = annot_list->At(ind);
          Assert(annot_cfg->GetAnnotationKind() == AK_Invariant ||
                 annot_cfg->GetAnnotationKind() == AK_InvariantAssume);

          Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);
          if (!bit) continue;

          bit->IncRef(assume_list);
          annot_cfg->IncRef(assume_list);

          AssumeInfo info;
          info.annot = annot_cfg;
          info.point = 0;
          info.bit = bit;
          assume_list->PushBack(info);
        }

        InitAnnotCache.Release(var_name);
      }
    }
  }
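Example #8
// infer assertions and invariants for each summary in summary_list.
// the summaries are expected to all be for blocks of a single function.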
void InferSummaries(const Vector<BlockSummary*> &summary_list)
{
  static BaseTimer infer_timer("infer_summaries");
  Timer _timer(&infer_timer);

  if (summary_list.Empty())
    return;

  Variable *function = summary_list[0]->GetId()->BaseVar();
  Vector<BlockCFG*> *annot_list = BodyAnnotCache.Lookup(function->GetName());

  // all traces which might refer to the result of pointer arithmetic.
  Vector<Exp*> arithmetic_list;
  ArithmeticEscape escape(function, arithmetic_list);

  // initial pass over the CFGs to get traces used in pointer arithmetic.
  for (size_t ind = 0; ind < summary_list.Size(); ind++) {
    BlockSummary *sum = summary_list[ind];

    BlockCFG *cfg = sum->GetMemory()->GetCFG();
    for (size_t eind = 0; eind < cfg->GetEdgeCount(); eind++) {
      PEdge *edge = cfg->GetEdge(eind);

      if (PEdgeAssign *assign_edge = edge->IfAssign()) {
        Exp *left = assign_edge->GetLeftSide();
        Exp *right = assign_edge->GetRightSide();
        ProcessArithmeticAssign(&escape, cfg->GetId(), left, right);
      }
    }
  }

  for (size_t ind = 0; ind < summary_list.Size(); ind++) {
    BlockSummary *sum = summary_list[ind];
    BlockMemory *mcfg = sum->GetMemory();
    BlockCFG *cfg = mcfg->GetCFG();

    // accumulate all the assertions at points in the CFG.
    Vector<AssertInfo> asserts;

    // add assertions at function exit for any postconditions.
    if (cfg->GetId()->Kind() == B_Function) {
      for (size_t aind = 0; annot_list && aind < annot_list->Size(); aind++) {
        BlockCFG *annot_cfg = annot_list->At(aind);

        if (annot_cfg->GetAnnotationKind() != AK_Postcondition)
          continue;
        if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
          AssertInfo info;
          info.kind = ASK_Annotation;
          info.cls = ASC_Check;
          info.point = cfg->GetExitPoint();
          info.bit = bit;
          asserts.PushBack(info);
        }
      }
    }

    // add assertions for any point annotations within the CFG.
    for (size_t pind = 0; pind < cfg->GetPointAnnotationCount(); pind++) {
      PointAnnotation pann = cfg->GetPointAnnotation(pind);
      BlockCFG *annot_cfg = GetAnnotationCFG(pann.annot);
      if (!annot_cfg) continue;

      if (annot_cfg->GetAnnotationKind() != AK_Assert)
        continue;

      if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
        AssertInfo info;
        info.kind = ASK_Annotation;
        info.cls = ASC_Check;
        info.point = pann.point;
        info.bit = bit;
        asserts.PushBack(info);
      }
    }

    for (size_t eind = 0; eind < cfg->GetEdgeCount(); eind++) {
      PEdge *edge = cfg->GetEdge(eind);
      PPoint point = edge->GetSource();

      if (PEdgeAnnotation *nedge = edge->IfAnnotation()) {
        // add an assertion for this annotation if it is not an assume.
        BlockCFG *annot_cfg = GetAnnotationCFG(nedge->GetAnnotationId());
        if (!annot_cfg) continue;

        if (annot_cfg->GetAnnotationKind() != AK_Assert &&
            annot_cfg->GetAnnotationKind() != AK_AssertRuntime) {
          continue;
        }

        if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
          AssertInfo info;
          info.kind = (annot_cfg->GetAnnotationKind() == AK_Assert)
            ? ASK_Annotation : ASK_AnnotationRuntime;
          info.cls = ASC_Check;
          info.point = point;
          info.bit = bit;
          asserts.PushBack(info);
        }
      }

      // add assertions for any invariants affected by a write.

      Exp *left = NULL;
      if (PEdgeAssign *nedge = edge->IfAssign())
        left = nedge->GetLeftSide();
      if (PEdgeCall *nedge = edge->IfCall())
        left = nedge->GetReturnValue();

      // for now our detection of affected invariants is pretty crude:
      // writes to fields can affect invariants on the field's containing
      // type which mention that field, and writes to global variables can
      // affect invariants on that global. TODO: pin this down once we draw
      // a precise line between which invariants can and can't be checked.

      if (left && left->IsFld()) {
        ExpFld *nleft = left->AsFld();
        String *csu_name = nleft->GetField()->GetCSUType()->GetCSUName();
        Vector<BlockCFG*> *comp_annot_list = CompAnnotCache.Lookup(csu_name);

        for (size_t aind = 0; comp_annot_list &&
                              aind < comp_annot_list->Size(); aind++) {
          BlockCFG *annot_cfg = comp_annot_list->At(aind);

          if (annot_cfg->GetAnnotationKind() != AK_Invariant)
            continue;
          Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);
          if (!bit) continue;

          Vector<Exp*> lval_list;
          LvalListVisitor visitor(&lval_list);
          bit->DoVisit(&visitor);

          bool uses_field = false;
          for (size_t ind = 0; ind < lval_list.Size(); ind++) {
            if (ExpFld *lval = lval_list[ind]->IfFld()) {
              if (lval->GetField() == nleft->GetField())
                uses_field = true;
            }
          }

          if (uses_field) {
            // this is a type invariant which uses the field being written
            // as an lvalue. we need to assert this write preserves
            // the invariant.
            BlockId *id = annot_cfg->GetId();
            Variable *this_var = Variable::Make(id, VK_This, NULL, 0, NULL);
            Exp *this_exp = Exp::MakeVar(this_var);
            Exp *this_drf = Exp::MakeDrf(this_exp);

            Bit *new_bit = BitReplaceExp(bit, this_drf, nleft->GetTarget());

            AssertInfo info;
            info.kind = ASK_Invariant;
            info.cls = ASC_Check;
            info.point = point;
            info.bit = new_bit;
            asserts.PushBack(info);
          }
        }

        CompAnnotCache.Release(csu_name);
      }

      if (left && left->IsVar()) {
        Variable *var = left->AsVar()->GetVariable();
        if (var->Kind() == VK_Glob) {
          Vector<BlockCFG*> *glob_annot_list =
            InitAnnotCache.Lookup(var->GetName());

          for (size_t aind = 0; glob_annot_list &&
                                aind < glob_annot_list->Size(); aind++) {
            BlockCFG *annot_cfg = glob_annot_list->At(aind);

            Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);
            if (!bit) continue;

            AssertInfo info;
            info.kind = ASK_Invariant;
            info.cls = ASC_Check;
            info.point = point;
            info.bit = bit;
            asserts.PushBack(info);
          }

          InitAnnotCache.Release(var->GetName());
        }
      }

      if (PEdgeCall *nedge = edge->IfCall()) {
        // add assertions for any callee preconditions.

        // pull preconditions from both direct and indirect calls.
        Vector<Variable*> callee_names;

        if (Variable *callee = nedge->GetDirectFunction()) {
          callee_names.PushBack(callee);
        }
        else {
          CallEdgeSet *callees = CalleeCache.Lookup(function);

          if (callees) {
            for (size_t cind = 0; cind < callees->GetEdgeCount(); cind++) {
              const CallEdge &cedge = callees->GetEdge(cind);
              if (cedge.where.id == cfg->GetId() && cedge.where.point == point)
                callee_names.PushBack(cedge.callee);
            }
          }

          // CalleeCache release is below.
        }

        for (size_t cind = 0; cind < callee_names.Size(); cind++) {
          String *callee = callee_names[cind]->GetName();
          Vector<BlockCFG*> *call_annot_list = BodyAnnotCache.Lookup(callee);

          for (size_t aind = 0;
               call_annot_list && aind < call_annot_list->Size(); aind++) {
            BlockCFG *annot_cfg = call_annot_list->At(aind);

            if (annot_cfg->GetAnnotationKind() != AK_Precondition)
              continue;
            if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
              ConvertCallsiteMapper mapper(cfg, point, false);
              Bit *caller_bit = bit->DoMap(&mapper);
              if (!caller_bit)
                continue;

              AssertInfo info;
              info.kind = ASK_Annotation;
              info.cls = ASC_Check;
              info.point = point;
              info.bit = caller_bit;
              asserts.PushBack(info);
            }
          }

          BodyAnnotCache.Release(callee);
        }

        if (!nedge->GetDirectFunction())
          CalleeCache.Release(function);
      }

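      // run the scan visitors over the edge to accumulate buffer access,
      // integer and GC safety assertions.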
      BufferScanVisitor write_visitor(asserts, arithmetic_list, point, true);
      BufferScanVisitor read_visitor(asserts, arithmetic_list, point, false);
      IntegerScanVisitor integer_visitor(asserts, point);
      GCScanVisitor gcsafe_visitor(asserts, point);

      // only look at the written lvalues for the write visitor.
      if (PEdgeAssign *assign = edge->IfAssign())
        write_visitor.Visit(assign->GetLeftSide());
      if (PEdgeCall *call = edge->IfCall()) {
        if (Exp *returned = call->GetReturnValue())
          write_visitor.Visit(returned);
      }

      edge->DoVisit(&read_visitor);

      // disable integer overflow visitor for now.
      // edge->DoVisit(&integer_visitor);

      edge->DoVisit(&gcsafe_visitor);
    }

    if (cfg->GetId()->Kind() == B_Function) {
      BlockModset *modset = GetBlockModset(cfg->GetId());
      if (modset->CanGC()) {
        AssertInfo info;
        info.kind = ASK_CanGC;
        info.cls = ASC_Check;
        info.point = cfg->GetExitPoint();

        String *name = cfg->GetId()->BaseVar()->GetName();
        Variable *var = Variable::Make(NULL, VK_Glob, name, 0, name);
        Exp *varexp = Exp::MakeVar(var);
        Exp *gcsafe = Exp::MakeGCSafe(varexp, false);
        info.bit = Bit::MakeVar(gcsafe);
        asserts.PushBack(info);
      }
    }

    MarkRedundantAssertions(mcfg, asserts);

    // move the finished assertion list into the summary.
    for (size_t ind = 0; ind < asserts.Size(); ind++) {
      const AssertInfo &info = asserts[ind];
      sum->AddAssert(info.kind, info.cls, info.point, info.bit);
    }
  }

  // infer delta and termination invariants for all summaries.
  for (size_t ind = 0; ind < summary_list.Size(); ind++)
    InferInvariants(summary_list[ind], arithmetic_list);

  BodyAnnotCache.Release(function->GetName());
}
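Example #9
  // map exp to the set of expressions it may be substituted with by
  // following the known equalities, adding the results (and exp itself)
  // to res.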
  void MultiMap(Exp *exp, Vector<Exp*> *res)
  {
    // follow possible equalities for lvalues and non-arithmetic binops
    // appearing in the input formula.
    bool handle_exp = false;

    if (exp->IsLvalue() || exp->IsBound() || exp->IsTerminate())
      handle_exp = true;
    else if (ExpBinop *nexp = exp->IfBinop()) {
      switch (nexp->GetBinopKind()) {
      case B_Mod:
      case B_BitwiseAnd:
      case B_BitwiseOr:
      case B_BitwiseXOr:
      case B_Min:
      case B_Max:
        handle_exp = true;
        break;
      default:
        break;
      }
    }

    /*
    // special cased constant substitution. if we see an 'n < val'
    // or 'n+1 <= val' and a comparison 'n+1 ~ oval', try substituting
    // 'oval <= val'.

    if (ExpBinop *nexp = exp->IfBinop()) {
      long left_value, right_value;
      BinopKind kind = nexp->GetBinopKind();
      Exp *left = nexp->GetLeftOperand();
      Exp *right = nexp->GetRightOperand();

      if ((kind == B_LessThan || kind == B_LessEqual) &&
          left->IsInt() && left->AsInt()->GetInt(&left_value)) {
        for (size_t ind = 0; ind < equalities.Size(); ind++) {
          const BaseCompare &equality = equalities[ind];
          if (equality.target->IsInt() &&
              equality.target->AsInt()->GetInt(&right_value) &&
              right_value == left_value + (kind == B_LessThan ? 1 : 0)) {
            Exp *new_exp = Exp::MakeBinop(B_LessEqual, equality.source, right);
            ExpAddResult(new_exp, res);
          }
        }
      }
    }
    */

    if (!handle_exp) {
      ExpAddResult(exp, res);
      return;
    }

    bool is_loop = (mcfg->GetId()->Kind() == B_Loop);

    // for loops, only follow equalities for terms which change with
    // each iteration.
    if (is_loop && mcfg->IsExpPreserved(exp)) {
      ExpAddResult(exp, res);
      return;
    }

    ExpAddResult(exp, res);

    // try to substitute the expression for anything it might share
    // an ==/<=/>= relationship with.

    for (size_t ind = 0; ind < equalities.Size(); ind++) {
      const BaseCompare &equality = equalities[ind];

      // watch for recursion when following compares.
      if (expand_stack.Contains(equality.test))
        continue;

      // check if there is a match between the exp and equality source.
      Exp *new_target = MatchEquality(exp, equality);

      if (new_target) {
        // keep track of the tests we use during recursive mapping.
        expand_stack.PushBack(equality.test);

        // list to hold result of mapping this substitution.
        Vector<Exp*> sub_res;

        EqualityMapper sub_mapper(mcfg, verbose, equalities, expand_stack);
        new_target->DoMultiMap(&sub_mapper, &sub_res);

        expand_stack.PopBack();

        // for functions, filter out substitutions which resulted in an
        // increase in the number of leaf terms in the expression. this both
        // gets rid of results we don't care about and prevents exponential
        // blowup. we don't need to worry about this for loops because we
        // only follow substitutions for values which change in each
        // iteration, which is a similar (and less crude) filter.
        size_t base_term_count = exp->TermCount();

        if (verbose)
          logout << "SUFFICIENT: Substituted: " << exp
                 << " [" << sub_res.Size() << "]" << endl;

        for (size_t rind = 0; rind < sub_res.Size(); rind++) {
          Exp *res_exp = sub_res[rind];

          if (is_loop || res_exp->TermCount() <= base_term_count) {
            if (verbose)
              logout << "  Added: " << res_exp << endl;
            ExpAddResult(res_exp, res);
          }
          else {
            if (verbose)
              logout << "  Dropped: " << res_exp << endl;
          }
        }
      }
    }
  }
Example #10
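// try to construct a Where directive for propagating bit, whose value
// depends on lval: an invariant on the heap, a precondition on the
// callers, or a postcondition on a callee. returns a WhereNone to cut
// the search off, or NULL if no propagation applies.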
Where* CheckerPropagate::TryPropagate(Bit *bit, Exp *lval)
{
  BlockMemory *mcfg = m_frame->Memory();

  TypeCSU *csu = NULL;
  Exp *csu_lval = NULL;

  if (UseHeapExp(lval, &csu, &csu_lval)) {
    // do the heap propagation unless we are trying to push heap data
    // up into the caller.
    if (!m_prefer_precondition ||
        !UseCallerExp(lval, m_frame->Kind() == B_Function) ||
        (m_frame->Kind() == B_Loop && !mcfg->IsExpPreserved(lval))) {
      Where *res = WhereInvariant::Make(csu, csu_lval, bit);

      if (res)
        return res;

      // fall through, we might still be able to treat this as a precondition.
    }
  }

  if (UseCallerExp(lval, m_frame->Kind() == B_Function))
    return WherePrecondition::Make(m_frame->Memory(), bit);

  if (PPoint point = UseCalleeExp(lval)) {

    // fail propagation if this is from a callee later than the point
    // where this propagation occurs. this can come up when generating
    // sufficient conditions.
    if (point > m_point || (point == m_point && !m_allow_point))
      return NULL;

    // cut off propagation if the buffer came from a primitive memory
    // allocator. if we find a sufficient condition that does not
    // mention the allocator we could continue propagation.

    PEdge *edge = mcfg->GetCFG()->GetSingleOutgoingEdge(point);
    PEdgeCall *edge_call = edge->IfCall();
    Variable *callee = edge_call ? edge_call->GetDirectFunction() : NULL;

    Exp *callee_base;
    Exp *callee_size;

    if (callee && GetAllocationFunction(callee, &callee_base, &callee_size)) {
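      // we only needed to know whether the callee is an allocator;
      // release the references for the returned base and size.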
      callee_base->DecRef();
      callee_size->DecRef();

      if (lval->IsBound())
        lval = lval->GetLvalTarget();

      // ignore this if it refers to fields or other structures
      // in the result of the allocation; this data is either
      // uninitialized or zeroed, and either way we don't care.
      if (ExpClobber *nlval = lval->IfClobber()) {
        Exp *callee_lval = nlval->GetCallee();
        Exp *callee_target = NULL;

        if (nlval->GetValueKind() == NULL) {
          callee_target = callee_lval;
        }
        else {
          // skip the first dereference. for terminators we still depend on
          // the initial contents of the allocated buffer.
          if (ExpExit *ncallee = callee_lval->IfExit())
            callee_target = ncallee->GetTarget();
        }

        if (callee_target) {
          while (callee_target->IsFld())
            callee_target = callee_target->GetLvalTarget();
          if (callee_target->IsExit())
            return new WhereNone(RK_None);
        }
      }

      // watch for accessing indexes of a buffer returned via the allocator,
      // which currently aren't mapped back into the callee correctly.
      // TODO: fix hack.
      if (lval->IsDrf() && lval->GetLvalTarget()->IsIndex())
        return new WhereNone(RK_None);

      return new WhereNone(RK_Finished);
    }

    if (callee && IsCutoffFunction(callee))
      return new WhereNone(RK_Finished);

    return WherePostcondition::Make(m_frame, point, bit);
  }

  return NULL;
}