Example #1
void ExprEngine::
VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
                              ExplodedNode *Pred,
                              ExplodedNodeSet &Dst) {
  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);

  QualType T = Ex->getTypeOfArgument();
  
  if (Ex->getKind() == UETT_SizeOf) {
    if (!T->isIncompleteType() && !T->isConstantSizeType()) {
      assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");
      
      // FIXME: Add support for VLA type arguments and VLA expressions.
      // When that happens, we should probably refactor VLASizeChecker's code.
      return;
    }
    else if (T->getAs<ObjCObjectType>()) {
      // Some code tries to take the sizeof of an ObjCObjectType, relying on
      // the fact that the compiler has laid out its representation.  Just
      // report Unknown for these.
      return;
    }
  }
  
  APSInt Value = Ex->EvaluateKnownConstInt(getContext());
  CharUnits amt = CharUnits::fromQuantity(Value.getZExtValue());
  
  ProgramStateRef state = Pred->getState();
  state = state->BindExpr(Ex, Pred->getLocationContext(),
                          svalBuilder.makeIntVal(amt.getQuantity(),
                                                     Ex->getType()));
  Bldr.generateNode(Ex, Pred, state);
}
Example #2
long long clang_Type_getSizeOf(CXType T) {
  if (T.kind == CXType_Invalid)
    return CXTypeLayoutError_Invalid;
  ASTContext &Ctx = cxtu::getASTUnit(GetTU(T))->getASTContext();
  QualType QT = GetQualType(T);
  // [expr.sizeof] p2: if reference type, return size of referenced type
  if (QT->isReferenceType())
    QT = QT.getNonReferenceType();
  // [expr.sizeof] p1: return -1 on: func, incomplete, bitfield, incomplete
  //                   enumeration
  // Note: We get the cxtype, not the cxcursor, so we can't call
  //       FieldDecl->isBitField()
  // [expr.sizeof] p3: pointer ok, function not ok.
  // [gcc extension] lib/AST/ExprConstant.cpp:1372 HandleSizeof : vla == error
  if (QT->isIncompleteType())
    return CXTypeLayoutError_Incomplete;
  if (QT->isDependentType())
    return CXTypeLayoutError_Dependent;
  if (!QT->isConstantSizeType())
    return CXTypeLayoutError_NotConstantSize;
  // [gcc extension] lib/AST/ExprConstant.cpp:1372
  //                 HandleSizeof : {voidtype,functype} == 1
  // not handled by ASTContext.cpp:1313 getTypeInfoImpl
  if (QT->isVoidType() || QT->isFunctionType())
    return 1;
  return Ctx.getTypeSizeInChars(QT).getQuantity();
}
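
The comments in clang_Type_getSizeOf above spell out its error contract: the
negative CXTypeLayoutError_* codes for invalid, incomplete, dependent, and
non-constant-size types. Below is a minimal sketch of a libclang caller that
checks those codes; the reportSizeOf helper and the cursor it receives are
assumptions made for illustration, not part of the example above.

// Hypothetical libclang caller; `cursor` is assumed to refer to a declaration
// obtained elsewhere (e.g. while visiting a parsed translation unit).
#include <clang-c/Index.h>
#include <cstdio>

static void reportSizeOf(CXCursor cursor) {
  CXType type = clang_getCursorType(cursor);
  long long size = clang_Type_getSizeOf(type);
  if (size == CXTypeLayoutError_Incomplete)
    std::printf("sizeof: type is incomplete\n");
  else if (size == CXTypeLayoutError_Dependent)
    std::printf("sizeof: type is dependent\n");
  else if (size < 0)
    std::printf("sizeof: layout error %lld\n", size);
  else
    std::printf("sizeof = %lld bytes\n", size);
}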
Example #3
static long long validateFieldParentType(CXCursor PC, CXType PT) {
  if (clang_isInvalid(PC.kind))
    return CXTypeLayoutError_Invalid;
  const RecordDecl *RD =
        dyn_cast_or_null<RecordDecl>(cxcursor::getCursorDecl(PC));
  // validate parent declaration
  if (!RD || RD->isInvalidDecl())
    return CXTypeLayoutError_Invalid;
  RD = RD->getDefinition();
  if (!RD)
    return CXTypeLayoutError_Incomplete;
  if (RD->isInvalidDecl())
    return CXTypeLayoutError_Invalid;
  // validate parent type
  QualType RT = GetQualType(PT);
  if (RT->isIncompleteType())
    return CXTypeLayoutError_Incomplete;
  if (RT->isDependentType())
    return CXTypeLayoutError_Dependent;
  // We recurse into all record fields to detect incomplete and dependent types.
  long long Error = visitRecordForValidation(RD);
  if (Error < 0)
    return Error;
  return 0;
}
Example #4
static bool CheckElementalArguments(CodeGenModule &CGM, const FunctionDecl *FD,
                                    llvm::Function *Fn, bool &HasThis) {
  // Check the return type.
  QualType RetTy = FD->getReturnType();
  if (RetTy->isAggregateType()) {
    CGM.Error(FD->getLocation(), "the return type for this elemental "
                                 "function is not supported yet");
    return false;
  }

  // Check each parameter type.
  for (unsigned I = 0, E = FD->param_size(); I < E; ++I) {
    const ParmVarDecl *VD = FD->getParamDecl(I);
    QualType Ty = VD->getType();
    assert(!Ty->isIncompleteType() && "incomplete type");
    if (Ty->isAggregateType()) {
      CGM.Error(VD->getLocation(), "the parameter type for this elemental "
                                   "function is not supported yet");
      return false;
    }
  }

  HasThis = isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance();

  // At this point, no passing struct arguments by value.
  unsigned NumArgs = FD->param_size();
  unsigned NumLLVMArgs = Fn->arg_size();

  // There is a single implicit 'this' parameter.
  if (HasThis && (NumArgs + 1 == NumLLVMArgs))
    return true;

  return NumArgs == NumLLVMArgs;
}
Example #5
/// @brief Ensure that the type T is a complete type. 
///
/// This routine checks whether the type @p T is complete in any
/// context where a complete type is required. If @p T is a complete
/// type, returns false. If @p T is a class template specialization,
/// this routine then attempts to perform class template
/// instantiation. If instantiation fails, or if @p T is incomplete
/// and cannot be completed, issues the diagnostic @p diag (giving it
/// the type @p T) and returns true.
///
/// @param Loc  The location in the source that the incomplete type
/// diagnostic should refer to.
///
/// @param T  The type that this routine is examining for completeness.
///
/// @param diag The diagnostic value (e.g., 
/// @c diag::err_typecheck_decl_incomplete_type) that will be used
/// for the error message if @p T is incomplete.
///
/// @param Range1  An optional range in the source code that will be a
/// part of the "incomplete type" error message.
///
/// @param Range2  An optional range in the source code that will be a
/// part of the "incomplete type" error message.
///
/// @param PrintType If non-NULL, the type that should be printed
/// instead of @p T. This parameter should be used when the type that
/// we're checking for incompleteness isn't the type that should be
/// displayed to the user, e.g., when T is a type and PrintType is a
/// pointer to T.
///
/// @returns @c true if @p T is incomplete and a diagnostic was emitted,
/// @c false otherwise.
bool Sema::RequireCompleteType(SourceLocation Loc, QualType T, unsigned diag,
                                  SourceRange Range1, SourceRange Range2,
                                  QualType PrintType) {
  // If we have a complete type, we're done.
  if (!T->isIncompleteType())
    return false;

  // If we have a class template specialization or a class member of a
  // class template specialization, try to instantiate it.
  if (const RecordType *Record = T->getAsRecordType()) {
    if (ClassTemplateSpecializationDecl *ClassTemplateSpec
          = dyn_cast<ClassTemplateSpecializationDecl>(Record->getDecl())) {
      if (ClassTemplateSpec->getSpecializationKind() == TSK_Undeclared) {
        // Update the class template specialization's location to
        // refer to the point of instantiation.
        if (Loc.isValid())
          ClassTemplateSpec->setLocation(Loc);
        return InstantiateClassTemplateSpecialization(ClassTemplateSpec,
                                             /*ExplicitInstantiation=*/false);
      }
    } else if (CXXRecordDecl *Rec 
                 = dyn_cast<CXXRecordDecl>(Record->getDecl())) {
      if (CXXRecordDecl *Pattern = Rec->getInstantiatedFromMemberClass()) {
        // Find the class template specialization that surrounds this
        // member class.
        ClassTemplateSpecializationDecl *Spec = 0;
        for (DeclContext *Parent = Rec->getDeclContext(); 
             Parent && !Spec; Parent = Parent->getParent())
          Spec = dyn_cast<ClassTemplateSpecializationDecl>(Parent);
        assert(Spec && "Not a member of a class template specialization?");
        return InstantiateClass(Loc, Rec, Pattern,
                                Spec->getTemplateArgs(), 
                                Spec->getNumTemplateArgs());
      }
    }
  }

  if (PrintType.isNull())
    PrintType = T;

  // We have an incomplete type. Produce a diagnostic.
  Diag(Loc, diag) << PrintType << Range1 << Range2;

  // If the type was a forward declaration of a class/struct/union
  // type, produce a note pointing at the forward declaration (or at the
  // point where the type is currently being defined).
  const TagType *Tag = 0;
  if (const RecordType *Record = T->getAsRecordType())
    Tag = Record;
  else if (const EnumType *Enum = T->getAsEnumType())
    Tag = Enum;

  if (Tag && !Tag->getDecl()->isInvalidDecl())
    Diag(Tag->getDecl()->getLocation(), 
         Tag->isBeingDefined() ? diag::note_type_being_defined
                               : diag::note_forward_declaration)
        << QualType(Tag, 0);

  return true;
}
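
The Doxygen comment above documents the calling convention: the use location,
the type being checked, a diagnostic ID such as
diag::err_typecheck_decl_incomplete_type, two optional source ranges, and an
optional type to print instead of T. A hypothetical call site using that same
(old) signature might look like the sketch below; the
CheckVariableHasCompleteType wrapper is invented purely for illustration.

// Hypothetical Sema helper, written only to show how the routine above is
// meant to be called; it is not part of the original code.
bool Sema::CheckVariableHasCompleteType(SourceLocation Loc, QualType T) {
  if (RequireCompleteType(Loc, T, diag::err_typecheck_decl_incomplete_type,
                          SourceRange(), SourceRange(), QualType()))
    return true;  // T was incomplete; a diagnostic has already been emitted.
  return false;   // T is complete (possibly after template instantiation).
}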
Example #6
bool TypeResolver::requireCompleteType(SourceLocation loc, QualType Q, int msg) {
    if (Q.isIncompleteType()) {
        StringBuilder name;
        Q.DiagName(name);
        Diags.Report(loc, msg) << name;
        return false;
    }
    return true;
}
Example #7
bool Sema::RequireCompleteTypeRoger(QualType T, RogerRequireCompleteReason RogerOnlyInheritance) {
    if (!T->isIncompleteType()) {
        return false;
    }
    if (T.getCanonicalType()->getTypeClass() != Type::Record) {
        return false;
    }
    RecordDecl *Rec = cast<RecordType>(T.getCanonicalType())->getDecl();
    return RequireCompleteRecordRoger(Rec, RogerOnlyInheritance);
}
Example #8
void CastSizeChecker::checkPreStmt(const CastExpr *CE,
                                   CheckerContext &C) const {
  const Expr *E = CE->getSubExpr();
  ASTContext &Ctx = C.getASTContext();
  QualType ToTy = Ctx.getCanonicalType(CE->getType());
  const PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());

  if (!ToPTy)
    return;

  QualType ToPointeeTy = ToPTy->getPointeeType();

  // Only perform the check if 'ToPointeeTy' is a complete type.
  if (ToPointeeTy->isIncompleteType())
    return;

  ProgramStateRef state = C.getState();
  const MemRegion *R = state->getSVal(E, C.getLocationContext()).getAsRegion();
  if (!R)
    return;

  const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R);
  if (!SR)
    return;

  SValBuilder &svalBuilder = C.getSValBuilder();
  SVal extent = SR->getExtent(svalBuilder);
  const llvm::APSInt *extentInt = svalBuilder.getKnownValue(state, extent);
  if (!extentInt)
    return;

  CharUnits regionSize = CharUnits::fromQuantity(extentInt->getSExtValue());
  CharUnits typeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);

  // Ignore void, and a few other un-sizeable types.
  if (typeSize.isZero())
    return;

  if (regionSize % typeSize == 0)
    return;

  if (evenFlexibleArraySize(Ctx, regionSize, typeSize, ToPointeeTy))
    return;

  if (ExplodedNode *errorNode = C.generateErrorNode()) {
    if (!BT)
      BT.reset(new BuiltinBug(this, "Cast region with wrong size.",
                                    "Cast a region whose size is not a multiple"
                                    " of the destination type size."));
    auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), errorNode);
    R->addRange(CE->getSourceRange());
    C.emitReport(std::move(R));
  }
}
Example #9
DefinedOrUnknownSVal TypedValueRegion::getExtent(SValBuilder &svalBuilder) const {
  ASTContext &Ctx = svalBuilder.getContext();
  QualType T = getDesugaredValueType(Ctx);

  if (isa<VariableArrayType>(T))
    return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
  if (T->isIncompleteType())
    return UnknownVal();

  CharUnits size = Ctx.getTypeSizeInChars(T);
  QualType sizeTy = svalBuilder.getArrayIndexType();
  return svalBuilder.makeIntVal(size.getQuantity(), sizeTy);
}
Example #10
/// Compute a raw byte offset from a base region.  Used for array bounds
/// checking.
RegionRawOffsetV2 RegionRawOffsetV2::computeOffset(ProgramStateRef state,
                                                   SValBuilder &svalBuilder,
                                                   SVal location)
{
  const MemRegion *region = location.getAsRegion();
  SVal offset = UndefinedVal();
  
  while (region) {
    switch (region->getKind()) {
      default: {
        if (const SubRegion *subReg = dyn_cast<SubRegion>(region)) {
          offset = getValue(offset, svalBuilder);
          if (!offset.isUnknownOrUndef())
            return RegionRawOffsetV2(subReg, offset);
        }
        return RegionRawOffsetV2();
      }
      case MemRegion::ElementRegionKind: {
        const ElementRegion *elemReg = cast<ElementRegion>(region);
        SVal index = elemReg->getIndex();
        if (!index.getAs<NonLoc>())
          return RegionRawOffsetV2();
        QualType elemType = elemReg->getElementType();
        // If the element is an incomplete type, go no further.
        ASTContext &astContext = svalBuilder.getContext();
        if (elemType->isIncompleteType())
          return RegionRawOffsetV2();
        
        // Update the offset.
        offset = addValue(state,
                          getValue(offset, svalBuilder),
                          scaleValue(state,
                                     index.castAs<NonLoc>(),
                                     astContext.getTypeSizeInChars(elemType),
                                     svalBuilder),
                          svalBuilder);

        if (offset.isUnknownOrUndef())
          return RegionRawOffsetV2();

        region = elemReg->getSuperRegion();
        continue;
      }
    }
  }
  return RegionRawOffsetV2();
}
Example #11
// Based on QualType::isTrivial.
bool isTriviallyDefaultConstructible(QualType Type, const ASTContext &Context) {
  if (Type.isNull())
    return false;

  if (Type->isArrayType())
    return isTriviallyDefaultConstructible(Context.getBaseElementType(Type),
                                           Context);

  // Return false for incomplete types after skipping any incomplete array
  // types which are expressly allowed by the standard and thus our API.
  if (Type->isIncompleteType())
    return false;

  if (Context.getLangOpts().ObjCAutoRefCount) {
    switch (Type.getObjCLifetime()) {
    case Qualifiers::OCL_ExplicitNone:
      return true;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
    case Qualifiers::OCL_Autoreleasing:
      return false;

    case Qualifiers::OCL_None:
      if (Type->isObjCLifetimeType())
        return false;
      break;
    }
  }

  QualType CanonicalType = Type.getCanonicalType();
  if (CanonicalType->isDependentType())
    return false;

  // As an extension, Clang treats vector types as Scalar types.
  if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
    return true;

  if (const auto *RT = CanonicalType->getAs<RecordType>()) {
    return recordIsTriviallyDefaultConstructible(*RT->getDecl(), Context);
  }

  // No other types can match.
  return false;
}
Example #12
long long clang_Type_getOffsetOf(CXType PT, const char *S) {
  // check that PT is not incomplete/dependent
  CXCursor PC = clang_getTypeDeclaration(PT);
  if (clang_isInvalid(PC.kind))
    return CXTypeLayoutError_Invalid;
  const RecordDecl *RD =
        dyn_cast_or_null<RecordDecl>(cxcursor::getCursorDecl(PC));
  if (!RD || RD->isInvalidDecl())
    return CXTypeLayoutError_Invalid;
  RD = RD->getDefinition();
  if (!RD)
    return CXTypeLayoutError_Incomplete;
  if (RD->isInvalidDecl())
    return CXTypeLayoutError_Invalid;
  QualType RT = GetQualType(PT);
  if (RT->isIncompleteType())
    return CXTypeLayoutError_Incomplete;
  if (RT->isDependentType())
    return CXTypeLayoutError_Dependent;
  // We recurse into all record fields to detect incomplete and dependent types.
  long long Error = visitRecordForValidation(RD);
  if (Error < 0)
    return Error;
  if (!S)
    return CXTypeLayoutError_InvalidFieldName;
  // lookup field
  ASTContext &Ctx = cxtu::getASTUnit(GetTU(PT))->getASTContext();
  IdentifierInfo *II = &Ctx.Idents.get(S);
  DeclarationName FieldName(II);
  RecordDecl::lookup_const_result Res = RD->lookup(FieldName);
  // If a field of the parent record is incomplete, the lookup will fail,
  // and we would return InvalidFieldName instead of Incomplete.
  // However, this erroneous result does protect against a hidden assertion
  // failure in the RecordLayoutBuilder.
  if (Res.size() != 1)
    return CXTypeLayoutError_InvalidFieldName;
  if (const FieldDecl *FD = dyn_cast<FieldDecl>(Res.front()))
    return Ctx.getFieldOffset(FD);
  if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(Res.front()))
    return Ctx.getFieldOffset(IFD);
  // we don't want any other Decl Type.
  return CXTypeLayoutError_InvalidFieldName;
}
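
As the code above shows, clang_Type_getOffsetOf returns Ctx.getFieldOffset,
i.e. an offset expressed in bits, and reuses the negative CXTypeLayoutError_*
codes, adding CXTypeLayoutError_InvalidFieldName for a missing or unsupported
field. A minimal hypothetical caller is sketched below; the record type it
receives and the field name "length" are assumptions made for illustration.

// Hypothetical caller; `recordType` is assumed to be the CXType of a
// complete, non-dependent record declaration with a field named "length".
#include <clang-c/Index.h>
#include <cstdio>

static void reportFieldOffset(CXType recordType) {
  long long bits = clang_Type_getOffsetOf(recordType, "length");
  if (bits == CXTypeLayoutError_InvalidFieldName)
    std::printf("offsetof: no such field\n");
  else if (bits < 0)
    std::printf("offsetof: layout error %lld\n", bits);
  else
    std::printf("offsetof = %lld bits (%lld bytes)\n", bits, bits / 8);
}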
Example #13
long long clang_Type_getAlignOf(CXType T) {
  if (T.kind == CXType_Invalid)
    return CXTypeLayoutError_Invalid;
  ASTContext &Ctx = cxtu::getASTUnit(GetTU(T))->getASTContext();
  QualType QT = GetQualType(T);
  // [expr.alignof] p1: return size_t value for complete object type, reference
  //                    or array.
  // [expr.alignof] p3: if reference type, return size of referenced type
  if (QT->isReferenceType())
    QT = QT.getNonReferenceType();
  if (QT->isIncompleteType())
    return CXTypeLayoutError_Incomplete;
  if (QT->isDependentType())
    return CXTypeLayoutError_Dependent;
  // Exceptions by GCC extension - see ASTContext.cpp:1313 getTypeInfoImpl
  // if (QT->isFunctionType()) return 4; // Bug #15511 - should be 1
  // if (QT->isVoidType()) return 1;
  return Ctx.getTypeAlignInChars(QT).getQuantity();
}
Example #14
static long long visitRecordForValidation(const RecordDecl *RD) {
  for (const auto *I : RD->fields()){
    QualType FQT = I->getType();
    if (FQT->isIncompleteType())
      return CXTypeLayoutError_Incomplete;
    if (FQT->isDependentType())
      return CXTypeLayoutError_Dependent;
    // recurse
    if (const RecordType *ChildType = I->getType()->getAs<RecordType>()) {
      if (const RecordDecl *Child = ChildType->getDecl()) {
        long long ret = visitRecordForValidation(Child);
        if (ret < 0)
          return ret;
      }
    }
    // else try next field
  }
  return 0;
}
Example #15
void ExprEngine::
VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
                              ExplodedNode *Pred,
                              ExplodedNodeSet &Dst) {
  // FIXME: Prechecks eventually go in ::Visit().
  ExplodedNodeSet CheckedSet;
  getCheckerManager().runCheckersForPreStmt(CheckedSet, Pred, Ex, *this);

  ExplodedNodeSet EvalSet;
  StmtNodeBuilder Bldr(CheckedSet, EvalSet, *currBldrCtx);

  QualType T = Ex->getTypeOfArgument();

  for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
       I != E; ++I) {
    if (Ex->getKind() == UETT_SizeOf) {
      if (!T->isIncompleteType() && !T->isConstantSizeType()) {
        assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");

        // FIXME: Add support for VLA type arguments and VLA expressions.
        // When that happens, we should probably refactor VLASizeChecker's code.
        continue;
      } else if (T->getAs<ObjCObjectType>()) {
        // Some code tries to take the sizeof of an ObjCObjectType, relying on
        // the fact that the compiler has laid out its representation.  Just
        // report Unknown for these.
        continue;
      }
    }

    APSInt Value = Ex->EvaluateKnownConstInt(getContext());
    CharUnits amt = CharUnits::fromQuantity(Value.getZExtValue());

    ProgramStateRef state = (*I)->getState();
    state = state->BindExpr(Ex, (*I)->getLocationContext(),
                            svalBuilder.makeIntVal(amt.getQuantity(),
                                                   Ex->getType()));
    Bldr.generateNode(Ex, *I, state);
  }

  getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, Ex, *this);
}
Example #16
static long long visitRecordForValidation(const RecordDecl *RD) {
  for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
       I != E; ++I){
    QualType FQT = (*I)->getType();
    if (FQT->isIncompleteType())
      return CXTypeLayoutError_Incomplete;
    if (FQT->isDependentType())
      return CXTypeLayoutError_Dependent;
    // recurse
    if (const RecordType *ChildType = (*I)->getType()->getAs<RecordType>()) {
      if (const RecordDecl *Child = ChildType->getDecl()) {
        long long ret = visitRecordForValidation(Child);
        if (ret < 0)
          return ret;
      }
    }
    // else try next field
  }
  return 0;
}
Example #17
RegionRawOffset ElementRegion::getAsArrayOffset() const {
  CharUnits offset = CharUnits::Zero();
  const ElementRegion *ER = this;
  const MemRegion *superR = nullptr;
  ASTContext &C = getContext();

  // FIXME: Handle multi-dimensional arrays.

  while (ER) {
    superR = ER->getSuperRegion();

    // FIXME: generalize to symbolic offsets.
    SVal index = ER->getIndex();
    if (Optional<nonloc::ConcreteInt> CI = index.getAs<nonloc::ConcreteInt>()) {
      // Update the offset.
      int64_t i = CI->getValue().getSExtValue();

      if (i != 0) {
        QualType elemType = ER->getElementType();

        // If we are pointing to an incomplete type, go no further.
        if (elemType->isIncompleteType()) {
          superR = ER;
          break;
        }

        CharUnits size = C.getTypeSizeInChars(elemType);
        offset += (i * size);
      }

      // Go to the next ElementRegion (if any).
      ER = dyn_cast<ElementRegion>(superR);
      continue;
    }

    return nullptr;
  }

  assert(superR && "super region cannot be NULL");
  return RegionRawOffset(superR, offset);
}
Example #18
const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy) {
  ASTContext &Ctx = StateMgr.getContext();

  // Handle casts to Objective-C objects.
  if (CastToTy->isObjCObjectPointerType())
    return R->StripCasts();

  if (CastToTy->isBlockPointerType()) {
    // FIXME: We may need different solutions, depending on the symbol
    // involved.  Blocks can be casted to/from 'id', as they can be treated
    // as Objective-C objects.  This could possibly be handled by enhancing
    // our reasoning of downcasts of symbolic objects.
    if (isa<CodeTextRegion>(R) || isa<SymbolicRegion>(R))
      return R;

    // We don't know what to make of it.  Return a NULL region, which
    // will be interpreted as UnknownVal.
    return nullptr;
  }

  // Now assume we are casting from pointer to pointer. Other cases should
  // already be handled.
  QualType PointeeTy = CastToTy->getPointeeType();
  QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);

  // Handle casts to void*.  We just pass the region through.
  if (CanonPointeeTy.getLocalUnqualifiedType() == Ctx.VoidTy)
    return R;

  // Handle casts from compatible types.
  if (R->isBoundable())
    if (const auto *TR = dyn_cast<TypedValueRegion>(R)) {
      QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
      if (CanonPointeeTy == ObjTy)
        return R;
    }

  // Process region cast according to the kind of the region being cast.
  switch (R->getKind()) {
    case MemRegion::CXXThisRegionKind:
    case MemRegion::CodeSpaceRegionKind:
    case MemRegion::StackLocalsSpaceRegionKind:
    case MemRegion::StackArgumentsSpaceRegionKind:
    case MemRegion::HeapSpaceRegionKind:
    case MemRegion::UnknownSpaceRegionKind:
    case MemRegion::StaticGlobalSpaceRegionKind:
    case MemRegion::GlobalInternalSpaceRegionKind:
    case MemRegion::GlobalSystemSpaceRegionKind:
    case MemRegion::GlobalImmutableSpaceRegionKind: {
      llvm_unreachable("Invalid region cast");
    }

    case MemRegion::FunctionCodeRegionKind:
    case MemRegion::BlockCodeRegionKind:
    case MemRegion::BlockDataRegionKind:
    case MemRegion::StringRegionKind:
      // FIXME: Need to handle arbitrary downcasts.
    case MemRegion::SymbolicRegionKind:
    case MemRegion::AllocaRegionKind:
    case MemRegion::CompoundLiteralRegionKind:
    case MemRegion::FieldRegionKind:
    case MemRegion::ObjCIvarRegionKind:
    case MemRegion::ObjCStringRegionKind:
    case MemRegion::VarRegionKind:
    case MemRegion::CXXTempObjectRegionKind:
    case MemRegion::CXXBaseObjectRegionKind:
    case MemRegion::CXXDerivedObjectRegionKind:
      return MakeElementRegion(cast<SubRegion>(R), PointeeTy);

    case MemRegion::ElementRegionKind: {
      // If we are casting from an ElementRegion to another type, the
      // algorithm is as follows:
      //
      // (1) Compute the "raw offset" of the ElementRegion from the
      //     base region.  This is done by calling 'getAsRawOffset()'.
      //
      // (2a) If we get a 'RegionRawOffset' after calling
      //      'getAsRawOffset()', determine if the absolute offset
      //      can be exactly divided into chunks of the size of the
      //      casted-pointee type.  If so, create a new ElementRegion with
      //      the pointee-cast type as the new ElementType and the index
      //      being the offset divided by the chunk size.  If not, create
      //      a new ElementRegion at offset 0 off the raw offset region.
      //
      // (2b) If we don't get a 'RegionRawOffset' after calling
      //      'getAsRawOffset()', it means that we are at offset 0.
      //
      // FIXME: Handle symbolic raw offsets.

      const ElementRegion *elementR = cast<ElementRegion>(R);
      const RegionRawOffset &rawOff = elementR->getAsArrayOffset();
      const MemRegion *baseR = rawOff.getRegion();

      // If we cannot compute a raw offset, throw up our hands and return
      // a NULL MemRegion*.
      if (!baseR)
        return nullptr;

      CharUnits off = rawOff.getOffset();

      if (off.isZero()) {
        // Edge case: we are at 0 bytes off the beginning of baseR.  We
        // check to see if type we are casting to is the same as the base
        // region.  If so, just return the base region.
        if (const auto *TR = dyn_cast<TypedValueRegion>(baseR)) {
          QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
          QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
          if (CanonPointeeTy == ObjTy)
            return baseR;
        }

        // Otherwise, create a new ElementRegion at offset 0.
        return MakeElementRegion(cast<SubRegion>(baseR), PointeeTy);
      }

      // We have a non-zero offset from the base region.  We want to determine
      // if the offset can be evenly divided by sizeof(PointeeTy).  If so,
      // we create an ElementRegion whose index is that value.  Otherwise, we
      // create two ElementRegions, one that reflects a raw offset and the other
      // that reflects the cast.

      // Compute the index for the new ElementRegion.
      int64_t newIndex = 0;
      const MemRegion *newSuperR = nullptr;

      // We can only compute sizeof(PointeeTy) if it is a complete type.
      if (!PointeeTy->isIncompleteType()) {
        // Compute the size in **bytes**.
        CharUnits pointeeTySize = Ctx.getTypeSizeInChars(PointeeTy);
        if (!pointeeTySize.isZero()) {
          // Is the offset a multiple of the size?  If so, we can layer the
          // ElementRegion (with elementType == PointeeTy) directly on top of
          // the base region.
          if (off % pointeeTySize == 0) {
            newIndex = off / pointeeTySize;
            newSuperR = baseR;
          }
        }
      }

      if (!newSuperR) {
        // Create an intermediate ElementRegion to represent the raw byte
        // offset.  This will be the super region of the final ElementRegion.
        newSuperR = MakeElementRegion(cast<SubRegion>(baseR), Ctx.CharTy,
                                      off.getQuantity());
      }

      return MakeElementRegion(cast<SubRegion>(newSuperR), PointeeTy, newIndex);
    }
  }

  llvm_unreachable("unreachable");
}
Example #19
RegionOffset MemRegion::getAsOffset() const {
  const MemRegion *R = this;
  const MemRegion *SymbolicOffsetBase = nullptr;
  int64_t Offset = 0;

  while (1) {
    switch (R->getKind()) {
    case CodeSpaceRegionKind:
    case StackLocalsSpaceRegionKind:
    case StackArgumentsSpaceRegionKind:
    case HeapSpaceRegionKind:
    case UnknownSpaceRegionKind:
    case StaticGlobalSpaceRegionKind:
    case GlobalInternalSpaceRegionKind:
    case GlobalSystemSpaceRegionKind:
    case GlobalImmutableSpaceRegionKind:
      // Stores can bind directly to a region space to set a default value.
      assert(Offset == 0 && !SymbolicOffsetBase);
      goto Finish;

    case FunctionCodeRegionKind:
    case BlockCodeRegionKind:
    case BlockDataRegionKind:
      // These will never have bindings, but may end up having values requested
      // if the user does some strange casting.
      if (Offset != 0)
        SymbolicOffsetBase = R;
      goto Finish;

    case SymbolicRegionKind:
    case AllocaRegionKind:
    case CompoundLiteralRegionKind:
    case CXXThisRegionKind:
    case StringRegionKind:
    case ObjCStringRegionKind:
    case VarRegionKind:
    case CXXTempObjectRegionKind:
      // Usual base regions.
      goto Finish;

    case ObjCIvarRegionKind:
      // This is a little strange, but it's a compromise between
      // ObjCIvarRegions having unknown compile-time offsets (when using the
      // non-fragile runtime) and yet still being distinct, non-overlapping
      // regions. Thus we treat them as "like" base regions for the purposes
      // of computing offsets.
      goto Finish;

    case CXXBaseObjectRegionKind: {
      const CXXBaseObjectRegion *BOR = cast<CXXBaseObjectRegion>(R);
      R = BOR->getSuperRegion();

      QualType Ty;
      bool RootIsSymbolic = false;
      if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(R)) {
        Ty = TVR->getDesugaredValueType(getContext());
      } else if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
        // If our base region is symbolic, we don't know what type it really is.
        // Pretend the type of the symbol is the true dynamic type.
        // (This will at least be self-consistent for the life of the symbol.)
        Ty = SR->getSymbol()->getType()->getPointeeType();
        RootIsSymbolic = true;
      }

      const CXXRecordDecl *Child = Ty->getAsCXXRecordDecl();
      if (!Child) {
        // We cannot compute the offset of the base class.
        SymbolicOffsetBase = R;
      } else {
        if (RootIsSymbolic) {
          // Base layers on symbolic regions may not be type-correct.
          // Double-check the inheritance here, and revert to a symbolic offset
          // if it's invalid (e.g. due to a reinterpret_cast).
          if (BOR->isVirtual()) {
            if (!Child->isVirtuallyDerivedFrom(BOR->getDecl()))
              SymbolicOffsetBase = R;
          } else {
            if (!isImmediateBase(Child, BOR->getDecl()))
              SymbolicOffsetBase = R;
          }
        }
      }

      // Don't bother calculating precise offsets if we already have a
      // symbolic offset somewhere in the chain.
      if (SymbolicOffsetBase)
        continue;

      CharUnits BaseOffset;
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Child);
      if (BOR->isVirtual())
        BaseOffset = Layout.getVBaseClassOffset(BOR->getDecl());
      else
        BaseOffset = Layout.getBaseClassOffset(BOR->getDecl());

      // The base offset is in chars, not in bits.
      Offset += BaseOffset.getQuantity() * getContext().getCharWidth();
      break;
    }
    case ElementRegionKind: {
      const ElementRegion *ER = cast<ElementRegion>(R);
      R = ER->getSuperRegion();

      QualType EleTy = ER->getValueType();
      if (EleTy->isIncompleteType()) {
        // We cannot compute the offset of the base class.
        SymbolicOffsetBase = R;
        continue;
      }

      SVal Index = ER->getIndex();
      if (Optional<nonloc::ConcreteInt> CI =
              Index.getAs<nonloc::ConcreteInt>()) {
        // Don't bother calculating precise offsets if we already have a
        // symbolic offset somewhere in the chain.
        if (SymbolicOffsetBase)
          continue;

        int64_t i = CI->getValue().getSExtValue();
        // This type size is in bits.
        Offset += i * getContext().getTypeSize(EleTy);
      } else {
        // We cannot compute offset for non-concrete index.
        SymbolicOffsetBase = R;
      }
      break;
    }
    case FieldRegionKind: {
      const FieldRegion *FR = cast<FieldRegion>(R);
      R = FR->getSuperRegion();

      const RecordDecl *RD = FR->getDecl()->getParent();
      if (RD->isUnion() || !RD->isCompleteDefinition()) {
        // We cannot compute offset for incomplete type.
        // For unions, we could treat everything as offset 0, but we'd rather
        // treat each field as a symbolic offset so they aren't stored on top
        // of each other, since we depend on things in typed regions actually
        // matching their types.
        SymbolicOffsetBase = R;
      }

      // Don't bother calculating precise offsets if we already have a
      // symbolic offset somewhere in the chain.
      if (SymbolicOffsetBase)
        continue;

      // Get the field number.
      unsigned idx = 0;
      for (RecordDecl::field_iterator FI = RD->field_begin(),
             FE = RD->field_end(); FI != FE; ++FI, ++idx) {
        if (FR->getDecl() == *FI)
          break;
      }
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
      // This is offset in bits.
      Offset += Layout.getFieldOffset(idx);
      break;
    }
    }
  }

 Finish:
  if (SymbolicOffsetBase)
    return RegionOffset(SymbolicOffsetBase, RegionOffset::Symbolic);
  return RegionOffset(R, Offset);
}