Example #1
LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
                                               const ObjCInterfaceDecl *OID,
                                               llvm::Value *BaseValue,
                                               const ObjCIvarDecl *Ivar,
                                               unsigned CVRQualifiers,
                                               llvm::Value *Offset) {
  // Compute (type*) ( (char *) BaseValue + Offset)
  QualType IvarTy = Ivar->getType();
  llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
  llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
  V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");

  if (!Ivar->isBitField()) {
    V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
    LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
    LV.getQuals().addCVRQualifiers(CVRQualifiers);
    return LV;
  }

  // We need to compute an access strategy for this bit-field. We are given the
  // offset to the first byte in the bit-field, the sub-byte offset is taken
  // from the original layout. We reuse the normal bit-field access strategy by
  // treating this as an access to a struct where the bit-field is in byte 0,
  // and adjust the containing type size as appropriate.
  //
  // FIXME: Note that currently we make a very conservative estimate of the
  // alignment of the bit-field, because (a) it is not clear what guarantees the
  // runtime makes us, and (b) we don't have a way to specify that the struct is
  // at an alignment plus offset.
  //
  // Note, there is a subtle invariant here: we can only call this routine on
  // non-synthesized ivars but we may be called for synthesized ivars.  However,
  // a synthesized ivar can never be a bit-field, so this is safe.
  uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
  uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
  uint64_t AlignmentBits = CGF.CGM.getContext().getTargetInfo().getCharAlign();
  uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
  CharUnits StorageSize =
    CGF.CGM.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(BitOffset + BitFieldSize, AlignmentBits));
  CharUnits Alignment = CGF.CGM.getContext().toCharUnitsFromBits(AlignmentBits);
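  // For example, with 8-bit chars a field at overall bit offset 19 with width
  // 6 has BitOffset == 3 and needs 3 + 6 == 9 bits, rounded up to 16 bits
  // (two bytes) of storage.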

  // Allocate a new CGBitFieldInfo object to describe this access.
  //
  // FIXME: This is incredibly wasteful, these should be uniqued or part of some
  // layout object. However, this is blocked on other cleanups to the
  // Objective-C code, so for now we just live with allocating a bunch of these
  // objects.
  CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
    CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
                             CGF.CGM.getContext().toBits(StorageSize),
                             Alignment.getQuantity()));

  V = CGF.Builder.CreateBitCast(V,
                                llvm::Type::getIntNPtrTy(CGF.getLLVMContext(),
                                                         Info->StorageSize));
  return LValue::MakeBitfield(V, *Info,
                              IvarTy.withCVRQualifiers(CVRQualifiers),
                              Alignment);
}
Example #2
void SlicingCheck::check(const MatchFinder::MatchResult &Result) {
  const auto *BaseDecl = Result.Nodes.getNodeAs<CXXRecordDecl>("BaseDecl");
  const auto *DerivedDecl =
      Result.Nodes.getNodeAs<CXXRecordDecl>("DerivedDecl");
  const auto *Call = Result.Nodes.getNodeAs<Expr>("Call");
  assert(BaseDecl != nullptr);
  assert(DerivedDecl != nullptr);
  assert(Call != nullptr);

  // Warn when slicing the vtable.
  // We're looking through all the methods in the derived class and see if they
  // override some methods in the base class.
  // It's not enough to just test whether the class is polymorphic because we
  // would be fine slicing B to A if no method in B (or its bases) overrides
  // anything in A:
  //   class A { virtual void f(); };
  //   class B : public A {};
  // because in that case calling A::f is the same as calling B::f.
  DiagnoseSlicedOverriddenMethods(*Call, *DerivedDecl, *BaseDecl);

  // Warn when slicing member variables.
  const auto &BaseLayout =
      BaseDecl->getASTContext().getASTRecordLayout(BaseDecl);
  const auto &DerivedLayout =
      DerivedDecl->getASTContext().getASTRecordLayout(DerivedDecl);
  const CharUnits StateSize =
      DerivedLayout.getDataSize() - BaseLayout.getDataSize();
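  // getDataSize() excludes tail padding, so this counts only the bytes of
  // state that the derived class adds on top of the base.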
  if (StateSize.isPositive()) {
    diag(Call->getExprLoc(), "slicing object from type %0 to %1 discards "
                             "%2 bytes of state")
        << DerivedDecl << BaseDecl << static_cast<int>(StateSize.getQuantity());
  }
}
Example #3
void ExprEngine::
VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
                              ExplodedNode *Pred,
                              ExplodedNodeSet &Dst) {
  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);

  QualType T = Ex->getTypeOfArgument();
  
  if (Ex->getKind() == UETT_SizeOf) {
    if (!T->isIncompleteType() && !T->isConstantSizeType()) {
      assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");
      
      // FIXME: Add support for VLA type arguments and VLA expressions.
      // When that happens, we should probably refactor VLASizeChecker's code.
      return;
    }
    else if (T->getAs<ObjCObjectType>()) {
      // Some code tries to take the sizeof of an ObjCObjectType, relying on
      // the compiler having laid out its representation.  Just report Unknown
      // for these.
      return;
    }
  }
  
  APSInt Value = Ex->EvaluateKnownConstInt(getContext());
  CharUnits amt = CharUnits::fromQuantity(Value.getZExtValue());
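  // (sizeof/alignof results are non-negative, so the zero-extended value is
  // exact.)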
  
  ProgramStateRef state = Pred->getState();
  state = state->BindExpr(Ex, Pred->getLocationContext(),
                          svalBuilder.makeIntVal(amt.getQuantity(),
                                                     Ex->getType()));
  Bldr.generateNode(Ex, Pred, state);
}
Example #4
void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // Require the type to be naturally aligned.
  if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {

    // Try splitting vector types.
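    // (e.g. an 8-byte <2 x float> at offset 4 is misaligned, but its float
    // elements land naturally aligned at offsets 4 and 8.)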
    if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
      auto split = splitLegalVectorType(CGM, end - begin, vecTy);
      auto eltTy = split.first;
      auto numElts = split.second;

      auto eltSize = (end - begin) / numElts;
      assert(eltSize == getTypeStoreSize(CGM, eltTy));
      for (size_t i = 0, e = numElts; i != e; ++i) {
        addLegalTypedData(eltTy, begin, begin + eltSize);
        begin += eltSize;
      }
      assert(begin == end);
      return;
    }

    return addOpaqueData(begin, end);
  }

  addEntry(type, begin, end);
}
Example #5
llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  Fields[Field] = 0;
  if (Field->isBitField()) {
    uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    unsigned StorageBits = llvm::RoundUpToAlignment(
      FieldSize, Types.getTarget().getCharAlign());
    CharUnits NumBytesToAppend
      = Types.getContext().toCharUnitsFromBits(StorageBits);
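    // e.g. with 8-bit chars, a 17-bit field rounds up to 24 bits of storage,
    // i.e. three bytes.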

    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());

    // Add the bit field info.
    BitFields[Field] = CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize,
                                                StorageBits,
                                                Alignment.getQuantity());
    return FieldTy;
  }

  // This is a regular union field.
  return Types.ConvertTypeForMem(Field->getType());
}
Example #6
void CGRecordLowering::insertPadding() {
  std::vector<std::pair<CharUnits, CharUnits> > Padding;
  CharUnits Size = CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    CharUnits Offset = Member->Offset;
    assert(Offset >= Size);
    // Insert padding if we need to.
    if (Offset !=
        Size.alignTo(Packed ? CharUnits::One() : getAlignment(Member->Data)))
      Padding.push_back(std::make_pair(Size, Offset - Size));
    Size = Offset + getSize(Member->Data);
  }
  if (Padding.empty())
    return;
  // Add the padding to the Members list and sort it.
  for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
        Pad = Padding.begin(), PadEnd = Padding.end();
        Pad != PadEnd; ++Pad)
    Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
  std::stable_sort(Members.begin(), Members.end());
}
Example #7
llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                                  llvm::Value *NewPtr,
                                                  llvm::Value *NumElements,
                                                  const CXXNewExpr *expr,
                                                  QualType ElementType) {
  assert(NeedsArrayCookie(expr));

  unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();

  ASTContext &Ctx = getContext();
  QualType SizeTy = Ctx.getSizeType();
  CharUnits SizeSize = Ctx.getTypeSizeInChars(SizeTy);

  // The size of the cookie.
  CharUnits CookieSize =
    std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));

  // Compute an offset to the cookie.
  llvm::Value *CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
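  // The element count is stored right-justified in the cookie, immediately
  // before the array data, so the data itself keeps its required alignment.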
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_64(CookiePtr,
                                                 CookieOffset.getQuantity());

  // Write the number of elements into the appropriate slot.
  llvm::Value *NumElementsPtr
    = CGF.Builder.CreateBitCast(CookiePtr,
                                CGF.ConvertType(SizeTy)->getPointerTo(AS));
  CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
                                                CookieSize.getQuantity());  
}
Example #8
llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                              llvm::Value *NewPtr,
                                              llvm::Value *NumElements,
                                              const CXXNewExpr *expr,
                                              QualType ElementType) {
  assert(NeedsArrayCookie(expr));

  // NewPtr is a char*.

  unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = Ctx.getTypeSizeInChars(Ctx.getSizeType());
  llvm::IntegerType *SizeTy =
    cast<llvm::IntegerType>(CGF.ConvertType(Ctx.getSizeType()));

  // The cookie is always at the start of the buffer.
  llvm::Value *CookiePtr = NewPtr;

  // The first element is the element size.
  CookiePtr = CGF.Builder.CreateBitCast(CookiePtr, SizeTy->getPointerTo(AS));
  llvm::Value *ElementSize = llvm::ConstantInt::get(SizeTy,
                          Ctx.getTypeSizeInChars(ElementType).getQuantity());
  CGF.Builder.CreateStore(ElementSize, CookiePtr);

  // The second element is the element count.
  CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_32(CookiePtr, 1);
  CGF.Builder.CreateStore(NumElements, CookiePtr);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  CharUnits CookieSize = 2 * SizeSize;
  return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
                                                CookieSize.getQuantity());
}
Example #9
RegionOffset MemRegion::getAsOffset() const {
  const MemRegion *R = this;
  int64_t Offset = 0;

  while (1) {
    switch (R->getKind()) {
    default:
      return RegionOffset(0);
    case SymbolicRegionKind:
    case AllocaRegionKind:
    case CompoundLiteralRegionKind:
    case CXXThisRegionKind:
    case StringRegionKind:
    case VarRegionKind:
    case CXXTempObjectRegionKind:
      goto Finish;
    case ElementRegionKind: {
      const ElementRegion *ER = cast<ElementRegion>(R);
      QualType EleTy = ER->getValueType();

      if (!IsCompleteType(getContext(), EleTy))
        return RegionOffset(0);

      SVal Index = ER->getIndex();
      if (const nonloc::ConcreteInt *CI =
              dyn_cast<nonloc::ConcreteInt>(&Index)) {
        int64_t i = CI->getValue().getSExtValue();
        CharUnits Size = getContext().getTypeSizeInChars(EleTy);
        // Size is in bytes; the running offset is tracked in bits.
        Offset += i * Size.getQuantity() * 8;
      } else {
        // We cannot compute offset for non-concrete index.
        return RegionOffset(0);
      }
      R = ER->getSuperRegion();
      break;
    }
    case FieldRegionKind: {
      const FieldRegion *FR = cast<FieldRegion>(R);
      const RecordDecl *RD = FR->getDecl()->getParent();
      if (!RD->isCompleteDefinition())
        // We cannot compute offset for incomplete type.
        return RegionOffset(0);
      // Get the field number.
      unsigned idx = 0;
      for (RecordDecl::field_iterator FI = RD->field_begin(),
             FE = RD->field_end(); FI != FE; ++FI, ++idx)
        if (FR->getDecl() == *FI)
          break;

      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
      // This is offset in bits.
      Offset += Layout.getFieldOffset(idx);
      R = FR->getSuperRegion();
      break;
    }
    }
  }

Finish:
  return RegionOffset(R, Offset);
}
Example #10
void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
                                               FunctionArgList &Args) {
  // Emit a call to cudaSetupArgument for each arg in Args.
  llvm::FunctionCallee cudaSetupArgFn = getSetupArgumentFn();
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
  CharUnits Offset = CharUnits::Zero();
  for (const VarDecl *A : Args) {
    CharUnits TyWidth, TyAlign;
    std::tie(TyWidth, TyAlign) =
        CGM.getContext().getTypeInfoInChars(A->getType());
    Offset = Offset.alignTo(TyAlign);
    llvm::Value *Args[] = {
        CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(),
                                      VoidPtrTy),
        llvm::ConstantInt::get(SizeTy, TyWidth.getQuantity()),
        llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
    };
    llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, Args);
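    // cudaSetupArgument returns cudaSuccess (0) on success; on failure, skip
    // the remaining arguments and the launch by branching to EndBlock.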
    llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0);
    llvm::Value *CBZero = CGF.Builder.CreateICmpEQ(CB, Zero);
    llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
    CGF.Builder.CreateCondBr(CBZero, NextBlock, EndBlock);
    CGF.EmitBlock(NextBlock);
    Offset += TyWidth;
  }

  // Emit the call to cudaLaunch
  llvm::FunctionCallee cudaLaunchFn = getLaunchFn();
  llvm::Value *Arg = CGF.Builder.CreatePointerCast(CGF.CurFn, CharPtrTy);
  CGF.EmitRuntimeCallOrInvoke(cudaLaunchFn, Arg);
  CGF.EmitBranch(EndBlock);

  CGF.EmitBlock(EndBlock);
}
Example #11
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");
  MD = MD->getCanonicalDecl();

  CodeGenTypes &Types = CGM.getTypes();
  llvm::Type *ptrdiff_t = getPtrDiffTy();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getVTableContext().getMethodVTableIndex(MD);

    const ASTContext &Context = getContext();
    CharUnits PointerWidth =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
    uint64_t VTableOffset = (Index * PointerWidth.getQuantity());

    if (IsARM) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = ptrdiff_t;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, ptrdiff_t);
    MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, (IsARM ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}
Example #12
// Returns an adjusted base cast to i8*, since we do more address arithmetic on
// it.
llvm::Value *
MicrosoftCXXABI::AdjustVirtualBase(CodeGenFunction &CGF,
                                   const CXXRecordDecl *RD, llvm::Value *Base,
                                   llvm::Value *VirtualBaseAdjustmentOffset,
                                   llvm::Value *VBPtrOffset) {
  CGBuilderTy &Builder = CGF.Builder;
  Base = Builder.CreateBitCast(Base, CGM.Int8PtrTy);
  llvm::BasicBlock *OriginalBB = 0;
  llvm::BasicBlock *SkipAdjustBB = 0;
  llvm::BasicBlock *VBaseAdjustBB = 0;

  // In the unspecified inheritance model, there might not be a vbtable at all,
  // in which case we need to skip the virtual base lookup.  If there is a
  // vbtable, the first entry is a no-op entry that gives back the original
  // base, so look for a virtual base adjustment offset of zero.
  if (VBPtrOffset) {
    OriginalBB = Builder.GetInsertBlock();
    VBaseAdjustBB = CGF.createBasicBlock("memptr.vadjust");
    SkipAdjustBB = CGF.createBasicBlock("memptr.skip_vadjust");
    llvm::Value *IsVirtual =
      Builder.CreateICmpNE(VirtualBaseAdjustmentOffset, getZeroInt(),
                           "memptr.is_vbase");
    Builder.CreateCondBr(IsVirtual, VBaseAdjustBB, SkipAdjustBB);
    CGF.EmitBlock(VBaseAdjustBB);
  }

  // If we weren't given a dynamic vbptr offset, RD should be complete and we'll
  // know the vbptr offset.
  if (!VBPtrOffset) {
    CharUnits offs = getContext().getASTRecordLayout(RD).getVBPtrOffset();
    VBPtrOffset = llvm::ConstantInt::get(CGM.IntTy, offs.getQuantity());
  }
  // Load the vbtable pointer from the vbtable offset in the instance.
  llvm::Value *VBPtr =
    Builder.CreateInBoundsGEP(Base, VBPtrOffset, "memptr.vbptr");
  llvm::Value *VBTable =
    Builder.CreateBitCast(VBPtr, CGM.Int8PtrTy->getPointerTo(0));
  VBTable = Builder.CreateLoad(VBTable, "memptr.vbtable");
  // Load an i32 offset from the vb-table.
  llvm::Value *VBaseOffs =
    Builder.CreateInBoundsGEP(VBTable, VirtualBaseAdjustmentOffset);
  VBaseOffs = Builder.CreateBitCast(VBaseOffs, CGM.Int32Ty->getPointerTo(0));
  VBaseOffs = Builder.CreateLoad(VBaseOffs, "memptr.vbase_offs");
  // Add it to VBPtr.  GEP will sign extend the i32 value for us.
  llvm::Value *AdjustedBase = Builder.CreateInBoundsGEP(VBPtr, VBaseOffs);

  // Merge control flow with the case where we didn't have to adjust.
  if (VBaseAdjustBB) {
    Builder.CreateBr(SkipAdjustBB);
    CGF.EmitBlock(SkipAdjustBB);
    llvm::PHINode *Phi = Builder.CreatePHI(CGM.Int8PtrTy, 2, "memptr.base");
    Phi->addIncoming(Base, OriginalBB);
    Phi->addIncoming(AdjustedBase, VBaseAdjustBB);
    return Phi;
  }
  return AdjustedBase;
}
Example #13
bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
                                unsigned &Offset, SourceLocation AsmLoc) {
  Offset = 0;
  SmallVector<StringRef, 2> Members;
  Member.split(Members, ".");
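  // "Member" may name a nested field such as "a.b.c"; resolve one record
  // level per component below.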

  LookupResult BaseResult(*this, &Context.Idents.get(Base), SourceLocation(),
                          LookupOrdinaryName);

  if (!LookupName(BaseResult, getCurScope()))
    return true;
  
  if (!BaseResult.isSingleResult())
    return true;
  NamedDecl *FoundDecl = BaseResult.getFoundDecl();
  for (StringRef NextMember : Members) {
    const RecordType *RT = nullptr;
    if (VarDecl *VD = dyn_cast<VarDecl>(FoundDecl))
      RT = VD->getType()->getAs<RecordType>();
    else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(FoundDecl)) {
      MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
      RT = TD->getUnderlyingType()->getAs<RecordType>();
    } else if (TypeDecl *TD = dyn_cast<TypeDecl>(FoundDecl))
      RT = TD->getTypeForDecl()->getAs<RecordType>();
    else if (FieldDecl *TD = dyn_cast<FieldDecl>(FoundDecl))
      RT = TD->getType()->getAs<RecordType>();
    if (!RT)
      return true;

    if (RequireCompleteType(AsmLoc, QualType(RT, 0),
                            diag::err_asm_incomplete_type))
      return true;

    LookupResult FieldResult(*this, &Context.Idents.get(NextMember),
                             SourceLocation(), LookupMemberName);

    if (!LookupQualifiedName(FieldResult, RT->getDecl()))
      return true;

    if (!FieldResult.isSingleResult())
      return true;
    FoundDecl = FieldResult.getFoundDecl();

    // FIXME: Handle IndirectFieldDecl?
    FieldDecl *FD = dyn_cast<FieldDecl>(FoundDecl);
    if (!FD)
      return true;

    const ASTRecordLayout &RL = Context.getASTRecordLayout(RT->getDecl());
    unsigned i = FD->getFieldIndex();
    CharUnits Result = Context.toCharUnitsFromBits(RL.getFieldOffset(i));
    Offset += (unsigned)Result.getQuantity();
  }

  return false;
}
Example #14
/// \brief Perform the final move to DestPtr if RequiresGCollection is set.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitGCMove(E, Result);
/// If GC doesn't interfere, this will cause the result to be emitted
/// directly into the return value slot.  If GC does interfere, a final
/// move will be performed.
void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
  if (Dest.requiresGCollection()) {
    CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
    const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, Dest.getAddr(),
                                                    Src.getAggregateAddr(),
                                                    SizeVal);
  }
}
Example #15
void CastSizeChecker::checkPreStmt(const CastExpr *CE,
                                   CheckerContext &C) const {
  const Expr *E = CE->getSubExpr();
  ASTContext &Ctx = C.getASTContext();
  QualType ToTy = Ctx.getCanonicalType(CE->getType());
  const PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());

  if (!ToPTy)
    return;

  QualType ToPointeeTy = ToPTy->getPointeeType();

  // Only perform the check if 'ToPointeeTy' is a complete type.
  if (ToPointeeTy->isIncompleteType())
    return;

  ProgramStateRef state = C.getState();
  const MemRegion *R = state->getSVal(E, C.getLocationContext()).getAsRegion();
  if (!R)
    return;

  const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R);
  if (!SR)
    return;

  SValBuilder &svalBuilder = C.getSValBuilder();
  SVal extent = SR->getExtent(svalBuilder);
  const llvm::APSInt *extentInt = svalBuilder.getKnownValue(state, extent);
  if (!extentInt)
    return;

  CharUnits regionSize = CharUnits::fromQuantity(extentInt->getSExtValue());
  CharUnits typeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);

  // Ignore void, and a few other un-sizeable types.
  if (typeSize.isZero())
    return;

  if (regionSize % typeSize == 0)
    return;

  if (evenFlexibleArraySize(Ctx, regionSize, typeSize, ToPointeeTy))
    return;

  if (ExplodedNode *errorNode = C.generateErrorNode()) {
    if (!BT)
      BT.reset(new BuiltinBug(this, "Cast region with wrong size.",
                                    "Cast a region whose size is not a multiple"
                                    " of the destination type size."));
    auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), errorNode);
    R->addRange(CE->getSourceRange());
    C.emitReport(std::move(R));
  }
}
Example #16
DefinedOrUnknownSVal TypedValueRegion::getExtent(SValBuilder &svalBuilder) const {
  ASTContext &Ctx = svalBuilder.getContext();
  QualType T = getDesugaredValueType(Ctx);
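  // A variable-length array's size is known only at run time, so model its
  // extent symbolically; an incomplete array's extent is simply unknown.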

  if (isa<VariableArrayType>(T))
    return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
  if (isa<IncompleteArrayType>(T))
    return UnknownVal();

  CharUnits size = Ctx.getTypeSizeInChars(T);
  QualType sizeTy = svalBuilder.getArrayIndexType();
  return svalBuilder.makeIntVal(size.getQuantity(), sizeTy);
}
Example #17
// Scale a base value by a scaling factor, and return the scaled
// value as an SVal.  Used by 'computeOffset'.
static inline SVal scaleValue(ProgramStateRef state,
                              NonLoc baseVal, CharUnits scaling,
                              SValBuilder &sb) {
  return sb.evalBinOpNN(state, BO_Mul, baseVal,
                        sb.makeArrayIndex(scaling.getQuantity()),
                        sb.getArrayIndexType());
}
Example #18
void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  if (numBytes.isZero())
    return;

  // Append the padding field
  AppendField(NextFieldOffset, getByteArrayType(numBytes));
}
Example #19
void CastSizeChecker::PreVisitCastExpr(CheckerContext &C, const CastExpr *CE) {
  const Expr *E = CE->getSubExpr();
  ASTContext &Ctx = C.getASTContext();
  QualType ToTy = Ctx.getCanonicalType(CE->getType());
  const PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());

  if (!ToPTy)
    return;

  QualType ToPointeeTy = ToPTy->getPointeeType();

  const GRState *state = C.getState();
  const MemRegion *R = state->getSVal(E).getAsRegion();
  if (R == 0)
    return;

  const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R);
  if (SR == 0)
    return;

  ValueManager &ValMgr = C.getValueManager();
  SVal Extent = SR->getExtent(ValMgr);

  SValuator &SVator = ValMgr.getSValuator();
  const llvm::APSInt *ExtentInt = SVator.getKnownValue(state, Extent);
  if (!ExtentInt)
    return;

  CharUnits RegionSize = CharUnits::fromQuantity(ExtentInt->getSExtValue());
  CharUnits TypeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);
  
  // Ignore void, and a few other un-sizeable types.
  if (TypeSize.isZero())
    return;
  
  if (RegionSize % TypeSize != 0) {
    if (ExplodedNode *N = C.GenerateSink()) {
      if (!BT)
        BT = new BuiltinBug("Cast region with wrong size.",
                            "Cast a region whose size is not a multiple of the"
                            " destination type size.");
      RangedBugReport *R = new RangedBugReport(*BT, BT->getDescription(), N);
      R->addRange(CE->getSourceRange());
      C.EmitReport(R);
    }
  }
}
Example #20
llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                llvm::Value *allocPtr,
                                                CharUnits cookieSize) {
  // The number of elements is right-justified in the cookie.
  llvm::Value *numElementsPtr = allocPtr;
  CharUnits numElementsOffset =
    cookieSize - CharUnits::fromQuantity(CGF.SizeSizeInBytes);
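  // e.g. with an 8-byte size_t and a 16-byte cookie (for an over-aligned
  // element type), the count lives at offset 8.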
  if (!numElementsOffset.isZero())
    numElementsPtr =
      CGF.Builder.CreateConstInBoundsGEP1_64(numElementsPtr,
                                             numElementsOffset.getQuantity());

  unsigned AS = allocPtr->getType()->getPointerAddressSpace();
  numElementsPtr =
    CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
  return CGF.Builder.CreateLoad(numElementsPtr);
}
Example #21
/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
                              llvm::Constant *Addr) {
  // Don't emit the intrinsic if we're not optimizing.
  if (!CGF.CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // Grab the llvm.invariant.start intrinsic.
  llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
  llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID);
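  // llvm.invariant.start(size, ptr) tells the optimizer that the first
  // 'size' bytes at 'ptr' will not be modified from this point on.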

  // Emit a call with the size in bytes of the object.
  CharUnits WidthChars = CGF.getContext().getTypeSizeInChars(D.getType());
  uint64_t Width = WidthChars.getQuantity();
  llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(CGF.Int64Ty, Width),
                           llvm::ConstantExpr::getBitCast(Addr, CGF.Int8PtrTy)};
  CGF.Builder.CreateCall(InvariantStart, Args);
}
Example #22
ConstantAddress CodeGenModule::getUPCFenceVar() {
  CharUnits Align = getContext().getTypeAlignInChars(getContext().IntTy);
  if (!UPCFenceVar) {
    llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(getModule(), IntTy, false,
                               llvm::GlobalValue::LinkOnceODRLinkage,
                               llvm::ConstantInt::get(IntTy, 0),
                               "__upc_fence_var");

    if (isTargetDarwin())
      GV->setSection("__DATA,upc_shared");
    else
      GV->setSection("upc_shared");
    GV->setAlignment(Align.getQuantity());
    UPCFenceVar = GV;
  }
  return ConstantAddress(UPCFenceVar, Align);
}
Example #23
bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
                                unsigned &Offset, SourceLocation AsmLoc) {
  Offset = 0;
  LookupResult BaseResult(*this, &Context.Idents.get(Base), SourceLocation(),
                          LookupOrdinaryName);

  if (!LookupName(BaseResult, getCurScope()))
    return true;

  if (!BaseResult.isSingleResult())
    return true;

  const RecordType *RT = nullptr;
  NamedDecl *FoundDecl = BaseResult.getFoundDecl();
  if (VarDecl *VD = dyn_cast<VarDecl>(FoundDecl))
    RT = VD->getType()->getAs<RecordType>();
  else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(FoundDecl))
    RT = TD->getUnderlyingType()->getAs<RecordType>();
  else if (TypeDecl *TD = dyn_cast<TypeDecl>(FoundDecl))
    RT = TD->getTypeForDecl()->getAs<RecordType>();
  if (!RT)
    return true;

  if (RequireCompleteType(AsmLoc, QualType(RT, 0), 0))
    return true;

  LookupResult FieldResult(*this, &Context.Idents.get(Member), SourceLocation(),
                           LookupMemberName);

  if (!LookupQualifiedName(FieldResult, RT->getDecl()))
    return true;

  // FIXME: Handle IndirectFieldDecl?
  FieldDecl *FD = dyn_cast<FieldDecl>(FieldResult.getFoundDecl());
  if (!FD)
    return true;

  const ASTRecordLayout &RL = Context.getASTRecordLayout(RT->getDecl());
  unsigned i = FD->getFieldIndex();
  CharUnits Result = Context.toCharUnitsFromBits(RL.getFieldOffset(i));
  Offset = (unsigned)Result.getQuantity();

  return false;
}
Example #24
void VBTableInfo::EmitVBTableDefinition(
    CodeGenModule &CGM, const CXXRecordDecl *RD,
    llvm::GlobalVariable::LinkageTypes Linkage) const {
  assert(RD->getNumVBases() && ReusingBase->getNumVBases() &&
         "should only emit vbtables for classes with vbtables");

  const ASTRecordLayout &BaseLayout =
    CGM.getContext().getASTRecordLayout(VBPtrSubobject.getBase());
  const ASTRecordLayout &DerivedLayout =
    CGM.getContext().getASTRecordLayout(RD);

  SmallVector<llvm::Constant *, 4> Offsets;

  // The offset from ReusingBase's vbptr to itself always leads.
  CharUnits VBPtrOffset = BaseLayout.getVBPtrOffset();
  Offsets.push_back(
      llvm::ConstantInt::get(CGM.IntTy, -VBPtrOffset.getQuantity()));

  // These are laid out in the same order as in Itanium, which is the same as
  // the order of the vbase iterator.
  for (CXXRecordDecl::base_class_const_iterator I = ReusingBase->vbases_begin(),
       E = ReusingBase->vbases_end(); I != E; ++I) {
    const CXXRecordDecl *VBase = I->getType()->getAsCXXRecordDecl();
    CharUnits Offset = DerivedLayout.getVBaseClassOffset(VBase);
    assert(!Offset.isNegative());
    // Make it relative to the subobject vbptr.
    Offset -= VBPtrSubobject.getBaseOffset() + VBPtrOffset;
    Offsets.push_back(llvm::ConstantInt::get(CGM.IntTy, Offset.getQuantity()));
  }

  assert(Offsets.size() ==
         cast<llvm::ArrayType>(cast<llvm::PointerType>(GV->getType())
                               ->getElementType())->getNumElements());
  llvm::ArrayType *VBTableType =
    llvm::ArrayType::get(CGM.IntTy, Offsets.size());
  llvm::Constant *Init = llvm::ConstantArray::get(VBTableType, Offsets);
  GV->setInitializer(Init);

  // Set the correct linkage.
  GV->setLinkage(Linkage);

  // Set the right visibility.
  CGM.setTypeVisibility(GV, RD, CodeGenModule::TVK_ForVTable);
}
Example #25
void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
                                    llvm::Value *Ptr,
                                    const CXXDeleteExpr *expr,
                                    QualType ElementType,
                                    llvm::Value *&NumElements,
                                    llvm::Value *&AllocPtr,
                                    CharUnits &CookieSize) {
  // Derive a char* in the same address space as the pointer.
  unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
  llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);

  // If we don't need an array cookie, bail out early.
  if (!NeedsArrayCookie(expr, ElementType)) {
    AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
    NumElements = 0;
    CookieSize = CharUnits::Zero();
    return;
  }

  QualType SizeTy = getContext().getSizeType();
  CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
  llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
  
  CookieSize
    = std::max(SizeSize, getContext().getTypeAlignInChars(ElementType));

  CharUnits NumElementsOffset = CookieSize - SizeSize;

  // Compute the allocated pointer.
  AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
  AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
                                                    -CookieSize.getQuantity());

  llvm::Value *NumElementsPtr = AllocPtr;
  if (!NumElementsOffset.isZero())
    NumElementsPtr =
      CGF.Builder.CreateConstInBoundsGEP1_64(NumElementsPtr,
                                             NumElementsOffset.getQuantity());
  NumElementsPtr = 
    CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
  NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
}
Example #26
void ExprEngine::
VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
                              ExplodedNode *Pred,
                              ExplodedNodeSet &Dst) {
  // FIXME: Prechecks eventually go in ::Visit().
  ExplodedNodeSet CheckedSet;
  getCheckerManager().runCheckersForPreStmt(CheckedSet, Pred, Ex, *this);

  ExplodedNodeSet EvalSet;
  StmtNodeBuilder Bldr(CheckedSet, EvalSet, *currBldrCtx);

  QualType T = Ex->getTypeOfArgument();

  for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
       I != E; ++I) {
    if (Ex->getKind() == UETT_SizeOf) {
      if (!T->isIncompleteType() && !T->isConstantSizeType()) {
        assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");

        // FIXME: Add support for VLA type arguments and VLA expressions.
        // When that happens, we should probably refactor VLASizeChecker's code.
        continue;
      } else if (T->getAs<ObjCObjectType>()) {
        // Some code tries to take the sizeof of an ObjCObjectType, relying on
        // the compiler having laid out its representation.  Just report
        // Unknown for these.
        continue;
      }
    }

    APSInt Value = Ex->EvaluateKnownConstInt(getContext());
    CharUnits amt = CharUnits::fromQuantity(Value.getZExtValue());

    ProgramStateRef state = (*I)->getState();
    state = state->BindExpr(Ex, (*I)->getLocationContext(),
                            svalBuilder.makeIntVal(amt.getQuantity(),
                                                   Ex->getType()));
    Bldr.generateNode(Ex, *I, state);
  }

  getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, Ex, *this);
}
Example #27
void ElementRegion::dumpToStream(raw_ostream &os, int level) const {
#ifdef FSS_FORMAT
  ASTContext &C = getContext();
  QualType elemType = getElementType();
  CharUnits size = C.getTypeSizeInChars(elemType);

  if (Index.isZeroConstant() || size.getQuantity() == 0) {
    superRegion->dumpToStream(os, level + 1);
  }
  else {
    os << '('; 
    superRegion->dumpToStream(os, level + 1);
    os << " + "
       << '(' << Index << " * " << size.getQuantity() << "))";
  }
#else
  os << "element{" << superRegion << ','
     << Index << ',' << getElementType().getAsString() << '}';
#endif
}
Example #28
ConstantAggregateBuilderBase::PlaceholderPosition
ConstantAggregateBuilderBase::addPlaceholderWithSize(llvm::Type *type) {
  // Bring the offset up to the last field.
  CharUnits offset = getNextOffsetFromGlobal();

  // Create the placeholder.
  auto position = addPlaceholder();

  // Advance the offset past that field.
  auto &layout = Builder.CGM.getDataLayout();
  if (!Packed)
    offset = offset.alignTo(CharUnits::fromQuantity(
                                layout.getABITypeAlignment(type)));
  offset += CharUnits::fromQuantity(layout.getTypeStoreSize(type));

  CachedOffsetEnd = Builder.Buffer.size();
  CachedOffsetFromGlobal = offset;

  return position;
}
Example #29
/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getContext().getLangOptions().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                       .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  std::pair<CharUnits, CharUnits> TypeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  if (TypeInfo.first <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer are known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
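  // Bail out if more than a quarter of the bytes are known to be nonzero.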
  if (NumNonZeroBytes*4 > TypeInfo.first)
    return;
  
  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
  CharUnits Align = TypeInfo.second;

  llvm::Value *Loc = Slot.getAddr();
  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  
  Loc = CGF.Builder.CreateBitCast(Loc, BP);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, 
                           Align.getQuantity(), false);
  
  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}
Example #30
/// Check if we are casting to a struct with a flexible array at the end.
/// \code
/// struct foo {
///   size_t len;
///   struct bar data[];
/// };
/// \endcode
/// or
/// \code
/// struct foo {
///   size_t len;
///   struct bar data[0];
/// }
/// \endcode
/// In these cases it is also valid to allocate size of struct foo + a multiple
/// of struct bar.
static bool evenFlexibleArraySize(ASTContext &Ctx, CharUnits RegionSize,
                                  CharUnits TypeSize, QualType ToPointeeTy) {
  const RecordType *RT = ToPointeeTy->getAs<RecordType>();
  if (!RT)
    return false;

  const RecordDecl *RD = RT->getDecl();
  RecordDecl::field_iterator Iter(RD->field_begin());
  RecordDecl::field_iterator End(RD->field_end());
  const FieldDecl *Last = 0;
  for (; Iter != End; ++Iter)
    Last = *Iter;
  assert(Last && "empty structs should already be handled");

  const Type *ElemType = Last->getType()->getArrayElementTypeNoTypeQual();
  CharUnits FlexSize;
  if (const ConstantArrayType *ArrayTy =
        Ctx.getAsConstantArrayType(Last->getType())) {
    FlexSize = Ctx.getTypeSizeInChars(ElemType);
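    // A trailing [1] array is the pre-C99 idiom for a flexible array member;
    // its one declared element is already counted in TypeSize, so drop it.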
    if (ArrayTy->getSize() == 1 && TypeSize > FlexSize)
      TypeSize -= FlexSize;
    else if (ArrayTy->getSize() != 0)
      return false;
  } else if (RD->hasFlexibleArrayMember()) {
    FlexSize = Ctx.getTypeSizeInChars(ElemType);
  } else {
    return false;
  }

  if (FlexSize.isZero())
    return false;

  CharUnits Left = RegionSize - TypeSize;
  if (Left.isNegative())
    return false;

  if (Left % FlexSize == 0)
    return true;

  return false;
}