Example #1
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() && 
         "Should not call EmitDeclInit on a reference!");
  
  QualType type = D.getType();

  // Deduce UPC strict or relaxed from context, if needed
  if (CGF.getContext().getLangOpts().UPC) {
    Qualifiers Quals = type.getQualifiers();
    if (Quals.hasShared() && !Quals.hasStrict() && !Quals.hasRelaxed()) {
      if (D.isUPCInitStrict())
        Quals.addStrict();
      else
        Quals.addRelaxed();

      type = CGF.getContext().getQualifiedType(type.getUnqualifiedType(),
                                               Quals);
    }
  }

  LValue lv;
  if(type.getQualifiers().hasShared())
    lv = CGF.EmitSharedVarDeclLValue(DeclPtr, type);
  else
    lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv,AggValueSlot::IsDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
                                                  AggValueSlot::IsNotAliased));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
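
For reference, a minimal set of ordinary global declarations that would exercise the three evaluation kinds dispatched at the end of this example (the UPC shared/strict path aside); the _Complex initializer assumes Clang's C99-complex extension in C++ mode:

struct Agg { int a, b; };

int g_scalar = 42;               // TEK_Scalar: plain scalar initializer
_Complex double g_cplx = 1.0;    // TEK_Complex (Clang extension in C++)
Agg g_agg = {1, 2};              // TEK_Aggregate: handled via EmitAggExpr
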
Example #2
void MicrosoftCXXNameMangler::mangleExtraDimensions(QualType ElementTy) {
  llvm::SmallVector<llvm::APInt, 3> Dimensions;
  for (;;) {
    if (ElementTy->isConstantArrayType()) {
      const ConstantArrayType *CAT =
          static_cast<const ConstantArrayType *>(ElementTy.getTypePtr());
      Dimensions.push_back(CAT->getSize());
      ElementTy = CAT->getElementType();
    } else if (ElementTy->isVariableArrayType()) {
      assert(false && "Don't know how to mangle VLAs!");
    } else if (ElementTy->isDependentSizedArrayType()) {
      // The dependent expression has to be folded into a constant (TODO).
      assert(false && "Don't know how to mangle dependent-sized arrays!");
    } else if (ElementTy->isIncompleteArrayType()) {
      // Peel off the incomplete-array layer and keep walking the element type.
      ElementTy = cast<IncompleteArrayType>(ElementTy->getAsArrayTypeUnsafe())
                      ->getElementType();
    } else
      break;
  }
  mangleQualifiers(ElementTy.getQualifiers(), false);
  // If there are any additional dimensions, mangle them now.
  if (Dimensions.size() > 0) {
    Out << 'Y';
    // <dimension-count> ::= <number> # number of extra dimensions
    mangleNumber(Dimensions.size());
    for (unsigned Dim = 0; Dim < Dimensions.size(); ++Dim) {
      mangleNumber(Dimensions[Dim].getLimitedValue());
    }
  }
  mangleType(ElementTy.getLocalUnqualifiedType());
}
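
A hypothetical input for this example: the constant dimensions of an array pointee are what the loop above collects into Dimensions before the element type is mangled.

struct S {
  int grid[2][3][4];            // three constant dimensions: 2, 3, 4
};

int (*slice)[3][4] = nullptr;   // pointee 'int[3][4]' contributes dimensions 3 and 4
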
Example #3
static bool rewriteToObjCProperty(const ObjCMethodDecl *Getter,
                                  const ObjCMethodDecl *Setter,
                                  const NSAPI &NS, edit::Commit &commit) {
  ASTContext &Context = NS.getASTContext();
  std::string PropertyString = "@property";
  const ParmVarDecl *argDecl = *Setter->param_begin();
  QualType ArgType = Context.getCanonicalType(argDecl->getType());
  Qualifiers::ObjCLifetime propertyLifetime = ArgType.getObjCLifetime();
  
  if (ArgType->isObjCRetainableType() &&
      propertyLifetime == Qualifiers::OCL_Strong) {
    if (const ObjCObjectPointerType *ObjPtrTy =
        ArgType->getAs<ObjCObjectPointerType>()) {
      ObjCInterfaceDecl *IDecl = ObjPtrTy->getObjectType()->getInterface();
      if (IDecl &&
          IDecl->lookupNestedProtocol(&Context.Idents.get("NSCopying")))
        PropertyString += "(copy)";
    }
  }
  else if (propertyLifetime == Qualifiers::OCL_Weak)
    // TODO. More precise determination of 'weak' attribute requires
    // looking into setter's implementation for backing weak ivar.
    PropertyString += "(weak)";
  else
    PropertyString += "(unsafe_unretained)";
  
  // strip off any ARC lifetime qualifier.
  QualType CanResultTy = Context.getCanonicalType(Getter->getResultType());
  if (CanResultTy.getQualifiers().hasObjCLifetime()) {
    Qualifiers Qs = CanResultTy.getQualifiers();
    Qs.removeObjCLifetime();
    CanResultTy = Context.getQualifiedType(CanResultTy.getUnqualifiedType(), Qs);
  }
  PropertyString += " ";
  PropertyString += CanResultTy.getAsString(Context.getPrintingPolicy());
  PropertyString += " ";
  PropertyString += Getter->getNameAsString();
  commit.replace(CharSourceRange::getCharRange(Getter->getLocStart(),
                                               Getter->getDeclaratorEndLoc()),
                 PropertyString);
  SourceLocation EndLoc = Setter->getDeclaratorEndLoc();
  // Get location past ';'
  EndLoc = EndLoc.getLocWithOffset(1);
  commit.remove(CharSourceRange::getCharRange(Setter->getLocStart(), EndLoc));
  return true;
}
Example #4
static ExprResult
BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
                        const CXXScopeSpec &SS, FieldDecl *Field,
                        DeclAccessPair FoundDecl,
                        const DeclarationNameInfo &MemberNameInfo) {
  // x.a is an l-value if 'a' has a reference type. Otherwise:
  // x.a is an l-value/x-value/pr-value if the base is (and note
  //   that *x is always an l-value), except that if the base isn't
  //   an ordinary object then we must have an rvalue.
  ExprValueKind VK = VK_LValue;
  ExprObjectKind OK = OK_Ordinary;
  if (!IsArrow) {
    if (BaseExpr->getObjectKind() == OK_Ordinary)
      VK = BaseExpr->getValueKind();
    else
      VK = VK_RValue;
  }
  if (VK != VK_RValue && Field->isBitField())
    OK = OK_BitField;
  
  // Figure out the type of the member; see C99 6.5.2.3p3, C++ [expr.ref]
  QualType MemberType = Field->getType();
  if (const ReferenceType *Ref = MemberType->getAs<ReferenceType>()) {
    MemberType = Ref->getPointeeType();
    VK = VK_LValue;
  } else {
    QualType BaseType = BaseExpr->getType();
    if (IsArrow) BaseType = BaseType->getAs<PointerType>()->getPointeeType();

    Qualifiers BaseQuals = BaseType.getQualifiers();

    // CVR attributes from the base are picked up by members,
    // except that 'mutable' members don't pick up 'const'.
    if (Field->isMutable()) BaseQuals.removeConst();

    Qualifiers MemberQuals =
        S.Context.getCanonicalType(MemberType).getQualifiers();

    assert(!MemberQuals.hasAddressSpace());

    Qualifiers Combined = BaseQuals + MemberQuals;
    if (Combined != MemberQuals)
      MemberType = S.Context.getQualifiedType(MemberType, Combined);
  }

  S.UnusedPrivateFields.remove(Field);

  ExprResult Base =
      S.PerformObjectMemberConversion(BaseExpr, SS.getScopeRep(),
                                      FoundDecl, Field);
  if (Base.isInvalid())
    return ExprError();
  return S.Owned(BuildMemberExpr(S, S.Context, Base.take(), IsArrow,
                                 Field, FoundDecl, MemberNameInfo,
                                 MemberType, VK, OK));
}
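
A small standalone sketch of the qualifier merge above: members of a const base pick up 'const', except mutable members, which drop it. The static_asserts encode the expected member-access types.

#include <type_traits>

struct S {
  int a;
  mutable int b;
};

void check(const S &s) {
  // 's.a' merges the base's const; 's.b' does not, because 'b' is mutable.
  static_assert(std::is_same<decltype((s.a)), const int &>::value, "");
  static_assert(std::is_same<decltype((s.b)), int &>::value, "");
}
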
Example #5
 void VisitTypedefType(const TypedefType *T) {
   AddDecl(T->getDecl());
   QualType UnderlyingType = T->getDecl()->getUnderlyingType();
   VisitQualifiers(UnderlyingType.getQualifiers());
   while (const TypedefType *Underlying =
              dyn_cast<TypedefType>(UnderlyingType.getTypePtr())) {
     UnderlyingType = Underlying->getDecl()->getUnderlyingType();
   }
   AddType(UnderlyingType.getTypePtr());
   VisitType(T);
 }
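
An illustrative typedef chain for this visitor: the while loop walks through Meters and Distance until the underlying non-typedef type (double) is reached and added.

typedef double Distance;
typedef Distance Meters;   // chain: Meters -> Distance -> double

Meters altitude = 1.0;
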
Example #6
// <type>                   ::= <pointer-to-member-type>
// <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
//                                                          <class name> <type>
void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T) {
  QualType PointeeType = T->getPointeeType();
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
    Out << '8';
    mangleName(cast<RecordType>(T->getClass())->getDecl());
    mangleType(FPT, NULL, false, true);
  } else {
    mangleQualifiers(PointeeType.getQualifiers(), true);
    mangleName(cast<RecordType>(T->getClass())->getDecl());
    mangleType(PointeeType.getLocalUnqualifiedType());
  }
}
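
Hypothetical inputs for the two branches above: a pointer to member function takes the '8' + class-name + prototype path, while a pointer to data member goes through the qualifier branch.

struct C {
  int n;
  void f() {}
};

int  C::*pd  = &C::n;      // data member pointer
void (C::*pf)() = &C::f;   // member function pointer
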
Example #7
unsigned clang_getAddressSpace(CXType CT) {
  QualType T = GetQualType(CT);

  // For a non-language-specific (target) address space, use the separate helper function.
  if (T.getAddressSpace() >= LangAS::FirstTargetAddressSpace) {
    return T.getQualifiers().getAddressSpaceAttributePrintValue();
  }
  // FIXME: this function returns either a LangAS or a target AS
  // Those values can overlap which makes this function rather unpredictable
  // for any caller
  return (unsigned)T.getAddressSpace();
}
Example #8
llvm::Value *CodeGenFunction::EmitUPCPointerDiff(
    llvm::Value *Pointer1, llvm::Value *Pointer2, const Expr *E) {

  const BinaryOperator *expr = cast<BinaryOperator>(E);
  Expr *LHSOperand = expr->getLHS();
  QualType PtrTy = LHSOperand->getType();

  llvm::Value *Phase1 = EmitUPCPointerGetPhase(Pointer1);
  llvm::Value *Thread1 = EmitUPCPointerGetThread(Pointer1);
  llvm::Value *Addr1 = EmitUPCPointerGetAddr(Pointer1);

  llvm::Value *Phase2 = EmitUPCPointerGetPhase(Pointer2);
  llvm::Value *Thread2 = EmitUPCPointerGetThread(Pointer2);
  llvm::Value *Addr2 = EmitUPCPointerGetAddr(Pointer2);

  QualType PointeeTy = PtrTy->getAs<PointerType>()->getPointeeType();
  QualType ElemTy;
  llvm::Value *Dim;
  llvm::tie(ElemTy, Dim) = unwrapArray(*this, PointeeTy);
  Qualifiers Quals = ElemTy.getQualifiers();

  llvm::Constant *ElemSize = 
    llvm::ConstantInt::get(SizeTy, getContext().getTypeSizeInChars(ElemTy).getQuantity());
  llvm::Value *AddrByteDiff = Builder.CreateSub(Addr1, Addr2, "addr.diff");
  llvm::Value *AddrDiff = Builder.CreateExactSDiv(AddrByteDiff, ElemSize);

  llvm::Value *Result;

  if (Quals.getLayoutQualifier() == 0) {
    Result = AddrDiff;
  } else {
    llvm::Constant *B = llvm::ConstantInt::get(SizeTy, Quals.getLayoutQualifier());
    llvm::Value *Threads = Builder.CreateZExt(EmitUPCThreads(), SizeTy);

    llvm::Value *ThreadDiff = Builder.CreateMul(Builder.CreateSub(Thread1, Thread2, "thread.diff"), B);
    llvm::Value *PhaseDiff = Builder.CreateSub(Phase1, Phase2, "phase.diff");
    llvm::Value *BlockDiff =
      Builder.CreateMul(Builder.CreateSub(AddrDiff, PhaseDiff), Threads, "block.diff");

    Result = Builder.CreateAdd(BlockDiff, Builder.CreateAdd(ThreadDiff, PhaseDiff), "ptr.diff");
  }

  if (Dim) {
    Result = Builder.CreateExactSDiv(Result, Dim, "diff.dim");
  }

  return Result;
}
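
A standalone arithmetic sketch of the blocked pointer difference computed above, with made-up values for the block size B, THREADS, the element size, and the two (phase, thread, addr) tuples:

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t B = 4, THREADS = 3, ElemSize = 8;
  int64_t Phase1 = 1, Thread1 = 2, Addr1 = 1000 + 5 * ElemSize;
  int64_t Phase2 = 3, Thread2 = 0, Addr2 = 1000;

  int64_t AddrDiff   = (Addr1 - Addr2) / ElemSize;     // per-thread element distance
  int64_t ThreadDiff = (Thread1 - Thread2) * B;
  int64_t PhaseDiff  = Phase1 - Phase2;
  int64_t BlockDiff  = (AddrDiff - PhaseDiff) * THREADS;

  int64_t Diff = BlockDiff + ThreadDiff + PhaseDiff;   // "ptr.diff"
  std::printf("difference = %lld elements\n", (long long)Diff);
  return 0;
}
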
Example #9
CXXMethodDecl *CXXRecordDecl::getCopyAssignmentOperator(bool ArgIsConst) const {
  ASTContext &Context = getASTContext();
  QualType Class = Context.getTypeDeclType(const_cast<CXXRecordDecl *>(this));
  DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
  
  llvm::SmallVector<std::pair<CXXMethodDecl *, Qualifiers>, 4> Found;
  DeclContext::lookup_const_iterator Op, OpEnd;
  for (llvm::tie(Op, OpEnd) = this->lookup(Name); Op != OpEnd; ++Op) {
    // C++ [class.copy]p9:
    //   A user-declared copy assignment operator is a non-static non-template
    //   member function of class X with exactly one parameter of type X, X&,
    //   const X&, volatile X& or const volatile X&.
    const CXXMethodDecl* Method = dyn_cast<CXXMethodDecl>(*Op);
    if (!Method || Method->isStatic() || Method->getPrimaryTemplate())
      continue;
    
    const FunctionProtoType *FnType 
      = Method->getType()->getAs<FunctionProtoType>();
    assert(FnType && "Overloaded operator has no prototype.");
    // Don't assert on this; an invalid decl might have been left in the AST.
    if (FnType->getNumArgs() != 1 || FnType->isVariadic())
      continue;
    
    QualType ArgType = FnType->getArgType(0);
    Qualifiers Quals;
    if (const LValueReferenceType *Ref = ArgType->getAs<LValueReferenceType>()) {
      ArgType = Ref->getPointeeType();
      // If we have a const argument and we have a reference to a non-const,
      // this function does not match.
      if (ArgIsConst && !ArgType.isConstQualified())
        continue;
      
      Quals = ArgType.getQualifiers();
    } else {
      // By-value copy-assignment operators are treated like const X&
      // copy-assignment operators.
      Quals = Qualifiers::fromCVRMask(Qualifiers::Const);
    }
    
    if (!Context.hasSameUnqualifiedType(ArgType, Class))
      continue;

    // Save this copy-assignment operator. It might be "the one".
    Found.push_back(std::make_pair(const_cast<CXXMethodDecl *>(Method), Quals));
  }
  
  // Use a simplistic form of overload resolution to find the candidate.
  return GetBestOverloadCandidateSimple(Found);
}
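
An illustrative class for this lookup: both operators below are user-declared copy-assignment operators that get collected, and the recorded Quals decide which one survives for a given ArgIsConst.

struct X {
  X &operator=(const X &);      // acceptable whether or not ArgIsConst is set
  X &operator=(volatile X &);   // skipped when ArgIsConst is true
};
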
Example #10
static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
  QualType PointeeTy = PointerTy->getPointeeType();
  const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
  if (!BuiltinTy)
    return false;
    
  // Check the qualifiers.
  Qualifiers Quals = PointeeTy.getQualifiers();
  Quals.removeConst();
    
  if (!Quals.empty())
    return false;
    
  return TypeInfoIsInStandardLibrary(BuiltinTy);
}
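
Illustrative inputs: after stripping 'const', the pointee must carry no other qualifiers for the pointer's type_info to be expected in the standard library.

#include <typeinfo>

const std::type_info &a = typeid(char *);            // builtin pointee, no qualifiers
const std::type_info &b = typeid(const char *);      // 'const' is stripped, still accepted
const std::type_info &c = typeid(volatile char *);   // 'volatile' remains, check fails
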
Example #11
  void printType_Default(llvm::raw_ostream& o, const Value& V) {
    using namespace clang;
    QualType QT = V.getType().getNonReferenceType();
    std::string ValueTyStr;
    if (const TypedefType* TDTy = dyn_cast<TypedefType>(QT))
      ValueTyStr = TDTy->getDecl()->getQualifiedNameAsString();
    else if (const TagType* TTy = dyn_cast<TagType>(QT))
      ValueTyStr = TTy->getDecl()->getQualifiedNameAsString();

    if (ValueTyStr.empty())
      ValueTyStr = QT.getAsString();
    else if (QT.hasQualifiers())
      ValueTyStr = QT.getQualifiers().getAsString() + " " + ValueTyStr;

    o << "(";
    o << ValueTyStr;
    if (V.getType()->isReferenceType())
      o << " &";
    o << ") ";
  }
Example #12
void CodeGenFunction::EmitUPCAggregateCopy(llvm::Value *Dest, llvm::Value *Src,
                                           QualType DestTy, QualType SrcTy,
                                           SourceLocation Loc) {
  const ASTContext& Context = getContext();
  QualType ArgTy = Context.getPointerType(Context.getSharedType(Context.VoidTy));
  QualType SizeType = Context.getSizeType();
  assert(DestTy->getCanonicalTypeUnqualified() == SrcTy->getCanonicalTypeUnqualified());
  llvm::Constant *Len =
    llvm::ConstantInt::get(ConvertType(SizeType),
                           Context.getTypeSizeInChars(DestTy).getQuantity());
  llvm::SmallString<16> Name;
  const char *OpName;
  QualType DestArgTy, SrcArgTy;
  if (DestTy.getQualifiers().hasShared() && SrcTy.getQualifiers().hasShared()) {
    // both shared
    OpName = "copy";
    DestArgTy = SrcArgTy = ArgTy;
  } else if (DestTy.getQualifiers().hasShared()) {
    OpName = "put";
    DestArgTy = ArgTy;
    SrcArgTy = Context.VoidPtrTy;
  } else if (SrcTy.getQualifiers().hasShared()) {
    OpName = "get";
    DestArgTy = Context.VoidPtrTy;
    SrcArgTy = ArgTy;
  } else {
    llvm_unreachable("expected at least one shared argument");
  }

  Name += "__";
  Name += OpName;
  if (DestTy.getQualifiers().hasStrict() || SrcTy.getQualifiers().hasStrict())
    Name += 's';
  if (CGM.getCodeGenOpts().UPCDebug) Name += "g";
  Name += "blk";
  CallArgList Args;
  Args.add(RValue::get(Dest), DestArgTy);
  Args.add(RValue::get(Src), SrcArgTy);
  Args.add(RValue::get(Len), SizeType);
  if (CGM.getCodeGenOpts().UPCDebug) {
    getFileAndLine(*this, Loc, &Args);
    Name += '5';
  } else {
    Name += '3';
  }
  EmitUPCCall(*this, Name, Context.VoidTy, Args);
}
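
A standalone sketch of how the runtime entry-point name is assembled above; the helper name upcCopyName is hypothetical, but the concatenation order mirrors the code (e.g. "__putblk3", or "__getsgblk5" for a strict get with UPCDebug enabled):

#include <cstdio>
#include <string>

std::string upcCopyName(bool DestShared, bool SrcShared, bool Strict, bool Debug) {
  std::string Name = "__";
  Name += (DestShared && SrcShared) ? "copy" : DestShared ? "put" : "get";
  if (Strict) Name += 's';
  if (Debug)  Name += 'g';
  Name += "blk";
  Name += Debug ? '5' : '3';   // in debug mode, file and line are two extra arguments
  return Name;
}

int main() {
  std::printf("%s\n", upcCopyName(true, false, false, false).c_str());  // __putblk3
  std::printf("%s\n", upcCopyName(false, true, true, true).c_str());    // __getsgblk5
  return 0;
}
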
Example #13
llvm::Value *CodeGenFunction::EmitUPCPointerArithmetic(
    llvm::Value *Pointer, llvm::Value *Index, QualType PtrTy, QualType IndexTy, bool IsSubtraction) {

  llvm::Value *Phase = EmitUPCPointerGetPhase(Pointer);
  llvm::Value *Thread = EmitUPCPointerGetThread(Pointer);
  llvm::Value *Addr = EmitUPCPointerGetAddr(Pointer);

  bool isSigned = IndexTy->isSignedIntegerOrEnumerationType();

  unsigned width = cast<llvm::IntegerType>(Index->getType())->getBitWidth();
  if (width != PointerWidthInBits) {
    // Zero-extend or sign-extend the index to the pointer width, according to
    // whether the index is signed or not.
    Index = Builder.CreateIntCast(Index, PtrDiffTy, isSigned, "idx.ext");
  }

  QualType PointeeTy = PtrTy->getAs<PointerType>()->getPointeeType();
  QualType ElemTy;
  llvm::Value *Dim;
  llvm::tie(ElemTy, Dim) = unwrapArray(*this, PointeeTy);
  if (Dim) {
    Index = Builder.CreateMul(Index, Dim, "idx.dim", !isSigned, isSigned);
  }
  Qualifiers Quals = ElemTy.getQualifiers();

  if (IsSubtraction)
    Index = Builder.CreateNeg(Index);

  if (Quals.getLayoutQualifier() == 0) {
    // UPC 1.2 6.4.2p2
    // If the shared array is declared with indefinite block size,
    // the result of the pointer-to-shared arithmetic is identical
    // to that described for normal C pointers in [ISO/IEC00 Sec 6.5.2]
    // except that the thread of the new pointer shall be the
    // same as that of the original pointer and the phase
    // component is defined to always be zero.

    uint64_t ElemSize = getContext().getTypeSizeInChars(ElemTy).getQuantity();
    llvm::Value *ByteIndex = Builder.CreateMul(Index, llvm::ConstantInt::get(SizeTy, ElemSize));
    Addr = Builder.CreateAdd(Addr, ByteIndex, "add.addr");
  } else {
    llvm::Value *OldPhase = Phase;
    llvm::Constant *B = llvm::ConstantInt::get(SizeTy, Quals.getLayoutQualifier());
    llvm::Value *Threads = Builder.CreateZExt(EmitUPCThreads(), SizeTy);
    llvm::Value *GlobalBlockSize = Builder.CreateNUWMul(Threads, B);
    // Combine the Phase and Thread into a single unit
    llvm::Value *TmpPhaseThread =
      Builder.CreateNUWAdd(Builder.CreateNUWMul(Thread, B),
                           Phase);

    TmpPhaseThread = Builder.CreateAdd(TmpPhaseThread, Index);

    // Div is the number of (B * THREADS) blocks that we need to jump
    // Rem is Thread * B + Phase
    llvm::Value *Div = Builder.CreateSDiv(TmpPhaseThread, GlobalBlockSize);
    llvm::Value *Rem = Builder.CreateSRem(TmpPhaseThread, GlobalBlockSize);
    // Fix the result of the division/modulus
    llvm::Value *Test = Builder.CreateICmpSLT(Rem, llvm::ConstantInt::get(SizeTy, 0));
    Rem = Builder.CreateSelect(Test, Builder.CreateAdd(Rem, GlobalBlockSize), Rem);
    llvm::Value *DecDiv = Builder.CreateSub(Div, llvm::ConstantInt::get(SizeTy, 1));
    Div = Builder.CreateSelect(Test, DecDiv, Div);

    // Split out the Phase and Thread components
    Thread = Builder.CreateUDiv(Rem, B);
    Phase = Builder.CreateURem(Rem, B);

    uint64_t ElemSize = getContext().getTypeSizeInChars(ElemTy).getQuantity();
    // Compute the final Addr.
    llvm::Value *AddrInc =
      Builder.CreateMul(Builder.CreateAdd(Builder.CreateSub(Phase, OldPhase),
                                          Builder.CreateMul(Div, B)),
                        llvm::ConstantInt::get(SizeTy, ElemSize));
    Addr = Builder.CreateAdd(Addr, AddrInc);
  }

  return EmitUPCPointer(Phase, Thread, Addr);
}
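
A standalone arithmetic sketch of the blocked index update above (the address component is omitted); the values of B, THREADS, and the starting phase/thread are made up:

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t B = 4, THREADS = 3;          // layout qualifier and thread count
  int64_t Phase = 2, Thread = 1, Index = 7;  // starting components and the index to add

  // Fold phase and thread into one linear position, then add the index.
  int64_t Tmp = Thread * B + Phase + Index;
  int64_t GlobalBlock = B * THREADS;

  // Signed div/rem with the same fix-up the selects perform in the IR.
  int64_t Div = Tmp / GlobalBlock;
  int64_t Rem = Tmp % GlobalBlock;
  if (Rem < 0) { Rem += GlobalBlock; --Div; }

  // Split the remainder back into thread and phase; Div counts the whole
  // (B * THREADS) blocks jumped, which feeds the address update.
  Thread = Rem / B;
  Phase  = Rem % B;
  std::printf("phase=%lld thread=%lld blocks=%lld\n",
              (long long)Phase, (long long)Thread, (long long)Div);
  return 0;
}
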
Example #14
void USRGenerator::VisitType(QualType T) {
  // This method mangles in USR information for types.  It can possibly
  // just reuse the naming-mangling logic used by codegen, although the
  // requirements for USRs might not be the same.
  ASTContext &Ctx = *Context;

  do {
    T = Ctx.getCanonicalType(T);
    Qualifiers Q = T.getQualifiers();
    unsigned qVal = 0;
    if (Q.hasConst())
      qVal |= 0x1;
    if (Q.hasVolatile())
      qVal |= 0x2;
    if (Q.hasRestrict())
      qVal |= 0x4;
    if(qVal)
      Out << ((char) ('0' + qVal));

    // Mangle in ObjC GC qualifiers?

    if (const PackExpansionType *Expansion = T->getAs<PackExpansionType>()) {
      Out << 'P';
      T = Expansion->getPattern();
    }
    
    if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
      unsigned char c = '\0';
      switch (BT->getKind()) {
        case BuiltinType::Void:
          c = 'v'; break;
        case BuiltinType::Bool:
          c = 'b'; break;
        case BuiltinType::Char_U:
        case BuiltinType::UChar:
          c = 'c'; break;
        case BuiltinType::Char16:
          c = 'q'; break;
        case BuiltinType::Char32:
          c = 'w'; break;
        case BuiltinType::UShort:
          c = 's'; break;
        case BuiltinType::UInt:
          c = 'i'; break;
        case BuiltinType::ULong:
          c = 'l'; break;
        case BuiltinType::ULongLong:
          c = 'k'; break;
        case BuiltinType::UInt128:
          c = 'j'; break;
        case BuiltinType::Char_S:
        case BuiltinType::SChar:
          c = 'C'; break;
        case BuiltinType::WChar_S:
        case BuiltinType::WChar_U:
          c = 'W'; break;
        case BuiltinType::Short:
          c = 'S'; break;
        case BuiltinType::Int:
          c = 'I'; break;
        case BuiltinType::Long:
          c = 'L'; break;
        case BuiltinType::LongLong:
          c = 'K'; break;
        case BuiltinType::Int128:
          c = 'J'; break;
        case BuiltinType::Half:
          c = 'h'; break;
        case BuiltinType::Float:
          c = 'f'; break;
        case BuiltinType::Double:
          c = 'd'; break;
        case BuiltinType::LongDouble:
          c = 'D'; break;
        case BuiltinType::NullPtr:
          c = 'n'; break;
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
        case BuiltinType::Dependent:
        case BuiltinType::OCLImage1d:
        case BuiltinType::OCLImage1dArray:
        case BuiltinType::OCLImage1dBuffer:
        case BuiltinType::OCLImage2d:
        case BuiltinType::OCLImage2dArray:
        case BuiltinType::OCLImage3d:
        case BuiltinType::OCLEvent:
        case BuiltinType::OCLSampler:
          IgnoreResults = true;
          return;
        case BuiltinType::ObjCId:
          c = 'o'; break;
        case BuiltinType::ObjCClass:
          c = 'O'; break;
        case BuiltinType::ObjCSel:
          c = 'e'; break;
      }
      Out << c;
      return;
    }

    // If we have already seen this (non-built-in) type, use a substitution
    // encoding.
    llvm::DenseMap<const Type *, unsigned>::iterator Substitution
      = TypeSubstitutions.find(T.getTypePtr());
    if (Substitution != TypeSubstitutions.end()) {
      Out << 'S' << Substitution->second << '_';
      return;
    } else {
      // Record this as a substitution.
      unsigned Number = TypeSubstitutions.size();
      TypeSubstitutions[T.getTypePtr()] = Number;
    }
    
    if (const PointerType *PT = T->getAs<PointerType>()) {
      Out << '*';
      T = PT->getPointeeType();
      continue;
    }
    if (const ReferenceType *RT = T->getAs<ReferenceType>()) {
      Out << '&';
      T = RT->getPointeeType();
      continue;
    }
    if (const FunctionProtoType *FT = T->getAs<FunctionProtoType>()) {
      Out << 'F';
      VisitType(FT->getResultType());
      for (FunctionProtoType::arg_type_iterator
            I = FT->arg_type_begin(), E = FT->arg_type_end(); I!=E; ++I) {
        VisitType(*I);
      }
      if (FT->isVariadic())
        Out << '.';
      return;
    }
    if (const BlockPointerType *BT = T->getAs<BlockPointerType>()) {
      Out << 'B';
      T = BT->getPointeeType();
      continue;
    }
    if (const ComplexType *CT = T->getAs<ComplexType>()) {
      Out << '<';
      T = CT->getElementType();
      continue;
    }
    if (const TagType *TT = T->getAs<TagType>()) {
      Out << '$';
      VisitTagDecl(TT->getDecl());
      return;
    }
    if (const TemplateTypeParmType *TTP = T->getAs<TemplateTypeParmType>()) {
      Out << 't' << TTP->getDepth() << '.' << TTP->getIndex();
      return;
    }
    if (const TemplateSpecializationType *Spec
                                    = T->getAs<TemplateSpecializationType>()) {
      Out << '>';
      VisitTemplateName(Spec->getTemplateName());
      Out << Spec->getNumArgs();
      for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
        VisitTemplateArgument(Spec->getArg(I));
      return;
    }
    
    // Unhandled type.
    Out << ' ';
    break;
  } while (true);
}
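
An illustrative input for this encoder: the type of 'handler' is a pointer to a FunctionProtoType, so the visitor first emits '*' for the pointer and then 'F' followed by the return and parameter types.

typedef int (*callback)(const char *, unsigned);

callback handler = nullptr;
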
Example #15
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::Value *DestPtr = Dest.getAddr();

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    llvm::PointerType *APType =
      cast<llvm::PointerType>(DestPtr->getType());
    llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    assert(NumInitElements <= NumArrayElements);

    QualType elementType = E->getType().getCanonicalType();
    elementType = CGF.getContext().getQualifiedType(
                    cast<ArrayType>(elementType)->getElementType(),
                    elementType.getQualifiers() + Dest.getQualifiers());

    // DestPtr is an array*.  Construct an elementType* by drilling
    // down a level.
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
    llvm::Value *indices[] = { zero, zero };
    llvm::Value *begin =
      Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");

    // Exception safety requires us to destroy all the
    // already-constructed members if an initializer throws.
    // For that, we'll need an EH cleanup.
    QualType::DestructionKind dtorKind = elementType.isDestructedType();
    llvm::AllocaInst *endOfInit = 0;
    EHScopeStack::stable_iterator cleanup;
    llvm::Instruction *cleanupDominator = 0;
    if (CGF.needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CGF.CreateTempAlloca(begin->getType(),
                                       "arrayinit.endOfInit");
      cleanupDominator = Builder.CreateStore(begin, endOfInit);
      CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                           CGF.getDestroyer(dtorKind));
      cleanup = CGF.EHStack.stable_begin();

    // Otherwise, remember that we didn't need a cleanup.
    } else {
      dtorKind = QualType::DK_none;
    }

    llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

    // The 'current element to initialize'.  The invariants on this
    // variable are complicated.  Essentially, after each iteration of
    // the loop, it points to the last initialized element, except
    // that it points to the beginning of the array before any
    // elements have been initialized.
    llvm::Value *element = begin;

    // Emit the explicit initializers.
    for (uint64_t i = 0; i != NumInitElements; ++i) {
      // Advance to the next element.
      if (i > 0) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

        // Tell the cleanup that it needs to destroy up to this
        // element.  TODO: some of these stores can be trivially
        // observed to be unnecessary.
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      LValue elementLV = CGF.MakeAddrLValue(element, elementType);
      EmitInitializationToLValue(E->getInit(i), elementLV);
    }

    // Check whether there's a non-trivial array-fill expression.
    // Note that this will be a CXXConstructExpr even if the element
    // type is an array (or array of array, etc.) of class type.
    Expr *filler = E->getArrayFiller();
    bool hasTrivialFiller = true;
    if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
      assert(cons->getConstructor()->isDefaultConstructor());
      hasTrivialFiller = cons->getConstructor()->isTrivial();
    }

    // Any remaining elements need to be zero-initialized, possibly
    // using the filler expression.  We can skip this if we're
    // emitting to zeroed memory.
    if (NumInitElements != NumArrayElements &&
        !(Dest.isZeroed() && hasTrivialFiller &&
          CGF.getTypes().isZeroInitializable(elementType))) {

      // Use an actual loop.  This is basically
      //   do { *array++ = filler; } while (array != end);

      // Advance to the start of the rest of the array.
      if (NumInitElements) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      // Compute the end of the array.
      llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                   "arrayinit.end");

      llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
      llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

      // Jump into the body.
      CGF.EmitBlock(bodyBB);
      llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
      currentElement->addIncoming(element, entryBB);

      // Emit the actual filler expression.
      LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);

      // Move on to the next element.
      llvm::Value *nextElement =
        Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

      // Tell the EH cleanup that we finished with the last element.
      if (endOfInit) Builder.CreateStore(nextElement, endOfInit);

      // Leave the loop if we're done.
      llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                               "arrayinit.done");
      llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
      Builder.CreateCondBr(done, endBB, bodyBB);
      currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

      CGF.EmitBlock(endBB);
    }

    // Leave the partial-array cleanup if we entered one.
    if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);

    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
  
  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really empty and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = record->field_begin(),
                                   FieldEnd = record->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // We'll need to enter cleanup scopes in case any of the member
  // initializers throw an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = 0;

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned curInitIndex = 0;
  for (RecordDecl::field_iterator field = record->field_begin(),
                               fieldEnd = record->field_end();
       field != fieldEnd; ++field) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;
    
    // FIXME: volatility
    LValue LV = CGF.EmitLValueForFieldInitialization(DestPtr, *field, 0);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);
    
    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        if (!cleanupDominator)
          cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }
    
    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple()) 
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}
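
Illustrative initializer lists for the two main paths above: an array initializer (explicit elements followed by zero/filler initialization of the rest) and a struct initializer (field-by-field walk with default-initialization of the trailing field).

struct Point { int x, y; unsigned flags; };

int values[8] = {1, 2, 3};   // 3 explicit elements, the remaining 5 zero-filled
Point origin = {0, 0};       // 'flags' falls through to null/zero initialization
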
Example #16
void USRGenerator::VisitType(QualType T) {
  // This method mangles in USR information for types.  It can possibly
  // just reuse the naming-mangling logic used by codegen, although the
  // requirements for USRs might not be the same.
  ASTContext &Ctx = *Context;

  do {
    T = Ctx.getCanonicalType(T);
    Qualifiers Q = T.getQualifiers();
    unsigned qVal = 0;
    if (Q.hasConst())
      qVal |= 0x1;
    if (Q.hasVolatile())
      qVal |= 0x2;
    if (Q.hasRestrict())
      qVal |= 0x4;
    if(qVal)
      Out << ((char) ('0' + qVal));

    // Mangle in ObjC GC qualifiers?

    if (const PackExpansionType *Expansion = T->getAs<PackExpansionType>()) {
      Out << 'P';
      T = Expansion->getPattern();
    }
    
    if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
      unsigned char c = '\0';
      switch (BT->getKind()) {
        case BuiltinType::Void:
          c = 'v'; break;
        case BuiltinType::Bool:
          c = 'b'; break;
        case BuiltinType::UChar:
          c = 'c'; break;
        case BuiltinType::Char16:
          c = 'q'; break;
        case BuiltinType::Char32:
          c = 'w'; break;
        case BuiltinType::UShort:
          c = 's'; break;
        case BuiltinType::UInt:
          c = 'i'; break;
        case BuiltinType::ULong:
          c = 'l'; break;
        case BuiltinType::ULongLong:
          c = 'k'; break;
        case BuiltinType::UInt128:
          c = 'j'; break;
        case BuiltinType::Char_U:
        case BuiltinType::Char_S:
          c = 'C'; break;
        case BuiltinType::SChar:
          c = 'r'; break;
        case BuiltinType::WChar_S:
        case BuiltinType::WChar_U:
          c = 'W'; break;
        case BuiltinType::Short:
          c = 'S'; break;
        case BuiltinType::Int:
          c = 'I'; break;
        case BuiltinType::Long:
          c = 'L'; break;
        case BuiltinType::LongLong:
          c = 'K'; break;
        case BuiltinType::Int128:
          c = 'J'; break;
        case BuiltinType::Half:
          c = 'h'; break;
        case BuiltinType::Float:
          c = 'f'; break;
        case BuiltinType::Double:
          c = 'd'; break;
        case BuiltinType::LongDouble:
          c = 'D'; break;
        case BuiltinType::NullPtr:
          c = 'n'; break;
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
        case BuiltinType::Dependent:
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
        case BuiltinType::Id:
#include "clang/AST/OpenCLImageTypes.def"
        case BuiltinType::OCLEvent:
        case BuiltinType::OCLClkEvent:
        case BuiltinType::OCLQueue:
        case BuiltinType::OCLNDRange:
        case BuiltinType::OCLReserveID:
        case BuiltinType::OCLSampler:
          IgnoreResults = true;
          return;
        case BuiltinType::ObjCId:
          c = 'o'; break;
        case BuiltinType::ObjCClass:
          c = 'O'; break;
        case BuiltinType::ObjCSel:
          c = 'e'; break;
      }
      Out << c;
      return;
    }

    // If we have already seen this (non-built-in) type, use a substitution
    // encoding.
    llvm::DenseMap<const Type *, unsigned>::iterator Substitution
      = TypeSubstitutions.find(T.getTypePtr());
    if (Substitution != TypeSubstitutions.end()) {
      Out << 'S' << Substitution->second << '_';
      return;
    } else {
      // Record this as a substitution.
      unsigned Number = TypeSubstitutions.size();
      TypeSubstitutions[T.getTypePtr()] = Number;
    }
    
    if (const PointerType *PT = T->getAs<PointerType>()) {
      Out << '*';
      T = PT->getPointeeType();
      continue;
    }
    if (const ObjCObjectPointerType *OPT = T->getAs<ObjCObjectPointerType>()) {
      Out << '*';
      T = OPT->getPointeeType();
      continue;
    }
    if (const RValueReferenceType *RT = T->getAs<RValueReferenceType>()) {
      Out << "&&";
      T = RT->getPointeeType();
      continue;
    }
    if (const ReferenceType *RT = T->getAs<ReferenceType>()) {
      Out << '&';
      T = RT->getPointeeType();
      continue;
    }
    if (const FunctionProtoType *FT = T->getAs<FunctionProtoType>()) {
      Out << 'F';
      VisitType(FT->getReturnType());
      for (const auto &I : FT->param_types())
        VisitType(I);
      if (FT->isVariadic())
        Out << '.';
      return;
    }
    if (const BlockPointerType *BT = T->getAs<BlockPointerType>()) {
      Out << 'B';
      T = BT->getPointeeType();
      continue;
    }
    if (const ComplexType *CT = T->getAs<ComplexType>()) {
      Out << '<';
      T = CT->getElementType();
      continue;
    }
    if (const TagType *TT = T->getAs<TagType>()) {
      Out << '$';
      VisitTagDecl(TT->getDecl());
      return;
    }
    if (const ObjCInterfaceType *OIT = T->getAs<ObjCInterfaceType>()) {
      Out << '$';
      VisitObjCInterfaceDecl(OIT->getDecl());
      return;
    }
    if (const ObjCObjectType *OIT = T->getAs<ObjCObjectType>()) {
      Out << 'Q';
      VisitType(OIT->getBaseType());
      for (auto *Prot : OIT->getProtocols())
        VisitObjCProtocolDecl(Prot);
      return;
    }
    if (const TemplateTypeParmType *TTP = T->getAs<TemplateTypeParmType>()) {
      Out << 't' << TTP->getDepth() << '.' << TTP->getIndex();
      return;
    }
    if (const TemplateSpecializationType *Spec
                                    = T->getAs<TemplateSpecializationType>()) {
      Out << '>';
      VisitTemplateName(Spec->getTemplateName());
      Out << Spec->getNumArgs();
      for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
        VisitTemplateArgument(Spec->getArg(I));
      return;
    }
    if (const DependentNameType *DNT = T->getAs<DependentNameType>()) {
      Out << '^';
      // FIXME: Encode the qualifier, don't just print it.
      PrintingPolicy PO(Ctx.getLangOpts());
      PO.SuppressTagKeyword = true;
      PO.SuppressUnwrittenScope = true;
      PO.ConstantArraySizeAsWritten = false;
      PO.AnonymousTagLocations = false;
      DNT->getQualifier()->print(Out, PO);
      Out << ':' << DNT->getIdentifier()->getName();
      return;
    }
    if (const InjectedClassNameType *InjT = T->getAs<InjectedClassNameType>()) {
      T = InjT->getInjectedSpecializationType();
      continue;
    }
    
    // Unhandled type.
    Out << ' ';
    break;
  } while (true);
}
Example #17
ExprResult
Sema::BuildAnonymousStructUnionMemberReference(SourceLocation loc,
                                               IndirectFieldDecl *indirectField,
                                               Expr *baseObjectExpr,
                                               SourceLocation opLoc) {
  // First, build the expression that refers to the base object.
  
  bool baseObjectIsPointer = false;
  Qualifiers baseQuals;
  
  // Case 1:  the base of the indirect field is not a field.
  VarDecl *baseVariable = indirectField->getVarDecl();
  CXXScopeSpec EmptySS;
  if (baseVariable) {
    assert(baseVariable->getType()->isRecordType());
    
    // In principle we could have a member access expression that
    // accesses an anonymous struct/union that's a static member of
    // the base object's class.  However, under the current standard,
    // static data members cannot be anonymous structs or unions.
    // Supporting this is as easy as building a MemberExpr here.
    assert(!baseObjectExpr && "anonymous struct/union is static data member?");
    
    DeclarationNameInfo baseNameInfo(DeclarationName(), loc);
    
    ExprResult result 
      = BuildDeclarationNameExpr(EmptySS, baseNameInfo, baseVariable);
    if (result.isInvalid()) return ExprError();
    
    baseObjectExpr = result.take();    
    baseObjectIsPointer = false;
    baseQuals = baseObjectExpr->getType().getQualifiers();
    
    // Case 2: the base of the indirect field is a field and the user
    // wrote a member expression.
  } else if (baseObjectExpr) {
    // The caller provided the base object expression. Determine
    // whether it's a pointer and whether it adds any qualifiers to the
    // anonymous struct/union fields we're looking into.
    QualType objectType = baseObjectExpr->getType();
    
    if (const PointerType *ptr = objectType->getAs<PointerType>()) {
      baseObjectIsPointer = true;
      objectType = ptr->getPointeeType();
    } else {
      baseObjectIsPointer = false;
    }
    baseQuals = objectType.getQualifiers();
  }  

  // Build the implicit member references to the field of the
  // anonymous struct/union.
  Expr *result = baseObjectExpr;
  IndirectFieldDecl::chain_iterator
      FI = indirectField->chain_begin(), FEnd = indirectField->chain_end();
  
  // Build the first member access in the chain with full information.
  if (!baseVariable) {
    FieldDecl *field = cast<FieldDecl>(*FI);
    
    // FIXME: use the real found-decl info!
    DeclAccessPair foundDecl = DeclAccessPair::make(field, field->getAccess());
    
    // Make a nameInfo that properly uses the anonymous name.
    DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
    
    result = BuildFieldReferenceExpr(*this, result, baseObjectIsPointer,
                                     EmptySS, field, foundDecl,
                                     memberNameInfo).take();
    baseObjectIsPointer = false;
    
    // FIXME: check qualified member access
  }
  
  // In all cases, we should now skip the first declaration in the chain.
  ++FI;
  
  while (FI != FEnd) {
    FieldDecl *field = cast<FieldDecl>(*FI++);
    
    // FIXME: these are somewhat meaningless
    DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
    DeclAccessPair foundDecl = DeclAccessPair::make(field, field->getAccess());
    
    result = BuildFieldReferenceExpr(*this, result, /*isarrow*/ false,
                                     EmptySS, field, 
                                     foundDecl, memberNameInfo).take();
  }
  
  return Owned(result);
}
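
An illustrative input: reading v.x resolves through an IndirectFieldDecl whose chain holds the unnamed union member and the field x, which is exactly the chain the loop above turns into nested member references.

struct S {
  union {     // anonymous union: its members are named through S directly
    int x;
    float f;
  };
};

int read_x(S &v) { return v.x; }
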
Example #18
/// \brief Return the fully qualified type, including fully-qualified
/// versions of any template parameters.
QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx) {
  // In case of myType* we need to strip the pointer first, fully
  // qualify and attach the pointer once again.
  if (isa<PointerType>(QT.getTypePtr())) {
    // Get the qualifiers.
    Qualifiers Quals = QT.getQualifiers();
    QT = getFullyQualifiedType(QT->getPointeeType(), Ctx);
    QT = Ctx.getPointerType(QT);
    // Add back the qualifiers.
    QT = Ctx.getQualifiedType(QT, Quals);
    return QT;
  }

  // In case of myType& we need to strip the reference first, fully
  // qualify and attach the reference once again.
  if (isa<ReferenceType>(QT.getTypePtr())) {
    // Get the qualifiers.
    bool IsLValueRefTy = isa<LValueReferenceType>(QT.getTypePtr());
    Qualifiers Quals = QT.getQualifiers();
    QT = getFullyQualifiedType(QT->getPointeeType(), Ctx);
    // Add the r- or l-value reference type back to the fully
    // qualified one.
    if (IsLValueRefTy)
      QT = Ctx.getLValueReferenceType(QT);
    else
      QT = Ctx.getRValueReferenceType(QT);
    // Add back the qualifiers.
    QT = Ctx.getQualifiedType(QT, Quals);
    return QT;
  }

  // Strip the sugar that marks the type as a substituted template parameter;
  // we won't report it as part of the 'type name', and handling it would
  // make the code below more complex.
  while (isa<SubstTemplateTypeParmType>(QT.getTypePtr())) {
    // Get the qualifiers.
    Qualifiers Quals = QT.getQualifiers();

    QT = dyn_cast<SubstTemplateTypeParmType>(QT.getTypePtr())->desugar();

    // Add back the qualifiers.
    QT = Ctx.getQualifiedType(QT, Quals);
  }

  NestedNameSpecifier *Prefix = nullptr;
  Qualifiers PrefixQualifiers;
  ElaboratedTypeKeyword Keyword = ETK_None;
  if (const auto *ETypeInput = dyn_cast<ElaboratedType>(QT.getTypePtr())) {
    QT = ETypeInput->getNamedType();
    Keyword = ETypeInput->getKeyword();
  }
  // Create a nested name specifier if needed (i.e. if the decl context
  // is not the global scope).
  Prefix = createNestedNameSpecifierForScopeOf(Ctx, QT.getTypePtr(),
                                               true /*FullyQualified*/);

  // Move the qualifiers to the outer type (avoid 'std::const string'!).
  if (Prefix) {
    PrefixQualifiers = QT.getLocalQualifiers();
    QT = QualType(QT.getTypePtr(), 0);
  }

  // In case of template specializations iterate over the arguments and
  // fully qualify them as well.
  if (isa<const TemplateSpecializationType>(QT.getTypePtr()) ||
      isa<const RecordType>(QT.getTypePtr())) {
    // We are asked to fully qualify and we have a Record Type (which
    // may point to a template specialization) or Template
    // Specialization Type. We need to fully qualify their arguments.

    Qualifiers Quals = QT.getLocalQualifiers();
    const Type *TypePtr = getFullyQualifiedTemplateType(Ctx, QT.getTypePtr());
    QT = Ctx.getQualifiedType(TypePtr, Quals);
  }
  if (Prefix || Keyword != ETK_None) {
    QT = Ctx.getElaboratedType(Keyword, Prefix, QT);
    QT = Ctx.getQualifiedType(QT, PrefixQualifiers);
  }
  return QT;
}
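
An illustrative case (names here are hypothetical): fully qualifying the type spelled 'const Inner &' from inside namespace outer should yield 'const outer::Inner &', with the reference and the const re-attached around the qualified record type.

namespace outer {
  struct Inner {};

  // Written without the namespace prefix inside 'outer'; after
  // getFullyQualifiedType it reads 'const outer::Inner &'.
  typedef const Inner &InnerRef;
}
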
Example #19
llvm::Value *CodeGenFunction::EmitUPCPointerCompare(
    llvm::Value *Pointer1, llvm::Value *Pointer2, const BinaryOperator *E) {

  QualType PtrTy = E->getLHS()->getType();

  QualType PointeeTy = PtrTy->getAs<PointerType>()->getPointeeType();
  QualType ElemTy = PointeeTy;
  while (const ArrayType *AT = getContext().getAsArrayType(ElemTy))
    ElemTy = AT->getElementType();
  Qualifiers Quals = ElemTy.getQualifiers();

  // Use the standard transformations so we only
  // have to implement < and ==.
  bool Flip = false;
  switch (E->getOpcode()) {
  case BO_EQ: break;
  case BO_NE: Flip = true; break;
  case BO_LT: break;
  case BO_GT: std::swap(Pointer1, Pointer2); break;
  case BO_LE: std::swap(Pointer1, Pointer2); Flip = true; break;
  case BO_GE: Flip = true; break;
  default: llvm_unreachable("expected a comparison operator");
  }

  llvm::Value *Phase1 = EmitUPCPointerGetPhase(Pointer1);
  llvm::Value *Thread1 = EmitUPCPointerGetThread(Pointer1);
  llvm::Value *Addr1 = EmitUPCPointerGetAddr(Pointer1);

  llvm::Value *Phase2 = EmitUPCPointerGetPhase(Pointer2);
  llvm::Value *Thread2 = EmitUPCPointerGetThread(Pointer2);
  llvm::Value *Addr2 = EmitUPCPointerGetAddr(Pointer2);

  llvm::Value *Result;
  // Equality has to work correctly even if the pointers
  // are not in the same array.
  if (E->getOpcode() == BO_EQ || E->getOpcode() == BO_NE) {
    Result = Builder.CreateAnd(Builder.CreateICmpEQ(Addr1, Addr2),
                               Builder.CreateICmpEQ(Thread1, Thread2));
  } else if (Quals.getLayoutQualifier() == 0) {
    Result = Builder.CreateICmpULT(Addr1, Addr2);
  } else {
    llvm::IntegerType *BoolTy = llvm::Type::getInt1Ty(CGM.getLLVMContext());
    llvm::Constant *LTResult = llvm::ConstantInt::get(BoolTy, 1);
    llvm::Constant *GTResult = llvm::ConstantInt::get(BoolTy, 0);

    llvm::Constant *ElemSize = 
      llvm::ConstantInt::get(SizeTy, getContext().getTypeSizeInChars(ElemTy).getQuantity());
    llvm::Value *AddrByteDiff = Builder.CreateSub(Addr1, Addr2, "addr.diff");
    llvm::Value *PhaseDiff = Builder.CreateSub(Phase1, Phase2, "phase.diff");
    llvm::Value *PhaseByteDiff = Builder.CreateMul(PhaseDiff, ElemSize);
    llvm::Value *TestBlockLT = Builder.CreateICmpSLT(AddrByteDiff, PhaseByteDiff);
    llvm::Value *TestBlockEQ = Builder.CreateICmpEQ(AddrByteDiff, PhaseByteDiff);
    llvm::Value *TestThreadLT = Builder.CreateICmpULT(Thread1, Thread2);
    llvm::Value *TestThreadEQ = Builder.CreateICmpEQ(Thread1, Thread2);
    
    // Compare the block first, then the thread, then the phase
    Result = Builder.CreateSelect(TestBlockLT,
      LTResult,
      Builder.CreateSelect(TestBlockEQ,
        Builder.CreateSelect(TestThreadLT,
          LTResult,
          Builder.CreateSelect(TestThreadEQ,
            Builder.CreateICmpULT(Phase1, Phase2),
            GTResult)),
        GTResult));
  }

  if (Flip)
    Result = Builder.CreateNot(Result);
  return Result;
}