示例#1
0
FieldDecl *
NameResolver::HandleFieldDecl(const SourceLocation &pos,
                              const NameToken &nameToken,
                              TypeSpecifier &spec)
{
  // Resolve the declared type eagerly; an unresolved type is queued so it
  // can be completed in a later pass.
  TypeExpr typeExpr = resolve(spec);
  FieldDecl *fieldDecl = new (pool_) FieldDecl(pos, nameToken, typeExpr);

  if (!typeExpr.resolved())
    tr_.addPending(fieldDecl);

  Atom *fieldName = nameToken.atom;

  // Create the symbol for this field and attach its type only when the
  // type is already known.
  FieldSymbol *symbol = new (pool_) FieldSymbol(fieldDecl, layout_scope_, fieldName);
  fieldDecl->setSymbol(symbol);
  if (typeExpr.resolved())
    symbol->setType(typeExpr.resolved());

  // A duplicate name in the layout scope is an error; point the user at the
  // previous definition. Otherwise publish the new symbol.
  Symbol *previous = layout_scope_->localLookup(fieldName);
  if (previous) {
    cc_.report(fieldDecl->loc(), rmsg::redefined_layout_decl)
      << "field"
      << fieldName
      << previous->kindName()
      << cc_.note(previous->node()->loc(), rmsg::previous_location);
  } else {
    layout_scope_->addSymbol(symbol);
  }

  return fieldDecl;
}
bool CheckFinalizerVisitor::VisitMemberExpr(MemberExpr* member) {
  // Only member accesses that resolve to record fields are of interest.
  FieldDecl* field = dyn_cast<FieldDecl>(member->getMemberDecl());
  if (!field)
    return true;

  // The field's parent record must be known to the cache.
  RecordInfo* info = cache_->Lookup(field->getParent());
  if (!info)
    return true;

  RecordInfo::Fields::iterator it = info->GetFields().find(field);
  if (it == info->GetFields().end())
    return true;

  // Report each member expression at most once.
  if (seen_members_.count(member))
    return true;

  // Flag the access only inside a blacklisted context and only when the
  // field might actually be garbage collected.
  bool as_eagerly_finalized = false;
  if (!blacklist_context_)
    return true;
  if (!MightBeCollected(&it->second, &as_eagerly_finalized))
    return true;

  finalized_fields_.push_back(
      Error(member, as_eagerly_finalized, &it->second));
  seen_members_.insert(member);
  return true;
}
示例#3
0
RecordInfo::Fields* RecordInfo::CollectFields() {
  // Build the field map into a fresh collection so that a partially
  // computed state is never observable.
  Fields* collected = new Fields;
  if (!record_->hasDefinition())
    return collected;

  TracingStatus status = TracingStatus::Unneeded();
  for (RecordDecl::field_iterator it = record_->field_begin(),
                                  end = record_->field_end();
       it != end; ++it) {
    FieldDecl* field = *it;
    // Fields explicitly annotated with GC_PLUGIN_IGNORE are skipped.
    if (Config::IsIgnoreAnnotated(field))
      continue;
    // Try the unexpanded (split, unqualified) type first; this is what
    // allows iterator aliases to be tracked.
    const Type* unexpanded = field->getType().getSplitUnqualifiedType().Ty;
    Edge* edge = CreateEdgeFromOriginalType(unexpanded);
    if (!edge)
      edge = CreateEdge(field->getType().getTypePtrOrNull());
    if (!edge)
      continue;
    status = status.LUB(edge->NeedsTracing(Edge::kRecursive));
    collected->insert(std::make_pair(field, FieldPoint(field, edge)));
  }
  fields_need_tracing_ = status;
  return collected;
}
示例#4
0
void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  // Expansion is only defined for structure types.
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  RecordDecl *RD = RT->getDecl();

  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();

  // Flatten every field: aggregates recurse, scalars are loaded and
  // appended to the argument list. (Inner rvalue renamed so it no longer
  // shadows the RV parameter.)
  for (RecordDecl::field_iterator FI = RD->field_begin(), FE = RD->field_end();
       FI != FE; ++FI) {
    FieldDecl *Field = *FI;
    QualType FieldTy = Field->getType();

    // FIXME: What are the right qualifiers here?
    LValue FieldLV = EmitLValueForField(Addr, Field, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FieldTy)) {
      ExpandTypeToArgs(FieldTy, RValue::getAggregate(FieldLV.getAddress()),
                       Args);
    } else {
      RValue FieldRV = EmitLoadOfLValue(FieldLV, FieldTy);
      assert(FieldRV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(FieldRV.getScalarVal());
    }
  }
}
示例#5
0
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  // Expansion is only defined for structure types.
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  RecordDecl *RD = RT->getDecl();

  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();

  // Reassemble the struct from consecutive function arguments: aggregates
  // recurse, scalars consume one argument each. (Inner lvalue renamed so it
  // no longer shadows the LV parameter.)
  for (RecordDecl::field_iterator FI = RD->field_begin(), FE = RD->field_end();
       FI != FE; ++FI) {
    FieldDecl *Field = *FI;
    QualType FieldTy = Field->getType();

    // FIXME: What are the right qualifiers here?
    LValue FieldLV = EmitLValueForField(Addr, Field, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FieldTy)) {
      AI = ExpandTypeFromArgs(FieldTy, FieldLV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), FieldLV, FieldTy);
      ++AI;
    }
  }

  // Hand back the first argument not consumed by this expansion.
  return AI;
}
示例#6
0
// Resolve an MS-style inline-asm member reference "Base.Member[.Member...]"
// into a byte offset from the start of Base. Returns true on *failure* and
// false on success (note the inverted convention), accumulating the offsets
// of all chained members into |Offset|.
bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
                                unsigned &Offset, SourceLocation AsmLoc) {
  Offset = 0;
  // |Member| may itself be a dotted path; process each component in turn.
  SmallVector<StringRef, 2> Members;
  Member.split(Members, ".");

  LookupResult BaseResult(*this, &Context.Idents.get(Base), SourceLocation(),
                          LookupOrdinaryName);

  // The base name must resolve to exactly one ordinary declaration.
  if (!LookupName(BaseResult, getCurScope()))
    return true;
  
  if(!BaseResult.isSingleResult())
    return true;
  NamedDecl *FoundDecl = BaseResult.getFoundDecl();
  for (StringRef NextMember : Members) {
    // Determine the record type in which to look up the next member,
    // depending on what kind of declaration the previous step produced.
    const RecordType *RT = nullptr;
    if (VarDecl *VD = dyn_cast<VarDecl>(FoundDecl))
      RT = VD->getType()->getAs<RecordType>();
    else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(FoundDecl)) {
      // Mark the typedef referenced so it is not flagged as unused.
      MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
      RT = TD->getUnderlyingType()->getAs<RecordType>();
    } else if (TypeDecl *TD = dyn_cast<TypeDecl>(FoundDecl))
      RT = TD->getTypeForDecl()->getAs<RecordType>();
    else if (FieldDecl *TD = dyn_cast<FieldDecl>(FoundDecl))
      RT = TD->getType()->getAs<RecordType>();
    if (!RT)
      return true;

    // The record must be complete before its layout can be queried.
    if (RequireCompleteType(AsmLoc, QualType(RT, 0),
                            diag::err_asm_incomplete_type))
      return true;

    LookupResult FieldResult(*this, &Context.Idents.get(NextMember),
                             SourceLocation(), LookupMemberName);

    if (!LookupQualifiedName(FieldResult, RT->getDecl()))
      return true;

    if (!FieldResult.isSingleResult())
      return true;
    FoundDecl = FieldResult.getFoundDecl();

    // FIXME: Handle IndirectFieldDecl?
    FieldDecl *FD = dyn_cast<FieldDecl>(FoundDecl);
    if (!FD)
      return true;

    // Accumulate this member's offset (converted from bits to bytes).
    const ASTRecordLayout &RL = Context.getASTRecordLayout(RT->getDecl());
    unsigned i = FD->getFieldIndex();
    CharUnits Result = Context.toCharUnitsFromBits(RL.getFieldOffset(i));
    Offset += (unsigned)Result.getQuantity();
  }

  return false;
}
示例#7
0
// Model a C++ lambda expression in the static analyzer: create a
// temporary-object region for the closure, bind each capture's value into
// the corresponding field of the closure's class, and bind the lambda
// expression to the resulting rvalue.
void ExprEngine::VisitLambdaExpr(const LambdaExpr *LE, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  const LocationContext *LocCtxt = Pred->getLocationContext();

  // Get the region of the lambda itself.
  const MemRegion *R = svalBuilder.getRegionManager().getCXXTempObjectRegion(
      LE, LocCtxt);
  SVal V = loc::MemRegionVal(R);
  
  ProgramStateRef State = Pred->getState();
  
  // If we created a new MemRegion for the lambda, we should explicitly bind
  // the captures.
  // Capture initializers and the closure class's fields are in one-to-one
  // order, so the two iterators advance in lockstep.
  CXXRecordDecl::field_iterator CurField = LE->getLambdaClass()->field_begin();
  for (LambdaExpr::const_capture_init_iterator i = LE->capture_init_begin(),
                                               e = LE->capture_init_end();
       i != e; ++i, ++CurField) {
    FieldDecl *FieldForCapture = *CurField;
    SVal FieldLoc = State->getLValue(FieldForCapture, V);

    SVal InitVal;
    if (!FieldForCapture->hasCapturedVLAType()) {
      Expr *InitExpr = *i;
      assert(InitExpr && "Capture missing initialization expression");
      InitVal = State->getSVal(InitExpr, LocCtxt);
    } else {
      // The field stores the length of a captured variable-length array.
      // These captures don't have initialization expressions; instead we
      // get the length from the VLAType size expression.
      Expr *SizeExpr = FieldForCapture->getCapturedVLAType()->getSizeExpr();
      InitVal = State->getSVal(SizeExpr, LocCtxt);
    }

    State = State->bindLoc(FieldLoc, InitVal);
  }

  // Decay the Loc into an RValue, because there might be a
  // MaterializeTemporaryExpr node above this one which expects the bound value
  // to be an RValue.
  SVal LambdaRVal = State->getSVal(R);

  ExplodedNodeSet Tmp;
  StmtNodeBuilder Bldr(Pred, Tmp, *currBldrCtx);
  // FIXME: is this the right program point kind?
  Bldr.generateNode(LE, Pred,
                    State->BindExpr(LE, LocCtxt, LambdaRVal),
                    nullptr, ProgramPoint::PostLValueKind);

  // FIXME: Move all post/pre visits to ::Visit().
  getCheckerManager().runCheckersForPostStmt(Dst, Tmp, LE, *this);
}
示例#8
0
// Check a field declaration's name against the MEMBER_VAR naming pattern
// (lower camel case with a trailing underscore) and record a warning-level
// Issue for any name that does not match.
void VarCheckVisitor::checkField(clang::Decl* d)
{
  auto loc = getDeclLocation(d);

  // Use a checked cast instead of the previous C-style cast: if a non-field
  // decl is ever routed here, skip it rather than invoke undefined behavior.
  FieldDecl* f = dyn_cast<FieldDecl>(d);
  if (!f)
    return;

  // getName() returns a StringRef; convert explicitly — the implicit
  // StringRef-to-std::string conversion is not available in newer LLVM.
  string name = f->getName().str();
  boost::regex r{MEMBER_VAR};

  if (!boost::regex_match(name, r)) {
    lineIssues_.push_back(Issue(loc.first, loc.second, "Incorrect Field Name",
        "Field names should be in lower camel case with a trailing underscore.",
        WARNING));
  }
}
示例#9
0
// Resolve an MS-style inline-asm reference "Base.Member" (a single member;
// this variant does not handle nested "a.b.c" paths) to the member's byte
// offset within Base. Returns true on *failure* and false on success,
// writing the offset into |Offset|.
bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
                                unsigned &Offset, SourceLocation AsmLoc) {
  Offset = 0;
  LookupResult BaseResult(*this, &Context.Idents.get(Base), SourceLocation(),
                          LookupOrdinaryName);

  // The base name must resolve to exactly one ordinary declaration.
  if (!LookupName(BaseResult, getCurScope()))
    return true;

  if (!BaseResult.isSingleResult())
    return true;

  // Derive the record type to search in, depending on the kind of
  // declaration the base name denotes (variable, typedef, or type decl).
  const RecordType *RT = nullptr;
  NamedDecl *FoundDecl = BaseResult.getFoundDecl();
  if (VarDecl *VD = dyn_cast<VarDecl>(FoundDecl))
    RT = VD->getType()->getAs<RecordType>();
  else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(FoundDecl))
    RT = TD->getUnderlyingType()->getAs<RecordType>();
  else if (TypeDecl *TD = dyn_cast<TypeDecl>(FoundDecl))
    RT = TD->getTypeForDecl()->getAs<RecordType>();
  if (!RT)
    return true;

  // The record must be complete before its layout can be queried.
  // NOTE(review): 0 is passed as the diagnostic ID, so no specific
  // diagnostic is requested here — confirm this matches the intended
  // RequireCompleteType overload for this Sema revision.
  if (RequireCompleteType(AsmLoc, QualType(RT, 0), 0))
    return true;

  LookupResult FieldResult(*this, &Context.Idents.get(Member), SourceLocation(),
                           LookupMemberName);

  if (!LookupQualifiedName(FieldResult, RT->getDecl()))
    return true;

  // FIXME: Handle IndirectFieldDecl?
  FieldDecl *FD = dyn_cast<FieldDecl>(FieldResult.getFoundDecl());
  if (!FD)
    return true;

  // Convert the field's bit offset in the record layout to bytes.
  const ASTRecordLayout &RL = Context.getASTRecordLayout(RT->getDecl());
  unsigned i = FD->getFieldIndex();
  CharUnits Result = Context.toCharUnitsFromBits(RL.getFieldOffset(i));
  Offset = (unsigned)Result.getQuantity();

  return false;
}
示例#10
0
RecordInfo::Fields* RecordInfo::CollectFields() {
  // Always hand back a fresh map; computing into a local keeps the object's
  // state consistent even if collection is interrupted.
  Fields* collected = new Fields;
  if (!record_->hasDefinition())
    return collected;

  TracingStatus status = TracingStatus::Unneeded();
  for (RecordDecl::field_iterator it = record_->field_begin(),
                                  end = record_->field_end();
       it != end; ++it) {
    FieldDecl* field = *it;
    // Skip fields carrying the GC_PLUGIN_IGNORE annotation.
    if (Config::IsIgnoreAnnotated(field))
      continue;
    Edge* edge = CreateEdge(field->getType().getTypePtrOrNull());
    if (!edge)
      continue;
    status = status.LUB(edge->NeedsTracing(Edge::kRecursive));
    collected->insert(std::make_pair(field, FieldPoint(field, edge)));
  }
  fields_need_tracing_ = status;
  return collected;
}
示例#11
0
  void VisitMemberExpr(MemberExpr *Node) {
    // Print the base expression first.
    PrintExpr(Node->getBase());

    // When the base is itself a field of an anonymous struct/union, the
    // accessor token ("." or "->") is implicit in source and must be
    // suppressed.
    FieldDecl *ParentField = nullptr;
    if (MemberExpr *BaseMember = dyn_cast<MemberExpr>(Node->getBase()))
      ParentField = dyn_cast<FieldDecl>(BaseMember->getMemberDecl());

    if (!ParentField || !ParentField->isAnonymousStructOrUnion())
      OS << (Node->isArrow() ? "->" : ".");

    // A reference to an anonymous struct/union field has no name to print.
    if (FieldDecl *FD = dyn_cast<FieldDecl>(Node->getMemberDecl()))
      if (FD->isAnonymousStructOrUnion())
        return;

    // Emit any qualifier, the optional "template" keyword, the member name,
    // and explicit template arguments.
    if (NestedNameSpecifier *Qualifier = Node->getQualifier())
      Qualifier->print(OS, Policy);
    if (Node->hasTemplateKeyword())
      OS << "template ";
    OS << Node->getMemberNameInfo();
    if (Node->hasExplicitTemplateArgs())
      TemplateSpecializationType::PrintTemplateArgumentList(
          OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy);
  }
示例#12
0
bool ATSCollectionVisitor::VisitMemberExpr(MemberExpr *ME)
{
  // In C++, getMemberDecl may return a CXXMethodDecl rather than a field;
  // those are simply skipped. In C, anything else is a bug.
  FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
  if (!FD) {
    if (TransformationManager::isCXXLangOpt())
      return true;
    TransAssert(0 && "Bad FD!\n");
  }

  // Only scalar-typed members are candidates.
  const Type *MemberTy = FD->getType().getTypePtr();
  if (!MemberTy->isScalarType())
    return true;

  // The enclosing record must be a struct or union.
  RecordDecl *Parent = FD->getParent();
  TransAssert(Parent && "NULL RecordDecl!");
  if (!Parent->isStruct() && !Parent->isUnion())
    return true;

  ConsumerInstance->addOneExpr(ME);
  return true;
}
示例#13
0
/// HandleDeclInMainFile - This is called for each top-level decl defined in the
/// main file of the input. Rewrites block literals and block-pointer
/// declarations found in functions, Objective-C methods, variable
/// initializers, typedefs, and record fields.
/// NOTE(review): this uses pre-3.x Clang APIs (RD->isDefinition(),
/// field_begin(*Context)) — the code targets an old Clang revision.
void RewriteBlocks::HandleDeclInMainFile(Decl *D) {
  if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // Since function prototypes don't have ParmDecl's, we check the function
    // prototype. This enables us to rewrite function declarations and
    // definitions using the same code.
    RewriteFunctionProtoType(FD->getType(), FD);

    if (CompoundStmt *Body = FD->getBody()) {
      CurFunctionDef = FD;
      FD->setBody(cast_or_null<CompoundStmt>(RewriteFunctionBody(Body)));
      // This synthesizes and inserts the block "impl" struct, invoke function,
      // and any copy/dispose helper functions.
      InsertBlockLiteralsWithinFunction(FD);
      CurFunctionDef = 0;
    } 
    return;
  }
  if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
    RewriteMethodDecl(MD);
    if (Stmt *Body = MD->getBody()) {
      CurMethodDef = MD;
      RewriteFunctionBody(Body);
      InsertBlockLiteralsWithinMethod(MD);
      CurMethodDef = 0;
    }
  }
  // NOTE(review): unlike the other branches there is no early return after
  // the ObjCMethodDecl case; harmless (a decl cannot also be a VarDecl) but
  // inconsistent with the surrounding style.
  if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (isBlockPointerType(VD->getType())) {
      RewriteBlockPointerDecl(VD);
      if (VD->getInit()) {
        if (BlockExpr *CBE = dyn_cast<BlockExpr>(VD->getInit())) {
          RewriteFunctionBody(CBE->getBody());

          // We've just rewritten the block body in place.
          // Now we snarf the rewritten text and stash it away for later use.
          std::string S = Rewrite.getRewritenText(CBE->getSourceRange());
          RewrittenBlockExprs[CBE] = S;
          std::string Init = SynthesizeBlockInitExpr(CBE, VD);
          // Do the rewrite, using S.size() which contains the rewritten size.
          ReplaceText(CBE->getLocStart(), S.size(), Init.c_str(), Init.size());
          SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(), 
                                  VD->getNameAsCString());
        } else if (CastExpr *CE = dyn_cast<CastExpr>(VD->getInit())) {
          RewriteCastExpr(CE);
        }
      }
    } else if (VD->getType()->isFunctionPointerType()) {
      CheckFunctionPointerDecl(VD->getType(), VD);
      if (VD->getInit()) {
        if (CastExpr *CE = dyn_cast<CastExpr>(VD->getInit())) {
          RewriteCastExpr(CE);
        }
      }
    }
    return;
  }
  if (TypedefDecl *TD = dyn_cast<TypedefDecl>(D)) {
    if (isBlockPointerType(TD->getUnderlyingType()))
      RewriteBlockPointerDecl(TD);
    else if (TD->getUnderlyingType()->isFunctionPointerType()) 
      CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
    return;
  }
  if (RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
    if (RD->isDefinition()) {
      // Rewrite any block-pointer-typed fields of record definitions.
      for (RecordDecl::field_iterator i = RD->field_begin(*Context), 
             e = RD->field_end(*Context); i != e; ++i) {
        FieldDecl *FD = *i;
        if (isBlockPointerType(FD->getType()))
          RewriteBlockPointerDecl(FD);
      }
    }
    return;
  }
}
示例#14
0
/// Build the chain of member references needed to access a member of an
/// anonymous struct/union through an IndirectFieldDecl. The base is either
/// the variable implicitly created for a namespace/function-scope anonymous
/// aggregate (case 1) or a caller-supplied base object expression (case 2);
/// the remaining links of the indirect field's chain are then applied in
/// order to produce the final member expression.
ExprResult
Sema::BuildAnonymousStructUnionMemberReference(SourceLocation loc,
                                               IndirectFieldDecl *indirectField,
                                               Expr *baseObjectExpr,
                                               SourceLocation opLoc) {
  // First, build the expression that refers to the base object.
  
  bool baseObjectIsPointer = false;
  Qualifiers baseQuals;
  
  // Case 1:  the base of the indirect field is not a field.
  VarDecl *baseVariable = indirectField->getVarDecl();
  CXXScopeSpec EmptySS;
  if (baseVariable) {
    assert(baseVariable->getType()->isRecordType());
    
    // In principle we could have a member access expression that
    // accesses an anonymous struct/union that's a static member of
    // the base object's class.  However, under the current standard,
    // static data members cannot be anonymous structs or unions.
    // Supporting this is as easy as building a MemberExpr here.
    assert(!baseObjectExpr && "anonymous struct/union is static data member?");
    
    DeclarationNameInfo baseNameInfo(DeclarationName(), loc);
    
    ExprResult result 
      = BuildDeclarationNameExpr(EmptySS, baseNameInfo, baseVariable);
    if (result.isInvalid()) return ExprError();
    
    baseObjectExpr = result.take();    
    baseObjectIsPointer = false;
    baseQuals = baseObjectExpr->getType().getQualifiers();
    
    // Case 2: the base of the indirect field is a field and the user
    // wrote a member expression.
  } else if (baseObjectExpr) {
    // The caller provided the base object expression. Determine
    // whether its a pointer and whether it adds any qualifiers to the
    // anonymous struct/union fields we're looking into.
    QualType objectType = baseObjectExpr->getType();
    
    if (const PointerType *ptr = objectType->getAs<PointerType>()) {
      baseObjectIsPointer = true;
      objectType = ptr->getPointeeType();
    } else {
      baseObjectIsPointer = false;
    }
    baseQuals = objectType.getQualifiers();
  }  

  // Build the implicit member references to the field of the
  // anonymous struct/union.
  Expr *result = baseObjectExpr;
  IndirectFieldDecl::chain_iterator
  FI = indirectField->chain_begin(), FEnd = indirectField->chain_end();
  
  // Build the first member access in the chain with full information.
  if (!baseVariable) {
    FieldDecl *field = cast<FieldDecl>(*FI);
    
    // FIXME: use the real found-decl info!
    DeclAccessPair foundDecl = DeclAccessPair::make(field, field->getAccess());
    
    // Make a nameInfo that properly uses the anonymous name.
    DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
    
    result = BuildFieldReferenceExpr(*this, result, baseObjectIsPointer,
                                     EmptySS, field, foundDecl,
                                     memberNameInfo).take();
    baseObjectIsPointer = false;
    
    // FIXME: check qualified member access
  }
  
  // In all cases, we should now skip the first declaration in the chain.
  ++FI;
  
  // Each remaining link is an implicit member access (never an arrow:
  // anonymous members are direct subobjects of the previous result).
  while (FI != FEnd) {
    FieldDecl *field = cast<FieldDecl>(*FI++);
    
    // FIXME: these are somewhat meaningless
    DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
    DeclAccessPair foundDecl = DeclAccessPair::make(field, field->getAccess());
    
    result = BuildFieldReferenceExpr(*this, result, /*isarrow*/ false,
                                     EmptySS, field, 
                                     foundDecl, memberNameInfo).take();
  }
  
  return Owned(result);
}
/// Lay out the fields of a non-union record into LLVM struct field types,
/// handling runs of bitfields specially. Returns false when layout could not
/// be completed with the current settings (the caller retries, e.g. with a
/// packed LLVM struct).
bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  // For C++ classes, the non-virtual bases come first.
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    if (!LayoutNonVirtualBases(RD, Layout))
      return false;

  unsigned FieldNo = 0;
  
  for (RecordDecl::field_iterator FI = D->field_begin(), FE = D->field_end();
       FI != FE; ++FI, ++FieldNo) {
    FieldDecl *FD = *FI;

    // If this field is a bitfield, layout all of the consecutive
    // non-zero-length bitfields and the last zero-length bitfield; these will
    // all share storage.
    if (FD->isBitField()) {
      // If all we have is a zero-width bitfield, skip it.
      if (FD->getBitWidthValue(Types.getContext()) == 0)
        continue;

      // Layout this range of bitfields.
      // Note: LayoutBitfields advances FI/FieldNo past the whole run.
      if (!LayoutBitfields(Layout, FieldNo, FI, FE)) {
        assert(!Packed &&
               "Could not layout bitfields even with a packed LLVM struct!");
        return false;
      }
      assert(FI != FE && "Advanced past the last bitfield");
      continue;
    }

    if (!LayoutField(FD, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // Lay out the virtual bases.  The MS ABI uses a different
    // algorithm here due to the lack of primary virtual bases.
    if (Types.getTarget().getCXXABI().hasPrimaryVBases()) {
      RD->getIndirectPrimaryBases(IndirectPrimaryBases);
      if (Layout.isPrimaryBaseVirtual())
        IndirectPrimaryBases.insert(Layout.getPrimaryBase());

      if (!LayoutVirtualBases(RD, Layout))
        return false;
    } else {
      if (!MSLayoutVirtualBases(RD, Layout))
        return false;
    }
  }
  
  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}
示例#16
0
/// Classify a DSL memory access rooted at expression E, updating the kernel
/// statistics (KS) for the accessed Image/Accessor/Mask/Domain field and
/// upgrading the kernel type (point/local/user operator) as needed.
/// Returns true for accesses that count toward image loads/stores, false
/// otherwise. A direct access to an Image is a hard error (diagnose + exit).
///
/// Idiom fix: the previous isa<> followed by dyn_cast<> pairs are replaced
/// by a single dyn_cast in the condition, per the LLVM coding standards.
bool TransferFunctions::checkImageAccess(Expr *E, MemoryAccess curMemAcc) {
  // discard implicit casts and paren expressions
  E = E->IgnoreParenImpCasts();

  // match Image(), Accessor(), Mask(), and Domain() calls
  if (CXXOperatorCallExpr *COCE = dyn_cast<CXXOperatorCallExpr>(E)) {
    if (MemberExpr *ME = dyn_cast<MemberExpr>(COCE->getArg(0))) {
      if (FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
        MemoryAccess memAcc = KS.imagesToAccess[FD];
        MemoryAccessDetail memAccDetail = KS.imagesToAccessDetail[FD];

        memAcc = (MemoryAccess) (memAcc|curMemAcc);
        KS.imagesToAccess[FD] = memAcc;

        // access to Image
        if (KS.compilerClasses.isTypeOfTemplateClass(FD->getType(),
              KS.compilerClasses.Image)) {
          KS.Diags.Report(E->getLocStart(), KS.DiagIDImageAccess) <<
            FD->getNameAsString();

          exit(EXIT_FAILURE);
        }

        // access to Accessor
        if (KS.compilerClasses.isTypeOfTemplateClass(FD->getType(),
              KS.compilerClasses.Accessor)) {
          if (curMemAcc & READ_ONLY) KS.num_img_loads++;
          if (curMemAcc & WRITE_ONLY) KS.num_img_stores++;

          // The operator arity tells us how the accessor is addressed.
          switch (COCE->getNumArgs()) {
            default:
              break;
            case 1:
              memAccDetail = (MemoryAccessDetail) (memAccDetail|NO_STRIDE);
              if (KS.kernelType < PointOperator) KS.kernelType = PointOperator;
              break;
            case 2:
              // TODO: check for Mask or Domain as parameter and check if we
              // need only STRIDE_X or STRIDE_Y
              memAccDetail = (MemoryAccessDetail) (memAccDetail|STRIDE_XY);
              if (KS.kernelType < LocalOperator) KS.kernelType = LocalOperator;
              break;
            case 3:
              memAccDetail = (MemoryAccessDetail)
                (memAccDetail|checkStride(COCE->getArg(1), COCE->getArg(2)));
              if (memAccDetail > NO_STRIDE && KS.kernelType < LocalOperator) {
                KS.kernelType = LocalOperator;
              }
              break;
          }
          KS.imagesToAccessDetail[FD] = memAccDetail;

          return true;
        }

        // access to Mask
        if (KS.compilerClasses.isTypeOfTemplateClass(FD->getType(),
              KS.compilerClasses.Mask)) {
          if (curMemAcc & READ_ONLY) KS.num_mask_loads++;
          if (curMemAcc & WRITE_ONLY) KS.num_mask_stores++;

          if (KS.inLambdaFunction) {
            // TODO: check for Mask as parameter and check if we need only
            // STRIDE_X or STRIDE_Y
            memAccDetail = (MemoryAccessDetail) (memAccDetail|STRIDE_XY);
            if (KS.kernelType < LocalOperator) KS.kernelType = LocalOperator;
          } else {
            assert(COCE->getNumArgs()==3 &&
                "Mask access requires x and y parameters!");
            memAccDetail = (MemoryAccessDetail)
              (memAccDetail|checkStride(COCE->getArg(1), COCE->getArg(2)));
            if (memAccDetail > NO_STRIDE && KS.kernelType < LocalOperator) {
              KS.kernelType = LocalOperator;
            }
          }
          KS.imagesToAccessDetail[FD] = memAccDetail;

          return false;
        }

        // access to Domain
        if (KS.compilerClasses.isTypeOfClass(FD->getType(),
              KS.compilerClasses.Domain)) {
          if (curMemAcc & READ_ONLY) KS.num_mask_loads++;
          if (curMemAcc & WRITE_ONLY) KS.num_mask_stores++;

          if (KS.inLambdaFunction) {
            // TODO: check for Domain as parameter and check if we need only
            // STRIDE_X or STRIDE_Y
            memAccDetail = (MemoryAccessDetail) (memAccDetail|STRIDE_XY);
            if (KS.kernelType < LocalOperator) KS.kernelType = LocalOperator;
          } else {
            assert(COCE->getNumArgs()==3 &&
                "Domain access requires x and y parameters!");
            memAccDetail = (MemoryAccessDetail)
              (memAccDetail|checkStride(COCE->getArg(1), COCE->getArg(2)));
            if (memAccDetail > NO_STRIDE && KS.kernelType < LocalOperator) {
              KS.kernelType = LocalOperator;
            }
          }
          KS.imagesToAccessDetail[FD] = memAccDetail;

          return false;
        }
      }
    }
  }

  // match Image->getPixel(), output(), and outputAtPixel() calls
  if (CXXMemberCallExpr *CMCE = dyn_cast<CXXMemberCallExpr>(E)) {
    if (MemberExpr *ME = dyn_cast<MemberExpr>(CMCE->getCallee())) {
      if (MemberExpr *MEAcc = dyn_cast<MemberExpr>(ME->getBase())) {
        if (FieldDecl *FD = dyn_cast<FieldDecl>(MEAcc->getMemberDecl())) {
          // Image
          if (KS.compilerClasses.isTypeOfTemplateClass(FD->getType(),
                KS.compilerClasses.Image)) {
            KS.Diags.Report(E->getLocStart(), KS.DiagIDImageAccess) <<
              FD->getNameAsString();

            exit(EXIT_FAILURE);
          }

          // Accessor
          if (KS.compilerClasses.isTypeOfTemplateClass(FD->getType(),
                KS.compilerClasses.Accessor)) {
            // Accessor->getPixel()
            if (ME->getMemberNameInfo().getAsString()=="getPixel") {
              MemoryAccess memAcc = KS.imagesToAccess[FD];
              MemoryAccessDetail memAccDetail = KS.imagesToAccessDetail[FD];

              memAcc = (MemoryAccess) (memAcc|curMemAcc);
              KS.imagesToAccess[FD] = memAcc;

              memAccDetail = (MemoryAccessDetail) (memAccDetail|USER_XY);
              KS.imagesToAccessDetail[FD] = memAccDetail;
              KS.kernelType = UserOperator;

              if (curMemAcc & READ_ONLY) KS.num_img_loads++;
              if (curMemAcc & WRITE_ONLY) KS.num_img_stores++;

              return true;
            }
          }
        }
      }

      // output()
      if (ME->getMemberNameInfo().getAsString()=="output") {
        if (curMemAcc & READ_ONLY) KS.num_img_loads++;
        if (curMemAcc & WRITE_ONLY) KS.num_img_stores++;
        MemoryAccessDetail cur = KS.outputAccessDetail;
        KS.outputAccessDetail = (MemoryAccessDetail)(cur|NO_STRIDE);
        if (KS.kernelType < PointOperator) KS.kernelType = PointOperator;

        return true;
      }

      // outputAtPixel()
      if (ME->getMemberNameInfo().getAsString()=="outputAtPixel") {
        if (curMemAcc & READ_ONLY) KS.num_img_loads++;
        if (curMemAcc & WRITE_ONLY) KS.num_img_stores++;
        MemoryAccessDetail cur = KS.outputAccessDetail;
        KS.outputAccessDetail = (MemoryAccessDetail)(cur|USER_XY);
        KS.kernelType = UserOperator;

        return true;
      }
    }
  }

  return false;
}
/// \brief Layout the range of bitfields from BFI to BFE as contiguous storage.
///
/// On entry, FI points at the first bitfield of the run and FirstFieldNo is
/// its field number. On exit, FI points at the last bitfield of the run and
/// FirstFieldNo is updated to the last field's number, so the caller's loop
/// increment steps past the run. Returns false when a packed LLVM struct is
/// required to honor the layout (caller retries with Packed set).
bool CGRecordLayoutBuilder::LayoutBitfields(const ASTRecordLayout &Layout,
                                            unsigned &FirstFieldNo,
                                            RecordDecl::field_iterator &FI,
                                            RecordDecl::field_iterator FE) {
  assert(FI != FE);
  uint64_t FirstFieldOffset = Layout.getFieldOffset(FirstFieldNo);
  uint64_t NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);

  unsigned CharAlign = Types.getTarget().getCharAlign();
  assert(FirstFieldOffset % CharAlign == 0 &&
         "First field offset is misaligned");
  CharUnits FirstFieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

  unsigned StorageAlignment
    = llvm::MinAlign(Alignment.getQuantity(),
                     FirstFieldOffsetInBytes.getQuantity());

  // If the bitfield run begins before the current end of the LLVM struct,
  // the bits must be packed into the tail of the last base field.
  if (FirstFieldOffset < NextFieldOffsetInBits) {
    CharUnits FieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

    // Try to resize the last base field.
    if (!ResizeLastBaseFieldIfNecessary(FieldOffsetInCharUnits))
      llvm_unreachable("We must be able to resize the last base if we need to "
                       "pack bits into it.");

    NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
    assert(FirstFieldOffset >= NextFieldOffsetInBits);
  }

  // Append padding if necessary.
  AppendPadding(Types.getContext().toCharUnitsFromBits(FirstFieldOffset),
                CharUnits::One());

  // Find the last bitfield in a contiguous run of bitfields.
  RecordDecl::field_iterator BFI = FI;
  unsigned LastFieldNo = FirstFieldNo;
  uint64_t NextContiguousFieldOffset = FirstFieldOffset;
  for (RecordDecl::field_iterator FJ = FI;
       (FJ != FE && (*FJ)->isBitField() &&
        NextContiguousFieldOffset == Layout.getFieldOffset(LastFieldNo) &&
        (*FJ)->getBitWidthValue(Types.getContext()) != 0); FI = FJ++) {
    NextContiguousFieldOffset += (*FJ)->getBitWidthValue(Types.getContext());
    ++LastFieldNo;

    // We must use packed structs for packed fields, and also unnamed bit
    // fields since they don't affect the struct alignment.
    if (!Packed && ((*FJ)->hasAttr<PackedAttr>() || !(*FJ)->getDeclName()))
      return false;
  }
  // BFE is one past the last bitfield of the run; LastFieldNo was advanced
  // one too far by the scan above.
  RecordDecl::field_iterator BFE = llvm::next(FI);
  --LastFieldNo;
  assert(LastFieldNo >= FirstFieldNo && "Empty run of contiguous bitfields");
  FieldDecl *LastFD = *FI;

  // Find the last bitfield's offset, add its size, and round it up to the
  // character alignment to compute the storage required.
  uint64_t LastFieldOffset = Layout.getFieldOffset(LastFieldNo);
  uint64_t LastFieldSize = LastFD->getBitWidthValue(Types.getContext());
  uint64_t TotalBits = (LastFieldOffset + LastFieldSize) - FirstFieldOffset;
  CharUnits StorageBytes = Types.getContext().toCharUnitsFromBits(
    llvm::RoundUpToAlignment(TotalBits, CharAlign));
  uint64_t StorageBits = Types.getContext().toBits(StorageBytes);

  // Grow the storage to encompass any known padding in the layout when doing
  // so will make the storage a power-of-two. There are two cases when we can
  // do this. The first is when we have a subsequent field and can widen up to
  // its offset. The second is when the data size of the AST record layout is
  // past the end of the current storage. The latter is true when there is tail
  // padding on a struct and no members of a super class can be packed into it.
  //
  // Note that we widen the storage as much as possible here to express the
  // maximum latitude the language provides, and rely on the backend to lower
  // these in conjunction with shifts and masks to narrower operations where
  // beneficial.
  uint64_t EndOffset = Types.getContext().toBits(Layout.getDataSize());
  if (BFE != FE)
    // If there are more fields to be laid out, the offset at the end of the
    // bitfield is the offset of the next field in the record.
    EndOffset = Layout.getFieldOffset(LastFieldNo + 1);
  assert(EndOffset >= (FirstFieldOffset + TotalBits) &&
         "End offset is not past the end of the known storage bits.");
  uint64_t SpaceBits = EndOffset - FirstFieldOffset;
  uint64_t LongBits = Types.getTarget().getLongWidth();
  // NOTE(review): when StorageBits is an exact multiple of LongBits the
  // "% LongBits - 1" term wraps around and NextPowerOf2 yields 0, leaving
  // WidenedBits == StorageBits — apparently relied upon; confirm intended.
  uint64_t WidenedBits = (StorageBits / LongBits) * LongBits +
                         llvm::NextPowerOf2(StorageBits % LongBits - 1);
  assert(WidenedBits >= StorageBits && "Widening shrunk the bits!");
  if (WidenedBits <= SpaceBits) {
    StorageBits = WidenedBits;
    StorageBytes = Types.getContext().toCharUnitsFromBits(StorageBits);
    assert(StorageBits == (uint64_t)Types.getContext().toBits(StorageBytes));
  }

  unsigned FieldIndex = FieldTypes.size();
  AppendBytes(StorageBytes);

  // Now walk the bitfields associating them with this field of storage and
  // building up the bitfield specific info.
  unsigned FieldNo = FirstFieldNo;
  for (; BFI != BFE; ++BFI, ++FieldNo) {
    FieldDecl *FD = *BFI;
    uint64_t FieldOffset = Layout.getFieldOffset(FieldNo) - FirstFieldOffset;
    uint64_t FieldSize = FD->getBitWidthValue(Types.getContext());
    Fields[FD] = FieldIndex;
    BitFields[FD] = CGBitFieldInfo::MakeInfo(Types, FD, FieldOffset, FieldSize,
                                             StorageBits, StorageAlignment);
  }
  // Report the last consumed field number back to the caller's loop.
  FirstFieldNo = LastFieldNo;
  return true;
}
// Emit the follow-up note diagnostic attached to a "part object contains
// GC root" error: it points at the offending field's declaration and names
// both the field and the record that declares it.
void BlinkGCPluginConsumer::NotePartObjectContainsGCRoot(FieldPoint* point) {
  FieldDecl* const decl = point->field();
  ReportDiagnostic(decl->getLocStart(), diag_part_object_contains_gc_root_note_)
      << decl << decl->getParent();
}
Example #19
0
File: CGExprAgg.cpp  Project: CPFL/guc
/// VisitInitListExpr - Emit an initializer list into the aggregate slot
/// DestPtr.  Arrays, unions, and structs/classes are each handled in turn;
/// elements or fields without an explicit initializer are null-initialized.
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  // GNU array-range designators ([lo ... hi] = x) are not supported here.
  if (E->hadArrayRangeDesignator()) {
    CGF.ErrorUnsupported(E, "GNU array range designator extension");
  }

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    // Recover the LLVM array type from the destination pointer.
    const llvm::PointerType *APType =
      cast<llvm::PointerType>(DestPtr->getType());
    const llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    // A single initializer whose type matches the whole array is just an
    // aggregate copy of that value.
    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
    ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();

    // FIXME: were we intentionally ignoring address spaces and GC attributes?

    // Emit every element; elements past the explicit initializers are
    // null-initialized.
    for (uint64_t i = 0; i != NumArrayElements; ++i) {
      llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
      LValue LV = CGF.MakeAddrLValue(NextVal, ElementType);
      if (i < NumInitElements)
        EmitInitializationToLValue(E->getInit(i), LV, ElementType);

      else
        EmitNullInitializationToLValue(LV, ElementType);
    }
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
  
  // If we're initializing the whole aggregate, just do it in place.
  // FIXME: This is a hack around an AST bug (PR6537).
  if (NumInitElements == 1 && E->getType() == E->getInit(0)->getType()) {
    EmitInitializationToLValue(E->getInit(0),
                               CGF.MakeAddrLValue(DestPtr, E->getType()),
                               E->getType());
    return;
  }
  
  
  if (E->getType()->isUnionType()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really empty and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = SD->field_begin(),
                                   FieldEnd = SD->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();
    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);

    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(E->getInit(0), FieldLoc, Field->getType());
    } else {
      // Default-initialize to null
      EmitNullInitializationToLValue(FieldLoc, Field->getType());
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned CurInitVal = 0;
  for (RecordDecl::field_iterator Field = SD->field_begin(),
                               FieldEnd = SD->field_end();
       Field != FieldEnd; ++Field) {
    // We're done once we hit the flexible array member
    if (Field->getType()->isIncompleteArrayType())
      break;

    // Unnamed bitfields are padding and take no initializer.
    if (Field->isUnnamedBitfield())
      continue;

    // FIXME: volatility
    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, *Field, 0);
    // We never generate write-barriers for initialized fields.
    FieldLoc.setNonGC(true);
    if (CurInitVal < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc,
                                 Field->getType());
    } else {
      // We're out of initializers; default-initialize to null
      EmitNullInitializationToLValue(FieldLoc, Field->getType());
    }
  }
}
Example #20
0
/// CreateType - get structure or union type.
/// Builds the DWARF composite type for a struct/union/class: a forward
/// declaration is created first so recursive member types can refer to it,
/// then (for definitions) the members are converted and the forward decl is
/// replaced by the completed type.
llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
                                     llvm::DICompileUnit Unit) {
  RecordDecl *Decl = Ty->getDecl();
  
  // Map the record kind onto the corresponding DWARF tag.
  unsigned Tag;
  if (Decl->isStruct())
    Tag = llvm::dwarf::DW_TAG_structure_type;
  else if (Decl->isUnion())
    Tag = llvm::dwarf::DW_TAG_union_type;
  else {
    assert(Decl->isClass() && "Unknown RecordType!");
    Tag = llvm::dwarf::DW_TAG_class_type;
  }

  SourceManager &SM = M->getContext().getSourceManager();

  // Get overall information about the record type for the debug info.
  std::string Name = Decl->getNameAsString();

  llvm::DICompileUnit DefUnit = getOrCreateCompileUnit(Decl->getLocation());
  unsigned Line = SM.getInstantiationLineNumber(Decl->getLocation());
  
  
  // Records and classes and unions can all be recursive.  To handle them, we
  // first generate a debug descriptor for the struct as a forward declaration.
  // Then (if it is a definition) we go through and get debug info for all of
  // its members.  Finally, we create a descriptor for the complete type (which
  // may refer to the forward decl if the struct is recursive) and replace all
  // uses of the forward declaration with the final definition.
  llvm::DIType FwdDecl =
    DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, 0, 0, 0, 0,
                                     llvm::DIType(), llvm::DIArray());
  
  // If this is just a forward declaration, return it.
  if (!Decl->getDefinition(M->getContext()))
    return FwdDecl;

  // Otherwise, insert it into the TypeCache so that recursive uses will find
  // it.
  TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;

  // Convert all the elements.
  llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;

  const ASTRecordLayout &RL = M->getContext().getASTRecordLayout(Decl);

  // Build one DW_TAG_member per field, carrying its size, alignment and
  // bit offset within the record.
  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator I = Decl->field_begin(M->getContext()),
                                  E = Decl->field_end(M->getContext()); 
       I != E; ++I, ++FieldNo) {
    FieldDecl *Field = *I;
    llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);

    std::string FieldName = Field->getNameAsString();

    // Get the location for the field.
    SourceLocation FieldDefLoc = Field->getLocation();
    llvm::DICompileUnit FieldDefUnit = getOrCreateCompileUnit(FieldDefLoc);
    unsigned FieldLine = SM.getInstantiationLineNumber(FieldDefLoc);

    QualType FType = Field->getType();
    uint64_t FieldSize = 0;
    unsigned FieldAlign = 0;
    // Incomplete (flexible) array members have no computable size/alignment,
    // so both stay 0 for them.
    if (!FType->isIncompleteArrayType()) {
    
      // Bit size, align and offset of the type.
      FieldSize = M->getContext().getTypeSize(FType);
      // For bit-fields the size is the declared bit width, not the width of
      // the underlying type.
      Expr *BitWidth = Field->getBitWidth();
      if (BitWidth)
        FieldSize = 
          BitWidth->getIntegerConstantExprValue(M->getContext()).getZExtValue();
      
      FieldAlign =  M->getContext().getTypeAlign(FType);
    }

    uint64_t FieldOffset = RL.getFieldOffset(FieldNo);    
    
    // Create a DW_TAG_member node to remember the offset of this field in the
    // struct.  FIXME: This is an absolutely insane way to capture this
    // information.  When we gut debug info, this should be fixed.
    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
                                             FieldName, FieldDefUnit,
                                             FieldLine, FieldSize, FieldAlign,
                                             FieldOffset, 0, FieldTy);
    EltTys.push_back(FieldTy);
  }
  
  // NOTE(review): &EltTys[0] indexes an empty SmallVector when the record has
  // no fields — confirm GetOrCreateArray tolerates that, or guard it.
  llvm::DIArray Elements =
    DebugFactory.GetOrCreateArray(&EltTys[0], EltTys.size());

  // Bit size, align and offset of the type.
  uint64_t Size = M->getContext().getTypeSize(Ty);
  uint64_t Align = M->getContext().getTypeAlign(Ty);
  
  llvm::DIType RealDecl =
    DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, Size,
                                     Align, 0, 0, llvm::DIType(), Elements);

  // Now that we have a real decl for the struct, replace anything using the
  // old decl with the new one.  This will recursively update the debug info.
  FwdDecl.getGV()->replaceAllUsesWith(RealDecl.getGV());
  FwdDecl.getGV()->eraseFromParent();
  
  return RealDecl;
}
Example #21
0
/// PrintDeclContext - Recursively dump a DeclContext and the declarations it
/// contains.  By convention in the output: "[kind] name" marks a definition,
/// "<kind> name" a mere declaration, and "(kind) name" an implicit C++
/// member; each recursion level adds two spaces of indentation.
void DeclContextPrinter::PrintDeclContext(const DeclContext* DC, 
                                          unsigned Indentation) {
  // Print DeclContext name.
  switch (DC->getDeclKind()) {
  case Decl::TranslationUnit:
    Out << "[translation unit] " << DC;
    break;
  case Decl::Namespace: {
    Out << "[namespace] ";
    const NamespaceDecl* ND = cast<NamespaceDecl>(DC);
    Out << ND->getNameAsString();
    break;
  }
  case Decl::Enum: {
    const EnumDecl* ED = cast<EnumDecl>(DC);
    if (ED->isDefinition())
      Out << "[enum] ";
    else
      Out << "<enum> ";
    Out << ED->getNameAsString();
    break;
  }
  case Decl::Record: {
    const RecordDecl* RD = cast<RecordDecl>(DC);
    if (RD->isDefinition())
      Out << "[struct] ";
    else
      Out << "<struct> ";
    Out << RD->getNameAsString();
    break;
  }
  case Decl::CXXRecord: {
    const CXXRecordDecl* RD = cast<CXXRecordDecl>(DC);
    if (RD->isDefinition())
      Out << "[class] ";
    else
      Out << "<class> ";
    Out << RD->getNameAsString() << " " << DC;
    break;
  }
  case Decl::ObjCMethod:
    Out << "[objc method]";
    break;
  case Decl::ObjCInterface:
    Out << "[objc interface]";
    break;
  case Decl::ObjCCategory:
    Out << "[objc category]";
    break;
  case Decl::ObjCProtocol:
    Out << "[objc protocol]";
    break;
  case Decl::ObjCImplementation:
    Out << "[objc implementation]";
    break;
  case Decl::ObjCCategoryImpl:
    Out << "[objc categoryimpl]";
    break;
  case Decl::LinkageSpec:
    Out << "[linkage spec]";
    break;
  case Decl::Block:
    Out << "[block]";
    break;
  case Decl::Function: {
    const FunctionDecl* FD = cast<FunctionDecl>(DC);
    if (FD->isThisDeclarationADefinition())
      Out << "[function] ";
    else
      Out << "<function> ";
    Out << FD->getNameAsString();
    // Print the parameters.
    Out << "(";
    bool PrintComma = false;
    for (FunctionDecl::param_const_iterator I = FD->param_begin(), 
           E = FD->param_end(); I != E; ++I) {
      if (PrintComma)
        Out << ", ";
      else
        PrintComma = true;
      Out << (*I)->getNameAsString();
    }
    Out << ")";
    break;
  }
  case Decl::CXXMethod: {
    const CXXMethodDecl* D = cast<CXXMethodDecl>(DC);
    if (D->isOutOfLineDefinition())
      Out << "[c++ method] ";
    else if (D->isImplicit())
      Out << "(c++ method) ";
    else
      Out << "<c++ method> ";
    Out << D->getNameAsString();
    // Print the parameters.
    Out << "(";
    bool PrintComma = false;
    for (FunctionDecl::param_const_iterator I = D->param_begin(), 
           E = D->param_end(); I != E; ++I) {
      if (PrintComma)
        Out << ", ";
      else
        PrintComma = true;
      Out << (*I)->getNameAsString();
    }
    Out << ")";

    // Check the semantic DeclContext.  When a method is defined out of line,
    // its lexical context differs from its semantic one; flag that as [[...]].
    const DeclContext* SemaDC = D->getDeclContext();
    const DeclContext* LexicalDC = D->getLexicalDeclContext();
    if (SemaDC != LexicalDC)
      Out << " [[" << SemaDC << "]]";

    break;
  }
  case Decl::CXXConstructor: {
    const CXXConstructorDecl* D = cast<CXXConstructorDecl>(DC);
    if (D->isOutOfLineDefinition())
      Out << "[c++ ctor] ";
    else if (D->isImplicit())
      Out << "(c++ ctor) ";
    else
      Out << "<c++ ctor> ";
    Out << D->getNameAsString();
    // Print the parameters.
    Out << "(";
    bool PrintComma = false;
    for (FunctionDecl::param_const_iterator I = D->param_begin(), 
           E = D->param_end(); I != E; ++I) {
      if (PrintComma)
        Out << ", ";
      else
        PrintComma = true;
      Out << (*I)->getNameAsString();
    }
    Out << ")";

    // Check the semantic DC.
    const DeclContext* SemaDC = D->getDeclContext();
    const DeclContext* LexicalDC = D->getLexicalDeclContext();
    if (SemaDC != LexicalDC)
      Out << " [[" << SemaDC << "]]";
    break;
  }
  case Decl::CXXDestructor: {
    const CXXDestructorDecl* D = cast<CXXDestructorDecl>(DC);
    if (D->isOutOfLineDefinition())
      Out << "[c++ dtor] ";
    else if (D->isImplicit())
      Out << "(c++ dtor) ";
    else
      Out << "<c++ dtor> ";
    Out << D->getNameAsString();
    // Check the semantic DC.
    const DeclContext* SemaDC = D->getDeclContext();
    const DeclContext* LexicalDC = D->getLexicalDeclContext();
    if (SemaDC != LexicalDC)
      Out << " [[" << SemaDC << "]]";
    break;
  }
  case Decl::CXXConversion: {
    const CXXConversionDecl* D = cast<CXXConversionDecl>(DC);
    if (D->isOutOfLineDefinition())
      Out << "[c++ conversion] ";
    else if (D->isImplicit())
      Out << "(c++ conversion) ";
    else
      Out << "<c++ conversion> ";
    Out << D->getNameAsString();
    // Check the semantic DC.
    const DeclContext* SemaDC = D->getDeclContext();
    const DeclContext* LexicalDC = D->getLexicalDeclContext();
    if (SemaDC != LexicalDC)
      Out << " [[" << SemaDC << "]]";
    break;
  }

  default:
    assert(0 && "a decl that inherits DeclContext isn't handled");
  }

  Out << "\n";

  // Print decls in the DeclContext.
  // FIXME: Should not use a NULL DeclContext!
  // NOTE(review): binding *Context from a null pointer is undefined behavior
  // if decls_begin()/decls_end() actually use the context — confirm, or plumb
  // a real ASTContext through.
  ASTContext *Context = 0;
  for (DeclContext::decl_iterator I = DC->decls_begin(*Context), 
         E = DC->decls_end(*Context);
       I != E; ++I) {
    for (unsigned i = 0; i < Indentation; ++i)
      Out << "  ";

    Decl::Kind DK = I->getKind();
    switch (DK) {
    // Declarations that are themselves DeclContexts recurse; everything else
    // is printed as a one-line "<kind> name" entry below.
    case Decl::Namespace:
    case Decl::Enum:
    case Decl::Record:
    case Decl::CXXRecord:
    case Decl::ObjCMethod:
    case Decl::ObjCInterface:
    case Decl::ObjCCategory: 
    case Decl::ObjCProtocol:
    case Decl::ObjCImplementation:
    case Decl::ObjCCategoryImpl:
    case Decl::LinkageSpec:
    case Decl::Block:
    case Decl::Function:
    case Decl::CXXMethod:
    case Decl::CXXConstructor:
    case Decl::CXXDestructor:
    case Decl::CXXConversion:
    {
      DeclContext* DC = cast<DeclContext>(*I);
      PrintDeclContext(DC, Indentation+2);
      break;
    }
    case Decl::Field: {
      FieldDecl* FD = cast<FieldDecl>(*I);
      Out << "<field> " << FD->getNameAsString() << "\n";
      break;
    }
    case Decl::Typedef: {
      TypedefDecl* TD = cast<TypedefDecl>(*I);
      Out << "<typedef> " << TD->getNameAsString() << "\n";
      break;
    }
    case Decl::EnumConstant: {
      EnumConstantDecl* ECD = cast<EnumConstantDecl>(*I);
      Out << "<enum constant> " << ECD->getNameAsString() << "\n";
      break;
    }
    case Decl::Var: {
      VarDecl* VD = cast<VarDecl>(*I);
      Out << "<var> " << VD->getNameAsString() << "\n";
      break;
    }
    case Decl::ImplicitParam: {
      ImplicitParamDecl* IPD = cast<ImplicitParamDecl>(*I);
      Out << "<implicit parameter> " << IPD->getNameAsString() << "\n";
      break;
    }
    case Decl::ParmVar: {
      ParmVarDecl* PVD = cast<ParmVarDecl>(*I);
      Out << "<parameter> " << PVD->getNameAsString() << "\n";
      break;
    }
    case Decl::OriginalParmVar: {
      OriginalParmVarDecl* OPVD = cast<OriginalParmVarDecl>(*I);
      Out << "<original parameter> " << OPVD->getNameAsString() << "\n";
      break;
    }
    case Decl::ObjCProperty: {
      ObjCPropertyDecl* OPD = cast<ObjCPropertyDecl>(*I);
      Out << "<objc property> " << OPD->getNameAsString() << "\n";
      break;
    }
    default:
      fprintf(stderr, "DeclKind: %d \"%s\"\n", DK, I->getDeclKindName());
      assert(0 && "decl unhandled");
    }
  }
}