Example #1
0
RecordInfo::Fields* RecordInfo::CollectFields() {
  // Build the field collection into a fresh map so callers never observe a
  // partially-populated state.
  Fields* collected = new Fields;
  if (!record_->hasDefinition())
    return collected;
  TracingStatus status = TracingStatus::Unneeded();
  for (RecordDecl::field_iterator iter = record_->field_begin();
       iter != record_->field_end();
       ++iter) {
    FieldDecl* decl = *iter;
    // Fields carrying the GC_PLUGIN_IGNORE annotation are skipped entirely.
    if (Config::IsIgnoreAnnotated(decl))
      continue;
    // Try the unexpanded (unqualified, sugared) type first; this is needed
    // so that iterator aliases are tracked correctly.
    const Type* sugared = decl->getType().getSplitUnqualifiedType().Ty;
    Edge* edge = CreateEdgeFromOriginalType(sugared);
    if (!edge)
      edge = CreateEdge(decl->getType().getTypePtrOrNull());
    if (!edge)
      continue;
    status = status.LUB(edge->NeedsTracing(Edge::kRecursive));
    collected->insert(std::make_pair(decl, FieldPoint(decl, edge)));
  }
  fields_need_tracing_ = status;
  return collected;
}
Example #2
0
/// ExpandTypeToArgs - Recursively flatten the aggregate rvalue 'RV' of
/// structure type 'Ty' into a sequence of scalar call arguments appended
/// to 'Args'.
void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      // Aggregate field: recurse to expand its members in turn.
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      // Scalar field: load it and pass the value directly.  Renamed from
      // 'RV' so the function parameter of the same name is not shadowed.
      RValue FieldRV = EmitLoadOfLValue(LV, FT);
      assert(FieldRV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(FieldRV.getScalarVal());
    }
  }
}
Example #3
0
/// ExpandTypeFromArgs - Reassemble a structure of type 'Ty' at the location
/// 'LV' from the flattened scalar function arguments starting at 'AI'.
/// Returns the iterator positioned past the last argument consumed.
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    // Renamed from 'LV' to avoid shadowing the parameter of the same name.
    LValue FieldLV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      // Aggregate field: recurse; AI advances past the consumed arguments.
      AI = ExpandTypeFromArgs(FT, FieldLV, AI);
    } else {
      // Scalar field: store the next argument into the field.
      EmitStoreThroughLValue(RValue::get(AI), FieldLV, FT);
      ++AI;
    }
  }

  return AI;
}
RecordInfo::Fields* RecordInfo::CollectFields() {
  // Assemble the field map locally so no partially-built state is visible.
  Fields* result = new Fields;
  if (!record_->hasDefinition())
    return result;
  TracingStatus status = TracingStatus::Unneeded();
  for (RecordDecl::field_iterator iter = record_->field_begin();
       iter != record_->field_end();
       ++iter) {
    FieldDecl* member = *iter;
    // Skip fields carrying the GC_PLUGIN_IGNORE annotation.
    if (Config::IsIgnoreAnnotated(member))
      continue;
    Edge* edge = CreateEdge(member->getType().getTypePtrOrNull());
    if (!edge)
      continue;
    status = status.LUB(edge->NeedsTracing(Edge::kRecursive));
    result->insert(std::make_pair(member, FieldPoint(member, edge)));
  }
  fields_need_tracing_ = status;
  return result;
}
Example #5
0
bool ATSCollectionVisitor::VisitMemberExpr(MemberExpr *ME)
{
  // Collect member expressions that reference scalar fields of a struct or
  // union; anything else is left alone (returning true keeps the traversal
  // going).
  FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
  if (!FD) {
    // in C++, getMemberDecl returns a CXXMethodDecl.
    if (TransformationManager::isCXXLangOpt())
      return true;
    TransAssert(0 && "Bad FD!\n");
  }

  if (!FD->getType().getTypePtr()->isScalarType())
    return true;

  RecordDecl *RD = FD->getParent();
  TransAssert(RD && "NULL RecordDecl!");
  if (RD->isStruct() || RD->isUnion())
    ConsumerInstance->addOneExpr(ME);
  return true;
}
Example #6
0
File: CGExprAgg.cpp  Project: CPFL/guc
/// VisitInitListExpr - Emit aggregate initialization from an initializer
/// list into DestPtr.  Arrays are emitted element by element, unions via
/// their single initialized field, and structs field by field (which makes
/// bitfield initialization work automatically).
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator()) {
    CGF.ErrorUnsupported(E, "GNU array range designator extension");
  }

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    // DestPtr points at the array; recover the LLVM array type through the
    // pointer's element type.
    const llvm::PointerType *APType =
      cast<llvm::PointerType>(DestPtr->getType());
    const llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    // If the single/first initializer already has the array's own type,
    // this is a whole-array copy rather than element-wise initialization.
    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
    ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();

    // FIXME: were we intentionally ignoring address spaces and GC attributes?

    // Emit every array element; elements beyond the provided initializers
    // are null-initialized.
    for (uint64_t i = 0; i != NumArrayElements; ++i) {
      llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
      LValue LV = CGF.MakeAddrLValue(NextVal, ElementType);
      if (i < NumInitElements)
        EmitInitializationToLValue(E->getInit(i), LV, ElementType);

      else
        EmitNullInitializationToLValue(LV, ElementType);
    }
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the approprate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
  
  // If we're initializing the whole aggregate, just do it in place.
  // FIXME: This is a hack around an AST bug (PR6537).
  if (NumInitElements == 1 && E->getType() == E->getInit(0)->getType()) {
    EmitInitializationToLValue(E->getInit(0),
                               CGF.MakeAddrLValue(DestPtr, E->getType()),
                               E->getType());
    return;
  }
  
  
  if (E->getType()->isUnionType()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = SD->field_begin(),
                                   FieldEnd = SD->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();
    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);

    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(E->getInit(0), FieldLoc, Field->getType());
    } else {
      // Default-initialize to null
      EmitNullInitializationToLValue(FieldLoc, Field->getType());
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned CurInitVal = 0;
  for (RecordDecl::field_iterator Field = SD->field_begin(),
                               FieldEnd = SD->field_end();
       Field != FieldEnd; ++Field) {
    // We're done once we hit the flexible array member
    if (Field->getType()->isIncompleteArrayType())
      break;

    if (Field->isUnnamedBitfield())
      continue;

    // FIXME: volatility
    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, *Field, 0);
    // We never generate write-barries for initialized fields.
    FieldLoc.setNonGC(true);
    if (CurInitVal < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc,
                                 Field->getType());
    } else {
      // We're out of initalizers; default-initialize to null
      EmitNullInitializationToLValue(FieldLoc, Field->getType());
    }
  }
}
Example #7
0
/// CreateType - get structure or union type.
/// Builds a composite debug-info descriptor for a struct/union/class: first
/// a forward declaration (so recursive types can refer to it), then, if the
/// record has a definition, one DW_TAG_member per field, and finally the
/// complete type which replaces all uses of the forward declaration.
/// NOTE(review): this uses a very old LLVM DebugFactory/DIType API —
/// confirm against the toolchain this file actually builds with.
llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
                                     llvm::DICompileUnit Unit) {
  RecordDecl *Decl = Ty->getDecl();
  
  // Pick the DWARF tag matching the record kind (struct/union/class).
  unsigned Tag;
  if (Decl->isStruct())
    Tag = llvm::dwarf::DW_TAG_structure_type;
  else if (Decl->isUnion())
    Tag = llvm::dwarf::DW_TAG_union_type;
  else {
    assert(Decl->isClass() && "Unknown RecordType!");
    Tag = llvm::dwarf::DW_TAG_class_type;
  }

  SourceManager &SM = M->getContext().getSourceManager();

  // Get overall information about the record type for the debug info.
  std::string Name = Decl->getNameAsString();

  llvm::DICompileUnit DefUnit = getOrCreateCompileUnit(Decl->getLocation());
  unsigned Line = SM.getInstantiationLineNumber(Decl->getLocation());
  
  
  // Records and classes and unions can all be recursive.  To handle them, we
  // first generate a debug descriptor for the struct as a forward declaration.
  // Then (if it is a definition) we go through and get debug info for all of
  // its members.  Finally, we create a descriptor for the complete type (which
  // may refer to the forward decl if the struct is recursive) and replace all
  // uses of the forward declaration with the final definition.
  llvm::DIType FwdDecl =
    DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, 0, 0, 0, 0,
                                     llvm::DIType(), llvm::DIArray());
  
  // If this is just a forward declaration, return it.
  if (!Decl->getDefinition(M->getContext()))
    return FwdDecl;

  // Otherwise, insert it into the TypeCache so that recursive uses will find
  // it.
  TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;

  // Convert all the elements.
  llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;

  const ASTRecordLayout &RL = M->getContext().getASTRecordLayout(Decl);

  // FieldNo indexes into the record layout, so it advances for every field
  // the iterator visits.
  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator I = Decl->field_begin(M->getContext()),
                                  E = Decl->field_end(M->getContext()); 
       I != E; ++I, ++FieldNo) {
    FieldDecl *Field = *I;
    llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);

    std::string FieldName = Field->getNameAsString();

    // Get the location for the field.
    SourceLocation FieldDefLoc = Field->getLocation();
    llvm::DICompileUnit FieldDefUnit = getOrCreateCompileUnit(FieldDefLoc);
    unsigned FieldLine = SM.getInstantiationLineNumber(FieldDefLoc);

    QualType FType = Field->getType();
    uint64_t FieldSize = 0;
    unsigned FieldAlign = 0;
    // A flexible array member (incomplete array type) has no size/alignment;
    // leave both zero in that case.
    if (!FType->isIncompleteArrayType()) {
    
      // Bit size, align and offset of the type.
      FieldSize = M->getContext().getTypeSize(FType);
      Expr *BitWidth = Field->getBitWidth();
      // For bitfields the declared bit width overrides the type's size.
      if (BitWidth)
        FieldSize = 
          BitWidth->getIntegerConstantExprValue(M->getContext()).getZExtValue();
      
      FieldAlign =  M->getContext().getTypeAlign(FType);
    }

    uint64_t FieldOffset = RL.getFieldOffset(FieldNo);    
    
    // Create a DW_TAG_member node to remember the offset of this field in the
    // struct.  FIXME: This is an absolutely insane way to capture this
    // information.  When we gut debug info, this should be fixed.
    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
                                             FieldName, FieldDefUnit,
                                             FieldLine, FieldSize, FieldAlign,
                                             FieldOffset, 0, FieldTy);
    EltTys.push_back(FieldTy);
  }
  
  llvm::DIArray Elements =
    DebugFactory.GetOrCreateArray(&EltTys[0], EltTys.size());

  // Bit size, align and offset of the type.
  uint64_t Size = M->getContext().getTypeSize(Ty);
  uint64_t Align = M->getContext().getTypeAlign(Ty);
  
  llvm::DIType RealDecl =
    DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, Size,
                                     Align, 0, 0, llvm::DIType(), Elements);

  // Now that we have a real decl for the struct, replace anything using the
  // old decl with the new one.  This will recursively update the debug info.
  FwdDecl.getGV()->replaceAllUsesWith(RealDecl.getGV());
  FwdDecl.getGV()->eraseFromParent();
  
  return RealDecl;
}
Example #8
0
/// checkImageAccess - Inspect expression E for accesses to DSL objects
/// (Image, Accessor, Mask, Domain) and to the kernel output, recording the
/// access kind and stride details in the kernel statistics KS.
/// Returns true for pixel read/write accesses, false otherwise.
/// Note: the redundant isa<X>() checks followed by dyn_cast<X>() were
/// collapsed into single dyn_cast<X>() null-checks (standard LLVM idiom);
/// behavior is unchanged.
bool TransferFunctions::checkImageAccess(Expr *E, MemoryAccess curMemAcc) {
  // discard implicit casts and paren expressions
  E = E->IgnoreParenImpCasts();

  // match Image(), Accessor(), Mask(), and Domain() calls
  if (CXXOperatorCallExpr *COCE = dyn_cast<CXXOperatorCallExpr>(E)) {
    if (MemberExpr *ME = dyn_cast<MemberExpr>(COCE->getArg(0))) {
      if (FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
        MemoryAccess memAcc = KS.imagesToAccess[FD];
        MemoryAccessDetail memAccDetail = KS.imagesToAccessDetail[FD];

        memAcc = (MemoryAccess) (memAcc|curMemAcc);
        KS.imagesToAccess[FD] = memAcc;

        // access to Image: direct Image access is an error.
        if (KS.compilerClasses.isTypeOfTemplateClass(FD->getType(),
              KS.compilerClasses.Image)) {
          KS.Diags.Report(E->getLocStart(), KS.DiagIDImageAccess) <<
            FD->getNameAsString();

          exit(EXIT_FAILURE);
        }

        // access to Accessor
        if (KS.compilerClasses.isTypeOfTemplateClass(FD->getType(),
              KS.compilerClasses.Accessor)) {
          if (curMemAcc & READ_ONLY) KS.num_img_loads++;
          if (curMemAcc & WRITE_ONLY) KS.num_img_stores++;

          // The operator() arity determines the access pattern:
          // 1 arg -> no stride (point operator), 2 args -> x/y stride
          // (local operator), 3 args -> explicit offsets via checkStride().
          switch (COCE->getNumArgs()) {
            default:
              break;
            case 1:
              memAccDetail = (MemoryAccessDetail) (memAccDetail|NO_STRIDE);
              if (KS.kernelType < PointOperator) KS.kernelType = PointOperator;
              break;
            case 2:
              // TODO: check for Mask or Domain as parameter and check if we
              // need only STRIDE_X or STRIDE_Y
              memAccDetail = (MemoryAccessDetail) (memAccDetail|STRIDE_XY);
              if (KS.kernelType < LocalOperator) KS.kernelType = LocalOperator;
              break;
            case 3:
              memAccDetail = (MemoryAccessDetail)
                (memAccDetail|checkStride(COCE->getArg(1), COCE->getArg(2)));
              if (memAccDetail > NO_STRIDE && KS.kernelType < LocalOperator) {
                KS.kernelType = LocalOperator;
              }
              break;
          }
          KS.imagesToAccessDetail[FD] = memAccDetail;

          return true;
        }

        // access to Mask
        if (KS.compilerClasses.isTypeOfTemplateClass(FD->getType(),
              KS.compilerClasses.Mask)) {
          if (curMemAcc & READ_ONLY) KS.num_mask_loads++;
          if (curMemAcc & WRITE_ONLY) KS.num_mask_stores++;

          if (KS.inLambdaFunction) {
            // TODO: check for Mask as parameter and check if we need only
            // STRIDE_X or STRIDE_Y
            memAccDetail = (MemoryAccessDetail) (memAccDetail|STRIDE_XY);
            if (KS.kernelType < LocalOperator) KS.kernelType = LocalOperator;
          } else {
            assert(COCE->getNumArgs()==3 &&
                "Mask access requires x and y parameters!");
            memAccDetail = (MemoryAccessDetail)
              (memAccDetail|checkStride(COCE->getArg(1), COCE->getArg(2)));
            if (memAccDetail > NO_STRIDE && KS.kernelType < LocalOperator) {
              KS.kernelType = LocalOperator;
            }
          }
          KS.imagesToAccessDetail[FD] = memAccDetail;

          return false;
        }

        // access to Domain
        if (KS.compilerClasses.isTypeOfClass(FD->getType(),
              KS.compilerClasses.Domain)) {
          if (curMemAcc & READ_ONLY) KS.num_mask_loads++;
          if (curMemAcc & WRITE_ONLY) KS.num_mask_stores++;

          if (KS.inLambdaFunction) {
            // TODO: check for Domain as parameter and check if we need only
            // STRIDE_X or STRIDE_Y
            memAccDetail = (MemoryAccessDetail) (memAccDetail|STRIDE_XY);
            if (KS.kernelType < LocalOperator) KS.kernelType = LocalOperator;
          } else {
            assert(COCE->getNumArgs()==3 &&
                "Domain access requires x and y parameters!");
            memAccDetail = (MemoryAccessDetail)
              (memAccDetail|checkStride(COCE->getArg(1), COCE->getArg(2)));
            if (memAccDetail > NO_STRIDE && KS.kernelType < LocalOperator) {
              KS.kernelType = LocalOperator;
            }
          }
          KS.imagesToAccessDetail[FD] = memAccDetail;

          return false;
        }
      }
    }
  }

  // match Image->getPixel(), output(), and outputAtPixel() calls
  if (CXXMemberCallExpr *CMCE = dyn_cast<CXXMemberCallExpr>(E)) {
    if (MemberExpr *ME = dyn_cast<MemberExpr>(CMCE->getCallee())) {
      if (MemberExpr *MEAcc = dyn_cast<MemberExpr>(ME->getBase())) {
        if (FieldDecl *FD = dyn_cast<FieldDecl>(MEAcc->getMemberDecl())) {
          // Image: direct Image access is an error.
          if (KS.compilerClasses.isTypeOfTemplateClass(FD->getType(),
                KS.compilerClasses.Image)) {
            KS.Diags.Report(E->getLocStart(), KS.DiagIDImageAccess) <<
              FD->getNameAsString();

            exit(EXIT_FAILURE);
          }

          // Accessor
          if (KS.compilerClasses.isTypeOfTemplateClass(FD->getType(),
                KS.compilerClasses.Accessor)) {
            // Accessor->getPixel(): user-controlled coordinates.
            if (ME->getMemberNameInfo().getAsString()=="getPixel") {
              MemoryAccess memAcc = KS.imagesToAccess[FD];
              MemoryAccessDetail memAccDetail = KS.imagesToAccessDetail[FD];

              memAcc = (MemoryAccess) (memAcc|curMemAcc);
              KS.imagesToAccess[FD] = memAcc;

              memAccDetail = (MemoryAccessDetail) (memAccDetail|USER_XY);
              KS.imagesToAccessDetail[FD] = memAccDetail;
              KS.kernelType = UserOperator;

              if (curMemAcc & READ_ONLY) KS.num_img_loads++;
              if (curMemAcc & WRITE_ONLY) KS.num_img_stores++;

              return true;
            }
          }
        }
      }

      // output()
      if (ME->getMemberNameInfo().getAsString()=="output") {
        if (curMemAcc & READ_ONLY) KS.num_img_loads++;
        if (curMemAcc & WRITE_ONLY) KS.num_img_stores++;
        MemoryAccessDetail cur = KS.outputAccessDetail;
        KS.outputAccessDetail = (MemoryAccessDetail)(cur|NO_STRIDE);
        if (KS.kernelType < PointOperator) KS.kernelType = PointOperator;

        return true;
      }

      // outputAtPixel()
      if (ME->getMemberNameInfo().getAsString()=="outputAtPixel") {
        if (curMemAcc & READ_ONLY) KS.num_img_loads++;
        if (curMemAcc & WRITE_ONLY) KS.num_img_stores++;
        MemoryAccessDetail cur = KS.outputAccessDetail;
        KS.outputAccessDetail = (MemoryAccessDetail)(cur|USER_XY);
        KS.kernelType = UserOperator;

        return true;
      }
    }
  }

  return false;
}
Example #9
0
/// HandleDeclInMainFile - This is called for each top-level decl defined in the
/// main file of the input.  Dispatches on the decl kind and rewrites any
/// block-pointer types / block literals found in functions, Objective-C
/// methods, variables, typedefs, and record fields.
void RewriteBlocks::HandleDeclInMainFile(Decl *D) {
  if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // Since function prototypes don't have ParmDecl's, we check the function
    // prototype. This enables us to rewrite function declarations and
    // definitions using the same code.
    RewriteFunctionProtoType(FD->getType(), FD);

    if (CompoundStmt *Body = FD->getBody()) {
      CurFunctionDef = FD;
      FD->setBody(cast_or_null<CompoundStmt>(RewriteFunctionBody(Body)));
      // This synthesizes and inserts the block "impl" struct, invoke function,
      // and any copy/dispose helper functions.
      InsertBlockLiteralsWithinFunction(FD);
      CurFunctionDef = 0;
    }
    return;
  }
  if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
    RewriteMethodDecl(MD);
    if (Stmt *Body = MD->getBody()) {
      CurMethodDef = MD;
      RewriteFunctionBody(Body);
      InsertBlockLiteralsWithinMethod(MD);
      CurMethodDef = 0;
    }
    // An ObjCMethodDecl can never satisfy the VarDecl/TypedefDecl/RecordDecl
    // casts below; return here for consistency with the other branches
    // (this 'return' was previously missing but is behavior-preserving).
    return;
  }
  if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (isBlockPointerType(VD->getType())) {
      RewriteBlockPointerDecl(VD);
      if (VD->getInit()) {
        if (BlockExpr *CBE = dyn_cast<BlockExpr>(VD->getInit())) {
          RewriteFunctionBody(CBE->getBody());

          // We've just rewritten the block body in place.
          // Now we snarf the rewritten text and stash it away for later use.
          std::string S = Rewrite.getRewritenText(CBE->getSourceRange());
          RewrittenBlockExprs[CBE] = S;
          std::string Init = SynthesizeBlockInitExpr(CBE, VD);
          // Do the rewrite, using S.size() which contains the rewritten size.
          ReplaceText(CBE->getLocStart(), S.size(), Init.c_str(), Init.size());
          SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(), 
                                  VD->getNameAsCString());
        } else if (CastExpr *CE = dyn_cast<CastExpr>(VD->getInit())) {
          RewriteCastExpr(CE);
        }
      }
    } else if (VD->getType()->isFunctionPointerType()) {
      CheckFunctionPointerDecl(VD->getType(), VD);
      if (VD->getInit()) {
        if (CastExpr *CE = dyn_cast<CastExpr>(VD->getInit())) {
          RewriteCastExpr(CE);
        }
      }
    }
    return;
  }
  if (TypedefDecl *TD = dyn_cast<TypedefDecl>(D)) {
    if (isBlockPointerType(TD->getUnderlyingType()))
      RewriteBlockPointerDecl(TD);
    else if (TD->getUnderlyingType()->isFunctionPointerType()) 
      CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
    return;
  }
  if (RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
    if (RD->isDefinition()) {
      // Rewrite any block-pointer-typed fields of a fully-defined record.
      for (RecordDecl::field_iterator i = RD->field_begin(*Context), 
             e = RD->field_end(*Context); i != e; ++i) {
        FieldDecl *FD = *i;
        if (isBlockPointerType(FD->getType()))
          RewriteBlockPointerDecl(FD);
      }
    }
    return;
  }
}