Example #1
File: CGDecl.cpp Project: aaasz/SHP
void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  default:
    CGM.ErrorUnsupported(&D, "decl");
    return;
  case Decl::ParmVar:
    assert(0 && "Parmdecls should not be in declstmts!");
  case Decl::Function:  // void X();
  case Decl::Record:    // struct/union/class X;
  case Decl::Enum:      // enum X;
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::CXXRecord: // struct/union/class X; [C++]
  case Decl::Using:          // using X; [C++]
  case Decl::UsingShadow:
  case Decl::UsingDirective: // using namespace X; [C++]
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
    // None of these decls require codegen support.
    return;

  case Decl::Var: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isBlockVarDecl() &&
           "Should not see file-scope variables inside a function!");
    return EmitBlockVarDecl(VD);
  }

  case Decl::Typedef: {   // typedef int X;
    const TypedefDecl &TD = cast<TypedefDecl>(D);
    QualType Ty = TD.getUnderlyingType();

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
  }
}
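
A quick orientation aid: below is a minimal sketch (hypothetical names, compiled as C, since variably modified typedefs are a C99 feature) of a body whose declarations hit each branch of the switch above. Only the variable and the variably modified typedef cause any code to be emitted.

void example(void) {
  extern void X(void);     /* Decl::Function: no codegen needed */
  struct S { int i; };     /* Decl::Record: type only, no codegen */
  enum E { A = 1 };        /* Decl::Enum / Decl::EnumConstant: no codegen */
  int local = A;           /* Decl::Var: lowered by EmitBlockVarDecl */
  typedef int VLA[local];  /* Decl::Typedef of a variably modified type: */
  (void)sizeof(VLA);       /*   EmitVLASize evaluates the bound 'local' */
}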
Example #2
File: CGDecl.cpp Project: aaasz/SHP
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on the
/// target.
void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
  QualType Ty = D.getType();
  bool isByRef = D.hasAttr<BlocksAttr>();
  bool needsDispose = false;
  unsigned Align = 0;
  bool IsSimpleConstantInitializer = false;

  llvm::Value *DeclPtr;
  if (Ty->isConstantSizeType()) {
    if (!Target.useGlobalsForAutomaticVariables()) {
      
      // If this value is an array or struct of POD type, and the initializer
      // is a statically determinable constant, try to optimize it.
      if (D.getInit() && !isByRef &&
          (Ty->isArrayType() || Ty->isRecordType()) &&
          Ty->isPODType() &&
          D.getInit()->isConstantInitializer(getContext())) {
        // If this variable is marked 'const', emit the value as a global.
        if (CGM.getCodeGenOpts().MergeAllConstants &&
            Ty.isConstant(getContext())) {
          EmitStaticBlockVarDecl(D);
          return;
        }
        
        IsSimpleConstantInitializer = true;
      }
      
      // A normal fixed sized variable becomes an alloca in the entry block.
      const llvm::Type *LTy = ConvertTypeForMem(Ty);
      if (isByRef)
        LTy = BuildByRefType(&D);
      llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
      Alloc->setName(D.getNameAsString());

      Align = getContext().getDeclAlignInBytes(&D);
      if (isByRef)
        Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
      Alloc->setAlignment(Align);
      DeclPtr = Alloc;
    } else {
      // Targets that don't support recursion emit locals as globals.
      const char *Class =
        D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
      DeclPtr = CreateStaticBlockVarDecl(D, Class,
                                         llvm::GlobalValue::InternalLinkage);
    }

    // FIXME: Can this happen?
    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
      llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);

      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      {
        // Push a cleanup block and restore the stack there.
        DelayedCleanupBlock scope(*this);

        V = Builder.CreateLoad(Stack, "tmp");
        llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
        Builder.CreateCall(F, V);
      }
    }

    // Get the element type.
    const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
    const llvm::Type *LElemPtrTy =
      llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());

    llvm::Value *VLASize = EmitVLASize(Ty);

    // Downcast the VLA size expression
    VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
                                    false, "tmp");

    // Allocate memory for the array.
    llvm::AllocaInst *VLA = 
      Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
    VLA->setAlignment(getContext().getDeclAlignInBytes(&D));

    DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
  }

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for local var declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    assert(HaveInsertPoint() && "Unexpected unreachable point!");

    DI->setLocation(D.getLocation());
    if (Target.useGlobalsForAutomaticVariables()) {
      DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
    } else
      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
  }

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!ContainsLabel(Init))
      Init = 0;
    else
      EnsureInsertPoint();
  }

  if (Init) {
    llvm::Value *Loc = DeclPtr;
    if (isByRef)
      Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D), 
                                    D.getNameAsString());

    bool isVolatile =
      getContext().getCanonicalType(D.getType()).isVolatileQualified();
    
    // If the initializer was a simple constant initializer, we can optimize it
    // in various ways.
    if (IsSimpleConstantInitializer) {
      llvm::Constant *Init =
        CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
      assert(Init != 0 && "Wasn't a simple constant init?");
      
      llvm::Value *AlignVal = 
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Align);
      const llvm::Type *IntPtr =
        llvm::IntegerType::get(VMContext, LLVMPointerWidth);
      llvm::Value *SizeVal =
        llvm::ConstantInt::get(IntPtr, getContext().getTypeSizeInBytes(Ty));

      const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
      if (Loc->getType() != BP)
        Loc = Builder.CreateBitCast(Loc, BP, "tmp");
      
      // If the initializer is all zeros, codegen with memset.
      if (isa<llvm::ConstantAggregateZero>(Init)) {
        llvm::Value *Zero =
          llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
        Builder.CreateCall4(CGM.getMemSetFn(), Loc, Zero, SizeVal, AlignVal);
      } else {
        // Otherwise, create a temporary global with the initializer then 
        // memcpy from the global to the alloca.
        std::string Name = GetStaticDeclName(*this, D, ".");
        llvm::GlobalVariable *GV =
          new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
                                   llvm::GlobalValue::InternalLinkage,
                                   Init, Name, 0, false, 0);
        GV->setAlignment(Align);

        llvm::Value *SrcPtr = GV;
        if (SrcPtr->getType() != BP)
          SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
        
        Builder.CreateCall4(CGM.getMemCpyFn(), Loc, SrcPtr, SizeVal, AlignVal);
      }
    } else if (Ty->isReferenceType()) {
      RValue RV = EmitReferenceBindingToExpr(Init, Ty, /*IsInitializer=*/true);
      EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
    } else if (!hasAggregateLLVMType(Init->getType())) {
      llvm::Value *V = EmitScalarExpr(Init);
      EmitStoreOfScalar(V, Loc, isVolatile, D.getType());
    } else if (Init->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(Init, Loc, isVolatile);
    } else {
      EmitAggExpr(Init, Loc, isVolatile);
    }
  }

  if (isByRef) {
    const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);

    EnsureInsertPoint();
    llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
    llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
    llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
    llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
    llvm::Value *V;
    int flag = 0;
    int flags = 0;

    needsDispose = true;

    if (Ty->isBlockPointerType()) {
      flag |= BLOCK_FIELD_IS_BLOCK;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    } else if (BlockRequiresCopying(Ty)) {
      flag |= BLOCK_FIELD_IS_OBJECT;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    }

    // FIXME: Someone double check this.
    if (Ty.isObjCGCWeak())
      flag |= BLOCK_FIELD_IS_WEAK;

    int isa = 0;
    if (flag & BLOCK_FIELD_IS_WEAK)
      isa = 1;
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
    V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
    Builder.CreateStore(V, isa_field);

    Builder.CreateStore(DeclPtr, forwarding_field);

    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
    Builder.CreateStore(V, flags_field);

    const llvm::Type *V1;
    V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                               (CGM.getTargetData().getTypeStoreSizeInBits(V1)
                                / 8));
    Builder.CreateStore(V, size_field);

    if (flags & BLOCK_HAS_COPY_DISPOSE) {
      BlockHasCopyDispose = true;
      llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
      Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag, Align),
                          copy_helper);

      llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
      Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
                                                  Align),
                          destroy_helper);
    }
  }

  // Handle CXX destruction of variables.
  QualType DtorTy(Ty);
  while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
    DtorTy = getContext().getBaseElementType(Array);
  if (const RecordType *RT = DtorTy->getAs<RecordType>())
    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!ClassDecl->hasTrivialDestructor()) {
        const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
        assert(D && "EmitLocalBlockVarDecl - destructor is null");
        
        if (const ConstantArrayType *Array = 
              getContext().getAsConstantArrayType(Ty)) {
          {
            DelayedCleanupBlock Scope(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr =
              Builder.CreateBitCast(DeclPtr, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
          
            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr =
              Builder.CreateBitCast(DeclPtr, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
          }
        } else {
          {
            DelayedCleanupBlock Scope(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);

            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
          }
        }
      }
  }

  // Handle the cleanup attribute
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant* F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    // void f(void* arg);
    // __attribute__((cleanup(f))) void *g;
    //
    // To fix this we insert a bitcast here.
    QualType ArgTy = Info.arg_begin()->type;
    {
      DelayedCleanupBlock scope(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                           ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, Args);
    }
    if (Exceptions) {
      EHCleanupBlock Cleanup(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                           ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, Args);
    }
  }

  if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
    {
      DelayedCleanupBlock scope(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
    // FIXME: Turn this on and audit the codegen
    if (0 && Exceptions) {
      EHCleanupBlock Cleanup(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
  }
}
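
A hedged sketch of source declarations that reach the main paths of the function above (hypothetical names; C99 for the VLA, and the __block variable assumes blocks are enabled with -fblocks):

void consume(void *p);
void demo(int n) {
  int zeros[8] = {0};           /* POD, constant init, all zeros: memset */
  int table[4] = {1, 2, 3, 4};  /* POD, constant init: memcpy from a
                                   temporary internal global */
  char buffer[n];               /* non-constant size: stacksave, i8 alloca
                                   of the VLA size, stackrestore cleanup */
  __block int shared = 0;       /* byref: isa/forwarding/flags/size fields
                                   initialized, block release on scope exit */
  consume(buffer);
}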
void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVariablyModifiedType!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  type = type.getCanonicalType();
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent or non-canonical type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(EmitScalarExpr(size), SizeTy,
                                        /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto: 
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getResultType();
      break;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;
    }
  } while (type->isVariablyModifiedType());
}
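
A small illustration of the walk: each loop iteration strips one level of the type, and every VLA bound is emitted at most once into VLASizeMap. For instance (hypothetical C example):

void f(int n) {
  int (*p)[n][2 * n];  /* Pointer -> VariableArray(n) -> VariableArray(2*n)
                          -> Builtin; 'n' and '2 * n' are each cached once */
  (void)p;
}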
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();
  
  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
           RE = FD->redecls_end(); RI != RE; ++RI)
      if (RI->isInlineSpecified()) {
        Fn->addFnAttr(llvm::Attribute::InlineHint);
        break;
      }

  if (getContext().getLangOptions().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (FD->hasAttr<OpenCLKernelAttr>()) {
        llvm::LLVMContext &Context = getLLVMContext();
        llvm::NamedMDNode *OpenCLMetadata = 
          CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
          
        llvm::Value *Op = Fn;
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Op));
      }
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // FIXME: what is going on here and why does it ignore all these
    // interesting type properties?
    QualType FnType =
      getContext().getFunctionType(RetTy, 0, 0,
                                   FunctionProtoType::ExtProtoInfo());

    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOptions().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = (*i)->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}
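
Two prologue details above are visible from the source side; a hedged C++ sketch with hypothetical names (whether the aggregate return is classified Indirect is ABI-dependent, though typical for a struct this large):

// 'inline' on any redeclaration adds llvm::Attribute::InlineHint to Fn.
inline int twice(int x);
inline int twice(int x) { return x + x; }

// An aggregate return classified ABIArgInfo::Indirect makes ReturnValue
// alias the hidden sret argument instead of a fresh "retval" temporary.
struct Big { int a[32]; };
Big makeBig();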
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();
  
  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
  if (!CGM.getCodeGenOpts().NoInline) 
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
             RE = FD->redecls_end(); RI != RE; ++RI)
        if (RI->isInlineSpecified()) {
          Fn->addFnAttr(llvm::Attribute::InlineHint);
          break;
        }

  if (getContext().getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    unsigned NumArgs = 0;
    QualType *ArgsArray = new QualType[Args.size()];
    for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
         i != e; ++i) {
      ArgsArray[NumArgs++] = (*i)->getType();
    }

    QualType FnType =
      getContext().getFunctionType(RetTy, ArgsArray, NumArgs,
                                   FunctionProtoType::ExtProtoInfo());

    delete[] ArgsArray;

    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If this lambda captures this, load it.
        QualType LambdaTagType =
            getContext().getTagDeclType(LambdaThisCaptureField->getParent());
        LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
                                                     LambdaTagType);
        LValue ThisLValue = EmitLValueForField(LambdaLV,
                                               LambdaThisCaptureField);
        CXXThisValue = EmitLoadOfLValue(ThisLValue).getScalarVal();
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'?  The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = (*i)->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}
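
The lambda handling added in this version is easiest to see from the C++ side; a minimal sketch:

struct Counter {
  int n;
  void bump() {
    // When the closure's operator() is emitted, StartFunction fills in
    // LambdaCaptureFields and, because 'this' is captured, loads
    // CXXThisValue through the capture field instead of using
    // CXXABIThisValue directly.
    auto inc = [this] { ++n; };
    inc();
  }
};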
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();
  
  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
           RE = FD->redecls_end(); RI != RE; ++RI)
      if (RI->isInlineSpecified()) {
        Fn->addFnAttr(llvm::Attribute::InlineHint);
        break;
      }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext));
  AllocaInsertPt = new llvm::BitCastInst(Undef,
                                         llvm::Type::getInt32Ty(VMContext), "",
                                         EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = createBasicBlock("return");

  Builder.SetInsertPoint(EntryBB);

  QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
                                                 false, false, 0, 0,
                                                 /*FIXME?*/false,
                                                 /*FIXME?*/CC_Default);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  // FIXME: Leaked.
  // CC info is ignored, hopefully?
  CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
                                              CC_Default, false);

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");
  }

  EmitStartEHSpec(CurCodeDecl);
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (CXXThisDecl)
    CXXThisValue = Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
  if (CXXVTTDecl)
    CXXVTTValue = Builder.CreateLoad(LocalDeclMap[CXXVTTDecl], "vtt");

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = i->second;

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
}
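
The trailing loop also covers parameters; a hedged C99 example of an argument with a variably modified type:

/* The prologue loop calls EmitVLASize for 'm', so the n * sizeof(float)
   row stride is available before the body is emitted. */
void g(int n, float m[n][n]) {
  m[1][1] = 0.0f;
}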
Example #7
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on the
/// target.
void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
  QualType Ty = D.getType();
  bool isByRef = D.hasAttr<BlocksAttr>();
  bool needsDispose = false;
  unsigned Align = 0;

  llvm::Value *DeclPtr;
  if (Ty->isConstantSizeType()) {
    if (!Target.useGlobalsForAutomaticVariables()) {
      // A normal fixed sized variable becomes an alloca in the entry block.
      const llvm::Type *LTy = ConvertTypeForMem(Ty);
      Align = getContext().getDeclAlignInBytes(&D);
      if (isByRef)
        LTy = BuildByRefType(Ty, Align);
      llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
      Alloc->setName(D.getNameAsString().c_str());
      
      if (isByRef)
        Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
      Alloc->setAlignment(Align);
      DeclPtr = Alloc;
    } else {
      // Targets that don't support recursion emit locals as globals.
      const char *Class =
        D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
      DeclPtr = CreateStaticBlockVarDecl(D, Class,
                                         llvm::GlobalValue::InternalLinkage);
    }
    
    // FIXME: Can this happen?
    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      const llvm::Type *LTy =
        llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
      llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");
      
      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);
      
      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;
      
      {
        // Push a cleanup block and restore the stack there.
        CleanupScope scope(*this);
      
        V = Builder.CreateLoad(Stack, "tmp");
        llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
        Builder.CreateCall(F, V);
      }
    }
    
    // Get the element type.
    const llvm::Type *LElemTy = ConvertTypeForMem(Ty);    
    const llvm::Type *LElemPtrTy =
      llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());

    llvm::Value *VLASize = EmitVLASize(Ty);

    // Downcast the VLA size expression
    VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
                                    false, "tmp");
    
    // Allocate memory for the array.
    llvm::Value *VLA = Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext),
                                            VLASize, "vla");
    DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
  }

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for local var declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    assert(HaveInsertPoint() && "Unexpected unreachable point!");
    
    DI->setLocation(D.getLocation());
    if (Target.useGlobalsForAutomaticVariables()) {
      DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
    } else if (isByRef) {
      llvm::Value *Loc;
      bool needsCopyDispose = BlockRequiresCopying(Ty);
      Loc = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      Loc = Builder.CreateLoad(Loc, false);
      Loc = Builder.CreateBitCast(Loc, DeclPtr->getType());
      Loc = Builder.CreateStructGEP(Loc, needsCopyDispose*2+4, "x");
      DI->EmitDeclareOfAutoVariable(&D, Loc, Builder);
    } else
      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
  }

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!ContainsLabel(Init))
      Init = 0;
    else
      EnsureInsertPoint();
  }

  if (Init) {
    llvm::Value *Loc = DeclPtr;
    if (isByRef) {
      bool needsCopyDispose = BlockRequiresCopying(Ty);
      Loc = Builder.CreateStructGEP(DeclPtr, needsCopyDispose*2+4, "x");
    }
    if (Ty->isReferenceType()) {
      RValue RV = EmitReferenceBindingToExpr(Init, Ty, /*IsInitializer=*/true);
      EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
    } else if (!hasAggregateLLVMType(Init->getType())) {
      llvm::Value *V = EmitScalarExpr(Init);
      EmitStoreOfScalar(V, Loc, D.getType().isVolatileQualified(), 
                        D.getType());
    } else if (Init->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(Init, Loc, D.getType().isVolatileQualified());
    } else {
      EmitAggExpr(Init, Loc, D.getType().isVolatileQualified());
    }
  }
  
  if (isByRef) {
    const llvm::PointerType *PtrToInt8Ty
      = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));

    EnsureInsertPoint();
    llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
    llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
    llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
    llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
    llvm::Value *V;
    int flag = 0;
    int flags = 0;

    needsDispose = true;

    if (Ty->isBlockPointerType()) {
      flag |= BLOCK_FIELD_IS_BLOCK;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    } else if (BlockRequiresCopying(Ty)) {
      flag |= BLOCK_FIELD_IS_OBJECT;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    }

    // FIXME: Someone double check this.
    if (Ty.isObjCGCWeak())
      flag |= BLOCK_FIELD_IS_WEAK;

    int isa = 0;
    if (flag & BLOCK_FIELD_IS_WEAK)
      isa = 1;
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
    V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
    Builder.CreateStore(V, isa_field);

    V = Builder.CreateBitCast(DeclPtr, PtrToInt8Ty, "forwarding");
    Builder.CreateStore(V, forwarding_field);

    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
    Builder.CreateStore(V, flags_field);

    const llvm::Type *V1;
    V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                               (CGM.getTargetData().getTypeStoreSizeInBits(V1)
                                / 8));
    Builder.CreateStore(V, size_field);

    if (flags & BLOCK_HAS_COPY_DISPOSE) {
      BlockHasCopyDispose = true;
      llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
      Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag, Align),
                          copy_helper);

      llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
      Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
                                                  Align),
                          destroy_helper);
    }
  }

  // Handle CXX destruction of variables.
  QualType DtorTy(Ty);
  if (const ArrayType *Array = DtorTy->getAs<ArrayType>())
    DtorTy = Array->getElementType();
  if (const RecordType *RT = DtorTy->getAs<RecordType>())
    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!ClassDecl->hasTrivialDestructor()) {
        const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
        assert(D && "EmitLocalBlockVarDecl - destructor is null");
        assert(!Ty->getAs<ArrayType>() && "FIXME - destruction of arrays NYI");
        
        CleanupScope scope(*this);
        EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
      }
  }
    
  // Handle the cleanup attribute
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();
    
    llvm::Constant* F = CGM.GetAddrOfFunction(GlobalDecl(FD));
    assert(F && "Could not find function!");
  
    CleanupScope scope(*this);

    const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    // void f(void* arg);
    // __attribute__((cleanup(f))) void *g;
    // 
    // To fix this we insert a bitcast here.
    QualType ArgTy = Info.arg_begin()->type;
    DeclPtr = Builder.CreateBitCast(DeclPtr, ConvertType(ArgTy));
    
    CallArgList Args;
    Args.push_back(std::make_pair(RValue::get(DeclPtr), 
                                  getContext().getPointerType(D.getType())));
    
    EmitCall(Info, F, Args);
  }

  if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
    CleanupScope scope(*this);
    llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
    V = Builder.CreateLoad(V, false);
    BuildBlockRelease(V);
  }
}
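
To close, the bitcast in the cleanup-attribute handling above corresponds exactly to the mismatch described in its own comment; a runnable C sketch:

/* f takes void*, but the address of g has type void**, so the emitted
   cleanup call bitcasts the decl pointer to f's parameter type. */
void f(void *arg);
void h(void) {
  __attribute__((cleanup(f))) void *g = 0;
}  /* on scope exit: f((void *)&g) */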