Example 1
CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}
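
Clang pairs this with an unprotect step that erases the no-op bitcast once the dangerous region has passed. A minimal sketch of that counterpart, reconstructed from the shape of the code above rather than copied from the same source:

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  // Nothing was protected, so there is nothing to erase.
  if (!protection.Inst) return;

  // Removing the no-op bitcast re-exposes the value to peephole folds.
  protection.Inst->eraseFromParent();
}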
Example 2
static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
                                      QualType ResultType, RValue RV,
                                      const ThunkInfo &Thunk) {
  // Emit the return adjustment.
  bool NullCheckValue = !ResultType->isReferenceType();

  llvm::BasicBlock *AdjustNull = nullptr;
  llvm::BasicBlock *AdjustNotNull = nullptr;
  llvm::BasicBlock *AdjustEnd = nullptr;

  llvm::Value *ReturnValue = RV.getScalarVal();

  if (NullCheckValue) {
    AdjustNull = CGF.createBasicBlock("adjust.null");
    AdjustNotNull = CGF.createBasicBlock("adjust.notnull");
    AdjustEnd = CGF.createBasicBlock("adjust.end");
  
    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue);
    CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
    CGF.EmitBlock(AdjustNotNull);
  }

  auto ClassDecl = ResultType->getPointeeType()->getAsCXXRecordDecl();
  auto ClassAlign = CGF.CGM.getClassPointerAlignment(ClassDecl);
  ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(CGF,
                                            Address(ReturnValue, ClassAlign),
                                            Thunk.Return);

  if (NullCheckValue) {
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustNull);
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustEnd);
  
    llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2);
    PHI->addIncoming(ReturnValue, AdjustNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()), 
                     AdjustNull);
    ReturnValue = PHI;
  }
  
  return RValue::get(ReturnValue);
}
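
This example and the next one (an older revision of the same function) emit the same control-flow shape: branch on a null check, adjust on the non-null path, and merge through a PHI. Below is a self-contained sketch of that diamond against plain llvm::IRBuilder; the helper name emitNullCheckedAdjustment and the function_ref callback are illustrative, not Clang API:

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"

// Wraps an arbitrary pointer adjustment in a null-check diamond. `Adjust`
// stands in for performReturnAdjustment; B must already be positioned in F.
static llvm::Value *emitNullCheckedAdjustment(
    llvm::IRBuilder<> &B, llvm::Function *F, llvm::Value *Ptr,
    llvm::function_ref<llvm::Value *(llvm::Value *)> Adjust) {
  llvm::LLVMContext &Ctx = B.getContext();
  llvm::BasicBlock *NullBB = llvm::BasicBlock::Create(Ctx, "adjust.null", F);
  llvm::BasicBlock *NotNullBB =
      llvm::BasicBlock::Create(Ctx, "adjust.notnull", F);
  llvm::BasicBlock *EndBB = llvm::BasicBlock::Create(Ctx, "adjust.end", F);

  // Branch on whether the pointer is null.
  B.CreateCondBr(B.CreateIsNull(Ptr), NullBB, NotNullBB);

  // Non-null path: perform the adjustment.
  B.SetInsertPoint(NotNullBB);
  llvm::Value *Adjusted = Adjust(Ptr);
  // Remember the block we end in, in case Adjust created new blocks.
  llvm::BasicBlock *AdjustedPred = B.GetInsertBlock();
  B.CreateBr(EndBB);

  // Null path: a null pointer flows through unadjusted.
  B.SetInsertPoint(NullBB);
  B.CreateBr(EndBB);

  // Merge the two paths, exactly as the PHI in the examples does.
  B.SetInsertPoint(EndBB);
  llvm::PHINode *PHI = B.CreatePHI(Ptr->getType(), 2);
  PHI->addIncoming(Adjusted, AdjustedPred);
  PHI->addIncoming(llvm::Constant::getNullValue(Ptr->getType()), NullBB);
  return PHI;
}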
Example 3
static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
                                      QualType ResultType, RValue RV,
                                      const ThunkInfo &Thunk) {
  // Emit the return adjustment.
  bool NullCheckValue = !ResultType->isReferenceType();
  
  llvm::BasicBlock *AdjustNull = 0;
  llvm::BasicBlock *AdjustNotNull = 0;
  llvm::BasicBlock *AdjustEnd = 0;
  
  llvm::Value *ReturnValue = RV.getScalarVal();

  if (NullCheckValue) {
    AdjustNull = CGF.createBasicBlock("adjust.null");
    AdjustNotNull = CGF.createBasicBlock("adjust.notnull");
    AdjustEnd = CGF.createBasicBlock("adjust.end");
  
    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue);
    CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
    CGF.EmitBlock(AdjustNotNull);
  }
  
  ReturnValue = PerformTypeAdjustment(CGF, ReturnValue, 
                                      Thunk.Return.NonVirtual, 
                                      Thunk.Return.VBaseOffsetOffset,
                                      /*IsReturnAdjustment*/true);
  
  if (NullCheckValue) {
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustNull);
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustEnd);
  
    llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2);
    PHI->addIncoming(ReturnValue, AdjustNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()), 
                     AdjustNull);
    ReturnValue = PHI;
  }
  
  return RValue::get(ReturnValue);
}
Example 4
llvm::Value *CodeGenFunction::EmitUPCCastSharedToLocal(llvm::Value *Value,
                                                       QualType DestTy,
                                                       SourceLocation Loc) {
  const ASTContext& Context = getContext();
  QualType ArgTy = Context.getPointerType(Context.getSharedType(Context.VoidTy));
  QualType ResultTy = Context.VoidPtrTy;

  const char *FnName = "__getaddr";

  CallArgList Args;
  Args.add(RValue::get(Value), ArgTy);
  if (CGM.getCodeGenOpts().UPCDebug) {
    getFileAndLine(*this, Loc, &Args);
    FnName = "__getaddrg";
  }

  RValue Result = EmitUPCCall(*this, FnName, ResultTy, Args);

  return Builder.CreateBitCast(Result.getScalarVal(), ConvertType(DestTy));
}
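
Judging from the names and the argument lists assembled above, the UPC runtime entry points being targeted look roughly like the following. These prototypes are an assumption, not copied from a runtime header, and the order of the extra arguments added by getFileAndLine is a guess:

// Presumed UPC runtime interface (hypothetical prototypes):
//   void *__getaddr(shared void *ptr);                     // default build
//   void *__getaddrg(shared void *ptr /*, file, line */);  // when UPCDebug is set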
Example 5
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  if (!T->isReferenceType()) {
    if (PerformInit)
      EmitDeclInit(*this, D, DeclPtr);
    if (CGM.isTypeConstant(D.getType(), true))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclPtr);
    return;
  }

  assert(PerformInit && "cannot have constant initializer which needs "
         "destruction for reference");
  unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
  RValue RV = EmitReferenceBindingToExpr(Init, &D);
  EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
}
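
A source-level case that reaches the reference branch at the bottom, for illustration:

// Illustrative: a dynamically initialized global reference. The binding is
// emitted, then the referent's address is stored into the global's slot.
extern int &pickGlobal();
int &r = pickGlobal();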
Example 6
DominatingValue<RValue>::saved_type
DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
  if (rv.isScalar()) {
    llvm::Value *V = rv.getScalarVal();

    // These automatically dominate and don't need to be saved.
    if (!DominatingLLVMValue::needsSaving(V))
      return saved_type(V, ScalarLiteral);

    // Everything else needs an alloca.
    llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
    CGF.Builder.CreateStore(V, addr);
    return saved_type(addr, ScalarAddress);
  }

  if (rv.isComplex()) {
    CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
    llvm::Type *ComplexTy =
      llvm::StructType::get(V.first->getType(), V.second->getType(),
                            (void*) nullptr);
    llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
    CGF.Builder.CreateStore(V.first,
                            CGF.Builder.CreateStructGEP(ComplexTy, addr, 0));
    CGF.Builder.CreateStore(V.second,
                            CGF.Builder.CreateStructGEP(ComplexTy, addr, 1));
    return saved_type(addr, ComplexAddress);
  }

  assert(rv.isAggregate());
  llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile?
  if (!DominatingLLVMValue::needsSaving(V))
    return saved_type(V, AggregateLiteral);

  llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
  CGF.Builder.CreateStore(V, addr);
  return saved_type(addr, AggregateAddress);  
}
Example 7
static CodeGenFunction::ComplexPairTy
convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
                      QualType DestType) {
  assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
         "DestType must have complex evaluation kind.");
  CodeGenFunction::ComplexPairTy ComplexVal;
  if (Val.isScalar()) {
    // Convert the input element to the element type of the complex.
    auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
    auto ScalarVal =
        CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestElementType);
    ComplexVal = CodeGenFunction::ComplexPairTy(
        ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
  } else {
    assert(Val.isComplex() && "Must be a scalar or complex.");
    auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
    auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
    ComplexVal.first = CGF.EmitScalarConversion(
        Val.getComplexVal().first, SrcElementType, DestElementType);
    ComplexVal.second = CGF.EmitScalarConversion(
        Val.getComplexVal().second, SrcElementType, DestElementType);
  }
  return ComplexVal;
}
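
In source terms, the two branches correspond to the usual C conversion rules, e.g.:

// Illustrative equivalents of the two branches above:
//   float f;           _Complex double c1 = f;   // scalar:  ((double)f, 0.0)
//   _Complex float z;  _Complex double c2 = z;   // complex: convert each half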
Example 8
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
  QualType Ty = D.getType();
  bool isByRef = D.hasAttr<BlocksAttr>();
  bool needsDispose = false;
  unsigned Align = 0;

  llvm::Value *DeclPtr;
  if (Ty->isConstantSizeType()) {
    if (!Target.useGlobalsForAutomaticVariables()) {
      // A normal fixed sized variable becomes an alloca in the entry block.
      const llvm::Type *LTy = ConvertTypeForMem(Ty);
      Align = getContext().getDeclAlignInBytes(&D);
      if (isByRef)
        LTy = BuildByRefType(Ty, Align);
      llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
      Alloc->setName(D.getNameAsString().c_str());
      
      if (isByRef)
        Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
      Alloc->setAlignment(Align);
      DeclPtr = Alloc;
    } else {
      // Targets that don't support recursion emit locals as globals.
      const char *Class =
        D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
      DeclPtr = CreateStaticBlockVarDecl(D, Class,
                                         llvm::GlobalValue::InternalLinkage);
    }
    
    // FIXME: Can this happen?
    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      const llvm::Type *LTy =
        llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
      llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");
      
      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);
      
      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;
      
      {
        // Push a cleanup block and restore the stack there.
        CleanupScope scope(*this);
      
        V = Builder.CreateLoad(Stack, "tmp");
        llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
        Builder.CreateCall(F, V);
      }
    }
    
    // Get the element type.
    const llvm::Type *LElemTy = ConvertTypeForMem(Ty);    
    const llvm::Type *LElemPtrTy =
      llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());

    llvm::Value *VLASize = EmitVLASize(Ty);

    // Downcast the VLA size expression
    VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
                                    false, "tmp");
    
    // Allocate memory for the array.
    llvm::Value *VLA = Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext),
                                            VLASize, "vla");
    DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
  }

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for local var declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    assert(HaveInsertPoint() && "Unexpected unreachable point!");
    
    DI->setLocation(D.getLocation());
    if (Target.useGlobalsForAutomaticVariables()) {
      DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
    } else if (isByRef) {
      llvm::Value *Loc;
      bool needsCopyDispose = BlockRequiresCopying(Ty);
      Loc = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      Loc = Builder.CreateLoad(Loc, false);
      Loc = Builder.CreateBitCast(Loc, DeclPtr->getType());
      Loc = Builder.CreateStructGEP(Loc, needsCopyDispose*2+4, "x");
      DI->EmitDeclareOfAutoVariable(&D, Loc, Builder);
    } else
      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
  }

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!ContainsLabel(Init))
      Init = 0;
    else
      EnsureInsertPoint();
  }

  if (Init) {
    llvm::Value *Loc = DeclPtr;
    if (isByRef) {
      bool needsCopyDispose = BlockRequiresCopying(Ty);
      Loc = Builder.CreateStructGEP(DeclPtr, needsCopyDispose*2+4, "x");
    }
    if (Ty->isReferenceType()) {
      RValue RV = EmitReferenceBindingToExpr(Init, Ty, /*IsInitializer=*/true);
      EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
    } else if (!hasAggregateLLVMType(Init->getType())) {
      llvm::Value *V = EmitScalarExpr(Init);
      EmitStoreOfScalar(V, Loc, D.getType().isVolatileQualified(), 
                        D.getType());
    } else if (Init->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(Init, Loc, D.getType().isVolatileQualified());
    } else {
      EmitAggExpr(Init, Loc, D.getType().isVolatileQualified());
    }
  }
  
  if (isByRef) {
    const llvm::PointerType *PtrToInt8Ty
      = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));

    EnsureInsertPoint();
    llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
    llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
    llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
    llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
    llvm::Value *V;
    int flag = 0;
    int flags = 0;

    needsDispose = true;

    if (Ty->isBlockPointerType()) {
      flag |= BLOCK_FIELD_IS_BLOCK;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    } else if (BlockRequiresCopying(Ty)) {
      flag |= BLOCK_FIELD_IS_OBJECT;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    }

    // FIXME: Someone double check this.
    if (Ty.isObjCGCWeak())
      flag |= BLOCK_FIELD_IS_WEAK;

    int isa = 0;
    if (flag&BLOCK_FIELD_IS_WEAK)
      isa = 1;
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
    V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
    Builder.CreateStore(V, isa_field);

    V = Builder.CreateBitCast(DeclPtr, PtrToInt8Ty, "forwarding");
    Builder.CreateStore(V, forwarding_field);

    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
    Builder.CreateStore(V, flags_field);

    const llvm::Type *V1;
    V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                               (CGM.getTargetData().getTypeStoreSizeInBits(V1)
                                / 8));
    Builder.CreateStore(V, size_field);

    if (flags & BLOCK_HAS_COPY_DISPOSE) {
      BlockHasCopyDispose = true;
      llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
      Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag, Align),
                          copy_helper);

      llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
      Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
                                                  Align),
                          destroy_helper);
    }
  }

  // Handle CXX destruction of variables.
  QualType DtorTy(Ty);
  if (const ArrayType *Array = DtorTy->getAs<ArrayType>())
    DtorTy = Array->getElementType();
  if (const RecordType *RT = DtorTy->getAs<RecordType>())
    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!ClassDecl->hasTrivialDestructor()) {
        const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
        assert(D && "EmitLocalBlockVarDecl - destructor is null");
        assert(!Ty->getAs<ArrayType>() && "FIXME - destruction of arrays NYI");
        
        CleanupScope scope(*this);
        EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
      }
  }
    
  // Handle the cleanup attribute
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();
    
    llvm::Constant* F = CGM.GetAddrOfFunction(GlobalDecl(FD));
    assert(F && "Could not find function!");
  
    CleanupScope scope(*this);

    const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    // void f(void* arg);
    // __attribute__((cleanup(f))) void *g;
    // 
    // To fix this we insert a bitcast here.
    QualType ArgTy = Info.arg_begin()->type;
    DeclPtr = Builder.CreateBitCast(DeclPtr, ConvertType(ArgTy));
    
    CallArgList Args;
    Args.push_back(std::make_pair(RValue::get(DeclPtr), 
                                  getContext().getPointerType(D.getType())));
    
    EmitCall(Info, F, Args);
  }

  if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
    CleanupScope scope(*this);
    llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
    V = Builder.CreateLoad(V, false);
    BuildBlockRelease(V);
  }
}
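
The StructGEP indices 0 through 5 used in the byref section above follow the blocks ABI layout for a __block variable. Sketched as a C struct, with field names illustrative and the layout inferred from the indices:

struct __Block_byref_x {
  void *isa;                                  // index 0
  struct __Block_byref_x *forwarding;         // index 1
  int flags;                                  // index 2
  int size;                                   // index 3
  // Present only when BLOCK_HAS_COPY_DISPOSE is set in flags:
  void (*copy_helper)(void *dst, void *src);  // index 4
  void (*destroy_helper)(void *src);          // index 5
  /* T x; */                                  // the variable itself follows
};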
Example 9
// This function does roughly the same thing as GenerateThunk, but in a
// very different way, so that va_start and va_end work correctly.
// FIXME: This function assumes "this" is the first non-sret LLVM argument of
//        a function, and that there is an alloca built in the entry block
//        for all accesses to "this".
// FIXME: This function assumes there is only one "ret" statement per function.
// FIXME: Cloning isn't correct in the presence of indirect goto!
// FIXME: This implementation of thunks bloats codesize by duplicating the
//        function definition.  There are alternatives:
//        1. Add some sort of stub support to LLVM for cases where we can
//           do a this adjustment, then a sibcall.
//        2. We could transform the definition to take a va_list instead of an
//           actual variable argument list, then have the thunks (including a
//           no-op thunk for the regular definition) call va_start/va_end.
//           There's a bit of per-call overhead for this solution, but it's
//           better for codesize if the definition is long.
llvm::Function *
CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn,
                                      const CGFunctionInfo &FnInfo,
                                      GlobalDecl GD, const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType = FPT->getReturnType();

  // Get the original function
  assert(FnInfo.isVariadic());
  llvm::Type *Ty = CGM.getTypes().GetFunctionType(FnInfo);
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
  llvm::Function *BaseFn = cast<llvm::Function>(Callee);

  // Clone to thunk.
  llvm::ValueToValueMapTy VMap;
  llvm::Function *NewFn = llvm::CloneFunction(BaseFn, VMap);
  Fn->replaceAllUsesWith(NewFn);
  NewFn->takeName(Fn);
  Fn->eraseFromParent();
  Fn = NewFn;

  // "Initialize" CGF (minimally).
  CurFn = Fn;

  // Get the "this" value
  llvm::Function::arg_iterator AI = Fn->arg_begin();
  if (CGM.ReturnTypeUsesSRet(FnInfo))
    ++AI;

  // Find the first store of "this", which will be to the alloca associated
  // with "this".
  Address ThisPtr(&*AI, CGM.getClassPointerAlignment(MD->getParent()));
  llvm::BasicBlock *EntryBB = &Fn->front();
  llvm::BasicBlock::iterator ThisStore =
      std::find_if(EntryBB->begin(), EntryBB->end(), [&](llvm::Instruction &I) {
        return isa<llvm::StoreInst>(I) &&
               I.getOperand(0) == ThisPtr.getPointer();
      });
  assert(ThisStore != EntryBB->end() &&
         "Store of this should be in entry block?");
  // Adjust "this", if necessary.
  Builder.SetInsertPoint(&*ThisStore);
  llvm::Value *AdjustedThisPtr =
      CGM.getCXXABI().performThisAdjustment(*this, ThisPtr, Thunk.This);
  ThisStore->setOperand(0, AdjustedThisPtr);

  if (!Thunk.Return.isEmpty()) {
    // Fix up the returned value, if necessary.
    for (llvm::BasicBlock &BB : *Fn) {
      llvm::Instruction *T = BB.getTerminator();
      if (isa<llvm::ReturnInst>(T)) {
        RValue RV = RValue::get(T->getOperand(0));
        T->eraseFromParent();
        Builder.SetInsertPoint(&BB);
        RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
        Builder.CreateRet(RV.getScalarVal());
        break;
      }
    }
  }

  return Fn;
}
Example 10
File: CGDecl.cpp Project: aaasz/SHP
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
  QualType Ty = D.getType();
  bool isByRef = D.hasAttr<BlocksAttr>();
  bool needsDispose = false;
  unsigned Align = 0;
  bool IsSimpleConstantInitializer = false;

  llvm::Value *DeclPtr;
  if (Ty->isConstantSizeType()) {
    if (!Target.useGlobalsForAutomaticVariables()) {
      
      // If this value is an array or struct, is POD, and if the initializer is
      // a statically determinable constant, try to optimize it.
      if (D.getInit() && !isByRef &&
          (Ty->isArrayType() || Ty->isRecordType()) &&
          Ty->isPODType() &&
          D.getInit()->isConstantInitializer(getContext())) {
        // If this variable is marked 'const', emit the value as a global.
        if (CGM.getCodeGenOpts().MergeAllConstants &&
            Ty.isConstant(getContext())) {
          EmitStaticBlockVarDecl(D);
          return;
        }
        
        IsSimpleConstantInitializer = true;
      }
      
      // A normal fixed sized variable becomes an alloca in the entry block.
      const llvm::Type *LTy = ConvertTypeForMem(Ty);
      if (isByRef)
        LTy = BuildByRefType(&D);
      llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
      Alloc->setName(D.getNameAsString());

      Align = getContext().getDeclAlignInBytes(&D);
      if (isByRef)
        Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
      Alloc->setAlignment(Align);
      DeclPtr = Alloc;
    } else {
      // Targets that don't support recursion emit locals as globals.
      const char *Class =
        D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
      DeclPtr = CreateStaticBlockVarDecl(D, Class,
                                         llvm::GlobalValue::InternalLinkage);
    }

    // FIXME: Can this happen?
    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
      llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);

      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      {
        // Push a cleanup block and restore the stack there.
        DelayedCleanupBlock scope(*this);

        V = Builder.CreateLoad(Stack, "tmp");
        llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
        Builder.CreateCall(F, V);
      }
    }

    // Get the element type.
    const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
    const llvm::Type *LElemPtrTy =
      llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());

    llvm::Value *VLASize = EmitVLASize(Ty);

    // Downcast the VLA size expression
    VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
                                    false, "tmp");

    // Allocate memory for the array.
    llvm::AllocaInst *VLA = 
      Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
    VLA->setAlignment(getContext().getDeclAlignInBytes(&D));

    DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
  }

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for local var declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    assert(HaveInsertPoint() && "Unexpected unreachable point!");

    DI->setLocation(D.getLocation());
    if (Target.useGlobalsForAutomaticVariables()) {
      DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
    } else
      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
  }

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!ContainsLabel(Init))
      Init = 0;
    else
      EnsureInsertPoint();
  }

  if (Init) {
    llvm::Value *Loc = DeclPtr;
    if (isByRef)
      Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D), 
                                    D.getNameAsString());

    bool isVolatile =
      getContext().getCanonicalType(D.getType()).isVolatileQualified();
    
    // If the initializer was a simple constant initializer, we can optimize it
    // in various ways.
    if (IsSimpleConstantInitializer) {
      llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(),D.getType(),this);
      assert(Init != 0 && "Wasn't a simple constant init?");
      
      llvm::Value *AlignVal = 
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Align);
      const llvm::Type *IntPtr =
        llvm::IntegerType::get(VMContext, LLVMPointerWidth);
      llvm::Value *SizeVal =
        llvm::ConstantInt::get(IntPtr, getContext().getTypeSizeInBytes(Ty));

      const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
      if (Loc->getType() != BP)
        Loc = Builder.CreateBitCast(Loc, BP, "tmp");
      
      // If the initializer is all zeros, codegen with memset.
      if (isa<llvm::ConstantAggregateZero>(Init)) {
        llvm::Value *Zero =
          llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
        Builder.CreateCall4(CGM.getMemSetFn(), Loc, Zero, SizeVal, AlignVal);
      } else {
        // Otherwise, create a temporary global with the initializer then 
        // memcpy from the global to the alloca.
        std::string Name = GetStaticDeclName(*this, D, ".");
        llvm::GlobalVariable *GV =
          new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
                                   llvm::GlobalValue::InternalLinkage,
                                   Init, Name, 0, false, 0);
        GV->setAlignment(Align);

        llvm::Value *SrcPtr = GV;
        if (SrcPtr->getType() != BP)
          SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
        
        Builder.CreateCall4(CGM.getMemCpyFn(), Loc, SrcPtr, SizeVal, AlignVal);
      }
    } else if (Ty->isReferenceType()) {
      RValue RV = EmitReferenceBindingToExpr(Init, Ty, /*IsInitializer=*/true);
      EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
    } else if (!hasAggregateLLVMType(Init->getType())) {
      llvm::Value *V = EmitScalarExpr(Init);
      EmitStoreOfScalar(V, Loc, isVolatile, D.getType());
    } else if (Init->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(Init, Loc, isVolatile);
    } else {
      EmitAggExpr(Init, Loc, isVolatile);
    }
  }

  if (isByRef) {
    const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);

    EnsureInsertPoint();
    llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
    llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
    llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
    llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
    llvm::Value *V;
    int flag = 0;
    int flags = 0;

    needsDispose = true;

    if (Ty->isBlockPointerType()) {
      flag |= BLOCK_FIELD_IS_BLOCK;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    } else if (BlockRequiresCopying(Ty)) {
      flag |= BLOCK_FIELD_IS_OBJECT;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    }

    // FIXME: Someone double check this.
    if (Ty.isObjCGCWeak())
      flag |= BLOCK_FIELD_IS_WEAK;

    int isa = 0;
    if (flag&BLOCK_FIELD_IS_WEAK)
      isa = 1;
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
    V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
    Builder.CreateStore(V, isa_field);

    Builder.CreateStore(DeclPtr, forwarding_field);

    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
    Builder.CreateStore(V, flags_field);

    const llvm::Type *V1;
    V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                               (CGM.getTargetData().getTypeStoreSizeInBits(V1)
                                / 8));
    Builder.CreateStore(V, size_field);

    if (flags & BLOCK_HAS_COPY_DISPOSE) {
      BlockHasCopyDispose = true;
      llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
      Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag, Align),
                          copy_helper);

      llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
      Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
                                                  Align),
                          destroy_helper);
    }
  }

  // Handle CXX destruction of variables.
  QualType DtorTy(Ty);
  while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
    DtorTy = getContext().getBaseElementType(Array);
  if (const RecordType *RT = DtorTy->getAs<RecordType>())
    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!ClassDecl->hasTrivialDestructor()) {
        const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
        assert(D && "EmitLocalBlockVarDecl - destructor is null");
        
        if (const ConstantArrayType *Array = 
              getContext().getAsConstantArrayType(Ty)) {
          {
            DelayedCleanupBlock Scope(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr =
              Builder.CreateBitCast(DeclPtr, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
          
            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr =
              Builder.CreateBitCast(DeclPtr, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
          }
        } else {
          {
            DelayedCleanupBlock Scope(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);

            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
          }
        }
      }
  }

  // Handle the cleanup attribute
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant* F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    // void f(void* arg);
    // __attribute__((cleanup(f))) void *g;
    //
    // To fix this we insert a bitcast here.
    QualType ArgTy = Info.arg_begin()->type;
    {
      DelayedCleanupBlock scope(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                           ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, Args);
    }
    if (Exceptions) {
      EHCleanupBlock Cleanup(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                           ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, Args);
    }
  }

  if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
    {
      DelayedCleanupBlock scope(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
    // FIXME: Turn this on and audit the codegen
    if (0 && Exceptions) {
      EHCleanupBlock Cleanup(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
  }
}
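
Inputs that exercise the IsSimpleConstantInitializer fast path added in this revision, for illustration:

void f(void) {
  int zeros[16] = {0};          // all-zero constant init: lowered to a memset
  int table[4] = {1, 2, 3, 4};  // non-zero: emitted as a private global + memcpy
}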
Example 11
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  QualType AllocType = E->getAllocatedType();
  FunctionDecl *NewFD = E->getOperatorNew();
  const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();

  CallArgList NewArgs;

  // The allocation size is the first argument.
  QualType SizeTy = getContext().getSizeType();

  llvm::Value *NumElements = 0;
  llvm::Value *AllocSize = EmitCXXNewAllocSize(*this, E, NumElements);
  
  NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
    QualType ArgType = NewFTy->getArgType(i);

    assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
           getTypePtr() ==
           getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
           "type mismatch in call argument!");

    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));

  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
         "Extra arguments in non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
       NewArg != NewArgEnd; ++NewArg) {
    QualType ArgType = NewArg->getType();
    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));
  }

  // Emit the call to new.
  RValue RV =
    EmitCall(CGM.getTypes().getFunctionInfo(NewFTy->getResultType(), NewArgs),
             CGM.GetAddrOfFunction(NewFD), NewArgs, NewFD);

  // If an allocation function is declared with an empty exception specification,
  // it returns null to indicate failure to allocate storage. [expr.new]p13.
  // (We don't need to check for null when there's no new initializer and
  // we're allocating a POD type).
  bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
    !(AllocType->isPODType() && !E->hasInitializer());

  llvm::BasicBlock *NewNull = 0;
  llvm::BasicBlock *NewNotNull = 0;
  llvm::BasicBlock *NewEnd = 0;

  llvm::Value *NewPtr = RV.getScalarVal();

  if (NullCheckResult) {
    NewNull = createBasicBlock("new.null");
    NewNotNull = createBasicBlock("new.notnull");
    NewEnd = createBasicBlock("new.end");

    llvm::Value *IsNull =
      Builder.CreateICmpEQ(NewPtr,
                           llvm::Constant::getNullValue(NewPtr->getType()),
                           "isnull");

    Builder.CreateCondBr(IsNull, NewNull, NewNotNull);
    EmitBlock(NewNotNull);
  }

  if (uint64_t CookiePadding = CalculateCookiePadding(getContext(), E)) {
    uint64_t CookieOffset = 
      CookiePadding - getContext().getTypeSize(SizeTy) / 8;
    
    llvm::Value *NumElementsPtr = 
      Builder.CreateConstInBoundsGEP1_64(NewPtr, CookieOffset);
    
    NumElementsPtr = Builder.CreateBitCast(NumElementsPtr, 
                                           ConvertType(SizeTy)->getPointerTo());
    Builder.CreateStore(NumElements, NumElementsPtr);

    // Now add the padding to the new ptr.
    NewPtr = Builder.CreateConstInBoundsGEP1_64(NewPtr, CookiePadding);
  }
  
  NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));

  EmitNewInitializer(*this, E, NewPtr, NumElements);

  if (NullCheckResult) {
    Builder.CreateBr(NewEnd);
    NewNotNull = Builder.GetInsertBlock();
    EmitBlock(NewNull);
    Builder.CreateBr(NewEnd);
    EmitBlock(NewEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(NewPtr, NewNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()), NewNull);

    NewPtr = PHI;
  }

  return NewPtr;
}
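
When CalculateCookiePadding is non-zero, the allocation for new T[n] is laid out with the element count stored just before the array, as implied by the two GEPs above:

//   raw allocation: [ padding ... | size_t n | T[0] T[1] ... ]
//                                  ^ NumElementsPtr
//   NewPtr, after the second GEP, points at T[0].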
Example 12
void
CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  bool ThreadsafeStatics = getContext().getLangOptions().ThreadsafeStatics;
  
  llvm::SmallString<256> GuardVName;
  CGM.getMangleContext().mangleGuardVariable(&D, GuardVName);

  // Create the guard variable.
  const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(VMContext);
  llvm::GlobalValue *GuardVariable =
    new llvm::GlobalVariable(CGM.getModule(), Int64Ty,
                             false, GV->getLinkage(),
                             llvm::Constant::getNullValue(Int64Ty),
                             GuardVName.str());

  // Load the first byte of the guard variable.
  const llvm::Type *PtrTy
    = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
  llvm::Value *V = 
    Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy), "tmp");

  llvm::BasicBlock *InitCheckBlock = createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  Builder.CreateCondBr(Builder.CreateIsNull(V, "tobool"), 
                       InitCheckBlock, EndBlock);

  EmitBlock(InitCheckBlock);

  if (ThreadsafeStatics) {
    // Call __cxa_guard_acquire.
    V = Builder.CreateCall(getGuardAcquireFn(*this), GuardVariable);
               
    llvm::BasicBlock *InitBlock = createBasicBlock("init");
  
    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);
  
    EmitBlock(InitBlock);

    if (Exceptions) {
      EHCleanupBlock Cleanup(*this);
    
      // Call __cxa_guard_abort.
      Builder.CreateCall(getGuardAbortFn(*this), GuardVariable);
    }
  }

  if (D.getType()->isReferenceType()) {
    QualType T = D.getType();
    // We don't want to pass true for IsInitializer here, because a static
    // reference to a temporary does not extend its lifetime.
    RValue RV = EmitReferenceBindingToExpr(D.getInit(),
                                           /*IsInitializer=*/false);
    EmitStoreOfScalar(RV.getScalarVal(), GV, /*Volatile=*/false, T);

  } else
    EmitDeclInit(*this, D, GV);

  if (ThreadsafeStatics) {
    // Call __cxa_guard_release.
    Builder.CreateCall(getGuardReleaseFn(*this), GuardVariable);
  } else {
    llvm::Value *One = 
      llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 1);
    Builder.CreateStore(One, Builder.CreateBitCast(GuardVariable, PtrTy));
  }

  EmitBlock(EndBlock);
}
Example 13
// This function does roughly the same thing as GenerateThunk, but in a
// very different way, so that va_start and va_end work correctly.
// FIXME: This function assumes "this" is the first non-sret LLVM argument of
//        a function, and that there is an alloca built in the entry block
//        for all accesses to "this".
// FIXME: This function assumes there is only one "ret" statement per function.
// FIXME: Cloning isn't correct in the presence of indirect goto!
// FIXME: This implementation of thunks bloats codesize by duplicating the
//        function definition.  There are alternatives:
//        1. Add some sort of stub support to LLVM for cases where we can
//           do a this adjustment, then a sibcall.
//        2. We could transform the definition to take a va_list instead of an
//           actual variable argument list, then have the thunks (including a
//           no-op thunk for the regular definition) call va_start/va_end.
//           There's a bit of per-call overhead for this solution, but it's
//           better for codesize if the definition is long.
void CodeGenFunction::GenerateVarArgsThunk(
                                      llvm::Function *Fn,
                                      const CGFunctionInfo &FnInfo,
                                      GlobalDecl GD, const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType = FPT->getResultType();

  // Get the original function
  assert(FnInfo.isVariadic());
  llvm::Type *Ty = CGM.getTypes().GetFunctionType(FnInfo);
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
  llvm::Function *BaseFn = cast<llvm::Function>(Callee);

  // Clone to thunk.
  llvm::Function *NewFn = llvm::CloneFunction(BaseFn);
  CGM.getModule().getFunctionList().push_back(NewFn);
  Fn->replaceAllUsesWith(NewFn);
  NewFn->takeName(Fn);
  Fn->eraseFromParent();
  Fn = NewFn;

  // "Initialize" CGF (minimally).
  CurFn = Fn;

  // Get the "this" value
  llvm::Function::arg_iterator AI = Fn->arg_begin();
  if (CGM.ReturnTypeUsesSRet(FnInfo))
    ++AI;

  // Find the first store of "this", which will be to the alloca associated
  // with "this".
  llvm::Value *ThisPtr = &*AI;
  llvm::BasicBlock *EntryBB = Fn->begin();
  llvm::Instruction *ThisStore = 0;
  for (llvm::BasicBlock::iterator I = EntryBB->begin(), E = EntryBB->end();
       I != E; I++) {
    if (isa<llvm::StoreInst>(I) && I->getOperand(0) == ThisPtr) {
      ThisStore = cast<llvm::StoreInst>(I);
      break;
    }
  }
  assert(ThisStore && "Store of this should be in entry block?");
  // Adjust "this", if necessary.
  Builder.SetInsertPoint(ThisStore);
  llvm::Value *AdjustedThisPtr = 
    PerformTypeAdjustment(*this, ThisPtr, 
                          Thunk.This.NonVirtual, 
                          Thunk.This.VCallOffsetOffset,
                          /*IsReturnAdjustment*/false);
  ThisStore->setOperand(0, AdjustedThisPtr);

  if (!Thunk.Return.isEmpty()) {
    // Fix up the returned value, if necessary.
    for (llvm::Function::iterator I = Fn->begin(), E = Fn->end(); I != E; I++) {
      llvm::Instruction *T = I->getTerminator();
      if (isa<llvm::ReturnInst>(T)) {
        RValue RV = RValue::get(T->getOperand(0));
        T->eraseFromParent();
        Builder.SetInsertPoint(&*I);
        RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
        Builder.CreateRet(RV.getScalarVal());
        break;
      }
    }
  }
}
Example 14
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  auto GetDest = [&] {
    if (!RValTy->isVoidType() && !Dest) {
      Dest = CreateMemTemp(RValTy, ".atomicdst");
    }
    return Dest;
  };

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (HaveRetTy && !RetTy->isVoidType())
      return Res;
    // The value is returned via an explicit out param.
    if (RetTy->isVoidType())
      return RValue::get(nullptr);
    // The value is returned directly for optimized libcalls but the caller
    // expects an out-param.
    if (UseOptimizedLibcall) {
      llvm::Value *ResVal = Res.getScalarVal();
      llvm::StoreInst *StoreDest = Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
      StoreDest->setAlignment(Align);
    }
    return convertTempToRValue(Dest, RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *ITy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8);
  llvm::Value *OrigDest = GetDest();
  Ptr = Builder.CreateBitCast(
      Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
  if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
  if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
  if (Dest && !E->isCmpXChg())
    Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should never get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}
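
As a rough sketch of what drives the two paths above (my illustration, not part of the example): a constant memory order reaches a single EmitAtomicOp call in the switch, while a runtime order forces the monotonic/acquire/release/acq_rel/seq_cst basic blocks and the switch instruction. Assuming a reasonably recent Clang, input like the following exercises both:

#include <atomic>

int load_const_order(std::atomic<int> &a) {
  // Order folds to a constant: one EmitAtomicOp with llvm::Acquire.
  return a.load(std::memory_order_acquire);
}

int load_runtime_order(std::atomic<int> &a, std::memory_order mo) {
  // Order is not obviously constant: emits the switch over the
  // monotonic/acquire/seq_cst blocks, with monotonic as the default case.
  return a.load(mo);
}
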
llvm::Constant *
CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn,
                                   GlobalDecl GD, bool Extern,
                                   const CovariantThunkAdjustment &Adjustment) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType = FPT->getResultType();

  FunctionArgList Args;
  ImplicitParamDecl *ThisDecl =
    ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
                              MD->getThisType(getContext()));
  Args.push_back(std::make_pair(ThisDecl, ThisDecl->getType()));
  for (FunctionDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end();
       i != e; ++i) {
    ParmVarDecl *D = *i;
    Args.push_back(std::make_pair(D, D->getType()));
  }
  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__thunk_named_foo_");
  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, ResultType, 0,
                                          Extern
                                            ? FunctionDecl::Extern
                                            : FunctionDecl::Static,
                                          false, true);
  StartFunction(FD, ResultType, Fn, Args, SourceLocation());

  // Generate the body.
  const llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
                                   FPT->isVariadic());
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);

  CallArgList CallArgs;

  bool ShouldAdjustReturnPointer = true;
  QualType ArgType = MD->getThisType(getContext());
  llvm::Value *Arg = Builder.CreateLoad(LocalDeclMap[ThisDecl], "this");
  if (!Adjustment.ThisAdjustment.isEmpty()) {
    // Do the 'this' adjustment.
    const llvm::Type *OrigTy = Callee->getType();
    Arg = DynamicTypeAdjust(Arg, Adjustment.ThisAdjustment);
    
    if (!Adjustment.ReturnAdjustment.isEmpty()) {
      const CovariantThunkAdjustment &ReturnAdjustment = 
        CovariantThunkAdjustment(ThunkAdjustment(),
                                 Adjustment.ReturnAdjustment);
      
      Callee = CGM.BuildCovariantThunk(GD, Extern, ReturnAdjustment);
      
      Callee = Builder.CreateBitCast(Callee, OrigTy);
      ShouldAdjustReturnPointer = false;
    }
  }    

  CallArgs.push_back(std::make_pair(RValue::get(Arg), ArgType));

  for (FunctionDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end();
       i != e; ++i) {
    ParmVarDecl *D = *i;
    QualType ArgType = D->getType();

    // llvm::Value *Arg = CGF.GetAddrOfLocalVar(Dst);
    Expr *Arg = new (getContext()) DeclRefExpr(D, ArgType.getNonReferenceType(),
                                               SourceLocation());
    CallArgs.push_back(std::make_pair(EmitCallArg(Arg, ArgType), ArgType));
  }

  RValue RV = EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs,
                                                      FPT->getCallConv(),
                                                      FPT->getNoReturnAttr()),
                       Callee, ReturnValueSlot(), CallArgs, MD);
  if (ShouldAdjustReturnPointer && !Adjustment.ReturnAdjustment.isEmpty()) {
    bool CanBeZero = !(ResultType->isReferenceType()
    // FIXME: attr nonnull can't be zero either
                       /* || ResultType->hasAttr<NonNullAttr>() */ );
    // Do the return result adjustment.
    if (CanBeZero) {
      llvm::BasicBlock *NonZeroBlock = createBasicBlock();
      llvm::BasicBlock *ZeroBlock = createBasicBlock();
      llvm::BasicBlock *ContBlock = createBasicBlock();

      const llvm::Type *Ty = RV.getScalarVal()->getType();
      llvm::Value *Zero = llvm::Constant::getNullValue(Ty);
      Builder.CreateCondBr(Builder.CreateICmpNE(RV.getScalarVal(), Zero),
                           NonZeroBlock, ZeroBlock);
      EmitBlock(NonZeroBlock);
      llvm::Value *NZ = 
        DynamicTypeAdjust(RV.getScalarVal(), Adjustment.ReturnAdjustment);
      EmitBranch(ContBlock);
      EmitBlock(ZeroBlock);
      llvm::Value *Z = RV.getScalarVal();
      EmitBlock(ContBlock);
      llvm::PHINode *RVOrZero = Builder.CreatePHI(Ty);
      RVOrZero->reserveOperandSpace(2);
      RVOrZero->addIncoming(NZ, NonZeroBlock);
      RVOrZero->addIncoming(Z, ZeroBlock);
      RV = RValue::get(RVOrZero);
    } else
      RV = RValue::get(DynamicTypeAdjust(RV.getScalarVal(), 
                                         Adjustment.ReturnAdjustment));
  }

  if (!ResultType->isVoidType())
    EmitReturnOfRValue(RV, ResultType);

  FinishFunction();
  return Fn;
}
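
For context, a hypothetical input (not taken from the example) that needs a covariant return thunk like the one generated above: an override that both changes the return type and sits at a non-zero base offset, so the returned pointer must be adjusted and null-checked, mirroring the CanBeZero path:

struct B1 { virtual ~B1() {} };
struct B2 { virtual B2 *clone() { return this; } virtual ~B2() {} };
struct D : B1, B2 {
  // Covariant return (D* vs. B2*); B2 lies at a non-zero offset inside D,
  // so the thunk in the B2-in-D vtable adjusts both 'this' and the result.
  D *clone() override { return this; }
};
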
Example No. 16
void
CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  // Bail out early if this initializer isn't reachable.
  if (!Builder.GetInsertBlock()) return;

  bool ThreadsafeStatics = getContext().getLangOptions().ThreadsafeStatics;
  
  llvm::SmallString<256> GuardVName;
  CGM.getMangleContext().mangleGuardVariable(&D, GuardVName);

  // Create the guard variable.
  llvm::GlobalVariable *GuardVariable =
    new llvm::GlobalVariable(CGM.getModule(), Int64Ty,
                             false, GV->getLinkage(),
                             llvm::Constant::getNullValue(Int64Ty),
                             GuardVName.str());

  // Load the first byte of the guard variable.
  const llvm::Type *PtrTy
    = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
  llvm::Value *V = 
    Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy), "tmp");

  llvm::BasicBlock *InitCheckBlock = createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  Builder.CreateCondBr(Builder.CreateIsNull(V, "tobool"), 
                       InitCheckBlock, EndBlock);

  EmitBlock(InitCheckBlock);

  // Cope with thread-safe statics and, if needed, the exceptional edge.
  if (ThreadsafeStatics) {    
    // Call __cxa_guard_acquire.
    V = Builder.CreateCall(getGuardAcquireFn(*this), GuardVariable);
               
    llvm::BasicBlock *InitBlock = createBasicBlock("init");
  
    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);
  
    // Call __cxa_guard_abort along the exceptional edge.
    if (Exceptions)
      EHStack.pushCleanup<CallGuardAbort>(EHCleanup, GuardVariable);
    
    EmitBlock(InitBlock);
  }

  if (D.getType()->isReferenceType()) {
    QualType T = D.getType();
    RValue RV = EmitReferenceBindingToExpr(D.getInit(), &D);
    EmitStoreOfScalar(RV.getScalarVal(), GV, /*Volatile=*/false, T);

  } else
    EmitDeclInit(*this, D, GV);

  if (ThreadsafeStatics) {
    // Pop the guard-abort cleanup if we pushed one.
    if (Exceptions)
      PopCleanupBlock();

    // Call __cxa_guard_release.  This cannot throw.
    Builder.CreateCall(getGuardReleaseFn(*this), GuardVariable);    
  } else {
    llvm::Value *One = 
      llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 1);
    Builder.CreateStore(One, Builder.CreateBitCast(GuardVariable, PtrTy));
  }

  // Register the call to the destructor.
  if (!D.getType()->isReferenceType())
    EmitDeclDestroy(*this, D, GV);
  
  EmitBlock(EndBlock);
}
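
A minimal input that exercises this path (my sketch, assuming the Itanium C++ ABI): any function-local static with a dynamic initializer gets a mangled guard variable, the first-byte check, and, with thread-safe statics enabled, the __cxa_guard_acquire/__cxa_guard_release bracket shown above:

int expensive();  // hypothetical, defined elsewhere

int cached() {
  // Emits the guard load, the init.check block, and the guarded call to
  // expensive(); with -fno-threadsafe-statics the guard byte is simply
  // stored to 1 instead of calling the __cxa_guard_* runtime functions.
  static int value = expensive();
  return value;
}
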
Example No. 17
void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
  // Pragma 'simd' code depends on presence of 'lastprivate'.
  // If present, we have to separate last iteration of the loop:
  //
  // if (LastIteration != 0) {
  //   for (IV in 0..LastIteration-1) BODY;
  //   BODY with updates of lastprivate vars;
  //   <Final counter/linear vars updates>;
  // }
  //
  // otherwise (when there's no lastprivate):
  //
  //   for (IV in 0..LastIteration) BODY;
  //   <Final counter/linear vars updates>;
  //

  // Walk clauses and process safelen/lastprivate.
  bool SeparateIter = false;
  LoopStack.setParallel();
  LoopStack.setVectorizerEnable(true);
  for (auto C : S.clauses()) {
    switch (C->getClauseKind()) {
    case OMPC_safelen: {
      RValue Len = EmitAnyExpr(cast<OMPSafelenClause>(C)->getSafelen(),
                               AggValueSlot::ignored(), true);
      llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
      LoopStack.setVectorizerWidth(Val->getZExtValue());
      // In presence of finite 'safelen', it may be unsafe to mark all
      // the memory instructions parallel, because loop-carried
      // dependences of 'safelen' iterations are possible.
      LoopStack.setParallel(false);
      break;
    }
    case OMPC_aligned:
      EmitOMPAlignedClause(*this, CGM, cast<OMPAlignedClause>(*C));
      break;
    case OMPC_lastprivate:
      SeparateIter = true;
      break;
    default:
      // Not handled yet
      ;
    }
  }

  RunCleanupsScope DirectiveScope(*this);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());

  // Emit the loop iteration variable.
  const Expr *IVExpr = S.getIterationVariable();
  const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
  EmitVarDecl(*IVDecl);
  EmitIgnoredExpr(S.getInit());

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to recompute the iteration count
  // wherever it is needed (e.g., because it folds to a constant).
  if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  if (SeparateIter) {
    // Emit: if (LastIteration > 0) - begin.
    RegionCounter Cnt = getPGORegionCounter(&S);
    auto ThenBlock = createBasicBlock("simd.if.then");
    auto ContBlock = createBasicBlock("simd.if.end");
    EmitBranchOnBoolExpr(S.getPreCond(), ThenBlock, ContBlock, Cnt.getCount());
    EmitBlock(ThenBlock);
    Cnt.beginRegion(Builder);
    // Emit 'then' code.
    {
      OMPPrivateScope LoopScope(*this);
      EmitPrivateLoopCounters(*this, LoopScope, S.counters());
      EmitOMPInnerLoop(S, LoopScope, /* SeparateIter */ true);
      EmitOMPLoopBody(S, /* SeparateIter */ true);
    }
    EmitOMPSimdFinal(S);
    // Emit: if (LastIteration != 0) - end.
    EmitBranch(ContBlock);
    EmitBlock(ContBlock, true);
  } else {
    {
      OMPPrivateScope LoopScope(*this);
      EmitPrivateLoopCounters(*this, LoopScope, S.counters());
      EmitOMPInnerLoop(S, LoopScope);
    }
    EmitOMPSimdFinal(S);
  }

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
}
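
As an illustration of the clause handling (hypothetical input; the exact loop metadata varies by Clang version): 'safelen' caps the vectorizer width and turns off the parallel-loop annotation, while 'lastprivate' triggers the separated last iteration described in the comment at the top of the function:

void saxpy(float *x, float *y, float a, int n) {
  float last = 0.0f;
#pragma omp simd safelen(8) lastprivate(last)
  for (int i = 0; i < n; ++i) {
    y[i] = a * x[i] + y[i];  // vectorized with width <= 8
    last = y[i];             // lastprivate: final iteration emitted separately
  }
  (void)last;  // holds the value from the last iteration
}
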
Example No. 18
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
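
To make the two strategies concrete (my sketch, not from the example): a scalar that fits a legal integer width takes the native path, being bitcast or ptrtoint'd to an integer before the atomic store, while an oversized type makes shouldUseLibcall() return true and goes through the generic __atomic_store runtime call:

#include <atomic>

struct Big { char bytes[64]; };  // too wide for a native atomic store

std::atomic<float> f;  // 32 bits: float is bitcast to i32, then stored
std::atomic<Big> big;  // libcall: __atomic_store(size, mem, val, order)

void store_both(float v, const Big &b) {
  f.store(v);    // native, sequentially consistent store instruction
  big.store(b);  // generic libcall path, seq_cst order argument
}
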
Example No. 19
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();


  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result.
  if (CGM.ReturnTypeUsesSret(CallInfo))
    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT =
          cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size()) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }


  llvm::BasicBlock *InvokeDest = getInvokeDest();
  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
      Builder.CreateStore(CI, V);
      return RValue::getAggregate(V);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // FIXME: Avoid the conversion through memory if possible.
    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    return RValue::get(EmitLoadOfScalar(V, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}
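
Finally, a hedged sketch of inputs that hit three of the return-lowering cases in the switch above (the actual ABIArgInfo kinds are decided per target ABI, and _Complex in C++ is a Clang extension, so this is illustrative only):

struct Large { long a, b, c, d; };  // typically Indirect: hidden sret pointer
struct Small { int x; };            // often Coerce: returned as an integer

Large make_large();
Small make_small();
_Complex float make_complex();      // Direct: real/imag via extractvalue

void drive() {
  Large l = make_large();              // temporary alloca passed as Args[0]
  Small s = make_small();              // CreateCoercedStore into a temp, reload
  _Complex float c = make_complex();   // RValue::getComplex(real, imag)
  (void)l; (void)s; (void)c;
}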