Example #1
LValue ComplexExprEmitter::
EmitCompoundAssignLValue(const CompoundAssignOperator *E,
          ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&),
                         RValue &Val) {
  TestAndClearIgnoreReal();
  TestAndClearIgnoreImag();
  QualType LHSTy = E->getLHS()->getType();

  BinOpInfo OpInfo;

  // Load the RHS and LHS operands.
  // __block variables need to have the rhs evaluated first, plus this should
  // improve codegen a little.
  OpInfo.Ty = E->getComputationResultType();

  // The RHS should have been converted to the computation type.
  assert(OpInfo.Ty->isAnyComplexType());
  assert(CGF.getContext().hasSameUnqualifiedType(OpInfo.Ty,
                                                 E->getRHS()->getType()));
  OpInfo.RHS = Visit(E->getRHS());
  
  LValue LHS = CGF.EmitLValue(E->getLHS());

  // Load from the l-value and convert it.
  if (LHSTy->isAnyComplexType()) {
    ComplexPairTy LHSVal = EmitLoadOfLValue(LHS, E->getExprLoc());
    OpInfo.LHS = EmitComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
  } else {
    llvm::Value *LHSVal = CGF.EmitLoadOfScalar(LHS, E->getExprLoc());
    OpInfo.LHS = EmitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
  }

  // Expand the binary operator.
  ComplexPairTy Result = (this->*Func)(OpInfo);

  // Truncate the result and store it into the LHS lvalue.
  if (LHSTy->isAnyComplexType()) {
    ComplexPairTy ResVal = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
    EmitStoreOfComplex(ResVal, LHS, /*isInit*/ false);
    Val = RValue::getComplex(ResVal);
  } else {
    llvm::Value *ResVal =
        CGF.EmitComplexToScalarConversion(Result, OpInfo.Ty, LHSTy);
    CGF.EmitStoreOfScalar(ResVal, LHS, /*isInit*/ false);
    Val = RValue::get(ResVal);
  }

  return LHS;
}
Example #2
void 
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(type)) {
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
  } else if (LV.isSimple()) {
    CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
  }
}
Example #3
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         llvm::Constant *DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() && 
         "Should not call EmitDeclInit on a reference!");
  
  ASTContext &Context = CGF.getContext();

  CharUnits alignment = Context.getDeclAlign(&D);
  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type, alignment);

  const Expr *Init = D.getInit();
  if (!CGF.hasAggregateLLVMType(type)) {
    CodeGenModule &CGM = CGF.CGM;
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.isThreadSpecified());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(Init, DeclPtr, lv.isVolatile());
  } else {
    CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv,AggValueSlot::IsDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
                                                  AggValueSlot::IsNotAliased));
  }
}
Example #4
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         llvm::Constant *DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() && 
         "Should not call EmitDeclInit on a reference!");
  
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Context = CGF.getContext();
    
  const Expr *Init = D.getInit();
  QualType T = D.getType();
  bool isVolatile = Context.getCanonicalType(T).isVolatileQualified();

  if (!CGF.hasAggregateLLVMType(T)) {
    llvm::Value *V = CGF.EmitScalarExpr(Init);
    CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, T);
  } else if (T->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
  } else {
    CGF.EmitAggExpr(Init, DeclPtr, isVolatile);
    
    // Avoid generating destructor(s) for initialized objects. 
    if (!isa<CXXConstructExpr>(Init))
      return;
    
    const ConstantArrayType *Array = Context.getAsConstantArrayType(T);
    if (Array)
      T = Context.getBaseElementType(Array);
    
    const RecordType *RT = T->getAs<RecordType>();
    if (!RT)
      return;
    
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasTrivialDestructor())
      return;
    
    CXXDestructorDecl *Dtor = RD->getDestructor(Context);
    
    llvm::Constant *DtorFn;
    if (Array) {
      DtorFn = 
        CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor, 
                                                             Array, 
                                                             DeclPtr);
      const llvm::Type *Int8PtrTy =
        llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
      DeclPtr = llvm::Constant::getNullValue(Int8PtrTy);
     } else
      DtorFn = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete);

    CGF.EmitCXXGlobalDtorRegistration(DtorFn, DeclPtr);
  }
}
Example #5
DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType type) {
  if (Loc::isLocType(type))
    return makeNull();

  if (type->isIntegralOrEnumerationType())
    return makeIntVal(0, type);

  if (type->isArrayType() || type->isRecordType() || type->isVectorType() ||
      type->isAnyComplexType())
    return makeCompoundVal(type, BasicVals.getEmptySValList());

  // FIXME: Handle floats.
  return UnknownVal();
}
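
For reference, a hypothetical type-to-SVal mapping (inferred from the branches above, not part of the original example) of what makeZeroVal yields:

// int             -> nonloc::ConcreteInt 0      (integral/enum path)
// int *           -> null Loc via makeNull()    (Loc::isLocType path)
// struct S, T[4], _Complex double
//                 -> nonloc::CompoundVal over an empty SVal list
// double          -> UnknownVal()               (floats are still a FIXME)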
Example #6
void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
                                   ExplodedNode *Pred,
                                   ExplodedNodeSet &Dst) {
  StmtNodeBuilder B(Pred, Dst, *currBldrCtx);

  ProgramStateRef state = Pred->getState();
  const LocationContext *LCtx = Pred->getLocationContext();
  QualType T = getContext().getCanonicalType(IE->getType());
  unsigned NumInitElements = IE->getNumInits();

  if (!IE->isGLValue() &&
      (T->isArrayType() || T->isRecordType() || T->isVectorType() ||
       T->isAnyComplexType())) {
    llvm::ImmutableList<SVal> vals = getBasicVals().getEmptySValList();

    // Handle base case where the initializer has no elements.
    // e.g.: static int* myArray[] = {};
    if (NumInitElements == 0) {
      SVal V = svalBuilder.makeCompoundVal(T, vals);
      B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
      return;
    }

    for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
         ei = IE->rend(); it != ei; ++it) {
      SVal V = state->getSVal(cast<Expr>(*it), LCtx);
      vals = getBasicVals().prependSVal(V, vals);
    }

    B.generateNode(IE, Pred,
                   state->BindExpr(IE, LCtx,
                                   svalBuilder.makeCompoundVal(T, vals)));
    return;
  }

  // Handle scalars: int{5} and int{} and GLvalues.
  // Note, if the InitListExpr is a GLvalue, it means that there is an address
  // representing it, so it must have a single init element.
  assert(NumInitElements <= 1);

  SVal V;
  if (NumInitElements == 0)
    V = getSValBuilder().makeZeroVal(T);
  else
    V = state->getSVal(IE->getInit(0), LCtx);

  B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
}
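
To make the two paths above concrete, here are hypothetical source-level inputs (illustrative only, not from the example) and the branch each one takes:

// int a[2] = {1, 2};  // compound path: array type, elements prepended in reverse
// int x = int{5};     // scalar path, NumInitElements == 1 -> value of the init
// int y = int{};      // scalar path, NumInitElements == 0 -> makeZeroVal(int)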
Example #7
void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
                                   ExplodedNode *Pred,
                                   ExplodedNodeSet &Dst) {
  StmtNodeBuilder B(Pred, Dst, *currBldrCtx);

  ProgramStateRef state = Pred->getState();
  const LocationContext *LCtx = Pred->getLocationContext();
  QualType T = getContext().getCanonicalType(IE->getType());
  unsigned NumInitElements = IE->getNumInits();
  
  if (T->isArrayType() || T->isRecordType() || T->isVectorType() ||
      T->isAnyComplexType()) {
    llvm::ImmutableList<SVal> vals = getBasicVals().getEmptySValList();
    
    // Handle base case where the initializer has no elements.
    // e.g.: static int* myArray[] = {};
    if (NumInitElements == 0) {
      SVal V = svalBuilder.makeCompoundVal(T, vals);
      B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
      return;
    }
    
    for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
         ei = IE->rend(); it != ei; ++it) {
      SVal V = state->getSVal(cast<Expr>(*it), LCtx);
      if (dyn_cast_or_null<CXXTempObjectRegion>(V.getAsRegion()))
        V = UnknownVal();
      vals = getBasicVals().consVals(V, vals);
    }
    
    B.generateNode(IE, Pred,
                   state->BindExpr(IE, LCtx,
                                   svalBuilder.makeCompoundVal(T, vals)));
    return;
  }

  // Handle scalars: int{5} and int{}.
  assert(NumInitElements <= 1);

  SVal V;
  if (NumInitElements == 0)
    V = getSValBuilder().makeZeroVal(T);
  else
    V = state->getSVal(IE->getInit(0), LCtx);

  B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
}
Example #8
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
  if (SrcPtr->getType() != BP)
    SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");

  // Get size and alignment info for this aggregate.
  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);

  // FIXME: Handle variable sized types.
  const llvm::Type *IntPtr =
          llvm::IntegerType::get(VMContext, LLVMPointerWidth);

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.
  Builder.CreateCall4(CGM.getMemCpyFn(),
                      DestPtr, SrcPtr,
                      // TypeInfo.first describes size in bits.
                      llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
                      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                             TypeInfo.second/8));
}
Example #9
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    QualType RetTy = FI.getReturnType();
    const ABIArgInfo &RetAI = FI.getReturnInfo();

    switch (RetAI.getKind()) {
    case ABIArgInfo::Indirect:
      if (RetTy->isAnyComplexType()) {
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
      } else {
        EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                          false, RetTy);
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      // The internal return value temp always will have
      // pointer-to-return-type type.
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;

    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}
Example #10
void 
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV, T);
  } else if (T->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV, T);
  } else if (T->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(T)) {
    CGF.EmitAnyExpr(E, LV.getAddress(), false);
  } else {
    CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(E), LV, T);
  }
}
Example #11
void 
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV, T);
  } else if (T->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV, T);
  } else if (T->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(T)) {
    CGF.EmitAggExpr(E, AggValueSlot::forAddr(LV.getAddress(), false, true,
                                             false, Dest.isZeroed()));
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV, T);
  }
}
Example #12
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         llvm::Constant *DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() && 
         "Should not call EmitDeclInit on a reference!");
  
  ASTContext &Context = CGF.getContext();
    
  const Expr *Init = D.getInit();
  QualType T = D.getType();
  bool isVolatile = Context.getCanonicalType(T).isVolatileQualified();

  if (!CGF.hasAggregateLLVMType(T)) {
    llvm::Value *V = CGF.EmitScalarExpr(Init);
    CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, T);
  } else if (T->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
  } else {
    CGF.EmitAggExpr(Init, DeclPtr, isVolatile);
  }
}
Example #13
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements) {
  QualType AllocType = E->getAllocatedType();

  if (!E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, NewPtr,
                                 E->constructor_arg_begin(),
                                 E->constructor_arg_end());

      return;
    }
    
    // We have a POD type.
    if (E->getNumConstructorArgs() == 0)
      return;

    assert(E->getNumConstructorArgs() == 1 &&
           "Can only have one argument to initializer of POD type.");
      
    const Expr *Init = E->getConstructorArg(0);
    
    if (!CGF.hasAggregateLLVMType(AllocType))
      CGF.Builder.CreateStore(CGF.EmitScalarExpr(Init), NewPtr);
    else if (AllocType->isAnyComplexType())
      CGF.EmitComplexExprIntoAddr(Init, NewPtr, 
                                  AllocType.isVolatileQualified());
    else
      CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
    return;
  }
  
  if (CXXConstructorDecl *Ctor = E->getConstructor())
    CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr);
}
Example #14
bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
    T->isObjCObjectType();
}
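
The predicate above drives the recurring three-way dispatch seen throughout these examples (scalar vs. _Complex vs. true aggregate). A minimal sketch of that pattern, assuming the older EmitAggExpr(addr, volatile) overload used in Examples #12 and #13; EmitValueIntoAddr is a hypothetical name, not a Clang API:

// Hypothetical helper illustrating the common dispatch on hasAggregateLLVMType.
static void EmitValueIntoAddr(CodeGenFunction &CGF, const Expr *Init,
                              llvm::Value *Addr, QualType T, bool isVolatile) {
  if (!CGF.hasAggregateLLVMType(T)) {
    // Scalars: evaluate to a single llvm::Value and store it directly.
    CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), Addr, isVolatile, T);
  } else if (T->isAnyComplexType()) {
    // _Complex: emitted as a (real, imag) pair straight into memory.
    CGF.EmitComplexExprIntoAddr(Init, Addr, isVolatile);
  } else {
    // Records, arrays, ObjC objects: the aggregate path (memcpy-style copies,
    // member-wise initialization, and so on).
    CGF.EmitAggExpr(Init, Addr, isVolatile);
  }
}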
Example #15
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile, unsigned Alignment) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() || 
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }
  
  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo = 
    getContext().getTypeInfoInChars(Ty);

  if (!Alignment)
    Alignment = TypeInfo.second.getQuantity();

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP);

  llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOptions().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CharUnits size = TypeInfo.first;
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr, 
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CharUnits size = TypeInfo.first;
        llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal = 
          llvm::ConstantInt::get(SizeTy, size.getQuantity());
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr, 
                                                      SizeVal);
        return;
      }
    }
  }
  
  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy, 
                                              TypeInfo.first.getQuantity()),
                       Alignment, isVolatile);
}
Example #16
LValue ComplexExprEmitter::
EmitCompoundAssignLValue(const CompoundAssignOperator *E,
          ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&),
                         RValue &Val) {
  TestAndClearIgnoreReal();
  TestAndClearIgnoreImag();
  QualType LHSTy = E->getLHS()->getType();
  if (const AtomicType *AT = LHSTy->getAs<AtomicType>())
    LHSTy = AT->getValueType();

  BinOpInfo OpInfo;

  // Load the RHS and LHS operands.
  // __block variables need to have the rhs evaluated first, plus this should
  // improve codegen a little.
  OpInfo.Ty = E->getComputationResultType();
  QualType ComplexElementTy = cast<ComplexType>(OpInfo.Ty)->getElementType();

  // The RHS should have been converted to the computation type.
  if (E->getRHS()->getType()->isRealFloatingType()) {
    assert(
        CGF.getContext()
            .hasSameUnqualifiedType(ComplexElementTy, E->getRHS()->getType()));
    OpInfo.RHS = ComplexPairTy(CGF.EmitScalarExpr(E->getRHS()), nullptr);
  } else {
    assert(CGF.getContext()
               .hasSameUnqualifiedType(OpInfo.Ty, E->getRHS()->getType()));
    OpInfo.RHS = Visit(E->getRHS());
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // Load from the l-value and convert it.
  if (LHSTy->isAnyComplexType()) {
    ComplexPairTy LHSVal = EmitLoadOfLValue(LHS, E->getExprLoc());
    OpInfo.LHS = EmitComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
  } else {
    llvm::Value *LHSVal = CGF.EmitLoadOfScalar(LHS, E->getExprLoc());
    // For floating point real operands we can directly pass the scalar form
    // to the binary operator emission and potentially get more efficient code.
    if (LHSTy->isRealFloatingType()) {
      if (!CGF.getContext().hasSameUnqualifiedType(ComplexElementTy, LHSTy))
        LHSVal = CGF.EmitScalarConversion(LHSVal, LHSTy, ComplexElementTy);
      OpInfo.LHS = ComplexPairTy(LHSVal, nullptr);
    } else {
      OpInfo.LHS = EmitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
    }
  }

  // Expand the binary operator.
  ComplexPairTy Result = (this->*Func)(OpInfo);

  // Truncate the result and store it into the LHS lvalue.
  if (LHSTy->isAnyComplexType()) {
    ComplexPairTy ResVal = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
    EmitStoreOfComplex(ResVal, LHS, /*isInit*/ false);
    Val = RValue::getComplex(ResVal);
  } else {
    llvm::Value *ResVal =
        CGF.EmitComplexToScalarConversion(Result, OpInfo.Ty, LHSTy);
    CGF.EmitStoreOfScalar(ResVal, LHS, /*isInit*/ false);
    Val = RValue::get(ResVal);
  }

  return LHS;
}
Example #17
void CodeGenFunction::EmitAggregateClear(llvm::Value *DestPtr, QualType Ty) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  EmitMemSetToZero(DestPtr, Ty);
}
Example #18
bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
    T->isMemberFunctionPointerType();
}
Example #19
// FIXME: should rewrite according to the cast kind.
SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
  if (val.isUnknownOrUndef() || castTy == originalTy)
    return val;

  // For const casts, just propagate the value.
  if (!castTy->isVariableArrayType() && !originalTy->isVariableArrayType())
    if (Context.hasSameUnqualifiedType(castTy, originalTy))
      return val;

  // Check for casts to real or complex numbers.  We don't handle these at all
  // right now.
  if (castTy->isFloatingType() || castTy->isAnyComplexType())
    return UnknownVal();
  
  // Check for casts from integers to integers.
  if (castTy->isIntegerType() && originalTy->isIntegerType())
    return evalCastNL(cast<NonLoc>(val), castTy);

  // Check for casts from pointers to integers.
  if (castTy->isIntegerType() && Loc::IsLocType(originalTy))
    return evalCastL(cast<Loc>(val), castTy);

  // Check for casts from integers to pointers.
  if (Loc::IsLocType(castTy) && originalTy->isIntegerType()) {
    if (nonloc::LocAsInteger *LV = dyn_cast<nonloc::LocAsInteger>(&val)) {
      if (const MemRegion *R = LV->getLoc().getAsRegion()) {
        StoreManager &storeMgr = StateMgr.getStoreManager();
        R = storeMgr.CastRegion(R, castTy);
        return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
      }
      return LV->getLoc();
    }
    goto DispatchCast;
  }

  // Just pass through function and block pointers.
  if (originalTy->isBlockPointerType() || originalTy->isFunctionPointerType()) {
    assert(Loc::IsLocType(castTy));
    return val;
  }

  // Check for casts from array type to another type.
  if (originalTy->isArrayType()) {
    // We will always decay to a pointer.
    val = StateMgr.ArrayToPointer(cast<Loc>(val));

    // Are we casting from an array to a pointer?  If so just pass on
    // the decayed value.
    if (castTy->isPointerType())
      return val;

    // Are we casting from an array to an integer?  If so, cast the decayed
    // pointer value to an integer.
    assert(castTy->isIntegerType());

    // FIXME: Keep these here for now in case we decide soon that we
    // need the original decayed type.
    //    QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
    //    QualType pointerTy = C.getPointerType(elemTy);
    return evalCastL(cast<Loc>(val), castTy);
  }

  // Check for casts from a region to a specific type.
  if (const MemRegion *R = val.getAsRegion()) {
    // FIXME: We should handle the case where we strip off view layers to get
    //  to a desugared type.

    if (!Loc::IsLocType(castTy)) {
      // FIXME: There can be gross cases where one casts the result of a function
      // (that returns a pointer) to some other value that happens to fit
      // within that pointer value.  We currently have no good way to
      // model such operations.  When this happens, the underlying operation
      // is that the caller is reasoning about bits.  Conceptually we are
      // layering a "view" of a location on top of those bits.  Perhaps
      // we need to be more lazy about mutual possible views, even on an
      // SVal?  This may be necessary for bit-level reasoning as well.
      return UnknownVal();
    }

    // We get a symbolic function pointer for a dereference of a function
    // pointer, but it is of function type. Example:

    //  struct FPRec {
    //    void (*my_func)(int * x);
    //  };
    //
    //  int bar(int x);
    //
    //  int f1_a(struct FPRec* foo) {
    //    int x;
    //    (*foo->my_func)(&x);
    //    return bar(x)+1; // no-warning
    //  }

    assert(Loc::IsLocType(originalTy) || originalTy->isFunctionType() ||
           originalTy->isBlockPointerType());

    StoreManager &storeMgr = StateMgr.getStoreManager();

    // Delegate to store manager to get the result of casting a region to a
    // different type.  If the MemRegion* returned is NULL, this expression
    // evaluates to UnknownVal.
    R = storeMgr.CastRegion(R, castTy);
    return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
  }

DispatchCast:
  // All other cases.
  return isa<Loc>(val) ? evalCastL(cast<Loc>(val), castTy)
                       : evalCastNL(cast<NonLoc>(val), castTy);
}
Example #20
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();


  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result.
  if (CGM.ReturnTypeUsesSret(CallInfo))
    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size()) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }


  llvm::BasicBlock *InvokeDest = getInvokeDest();
  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
      Builder.CreateStore(CI, V);
      return RValue::getAggregate(V);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // FIXME: Avoid the conversion through memory if possible.
    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    return RValue::get(EmitLoadOfScalar(V, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}