Example #1
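/// Emit the initializer for a global C++ variable.  For non-reference types,
/// run the initializer and then either mark the storage invariant or register
/// a destructor; for references, evaluate the binding and store the referent's
/// address.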
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  if (!T->isReferenceType()) {
    if (getLangOpts().OpenMP && D.hasAttr<OMPThreadPrivateDeclAttr>())
      (void)CGM.getOpenMPRuntime().EmitOMPThreadPrivateVarDefinition(
          &D, DeclPtr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
          PerformInit, this);
    if (PerformInit)
      EmitDeclInit(*this, D, DeclPtr);
    if (CGM.isTypeConstant(D.getType(), true))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclPtr);
    return;
  }

  assert(PerformInit && "cannot have constant initializer which needs "
         "destruction for reference");
  unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
  RValue RV = EmitReferenceBindingToExpr(Init);
  EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
}
Example #2
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
  assert(Src.isAggregate() && "value must be aggregate value!");

  // If the result is ignored, don't copy from the value.
  if (DestPtr == 0) {
    if (!Src.isVolatileQualified() || (IgnoreResult && Ignore))
      return;
    // If the source is volatile, we must read from it; to do that, we need
    // some place to put it.
    DestPtr = CGF.CreateMemTemp(E->getType(), "agg.tmp");
  }

  if (RequiresGCollection) {
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                              DestPtr, Src.getAggregateAddr(),
                                              E->getType());
    return;
  }
  // If the result of the assignment is used, copy the LHS there also.
  // FIXME: Pass VolatileDest as well.  I think we also need to merge volatile
  // from the source as well, as we can't eliminate it if either operand
  // is volatile, unless copy has volatile for both source and destination.
  CGF.EmitAggregateCopy(DestPtr, Src.getAggregateAddr(), E->getType(),
                        VolatileDest|Src.isVolatileQualified());
}
Example #3
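/// Flatten a structure-typed rvalue into a sequence of scalar arguments,
/// recursing into any fields that are themselves aggregates.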
void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}
Example #4
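/// Lower an OpenMP simd directive: mark the associated loop as parallel and
/// vectorizable, and apply a 'safelen' clause, if present, as a limit on the
/// vectorizer width.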
void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
  const CapturedStmt *CS = cast<CapturedStmt>(S.getAssociatedStmt());
  const Stmt *Body = CS->getCapturedStmt();
  LoopStack.setParallel();
  LoopStack.setVectorizerEnable(true);
  for (auto C : S.clauses()) {
    switch (C->getClauseKind()) {
    case OMPC_safelen: {
      RValue Len = EmitAnyExpr(cast<OMPSafelenClause>(C)->getSafelen(),
                               AggValueSlot::ignored(), true);
      llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
      LoopStack.setVectorizerWidth(Val->getZExtValue());
      // In the presence of a finite 'safelen', it may be unsafe to mark all
      // the memory instructions parallel, because loop-carried
      // dependences of 'safelen' iterations are possible.
      LoopStack.setParallel(false);
      break;
    }
    default:
      // Not handled yet
      ;
    }
  }
  EmitStmt(Body);
}
Example #5
/// \brief Emit a libcall for a binary operation on complex types.
ComplexPairTy ComplexExprEmitter::EmitComplexBinOpLibCall(StringRef LibCallName,
                                                          const BinOpInfo &Op) {
  CallArgList Args;
  Args.add(RValue::get(Op.LHS.first),
           Op.Ty->castAs<ComplexType>()->getElementType());
  Args.add(RValue::get(Op.LHS.second),
           Op.Ty->castAs<ComplexType>()->getElementType());
  Args.add(RValue::get(Op.RHS.first),
           Op.Ty->castAs<ComplexType>()->getElementType());
  Args.add(RValue::get(Op.RHS.second),
           Op.Ty->castAs<ComplexType>()->getElementType());

  // We *must* use the full CG function call building logic here because the
  // complex type has special ABI handling. We should also not forget the
  // special calling convention that may be used for compiler builtins.
  const CGFunctionInfo &FuncInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(
      Op.Ty, Args, FunctionType::ExtInfo(/* No CC here - will be added later */),
      RequiredArgs::All);
  llvm::FunctionType *FTy = CGF.CGM.getTypes().GetFunctionType(FuncInfo);
  llvm::Constant *Func = CGF.CGM.CreateBuiltinFunction(FTy, LibCallName);
  llvm::Instruction *Call;

  RValue Res = CGF.EmitCall(FuncInfo, Func, ReturnValueSlot(), Args,
                            nullptr, &Call);
  cast<llvm::CallInst>(Call)->setCallingConv(CGF.CGM.getBuiltinCC());
  cast<llvm::CallInst>(Call)->setDoesNotThrow();

  return Res.getComplexVal();
}
Example #6
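/// Whether the given r-value needs to be saved so it can be reused from a
/// dominating position; complex values always do.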
bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
  if (rv.isScalar())
    return DominatingLLVMValue::needsSaving(rv.getScalarVal());
  if (rv.isAggregate())
    return DominatingLLVMValue::needsSaving(rv.getAggregateAddr());
  return true;
}
Example #7
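// Emit pointer arithmetic through a GEP instruction; only the ++ and --
// operators are valid on pointers.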
RValue Inst::PointerMath(int type, RValue ptr, RValue val, CodeContext& context)
{
	if (type != ParserBase::TT_INC && type != ParserBase::TT_DEC) {
		context.addError("pointer arithmetic only valid using ++/-- operators");
		return ptr;
	}
	auto ptrVal = GetElementPtrInst::Create(ptr, val.value(), "", context);
	return RValue(ptrVal, ptr.stype());
}
Example #8
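// Emit a compound assignment whose computation happens in the complex domain
// but whose destination is a scalar lvalue; the scalar result of the
// assignment is returned through Result.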
LValue CodeGenFunction::
EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
                                    llvm::Value *&Result) {
  CompoundFunc Op = getComplexOp(E->getOpcode());
  RValue Val;
  LValue Ret = ComplexExprEmitter(*this).EmitCompoundAssignLValue(E, Op, Val);
  Result = Val.getScalarVal();
  return Ret;
}
Example #9
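// Store an r-value into the return slot according to its kind (scalar,
// aggregate, or complex), then branch through any active cleanups to the
// return block.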
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
  } else {
    StoreComplexToAddr(RV.getComplexVal(), ReturnValue, false);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}
Example #10
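// Dereference a pointer value by emitting a load; with 'recursive' set, keep
// loading until a non-pointer type is reached.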
RValue Inst::Deref(CodeContext& context, RValue value, bool recursive)
{
	auto retVal = RValue(value.value(), value.stype());
	while (retVal.stype()->isPointer()) {
		retVal = RValue(new LoadInst(retVal, "", context), retVal.stype()->subType());
		if (!recursive)
			break;
	}
	return retVal;
}
Example #11
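// Emit a load from a UPC shared address.  When a fixed-size __get* accessor
// exists for the type, call it and cast the result; otherwise fall back to a
// bulk __get*blk transfer through a stack temporary.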
llvm::Value *CodeGenFunction::EmitUPCLoad(llvm::Value *Addr,
                                          bool isStrict,
                                          llvm::Type *LTy,
                                          CharUnits Align,
                                          SourceLocation Loc) {
  const ASTContext& Context = getContext();
  const llvm::DataLayout &Target = CGM.getDataLayout();
  uint64_t Size = Target.getTypeSizeInBits(LTy);
  QualType ArgTy = Context.getPointerType(Context.getSharedType(Context.VoidTy));
  QualType ResultTy;
  llvm::SmallString<16> Name("__get");
  if (isStrict) Name += 's';
  if (CGM.getCodeGenOpts().UPCDebug) Name += "g";

  if (const char * ID = getUPCTypeID(*this, &ResultTy, LTy, Size, Context.toBits(Align))) {
    Name += ID;

    CallArgList Args;
    Args.add(RValue::get(Addr), ArgTy);
    if (CGM.getCodeGenOpts().UPCDebug) {
      getFileAndLine(*this, Loc, &Args);
      Name += '3';
    } else {
      Name += '2';
    }
    RValue Result = EmitUPCCall(*this, Name, ResultTy, Args);
    llvm::Value *Value = Result.getScalarVal();
    if (LTy->isPointerTy())
      Value = Builder.CreateIntToPtr(Value, LTy);
    else
      Value = Builder.CreateBitCast(Value, LTy);
    return Value;
  } else {
    Name += "blk";

    llvm::AllocaInst *Mem = CreateTempAlloca(LTy);
    Mem->setAlignment(Target.getABITypeAlignment(LTy));
    llvm::Value *Tmp = Builder.CreateBitCast(Mem, VoidPtrTy);
    llvm::Value *SizeArg = llvm::ConstantInt::get(SizeTy, Size/Context.getCharWidth());
    
    CallArgList Args;
    Args.add(RValue::get(Tmp), Context.VoidPtrTy);
    Args.add(RValue::get(Addr), ArgTy);
    Args.add(RValue::get(SizeArg), Context.getSizeType());
    if (CGM.getCodeGenOpts().UPCDebug) {
      getFileAndLine(*this, Loc, &Args);
      Name += '5';
    } else {
      Name += '3';
    }
    EmitUPCCall(*this, Name, getContext().VoidTy, Args);

    return Builder.CreateLoad(Mem);
  }
}
Example #12
RValue::RValue(const RValue &copied, SILGenFunction &gen, SILLocation l)
  : type(copied.type),
    elementsToBeAdded(copied.elementsToBeAdded)
{
  assert((copied.isComplete() || copied.isUsed())
         && "can't copy incomplete rvalue");
  values.reserve(copied.values.size());
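  // Clone each element so the copy owns independent cleanups.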
  for (ManagedValue value : copied.values) {
    values.push_back(value.copy(gen, l));
  }
}
Example #13
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  // The address space of a static local variable (DeclPtr) may be different
  // from the address space of the "this" argument of the constructor. In that
  // case, we need an addrspacecast before calling the constructor.
  //
  // struct StructWithCtor {
  //   __device__ StructWithCtor() {...}
  // };
  // __device__ void foo() {
  //   __shared__ StructWithCtor s;
  //   ...
  // }
  //
  // For example, in the above CUDA code, the static local variable s has a
  // "shared" address space qualifier, but the constructor of StructWithCtor
  // expects "this" in the "generic" address space.
  unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T);
  unsigned ActualAddrSpace = DeclPtr->getType()->getPointerAddressSpace();
  if (ActualAddrSpace != ExpectedAddrSpace) {
    llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(T);
    llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
    DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
  }

  ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));

  if (!T->isReferenceType()) {
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        D.hasAttr<OMPThreadPrivateDeclAttr>()) {
      (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
          &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
          PerformInit, this);
    }
    if (PerformInit)
      EmitDeclInit(*this, D, DeclAddr);
    if (CGM.isTypeConstant(D.getType(), true))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclAddr);
    return;
  }

  assert(PerformInit && "cannot have constant initializer which needs "
         "destruction for reference");
  RValue RV = EmitReferenceBindingToExpr(Init);
  EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
}
Example #14
bool RValue::isObviouslyEqual(const RValue &rhs) const {
  assert(isComplete() && rhs.isComplete() && "Comparing incomplete rvalues");

  // Compare the count of elements instead of the type.
  if (values.size() != rhs.values.size())
    return false;

  return std::equal(values.begin(), values.end(), rhs.values.begin(),
                [](const ManagedValue &lhs, const ManagedValue &rhs) -> bool {
                  return areObviouslySameValue(lhs.getValue(), rhs.getValue());
                });
}
Example #15
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
Example #16
/// Emit a materializeForSet operation that simply loads the l-value
/// into the result buffer.  This operation creates a callback to write
/// the l-value back.
SILValue
MaterializeForSetEmitter::emitUsingGetterSetter(SILGenFunction &SGF,
                                                SILLocation loc,
                                                ManagedValue self,
                                                RValue &&indices,
                                                SILValue resultBuffer,
                                                SILValue callbackBuffer,
                                                SILFunction *&callback) {
  // Copy the indices into the callback storage.
  const TypeLowering *indicesTL = nullptr;
  CleanupHandle indicesCleanup = CleanupHandle::invalid();
  CanType indicesFormalType;
  if (isa<SubscriptDecl>(WitnessStorage)) {
    indicesFormalType = indices.getType();
    indicesTL = &SGF.getTypeLowering(indicesFormalType);
    SILValue allocatedCallbackBuffer =
      SGF.B.createAllocValueBuffer(loc, indicesTL->getLoweredType(),
                                   callbackBuffer);

    // Emit into the buffer.
    auto init = SGF.useBufferAsTemporary(allocatedCallbackBuffer, *indicesTL);
    indicesCleanup = init->getInitializedCleanup();

    indices.copyInto(SGF, loc, init.get());
  }

  // Set up the result buffer.
  resultBuffer =
    SGF.B.createPointerToAddress(loc, resultBuffer,
                                 RequirementStorageType.getAddressType(),
                                 /*isStrict*/ true,
                                 /*isInvariant*/ false);
  TemporaryInitialization init(resultBuffer, CleanupHandle::invalid());

  // Evaluate the getter into the result buffer.
  LValue lv = buildLValue(SGF, loc, self, std::move(indices), AccessKind::Read);
  RValue result = SGF.emitLoadOfLValue(loc, std::move(lv),
                                             SGFContext(&init));
  if (!result.isInContext()) {
    std::move(result).forwardInto(SGF, loc, &init);
  }

  // Forward the cleanup on the saved indices.
  if (indicesCleanup.isValid()) {
    SGF.Cleanups.setCleanupState(indicesCleanup, CleanupState::Dead);
  }

  callback = createSetterCallback(SGF.F, indicesTL, indicesFormalType);
  return resultBuffer;
}
Example #17
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  if (!T->isReferenceType()) {
    EmitDeclInit(*this, D, DeclPtr);
    EmitDeclDestroy(*this, D, DeclPtr);
    return;
  }

  RValue RV = EmitReferenceBindingToExpr(Init, &D);
  EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, T);
}
Example #18
// Compound assignments.
ComplexPairTy ComplexExprEmitter::
EmitCompoundAssign(const CompoundAssignOperator *E,
                   ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
  RValue Val;
  LValue LV = EmitCompoundAssignLValue(E, Func, Val);

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return Val.getComplexVal();

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LV.isVolatileQualified())
    return Val.getComplexVal();

  return EmitLoadOfLValue(LV, E->getExprLoc());
}
Example #19
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  if (!T->isReferenceType()) {
    EmitDeclInit(*this, D, DeclPtr);
    return;
  }
  if (Init->isLvalue(getContext()) == Expr::LV_Valid) {
    RValue RV = EmitReferenceBindingToExpr(Init, /*IsInitializer=*/true);
    EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, T);
    return;
  }
  ErrorUnsupported(Init, 
                   "global variable that binds reference to a non-lvalue");
}
Example #20
CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}
Example #21
/// \brief Perform the final move to DestPtr if RequiresGCollection is set.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitGCMove(E, Result);
/// If GC doesn't interfere, this will cause the result to be emitted
/// directly into the return value slot.  If GC does interfere, a final
/// move will be performed.
void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
  if (Dest.requiresGCollection()) {
    CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
    const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, Dest.getAddr(),
                                                    Src.getAggregateAddr(),
                                                    SizeVal);
  }
}
Example #22
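// Cast the two operands to a common numeric type so a binary operation can be
// applied; returns true if an error was reported.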
bool Inst::CastMatch(CodeContext& context, RValue& lhs, RValue& rhs, bool upcast)
{
	auto ltype = lhs.stype();
	auto rtype = rhs.stype();

	if (ltype == rtype) {
		return false;
	} else if (ltype->isComplex() || rtype->isComplex()) {
		context.addError("can not cast complex types");
		return true;
	} else if (ltype->isPointer() || rtype->isPointer()) {
		// different pointer types can't be cast automatically
		// the operations need to handle pointers specially
		return false;
	}

	auto toType = SType::numericConv(context, ltype, rtype, upcast);
	return CastTo(context, lhs, toType, upcast) || CastTo(context, rhs, toType, upcast);
}
Example #23
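// Cast a UPC pointer-to-shared to a local pointer by calling the runtime's
// __getaddr helper (__getaddrg when UPC debugging is enabled) and bitcasting
// the result to the destination type.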
llvm::Value *CodeGenFunction::EmitUPCCastSharedToLocal(llvm::Value *Value,
                                                       QualType DestTy,
                                                       SourceLocation Loc) {
  const ASTContext& Context = getContext();
  QualType ArgTy = Context.getPointerType(Context.getSharedType(Context.VoidTy));
  QualType ResultTy = Context.VoidPtrTy;

  const char *FnName = "__getaddr";

  CallArgList Args;
  Args.add(RValue::get(Value), ArgTy);
  if (CGM.getCodeGenOpts().UPCDebug) {
    getFileAndLine(*this, Loc, &Args);
    FnName = "__getaddrg";
  }

  RValue Result = EmitUPCCall(*this, FnName, ResultTy, Args);

  return Builder.CreateBitCast(Result.getScalarVal(), ConvertType(DestTy));
}
Example #24
void SILGenFunction::emitReturnExpr(SILLocation branchLoc,
                                    Expr *ret) {
  SILValue result;
  if (IndirectReturnAddress) {
    // Indirect return of an address-only value.
    FullExpr scope(Cleanups, CleanupLocation(ret));
    InitializationPtr returnInit(
                       new KnownAddressInitialization(IndirectReturnAddress));
    emitExprInto(ret, returnInit.get());
  } else {
    // SILValue return.
    FullExpr scope(Cleanups, CleanupLocation(ret));
    RValue resultRValue = emitRValue(ret);
    if (!resultRValue.getType()->isVoid()) {
      result = std::move(resultRValue).forwardAsSingleValue(*this, ret);
    }
  }
  Cleanups.emitBranchAndCleanups(ReturnDest, branchLoc,
                                 result ? result : ArrayRef<SILValue>{});
}
Example #25
/// \brief Perform the final move to DestPtr if RequiresGCollection is set.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitGCMove(E, Result);
/// If GC doesn't interfere, this will cause the result to be emitted
/// directly into the return value slot.  If GC does interfere, a final
/// move will be performed.
void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
  if (RequiresGCollection) {
    std::pair<uint64_t, unsigned> TypeInfo = 
      CGF.getContext().getTypeInfo(E->getType());
    unsigned long size = TypeInfo.first/8;
    const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, DestPtr,
                                                    Src.getAggregateAddr(),
                                                    SizeVal);
  }
}
Example #26
/// \brief Perform the final move to DestPtr if for some reason
/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitMoveFromReturnSlot(E, Result);
///
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot.  Otherwise, a final move
/// will be performed.
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
  if (shouldUseDestForReturnSlot()) {
    // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
    // The possibility of undef rvalues complicates that a lot,
    // though, so we can't really assert.
    return;
  }

  // Otherwise, do a final copy.
  assert(Dest.getAddr() != Src.getAggregateAddr());
  EmitFinalDestCopy(E, Src, /*Ignore*/ true);
}
Example #27
DominatingValue<RValue>::saved_type
DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
  if (rv.isScalar()) {
    llvm::Value *V = rv.getScalarVal();

    // These automatically dominate and don't need to be saved.
    if (!DominatingLLVMValue::needsSaving(V))
      return saved_type(V, ScalarLiteral);

    // Everything else needs an alloca.
    llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
    CGF.Builder.CreateStore(V, addr);
    return saved_type(addr, ScalarAddress);
  }

  if (rv.isComplex()) {
    CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
    llvm::Type *ComplexTy =
      llvm::StructType::get(V.first->getType(), V.second->getType(),
                            (void*) nullptr);
    llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
    CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(addr, 0));
    CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(addr, 1));
    return saved_type(addr, ComplexAddress);
  }

  assert(rv.isAggregate());
  llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile?
  if (!DominatingLLVMValue::needsSaving(V))
    return saved_type(V, AggregateLiteral);

  llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
  CGF.Builder.CreateStore(V, addr);
  return saved_type(addr, AggregateAddress);  
}
Example #28
/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}
Example #29
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore,
                                       unsigned Alignment) {
  assert(Src.isAggregate() && "value must be aggregate value!");

  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context (like an expression statement) that doesn't care
  // about the result.  C says that an lvalue-to-rvalue conversion is
  // performed in these cases; C++ says that it is not.  In either
  // case, we don't actually need to do anything unless the value is
  // volatile.
  if (Dest.isIgnored()) {
    if (!Src.isVolatileQualified() ||
        CGF.CGM.getLangOptions().CPlusPlus ||
        (IgnoreResult && Ignore))
      return;

    // If the source is volatile, we must read from it; to do that, we need
    // some place to put it.
    Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
  }

  if (Dest.requiresGCollection()) {
    CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
    llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      Dest.getAddr(),
                                                      Src.getAggregateAddr(),
                                                      SizeVal);
    return;
  }
  // If the result of the assignment is used, copy the LHS there also.
  // FIXME: Pass VolatileDest as well.  I think we also need to merge volatile
  // from the source as well, as we can't eliminate it if either operand
  // is volatile, unless copy has volatile for both source and destination.
  CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
                        Dest.isVolatile()|Src.isVolatileQualified(),
                        Alignment);
}
Example #30
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  if (!T->isReferenceType()) {
    if (PerformInit)
      EmitDeclInit(*this, D, DeclPtr);
    if (CGM.isTypeConstant(D.getType(), true))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclPtr);
    return;
  }

  assert(PerformInit && "cannot have constant initializer which needs "
         "destruction for reference");
  unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
  RValue RV = EmitReferenceBindingToExpr(Init, &D);
  EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
}