/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
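// Illustrative usage sketch (not part of the original source; values are
// assumptions for a typical 32-bit layout).  For an aggregate return type such
// as { i32, [2 x float] }, ComputeValueVTs flattens the struct/array into one
// EVT per scalar leaf and, if requested, records each leaf's byte offset:
//
//   SmallVector<EVT, 4> ValueVTs;
//   SmallVector<uint64_t, 4> Offsets;
//   ComputeValueVTs(TLI, RetTy, ValueVTs, &Offsets);
//   // ValueVTs would hold roughly {MVT::i32, MVT::f32, MVT::f32} and
//   // Offsets {0, 4, 8}, depending on the target's DataLayout.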
/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: C calling convention requires the return type to be promoted to
    // at least 32-bit. But this is not necessary for non-C calling
    // conventions. The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }

    unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
    MVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);

    // 'inreg' on function refers to return value
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::InReg))
      Flags.setInReg();

    // Propagate extension type if any
    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true, 0, 0));
  }
}
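// Illustrative example (an assumption, not taken from the original source).
// For a function declared as
//
//   define signext i8 @f()
//
// ComputeValueVTs yields a single i8 value type.  Because the return carries
// the signext attribute, ExtendKind becomes ISD::SIGN_EXTEND, the value is
// widened to at least the 32-bit register type required by the C calling
// convention, and one ISD::OutputArg with the SExt flag set is pushed onto
// Outs for each register part.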
/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
/// inline asm call are due to memory operands. If so, return true, otherwise
/// return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI) {
  std::vector<TargetLowering::AsmOperandInfo> TargetConstraints =
      TLI.ParseConstraints(ImmutableCallSite(CI));
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}
bool llvm::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                const TargetLowering &TLI) {
  const Function *F = DAG.getMachineFunction().getFunction();

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
  if (CallerRetAttr & ~Attribute::NoAlias)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return TLI.isUsedByReturnOnly(Node);
}
/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                     const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}
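// Illustrative example (an assumption, not taken from the original source).
// An inline asm call such as
//
//   call void asm sideeffect "incl $0", "=*m"(i32* %slot)
//
// has a constraint code that TLI.getConstraintType classifies as
// TargetLowering::C_Memory (and the operand is indirect), so this helper
// would return true for its parsed constraint list.  A register-only
// constraint string like "=r,r" would trip neither the C_Memory check nor
// the isIndirect check.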
/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TLI.getTargetMachine().Options.GuaranteedTailCallOpt ||
       !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(BBI))
        return false;
    }

  return returnTypeIsEligibleForTailCall(ExitBB->getParent(), I, Ret, TLI);
}
/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
/// inline asm call are due to memory operands. If so, return true, otherwise
/// return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI) {
  std::vector<InlineAsm::ConstraintInfo>
    Constraints = IA->ParseConstraints();

  unsigned ArgNo = 1;   // ArgNo - The operand of the CallInst.
  for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo OpInfo(Constraints[i]);

    // Compute the value type for each operand.
    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.isIndirect)
        OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
      break;
    case InlineAsm::isInput:
      OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
      break;
    case InlineAsm::isClobber:
      // Nothing to do.
      break;
    }

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue(),
                             OpInfo.ConstraintType == TargetLowering::C_Memory);

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}
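// Illustrative usage sketch (an assumption, not taken from the original
// source).  This check is what allows an address computation to be folded
// into an inline asm's memory operands.  Given IR roughly like
//
//   %addr = getelementptr inbounds i32* %base, i64 4      ; OpVal
//   call void asm "incl $0", "=*m"(i32* %addr)
//
// every use of %addr by the asm is an indirect C_Memory operand, so the
// function returns true and the caller may fold the GEP into the asm
// operand's addressing mode.  If %addr were also passed through a register
// ("r") constraint, the function would return false.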
/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI) {
  // If this is a noop copy, the source and destination types will legalize to
  // the same type.
  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(CI->getType());

  // An fp<->int conversion is never a noop copy.
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to.  This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in.  For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(UI);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
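// Illustrative before/after sketch (an assumption, not taken from the original
// source; the %t.sunk name is made up, the actual inserted cast is unnamed).
// On a target where the i32 -> i16 truncate is free after promotion, a cast
// defined in the entry block but used only in another block:
//
//   entry:
//     %t = trunc i32 %x to i16
//     br label %use
//   use:
//     %r = add i16 %t, 1
//
// is rewritten so the free cast is materialized in the user block instead,
// avoiding a cross-block virtual register for the cast result:
//
//   entry:
//     br label %use
//   use:
//     %t.sunk = trunc i32 %x to i16
//     %r = add i16 %t.sunk, 1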
/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attribute CalleeRetAttr,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TLI.getTargetMachine().Options.GuaranteedTailCallOpt ||
       !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(BBI))
        return false;
    }

  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  const Function *F = ExitBB->getParent();
  Attribute CallerRetAttr = F->getAttributes().getRetAttributes();
  if (AttrBuilder(CalleeRetAttr).removeAttribute(Attribute::NoAlias) !=
      AttrBuilder(CallerRetAttr).removeAttribute(Attribute::NoAlias))
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerRetAttr.hasAttribute(Attribute::ZExt) ||
      CallerRetAttr.hasAttribute(Attribute::SExt))
    return false;

  // Otherwise, make sure the unmodified return value of I is the return value.
  // We handle two cases: multiple return values + scalars.
  Value *RetVal = Ret->getOperand(0);
  if (!isa<InsertValueInst>(RetVal) || !isa<StructType>(RetVal->getType()))
    // Handle scalars first.
    return getNoopInput(Ret->getOperand(0), TLI) == I;

  // If this is an aggregate return, look through the insert/extract values and
  // see if each is transparent.
  for (unsigned i = 0, e = cast<StructType>(RetVal->getType())->getNumElements();
       i != e; ++i) {
    const Value *InScalar = FindInsertedValue(RetVal, i);
    if (InScalar == 0) return false;
    InScalar = getNoopInput(InScalar, TLI);

    // If the scalar value being inserted is an extractvalue of the right index
    // from the call, then everything is good.
    const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(InScalar);
    if (EVI == 0 || EVI->getOperand(0) != I || EVI->getNumIndices() != 1 ||
        EVI->getIndices()[0] != i)
      return false;
  }

  return true;
}
/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !I->isSafeToSpeculativelyExecute())
    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !BBI->isSafeToSpeculativelyExecute())
        return false;
    }

  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  const Function *F = ExitBB->getParent();
  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
    return false;

  // Otherwise, make sure the unmodified return value of I is the return value.
  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
       U = dyn_cast<Instruction>(U->getOperand(0))) {
    if (!U)
      return false;
    if (!U->hasOneUse())
      return false;
    if (U == I)
      break;
    // Check for a truly no-op truncate.
    if (isa<TruncInst>(U) &&
        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
      continue;
    // Check for a truly no-op bitcast.
    if (isa<BitCastInst>(U) &&
        (U->getOperand(0)->getType() == U->getType() ||
         (U->getOperand(0)->getType()->isPointerTy() &&
          U->getType()->isPointerTy())))
      continue;
    // Otherwise it's not a true no-op.
    return false;
  }

  return true;
}
/// EmitJumpTableInfo - Print assembly representations of the jump tables used
/// by the current function to the current output stream.
///
void AsmPrinter::EmitJumpTableInfo(MachineJumpTableInfo *MJTI,
                                   MachineFunction &MF) {
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  if (JT.empty()) return;

  bool IsPic = TM.getRelocationModel() == Reloc::PIC_;

  // Use JumpTableDirective otherwise honor the entry size from the jump table
  // info.
  const char *JTEntryDirective = TAI->getJumpTableDirective();
  bool HadJTEntryDirective = JTEntryDirective != NULL;
  if (!HadJTEntryDirective) {
    JTEntryDirective = MJTI->getEntrySize() == 4 ?
      TAI->getData32bitsDirective() : TAI->getData64bitsDirective();
  }

  // Pick the directive to use to print the jump table entries, and switch to
  // the appropriate section.
  TargetLowering *LoweringInfo = TM.getTargetLowering();

  const char *JumpTableDataSection = TAI->getJumpTableDataSection();
  if ((IsPic && !(LoweringInfo && LoweringInfo->usesGlobalOffsetTable())) ||
      !JumpTableDataSection) {
    // In PIC mode, we need to emit the jump table to the same section as the
    // function body itself, otherwise the label differences won't make sense.
    // We should also do this if the section name is NULL.
    const Function *F = MF.getFunction();
    SwitchToTextSection(getSectionForFunction(*F).c_str(), F);
  } else {
    SwitchToDataSection(JumpTableDataSection);
  }

  EmitAlignment(Log2_32(MJTI->getAlignment()));

  for (unsigned i = 0, e = JT.size(); i != e; ++i) {
    const std::vector<MachineBasicBlock*> &JTBBs = JT[i].MBBs;

    // If this jump table was deleted, ignore it.
    if (JTBBs.empty()) continue;

    // For PIC codegen, if possible we want to use the SetDirective to reduce
    // the number of relocations the assembler will generate for the jump table.
    // Set directives are all printed before the jump table itself.
    std::set<MachineBasicBlock*> EmittedSets;
    if (TAI->getSetDirective() && IsPic)
      for (unsigned ii = 0, ee = JTBBs.size(); ii != ee; ++ii)
        if (EmittedSets.insert(JTBBs[ii]).second)
          printSetLabel(i, JTBBs[ii]);

    // On some targets (e.g. Darwin) we want to emit two consecutive labels
    // before each jump table.  The first label is never referenced, but tells
    // the assembler and linker the extents of the jump table object.  The
    // second label is actually referenced by the code.
    if (const char *JTLabelPrefix = TAI->getJumpTableSpecialLabelPrefix())
      O << JTLabelPrefix << "JTI" << getFunctionNumber() << '_' << i << ":\n";

    O << TAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
      << '_' << i << ":\n";

    for (unsigned ii = 0, ee = JTBBs.size(); ii != ee; ++ii) {
      O << JTEntryDirective << ' ';

      // If we have emitted set directives for the jump table entries, print
      // them rather than the entries themselves.  If we're emitting PIC, then
      // emit the table entries as differences between two text section labels.
      // If we're emitting non-PIC code, then emit the entries as direct
      // references to the target basic blocks.
      if (!EmittedSets.empty()) {
        O << TAI->getPrivateGlobalPrefix() << getFunctionNumber()
          << '_' << i << "_set_" << JTBBs[ii]->getNumber();
      } else if (IsPic) {
        printBasicBlockLabel(JTBBs[ii], false, false);
        // If the arch uses custom jump table directives, don't calculate the
        // entry relative to the jump table label.
        if (!HadJTEntryDirective)
          O << '-' << TAI->getPrivateGlobalPrefix() << "JTI"
            << getFunctionNumber() << '_' << i;
      } else {
        printBasicBlockLabel(JTBBs[ii], false, false);
      }
      O << '\n';
    }
  }
}
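// Illustrative output sketch (an assumption; exact labels and directives vary
// by target and TargetAsmInfo configuration).  For a Darwin-style PIC target
// that provides a set directive, jump table 0 of function 4 might be emitted
// roughly as:
//
//   .set L4_0_set_2, LBB4_2-LJTI4_0
//   .set L4_0_set_5, LBB4_5-LJTI4_0
//   LJTI4_0:
//     .long L4_0_set_2
//     .long L4_0_set_5
//
// In non-PIC mode the entries would instead be direct references to the
// basic-block labels (e.g. ".long LBB4_2").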
/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TLI.getTargetMachine().Options.GuaranteedTailCallOpt ||
       !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(BBI))
        return false;
    }

  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  AttrBuilder CallerAttrs(ExitBB->getParent()->getAttributes(),
                          AttributeSet::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeSet::ReturnIndex);

  // Noalias is completely benign as far as calling convention goes, it
  // shouldn't affect whether the call is a tail call.
  CallerAttrs = CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs = CalleeAttrs.removeAttribute(Attribute::NoAlias);

  bool AllowDifferingSizes = true;
  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  if (CallerAttrs != CalleeAttrs)
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, so it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we
      // need *something*.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    using std::copy;
    SmallVector<unsigned, 4> TmpRetPath, TmpCallPath;
    copy(RetPath.rbegin(), RetPath.rend(), std::back_inserter(TmpRetPath));
    copy(CallPath.rbegin(), CallPath.rend(), std::back_inserter(TmpCallPath));

    // Finally, we can check whether the value produced by the tail call at
    // this index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}
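// Illustrative IR sketch (an assumption, not taken from the original source).
// The call below satisfies the target-independent checks above: the block
// ends in a return, nothing that needs a chain sits between the call and the
// ret, the return attributes of caller and callee are compatible, and the
// returned value flows directly from the call to the ret:
//
//   define i8* @caller(i8* %p) {
//   entry:
//     %r = tail call i8* @callee(i8* %p)
//     ret i8* %r
//   }
//
// By contrast, inserting e.g. a store between the call and the ret, or
// returning through mismatched zeroext/signext attributes, would make
// isInTailCallPosition return false.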