unsigned FastISel::getRegForGEPIndex(Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return 0;

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT))
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
  else if (IdxVT.bitsGT(PtrVT))
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
  return IdxN;
}
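// A minimal standalone sketch (plain C++, not the LLVM API) of the
// width-normalization rule getRegForGEPIndex applies above: GEP indices are
// interpreted as signed intptr_t values, so narrower indices are
// sign-extended and wider ones truncated. The names NormalizeIndexWidth and
// IndexAction are hypothetical.
#include <cassert>

enum class IndexAction { None, SignExtend, Truncate };

// Decide how to bring an index of IdxBits into line with PtrBits.
IndexAction NormalizeIndexWidth(unsigned IdxBits, unsigned PtrBits) {
  if (IdxBits < PtrBits) return IndexAction::SignExtend; // e.g. i32 -> i64
  if (IdxBits > PtrBits) return IndexAction::Truncate;   // e.g. i128 -> i64
  return IndexAction::None;                              // already intptr-sized
}

int main() {
  assert(NormalizeIndexWidth(32, 64) == IndexAction::SignExtend);
  assert(NormalizeIndexWidth(64, 64) == IndexAction::None);
  assert(NormalizeIndexWidth(128, 64) == IndexAction::Truncate);
}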
/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(Type *ReturnType, AttributeSet attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: C calling convention requires the return type to be promoted to
    // at least 32-bit. But this is not necessary for non-C calling
    // conventions. The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }

    unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
    MVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);

    // 'inreg' on function refers to return value.
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::InReg))
      Flags.setInReg();

    // Propagate extension type if any.
    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true, 0, 0));
  }
}
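// A standalone sketch (hypothetical names, not the LLVM API) of the promotion
// rule in GetReturnInfo above: when the return value carries a signext or
// zeroext attribute, integer returns narrower than 32 bits are widened to at
// least i32, matching the C calling convention; without an attribute the
// width is left alone (ANY_EXTEND).
#include <algorithm>
#include <cassert>

unsigned PromotedReturnBits(unsigned RetBits, bool HasExtAttr) {
  return HasExtAttr ? std::max(RetBits, 32u) : RetBits;
}

int main() {
  assert(PromotedReturnBits(8, true) == 32);  // i8 signext -> i32
  assert(PromotedReturnBits(64, true) == 64); // i64 is already wide enough
  assert(PromotedReturnBits(8, false) == 8);  // no attribute: unchanged
}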
void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo,
                                                    SDValue &Hi) {
  SDValue OldVec = N->getOperand(0);
  unsigned OldElts = OldVec.getValueType().getVectorNumElements();
  EVT OldEltVT = OldVec.getValueType().getVectorElementType();
  SDLoc dl(N);

  // Convert to a vector of the expanded element type, for example
  // <3 x i64> -> <6 x i32>.
  EVT OldVT = N->getValueType(0);
  EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), OldVT);

  if (OldVT != OldEltVT) {
    // The result of EXTRACT_VECTOR_ELT may be larger than the element type of
    // the input vector. If so, extend the elements of the input vector to the
    // same bitwidth as the result before expanding.
    assert(OldEltVT.bitsLT(OldVT) && "Result type smaller than element type!");
    EVT NVecVT = EVT::getVectorVT(*DAG.getContext(), OldVT, OldElts);
    OldVec = DAG.getNode(ISD::ANY_EXTEND, dl, NVecVT, N->getOperand(0));
  }

  SDValue NewVec = DAG.getNode(ISD::BITCAST, dl,
                               EVT::getVectorVT(*DAG.getContext(),
                                                NewVT, 2*OldElts),
                               OldVec);

  // Extract the elements at 2 * Idx and 2 * Idx + 1 from the new vector.
  SDValue Idx = N->getOperand(1);

  // Make sure the type of Idx is big enough to hold the new values.
  if (Idx.getValueType().bitsLT(TLI.getPointerTy()))
    Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx);

  Idx = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, Idx);
  Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewVT, NewVec, Idx);

  Idx = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx,
                    DAG.getConstant(1, Idx.getValueType()));
  Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewVT, NewVec, Idx);

  if (TLI.isBigEndian())
    std::swap(Lo, Hi);
}
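// A standalone sketch of the index arithmetic ExpandRes_EXTRACT_VECTOR_ELT
// relies on: after bitcasting <N x i64> to <2N x i32>, element Idx of the old
// vector occupies new elements 2*Idx (Lo) and 2*Idx+1 (Hi), with the pair
// swapped on big-endian targets. Plain C++ with hypothetical names; the
// memcpy models the bitcast and assumes a little-endian host.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

std::pair<uint32_t, uint32_t> ExpandElt(const uint64_t *Vec, unsigned Idx) {
  uint32_t Parts[2];
  std::memcpy(Parts, &Vec[Idx], sizeof(Parts)); // the "bitcast"
  return {Parts[0], Parts[1]};                  // {Lo, Hi}; BE would swap
}

int main() {
  const uint64_t V[3] = {0x1111222233334444ULL, 5, 6};
  auto [Lo, Hi] = ExpandElt(V, 0);
  assert(((uint64_t)Hi << 32 | Lo) == V[0]); // the halves reassemble it
}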
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT,
                      ISD::SIGN_EXTEND, IdxN, IdxNIsKill);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT,
                      ISD::TRUNCATE, IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}
/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
static bool OptimizeNoopCopyExpression(CastInst *CI,
                                       const TargetLowering &TLI) {
  // Check whether this is a noop copy.
  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(CI->getType());

  // Is this an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to. This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in. For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(UI);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(),
                         "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
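// A minimal sketch of the memoization pattern OptimizeNoopCopyExpression uses
// above: at most one sunk cast is created per user block, materialized lazily
// through a map from block to cast. Plain ints stand in for blocks and casts;
// all names are hypothetical.
#include <cassert>
#include <map>

int NextCastId = 0;

// Return the single cast for this block, creating it on first request.
int GetOrCreateCastFor(std::map<int, int> &InsertedCasts, int BlockId) {
  auto [It, Inserted] = InsertedCasts.try_emplace(BlockId, -1);
  if (Inserted)
    It->second = NextCastId++; // first use in this block: make a new cast
  return It->second;
}

int main() {
  std::map<int, int> Casts;
  int A = GetOrCreateCastFor(Casts, /*BlockId=*/7);
  int B = GetOrCreateCastFor(Casts, /*BlockId=*/7);
  assert(A == B && Casts.size() == 1); // a second use reuses the same cast
}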
bool FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}
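// A standalone sketch (hypothetical names) of the IntToPtr/PtrToInt handling
// in SelectOperator above: the cast lowers to a zero-extend, a truncate, or a
// plain reuse of the source register, depending only on the relative widths.
// Note the contrast with GEP indices, which are sign-extended.
#include <cassert>

enum class PtrCastKind { ZeroExtend, Truncate, Reuse };

PtrCastKind ClassifyPtrIntCast(unsigned SrcBits, unsigned DstBits) {
  if (DstBits > SrcBits) return PtrCastKind::ZeroExtend;
  if (DstBits < SrcBits) return PtrCastKind::Truncate;
  return PtrCastKind::Reuse; // same width: just remap the existing register
}

int main() {
  assert(ClassifyPtrIntCast(32, 64) == PtrCastKind::ZeroExtend);
  assert(ClassifyPtrIntCast(64, 32) == PtrCastKind::Truncate);
  assert(ClassifyPtrIntCast(64, 64) == PtrCastKind::Reuse);
}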
bool FastISel::SelectCall(const User *I) {
  const Function *F = cast<CallInst>(I)->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  unsigned IID = F->getIntrinsicID();
  switch (IID) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
      return true;

    unsigned Reg = 0;
    unsigned Offset = 0;
    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
      if (Arg->hasByValAttr()) {
        // Byval arguments' frame index is recorded during argument lowering.
        // Use this info directly.
        Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
        if (Offset)
          Reg = TRI.getFrameRegister(*FuncInfo.MF);
      }
    }
    if (!Reg)
      Reg = getRegForValue(Address);

    if (Reg)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_VALUE))
        .addReg(Reg, RegState::Debug).addImm(Offset)
        .addMetadata(DI->getVariable());
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(I);
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addImm(CI->getZExtValue()).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << DI);
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
    default: break;
    case TargetLowering::Expand: {
      assert(FuncInfo.MBB->isLandingPad() &&
             "Call to eh.exception not in landing pad!");
      unsigned Reg = TLI.getExceptionAddressRegister();
      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);
      UpdateValueMap(I, ResultReg);
      return true;
    }
    }
    break;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
    default: break;
    case TargetLowering::Expand: {
      if (FuncInfo.MBB->isLandingPad())
        AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
      else {
#ifndef NDEBUG
        FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
#endif
        // FIXME: Mark exception selector register as live in. Hack for PR1508.
        unsigned Reg = TLI.getExceptionSelectorRegister();
        if (Reg) FuncInfo.MBB->addLiveIn(Reg);
      }

      unsigned Reg = TLI.getExceptionSelectorRegister();
      EVT SrcVT = TLI.getPointerTy();
      const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);

      bool ResultRegIsKill = hasTrivialKill(I);

      // Cast the register to the type of the selector.
      if (SrcVT.bitsGT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                               ResultReg, ResultRegIsKill);
      else if (SrcVT.bitsLT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                               ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
      if (ResultReg == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      UpdateValueMap(I, ResultReg);

      return true;
    }
    }
    break;
  }
  }

  // An arbitrary call. Bail.
  return false;
}
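// A standalone sketch (hypothetical names) of the operand-form dispatch in
// the dbg_value lowering above: the DBG_VALUE's first operand is chosen from
// what the debugged Value is -- an undef register when the value is gone, an
// integer or FP immediate for constants, otherwise the virtual register
// already holding it; anything else is dropped rather than generating code.
#include <cassert>

enum class DbgOperandKind { UndefReg, IntImm, FPImm, VReg, Dropped };

DbgOperandKind ClassifyDbgValue(bool HasValue, bool IsConstInt, bool IsConstFP,
                                bool HasReg) {
  if (!HasValue) return DbgOperandKind::UndefReg; // optimizer dropped it
  if (IsConstInt) return DbgOperandKind::IntImm;
  if (IsConstFP) return DbgOperandKind::FPImm;
  if (HasReg) return DbgOperandKind::VReg;
  return DbgOperandKind::Dropped; // would require generating real code
}

int main() {
  assert(ClassifyDbgValue(true, true, false, false) == DbgOperandKind::IntImm);
  assert(ClassifyDbgValue(false, false, false, false) ==
         DbgOperandKind::UndefReg);
  assert(ClassifyDbgValue(true, false, false, true) == DbgOperandKind::VReg);
}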
bool FastISel::SelectCall(User *I) {
  Function *F = cast<CallInst>(I)->getCalledFunction();
  if (!F) return false;

  unsigned IID = F->getIntrinsicID();
  switch (IID) {
  default: break;
  case Intrinsic::dbg_stoppoint:
  case Intrinsic::dbg_region_start:
  case Intrinsic::dbg_region_end:
  case Intrinsic::dbg_func_start:
    // FIXME - Remove these intrinsics once the dust settles.
    return true;
  case Intrinsic::dbg_declare: {
    DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
    if (!isValidDebugInfoIntrinsic(*DI, CodeGenOpt::None) || !DW ||
        !DW->ShouldEmitDwarfDebug())
      return true;

    Value *Address = DI->getAddress();
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
      Address = BCI->getOperand(0);
    AllocaInst *AI = dyn_cast<AllocaInst>(Address);
    // Don't handle byval struct arguments or VLAs, for example.
    if (!AI) break;
    DenseMap<const AllocaInst*, int>::iterator SI =
      StaticAllocaMap.find(AI);
    if (SI == StaticAllocaMap.end()) break; // VLAs.
    int FI = SI->second;
    if (MMI) {
      MetadataContext &TheMetadata =
        DI->getParent()->getContext().getMetadata();
      unsigned MDDbgKind = TheMetadata.getMDKind("dbg");
      MDNode *Dbg = TheMetadata.getMD(MDDbgKind, DI);
      MMI->setVariableDbgInfo(DI->getVariable(), FI, Dbg);
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
    default: break;
    case TargetLowering::Expand: {
      assert(MBB->isLandingPad() &&
             "Call to eh.exception not in landing pad!");
      unsigned Reg = TLI.getExceptionAddressRegister();
      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                           Reg, RC, RC);
      assert(InsertedCopy && "Can't copy address registers!");
      (void)InsertedCopy; // Only used by the assert above.
      UpdateValueMap(I, ResultReg);
      return true;
    }
    }
    break;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
    default: break;
    case TargetLowering::Expand: {
      if (MMI) {
        if (MBB->isLandingPad())
          AddCatchInfo(*cast<CallInst>(I), MMI, MBB);
        else {
#ifndef NDEBUG
          CatchInfoLost.insert(cast<CallInst>(I));
#endif
          // FIXME: Mark exception selector register as live in. Hack for
          // PR1508.
          unsigned Reg = TLI.getExceptionSelectorRegister();
          if (Reg) MBB->addLiveIn(Reg);
        }

        unsigned Reg = TLI.getExceptionSelectorRegister();
        EVT SrcVT = TLI.getPointerTy();
        const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
        unsigned ResultReg = createResultReg(RC);
        bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                             Reg, RC, RC);
        assert(InsertedCopy && "Can't copy address registers!");
        (void)InsertedCopy; // Only used by the assert above.

        // Cast the register to the type of the selector.
        if (SrcVT.bitsGT(MVT::i32))
          ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                                 ISD::TRUNCATE, ResultReg);
        else if (SrcVT.bitsLT(MVT::i32))
          ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                                 ISD::SIGN_EXTEND, ResultReg);
        if (ResultReg == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;

        UpdateValueMap(I, ResultReg);
      } else {
        unsigned ResultReg =
          getRegForValue(Constant::getNullValue(I->getType()));
        UpdateValueMap(I, ResultReg);
      }
      return true;
    }
    }
    break;
  }
  }
  return false;
}
bool FastISel::SelectCall(const User *I) {
  const Function *F = cast<CallInst>(I)->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  unsigned IID = F->getIntrinsicID();
  switch (IID) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
    if (!DIDescriptor::ValidDebugInfo(DI->getVariable(), CodeGenOpt::None) ||
        !MF.getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address)
      return true;
    if (isa<UndefValue>(Address))
      return true;
    const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
    // Don't handle byval struct arguments or VLAs, for example.
    if (!AI) break;
    DenseMap<const AllocaInst*, int>::iterator SI =
      StaticAllocaMap.find(AI);
    if (SI == StaticAllocaMap.end()) break; // VLAs.
    int FI = SI->second;
    if (!DI->getDebugLoc().isUnknown())
      MF.getMMI().setVariableDbgInfo(DI->getVariable(), FI,
                                     DI->getDebugLoc());

    // Building the map above is target independent. Generating DBG_VALUE
    // inline is target dependent; do this now.
    (void)TargetSelectInstruction(cast<Instruction>(I));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(I);
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(MBB, DL, II).addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(MBB, DL, II).addReg(Reg, RegState::Debug)
        .addImm(DI->getOffset()).addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      // Insert an undef so we can see what we dropped.
      BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
    default: break;
    case TargetLowering::Expand: {
      assert(MBB->isLandingPad() &&
             "Call to eh.exception not in landing pad!");
      unsigned Reg = TLI.getExceptionAddressRegister();
      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                           Reg, RC, RC);
      assert(InsertedCopy && "Can't copy address registers!");
      (void)InsertedCopy; // Only used by the assert above.
      UpdateValueMap(I, ResultReg);
      return true;
    }
    }
    break;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
    default: break;
    case TargetLowering::Expand: {
      if (MBB->isLandingPad())
        AddCatchInfo(*cast<CallInst>(I), &MF.getMMI(), MBB);
      else {
#ifndef NDEBUG
        CatchInfoLost.insert(cast<CallInst>(I));
#endif
        // FIXME: Mark exception selector register as live in. Hack for PR1508.
        unsigned Reg = TLI.getExceptionSelectorRegister();
        if (Reg) MBB->addLiveIn(Reg);
      }

      unsigned Reg = TLI.getExceptionSelectorRegister();
      EVT SrcVT = TLI.getPointerTy();
      const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
      unsigned ResultReg = createResultReg(RC);
      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg,
                                           RC, RC);
      assert(InsertedCopy && "Can't copy address registers!");
      (void)InsertedCopy; // Only used by the assert above.

      // Cast the register to the type of the selector.
      if (SrcVT.bitsGT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                               ResultReg);
      else if (SrcVT.bitsLT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                               ISD::SIGN_EXTEND, ResultReg);
      if (ResultReg == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      UpdateValueMap(I, ResultReg);

      return true;
    }
    }
    break;
  }
  }

  // An arbitrary call. Bail.
  return false;
}