RegisterBankInfo::InstructionMapping
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
  if (Mapping.isValid())
    return Mapping;

  // As a top-level guess, vectors go in FPRs, scalars in GPRs. Obviously this
  // won't work for normal floating-point types (or NZCV). When such
  // instructions exist we'll need to look at the MI's opcode.
  LLT Ty = MI.getType();
  unsigned BankID;
  if (Ty.isVector())
    BankID = AArch64::FPRRegBankID;
  else
    BankID = AArch64::GPRRegBankID;

  Mapping = InstructionMapping{1, 1, MI.getNumOperands()};
  int Size = Ty.isSized() ? Ty.getSizeInBits() : 0;
  for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx)
    Mapping.setOperandMapping(Idx, Size, getRegBank(BankID));

  return Mapping;
}
RegisterBankInfo::InstructionMapping
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  const unsigned Opc = MI.getOpcode();
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Try the default logic for non-generic instructions that are either copies
  // or already have some operands assigned to banks.
  if (!isPreISelGenericOpcode(Opc)) {
    RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
    if (Mapping.isValid())
      return Mapping;
  }

  RegisterBankInfo::InstructionMapping Mapping =
      InstructionMapping{DefaultMappingID, 1, MI.getNumOperands()};

  // Track the size and bank of each register. We don't do partial mappings.
  SmallVector<unsigned, 4> OpBaseIdx(MI.getNumOperands());
  SmallVector<unsigned, 4> OpFinalIdx(MI.getNumOperands());
  for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx) {
    auto &MO = MI.getOperand(Idx);
    if (!MO.isReg())
      continue;

    LLT Ty = MRI.getType(MO.getReg());
    unsigned RBIdx = AArch64::getRegBankBaseIdx(Ty.getSizeInBits());
    OpBaseIdx[Idx] = RBIdx;

    // As a top-level guess, vectors go in FPRs, scalars and pointers in GPRs.
    // For floating-point instructions, scalars go in FPRs.
    if (Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc)) {
      assert(RBIdx < (AArch64::LastFPR - AArch64::FirstFPR) + 1 &&
             "Index out of bound");
      OpFinalIdx[Idx] = AArch64::FirstFPR + RBIdx;
    } else {
      assert(RBIdx < (AArch64::LastGPR - AArch64::FirstGPR) + 1 &&
             "Index out of bound");
      OpFinalIdx[Idx] = AArch64::FirstGPR + RBIdx;
    }
  }

  // Some of the floating-point instructions have mixed GPR and FPR operands:
  // fine-tune the computed mapping.
  switch (Opc) {
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP: {
    OpFinalIdx = {OpBaseIdx[0] + AArch64::FirstFPR,
                  OpBaseIdx[1] + AArch64::FirstGPR};
    break;
  }
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
    OpFinalIdx = {OpBaseIdx[0] + AArch64::FirstGPR,
                  OpBaseIdx[1] + AArch64::FirstFPR};
    break;
  }
  case TargetOpcode::G_FCMP: {
    OpFinalIdx = {OpBaseIdx[0] + AArch64::FirstGPR,
                  /* Predicate */ 0,
                  OpBaseIdx[2] + AArch64::FirstFPR,
                  OpBaseIdx[3] + AArch64::FirstFPR};
    break;
  }
  }

  // Finally construct the computed mapping.
  for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx)
    if (MI.getOperand(Idx).isReg())
      Mapping.setOperandMapping(
          Idx, ValueMapping{&AArch64::PartMappings[OpFinalIdx[Idx]], 1});

  return Mapping;
}
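The second version leans on a handful of target-defined tables and helpers that live outside this function: AArch64::PartMappings, AArch64::getRegBankBaseIdx, and the FirstGPR/LastGPR/FirstFPR/LastFPR enumerators. The following is a minimal sketch of one plausible shape for them, not the actual target definitions: the PMI_* enumerator names are invented for illustration, the GPRRegBank/FPRRegBank RegisterBank objects are assumed to be set up by the target elsewhere, and the real tables cover more sizes than the 32/64-bit GPR and 32/64/128-bit FPR rows shown here.

#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"

using namespace llvm;

namespace AArch64 {

// RegisterBank objects for the two banks; assumed to be defined by the
// target's RegisterBankInfo setup code elsewhere.
extern const RegisterBank GPRRegBank;
extern const RegisterBank FPRRegBank;

// One PartMappings row per (bank, size) pair the mapping code can hand out.
// FirstGPR/LastGPR and FirstFPR/LastFPR bracket each bank's rows, so
// "First* + getRegBankBaseIdx(Size)" lands on the row for that size.
// (The PMI_* names are invented for this sketch.)
enum PartialMappingIdx {
  PMI_GPR32 = 0,
  PMI_GPR64,
  PMI_FPR32,
  PMI_FPR64,
  PMI_FPR128,
  FirstGPR = PMI_GPR32,
  LastGPR = PMI_GPR64,
  FirstFPR = PMI_FPR32,
  LastFPR = PMI_FPR128
};

// Each partial mapping covers the whole value (StartIdx 0, full Length) on a
// single bank; values are never split across banks here.
RegisterBankInfo::PartialMapping PartMappings[]{
    /* StartIdx, Length, RegBank */
    {0, 32, GPRRegBank},  // PMI_GPR32
    {0, 64, GPRRegBank},  // PMI_GPR64
    {0, 32, FPRRegBank},  // PMI_FPR32
    {0, 64, FPRRegBank},  // PMI_FPR64
    {0, 128, FPRRegBank}, // PMI_FPR128
};

// Translate a value size in bits into the per-bank row offset used above.
unsigned getRegBankBaseIdx(unsigned Size) {
  if (Size <= 32)
    return 0;
  if (Size <= 64)
    return 1;
  return 2; // 128-bit vectors hit the third FPR row.
}

} // end namespace AArch64

With a layout like this, AArch64::FirstFPR + getRegBankBaseIdx(Ty.getSizeInBits()) in the mapping code above selects the FPR row whose Length matches the value's size, and that row is exactly what the final loop wraps into a ValueMapping. The asserts against LastGPR/LastFPR catch sizes that have no row in the requested bank.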