/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
void LocalStackSlotPass::AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
                                           int64_t &Offset, bool StackGrowsDown,
                                           unsigned &MaxAlign) {
  // If the stack grows down, add the object size to find the lowest address.
  if (StackGrowsDown)
    Offset += MFI.getObjectSize(FrameIdx);

  unsigned Align = MFI.getObjectAlignment(FrameIdx);

  // If the alignment of this object is greater than that of the stack, then
  // increase the stack alignment to match.
  MaxAlign = std::max(MaxAlign, Align);

  // Adjust to alignment boundary.
  Offset = (Offset + Align - 1) / Align * Align;

  int64_t LocalOffset = StackGrowsDown ? -Offset : Offset;
  DEBUG(dbgs() << "Allocate FI(" << FrameIdx << ") to local offset "
               << LocalOffset << "\n");
  // Keep the offset available for base register allocation
  LocalOffsets[FrameIdx] = LocalOffset;
  // And tell MFI about it for PEI to use later
  MFI.mapLocalFrameObject(FrameIdx, LocalOffset);

  if (!StackGrowsDown)
    Offset += MFI.getObjectSize(FrameIdx);

  ++NumAllocations;
}

/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
static inline void AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, int64_t &Offset,
                                     unsigned &MaxAlign, unsigned Skew) {
  // If the stack grows down, add the object size to find the lowest address.
  if (StackGrowsDown)
    Offset += MFI.getObjectSize(FrameIdx);

  unsigned Align = MFI.getObjectAlignment(FrameIdx);

  // If the alignment of this object is greater than that of the stack, then
  // increase the stack alignment to match.
  MaxAlign = std::max(MaxAlign, Align);

  // Adjust to alignment boundary.
  Offset = alignTo(Offset, Align, Skew);

  if (StackGrowsDown) {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, Offset);
    Offset += MFI.getObjectSize(FrameIdx);
  }
}

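// Both AdjustStackOffset variants above round Offset up to an alignment
// boundary: the first with the manual expression
// (Offset + Align - 1) / Align * Align, the second with
// alignTo(Offset, Align, Skew). The sketch below is a self-contained
// illustration of that skewed round-up; it is an assumption about the
// intended arithmetic rather than code copied from LLVM's MathExtras.h, and
// alignToSketch is a hypothetical name.
#include <cassert>
#include <cstdint>

static uint64_t alignToSketch(uint64_t Value, uint64_t Align,
                              uint64_t Skew = 0) {
  assert(Align != 0 && "Align can't be 0.");
  Skew %= Align;
  // Round Value up to the next value congruent to Skew modulo Align.
  return (Value + Align - 1 - Skew) / Align * Align + Skew;
}

// For example, alignToSketch(13, 8) == 16, and with a 4-byte skew
// alignToSketch(13, 8, 4) == 20 (the next value congruent to 4 mod 8).
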
/// Assign frame object to an unused portion of the stack in the fixed stack
/// object range. Return true if the allocation was successful.
static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, unsigned MaxAlign,
                                     BitVector &StackBytesFree) {
  if (MFI.isVariableSizedObjectIndex(FrameIdx))
    return false;

  if (StackBytesFree.none()) {
    // clear it to speed up later scavengeStackSlot calls to
    // StackBytesFree.none()
    StackBytesFree.clear();
    return false;
  }

  unsigned ObjAlign = MFI.getObjectAlignment(FrameIdx);
  if (ObjAlign > MaxAlign)
    return false;

  int64_t ObjSize = MFI.getObjectSize(FrameIdx);
  int FreeStart;
  for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
       FreeStart = StackBytesFree.find_next(FreeStart)) {

    // Check that free space has suitable alignment.
    unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
    if (alignTo(ObjStart, ObjAlign) != ObjStart)
      continue;

    if (FreeStart + ObjSize > StackBytesFree.size())
      return false;

    bool AllBytesFree = true;
    for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
      if (!StackBytesFree.test(FreeStart + Byte)) {
        AllBytesFree = false;
        break;
      }
    if (AllBytesFree)
      break;
  }

  if (FreeStart == -1)
    return false;

  if (StackGrowsDown) {
    int ObjStart = -(FreeStart + ObjSize);
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << ObjStart << "]\n");
    MFI.setObjectOffset(FrameIdx, ObjStart);
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << FreeStart << "]\n");
    MFI.setObjectOffset(FrameIdx, FreeStart);
  }

  StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
  return true;
}

void MIRPrinter::convertStackObjects(yaml::MachineFunction &MF,
                                     const MachineFrameInfo &MFI,
                                     MachineModuleInfo &MMI,
                                     ModuleSlotTracker &MST,
                                     const TargetRegisterInfo *TRI) {
  // Process fixed stack objects.
  unsigned ID = 0;
  for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) {
    if (MFI.isDeadObjectIndex(I))
      continue;

    yaml::FixedMachineStackObject YamlObject;
    YamlObject.ID = ID;
    YamlObject.Type = MFI.isSpillSlotObjectIndex(I)
                          ? yaml::FixedMachineStackObject::SpillSlot
                          : yaml::FixedMachineStackObject::DefaultType;
    YamlObject.Offset = MFI.getObjectOffset(I);
    YamlObject.Size = MFI.getObjectSize(I);
    YamlObject.Alignment = MFI.getObjectAlignment(I);
    YamlObject.IsImmutable = MFI.isImmutableObjectIndex(I);
    YamlObject.IsAliased = MFI.isAliasedObjectIndex(I);
    MF.FixedStackObjects.push_back(YamlObject);
    StackObjectOperandMapping.insert(
        std::make_pair(I, FrameIndexOperand::createFixed(ID++)));
  }

  // Process ordinary stack objects.
  ID = 0;
  for (int I = 0, E = MFI.getObjectIndexEnd(); I < E; ++I) {
    if (MFI.isDeadObjectIndex(I))
      continue;

    yaml::MachineStackObject YamlObject;
    YamlObject.ID = ID;
    if (const auto *Alloca = MFI.getObjectAllocation(I))
      YamlObject.Name.Value =
          Alloca->hasName() ? Alloca->getName() : "<unnamed alloca>";
    YamlObject.Type = MFI.isSpillSlotObjectIndex(I)
                          ? yaml::MachineStackObject::SpillSlot
                          : MFI.isVariableSizedObjectIndex(I)
                                ? yaml::MachineStackObject::VariableSized
                                : yaml::MachineStackObject::DefaultType;
    YamlObject.Offset = MFI.getObjectOffset(I);
    YamlObject.Size = MFI.getObjectSize(I);
    YamlObject.Alignment = MFI.getObjectAlignment(I);

    MF.StackObjects.push_back(YamlObject);
    StackObjectOperandMapping.insert(std::make_pair(
        I, FrameIndexOperand::create(YamlObject.Name.Value, ID++)));
  }

  for (const auto &CSInfo : MFI.getCalleeSavedInfo()) {
    yaml::StringValue Reg;
    printReg(CSInfo.getReg(), Reg, TRI);
    auto StackObjectInfo = StackObjectOperandMapping.find(CSInfo.getFrameIdx());
    assert(StackObjectInfo != StackObjectOperandMapping.end() &&
           "Invalid stack object index");
    const FrameIndexOperand &StackObject = StackObjectInfo->second;
    if (StackObject.IsFixed)
      MF.FixedStackObjects[StackObject.ID].CalleeSavedRegister = Reg;
    else
      MF.StackObjects[StackObject.ID].CalleeSavedRegister = Reg;
  }

  for (unsigned I = 0, E = MFI.getLocalFrameObjectCount(); I < E; ++I) {
    auto LocalObject = MFI.getLocalFrameObjectMap(I);
    auto StackObjectInfo = StackObjectOperandMapping.find(LocalObject.first);
    assert(StackObjectInfo != StackObjectOperandMapping.end() &&
           "Invalid stack object index");
    const FrameIndexOperand &StackObject = StackObjectInfo->second;
    assert(!StackObject.IsFixed && "Expected a locally mapped stack object");
    MF.StackObjects[StackObject.ID].LocalOffset = LocalObject.second;
  }

  // Print the stack object references in the frame information class after
  // converting the stack objects.
  if (MFI.hasStackProtectorIndex()) {
    raw_string_ostream StrOS(MF.FrameInfo.StackProtector.Value);
    MIPrinter(StrOS, MST, RegisterMaskIds, StackObjectOperandMapping)
        .printStackObjectReference(MFI.getStackProtectorIndex());
  }

  // Print the debug variable information.
  for (MachineModuleInfo::VariableDbgInfo &DebugVar :
       MMI.getVariableDbgInfo()) {
    auto StackObjectInfo = StackObjectOperandMapping.find(DebugVar.Slot);
    assert(StackObjectInfo != StackObjectOperandMapping.end() &&
           "Invalid stack object index");
    const FrameIndexOperand &StackObject = StackObjectInfo->second;
    assert(!StackObject.IsFixed && "Expected a non-fixed stack object");
    auto &Object = MF.StackObjects[StackObject.ID];
    {
      raw_string_ostream StrOS(Object.DebugVar.Value);
      DebugVar.Var->printAsOperand(StrOS, MST);
    }
    {
      raw_string_ostream StrOS(Object.DebugExpr.Value);
      DebugVar.Expr->printAsOperand(StrOS, MST);
    }
    {
      raw_string_ostream StrOS(Object.DebugLoc.Value);
      DebugVar.Loc->printAsOperand(StrOS, MST);
    }
  }
}

void Nios2RegisterInfo::adjustNios2StackFrame(MachineFunction &MF) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  Nios2FunctionInfo *Nios2FI = MF.getInfo<Nios2FunctionInfo>();
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
  unsigned RegSize = 4;
  bool HasGP = Nios2FI->needGPSaveRestore();

  // Min and Max CSI FrameIndex.
  int MinCSFI = -1, MaxCSFI = -1;

  // See the description at Nios2MachineFunction.h
  int TopCPUSavedRegOff = -1;

  // It happens that the default stack frame allocation order does not directly
  // map to the convention used for nios2. So we must fix it. We move the callee
  // save register slots after the local variables area, as described in the
  // stack frame above.
  unsigned CalleeSavedAreaSize = 0;
  if (!CSI.empty()) {
    MinCSFI = CSI[0].getFrameIdx();
    MaxCSFI = CSI[CSI.size()-1].getFrameIdx();
  }
  for (unsigned i = 0, e = CSI.size(); i != e; ++i)
    CalleeSavedAreaSize += MFI->getObjectAlignment(CSI[i].getFrameIdx());

  unsigned StackOffset = HasGP ? (Nios2FI->getGPStackOffset()+RegSize) : 16;

  // Adjust local variables. They should come on the stack right
  // after the arguments.
  int LastOffsetFI = -1;
  for (int i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
    if (i >= MinCSFI && i <= MaxCSFI)
      continue;
    if (MFI->isDeadObjectIndex(i))
      continue;
    unsigned Offset =
      StackOffset + MFI->getObjectOffset(i) - CalleeSavedAreaSize;
    if (LastOffsetFI == -1)
      LastOffsetFI = i;
    if (Offset > MFI->getObjectOffset(LastOffsetFI))
      LastOffsetFI = i;
    MFI->setObjectOffset(i, Offset);
  }

  // Adjust CPU Callee Saved Registers Area. Registers RA and FP must
  // be saved in this CPU Area. This whole Area must be aligned to the
  // default Stack Alignment requirements.
  if (LastOffsetFI >= 0)
    StackOffset = MFI->getObjectOffset(LastOffsetFI)+
                  MFI->getObjectSize(LastOffsetFI);
  StackOffset = ((StackOffset+StackAlign-1)/StackAlign*StackAlign);

  int stackSize = MF.getFrameInfo()->getStackSize();
  if (MFI->hasCalls()) {
    stackSize += RegSize;
  }
  if (hasFP(MF)) {
    stackSize += RegSize;
  }
  for (unsigned i = 0, e = CSI.size(); i != e ; ++i) {
    if (CSI[i].getRegClass() != Nios2::CPURegsRegisterClass)
      break;
    stackSize += RegSize;
  }

  if (MFI->hasCalls()) {
    MFI->setObjectOffset(MFI->CreateStackObject(RegSize, RegSize, true),
                         hasFP(MF) ? 4 : 0);
    Nios2FI->setRAStackOffset(hasFP(MF) ? 4 : 0);
    TopCPUSavedRegOff = hasFP(MF) ? 4 : 0;
    StackOffset += RegSize;
  }

  if (hasFP(MF)) {
    MFI->setObjectOffset(MFI->CreateStackObject(RegSize, RegSize, true), 0);
    Nios2FI->setFPStackOffset(0);
    TopCPUSavedRegOff = 0;
    StackOffset += RegSize;
  }

  for (unsigned i = 0, e = CSI.size(); i != e ; ++i) {
    if (CSI[i].getRegClass() != Nios2::CPURegsRegisterClass)
      break;
    MFI->setObjectOffset(CSI[i].getFrameIdx(),
                         stackSize + (-(StackOffset + 4)));
    TopCPUSavedRegOff = stackSize + (-(StackOffset + 4));
    StackOffset += MFI->getObjectAlignment(CSI[i].getFrameIdx());
  }

  // Update frame info
  MFI->setStackSize(stackSize);
}

/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
///
void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
  const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();

  bool StackGrowsDown =
    TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo *MFI = Fn.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0 &&
         "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of local area.
  // We currently don't support filling in holes in between fixed sized
  // objects, so we adjust 'Offset' to point to the end of last fixed sized
  // preallocated object.
  for (int i = MFI->getObjectIndexBegin(); i != 0; ++i) {
    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at lower address of
      // the object -- which is given by offset. For down growing stack
      // the offset is negative, so we negate the offset to get the distance.
      FixedOff = -MFI->getObjectOffset(i);
    } else {
      // The maximum distance from the start pointer is at the upper
      // address of the object.
      FixedOff = MFI->getObjectOffset(i) + MFI->getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }

  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (StackGrowsDown) {
    for (unsigned i = MinCSFrameIndex; i <= MaxCSFrameIndex; ++i) {
      // If the stack grows down, we need to add the size to find the lowest
      // address of the object.
      Offset += MFI->getObjectSize(i);

      unsigned Align = MFI->getObjectAlignment(i);
      // Adjust to alignment boundary
      Offset = (Offset+Align-1)/Align*Align;

      MFI->setObjectOffset(i, -Offset);        // Set the computed offset
    }
  } else {
    int MaxCSFI = MaxCSFrameIndex, MinCSFI = MinCSFrameIndex;
    for (int i = MaxCSFI; i >= MinCSFI ; --i) {
      unsigned Align = MFI->getObjectAlignment(i);
      // Adjust to alignment boundary
      Offset = (Offset+Align-1)/Align*Align;

      MFI->setObjectOffset(i, Offset);
      Offset += MFI->getObjectSize(i);
    }
  }

  unsigned MaxAlign = MFI->getMaxAlignment();

  // Make sure the special register scavenging spill slot is closest to the
  // frame pointer if a frame pointer is required.
  const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo();
  if (RS && TFI.hasFP(Fn) && RegInfo->useFPForScavengingIndex(Fn) &&
      !RegInfo->needsStackRealignment(Fn)) {
    int SFI = RS->getScavengingFrameIndex();
    if (SFI >= 0)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
  }

  // FIXME: Once this is working, then enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI->getUseLocalStackAllocationBlock()) {
    unsigned Align = MFI->getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = (Offset + Align - 1) / Align * Align;

    DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    for (unsigned i = 0, e = MFI->getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI->getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP["
                   << FIOffset << "]\n");
      MFI->setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block
    Offset += MFI->getLocalFrameSize();

    MaxAlign = std::max(Align, MaxAlign);
  }

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> LargeStackObjs;
  if (MFI->getStackProtectorIndex() >= 0) {
    AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), StackGrowsDown,
                      Offset, MaxAlign);

    // Assign large stack objects first.
    for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
      if (MFI->isObjectPreAllocated(i) &&
          MFI->getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && (int)i == RS->getScavengingFrameIndex())
        continue;
      if (MFI->isDeadObjectIndex(i))
        continue;
      if (MFI->getStackProtectorIndex() == (int)i)
        continue;
      if (!MFI->MayNeedStackProtector(i))
        continue;

      AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
      LargeStackObjs.insert(i);
    }
  }

  // Then assign frame offsets to stack objects that are not used to spill
  // callee saved registers.
  for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
    if (MFI->isObjectPreAllocated(i) &&
        MFI->getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && (int)i == RS->getScavengingFrameIndex())
      continue;
    if (MFI->isDeadObjectIndex(i))
      continue;
    if (MFI->getStackProtectorIndex() == (int)i)
      continue;
    if (LargeStackObjs.count(i))
      continue;

    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
  }

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && (!TFI.hasFP(Fn) || RegInfo->needsStackRealignment(Fn) ||
             !RegInfo->useFPForScavengingIndex(Fn))) {
    int SFI = RS->getScavengingFrameIndex();
    if (SFI >= 0)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI->adjustsStack() && TFI.hasReservedCallFrame(Fn))
      Offset += MFI->getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment. If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    unsigned StackAlign;
    if (MFI->adjustsStack() || MFI->hasVarSizedObjects() ||
        (RegInfo->needsStackRealignment(Fn) && MFI->getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlignment();
    else
      StackAlign = TFI.getTransientStackAlignment();

    // If the frame pointer is eliminated, all frame offsets will be relative to
    // SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    unsigned AlignMask = StackAlign - 1;
    Offset = (Offset + AlignMask) & ~uint64_t(AlignMask);
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI->setStackSize(StackSize);
  NumBytesStackSpace += StackSize;
}

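// The final frame-rounding step above uses a mask,
// (Offset + AlignMask) & ~AlignMask, rather than the division form used for
// individual objects. The self-contained check below illustrates why the two
// are interchangeable when the alignment is a power of two; it is an
// explanatory sketch only, and roundUpDiv/roundUpMask are hypothetical names.
#include <cassert>
#include <cstdint>

static uint64_t roundUpDiv(uint64_t Offset, uint64_t Align) {
  return (Offset + Align - 1) / Align * Align;
}

static uint64_t roundUpMask(uint64_t Offset, uint64_t Align) {
  uint64_t AlignMask = Align - 1; // Align must be a power of two.
  return (Offset + AlignMask) & ~AlignMask;
}

static void checkRounding() {
  for (uint64_t Align : {1u, 2u, 4u, 8u, 16u, 32u})
    for (uint64_t Offset = 0; Offset < 100; ++Offset)
      assert(roundUpDiv(Offset, Align) == roundUpMask(Offset, Align));
}
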
/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
///
void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
  const TargetFrameInfo &TFI = *Fn.getTarget().getFrameInfo();

  bool StackGrowsDown =
    TFI.getStackGrowthDirection() == TargetFrameInfo::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo *MFI = Fn.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0 &&
         "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of local area.
  // We currently don't support filling in holes in between fixed sized
  // objects, so we adjust 'Offset' to point to the end of last fixed sized
  // preallocated object.
  for (int i = MFI->getObjectIndexBegin(); i != 0; ++i) {
    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at lower address of
      // the object -- which is given by offset. For down growing stack
      // the offset is negative, so we negate the offset to get the distance.
      FixedOff = -MFI->getObjectOffset(i);
    } else {
      // The maximum distance from the start pointer is at the upper
      // address of the object.
      FixedOff = MFI->getObjectOffset(i) + MFI->getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }

  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (StackGrowsDown) {
    for (unsigned i = MinCSFrameIndex; i <= MaxCSFrameIndex; ++i) {
      // If the stack grows down, we need to add the size to find the lowest
      // address of the object.
      Offset += MFI->getObjectSize(i);

      unsigned Align = MFI->getObjectAlignment(i);
      // Adjust to alignment boundary
      Offset = (Offset+Align-1)/Align*Align;

      MFI->setObjectOffset(i, -Offset);        // Set the computed offset
    }
  } else {
    int MaxCSFI = MaxCSFrameIndex, MinCSFI = MinCSFrameIndex;
    for (int i = MaxCSFI; i >= MinCSFI ; --i) {
      unsigned Align = MFI->getObjectAlignment(i);
      // Adjust to alignment boundary
      Offset = (Offset+Align-1)/Align*Align;

      MFI->setObjectOffset(i, Offset);
      Offset += MFI->getObjectSize(i);
    }
  }

  unsigned MaxAlign = MFI->getMaxAlignment();

  // Make sure the special register scavenging spill slot is closest to the
  // frame pointer if a frame pointer is required.
  const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo();
  if (RS && RegInfo->hasFP(Fn) && !RegInfo->needsStackRealignment(Fn)) {
    int SFI = RS->getScavengingFrameIndex();
    if (SFI >= 0)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
  }

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  if (MFI->getStackProtectorIndex() >= 0)
    AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), StackGrowsDown,
                      Offset, MaxAlign);

  // Then assign frame offsets to stack objects that are not used to spill
  // callee saved registers.
  for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && (int)i == RS->getScavengingFrameIndex())
      continue;
    if (MFI->isDeadObjectIndex(i))
      continue;
    if (MFI->getStackProtectorIndex() == (int)i)
      continue;

    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
  }

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && (!RegInfo->hasFP(Fn) || RegInfo->needsStackRealignment(Fn))) {
    int SFI = RS->getScavengingFrameIndex();
    if (SFI >= 0)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
  }

  if (!RegInfo->targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI->adjustsStack() && RegInfo->hasReservedCallFrame(Fn))
      Offset += MFI->getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment. If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    unsigned StackAlign;
    if (MFI->adjustsStack() || MFI->hasVarSizedObjects() ||
        (RegInfo->needsStackRealignment(Fn) && MFI->getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlignment();
    else
      StackAlign = TFI.getTransientStackAlignment();

    // If the frame pointer is eliminated, all frame offsets will be relative to
    // SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    unsigned AlignMask = StackAlign - 1;
    Offset = (Offset + AlignMask) & ~uint64_t(AlignMask);
  }

  // Update frame info to pretend that this is part of the stack...
  MFI->setStackSize(Offset - LocalAreaOffset);
}

void MipsFrameInfo::adjustMipsStackFrame(MachineFunction &MF) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
  unsigned RegSize = STI.isGP32bit() ? 4 : 8;
  bool HasGP = MipsFI->needGPSaveRestore();

  // Min and Max CSI FrameIndex.
  int MinCSFI = -1, MaxCSFI = -1;

  // See the description at MipsMachineFunction.h
  int TopCPUSavedRegOff = -1, TopFPUSavedRegOff = -1;

  // Replace the dummy '0' SPOffset by the negative offsets, as explained on
  // LowerFormalArguments. Leaving '0' for while is necessary to avoid the
  // approach done by calculateFrameObjectOffsets to the stack frame.
  MipsFI->adjustLoadArgsFI(MFI);
  MipsFI->adjustStoreVarArgsFI(MFI);

  // It happens that the default stack frame allocation order does not directly
  // map to the convention used for mips. So we must fix it. We move the callee
  // save register slots after the local variables area, as described in the
  // stack frame above.
  unsigned CalleeSavedAreaSize = 0;
  if (!CSI.empty()) {
    MinCSFI = CSI[0].getFrameIdx();
    MaxCSFI = CSI[CSI.size()-1].getFrameIdx();
  }
  for (unsigned i = 0, e = CSI.size(); i != e; ++i)
    CalleeSavedAreaSize += MFI->getObjectAlignment(CSI[i].getFrameIdx());

  unsigned StackOffset = HasGP ? (MipsFI->getGPStackOffset()+RegSize)
                               : (STI.isABI_O32() ? 16 : 0);

  // Adjust local variables. They should come on the stack right
  // after the arguments.
  int LastOffsetFI = -1;
  for (int i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
    if (i >= MinCSFI && i <= MaxCSFI)
      continue;
    if (MFI->isDeadObjectIndex(i))
      continue;
    unsigned Offset =
      StackOffset + MFI->getObjectOffset(i) - CalleeSavedAreaSize;
    if (LastOffsetFI == -1)
      LastOffsetFI = i;
    if (Offset > MFI->getObjectOffset(LastOffsetFI))
      LastOffsetFI = i;
    MFI->setObjectOffset(i, Offset);
  }

  // Adjust CPU Callee Saved Registers Area. Registers RA and FP must
  // be saved in this CPU Area. This whole area must be aligned to the
  // default Stack Alignment requirements.
  if (LastOffsetFI >= 0)
    StackOffset = MFI->getObjectOffset(LastOffsetFI)+
                  MFI->getObjectSize(LastOffsetFI);
  StackOffset = ((StackOffset+StackAlign-1)/StackAlign*StackAlign);

  for (unsigned i = 0, e = CSI.size(); i != e ; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (!Mips::CPURegsRegisterClass->contains(Reg))
      break;
    MFI->setObjectOffset(CSI[i].getFrameIdx(), StackOffset);
    TopCPUSavedRegOff = StackOffset;
    StackOffset += MFI->getObjectAlignment(CSI[i].getFrameIdx());
  }

  // Stack locations for FP and RA. If only one of them is used,
  // the space must be allocated for both, otherwise no space at all.
  if (hasFP(MF) || MFI->adjustsStack()) {
    // FP stack location
    MFI->setObjectOffset(MFI->CreateStackObject(RegSize, RegSize, true),
                         StackOffset);
    MipsFI->setFPStackOffset(StackOffset);
    TopCPUSavedRegOff = StackOffset;
    StackOffset += RegSize;

    // SP stack location
    MFI->setObjectOffset(MFI->CreateStackObject(RegSize, RegSize, true),
                         StackOffset);
    MipsFI->setRAStackOffset(StackOffset);
    StackOffset += RegSize;

    if (MFI->adjustsStack())
      TopCPUSavedRegOff += RegSize;
  }

  StackOffset = ((StackOffset+StackAlign-1)/StackAlign*StackAlign);

  // Adjust FPU Callee Saved Registers Area. This Area must be
  // aligned to the default Stack Alignment requirements.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (Mips::CPURegsRegisterClass->contains(Reg))
      continue;
    MFI->setObjectOffset(CSI[i].getFrameIdx(), StackOffset);
    TopFPUSavedRegOff = StackOffset;
    StackOffset += MFI->getObjectAlignment(CSI[i].getFrameIdx());
  }
  StackOffset = ((StackOffset+StackAlign-1)/StackAlign*StackAlign);

  // Update frame info
  MFI->setStackSize(StackOffset);

  // Recalculate the final tops offset. The final values must be '0'
  // if there isn't a callee saved register for CPU or FPU, otherwise
  // a negative offset is needed.
  if (TopCPUSavedRegOff >= 0)
    MipsFI->setCPUTopSavedRegOff(TopCPUSavedRegOff-StackOffset);
  if (TopFPUSavedRegOff >= 0)
    MipsFI->setFPUTopSavedRegOff(TopFPUSavedRegOff-StackOffset);
}

static void computeCalleeSaveRegisterPairs(
    MachineFunction &MF, const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI, SmallVectorImpl<RegPairInfo> &RegPairs) {

  if (CSI.empty())
    return;

  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  CallingConv::ID CC = MF.getFunction()->getCallingConv();
  unsigned Count = CSI.size();
  (void)CC;
  // MachO's compact unwind format relies on all registers being stored in
  // pairs.
  assert((!MF.getSubtarget<AArch64Subtarget>().isTargetMachO() ||
          CC == CallingConv::PreserveMost ||
          (Count & 1) == 0) &&
         "Odd number of callee-saved regs to spill!");
  unsigned Offset = AFI->getCalleeSavedStackSize();

  for (unsigned i = 0; i < Count; ++i) {
    RegPairInfo RPI;
    RPI.Reg1 = CSI[i].getReg();

    assert(AArch64::GPR64RegClass.contains(RPI.Reg1) ||
           AArch64::FPR64RegClass.contains(RPI.Reg1));
    RPI.IsGPR = AArch64::GPR64RegClass.contains(RPI.Reg1);

    // Add the next reg to the pair if it is in the same register class.
    if (i + 1 < Count) {
      unsigned NextReg = CSI[i + 1].getReg();
      if ((RPI.IsGPR && AArch64::GPR64RegClass.contains(NextReg)) ||
          (!RPI.IsGPR && AArch64::FPR64RegClass.contains(NextReg)))
        RPI.Reg2 = NextReg;
    }

    // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
    // list to come in sorted by frame index so that we can issue the store
    // pair instructions directly. Assert if we see anything otherwise.
    //
    // The order of the registers in the list is controlled by
    // getCalleeSavedRegs(), so they will always be in-order, as well.
    assert((!RPI.isPaired() ||
            (CSI[i].getFrameIdx() + 1 == CSI[i + 1].getFrameIdx())) &&
           "Out of order callee saved regs!");

    // MachO's compact unwind format relies on all registers being stored in
    // adjacent register pairs.
    assert((!MF.getSubtarget<AArch64Subtarget>().isTargetMachO() ||
            CC == CallingConv::PreserveMost ||
            (RPI.isPaired() &&
             ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
              RPI.Reg1 + 1 == RPI.Reg2))) &&
           "Callee-save registers not saved as adjacent register pair!");

    RPI.FrameIdx = CSI[i].getFrameIdx();

    if (Count * 8 != AFI->getCalleeSavedStackSize() && !RPI.isPaired()) {
      // Round up size of non-pair to pair size if we need to pad the
      // callee-save area to ensure 16-byte alignment.
      Offset -= 16;
      assert(MFI->getObjectAlignment(RPI.FrameIdx) <= 16);
      MFI->setObjectSize(RPI.FrameIdx, 16);
    } else
      Offset -= RPI.isPaired() ? 16 : 8;
    assert(Offset % 8 == 0);
    RPI.Offset = Offset / 8;
    assert((RPI.Offset >= -64 && RPI.Offset <= 63) &&
           "Offset out of bounds for LDP/STP immediate");

    RegPairs.push_back(RPI);
    if (RPI.isPaired())
      ++i;
  }

  // Align first offset to even 16-byte boundary to avoid additional SP
  // adjustment instructions.
  // Last pair offset is size of whole callee-save region for SP
  // pre-dec/post-inc.
  RegPairInfo &LastPair = RegPairs.back();
  assert(AFI->getCalleeSavedStackSize() % 8 == 0);
  LastPair.Offset = AFI->getCalleeSavedStackSize() / 8;
}

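// computeCalleeSaveRegisterPairs asserts that each RPI.Offset (a byte offset
// that is a multiple of 8, divided by 8) lies in [-64, 63], the range of the
// signed, scaled 7-bit immediate accepted by AArch64 LDP/STP of 64-bit
// registers. The helper below is a hedged illustration of that bound;
// fitsInLdpStpImm7 is a hypothetical name, not part of the backend.
#include <cstdint>

static bool fitsInLdpStpImm7(int64_t ByteOffset) {
  if (ByteOffset % 8 != 0)
    return false;                       // must be 8-byte aligned
  int64_t Scaled = ByteOffset / 8;      // the value stored in RPI.Offset
  return Scaled >= -64 && Scaled <= 63; // signed 7-bit scaled immediate
}
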
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
    // SGPR register spill
    case AMDGPU::SI_SPILL_S512_SAVE:
    case AMDGPU::SI_SPILL_S256_SAVE:
    case AMDGPU::SI_SPILL_S128_SAVE:
    case AMDGPU::SI_SPILL_S64_SAVE:
    case AMDGPU::SI_SPILL_S32_SAVE: {
      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
        unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                           &AMDGPU::SGPR_32RegClass, i);
        struct SIMachineFunctionInfo::SpilledReg Spill =
            MFI->getSpilledReg(MF, Index, i);

        if (Spill.hasReg()) {
          BuildMI(*MBB, MI, DL,
                  TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
                  Spill.VGPR)
                  .addReg(SubReg)
                  .addImm(Spill.Lane);

          // FIXME: Since this spills to another register instead of an actual
          // frame index, we should delete the frame index when all references to
          // it are fixed.
        } else {
          // Spill SGPR to a frame index.
          // FIXME we should use S_STORE_DWORD here for VI.
          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
                  .addReg(SubReg);

          unsigned Size = FrameInfo->getObjectSize(Index);
          unsigned Align = FrameInfo->getObjectAlignment(Index);
          MachinePointerInfo PtrInfo
              = MachinePointerInfo::getFixedStack(*MF, Index);
          MachineMemOperand *MMO
              = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                         Size, Align);
          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_SAVE))
                  .addReg(TmpReg)                         // src
                  .addFrameIndex(Index)                   // frame_idx
                  .addReg(MFI->getScratchRSrcReg())       // scratch_rsrc
                  .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
                  .addImm(i * 4)                          // offset
                  .addMemOperand(MMO);
        }
      }
      MI->eraseFromParent();
      break;
    }

    // SGPR register restore
    case AMDGPU::SI_SPILL_S512_RESTORE:
    case AMDGPU::SI_SPILL_S256_RESTORE:
    case AMDGPU::SI_SPILL_S128_RESTORE:
    case AMDGPU::SI_SPILL_S64_RESTORE:
    case AMDGPU::SI_SPILL_S32_RESTORE: {
      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
        unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                           &AMDGPU::SGPR_32RegClass, i);
        struct SIMachineFunctionInfo::SpilledReg Spill =
            MFI->getSpilledReg(MF, Index, i);

        if (Spill.hasReg()) {
          BuildMI(*MBB, MI, DL,
                  TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
                  SubReg)
                  .addReg(Spill.VGPR)
                  .addImm(Spill.Lane)
                  .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
        } else {
          // Restore SGPR from a stack slot.
          // FIXME: We should use S_LOAD_DWORD here for VI.
          unsigned Align = FrameInfo->getObjectAlignment(Index);
          unsigned Size = FrameInfo->getObjectSize(Index);

          MachinePointerInfo PtrInfo
              = MachinePointerInfo::getFixedStack(*MF, Index);

          MachineMemOperand *MMO = MF->getMachineMemOperand(
              PtrInfo, MachineMemOperand::MOLoad, Size, Align);

          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_RESTORE), TmpReg)
                  .addFrameIndex(Index)                   // frame_idx
                  .addReg(MFI->getScratchRSrcReg())       // scratch_rsrc
                  .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
                  .addImm(i * 4)                          // offset
                  .addMemOperand(MMO);
          BuildMI(*MBB, MI, DL,
                  TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg)
                  .addReg(TmpReg, RegState::Kill)
                  .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
        }
      }

      // TODO: only do this when it is needed
      switch (MF->getSubtarget<AMDGPUSubtarget>().getGeneration()) {
      case AMDGPUSubtarget::SOUTHERN_ISLANDS:
        // "VALU writes SGPR" -> "SMRD reads that SGPR" needs 4 wait states
        // ("S_NOP 3") on SI
        TII->insertWaitStates(MI, 4);
        break;
      case AMDGPUSubtarget::SEA_ISLANDS:
        break;
      default: // VOLCANIC_ISLANDS and later
        // "VALU writes SGPR -> VMEM reads that SGPR" needs 5 wait states
        // ("S_NOP 4") on VI and later. This also applies to VALUs which write
        // VCC, but we're unlikely to see VMEM use VCC.
        TII->insertWaitStates(MI, 5);
      }

      MI->eraseFromParent();
      break;
    }

    // VGPR register spill
    case AMDGPU::SI_SPILL_V512_SAVE:
    case AMDGPU::SI_SPILL_V256_SAVE:
    case AMDGPU::SI_SPILL_V128_SAVE:
    case AMDGPU::SI_SPILL_V96_SAVE:
    case AMDGPU::SI_SPILL_V64_SAVE:
    case AMDGPU::SI_SPILL_V32_SAVE:
      buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
            TII->getNamedOperand(*MI, AMDGPU::OpName::src)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
            FrameInfo->getObjectOffset(Index) +
            TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(), RS);
      MI->eraseFromParent();
      break;
    case AMDGPU::SI_SPILL_V32_RESTORE:
    case AMDGPU::SI_SPILL_V64_RESTORE:
    case AMDGPU::SI_SPILL_V96_RESTORE:
    case AMDGPU::SI_SPILL_V128_RESTORE:
    case AMDGPU::SI_SPILL_V256_RESTORE:
    case AMDGPU::SI_SPILL_V512_RESTORE: {
      buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
            TII->getNamedOperand(*MI, AMDGPU::OpName::dst)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
            FrameInfo->getObjectOffset(Index) +
            TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(), RS);
      MI->eraseFromParent();
      break;
    }

    default: {
      int64_t Offset = FrameInfo->getObjectOffset(Index);
      FIOp.ChangeToImmediate(Offset);
      if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
        unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
        BuildMI(*MBB, MI, MI->getDebugLoc(),
                TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
                .addImm(Offset);
        FIOp.ChangeToRegister(TmpReg, false, false, true);
      }
    }
  }
}

void MIRPrinter::convertStackObjects(yaml::MachineFunction &MF,
                                     const MachineFrameInfo &MFI,
                                     const TargetRegisterInfo *TRI) {
  // Process fixed stack objects.
  unsigned ID = 0;
  for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) {
    if (MFI.isDeadObjectIndex(I))
      continue;

    yaml::FixedMachineStackObject YamlObject;
    YamlObject.ID = ID;
    YamlObject.Type = MFI.isSpillSlotObjectIndex(I)
                          ? yaml::FixedMachineStackObject::SpillSlot
                          : yaml::FixedMachineStackObject::DefaultType;
    YamlObject.Offset = MFI.getObjectOffset(I);
    YamlObject.Size = MFI.getObjectSize(I);
    YamlObject.Alignment = MFI.getObjectAlignment(I);
    YamlObject.IsImmutable = MFI.isImmutableObjectIndex(I);
    YamlObject.IsAliased = MFI.isAliasedObjectIndex(I);
    MF.FixedStackObjects.push_back(YamlObject);
    StackObjectOperandMapping.insert(
        std::make_pair(I, FrameIndexOperand::createFixed(ID++)));
  }

  // Process ordinary stack objects.
  ID = 0;
  for (int I = 0, E = MFI.getObjectIndexEnd(); I < E; ++I) {
    if (MFI.isDeadObjectIndex(I))
      continue;

    yaml::MachineStackObject YamlObject;
    YamlObject.ID = ID;
    if (const auto *Alloca = MFI.getObjectAllocation(I))
      YamlObject.Name.Value =
          Alloca->hasName() ? Alloca->getName() : "<unnamed alloca>";
    YamlObject.Type = MFI.isSpillSlotObjectIndex(I)
                          ? yaml::MachineStackObject::SpillSlot
                          : MFI.isVariableSizedObjectIndex(I)
                                ? yaml::MachineStackObject::VariableSized
                                : yaml::MachineStackObject::DefaultType;
    YamlObject.Offset = MFI.getObjectOffset(I);
    YamlObject.Size = MFI.getObjectSize(I);
    YamlObject.Alignment = MFI.getObjectAlignment(I);

    MF.StackObjects.push_back(YamlObject);
    StackObjectOperandMapping.insert(std::make_pair(
        I, FrameIndexOperand::create(YamlObject.Name.Value, ID++)));
  }

  for (const auto &CSInfo : MFI.getCalleeSavedInfo()) {
    yaml::StringValue Reg;
    printReg(CSInfo.getReg(), Reg, TRI);
    auto StackObjectInfo = StackObjectOperandMapping.find(CSInfo.getFrameIdx());
    assert(StackObjectInfo != StackObjectOperandMapping.end() &&
           "Invalid stack object index");
    const FrameIndexOperand &StackObject = StackObjectInfo->second;
    if (StackObject.IsFixed)
      MF.FixedStackObjects[StackObject.ID].CalleeSavedRegister = Reg;
    else
      MF.StackObjects[StackObject.ID].CalleeSavedRegister = Reg;
  }
}

/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
///
void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
  const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
  StackProtector *SP = &getAnalysis<StackProtector>();

  bool StackGrowsDown =
    TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo *MFI = Fn.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0 &&
         "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

  // Skew to be applied to alignment.
  unsigned Skew = TFI.getStackAlignmentSkew(Fn);

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of local area.
  // We currently don't support filling in holes in between fixed sized
  // objects, so we adjust 'Offset' to point to the end of last fixed sized
  // preallocated object.
  for (int i = MFI->getObjectIndexBegin(); i != 0; ++i) {
    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at lower address of
      // the object -- which is given by offset. For down growing stack
      // the offset is negative, so we negate the offset to get the distance.
      FixedOff = -MFI->getObjectOffset(i);
    } else {
      // The maximum distance from the start pointer is at the upper
      // address of the object.
      FixedOff = MFI->getObjectOffset(i) + MFI->getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }

  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (StackGrowsDown) {
    for (unsigned i = MinCSFrameIndex; i <= MaxCSFrameIndex; ++i) {
      // If the stack grows down, we need to add the size to find the lowest
      // address of the object.
      Offset += MFI->getObjectSize(i);

      unsigned Align = MFI->getObjectAlignment(i);
      // Adjust to alignment boundary
      Offset = alignTo(Offset, Align, Skew);

      DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << -Offset << "]\n");
      MFI->setObjectOffset(i, -Offset);        // Set the computed offset
    }
  } else {
    int MaxCSFI = MaxCSFrameIndex, MinCSFI = MinCSFrameIndex;
    for (int i = MaxCSFI; i >= MinCSFI ; --i) {
      unsigned Align = MFI->getObjectAlignment(i);
      // Adjust to alignment boundary
      Offset = alignTo(Offset, Align, Skew);

      DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << Offset << "]\n");
      MFI->setObjectOffset(i, Offset);
      Offset += MFI->getObjectSize(i);
    }
  }

  unsigned MaxAlign = MFI->getMaxAlignment();

  // Make sure the special register scavenging spill slot is closest to the
  // incoming stack pointer if a frame pointer is required and is closer
  // to the incoming rather than the final stack pointer.
  const TargetRegisterInfo *RegInfo = Fn.getSubtarget().getRegisterInfo();
  bool EarlyScavengingSlots = (TFI.hasFP(Fn) &&
                               TFI.isFPCloseToIncomingSP() &&
                               RegInfo->useFPForScavengingIndex(Fn) &&
                               !RegInfo->needsStackRealignment(Fn));
  if (RS && EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
           IE = SFIs.end(); I != IE; ++I)
      AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  // FIXME: Once this is working, then enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI->getUseLocalStackAllocationBlock()) {
    unsigned Align = MFI->getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = alignTo(Offset, Align, Skew);

    DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    for (unsigned i = 0, e = MFI->getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI->getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP["
                   << FIOffset << "]\n");
      MFI->setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block
    Offset += MFI->getLocalFrameSize();

    MaxAlign = std::max(Align, MaxAlign);
  }

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> ProtectedObjs;
  if (MFI->getStackProtectorIndex() >= 0) {
    StackObjSet LargeArrayObjs;
    StackObjSet SmallArrayObjs;
    StackObjSet AddrOfObjs;

    AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), StackGrowsDown,
                      Offset, MaxAlign, Skew);

    // Assign large stack objects first.
    for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
      if (MFI->isObjectPreAllocated(i) &&
          MFI->getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && RS->isScavengingFrameIndex((int)i))
        continue;
      if (MFI->isDeadObjectIndex(i))
        continue;
      if (MFI->getStackProtectorIndex() == (int)i)
        continue;

      switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
      case StackProtector::SSPLK_None:
        continue;
      case StackProtector::SSPLK_SmallArray:
        SmallArrayObjs.insert(i);
        continue;
      case StackProtector::SSPLK_AddrOf:
        AddrOfObjs.insert(i);
        continue;
      case StackProtector::SSPLK_LargeArray:
        LargeArrayObjs.insert(i);
        continue;
      }
      llvm_unreachable("Unexpected SSPLayoutKind.");
    }

    AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
  }

  SmallVector<int, 8> ObjectsToAllocate;

  int EHRegNodeFrameIndex = INT_MAX;
  if (const WinEHFuncInfo *FuncInfo = Fn.getWinEHFuncInfo())
    EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;

  // Then prepare to assign frame offsets to stack objects that are not used to
  // spill callee saved registers.
  for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
    if (MFI->isObjectPreAllocated(i) &&
        MFI->getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && RS->isScavengingFrameIndex((int)i))
      continue;
    if (MFI->isDeadObjectIndex(i))
      continue;
    if (MFI->getStackProtectorIndex() == (int)i)
      continue;
    if (EHRegNodeFrameIndex == (int)i)
      continue;
    if (ProtectedObjs.count(i))
      continue;

    // Add the objects that we need to allocate to our working set.
    ObjectsToAllocate.push_back(i);
  }

  // Allocate the EH registration node first if one is present.
  if (EHRegNodeFrameIndex != INT_MAX)
    AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
                      MaxAlign, Skew);

  // Give the targets a chance to order the objects the way they like it.
  if (Fn.getTarget().getOptLevel() != CodeGenOpt::None &&
      Fn.getTarget().Options.StackSymbolOrdering)
    TFI.orderFrameObjects(Fn, ObjectsToAllocate);

  // Now walk the objects and actually assign base offsets to them.
  for (auto &Object : ObjectsToAllocate)
    AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew);

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && !EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
           IE = SFIs.end(); I != IE; ++I)
      AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI->adjustsStack() && TFI.hasReservedCallFrame(Fn))
      Offset += MFI->getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment. If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    unsigned StackAlign;
    if (MFI->adjustsStack() || MFI->hasVarSizedObjects() ||
        (RegInfo->needsStackRealignment(Fn) && MFI->getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlignment();
    else
      StackAlign = TFI.getTransientStackAlignment();

    // If the frame pointer is eliminated, all frame offsets will be relative to
    // SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    Offset = alignTo(Offset, StackAlign, Skew);
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI->setStackSize(StackSize);
  NumBytesStackSpace += StackSize;
}

void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
    // SGPR register spill
    case AMDGPU::SI_SPILL_S512_SAVE:
    case AMDGPU::SI_SPILL_S256_SAVE:
    case AMDGPU::SI_SPILL_S128_SAVE:
    case AMDGPU::SI_SPILL_S64_SAVE:
    case AMDGPU::SI_SPILL_S32_SAVE: {
      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      unsigned SuperReg = MI->getOperand(0).getReg();
      bool IsKill = MI->getOperand(0).isKill();

      // SubReg carries the "Kill" flag when SubReg == SuperReg.
      unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill);
      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
        unsigned SubReg = getPhysRegSubReg(SuperReg,
                                           &AMDGPU::SGPR_32RegClass, i);
        struct SIMachineFunctionInfo::SpilledReg Spill =
            MFI->getSpilledReg(MF, Index, i);

        if (Spill.hasReg()) {
          BuildMI(*MBB, MI, DL,
                  TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
                  Spill.VGPR)
                  .addReg(SubReg, getKillRegState(IsKill))
                  .addImm(Spill.Lane);

          // FIXME: Since this spills to another register instead of an actual
          // frame index, we should delete the frame index when all references to
          // it are fixed.
        } else {
          // Spill SGPR to a frame index.
          // FIXME we should use S_STORE_DWORD here for VI.
          MachineInstrBuilder Mov =
              BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
                  .addReg(SubReg, SubKillState);

          // There could be undef components of a spilled super register.
          // TODO: Can we detect this and skip the spill?
          if (NumSubRegs > 1) {
            // The last implicit use of the SuperReg carries the "Kill" flag.
            unsigned SuperKillState = 0;
            if (i + 1 == e)
              SuperKillState |= getKillRegState(IsKill);
            Mov.addReg(SuperReg, RegState::Implicit | SuperKillState);
          }

          unsigned Size = FrameInfo->getObjectSize(Index);
          unsigned Align = FrameInfo->getObjectAlignment(Index);
          MachinePointerInfo PtrInfo
              = MachinePointerInfo::getFixedStack(*MF, Index);
          MachineMemOperand *MMO
              = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                         Size, Align);
          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_SAVE))
                  .addReg(TmpReg, RegState::Kill)         // src
                  .addFrameIndex(Index)                   // frame_idx
                  .addReg(MFI->getScratchRSrcReg())       // scratch_rsrc
                  .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
                  .addImm(i * 4)                          // offset
                  .addMemOperand(MMO);
        }
      }
      MI->eraseFromParent();
      break;
    }

    // SGPR register restore
    case AMDGPU::SI_SPILL_S512_RESTORE:
    case AMDGPU::SI_SPILL_S256_RESTORE:
    case AMDGPU::SI_SPILL_S128_RESTORE:
    case AMDGPU::SI_SPILL_S64_RESTORE:
    case AMDGPU::SI_SPILL_S32_RESTORE: {
      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
        unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                           &AMDGPU::SGPR_32RegClass, i);
        struct SIMachineFunctionInfo::SpilledReg Spill =
            MFI->getSpilledReg(MF, Index, i);

        if (Spill.hasReg()) {
          BuildMI(*MBB, MI, DL,
                  TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
                  SubReg)
                  .addReg(Spill.VGPR)
                  .addImm(Spill.Lane)
                  .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
        } else {
          // Restore SGPR from a stack slot.
          // FIXME: We should use S_LOAD_DWORD here for VI.
          unsigned Align = FrameInfo->getObjectAlignment(Index);
          unsigned Size = FrameInfo->getObjectSize(Index);

          MachinePointerInfo PtrInfo
              = MachinePointerInfo::getFixedStack(*MF, Index);

          MachineMemOperand *MMO = MF->getMachineMemOperand(
              PtrInfo, MachineMemOperand::MOLoad, Size, Align);

          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_RESTORE), TmpReg)
                  .addFrameIndex(Index)                   // frame_idx
                  .addReg(MFI->getScratchRSrcReg())       // scratch_rsrc
                  .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
                  .addImm(i * 4)                          // offset
                  .addMemOperand(MMO);
          BuildMI(*MBB, MI, DL,
                  TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg)
                  .addReg(TmpReg, RegState::Kill)
                  .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
        }
      }
      MI->eraseFromParent();
      break;
    }

    // VGPR register spill
    case AMDGPU::SI_SPILL_V512_SAVE:
    case AMDGPU::SI_SPILL_V256_SAVE:
    case AMDGPU::SI_SPILL_V128_SAVE:
    case AMDGPU::SI_SPILL_V96_SAVE:
    case AMDGPU::SI_SPILL_V64_SAVE:
    case AMDGPU::SI_SPILL_V32_SAVE:
      buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
            TII->getNamedOperand(*MI, AMDGPU::OpName::src),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
            FrameInfo->getObjectOffset(Index) +
            TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(), RS);
      MI->eraseFromParent();
      break;
    case AMDGPU::SI_SPILL_V32_RESTORE:
    case AMDGPU::SI_SPILL_V64_RESTORE:
    case AMDGPU::SI_SPILL_V96_RESTORE:
    case AMDGPU::SI_SPILL_V128_RESTORE:
    case AMDGPU::SI_SPILL_V256_RESTORE:
    case AMDGPU::SI_SPILL_V512_RESTORE: {
      buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
            TII->getNamedOperand(*MI, AMDGPU::OpName::dst),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
            FrameInfo->getObjectOffset(Index) +
            TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(), RS);
      MI->eraseFromParent();
      break;
    }
    default: {
      int64_t Offset = FrameInfo->getObjectOffset(Index);
      FIOp.ChangeToImmediate(Offset);
      if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
        unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
        BuildMI(*MBB, MI, MI->getDebugLoc(),
                TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
                .addImm(Offset);
        FIOp.ChangeToRegister(TmpReg, false, false, true);
      }
    }
  }
}