MachineBasicBlock::iterator
SILoadStoreOptimizer::findMatchingDSInst(MachineBasicBlock::iterator I,
        unsigned EltSize) {
    MachineBasicBlock::iterator E = I->getParent()->end();
    MachineBasicBlock::iterator MBBI = I;
    ++MBBI;

    // Guard against walking off the end of the block before dereferencing.
    if (MBBI == E || MBBI->getOpcode() != I->getOpcode())
        return E;

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
        return E;

    int AddrIdx =
        AMDGPU::getNamedOperandIdx(I->getOpcode(), AMDGPU::OpName::addr);
    const MachineOperand &AddrReg0 = I->getOperand(AddrIdx);
    const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

    // Check same base pointer. Be careful of subregisters, which can occur with
    // vectors of pointers.
    if (AddrReg0.getReg() == AddrReg1.getReg() &&
            AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
        int OffsetIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(),
                        AMDGPU::OpName::offset);
        unsigned Offset0 = I->getOperand(OffsetIdx).getImm() & 0xffff;
        unsigned Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;

        // Check both offsets fit in the reduced range.
        if (offsetsCanBeCombined(Offset0, Offset1, EltSize))
            return MBBI;
    }

    return E;
}
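The offsetsCanBeCombined helper used above is not shown in this excerpt. Here is a minimal sketch of the check it plausibly performs, assuming the merged DS read2/write2 encoding takes two 8-bit offsets in units of the element size; the names and limits are assumptions, not the verbatim in-tree code:

static bool offsetsCanBeCombined(unsigned Offset0, unsigned Offset1,
                                 unsigned EltSize) {
  // Identical offsets would access the same location twice; don't merge.
  if (Offset0 == Offset1)
    return false;

  // The merged instruction encodes offsets in units of EltSize, so both
  // offsets must be aligned to it.
  if ((Offset0 % EltSize != 0) || (Offset1 % EltSize != 0))
    return false;

  // After scaling, each offset must fit in an 8-bit field.
  // isUInt is from llvm/Support/MathExtras.h.
  unsigned EltOffset0 = Offset0 / EltSize;
  unsigned EltOffset1 = Offset1 / EltSize;
  return isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1);
}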
Example #2
void SystemZRegisterInfo::emitEpilogue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  SystemZMachineFunctionInfo *SystemZMFI =
    MF.getInfo<SystemZMachineFunctionInfo>();
  unsigned RetOpcode = MBBI->getOpcode();

  switch (RetOpcode) {
  case SystemZ::RET: break;  // These are ok
  default:
    assert(0 && "Can only insert epilog into returning blocks");
  }

  // Get the number of bytes to allocate from the FrameInfo.
  // Note that the area for callee-saved registers is already allocated, so we
  // need to 'undo' the stack movement.
  uint64_t StackSize =
    MFI->getStackSize() - SystemZMFI->getCalleeSavedFrameSize();
  uint64_t NumBytes = StackSize - TFI.getOffsetOfLocalArea();

  // Skip the final terminator instruction.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    --MBBI;
    if (!PI->getDesc().isTerminator())
      break;
  }

  // During emission of the callee-saved register restores, the stack frame was
  // not yet finalized (and thus the stack size was unknown). Adjust the offset
  // now that the full stack size is known.
  if (StackSize || MFI->hasCalls()) {
    assert((MBBI->getOpcode() == SystemZ::MOV64rmm ||
            MBBI->getOpcode() == SystemZ::MOV64rm) &&
           "Expected to see callee-save register restore code");
    assert(MF.getRegInfo().isPhysRegUsed(SystemZ::R15D) &&
           "Invalid stack frame calculation!");

    unsigned i = 0;
    MachineInstr &MI = *MBBI;
    while (!MI.getOperand(i).isImm()) {
      ++i;
      assert(i < MI.getNumOperands() && "Unexpected restore code!");
    }

    uint64_t Offset = NumBytes + MI.getOperand(i).getImm();
    // If the offset does not fit into the 20-bit signed displacement field, we
    // need to emit some additional code...
    if (Offset > 524287) {
      // Fold the displacement into load instruction as much as possible.
      NumBytes = Offset - 524287;
      Offset = 524287;
      emitSPUpdate(MBB, MBBI, NumBytes, TII);
    }

    MI.getOperand(i).ChangeToImmediate(Offset);
  }
}
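emitSPUpdate is called above but not defined in this excerpt. Below is a sketch of a plausible implementation that bumps R15D with immediate adds in encodable chunks; the opcode names and the index of the dead PSW operand are assumptions modeled on the old SystemZ backend, not verbatim code:

static void emitSPUpdate(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator &MBBI,
                         int64_t NumBytes, const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;

  // Pick an add-immediate form wide enough for the offset.
  unsigned Opc;
  uint64_t Chunk;
  if (Offset >= (1LL << 15) - 1) {
    Opc = SystemZ::ADD64ri32;
    Chunk = (1LL << 31) - 1;
  } else {
    Opc = SystemZ::ADD64ri16;
    Chunk = (1LL << 15) - 1;
  }

  DebugLoc DL = (MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc());

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), SystemZ::R15D)
      .addReg(SystemZ::R15D)
      .addImm(isSub ? -(int64_t)ThisVal : (int64_t)ThisVal);
    // The add clobbers the condition code; mark the implicit PSW def dead.
    MI->getOperand(3).setIsDead();
    Offset -= ThisVal;
  }
}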
Example #3
unsigned
XCoreInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!IsBRU(I->getOpcode()) && !IsCondBranch(I->getOpcode()))
    return 0;
  
  // Remove the branch.
  I->eraseFromParent();
  
  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!IsCondBranch(I->getOpcode()))
    return 1;
  
  // Remove the branch.
  I->eraseFromParent();
  return 2;
}
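IsBRU and IsCondBranch are file-local opcode predicates in the XCore backend. They look roughly like the following; treat this as a sketch of the idea rather than the exhaustive in-tree opcode lists:

static inline bool IsBRU(unsigned BrOpc) {
  // Unconditional relative branches: forward/backward, short/long immediate.
  return BrOpc == XCore::BRFU_u6 || BrOpc == XCore::BRFU_lu6 ||
         BrOpc == XCore::BRBU_u6 || BrOpc == XCore::BRBU_lu6;
}

static inline bool IsCondBranch(unsigned BrOpc) {
  // Conditional branches: branch-if-true and branch-if-false forms.
  return BrOpc == XCore::BRFT_ru6 || BrOpc == XCore::BRFT_lru6 ||
         BrOpc == XCore::BRBT_ru6 || BrOpc == XCore::BRBT_lru6 ||
         BrOpc == XCore::BRFF_ru6 || BrOpc == XCore::BRFF_lru6 ||
         BrOpc == XCore::BRBF_ru6 || BrOpc == XCore::BRBF_lru6;
}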
MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator Update,
                                     bool IsPreIdx) {
  assert((Update->getOpcode() == AArch64::ADDXri ||
          Update->getOpcode() == AArch64::SUBXri) &&
         "Unexpected base register update instruction to merge!");
  MachineBasicBlock::iterator NextI = I;
  // Return the instruction following the merged instruction, which is
  // the instruction following our unmerged load. Unless that's the add/sub
  // instruction we're merging, in which case it's the one after that.
  if (++NextI == Update)
    ++NextI;

  int Value = Update->getOperand(2).getImm();
  assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
         "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
  if (Update->getOpcode() == AArch64::SUBXri)
    Value = -Value;

  unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
                             : getPostIndexedOpcode(I->getOpcode());
  MachineInstrBuilder MIB;
  if (!isPairedLdSt(I)) {
    // Non-paired instruction.
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .addOperand(getLdStRegOp(Update))
              .addOperand(getLdStRegOp(I))
              .addOperand(getLdStBaseOp(I))
              .addImm(Value);
  } else {
    // Paired instruction.
    int Scale = getMemScale(I);
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .addOperand(getLdStRegOp(Update))
              .addOperand(getLdStRegOp(I, 0))
              .addOperand(getLdStRegOp(I, 1))
              .addOperand(getLdStBaseOp(I))
              .addImm(Value / Scale);
  }
  (void)MIB;

  if (IsPreIdx)
    DEBUG(dbgs() << "Creating pre-indexed load/store.");
  else
    DEBUG(dbgs() << "Creating post-indexed load/store.");
  DEBUG(dbgs() << "    Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Update->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions for the block.
  I->eraseFromParent();
  Update->eraseFromParent();

  return NextI;
}
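getPreIndexedOpcode (and its post-indexed twin) is a plain opcode table. An abbreviated sketch follows; only a few representative cases are shown, and the real switch covers every mergeable load/store opcode:

static unsigned getPreIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRXui: // str Xt, [Xn, #imm]  ->  str Xt, [Xn, #imm]!
    return AArch64::STRXpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  // ... one case per mergeable load/store opcode ...
  }
}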
Example #5
unsigned NVPTXInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return 0;
  --I;
  if (I->getOpcode() != NVPTX::GOTO && I->getOpcode() != NVPTX::CBranch)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (I->getOpcode() != NVPTX::CBranch)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}
Example #6
unsigned StackColoring::collectMarkers(unsigned NumSlot) {
  unsigned MarkersFound = 0;
  // Scan the function to find all lifetime markers.
  // NOTE: We use a reverse-post-order iteration to ensure that we obtain a
  // deterministic numbering, and because we'll need a post-order iteration
  // later for solving the liveness dataflow problem.
  for (df_iterator<MachineFunction*> FI = df_begin(MF), FE = df_end(MF);
       FI != FE; ++FI) {

    // Assign a serial number to this basic block.
    BasicBlocks[*FI] = BasicBlockNumbering.size();
    BasicBlockNumbering.push_back(*FI);

    // Keep a reference to avoid repeated lookups.
    BlockLifetimeInfo &BlockInfo = BlockLiveness[*FI];

    BlockInfo.Begin.resize(NumSlot);
    BlockInfo.End.resize(NumSlot);

    for (MachineBasicBlock::iterator BI = (*FI)->begin(), BE = (*FI)->end();
         BI != BE; ++BI) {

      if (BI->getOpcode() != TargetOpcode::LIFETIME_START &&
          BI->getOpcode() != TargetOpcode::LIFETIME_END)
        continue;

      Markers.push_back(BI);

      bool IsStart = BI->getOpcode() == TargetOpcode::LIFETIME_START;
      const MachineOperand &MO = BI->getOperand(0);
      unsigned Slot = MO.getIndex();

      MarkersFound++;

      const AllocaInst *Allocation = MFI->getObjectAllocation(Slot);
      if (Allocation) {
        DEBUG(dbgs() << "Found a lifetime marker for slot #" << Slot
                     << " with allocation: " << Allocation->getName() << "\n");
      }

      if (IsStart) {
        BlockInfo.Begin.set(Slot);
      } else {
        if (BlockInfo.Begin.test(Slot)) {
          // Allocas that start and end within a single block are handled
          // specially when computing the LiveIntervals to avoid pessimizing
          // the liveness propagation.
          BlockInfo.Begin.reset(Slot);
        } else {
          BlockInfo.End.set(Slot);
        }
      }
    }
  }

  // Update statistics.
  NumMarkerSeen += MarkersFound;
  return MarkersFound;
}
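For reference, the per-block record that BlockLiveness maps to has roughly this shape; the field names are inferred from the uses above, and LiveIn/LiveOut are filled in later by the dataflow solver:

struct BlockLifetimeInfo {
  // Slots that have a LIFETIME_START in this block.
  BitVector Begin;
  // Slots that have a LIFETIME_END in this block.
  BitVector End;
  // Slots live on entry to / exit from this block; computed later when
  // solving the liveness dataflow problem. (BitVector is llvm/ADT/BitVector.h.)
  BitVector LiveIn;
  BitVector LiveOut;
};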
Example #7
void XCoreFrameInfo::emitEpilogue(MachineFunction &MF,
                                  MachineBasicBlock &MBB) const {
  MachineFrameInfo *MFI            = MF.getFrameInfo();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  const XCoreInstrInfo &TII =
    *static_cast<const XCoreInstrInfo*>(MF.getTarget().getInstrInfo());
  DebugLoc dl = MBBI->getDebugLoc();

  bool FP = hasFP(MF);
  if (FP) {
    // Restore the stack pointer.
    unsigned FramePtr = XCore::R10;
    BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r))
      .addReg(FramePtr);
  }

  // Work out frame sizes.
  int FrameSize = MFI->getStackSize();

  assert(FrameSize % 4 == 0 && "Misaligned frame size");

  FrameSize /= 4;

  bool isU6 = isImmU6(FrameSize);

  if (!isU6 && !isImmU16(FrameSize)) {
    // FIXME could emit multiple instructions.
    report_fatal_error("emitEpilogue Frame size too big: " + Twine(FrameSize));
  }

  if (FrameSize) {
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

    if (FP) {
      // Restore R10
      int FPSpillOffset = MFI->getObjectOffset(XFI->getFPSpillSlot());
      FPSpillOffset += FrameSize*4;
      loadFromStack(MBB, MBBI, XCore::R10, FPSpillOffset, dl, TII);
    }
    bool restoreLR = XFI->getUsesLR();
    if (restoreLR && MFI->getObjectOffset(XFI->getLRSpillSlot()) != 0) {
      int LRSpillOffset = MFI->getObjectOffset(XFI->getLRSpillSlot());
      LRSpillOffset += FrameSize*4;
      loadFromStack(MBB, MBBI, XCore::LR, LRSpillOffset, dl, TII);
      restoreLR = false;
    }
    if (restoreLR) {
      // Fold the stack-pointer restore into the return instruction.
      assert(MBBI->getOpcode() == XCore::RETSP_u6
        || MBBI->getOpcode() == XCore::RETSP_lu6);
      int Opcode = (isU6) ? XCore::RETSP_u6 : XCore::RETSP_lu6;
      BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize);
      MBB.erase(MBBI);
    } else {
      int Opcode = (isU6) ? XCore::LDAWSP_ru6_RRegs : XCore::LDAWSP_lru6_RRegs;
      BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(FrameSize);
    }
  }
}
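The isImmU6/isImmU16 predicates used above are presumably simple range checks on unsigned immediates, along these lines:

static inline bool isImmU6(unsigned val) {
  return val < (1 << 6);
}

static inline bool isImmU16(unsigned val) {
  return val < (1 << 16);
}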
Example #8
MachineBasicBlock::iterator
Filler::findDelayInstr(MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator slot)
{
  SmallSet<unsigned, 32> RegDefs;
  SmallSet<unsigned, 32> RegUses;
  bool sawLoad = false;
  bool sawStore = false;

  MachineBasicBlock::iterator I = slot;

  if (slot->getOpcode() == SP::RET)
    return MBB.end();

  if (slot->getOpcode() == SP::RETL) {
    --I;
    if (I->getOpcode() != SP::RESTORErr)
      return MBB.end();
    // Change retl to ret.
    slot->setDesc(TII->get(SP::RET));
    return I;
  }

  // A call's delay filler can def some of the call's uses.
  if (slot->getDesc().isCall())
    insertCallUses(slot, RegUses);
  else
    insertDefsUses(slot, RegDefs, RegUses);

  bool done = false;

  while (!done) {
    done = (I == MBB.begin());

    if (!done)
      --I;

    // Skip debug values.
    if (I->isDebugValue())
      continue;

    if (I->hasUnmodeledSideEffects()
        || I->isInlineAsm()
        || I->isLabel()
        || I->getDesc().hasDelaySlot()
        || isDelayFiller(MBB, I))
      break;

    if (delayHasHazard(I, sawLoad, sawStore, RegDefs, RegUses)) {
      insertDefsUses(I, RegDefs, RegUses);
      continue;
    }

    return I;
  }
  return MBB.end();
}
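delayHasHazard, insertDefsUses and insertCallUses are not shown in this excerpt. Here is a simplified sketch of the hazard check; it ignores register aliases, which a real delay-slot filler must also consider, so treat it as illustrative only:

bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
                            bool &sawLoad, bool &sawStore,
                            SmallSet<unsigned, 32> &RegDefs,
                            SmallSet<unsigned, 32> &RegUses) {
  if (candidate->isImplicitDef() || candidate->isKill())
    return true;

  // Loads and stores cannot be freely reordered past each other.
  if (candidate->mayLoad()) {
    sawLoad = true;
    if (sawStore)
      return true;
  }
  if (candidate->mayStore()) {
    if (sawStore)
      return true;
    sawStore = true;
    if (sawLoad)
      return true;
  }

  for (unsigned i = 0, e = candidate->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = candidate->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    // Defining a register the slot instruction defines or uses, or using a
    // register it defines, is a hazard.
    if (MO.isDef() && (RegDefs.count(Reg) || RegUses.count(Reg)))
      return true;
    if (MO.isUse() && RegDefs.count(Reg))
      return true;
  }
  return false;
}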
void HexagonFrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = std::prev(MBB.end());
  DebugLoc dl = MBBI->getDebugLoc();
  //
  // Only insert deallocframe if we need to.  Also at -O0.  See comment
  // in emitPrologue above.
  //
  if (hasFP(MF) || MF.getTarget().getOptLevel() == CodeGenOpt::None) {
    MachineBasicBlock::iterator MBBI_end = MBB.end();

    const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
    // Handle EH_RETURN.
    if (MBBI->getOpcode() == Hexagon::EH_RETURN_JMPR) {
      assert(MBBI->getOperand(0).isReg() && "Offset should be in register!");
      BuildMI(MBB, MBBI, dl, TII.get(Hexagon::L2_deallocframe));
      BuildMI(MBB, MBBI, dl, TII.get(Hexagon::A2_add),
              Hexagon::R29).addReg(Hexagon::R29).addReg(Hexagon::R28);
      return;
    }
    // Replace 'jumpr r31' instruction with dealloc_return for V4 and higher
    // versions.
    if (MF.getSubtarget<HexagonSubtarget>().hasV4TOps() &&
        MBBI->getOpcode() == Hexagon::JMPret && !DisableDeallocRet) {
      // Check for RESTORE_DEALLOC_RET_JMP_V4 call. Don't emit an extra DEALLOC
      // instruction if we encounter it.
      MachineBasicBlock::iterator BeforeJMPR =
        MBB.begin() == MBBI ? MBBI : std::prev(MBBI);
      if (BeforeJMPR != MBBI &&
          BeforeJMPR->getOpcode() == Hexagon::RESTORE_DEALLOC_RET_JMP_V4) {
        // Remove the JMPR node.
        MBB.erase(MBBI);
        return;
      }

      // Add dealloc_return.
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI_end, dl, TII.get(Hexagon::L4_return));
      // Transfer the function live-out registers.
      MIB->copyImplicitOps(*MBB.getParent(), &*MBBI);
      // Remove the JUMPR node.
      MBB.erase(MBBI);
    } else { // Add deallocframe for V2 and V3, and V4 tail calls.
      // Check for RESTORE_DEALLOC_BEFORE_TAILCALL_V4. We don't need an extra
      // DEALLOCFRAME instruction after it.
      MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
      MachineBasicBlock::iterator I =
        Term == MBB.begin() ?  MBB.end() : std::prev(Term);
      if (I != MBB.end() &&
          I->getOpcode() == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4)
        return;

      BuildMI(MBB, MBBI, dl, TII.get(Hexagon::L2_deallocframe));
    }
  }
}
// Finds the compare instruction that corresponds to supported types of
// branching. Returns the instruction, or nullptr on failure or when an
// unsupported instruction is detected.
MachineInstr *AArch64ConditionOptimizer::findSuitableCompare(
    MachineBasicBlock *MBB) {
  MachineBasicBlock::iterator I = MBB->getFirstTerminator();
  if (I == MBB->end())
    return nullptr;

  if (I->getOpcode() != AArch64::Bcc)
    return nullptr;

  // Now find the instruction controlling the terminator.
  for (MachineBasicBlock::iterator B = MBB->begin(); I != B;) {
    --I;
    assert(!I->isTerminator() && "Spurious terminator");
    switch (I->getOpcode()) {
    // cmp is an alias for subs with a dead destination register.
    case AArch64::SUBSWri:
    case AArch64::SUBSXri:
    // cmn is an alias for adds with a dead destination register.
    case AArch64::ADDSWri:
    case AArch64::ADDSXri:
      if (MRI->use_empty(I->getOperand(0).getReg()))
        return I;

      DEBUG(dbgs() << "Destination of cmp is not dead, " << *I << '\n');
      return nullptr;

    // Prevent false positive case like:
    // cmp      w19, #0
    // cinc     w0, w19, gt
    // ...
    // fcmp     d8, #0.0
    // b.gt     .LBB0_5
    case AArch64::FCMPDri:
    case AArch64::FCMPSri:
    case AArch64::FCMPESri:
    case AArch64::FCMPEDri:

    case AArch64::SUBSWrr:
    case AArch64::SUBSXrr:
    case AArch64::ADDSWrr:
    case AArch64::ADDSXrr:
    case AArch64::FCMPSrr:
    case AArch64::FCMPDrr:
    case AArch64::FCMPESrr:
    case AArch64::FCMPEDrr:
      // Skip comparison instructions without immediate operands.
      return nullptr;
    }
  }
  DEBUG(dbgs() << "Flags not defined in BB#" << MBB->getNumber() << '\n');
  return nullptr;
}
Example #11
/// calculateCallsInformation - Calculate the MaxCallFrameSize and AdjustsStack
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallsInformation(MachineFunction &Fn) {
    const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo();
    const TargetInstrInfo &TII = *Fn.getTarget().getInstrInfo();
    const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering();
    MachineFrameInfo *MFI = Fn.getFrameInfo();

    unsigned MaxCallFrameSize = 0;
    bool AdjustsStack = MFI->adjustsStack();

    // Get the function call frame set-up and tear-down instruction opcode
    int FrameSetupOpcode   = TII.getCallFrameSetupOpcode();
    int FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

    // Early exit for targets which have no call frame setup/destroy pseudo
    // instructions.
    if (FrameSetupOpcode == -1 && FrameDestroyOpcode == -1)
        return;

    std::vector<MachineBasicBlock::iterator> FrameSDOps;
    for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB)
        for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
            if (I->getOpcode() == FrameSetupOpcode ||
                    I->getOpcode() == FrameDestroyOpcode) {
                assert(I->getNumOperands() >= 1 && "Call Frame Setup/Destroy Pseudo"
                       " instructions should have a single immediate argument!");
                unsigned Size = I->getOperand(0).getImm();
                if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
                AdjustsStack = true;
                FrameSDOps.push_back(I);
            } else if (I->isInlineAsm()) {
                // Some inline asm's need a stack frame, as indicated by operand 1.
                unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
                if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
                    AdjustsStack = true;
            }

    MFI->setAdjustsStack(AdjustsStack);
    MFI->setMaxCallFrameSize(MaxCallFrameSize);

    for (std::vector<MachineBasicBlock::iterator>::iterator
            i = FrameSDOps.begin(), e = FrameSDOps.end(); i != e; ++i) {
        MachineBasicBlock::iterator I = *i;

        // If call frames are not being included as part of the stack frame, and
        // the target doesn't indicate otherwise, remove the call frame pseudos
        // here. The sub/add sp instruction pairs are still inserted, but we don't
        // need to track the SP adjustment for frame index elimination.
        if (TFI->canSimplifyCallFramePseudos(Fn))
            RegInfo->eliminateCallFramePseudoInstr(Fn, *I->getParent(), I);
    }
}
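The setup/destroy opcodes queried here are wired up per target in the TargetInstrInfo constructor. The AVR backend shown in Example #19 below does it like this, for instance (a one-line sketch, from memory):

AVRInstrInfo::AVRInstrInfo()
    : AVRGenInstrInfo(AVR::ADJCALLSTACKDOWN, AVR::ADJCALLSTACKUP), RI() {}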
unsigned EpiphanyInstrInfo::estimateRSStackLimit(MachineFunction &MF) const {
    unsigned Limit = (1 << 16) - 1;
    for (MachineFunction::iterator BB = MF.begin(),E = MF.end(); BB != E; ++BB) {
        for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
                I != E; ++I) {
            for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
                if (!I->getOperand(i).isFI()) continue;

                // When using ADDri to get the address of a stack object, 0x3FF
                // is the largest offset guaranteed to fit in the immediate field.
                if (I->getOpcode() == Epiphany::ADDri) {
                    Limit = std::min(Limit, 0x3FFu);
                    break;
                }

                int AccessScale, MinOffset, MaxOffset;
                getAddressConstraints(*I, AccessScale, MinOffset, MaxOffset);
                Limit = std::min(Limit, static_cast<unsigned>(MaxOffset));

                break; // At most one FI per instruction
            }
        }
    }

    return Limit;
}
/// \brief Replace loop instructions with the constant extended version.
void HexagonFixupHwLoops::useExtLoopInstr(MachineFunction &MF,
                                          MachineBasicBlock::iterator &MII) {
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  MachineBasicBlock *MBB = MII->getParent();
  DebugLoc DL = MII->getDebugLoc();
  MachineInstrBuilder MIB;
  unsigned newOp;
  switch (MII->getOpcode()) {
  case Hexagon::J2_loop0r:
    newOp = Hexagon::J2_loop0rext;
    break;
  case Hexagon::J2_loop0i:
    newOp = Hexagon::J2_loop0iext;
    break;
  case Hexagon::J2_loop1r:
    newOp = Hexagon::J2_loop1rext;
    break;
  case Hexagon::J2_loop1i:
    newOp = Hexagon::J2_loop1iext;
    break;
  default:
    llvm_unreachable("Invalid Hardware Loop Instruction.");
  }
  MIB = BuildMI(*MBB, MII, DL, TII->get(newOp));

  for (unsigned i = 0; i < MII->getNumOperands(); ++i)
    MIB.add(MII->getOperand(i));
}
void VectorProcFrameLowering::emitEpilogue(MachineFunction &MF,
                                           MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const VectorProcInstrInfo &TII =
      *static_cast<const VectorProcInstrInfo *>(MF.getTarget().getInstrInfo());
  DebugLoc DL = MBBI->getDebugLoc();
  assert(MBBI->getOpcode() == VectorProc::RET &&
         "Can only put epilogue before 'ret' instruction!");

  // if framepointer enabled, restore the stack pointer.
  if (hasFP(MF)) {
    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;
    for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
      --I;

    BuildMI(MBB, I, DL, TII.get(VectorProc::MOVESS))
        .addReg(VectorProc::SP_REG)
        .addReg(VectorProc::FP_REG);
  }

  uint64_t StackSize =
      RoundUpToAlignment(MFI->getStackSize(), getStackAlignment());
  if (!StackSize)
    return;

  TII.adjustStackPointer(MBB, MBBI, StackSize);
}
SPScope::SPScope(SPScope *parent, MachineLoop &loop)
    : Parent(parent), FCFG(loop.getHeader()),
      RootTopLevel(false), LoopBound(-1) {

    assert(parent);
    MachineBasicBlock *header = loop.getHeader();

    // add to parent's child list
    Parent->HeaderMap[header] = this;
    Parent->Subscopes.push_back(this);
    // add to parent's block list as well
    Parent->addMBB(header);
    Depth = Parent->Depth + 1;

    // add header also to this SPScope's block list
    Blocks.push_back(header);

    // info about loop latches and exit edges
    loop.getLoopLatches(Latches);
    loop.getExitEdges(ExitEdges);

    // scan the header for loopbound info
    for (MachineBasicBlock::iterator MI = header->begin(), ME = header->end();
            MI != ME; ++MI) {
        if (MI->getOpcode() == Patmos::PSEUDO_LOOPBOUND) {
            // max is the second operand (idx 1)
            LoopBound = MI->getOperand(1).getImm() + 1;
            break;
        }
    }
}
Example #16
bool AMDGPUInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  int OffsetOpIdx =
      AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::addr);
  // addr is a custom operand with multiple MI operands, and only the
  // first MI operand is given a name.
  int RegOpIdx = OffsetOpIdx + 1;
  int ChanOpIdx =
      AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::chan);

  if (isRegisterLoad(*MI)) {
    int DstOpIdx =
        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                    getIndirectAddrRegClass()->getRegister(Address));
    } else {
      buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                        Address, OffsetReg);
    }
  } else if (isRegisterStore(*MI)) {
    int ValOpIdx =
        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::val);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                    MI->getOperand(ValOpIdx).getReg());
    } else {
      buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
                         Address, OffsetReg);
    }
  } else {
    return false;
  }

  MBB->erase(MI);
  return true;
}
Example #17
// We have identified that this II could feed a new-value jump (NVJ);
// verify that it can.
static bool canBeFeederToNewValueJump(const HexagonInstrInfo *QII,
                                      const TargetRegisterInfo *TRI,
                                      MachineBasicBlock::iterator II,
                                      MachineBasicBlock::iterator end,
                                      MachineBasicBlock::iterator skip,
                                      MachineFunction &MF) {

  // A predicated instruction cannot feed an NVJ.
  if (QII->isPredicated(*II))
    return false;

  // Bail out if feederReg is a paired register (a double register in our
  // case). One might think we could check whether a given register, cmpReg1
  // or cmpReg2, is a sub-register of feederReg with logic like
  // QRI->isSubRegister(feederReg, cmpReg1) before the call site of this
  // function. But we cannot, because the code comes in the following fashion:
  //    %D0<def> = Hexagon_S2_lsr_r_p %D0<kill>, %R2<kill>
  //    %R0<def> = KILL %R0, %D0<imp-use,kill>
  //    %P0<def> = CMPEQri %R0<kill>, 0
  // Hence, we need to check whether it's a KILL instruction.
  if (II->getOpcode() == TargetOpcode::KILL)
    return false;


  // Make sure there is no 'def' or 'use' of any of the feeder instruction's
  // uses between its definition (this MI) and the jump (jmpInst), skipping
  // the compare (cmpInst). Here's an example:
  //    r21=memub(r22+r24<<#0)
  //    p0 = cmp.eq(r21, #0)
  //    r4=memub(r3+r21<<#0)
  //    if (p0.new) jump:t .LBB29_45
  // Without this check, it will be converted into
  //    r4=memub(r3+r21<<#0)
  //    r21=memub(r22+r24<<#0)
  //    p0 = cmp.eq(r21, #0)
  //    if (p0.new) jump:t .LBB29_45
  // which would create WAR hazards if converted to a new-value jump.

  for (unsigned i = 0; i < II->getNumOperands(); ++i) {
    if (II->getOperand(i).isReg() &&
        (II->getOperand(i).isUse() || II->getOperand(i).isDef())) {
      MachineBasicBlock::iterator localII = II;
      ++localII;
      unsigned Reg = II->getOperand(i).getReg();
      for (MachineBasicBlock::iterator localBegin = localII;
                        localBegin != end; ++localBegin) {
        if (localBegin == skip) continue;
        // Check for Subregisters too.
        if (localBegin->modifiesRegister(Reg, TRI) ||
            localBegin->readsRegister(Reg, TRI))
          return false;
      }
    }
  }
  return true;
}
Example #18
// Return true if the candidate is a delay filler.
bool Filler::isDelayFiller(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator candidate)
{
  if (candidate == MBB.begin())
    return false;
  if (candidate->getOpcode() == SP::UNIMP)
    return true;
  const TargetInstrDesc &prevdesc = (--candidate)->getDesc();
  return prevdesc.hasDelaySlot();
}
Example #19
MachineBasicBlock::iterator AVRFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MI) const {
  const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>();
  const TargetFrameLowering &TFI = *STI.getFrameLowering();
  const AVRInstrInfo &TII = *STI.getInstrInfo();

  // There is nothing to insert when the call frame memory is allocated during
  // function entry. Delete the call frame pseudo and replace all pseudo stores
  // with real store instructions.
  if (TFI.hasReservedCallFrame(MF)) {
    fixStackStores(MBB, MI, TII, false);
    return MBB.erase(MI);
  }

  DebugLoc DL = MI->getDebugLoc();
  unsigned int Opcode = MI->getOpcode();
  int Amount = MI->getOperand(0).getImm();

  // ADJCALLSTACKDOWN does not need to allocate stack space for the call;
  // instead we insert push instructions that will allocate the necessary
  // stack. For ADJCALLSTACKUP we convert it into an 'adiw reg, <amt>',
  // handling the read and write of SP in I/O space.
  if (Amount != 0) {
    assert(TFI.getStackAlignment() == 1 && "Unsupported stack alignment");

    if (Opcode == TII.getCallFrameSetupOpcode()) {
      fixStackStores(MBB, MI, TII, true);
    } else {
      assert(Opcode == TII.getCallFrameDestroyOpcode());

      // Select the best opcode to adjust SP based on the offset size.
      unsigned addOpcode;
      if (isUInt<6>(Amount)) {
        addOpcode = AVR::ADIWRdK;
      } else {
        addOpcode = AVR::SUBIWRdK;
        Amount = -Amount;
      }

      // Build the instruction sequence.
      BuildMI(MBB, MI, DL, TII.get(AVR::SPREAD), AVR::R31R30).addReg(AVR::SP);

      MachineInstr *New = BuildMI(MBB, MI, DL, TII.get(addOpcode), AVR::R31R30)
                              .addReg(AVR::R31R30, RegState::Kill)
                              .addImm(Amount);
      New->getOperand(3).setIsDead();

      BuildMI(MBB, MI, DL, TII.get(AVR::SPWRITE), AVR::SP)
          .addReg(AVR::R31R30, RegState::Kill);
    }
  }

  return MBB.erase(MI);
}
Example #20
void SparcFrameLowering::emitEpilogue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  const SparcInstrInfo &TII =
    *static_cast<const SparcInstrInfo*>(MF.getTarget().getInstrInfo());
  DebugLoc dl = MBBI->getDebugLoc();
  assert(MBBI->getOpcode() == SP::RETL &&
         "Can only put epilog before 'retl' instruction!");
  BuildMI(MBB, MBBI, dl, TII.get(SP::RESTORErr), SP::G0).addReg(SP::G0)
    .addReg(SP::G0);
}
Example #21
void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
                                           unsigned LoadStoreOp,
                                           unsigned Value,
                                           unsigned ScratchRsrcReg,
                                           unsigned ScratchOffset,
                                           int64_t Offset,
                                           RegScavenger *RS) const {

  MachineBasicBlock *MBB = MI->getParent();
  const MachineFunction *MF = MI->getParent()->getParent();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  LLVMContext &Ctx = MF->getFunction()->getContext();
  DebugLoc DL = MI->getDebugLoc();
  bool IsLoad = TII->get(LoadStoreOp).mayLoad();

  bool RanOutOfSGPRs = false;
  unsigned SOffset = ScratchOffset;

  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
  unsigned Size = NumSubRegs * 4;

  if (!isUInt<12>(Offset + Size)) {
    SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
    if (SOffset == AMDGPU::NoRegister) {
      RanOutOfSGPRs = true;
      SOffset = AMDGPU::SGPR0;
    }
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
            .addReg(ScratchOffset)
            .addImm(Offset);
    Offset = 0;
  }

  if (RanOutOfSGPRs)
    Ctx.emitError("Ran out of SGPRs for spilling VGPRs");

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
    unsigned SubReg = NumSubRegs > 1 ?
        getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
        Value;

    BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
      .addReg(SubReg, getDefRegState(IsLoad))
      .addReg(ScratchRsrcReg)
      .addReg(SOffset)
      .addImm(Offset)
      .addImm(0) // glc
      .addImm(0) // slc
      .addImm(0) // tfe
      .addReg(Value, RegState::Implicit | getDefRegState(IsLoad))
      .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  }
}
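getNumSubRegsForSpillOp maps each spill pseudo-opcode to the number of 32-bit sub-registers it covers. An abbreviated sketch, with the middle cases elided:

static unsigned getNumSubRegsForSpillOp(unsigned Op) {
  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16; // 512 bits = sixteen 32-bit sub-registers.
  // ... cases for the 256-, 128-, and 64-bit spill pseudos elided ...
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default:
    llvm_unreachable("Invalid spill opcode");
  }
}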
Example #22
void SystemZFrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  const SystemZInstrInfo *ZII =
    static_cast<const SystemZInstrInfo*>(MF.getTarget().getInstrInfo());
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();

  // Skip the return instruction.
  assert(MBBI->getOpcode() == SystemZ::RET &&
         "Can only insert epilogue into returning blocks");

  uint64_t StackSize = getAllocatedStackSize(MF);
  if (ZFI->getLowSavedGPR()) {
    --MBBI;
    unsigned Opcode = MBBI->getOpcode();
    if (Opcode != SystemZ::LMG)
      llvm_unreachable("Expected to see callee-save register restore code");

    unsigned AddrOpNo = 2;
    DebugLoc DL = MBBI->getDebugLoc();
    uint64_t Offset = StackSize + MBBI->getOperand(AddrOpNo + 1).getImm();
    unsigned NewOpcode = ZII->getOpcodeForOffset(Opcode, Offset);

    // If the offset is too large, use the largest stack-aligned offset
    // and add the rest to the base register (the stack or frame pointer).
    if (!NewOpcode) {
      uint64_t NumBytes = Offset - 0x7fff8;
      emitIncrement(MBB, MBBI, DL, MBBI->getOperand(AddrOpNo).getReg(),
                    NumBytes, ZII);
      Offset -= NumBytes;
      NewOpcode = ZII->getOpcodeForOffset(Opcode, Offset);
      assert(NewOpcode && "No restore instruction available");
    }

    MBBI->setDesc(ZII->get(NewOpcode));
    MBBI->getOperand(AddrOpNo + 1).ChangeToImmediate(Offset);
  } else if (StackSize) {
    DebugLoc DL = MBBI->getDebugLoc();
    emitIncrement(MBB, MBBI, DL, SystemZ::R15D, StackSize, ZII);
  }
}
void WebAssemblyFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
  DebugLoc DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  unsigned Amount = I->getOperand(0).getImm();
  if (Amount)
    adjustStackPointer(Amount, IsDestroy, MF, MBB,
                       TII, I, DL);
  MBB.erase(I);
}
Example #24
// These are the common checks that need to be performed to determine whether:
// 1. the compare instruction can be moved before the jump.
// 2. the feeder to the compare instruction can be moved before the jump.
static bool commonChecksToProhibitNewValueJump(bool afterRA,
                          MachineBasicBlock::iterator MII) {

  // If a store is in the path, bail out.
  if (MII->getDesc().mayStore())
    return false;

  // If a call is in the path, bail out.
  if (MII->getOpcode() == Hexagon::J2_call)
    return false;

  // If NVJ is run prior to RA, do the following checks.
  if (!afterRA) {
    // The following target opcodes are spurious to the new-value jump. If
    // they are in the path, bail out.
    // KILL sets the kill flag on the opcode. It also sets up a
    // single register, out of a pair.
    //    %D0<def> = Hexagon_S2_lsr_r_p %D0<kill>, %R2<kill>
    //    %R0<def> = KILL %R0, %D0<imp-use,kill>
    //    %P0<def> = CMPEQri %R0<kill>, 0
    // PHI can be anything after RA.
    // COPY can rematerialize things in between the feeder, compare and NVJ.
    if (MII->getOpcode() == TargetOpcode::KILL ||
        MII->getOpcode() == TargetOpcode::PHI  ||
        MII->getOpcode() == TargetOpcode::COPY)
      return false;

    // The following pseudo Hexagon instructions have the "use" and "def"
    // of registers set by individual passes in the backend. At this time,
    // we don't know the scope of usage and definitions of these
    // instructions.
    if (MII->getOpcode() == Hexagon::LDriw_pred     ||
        MII->getOpcode() == Hexagon::STriw_pred)
      return false;
  }

  return true;
}
MachineBasicBlock::iterator
WebAssemblyFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
    assert(!I->getOperand(0).getImm() && hasFP(MF) &&
           "Call frame pseudos should only be used for dynamic stack adjustment");
    const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
    if (I->getOpcode() == TII->getCallFrameDestroyOpcode() &&
            needsSPWriteback(MF, MF.getFrameInfo())) {
        DebugLoc DL = I->getDebugLoc();
        writeSPToMemory(WebAssembly::SP32, MF, MBB, I, I, DL);
    }
    return MBB.erase(I);
}
void
Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  if (!AFI->hasITBlocks() || Tail->isBranch()) {
    TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);
    return;
  }

  // If the first instruction of Tail is predicated, we may have to update
  // the IT instruction.
  unsigned PredReg = 0;
  ARMCC::CondCodes CC = getInstrPredicate(*Tail, PredReg);
  MachineBasicBlock::iterator MBBI = Tail;
  if (CC != ARMCC::AL)
    // Expecting at least the t2IT instruction before it.
    --MBBI;

  // Actually replace the tail.
  TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);

  // Fix up IT.
  if (CC != ARMCC::AL) {
    MachineBasicBlock::iterator E = MBB->begin();
    unsigned Count = 4; // At most 4 instructions in an IT block.
    while (Count && MBBI != E) {
      if (MBBI->isDebugValue()) {
        --MBBI;
        continue;
      }
      if (MBBI->getOpcode() == ARM::t2IT) {
        unsigned Mask = MBBI->getOperand(1).getImm();
        if (Count == 4)
          MBBI->eraseFromParent();
        else {
          unsigned MaskOn = 1 << Count;
          unsigned MaskOff = ~(MaskOn - 1);
          MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
        }
        return;
      }
      --MBBI;
      --Count;
    }

    // Control flow can reach here if branch folding is run before the IT
    // block formation pass.
  }
}
Example #27
void SIInsertWaits::handleSendMsg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I) {
  if (ST->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
    return;

  // There must be "S_NOP 0" between an instruction writing M0 and S_SENDMSG.
  if (LastInstWritesM0 && (I->getOpcode() == AMDGPU::S_SENDMSG ||
                           I->getOpcode() == AMDGPU::S_SENDMSGHALT)) {
    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP)).addImm(0);
    LastInstWritesM0 = false;
    return;
  }

  // Set whether this instruction sets M0
  LastInstWritesM0 = false;

  unsigned NumOperands = I->getNumOperands();
  for (unsigned i = 0; i < NumOperands; i++) {
    const MachineOperand &Op = I->getOperand(i);

    if (Op.isReg() && Op.isDef() && Op.getReg() == AMDGPU::M0)
      LastInstWritesM0 = true;
  }
}
Example #28
bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
        MachineBasicBlock &MBB) const {
    while (iter != MBB.end()) {
        switch (iter->getOpcode()) {
        default:
            break;
        case AMDGPU::BRANCH_COND_i32:
        case AMDGPU::BRANCH_COND_f32:
        case AMDGPU::BRANCH:
            return true;
        }
        ++iter;
    }
    return false;
}
bool MipsExpandPseudo::expandMI(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                MachineBasicBlock::iterator &NMBB) {

  bool Modified = false;

  switch (MBBI->getOpcode()) {
  case Mips::ATOMIC_CMP_SWAP_I32_POSTRA:
  case Mips::ATOMIC_CMP_SWAP_I64_POSTRA:
    return expandAtomicCmpSwap(MBB, MBBI, NMBB);
  case Mips::ATOMIC_CMP_SWAP_I8_POSTRA:
  case Mips::ATOMIC_CMP_SWAP_I16_POSTRA:
    return expandAtomicCmpSwapSubword(MBB, MBBI, NMBB);
  case Mips::ATOMIC_SWAP_I8_POSTRA:
  case Mips::ATOMIC_SWAP_I16_POSTRA:
  case Mips::ATOMIC_LOAD_NAND_I8_POSTRA:
  case Mips::ATOMIC_LOAD_NAND_I16_POSTRA:
  case Mips::ATOMIC_LOAD_ADD_I8_POSTRA:
  case Mips::ATOMIC_LOAD_ADD_I16_POSTRA:
  case Mips::ATOMIC_LOAD_SUB_I8_POSTRA:
  case Mips::ATOMIC_LOAD_SUB_I16_POSTRA:
  case Mips::ATOMIC_LOAD_AND_I8_POSTRA:
  case Mips::ATOMIC_LOAD_AND_I16_POSTRA:
  case Mips::ATOMIC_LOAD_OR_I8_POSTRA:
  case Mips::ATOMIC_LOAD_OR_I16_POSTRA:
  case Mips::ATOMIC_LOAD_XOR_I8_POSTRA:
  case Mips::ATOMIC_LOAD_XOR_I16_POSTRA:
    return expandAtomicBinOpSubword(MBB, MBBI, NMBB);
  case Mips::ATOMIC_LOAD_ADD_I32_POSTRA:
  case Mips::ATOMIC_LOAD_SUB_I32_POSTRA:
  case Mips::ATOMIC_LOAD_AND_I32_POSTRA:
  case Mips::ATOMIC_LOAD_OR_I32_POSTRA:
  case Mips::ATOMIC_LOAD_XOR_I32_POSTRA:
  case Mips::ATOMIC_LOAD_NAND_I32_POSTRA:
  case Mips::ATOMIC_SWAP_I32_POSTRA:
    return expandAtomicBinOp(MBB, MBBI, NMBB, 4);
  case Mips::ATOMIC_LOAD_ADD_I64_POSTRA:
  case Mips::ATOMIC_LOAD_SUB_I64_POSTRA:
  case Mips::ATOMIC_LOAD_AND_I64_POSTRA:
  case Mips::ATOMIC_LOAD_OR_I64_POSTRA:
  case Mips::ATOMIC_LOAD_XOR_I64_POSTRA:
  case Mips::ATOMIC_LOAD_NAND_I64_POSTRA:
  case Mips::ATOMIC_SWAP_I64_POSTRA:
    return expandAtomicBinOp(MBB, MBBI, NMBB, 8);
  default:
    return Modified;
  }
}
Example #30
void SystemZFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF,
                              MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::ADJCALLSTACKDOWN:
  case SystemZ::ADJCALLSTACKUP:
    assert(hasReservedCallFrame(MF) &&
           "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
    MBB.erase(MI);
    break;

  default:
    llvm_unreachable("Unexpected call frame instruction");
  }
}