Example #1
// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction*> &Local,
    SmallVectorImpl<Instruction*> &All) {
  SmallSet<Value*, 8> WriteTargets;
  // Iterate from the end.
  for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
       E = Local.rend(); It != E; ++It) {
    Instruction *I = *It;
    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
      WriteTargets.insert(Store->getPointerOperand());
    } else {
      LoadInst *Load = cast<LoadInst>(I);
      Value *Addr = Load->getPointerOperand();
      if (WriteTargets.count(Addr)) {
        // We will write to this temp, so no reason to analyze the read.
        NumOmittedReadsBeforeWrite++;
        continue;
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it can not race with any writes.
        continue;
      }
    }
    All.push_back(I);
  }
  Local.clear();
}
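A self-contained sketch of the read-before-write filtering idea above, using plain integers as stand-in addresses instead of real IR pointers (hypothetical helper that only assumes the LLVM ADT headers; not part of the ThreadSanitizer pass):
#include "llvm/ADT/SmallSet.h"
#include <vector>

// Hypothetical access record: IsWrite distinguishes stores from loads,
// Addr is a stand-in for the pointer operand.
struct Access { bool IsWrite; int Addr; };

// Walk the block backwards: every write is kept, and a read is dropped when a
// later write to the same address has already been recorded in WriteTargets.
static std::vector<Access> filterReadBeforeWrite(const std::vector<Access> &Local) {
  llvm::SmallSet<int, 8> WriteTargets;
  std::vector<Access> All;
  for (auto It = Local.rbegin(), E = Local.rend(); It != E; ++It) {
    if (It->IsWrite)
      WriteTargets.insert(It->Addr);
    else if (WriteTargets.count(It->Addr))
      continue; // read-before-write: the later write gets instrumented anyway
    All.push_back(*It);
  }
  return All;
}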
Example #2
bool CastVerifier::findAllocTbaaRec(Instruction *Inst, int depth,
                                    SmallSet<Instruction *,16> &VisitedInst) {

  CVER_DEBUG("\t\t\t " << depth << " : " << Inst->getOpcodeName() << ":"
             << *Inst << "\n");

  MDNode *Node = Inst->getMetadata(LLVMContext::MD_tbaa);
  if (Node)
    CVER_DEBUG("\t\t\t\t TBAA : " << *Node << "\n");
  
  // If it's too complicated, don't optimize.
  if (depth >= MAX_SAFECAST_CHECK_DEPTH)
    return false;
  
  // If the inst is visited already, it will be handled in some other branches.
  if (VisitedInst.count(Inst))
    return true;
    
  VisitedInst.insert(Inst);

  // opcode : http://llvm.org/docs/doxygen/html/Instruction_8cpp_source.html
  switch(Inst->getOpcode()) {
  case Instruction::Load:
  case Instruction::BitCast:
  case Instruction::Store: {
    Value *value = Inst->getOperand(0);

    if (!isa<Instruction>(value)) {
      // TODO: The value may be a non-instruction value (e.g., an argument).
      // We don't handle this case for now.
      return false;
    }
    return findAllocTbaaRec(dyn_cast<Instruction>(value), depth+1, VisitedInst);
  }
  case Instruction::Alloca: {
    // Scan all of its def-use chain to see how the value is taken.
    bool isAllSafe = true;
    for (User *user : Inst->users())
      if (Instruction *userInst = dyn_cast<Instruction>(user))
        // If any use of the alloca is not safe, the alloca is not safe.
        isAllSafe &= findAllocTbaaRec(userInst, depth+1, VisitedInst);
    return isAllSafe;
  }
  case Instruction::Call: {
    // TODO : check the type in the metadata from Clang.
    if (Inst->getMetadata("cver_new")) {
      // If the instruction is an allocation that cver identified, we should be
      // able to retrieve its TBAA metadata.
      MDNode *Node = Inst->getMetadata(LLVMContext::MD_tbaa);
      assert(Node);
      CVER_DEBUG("\t\t\t\t cver_new TBAA : " << *Node << "\n");
    }
    break;
  }
  default:
    break;
  }
  // If we don't know how to handle this instruction, don't optimize.
  return false;
}
Example #3
static bool allPredCameFromBeginCatch(
    BasicBlock *BB, BasicBlock::reverse_iterator InstRbegin,
    IntrinsicInst **SecondEndCatch, SmallSet<BasicBlock *, 4> &VisitedBlocks) {
  VisitedBlocks.insert(BB);
  // Look for a begincatch in this block.
  for (BasicBlock::reverse_iterator RI = InstRbegin, RE = BB->rend(); RI != RE;
       ++RI) {
    IntrinsicInst *IC = dyn_cast<IntrinsicInst>(&*RI);
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_begincatch)
      return true;
    // If we find another end catch before we find a begin catch, that's
    // an error.
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_endcatch) {
      *SecondEndCatch = IC;
      return false;
    }
    // If we encounter a landingpad instruction, the search failed.
    if (isa<LandingPadInst>(*RI))
      return false;
  }
  // If, while searching, we find a block with no predecessors,
  // the search failed.
  if (pred_empty(BB))
    return false;
  // Search any predecessors we haven't seen before.
  for (BasicBlock *Pred : predecessors(BB)) {
    if (VisitedBlocks.count(Pred))
      continue;
    if (!allPredCameFromBeginCatch(Pred, Pred->rbegin(), SecondEndCatch,
                                   VisitedBlocks))
      return false;
  }
  return true;
}
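The visited-set traversal used here (mark the block, scan it, then recurse into unvisited predecessors) is a reusable pattern. Below is a minimal sketch over a hypothetical predecessor table rather than a real CFG; the names and graph representation are invented for illustration:
#include "llvm/ADT/SmallSet.h"
#include <vector>

// Preds[B] lists the predecessor block numbers of block B (hypothetical CFG).
using PredTable = std::vector<std::vector<int>>;

// Returns true if every backward path from BB hits a block satisfying IsTarget,
// failing as soon as a path runs out of predecessors first.
template <typename TargetFn>
static bool allPredsReach(const PredTable &Preds, int BB, TargetFn IsTarget,
                          llvm::SmallSet<int, 4> &Visited) {
  Visited.insert(BB);
  if (IsTarget(BB))
    return true;
  if (Preds[BB].empty())
    return false;
  for (int P : Preds[BB]) {
    if (Visited.count(P))
      continue; // already checked along another path
    if (!allPredsReach(Preds, P, IsTarget, Visited))
      return false;
  }
  return true;
}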
Example #4
// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
static void
ProcessSourceNode(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
                  DenseMap<SDValue, unsigned> &VRBaseMap,
                  SmallVectorImpl<std::pair<unsigned, MachineInstr*> > &Orders,
                  SmallSet<unsigned, 8> &Seen) {
  unsigned Order = N->getIROrder();
  if (!Order || !Seen.insert(Order)) {
    // Process any valid SDDbgValues even if node does not have any order
    // assigned.
    ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, 0);
    return;
  }

  MachineBasicBlock *BB = Emitter.getBlock();
  if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI() ||
      // Fast-isel may have inserted some instructions, in which case the
      // BB->back().isPHI() test will not fire when we want it to.
      std::prev(Emitter.getInsertPos())->isPHI()) {
    // Did not insert any instruction.
    Orders.push_back(std::make_pair(Order, (MachineInstr*)nullptr));
    return;
  }

  Orders.push_back(std::make_pair(Order, std::prev(Emitter.getInsertPos())));
  ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, Order);
}
/// canAddPseudoFlagDep - On A9 (and other out-of-order) implementations,
/// the 's' 16-bit instructions partially update CPSR. Abort the
/// transformation to avoid adding a false dependency on the last CPSR-setting
/// instruction, which hurts the out-of-order execution engine's ability
/// to do its register renaming magic.
/// This function checks whether there is a read-after-write dependency between
/// the last instruction that defines the CPSR and the current instruction. If
/// there is, then there is no harm done since the instruction cannot be retired
/// before the CPSR-setting instruction anyway.
/// Note, we are not doing full dependency analysis here for the sake of compile
/// time. We're not looking for cases like:
/// r0 = muls ...
/// r1 = add.w r0, ...
/// ...
///    = mul.w r1
/// In this case it would have been OK to narrow the mul.w to muls since there
/// is an indirect RAW dependency between the muls and the mul.w.
bool
Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Def, MachineInstr *Use) {
  if (!Def || !STI->avoidCPSRPartialUpdate())
    return false;

  SmallSet<unsigned, 2> Defs;
  for (unsigned i = 0, e = Def->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = Def->getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    Defs.insert(Reg);
  }

  for (unsigned i = 0, e = Use->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = Use->getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (Defs.count(Reg))
      return false;
  }

  // No read-after-write dependency. The narrowing will add false dependency.
  return true;
}
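Stripped of the MachineOperand bookkeeping, the check above boils down to a set-membership test: collect the registers the defining instruction writes, then ask whether the candidate instruction reads any of them. A hedged sketch with plain register numbers (hypothetical helper, not the actual Thumb2SizeReduce code):
#include "llvm/ADT/SmallSet.h"
#include <vector>

// True if UseReads reads any register that DefWrites produces, i.e. there is a
// genuine read-after-write dependency between the two instructions.
static bool hasRawDependency(const std::vector<unsigned> &DefWrites,
                             const std::vector<unsigned> &UseReads) {
  llvm::SmallSet<unsigned, 2> Defs;
  for (unsigned Reg : DefWrites)
    Defs.insert(Reg);
  for (unsigned Reg : UseReads)
    if (Defs.count(Reg))
      return true;
  return false;
}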
Example #6
/// Remove any duplicates (as SDValues) from the derived pointer pairs.  This
/// is not required for correctness.  Its purpose is to reduce the size of
/// the StackMap section.  It has no effect on the number of spill slots required
/// or the actual lowering.
static void removeDuplicatesGCPtrs(SmallVectorImpl<const Value *> &Bases,
                                   SmallVectorImpl<const Value *> &Ptrs,
                                   SmallVectorImpl<const Value *> &Relocs,
                                   SelectionDAGBuilder &Builder) {

  // This is horribly inefficient, but I don't care right now
  SmallSet<SDValue, 64> Seen;

  SmallVector<const Value *, 64> NewBases, NewPtrs, NewRelocs;
  for (size_t i = 0; i < Ptrs.size(); i++) {
    SDValue SD = Builder.getValue(Ptrs[i]);
    // Only add non-duplicates
    if (Seen.count(SD) == 0) {
      NewBases.push_back(Bases[i]);
      NewPtrs.push_back(Ptrs[i]);
      NewRelocs.push_back(Relocs[i]);
    }
    Seen.insert(SD);
  }
  assert(Bases.size() >= NewBases.size());
  assert(Ptrs.size() >= NewPtrs.size());
  assert(Relocs.size() >= NewRelocs.size());
  Bases = NewBases;
  Ptrs = NewPtrs;
  Relocs = NewRelocs;
  assert(Ptrs.size() == Bases.size());
  assert(Ptrs.size() == Relocs.size());
}
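The loop above is the standard order-preserving deduplication pattern: a Seen set decides which elements get copied into the new vectors. A generic stand-alone version, assuming the element type is an integer- or pointer-like key (hypothetical helper):
#include "llvm/ADT/SmallSet.h"
#include <utility>
#include <vector>

// Drop duplicates from Values while keeping the first occurrence of each
// element in its original position.
template <typename T>
static void removeDuplicatesPreservingOrder(std::vector<T> &Values) {
  llvm::SmallSet<T, 64> Seen;
  std::vector<T> Unique;
  for (const T &V : Values) {
    if (Seen.count(V))
      continue; // already copied an earlier occurrence
    Seen.insert(V);
    Unique.push_back(V);
  }
  Values = std::move(Unique);
}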
/// AddSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and uses list. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch or when the liveout registers
/// are used by instructions in the fallthrough block.
void ScheduleDAGInstrs::AddSchedBarrierDeps() {
  MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->getDesc().isCall() || ExitMI->getDesc().isBarrier());
  if (ExitMI && AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      Uses[Reg].push_back(&ExitSU);
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    SmallSet<unsigned, 8> Seen;
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {    
        unsigned Reg = *I;
        if (Seen.insert(Reg))
          Uses[Reg].push_back(&ExitSU);
      }
  }
}
Example #8
static bool
allSuccessorsReachEndCatch(BasicBlock *BB, BasicBlock::iterator InstBegin,
                           IntrinsicInst **SecondBeginCatch,
                           SmallSet<BasicBlock *, 4> &VisitedBlocks) {
  VisitedBlocks.insert(BB);
  for (BasicBlock::iterator I = InstBegin, E = BB->end(); I != E; ++I) {
    IntrinsicInst *IC = dyn_cast<IntrinsicInst>(I);
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_endcatch)
      return true;
    // If we find another begincatch while looking for an endcatch,
    // that's also an error.
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_begincatch) {
      *SecondBeginCatch = IC;
      return false;
    }
  }

  // If we reach a block with no successors while searching, the
  // search has failed.
  if (succ_empty(BB))
    return false;
  // Otherwise, search all of the successors.
  for (BasicBlock *Succ : successors(BB)) {
    if (VisitedBlocks.count(Succ))
      continue;
    if (!allSuccessorsReachEndCatch(Succ, Succ->begin(), SecondBeginCatch,
                                    VisitedBlocks))
      return false;
  }
  return true;
}
static SmallSet<unsigned, 8> gatherFileIDs(StringRef SourceFile,
                                           const FunctionRecord &Function) {
  SmallSet<unsigned, 8> IDs;
  for (unsigned I = 0, E = Function.Filenames.size(); I < E; ++I)
    if (SourceFile == Function.Filenames[I])
      IDs.insert(I);
  return IDs;
}
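On the caller side, a set returned by a helper like gatherFileIDs is typically consumed with count() to filter records by file ID, much as the createSourceFileView example later in this list does with InterestingFileIDs. A hypothetical consumption sketch (Region and its fields are invented for illustration):
#include "llvm/ADT/SmallSet.h"
#include <vector>

// Hypothetical counted region: FileID says which file it belongs to.
struct Region { unsigned FileID; unsigned Count; };

// Sum the counts of the regions whose FileID was gathered for one source file.
static unsigned sumCountsForFile(const llvm::SmallSet<unsigned, 8> &IDs,
                                 const std::vector<Region> &Regions) {
  unsigned Total = 0;
  for (const Region &R : Regions)
    if (IDs.count(R.FileID))
      Total += R.Count;
  return Total;
}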
/// HandlePhysRegUse - Turn previous partial def's into read/mod/writes. Add
/// implicit defs to a machine instruction if there was an earlier def of its
/// super-register.
void LiveVariables::HandlePhysRegUse(unsigned Reg, MachineInstr *MI) {
  // If there was a previous use or a "full" def all is well.
  if (!PhysRegDef[Reg] && !PhysRegUse[Reg]) {
    // Otherwise, the last sub-register def implicitly defines this register.
    // e.g.
    // AH =
    // AL = ... <imp-def EAX>, <imp-kill AH>
    //    = AH
    // ...
    //    = EAX
    // All of the sub-registers must have been defined before the use of Reg!
    unsigned PartDefReg = 0;
    MachineInstr *LastPartialDef = FindLastPartialDef(Reg, PartDefReg);
    // If LastPartialDef is NULL, it must be using a livein register.
    if (LastPartialDef) {
      LastPartialDef->addOperand(MachineOperand::CreateReg(Reg, true/*IsDef*/,
                                                           true/*IsImp*/));
      PhysRegDef[Reg] = LastPartialDef;
      SmallSet<unsigned, 8> Processed;
      for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
           unsigned SubReg = *SubRegs; ++SubRegs) {
        if (Processed.count(SubReg))
          continue;
        if (SubReg == PartDefReg || TRI->isSubRegister(PartDefReg, SubReg))
          continue;
        // This part of Reg was defined before the last partial def. It's killed
        // here.
        LastPartialDef->addOperand(MachineOperand::CreateReg(SubReg,
                                                             false/*IsDef*/,
                                                             true/*IsImp*/));
        PhysRegDef[SubReg] = LastPartialDef;
        for (const unsigned *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
          Processed.insert(*SS);
      }
    }
  }

  // There was an earlier def of a super-register. Add implicit def to that MI.
  //
  //   A: EAX = ...
  //   B: ... = AX
  //
  // Add implicit def to A if there isn't a use of AX (or EAX) before B.
  if (!PhysRegUse[Reg]) {
    MachineInstr *Def = PhysRegDef[Reg];
    if (Def && !Def->modifiesRegister(Reg))
      Def->addOperand(MachineOperand::CreateReg(Reg,
                                                true  /*IsDef*/,
                                                true  /*IsImp*/));
  }
  
  // Remember this use.
  PhysRegUse[Reg]  = MI;
  for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
       unsigned SubReg = *SubRegs; ++SubRegs)
    PhysRegUse[SubReg] =  MI;
}
Example #11
void LiveVariables::runOnBlock(MachineBasicBlock *MBB, const unsigned NumRegs) {
  // Mark live-in registers as live-in.
  SmallVector<unsigned, 4> Defs;
  for (const auto &LI : MBB->liveins()) {
    assert(TargetRegisterInfo::isPhysicalRegister(LI.PhysReg) &&
           "Cannot have a live-in virtual register!");
    HandlePhysRegDef(LI.PhysReg, nullptr, Defs);
  }

  // Loop over all of the instructions, processing them.
  DistanceMap.clear();
  unsigned Dist = 0;
  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
       I != E; ++I) {
    MachineInstr *MI = I;
    if (MI->isDebugValue())
      continue;
    DistanceMap.insert(std::make_pair(MI, Dist++));

    runOnInstr(MI, Defs);
  }

  // Handle any virtual assignments from PHI nodes which might be at the
  // bottom of this basic block.  We check all of our successor blocks to see
  // if they have PHI nodes, and if so, we simulate an assignment at the end
  // of the current block.
  if (!PHIVarInfo[MBB->getNumber()].empty()) {
    SmallVectorImpl<unsigned> &VarInfoVec = PHIVarInfo[MBB->getNumber()];

    for (SmallVectorImpl<unsigned>::iterator I = VarInfoVec.begin(),
           E = VarInfoVec.end(); I != E; ++I)
      // Mark it alive only in the block we are representing.
      MarkVirtRegAliveInBlock(getVarInfo(*I),MRI->getVRegDef(*I)->getParent(),
                              MBB);
  }

  // MachineCSE may CSE instructions which write to non-allocatable physical
  // registers across MBBs. Remember if any reserved register is liveout.
  SmallSet<unsigned, 4> LiveOuts;
  for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI) {
    MachineBasicBlock *SuccMBB = *SI;
    if (SuccMBB->isEHPad())
      continue;
    for (const auto &LI : SuccMBB->liveins()) {
      if (!TRI->isInAllocatableClass(LI.PhysReg))
        // Ignore other live-ins, e.g. those that are live into landing pads.
        LiveOuts.insert(LI.PhysReg);
    }
  }

  // Loop over PhysRegDef / PhysRegUse, killing any registers that are
  // available at the end of the basic block.
  for (unsigned i = 0; i != NumRegs; ++i)
    if ((PhysRegDef[i] || PhysRegUse[i]) && !LiveOuts.count(i))
      HandlePhysRegDef(i, nullptr, Defs);
}
Example #12
// Insert Defs and Uses of MI into the sets RegDefs and RegUses.
void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
                            SmallSet<unsigned, 32>& RegDefs,
                            SmallSet<unsigned, 32>& RegUses)
{
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;

    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;
    if (MO.isDef())
      RegDefs.insert(Reg);
    if (MO.isUse())
      RegUses.insert(Reg);

  }
}
Example #13
void LiveVariables::HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
                                     SmallVectorImpl<unsigned> &Defs) {
  // What parts of the register are previously defined?
  SmallSet<unsigned, 32> Live;
  if (PhysRegDef[Reg] || PhysRegUse[Reg]) {
    for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
         SubRegs.isValid(); ++SubRegs)
      Live.insert(*SubRegs);
  } else {
    for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
      unsigned SubReg = *SubRegs;
      // If a register isn't itself defined, but all of the parts that make it
      // up are defined, then consider it also defined.
      // e.g.
      // AL =
      // AH =
      //    = AX
      if (Live.count(SubReg))
        continue;
      if (PhysRegDef[SubReg] || PhysRegUse[SubReg]) {
        for (MCSubRegIterator SS(SubReg, TRI, /*IncludeSelf=*/true);
             SS.isValid(); ++SS)
          Live.insert(*SS);
      }
    }
  }

  // Start from the largest piece, find the last time any part of the register
  // is referenced.
  HandlePhysRegKill(Reg, MI);
  // Only some of the sub-registers are used.
  for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
    unsigned SubReg = *SubRegs;
    if (!Live.count(SubReg))
      // Skip if this sub-register isn't defined.
      continue;
    HandlePhysRegKill(SubReg, MI);
  }

  if (MI)
    Defs.push_back(Reg);  // Remember this def.
}
Example #14
void LiveVariables::HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
                                     SmallVector<unsigned, 4> &Defs) {
  // What parts of the register are previously defined?
  SmallSet<unsigned, 32> Live;
  if (PhysRegDef[Reg] || PhysRegUse[Reg]) {
    Live.insert(Reg);
    for (const uint16_t *SS = TRI->getSubRegisters(Reg); *SS; ++SS)
      Live.insert(*SS);
  } else {
    for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
         unsigned SubReg = *SubRegs; ++SubRegs) {
      // If a register isn't itself defined, but all of the parts that make it
      // up are defined, then consider it also defined.
      // e.g.
      // AL =
      // AH =
      //    = AX
      if (Live.count(SubReg))
        continue;
      if (PhysRegDef[SubReg] || PhysRegUse[SubReg]) {
        Live.insert(SubReg);
        for (const uint16_t *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
          Live.insert(*SS);
      }
    }
  }

  // Start from the largest piece, find the last time any part of the register
  // is referenced.
  HandlePhysRegKill(Reg, MI);
  // Only some of the sub-registers are used.
  for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
       unsigned SubReg = *SubRegs; ++SubRegs) {
    if (!Live.count(SubReg))
      // Skip if this sub-register isn't defined.
      continue;
    HandlePhysRegKill(SubReg, MI);
  }

  if (MI)
    Defs.push_back(Reg);  // Remember this def.
}
// Insert Defs and Uses of MI into the sets RegDefs and RegUses.
void Filler::insertDefsUses(MachineBasicBlock::instr_iterator MI,
                            SmallSet<unsigned, 32> &RegDefs,
                            SmallSet<unsigned, 32> &RegUses) {
  // If MI is a call or return, just examine the explicit non-variadic operands.
  MCInstrDesc MCID = MI->getDesc();
  unsigned E = MI->isCall() || MI->isReturn() ? MCID.getNumOperands()
                                              : MI->getNumOperands();
  for (unsigned I = 0; I != E; ++I) {
    const MachineOperand &MO = MI->getOperand(I);
    unsigned Reg;

    if (!MO.isReg() || !(Reg = MO.getReg()))
      continue;

    if (MO.isDef())
      RegDefs.insert(Reg);
    else if (MO.isUse())
      RegUses.insert(Reg);
  }
}
Example #16
/// MergeValueInAsValue - Merge all of the live ranges of a specific val#
/// in RHS into this live interval as the specified value number.
/// The LiveRanges in RHS are allowed to overlap with LiveRanges in the
/// current interval; in that case, the value numbers of the overlapped
/// live ranges are replaced with the specified value number.
void LiveInterval::MergeValueInAsValue(
                                    const LiveInterval &RHS,
                                    const VNInfo *RHSValNo, VNInfo *LHSValNo) {
  SmallVector<VNInfo*, 4> ReplacedValNos;
  iterator IP = begin();
  for (const_iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) {
    assert(I->valno == RHS.getValNumInfo(I->valno->id) && "Bad VNInfo");
    if (I->valno != RHSValNo)
      continue;
    SlotIndex Start = I->start, End = I->end;
    IP = std::upper_bound(IP, end(), Start);
    // If the start of this range overlaps with an existing liverange, trim it.
    if (IP != begin() && IP[-1].end > Start) {
      if (IP[-1].valno != LHSValNo) {
        ReplacedValNos.push_back(IP[-1].valno);
        IP[-1].valno = LHSValNo; // Update val#.
      }
      Start = IP[-1].end;
      // Trimmed away the whole range?
      if (Start >= End) continue;
    }
    // If the end of this range overlaps with an existing liverange, trim it.
    if (IP != end() && End > IP->start) {
      if (IP->valno != LHSValNo) {
        ReplacedValNos.push_back(IP->valno);
        IP->valno = LHSValNo;  // Update val#.
      }
      End = IP->start;
      // If this trimmed away the whole range, ignore it.
      if (Start == End) continue;
    }

    // Map the valno in the other live range to the current live range.
    IP = addRangeFrom(LiveRange(Start, End, LHSValNo), IP);
  }


  SmallSet<VNInfo*, 4> Seen;
  for (unsigned i = 0, e = ReplacedValNos.size(); i != e; ++i) {
    VNInfo *V1 = ReplacedValNos[i];
    if (Seen.insert(V1)) {
      bool isDead = true;
      for (const_iterator I = begin(), E = end(); I != E; ++I)
        if (I->valno == V1) {
          isDead = false;
          break;
        }
      if (isDead) {
        // Now that V1 is dead, remove it.
        markValNoForDeletion(V1);
      }
    }
  }
}
Example #17
bool CodeCoverageTool::gatherInterestingFileIDs(
    StringRef SourceFile, const FunctionCoverageMapping &Function,
    SmallSet<unsigned, 8> &InterestingFileIDs) {
  bool Interesting = false;
  for (unsigned I = 0, E = Function.Filenames.size(); I < E; ++I) {
    if (equivalentFiles(SourceFile, Function.Filenames[I])) {
      InterestingFileIDs.insert(I);
      Interesting = true;
    }
  }
  return Interesting;
}
/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
void LocalStackSlotPass::AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                                           SmallSet<int, 16> &ProtectedObjs,
                                           MachineFrameInfo &MFI,
                                           bool StackGrowsDown, int64_t &Offset,
                                           unsigned &MaxAlign) {
  for (StackObjSet::const_iterator I = UnassignedObjs.begin(),
        E = UnassignedObjs.end(); I != E; ++I) {
    int i = *I;
    AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign);
    ProtectedObjs.insert(i);
  }
}
Example #19
static bool jl_can_finalize_function(StringRef F, SmallSet<Module*, 16> &known)
{
    if (incomplete_fname.find(F) != incomplete_fname.end())
        return false;
    Module *M = module_for_fname.lookup(F);
#if JL_LLVM_VERSION >= 30500
    if (M && known.insert(M).second)
#else
    if (M && known.insert(M))
#endif
    {
        for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I) {
            Function *F = &*I;
            if (F->isDeclaration() && !isIntrinsicFunction(F)) {
                if (!jl_can_finalize_function(F->getName(), known))
                    return false;
            }
        }
    }
    return true;
}
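The preprocessor branch above exists because the return type of SmallSet::insert changed across LLVM releases (a plain bool in older versions, a std::pair whose second member is the bool in newer ones). A version-agnostic way to ask "was this element new?" is to test count() before inserting, at the cost of a second lookup; a small sketch of that workaround (hypothetical helper):
#include "llvm/ADT/SmallSet.h"

// Insert V and report whether it was not already present, without relying on
// the version-dependent return type of SmallSet::insert.
template <typename T, unsigned N>
static bool insertIfNew(llvm::SmallSet<T, N> &Set, const T &V) {
  if (Set.count(V))
    return false;
  Set.insert(V);
  return true;
}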
Example #20
bool CodeCoverageTool::createSourceFileView(
    StringRef SourceFile, SourceCoverageView &View,
    ArrayRef<FunctionCoverageMapping> FunctionMappingRecords,
    bool UseOnlyRegionsInMainFile) {
  auto RegionManager = llvm::make_unique<SourceCoverageDataManager>();
  FunctionInstantiationSetCollector InstantiationSetCollector;

  for (const auto &Function : FunctionMappingRecords) {
    unsigned MainFileID;
    if (findMainViewFileID(SourceFile, Function, MainFileID))
      continue;
    SmallSet<unsigned, 8> InterestingFileIDs;
    if (UseOnlyRegionsInMainFile) {
      InterestingFileIDs.insert(MainFileID);
    } else if (!gatherInterestingFileIDs(SourceFile, Function,
                                         InterestingFileIDs))
      continue;
    // Get the interesting regions
    for (const auto &CR : Function.CountedRegions) {
      if (InterestingFileIDs.count(CR.FileID))
        RegionManager->insert(CR);
    }
    InstantiationSetCollector.insert(Function, MainFileID);
    createExpansionSubViews(View, MainFileID, Function);
  }
  if (RegionManager->getCoverageSegments().empty())
    return true;
  View.load(std::move(RegionManager));
  // Show instantiations
  if (!ViewOpts.ShowFunctionInstantiations)
    return false;
  for (const auto &InstantiationSet : InstantiationSetCollector) {
    if (InstantiationSet.second.size() < 2)
      continue;
    for (auto Function : InstantiationSet.second) {
      unsigned FileID = Function->CountedRegions.front().FileID;
      unsigned Line = 0;
      for (const auto &CR : Function->CountedRegions)
        if (CR.FileID == FileID)
          Line = std::max(CR.LineEnd, Line);
      auto SourceBuffer = getSourceFile(Function->Filenames[FileID]);
      if (!SourceBuffer)
        continue;
      auto SubView = llvm::make_unique<SourceCoverageView>(SourceBuffer.get(),
                                                           View.getOptions());
      createInstantiationSubView(SourceFile, *Function, *SubView);
      View.addInstantiation(Function->Name, Line, std::move(SubView));
    }
  }
  return false;
}
Example #21
/// TrackDefUses - Tracking what registers are being defined and used by
/// instructions in the IT block. This also tracks "dependencies", i.e. uses
/// in the IT block that are defined before the IT instruction.
static void TrackDefUses(MachineInstr *MI,
                         SmallSet<unsigned, 4> &Defs,
                         SmallSet<unsigned, 4> &Uses,
                         const TargetRegisterInfo *TRI) {
  SmallVector<unsigned, 4> LocalDefs;
  SmallVector<unsigned, 4> LocalUses;

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg || Reg == ARM::ITSTATE || Reg == ARM::SP)
      continue;
    if (MO.isUse())
      LocalUses.push_back(Reg);
    else
      LocalDefs.push_back(Reg);
  }

  for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
    unsigned Reg = LocalUses[i];
    Uses.insert(Reg);
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg)
      Uses.insert(*Subreg);
  }

  for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
    unsigned Reg = LocalDefs[i];
    Defs.insert(Reg);
    // CPSR has no sub-registers worth tracking; skip the sub-register scan.
    if (Reg == ARM::CPSR)
      continue;
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg)
      Defs.insert(*Subreg);
  }
}
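TrackDefUses inserts each register together with all of its sub-registers so that later Defs.count()/Uses.count() queries also catch partial overlaps. The same idea in isolation, with a hypothetical sub-register table standing in for TargetRegisterInfo:
#include "llvm/ADT/SmallSet.h"
#include <vector>

// SubRegTable[Reg] lists the sub-register numbers of Reg (hypothetical table).
// Adding Reg and all of its sub-registers means a later Set.count(R) query
// also fires for registers that only partially overlap Reg.
static void insertWithSubRegs(unsigned Reg,
                              const std::vector<std::vector<unsigned>> &SubRegTable,
                              llvm::SmallSet<unsigned, 4> &Set) {
  Set.insert(Reg);
  for (unsigned Sub : SubRegTable[Reg])
    Set.insert(Sub);
}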
Example #22
// Insert Defs and Uses of MI into the sets RegDefs and RegUses.
void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
                            SmallSet<unsigned, 32>& RegDefs,
                            SmallSet<unsigned, 32>& RegUses)
{
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;

    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;
    if (MO.isDef())
      RegDefs.insert(Reg);
    if (MO.isUse()) {
      // Implicit register uses of retl are return values and
      // retl does not use them.
      if (MO.isImplicit() && MI->getOpcode() == SP::RETL)
        continue;
      RegUses.insert(Reg);
    }
  }
}
Example #23
/// HandlePhysRegUse - Turn previous partial def's into read/mod/writes. Add
/// implicit defs to a machine instruction if there was an earlier def of its
/// super-register.
void LiveVariables::HandlePhysRegUse(unsigned Reg, MachineInstr *MI) {
  MachineInstr *LastDef = PhysRegDef[Reg];
  // If there was a previous use or a "full" def all is well.
  if (!LastDef && !PhysRegUse[Reg]) {
    // Otherwise, the last sub-register def implicitly defines this register.
    // e.g.
    // AH =
    // AL = ... <imp-def EAX>, <imp-kill AH>
    //    = AH
    // ...
    //    = EAX
    // All of the sub-registers must have been defined before the use of Reg!
    SmallSet<unsigned, 4> PartDefRegs;
    MachineInstr *LastPartialDef = FindLastPartialDef(Reg, PartDefRegs);
    // If LastPartialDef is NULL, it must be using a livein register.
    if (LastPartialDef) {
      LastPartialDef->addOperand(MachineOperand::CreateReg(Reg, true/*IsDef*/,
                                                           true/*IsImp*/));
      PhysRegDef[Reg] = LastPartialDef;
      SmallSet<unsigned, 8> Processed;
      for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
           unsigned SubReg = *SubRegs; ++SubRegs) {
        if (Processed.count(SubReg))
          continue;
        if (PartDefRegs.count(SubReg))
          continue;
        // This part of Reg was defined before the last partial def. It's killed
        // here.
        LastPartialDef->addOperand(MachineOperand::CreateReg(SubReg,
                                                             false/*IsDef*/,
                                                             true/*IsImp*/));
        PhysRegDef[SubReg] = LastPartialDef;
        for (const unsigned *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
          Processed.insert(*SS);
      }
    }
  }
  else if (LastDef && !PhysRegUse[Reg] &&
           !LastDef->findRegisterDefOperand(Reg))
    // Last def defines the super register, add an implicit def of reg.
    LastDef->addOperand(MachineOperand::CreateReg(Reg,
                                                 true/*IsDef*/, true/*IsImp*/));

  // Remember this use.
  PhysRegUse[Reg]  = MI;
  for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
       unsigned SubReg = *SubRegs; ++SubRegs)
    PhysRegUse[SubReg] =  MI;
}
Example #24
void Filler::insertCallUses(MachineBasicBlock::iterator MI,
                            SmallSet<unsigned, 32>& RegUses)
{

  switch(MI->getOpcode()) {
  default: llvm_unreachable("Unknown opcode.");
  case SP::CALL: break;
  case SP::JMPLrr:
  case SP::JMPLri:
    assert(MI->getNumOperands() >= 2);
    const MachineOperand &Reg = MI->getOperand(0);
    assert(Reg.isReg() && "JMPL first operand is not a register.");
    assert(Reg.isUse() && "JMPL first operand is not a use.");
    RegUses.insert(Reg.getReg());

    const MachineOperand &RegOrImm = MI->getOperand(1);
    if (RegOrImm.isImm())
        break;
    assert(RegOrImm.isReg() && "JMPLrr second operand is not a register.");
    assert(RegOrImm.isUse() && "JMPLrr second operand is not a use.");
    RegUses.insert(RegOrImm.getReg());
    break;
  }
}
Example #25
void InlineAttempt::postCommitOptimise() {

  if(SkipPostCommit)
    return;

  if(CommitF) {
    
    PCOFunctionCB CB;
    postCommitOptimiseBlocks(CommitF->begin(), CommitF->end(), CB, firstFailedBlock);

    // Now top-sort the blocks, excluding the failed blocks by annotating them 'visited' to start with.
    {

      SmallSet<BasicBlock*, 8> Visited;
      for(Function::iterator it = firstFailedBlock, itend = CommitF->end(); it != itend; ++it)
	Visited.insert(it);

      BasicBlock* firstBlock = &CommitF->getEntryBlock();
      std::vector<BasicBlock*> Ordered;
      createTopOrderingFrom(firstBlock, Ordered, Visited, 0, 0);
      std::reverse(Ordered.begin(), Ordered.end());

      Function::BasicBlockListType& BBL = CommitF->getBasicBlockList();

      Function::iterator fit = CommitF->begin();
      for(std::vector<BasicBlock*>::iterator it = Ordered.begin(), itend = Ordered.end(); it != itend; ++it) {

	// If fit is not already the block we want here, splice this block in before
	// fit (fit stays put for the next iteration); otherwise fit is already in the
	// right place, so leave it and advance so later blocks insert before its
	// successor.

	if(fit == CommitF->end() || (&*fit) != (*it))
	  BBL.splice(fit, BBL, *it);
	else
	  ++fit;

      }

    }

  }
  else {

    //    PCOBBsCB CB(this);
    //    postCommitOptimiseBlocks(DerefAdaptor(CommitBlocks.begin()), DerefAdaptor(CommitBlocks.end()), CB);

  }
   
}
/// CheckForLiveRegDef - Return true and update live register vector if the
/// specified register def of the specified SUnit clobbers any "live" registers.
static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVectorImpl<unsigned> &LRegs,
                               const TargetRegisterInfo *TRI) {
  bool Added = false;
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
    if (LiveRegDefs[*AI] && LiveRegDefs[*AI] != SU) {
      if (RegAdded.insert(*AI).second) {
        LRegs.push_back(*AI);
        Added = true;
      }
    }
  }
  return Added;
}
bool CodeCoverageTool::createSourceFileView(
    StringRef SourceFile, SourceCoverageView &View,
    ArrayRef<FunctionCoverageMapping> FunctionMappingRecords,
    bool UseOnlyRegionsInMainFile) {
  SourceCoverageDataManager RegionManager;
  FunctionInstantiationSetCollector InstantiationSetCollector;

  for (const auto &Function : FunctionMappingRecords) {
    unsigned MainFileID;
    if (findMainViewFileID(SourceFile, Function, MainFileID))
      continue;
    SmallSet<unsigned, 8> InterestingFileIDs;
    if (UseOnlyRegionsInMainFile) {
      InterestingFileIDs.insert(MainFileID);
    } else if (!gatherInterestingFileIDs(SourceFile, Function,
                                         InterestingFileIDs))
      continue;
    // Get the interesting regions
    for (const auto &Region : Function.MappingRegions) {
      if (InterestingFileIDs.count(Region.FileID))
        RegionManager.insert(Region);
    }
    InstantiationSetCollector.insert(Function, MainFileID);
    createExpansionSubViews(View, MainFileID, Function);
  }
  if (RegionManager.getSourceRegions().empty())
    return true;
  View.load(RegionManager);
  // Show instantiations
  if (!ViewOpts.ShowFunctionInstantiations)
    return false;
  for (const auto &InstantiationSet : InstantiationSetCollector) {
    if (InstantiationSet.second.size() < 2)
      continue;
    auto InterestingRange = findExpandedFileInterestingLineRange(
        InstantiationSet.second.front()->MappingRegions.front().FileID,
        *InstantiationSet.second.front());
    for (auto Function : InstantiationSet.second) {
      auto SubView = llvm::make_unique<SourceCoverageView>(
          View, InterestingRange.first, InterestingRange.second,
          Function->PrettyName);
      createInstantiationSubView(SourceFile, *Function, *SubView);
      View.addChild(std::move(SubView));
    }
  }
  return false;
}
Example #28
/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
///
void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo *MFI = Fn.getFrameInfo();
  const TargetFrameInfo &TFI = *Fn.getTarget().getFrameInfo();
  bool StackGrowsDown =
    TFI.getStackGrowthDirection() == TargetFrameInfo::StackGrowsDown;
  int64_t Offset = 0;
  unsigned MaxAlign = 0;

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> LargeStackObjs;
  if (MFI->getStackProtectorIndex() >= 0) {
    AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), Offset,
                      StackGrowsDown, MaxAlign);

    // Assign large stack objects first.
    for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
      if (MFI->isDeadObjectIndex(i))
        continue;
      if (MFI->getStackProtectorIndex() == (int)i)
        continue;
      if (!MFI->MayNeedStackProtector(i))
        continue;

      AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign);
      LargeStackObjs.insert(i);
    }
  }

  // Then assign frame offsets to stack objects that are not used to spill
  // callee saved registers.
  for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
    if (MFI->isDeadObjectIndex(i))
      continue;
    if (MFI->getStackProtectorIndex() == (int)i)
      continue;
    if (LargeStackObjs.count(i))
      continue;

    AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign);
  }

  // Remember how big this blob of stack space is
  MFI->setLocalFrameSize(Offset);
  MFI->setLocalFrameMaxAlign(MaxAlign);
}
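The structure of calculateFrameObjectOffsets is a two-pass layout: place the protected objects first, remember their indices in a set, then lay out everything else while skipping what the first pass already handled. A simplified sketch that keeps only that skeleton (sizes only; alignment and stack growth direction are ignored, and all names are hypothetical):
#include "llvm/ADT/SmallSet.h"
#include <cstdint>
#include <vector>

// Assign byte offsets to objects: ProtectedIdx entries first, the rest after.
static std::vector<int64_t>
assignOffsets(const std::vector<int64_t> &Sizes,
              const std::vector<unsigned> &ProtectedIdx) {
  std::vector<int64_t> Offsets(Sizes.size(), 0);
  llvm::SmallSet<unsigned, 16> Placed;
  int64_t Offset = 0;
  // First pass: the protected objects, remembered in Placed.
  for (unsigned i : ProtectedIdx) {
    Offsets[i] = Offset;
    Offset += Sizes[i];
    Placed.insert(i);
  }
  // Second pass: everything the first pass did not already place.
  for (unsigned i = 0, e = static_cast<unsigned>(Sizes.size()); i != e; ++i) {
    if (Placed.count(i))
      continue;
    Offsets[i] = Offset;
    Offset += Sizes[i];
  }
  return Offsets;
}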
Example #29
static bool
allPredsCameFromLandingPad(BasicBlock *BB,
                           SmallSet<BasicBlock *, 4> &VisitedBlocks) {
  VisitedBlocks.insert(BB);
  if (BB->isLandingPad())
    return true;
  // If we find a block with no predecessors, the search failed.
  if (pred_empty(BB))
    return false;
  for (BasicBlock *Pred : predecessors(BB)) {
    if (VisitedBlocks.count(Pred))
      continue;
    if (!allPredsCameFromLandingPad(Pred, VisitedBlocks))
      return false;
  }
  return true;
}
bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
                                        SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isMoveImmediate())
    return false;
  if (MCID.getNumDefs() != 1)
    return false;
  unsigned Reg = MI->getOperand(0).getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    ImmDefMIs.insert(std::make_pair(Reg, MI));
    ImmDefRegs.insert(Reg);
    return true;
  }

  return false;
}