Example No. 1
bool AlignmentFromAssumptions::processAssumption(CallInst *ACall) {
  Value *AAPtr;
  const SCEV *AlignSCEV, *OffSCEV;
  if (!extractAlignmentInfo(ACall, AAPtr, AlignSCEV, OffSCEV))
    return false;

  const SCEV *AASCEV = SE->getSCEV(AAPtr);

  // Apply the assumption to all other users of the specified pointer.
  SmallPtrSet<Instruction *, 32> Visited;
  SmallVector<Instruction*, 16> WorkList;
  for (User *J : AAPtr->users()) {
    if (J == ACall)
      continue;

    if (Instruction *K = dyn_cast<Instruction>(J))
      if (isValidAssumeForContext(ACall, K, DT))
        WorkList.push_back(K);
  }

  while (!WorkList.empty()) {
    Instruction *J = WorkList.pop_back_val();

    if (LoadInst *LI = dyn_cast<LoadInst>(J)) {
      unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
        LI->getPointerOperand(), SE);

      if (NewAlignment > LI->getAlignment()) {
        LI->setAlignment(NewAlignment);
        ++NumLoadAlignChanged;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {
      unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
        SI->getPointerOperand(), SE);

      if (NewAlignment > SI->getAlignment()) {
        SI->setAlignment(NewAlignment);
        ++NumStoreAlignChanged;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {
      unsigned NewDestAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
        MI->getDest(), SE);

      // For memory transfers, we need a common alignment for both the
      // source and destination. If we have a new alignment for this
      // instruction, but only for one operand, save it. If we reach the
      // other operand through another assumption later, then we may
      // change the alignment at that point.
      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
        unsigned NewSrcAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
          MTI->getSource(), SE);

        DenseMap<MemTransferInst *, unsigned>::iterator DI =
          NewDestAlignments.find(MTI);
        unsigned AltDestAlignment = (DI == NewDestAlignments.end()) ?
                                    0 : DI->second;

        DenseMap<MemTransferInst *, unsigned>::iterator SI =
          NewSrcAlignments.find(MTI);
        unsigned AltSrcAlignment = (SI == NewSrcAlignments.end()) ?
                                   0 : SI->second;

        DEBUG(dbgs() << "\tmem trans: " << NewDestAlignment << " " <<
                        AltDestAlignment << " " << NewSrcAlignment <<
                        " " << AltSrcAlignment << "\n");

        // Of these four alignments, pick the largest possible...
        unsigned NewAlignment = 0;
        if (NewDestAlignment <= std::max(NewSrcAlignment, AltSrcAlignment))
          NewAlignment = std::max(NewAlignment, NewDestAlignment);
        if (AltDestAlignment <= std::max(NewSrcAlignment, AltSrcAlignment))
          NewAlignment = std::max(NewAlignment, AltDestAlignment);
        if (NewSrcAlignment <= std::max(NewDestAlignment, AltDestAlignment))
          NewAlignment = std::max(NewAlignment, NewSrcAlignment);
        if (AltSrcAlignment <= std::max(NewDestAlignment, AltDestAlignment))
          NewAlignment = std::max(NewAlignment, AltSrcAlignment);

        if (NewAlignment > MI->getAlignment()) {
          MI->setAlignment(ConstantInt::get(Type::getInt32Ty(
            MI->getParent()->getContext()), NewAlignment));
          ++NumMemIntAlignChanged;
        }

        NewDestAlignments.insert(std::make_pair(MTI, NewDestAlignment));
        NewSrcAlignments.insert(std::make_pair(MTI, NewSrcAlignment));
      } else if (NewDestAlignment > MI->getAlignment()) {
        assert((!isa<MemIntrinsic>(MI) || isa<MemSetInst>(MI)) &&
               "Unknown memory intrinsic");

        MI->setAlignment(ConstantInt::get(Type::getInt32Ty(
          MI->getParent()->getContext()), NewDestAlignment));
        ++NumMemIntAlignChanged;
      }
    }

    // Now that we've updated that use of the pointer, look for other uses of
    // the pointer to update.
    Visited.insert(J);
    for (User *UJ : J->users()) {
      Instruction *K = cast<Instruction>(UJ);
      if (!Visited.count(K) && isValidAssumeForContext(ACall, K, DT))
        WorkList.push_back(K);
    }
  }

  return true;
}
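
The four-way selection above picks the largest alignment that is still no larger than the best alignment known for the opposite operand, so a memory transfer never claims an alignment its source or destination cannot also satisfy. Below is a minimal, self-contained sketch of just that selection rule, with hypothetical names and plain unsigned values standing in for the SCEV-derived alignments (illustration only, not the pass itself):

#include <algorithm>
#include <cassert>
#include <iostream>

// Illustrative only: choose a common alignment for a memory transfer, given
// the newly proven and previously recorded alignments of its destination and
// source. A candidate is admitted only if the other side can match it.
static unsigned pickCommonAlignment(unsigned NewDest, unsigned AltDest,
                                    unsigned NewSrc, unsigned AltSrc) {
  unsigned Best = 0;
  if (NewDest <= std::max(NewSrc, AltSrc))
    Best = std::max(Best, NewDest);
  if (AltDest <= std::max(NewSrc, AltSrc))
    Best = std::max(Best, AltDest);
  if (NewSrc <= std::max(NewDest, AltDest))
    Best = std::max(Best, NewSrc);
  if (AltSrc <= std::max(NewDest, AltDest))
    Best = std::max(Best, AltSrc);
  return Best;
}

int main() {
  // Destination proven 16-byte aligned, source only 8: the common pick is 8.
  assert(pickCommonAlignment(16, 0, 8, 0) == 8);
  // A later assumption raised the source to 16, so 16 becomes safe.
  assert(pickCommonAlignment(16, 0, 16, 8) == 16);
  std::cout << "ok\n";
  return 0;
}
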
Example No. 2
MachineBasicBlock *
MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
  // Splitting the critical edge to a landing pad block is non-trivial. Don't do
  // it in this generic function.
  if (Succ->isLandingPad())
    return NULL;

  MachineFunction *MF = getParent();
  DebugLoc dl;  // FIXME: this is nowhere

  // We may need to update this block's terminator, but we can't do that if
  // AnalyzeBranch fails. If this uses a jump table, we won't touch it.
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
  MachineBasicBlock *TBB = 0, *FBB = 0;
  SmallVector<MachineOperand, 4> Cond;
  if (TII->AnalyzeBranch(*this, TBB, FBB, Cond))
    return NULL;

  // Avoid bugpoint weirdness: A block may end with a conditional branch that
  // jumps to the same MBB in either case. We have duplicate CFG edges in that
  // case that we can't handle. Since this never happens in properly optimized
  // code, just skip those edges.
  if (TBB && TBB == FBB) {
    DEBUG(dbgs() << "Won't split critical edge after degenerate BB#"
                 << getNumber() << '\n');
    return NULL;
  }

  MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
  MF->insert(llvm::next(MachineFunction::iterator(this)), NMBB);
  DEBUG(dbgs() << "Splitting critical edge:"
        " BB#" << getNumber()
        << " -- BB#" << NMBB->getNumber()
        << " -- BB#" << Succ->getNumber() << '\n');

  // On some targets like Mips, branches may kill virtual registers. Make sure
  // that LiveVariables is properly updated after updateTerminator replaces the
  // terminators.
  LiveVariables *LV = P->getAnalysisIfAvailable<LiveVariables>();

  // Collect a list of virtual registers killed by the terminators.
  SmallVector<unsigned, 4> KilledRegs;
  if (LV)
    for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
         I != E; ++I) {
      MachineInstr *MI = I;
      for (MachineInstr::mop_iterator OI = MI->operands_begin(),
           OE = MI->operands_end(); OI != OE; ++OI) {
        if (!OI->isReg() || OI->getReg() == 0 ||
            !OI->isUse() || !OI->isKill() || OI->isUndef())
          continue;
        unsigned Reg = OI->getReg();
        if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
            LV->getVarInfo(Reg).removeKill(MI)) {
          KilledRegs.push_back(Reg);
          DEBUG(dbgs() << "Removing terminator kill: " << *MI);
          OI->setIsKill(false);
        }
      }
    }

  ReplaceUsesOfBlockWith(Succ, NMBB);
  updateTerminator();

  // Insert unconditional "jump Succ" instruction in NMBB if necessary.
  NMBB->addSuccessor(Succ);
  if (!NMBB->isLayoutSuccessor(Succ)) {
    Cond.clear();
    MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, Succ, NULL, Cond, dl);
  }

  // Fix PHI nodes in Succ so they refer to NMBB instead of this
  for (MachineBasicBlock::instr_iterator
         i = Succ->instr_begin(),e = Succ->instr_end();
       i != e && i->isPHI(); ++i)
    for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
      if (i->getOperand(ni+1).getMBB() == this)
        i->getOperand(ni+1).setMBB(NMBB);

  // Inherit live-ins from the successor
  for (MachineBasicBlock::livein_iterator I = Succ->livein_begin(),
         E = Succ->livein_end(); I != E; ++I)
    NMBB->addLiveIn(*I);

  // Update LiveVariables.
  const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
  if (LV) {
    // Restore kills of virtual registers that were killed by the terminators.
    while (!KilledRegs.empty()) {
      unsigned Reg = KilledRegs.pop_back_val();
      for (instr_iterator I = instr_end(), E = instr_begin(); I != E;) {
        if (!(--I)->addRegisterKilled(Reg, TRI, /* addIfNotFound= */ false))
          continue;
        if (TargetRegisterInfo::isVirtualRegister(Reg))
          LV->getVarInfo(Reg).Kills.push_back(I);
        DEBUG(dbgs() << "Restored terminator kill: " << *I);
        break;
      }
    }
    // Update relevant live-through information.
    LV->addNewBlock(NMBB, this, Succ);
  }

  if (MachineDominatorTree *MDT =
      P->getAnalysisIfAvailable<MachineDominatorTree>()) {
    // Update dominator information.
    MachineDomTreeNode *SucccDTNode = MDT->getNode(Succ);

    bool IsNewIDom = true;
    for (const_pred_iterator PI = Succ->pred_begin(), E = Succ->pred_end();
         PI != E; ++PI) {
      MachineBasicBlock *PredBB = *PI;
      if (PredBB == NMBB)
        continue;
      if (!MDT->dominates(SucccDTNode, MDT->getNode(PredBB))) {
        IsNewIDom = false;
        break;
      }
    }

    // We know "this" dominates the newly created basic block.
    MachineDomTreeNode *NewDTNode = MDT->addNewBlock(NMBB, this);

    // If all the other predecessors of "Succ" are dominated by "Succ" itself
    // then the new block is the new immediate dominator of "Succ". Otherwise,
    // the new block doesn't dominate anything.
    if (IsNewIDom)
      MDT->changeImmediateDominator(SucccDTNode, NewDTNode);
  }

  if (MachineLoopInfo *MLI = P->getAnalysisIfAvailable<MachineLoopInfo>())
    if (MachineLoop *TIL = MLI->getLoopFor(this)) {
      // If one or the other blocks were not in a loop, the new block is not
      // either, and thus LI doesn't need to be updated.
      if (MachineLoop *DestLoop = MLI->getLoopFor(Succ)) {
        if (TIL == DestLoop) {
          // Both are in the same loop, so NMBB joins that loop.
          DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
        } else if (TIL->contains(DestLoop)) {
          // Edge from an outer loop to an inner loop.  Add to the outer loop.
          TIL->addBasicBlockToLoop(NMBB, MLI->getBase());
        } else if (DestLoop->contains(TIL)) {
          // Edge from an inner loop to an outer loop.  Add to the outer loop.
          DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
        } else {
          // Edge from two loops with no containment relation.  Because these
          // are natural loops, we know that the destination block must be the
          // header of its loop (adding a branch into a loop elsewhere would
          // create an irreducible loop).
          assert(DestLoop->getHeader() == Succ &&
                 "Should not create irreducible loops!");
          if (MachineLoop *P = DestLoop->getParentLoop())
            P->addBasicBlockToLoop(NMBB, MLI->getBase());
        }
      }
    }

  return NMBB;
}
Example No. 3
/// Try to determine the exact dynamic type of an object.
/// returns the exact dynamic type of the object, or an empty type if the exact
/// type could not be determined.
SILType swift::getExactDynamicType(SILValue S, SILModule &M,
                                   ClassHierarchyAnalysis *CHA,
                                   bool ForUnderlyingObject) {
  // Set of values to be checked for their exact types.
  SmallVector<SILValue, 8> WorkList;
  // The detected type of the underlying object.
  SILType ResultType;
  // Set of processed values.
  llvm::SmallSet<SILValue, 8> Processed;
  WorkList.push_back(S);

  while (!WorkList.empty()) {
    auto V = WorkList.pop_back_val();
    if (!V)
      return SILType();
    if (Processed.count(V))
      continue;
    Processed.insert(V);
    // For underlying object strip casts and projections.
    // For the object itself, simply strip casts.
    V = ForUnderlyingObject ? getUnderlyingObject(V) : stripCasts(V);

    if (isa<AllocRefInst>(V) || isa<MetatypeInst>(V)) {
      if (ResultType && ResultType != V->getType())
        return SILType();
      ResultType = V->getType();
      continue;
    }

    if (isa<LiteralInst>(V)) {
      if (ResultType && ResultType != V->getType())
        return SILType();
      ResultType = V->getType();
      continue;
    }

    if (isa<StructInst>(V) || isa<TupleInst>(V) || isa<EnumInst>(V)) {
      if (ResultType && ResultType != V->getType())
        return SILType();
      ResultType = V->getType();
      continue;
    }

    if (ForUnderlyingObject) {
      if (isa<AllocationInst>(V)) {
        if (ResultType && ResultType != V->getType())
          return SILType();
        ResultType = V->getType();
        continue;
      }
      // Look through strong_pin instructions.
      if (isa<StrongPinInst>(V)) {
        WorkList.push_back(cast<SILInstruction>(V)->getOperand(0));
        continue;
      }
    }

    auto Arg = dyn_cast<SILArgument>(V);
    if (!Arg) {
      // We don't know what it is.
      return SILType();
    }

    if (auto *FArg = dyn_cast<SILFunctionArgument>(Arg)) {
      // Bail on metatypes for now.
      if (FArg->getType().is<AnyMetatypeType>()) {
        return SILType();
      }
      auto *CD = FArg->getType().getClassOrBoundGenericClass();
      // If it is not class and it is a trivial type, then it
      // should be the exact type.
      if (!CD && FArg->getType().isTrivial(M)) {
        if (ResultType && ResultType != FArg->getType())
          return SILType();
        ResultType = FArg->getType();
        continue;
      }

      if (!CD) {
        // It is not a class or a trivial type, so we don't know what it is.
        return SILType();
      }

      // Check if this class is effectively final.
      if (!isKnownFinalClass(CD, M, CHA)) {
        return SILType();
      }

      if (ResultType && ResultType != FArg->getType())
        return SILType();
      ResultType = FArg->getType();
      continue;
    }

    auto *SinglePred = Arg->getParent()->getSinglePredecessorBlock();
    if (SinglePred) {
      // If it is a BB argument received on a success branch
      // of a checked_cast_br, then we know its exact type.
      auto *CCBI = dyn_cast<CheckedCastBranchInst>(SinglePred->getTerminator());
      if (CCBI && CCBI->isExact() && CCBI->getSuccessBB() == Arg->getParent()) {
        if (ResultType && ResultType != Arg->getType())
          return SILType();
        ResultType = Arg->getType();
        continue;
      }
    }

    // It is a BB argument, look through incoming values. If they all have the
    // same exact type, then we consider it to be the type of the BB argument.
    SmallVector<SILValue, 4> IncomingValues;

    if (Arg->getIncomingValues(IncomingValues)) {
      for (auto InValue : IncomingValues) {
        WorkList.push_back(InValue);
      }
      continue;
    }

    // The exact type is unknown.
    return SILType();
  }

  return ResultType;
}
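
Every branch above applies the same merge rule: the first candidate fixes ResultType, and any later candidate that disagrees forces an early bail-out with an empty type. A minimal sketch of that rule, using std::optional and hypothetical string names in place of SILType (illustration only):

#include <cassert>
#include <optional>
#include <string>
#include <vector>

// Illustrative only: fold a list of candidate types into one agreed-upon
// type. Returns std::nullopt as soon as two candidates disagree, mirroring
// the "return SILType()" early exits above.
static std::optional<std::string>
mergeExactType(const std::vector<std::string> &Candidates) {
  std::optional<std::string> Result;
  for (const std::string &C : Candidates) {
    if (Result && *Result != C)
      return std::nullopt; // Conflicting evidence: the exact type is unknown.
    Result = C;
  }
  return Result;
}

int main() {
  assert(mergeExactType({"Foo", "Foo"}) == std::optional<std::string>("Foo"));
  assert(!mergeExactType({"Foo", "Bar"}).has_value());
  return 0;
}
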
Example No. 4
static bool markAliveBlocks(BasicBlock *BB,
                            SmallPtrSet<BasicBlock*, 128> &Reachable) {

  SmallVector<BasicBlock*, 128> Worklist;
  Worklist.push_back(BB);
  bool Changed = false;
  do {
    BB = Worklist.pop_back_val();

    if (!Reachable.insert(BB))
      continue;

    // Do a quick scan of the basic block, turning any obviously unreachable
    // instructions into LLVM unreachable insts.  The instruction combining pass
    // canonicalizes unreachable insts into stores to null or undef.
    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;++BBI){
      if (CallInst *CI = dyn_cast<CallInst>(BBI)) {
        if (CI->doesNotReturn()) {
          // If we found a call to a no-return function, insert an unreachable
          // instruction after it.  Make sure there isn't *already* one there
          // though.
          ++BBI;
          if (!isa<UnreachableInst>(BBI)) {
            // Don't insert a call to llvm.trap right before the unreachable.
            changeToUnreachable(BBI, false);
            Changed = true;
          }
          break;
        }
      }

      // Store to undef and store to null are undefined and used to signal that
      // they should be changed to unreachable by passes that can't modify the
      // CFG.
      if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
        // Don't touch volatile stores.
        if (SI->isVolatile()) continue;

        Value *Ptr = SI->getOperand(1);

        if (isa<UndefValue>(Ptr) ||
            (isa<ConstantPointerNull>(Ptr) &&
             SI->getPointerAddressSpace() == 0)) {
          changeToUnreachable(SI, true);
          Changed = true;
          break;
        }
      }
    }

    // Turn invokes that call 'nounwind' functions into ordinary calls.
    if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      Value *Callee = II->getCalledValue();
      if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
        changeToUnreachable(II, true);
        Changed = true;
      } else if (II->doesNotThrow()) {
        if (II->use_empty() && II->onlyReadsMemory()) {
          // jump to the normal destination branch.
          BranchInst::Create(II->getNormalDest(), II);
          II->getUnwindDest()->removePredecessor(II->getParent());
          II->eraseFromParent();
        } else
          changeToCall(II);
        Changed = true;
      }
    }

    Changed |= ConstantFoldTerminator(BB, true);
    for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
      Worklist.push_back(*SI);
  } while (!Worklist.empty());
  return Changed;
}
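
The driver above is a standard worklist reachability walk: the Reachable.insert(BB) call doubles as the visited test, so successors can be pushed unconditionally and duplicates are filtered at the top of the loop. A self-contained sketch of the same walk over a toy CFG, with hypothetical names in place of BasicBlock (illustration only):

#include <iostream>
#include <set>
#include <vector>

// Illustrative only: a toy CFG node with successor edges.
struct Node {
  int Id;
  std::vector<Node *> Succs;
};

// Collect every node reachable from Entry. The return value of insert() acts
// as the visited test, just as Reachable.insert(BB) does above.
static void markAlive(Node *Entry, std::set<Node *> &Reachable) {
  std::vector<Node *> Worklist{Entry};
  do {
    Node *N = Worklist.back();
    Worklist.pop_back();
    if (!Reachable.insert(N).second)
      continue; // Already visited.
    for (Node *S : N->Succs)
      Worklist.push_back(S);
  } while (!Worklist.empty());
}

int main() {
  Node A{0, {}}, B{1, {}}, C{2, {}}, D{3, {}};
  A.Succs = {&B, &C};
  C.Succs = {&A}; // A cycle does not make the walk loop forever.
  (void)D;        // D has no incoming edge and is never visited.
  std::set<Node *> Reachable;
  markAlive(&A, Reachable);
  std::cout << "reachable nodes: " << Reachable.size() << "\n"; // prints 3
  return 0;
}
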
Example No. 5
/// Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone.
static Attribute::AttrKind
determinePointerReadAttrs(Argument *A,
                          const SmallPtrSet<Argument *, 8> &SCCNodes) {

  SmallVector<Use *, 32> Worklist;
  SmallSet<Use *, 32> Visited;

  // inalloca arguments are always clobbered by the call.
  if (A->hasInAllocaAttr())
    return Attribute::None;

  bool IsRead = false;
  // We don't need to track IsWritten. If A is written to, return immediately.

  for (Use &U : A->uses()) {
    Visited.insert(&U);
    Worklist.push_back(&U);
  }

  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());

    switch (I->getOpcode()) {
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      // The original value is not read/written via this if the new value isn't.
      for (Use &UU : I->uses())
        if (Visited.insert(&UU).second)
          Worklist.push_back(&UU);
      break;

    case Instruction::Call:
    case Instruction::Invoke: {
      bool Captures = true;

      if (I->getType()->isVoidTy())
        Captures = false;

      auto AddUsersToWorklistIfCapturing = [&] {
        if (Captures)
          for (Use &UU : I->uses())
            if (Visited.insert(&UU).second)
              Worklist.push_back(&UU);
      };

      CallSite CS(I);
      if (CS.doesNotAccessMemory()) {
        AddUsersToWorklistIfCapturing();
        continue;
      }

      Function *F = CS.getCalledFunction();
      if (!F) {
        if (CS.onlyReadsMemory()) {
          IsRead = true;
          AddUsersToWorklistIfCapturing();
          continue;
        }
        return Attribute::None;
      }

      // Note: the callee and the two successor blocks *follow* the argument
      // operands.  This means there is no need to adjust UseIndex to account
      // for these.

      unsigned UseIndex = std::distance(CS.arg_begin(), U);

      // U cannot be the callee operand use: since we're exploring the
      // transitive uses of an Argument, having such a use be a callee would
      // imply the CallSite is an indirect call or invoke; and we'd take the
      // early exit above.
      assert(UseIndex < CS.data_operands_size() &&
             "Data operand use expected!");

      bool IsOperandBundleUse = UseIndex >= CS.getNumArgOperands();

      if (UseIndex >= F->arg_size() && !IsOperandBundleUse) {
        assert(F->isVarArg() && "More params than args in non-varargs call");
        return Attribute::None;
      }

      Captures &= !CS.doesNotCapture(UseIndex);

      // Since the optimizer (by design) cannot see the data flow corresponding
      // to an operand bundle use, these cannot participate in the optimistic SCC
      // analysis.  Instead, we model the operand bundle uses as arguments in
      // call to a function external to the SCC.
      if (!SCCNodes.count(&*std::next(F->arg_begin(), UseIndex)) ||
          IsOperandBundleUse) {

        // The accessors used on CallSite here do the right thing for calls and
        // invokes with operand bundles.

        if (!CS.onlyReadsMemory() && !CS.onlyReadsMemory(UseIndex))
          return Attribute::None;
        if (!CS.doesNotAccessMemory(UseIndex))
          IsRead = true;
      }

      AddUsersToWorklistIfCapturing();
      break;
    }

    case Instruction::Load:
      IsRead = true;
      break;

    case Instruction::ICmp:
    case Instruction::Ret:
      break;

    default:
      return Attribute::None;
    }
  }

  return IsRead ? Attribute::ReadOnly : Attribute::ReadNone;
}
Example No. 6
// A soft instruction can be changed to work in other domains given by mask.
void ExeDepsFix::visitSoftInstr(MachineInstr *mi, unsigned mask) {
    // Bitmask of available domains for this instruction after taking collapsed
    // operands into account.
    unsigned available = mask;

    // Scan the explicit use operands for incoming domains.
    SmallVector<int, 4> used;
    if (LiveRegs)
        for (unsigned i = mi->getDesc().getNumDefs(),
                e = mi->getDesc().getNumOperands(); i != e; ++i) {
            MachineOperand &mo = mi->getOperand(i);
            if (!mo.isReg()) continue;
            for (int rx : regIndizes(mo.getReg())) {
                DomainValue *dv = LiveRegs[rx].Value;
                if (dv == nullptr)
                    continue;
                // Bitmask of domains that dv and available have in common.
                unsigned common = dv->getCommonDomains(available);
                // Is it possible to use this collapsed register for free?
                if (dv->isCollapsed()) {
                    // Restrict available domains to the ones in common with the operand.
                    // If there are no common domains, we must pay the cross-domain
                    // penalty for this operand.
                    if (common) available = common;
                } else if (common)
                    // Open DomainValue is compatible, save it for merging.
                    used.push_back(rx);
                else
                    // Open DomainValue is not compatible with instruction. It is useless
                    // now.
                    kill(rx);
            }
        }

    // If the collapsed operands force a single domain, propagate the collapse.
    if (isPowerOf2_32(available)) {
        unsigned domain = countTrailingZeros(available);
        TII->setExecutionDomain(mi, domain);
        visitHardInstr(mi, domain);
        return;
    }

    // Kill off any remaining uses that don't match available, and build a list of
    // incoming DomainValues that we want to merge.
    SmallVector<LiveReg, 4> Regs;
    for (SmallVectorImpl<int>::iterator i=used.begin(), e=used.end(); i!=e; ++i) {
        int rx = *i;
        assert(LiveRegs && "no space allocated for live registers");
        const LiveReg &LR = LiveRegs[rx];
        // This useless DomainValue could have been missed above.
        if (!LR.Value->getCommonDomains(available)) {
            kill(rx);
            continue;
        }
        // Sorted insertion.
        bool Inserted = false;
        for (SmallVectorImpl<LiveReg>::iterator i = Regs.begin(), e = Regs.end();
                i != e && !Inserted; ++i) {
            if (LR.Def < i->Def) {
                Inserted = true;
                Regs.insert(i, LR);
            }
        }
        if (!Inserted)
            Regs.push_back(LR);
    }

    // doms are now sorted in order of appearance. Try to merge them all, giving
    // priority to the latest ones.
    DomainValue *dv = nullptr;
    while (!Regs.empty()) {
        if (!dv) {
            dv = Regs.pop_back_val().Value;
            // Force the first dv to match the current instruction.
            dv->AvailableDomains = dv->getCommonDomains(available);
            assert(dv->AvailableDomains && "Domain should have been filtered");
            continue;
        }

        DomainValue *Latest = Regs.pop_back_val().Value;
        // Skip already merged values.
        if (Latest == dv || Latest->Next)
            continue;
        if (merge(dv, Latest))
            continue;

        // If latest didn't merge, it is useless now. Kill all registers using it.
        for (int i : used) {
            assert(LiveRegs && "no space allocated for live registers");
            if (LiveRegs[i].Value == Latest)
                kill(i);
        }
    }

    // dv is the DomainValue we are going to use for this instruction.
    if (!dv) {
        dv = alloc();
        dv->AvailableDomains = available;
    }
    dv->Instrs.push_back(mi);

    // Finally set all defs and non-collapsed uses to dv. We must iterate through
    // all the operands, including imp-def ones.
    for (MachineInstr::mop_iterator ii = mi->operands_begin(),
            ee = mi->operands_end();
            ii != ee; ++ii) {
        MachineOperand &mo = *ii;
        if (!mo.isReg()) continue;
        for (int rx : regIndizes(mo.getReg())) {
            if (!LiveRegs[rx].Value || (mo.isDef() && LiveRegs[rx].Value != dv)) {
                kill(rx);
                setLiveReg(rx, dv);
            }
        }
    }
}
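
The "Sorted insertion" loop above keeps Regs ordered by each entry's Def slot: it scans for the first element whose Def is larger and inserts in front of it, falling back to push_back. A compact sketch of the same idea on plain integers, with hypothetical names (illustration only; the scan is written so the insertion uses a still-valid iterator):

#include <cassert>
#include <vector>

// Illustrative only: insert Value so that Values stays sorted in ascending
// order, mirroring the manual scan-and-insert over Regs above.
static void sortedInsert(std::vector<int> &Values, int Value) {
  auto I = Values.begin();
  auto E = Values.end();
  // Find the first element larger than Value; insert right before it.
  for (; I != E; ++I)
    if (Value < *I)
      break;
  Values.insert(I, Value); // Falls through to a plain append when I == end().
}

int main() {
  std::vector<int> V;
  for (int X : {5, 1, 3, 2})
    sortedInsert(V, X);
  assert((V == std::vector<int>{1, 2, 3, 5}));
  return 0;
}
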
Example No. 7
MachineBasicBlock *
MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
  // Splitting the critical edge to a landing pad block is non-trivial. Don't do
  // it in this generic function.
  if (Succ->isLandingPad())
    return NULL;

  MachineFunction *MF = getParent();
  DebugLoc dl;  // FIXME: this is nowhere

  // We may need to update this block's terminator, but we can't do that if
  // AnalyzeBranch fails. If this uses a jump table, we won't touch it.
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
  MachineBasicBlock *TBB = 0, *FBB = 0;
  SmallVector<MachineOperand, 4> Cond;
  if (TII->AnalyzeBranch(*this, TBB, FBB, Cond))
    return NULL;

  // Avoid bugpoint weirdness: A block may end with a conditional branch that
  // jumps to the same MBB in either case. We have duplicate CFG edges in that
  // case that we can't handle. Since this never happens in properly optimized
  // code, just skip those edges.
  if (TBB && TBB == FBB) {
    DEBUG(dbgs() << "Won't split critical edge after degenerate BB#"
                 << getNumber() << '\n');
    return NULL;
  }

  MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
  MF->insert(llvm::next(MachineFunction::iterator(this)), NMBB);
  DEBUG(dbgs() << "Splitting critical edge:"
        " BB#" << getNumber()
        << " -- BB#" << NMBB->getNumber()
        << " -- BB#" << Succ->getNumber() << '\n');

  LiveIntervals *LIS = P->getAnalysisIfAvailable<LiveIntervals>();
  SlotIndexes *Indexes = P->getAnalysisIfAvailable<SlotIndexes>();
  if (LIS)
    LIS->insertMBBInMaps(NMBB);
  else if (Indexes)
    Indexes->insertMBBInMaps(NMBB);

  // On some targets like Mips, branches may kill virtual registers. Make sure
  // that LiveVariables is properly updated after updateTerminator replaces the
  // terminators.
  LiveVariables *LV = P->getAnalysisIfAvailable<LiveVariables>();

  // Collect a list of virtual registers killed by the terminators.
  SmallVector<unsigned, 4> KilledRegs;
  if (LV)
    for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
         I != E; ++I) {
      MachineInstr *MI = I;
      for (MachineInstr::mop_iterator OI = MI->operands_begin(),
           OE = MI->operands_end(); OI != OE; ++OI) {
        if (!OI->isReg() || OI->getReg() == 0 ||
            !OI->isUse() || !OI->isKill() || OI->isUndef())
          continue;
        unsigned Reg = OI->getReg();
        if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
            LV->getVarInfo(Reg).removeKill(MI)) {
          KilledRegs.push_back(Reg);
          DEBUG(dbgs() << "Removing terminator kill: " << *MI);
          OI->setIsKill(false);
        }
      }
    }

  SmallVector<unsigned, 4> UsedRegs;
  if (LIS) {
    for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
         I != E; ++I) {
      MachineInstr *MI = I;

      for (MachineInstr::mop_iterator OI = MI->operands_begin(),
           OE = MI->operands_end(); OI != OE; ++OI) {
        if (!OI->isReg() || OI->getReg() == 0)
          continue;

        unsigned Reg = OI->getReg();
        if (std::find(UsedRegs.begin(), UsedRegs.end(), Reg) == UsedRegs.end())
          UsedRegs.push_back(Reg);
      }
    }
  }

  ReplaceUsesOfBlockWith(Succ, NMBB);

  // If updateTerminator() removes instructions, we need to remove them from
  // SlotIndexes.
  SmallVector<MachineInstr*, 4> Terminators;
  if (Indexes) {
    for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
         I != E; ++I)
      Terminators.push_back(I);
  }

  updateTerminator();

  if (Indexes) {
    SmallVector<MachineInstr*, 4> NewTerminators;
    for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
         I != E; ++I)
      NewTerminators.push_back(I);

    for (SmallVectorImpl<MachineInstr*>::iterator I = Terminators.begin(),
        E = Terminators.end(); I != E; ++I) {
      if (std::find(NewTerminators.begin(), NewTerminators.end(), *I) ==
          NewTerminators.end())
       Indexes->removeMachineInstrFromMaps(*I);
    }
  }

  // Insert unconditional "jump Succ" instruction in NMBB if necessary.
  NMBB->addSuccessor(Succ);
  if (!NMBB->isLayoutSuccessor(Succ)) {
    Cond.clear();
    MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, Succ, NULL, Cond, dl);

    if (Indexes) {
      for (instr_iterator I = NMBB->instr_begin(), E = NMBB->instr_end();
           I != E; ++I) {
        // Some instructions may have been moved to NMBB by updateTerminator(),
        // so we first remove any instruction that already has an index.
        if (Indexes->hasIndex(I))
          Indexes->removeMachineInstrFromMaps(I);
        Indexes->insertMachineInstrInMaps(I);
      }
    }
  }

  // Fix PHI nodes in Succ so they refer to NMBB instead of this
  for (MachineBasicBlock::instr_iterator
         i = Succ->instr_begin(),e = Succ->instr_end();
       i != e && i->isPHI(); ++i)
    for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
      if (i->getOperand(ni+1).getMBB() == this)
        i->getOperand(ni+1).setMBB(NMBB);

  // Inherit live-ins from the successor
  for (MachineBasicBlock::livein_iterator I = Succ->livein_begin(),
         E = Succ->livein_end(); I != E; ++I)
    NMBB->addLiveIn(*I);

  // Update LiveVariables.
  const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
  if (LV) {
    // Restore kills of virtual registers that were killed by the terminators.
    while (!KilledRegs.empty()) {
      unsigned Reg = KilledRegs.pop_back_val();
      for (instr_iterator I = instr_end(), E = instr_begin(); I != E;) {
        if (!(--I)->addRegisterKilled(Reg, TRI, /* addIfNotFound= */ false))
          continue;
        if (TargetRegisterInfo::isVirtualRegister(Reg))
          LV->getVarInfo(Reg).Kills.push_back(I);
        DEBUG(dbgs() << "Restored terminator kill: " << *I);
        break;
      }
    }
    // Update relevant live-through information.
    LV->addNewBlock(NMBB, this, Succ);
  }

  if (LIS) {
    // After splitting the edge and updating SlotIndexes, live intervals may be
    // in one of two situations, depending on whether this block was the last in
    // the function. If the original block was the last in the function, all live
    // intervals will end prior to the beginning of the new split block. If the
    // original block was not at the end of the function, all live intervals will
    // extend to the end of the new split block.

    bool isLastMBB =
      llvm::next(MachineFunction::iterator(NMBB)) == getParent()->end();

    SlotIndex StartIndex = Indexes->getMBBEndIdx(this);
    SlotIndex PrevIndex = StartIndex.getPrevSlot();
    SlotIndex EndIndex = Indexes->getMBBEndIdx(NMBB);

    // Find the registers used from NMBB in PHIs in Succ.
    SmallSet<unsigned, 8> PHISrcRegs;
    for (MachineBasicBlock::instr_iterator
         I = Succ->instr_begin(), E = Succ->instr_end();
         I != E && I->isPHI(); ++I) {
      for (unsigned ni = 1, ne = I->getNumOperands(); ni != ne; ni += 2) {
        if (I->getOperand(ni+1).getMBB() == NMBB) {
          MachineOperand &MO = I->getOperand(ni);
          unsigned Reg = MO.getReg();
          PHISrcRegs.insert(Reg);
          if (MO.isUndef())
            continue;

          LiveInterval &LI = LIS->getInterval(Reg);
          VNInfo *VNI = LI.getVNInfoAt(PrevIndex);
          assert(VNI && "PHI sources should be live out of their predecessors.");
          LI.addRange(LiveRange(StartIndex, EndIndex, VNI));
        }
      }
    }

    MachineRegisterInfo *MRI = &getParent()->getRegInfo();
    for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
      unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
      if (PHISrcRegs.count(Reg) || !LIS->hasInterval(Reg))
        continue;

      LiveInterval &LI = LIS->getInterval(Reg);
      if (!LI.liveAt(PrevIndex))
        continue;

      bool isLiveOut = LI.liveAt(LIS->getMBBStartIdx(Succ));
      if (isLiveOut && isLastMBB) {
        VNInfo *VNI = LI.getVNInfoAt(PrevIndex);
        assert(VNI && "LiveInterval should have VNInfo where it is live.");
        LI.addRange(LiveRange(StartIndex, EndIndex, VNI));
      } else if (!isLiveOut && !isLastMBB) {
        LI.removeRange(StartIndex, EndIndex);
      }
    }

    // Update all intervals for registers whose uses may have been modified by
    // updateTerminator().
    LIS->repairIntervalsInRange(this, getFirstTerminator(), end(), UsedRegs);
  }

  if (MachineDominatorTree *MDT =
      P->getAnalysisIfAvailable<MachineDominatorTree>()) {
    // Update dominator information.
    MachineDomTreeNode *SucccDTNode = MDT->getNode(Succ);

    bool IsNewIDom = true;
    for (const_pred_iterator PI = Succ->pred_begin(), E = Succ->pred_end();
         PI != E; ++PI) {
      MachineBasicBlock *PredBB = *PI;
      if (PredBB == NMBB)
        continue;
      if (!MDT->dominates(SucccDTNode, MDT->getNode(PredBB))) {
        IsNewIDom = false;
        break;
      }
    }

    // We know "this" dominates the newly created basic block.
    MachineDomTreeNode *NewDTNode = MDT->addNewBlock(NMBB, this);

    // If all the other predecessors of "Succ" are dominated by "Succ" itself
    // then the new block is the new immediate dominator of "Succ". Otherwise,
    // the new block doesn't dominate anything.
    if (IsNewIDom)
      MDT->changeImmediateDominator(SucccDTNode, NewDTNode);
  }

  if (MachineLoopInfo *MLI = P->getAnalysisIfAvailable<MachineLoopInfo>())
    if (MachineLoop *TIL = MLI->getLoopFor(this)) {
      // If one or the other blocks were not in a loop, the new block is not
      // either, and thus LI doesn't need to be updated.
      if (MachineLoop *DestLoop = MLI->getLoopFor(Succ)) {
        if (TIL == DestLoop) {
          // Both are in the same loop, so NMBB joins that loop.
          DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
        } else if (TIL->contains(DestLoop)) {
          // Edge from an outer loop to an inner loop.  Add to the outer loop.
          TIL->addBasicBlockToLoop(NMBB, MLI->getBase());
        } else if (DestLoop->contains(TIL)) {
          // Edge from an inner loop to an outer loop.  Add to the outer loop.
          DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
        } else {
          // Edge from two loops with no containment relation.  Because these
          // are natural loops, we know that the destination block must be the
          // header of its loop (adding a branch into a loop elsewhere would
          // create an irreducible loop).
          assert(DestLoop->getHeader() == Succ &&
                 "Should not create irreducible loops!");
          if (MachineLoop *P = DestLoop->getParentLoop())
            P->addBasicBlockToLoop(NMBB, MLI->getBase());
        }
      }
    }

  return NMBB;
}
Example No. 8
// Builds the graph + StratifiedSets for a function.
CFLAAResult::FunctionInfo CFLAAResult::buildSetsFrom(Function *Fn) {
  CFLGraphBuilder GraphBuilder(*this, TLI, *Fn);
  StratifiedSetsBuilder<Value *> SetBuilder;

  auto &Graph = GraphBuilder.getCFLGraph();
  SmallVector<Value *, 16> Worklist;
  for (auto Node : Graph.nodes())
    Worklist.push_back(Node);

  while (!Worklist.empty()) {
    auto *CurValue = Worklist.pop_back_val();
    SetBuilder.add(CurValue);
    if (canSkipAddingToSets(CurValue))
      continue;

    auto Attr = Graph.attrFor(CurValue);
    SetBuilder.noteAttributes(CurValue, Attr);

    for (const auto &Edge : Graph.edgesFor(CurValue)) {
      auto Label = Edge.Type;
      auto *OtherValue = Edge.Other;

      if (canSkipAddingToSets(OtherValue))
        continue;

      bool Added;
      switch (directionOfEdgeType(Label)) {
      case Level::Above:
        Added = SetBuilder.addAbove(CurValue, OtherValue);
        break;
      case Level::Below:
        Added = SetBuilder.addBelow(CurValue, OtherValue);
        break;
      case Level::Same:
        Added = SetBuilder.addWith(CurValue, OtherValue);
        break;
      }

      if (Added)
        Worklist.push_back(OtherValue);
    }
  }

  // Special handling for globals and arguments
  for (auto *External : GraphBuilder.getExternalValues()) {
    SetBuilder.add(External);
    auto Attr = valueToAttr(External);
    if (Attr.hasValue()) {
      SetBuilder.noteAttributes(External, *Attr);
      if (*Attr == AttrGlobal)
        SetBuilder.addAttributesBelow(External, 1, AttrUnknown);
      else
        SetBuilder.addAttributesBelow(External, 1, AttrCaller);
    }
  }

  // Special handling for interprocedural aliases
  for (auto &Edge : GraphBuilder.getInterprocEdges()) {
    auto FromVal = Edge.From.Val;
    auto ToVal = Edge.To.Val;
    SetBuilder.add(FromVal);
    SetBuilder.add(ToVal);
    SetBuilder.addBelowWith(FromVal, Edge.From.DerefLevel, ToVal,
                            Edge.To.DerefLevel);
  }

  // Special handling for interprocedural attributes
  for (auto &IPAttr : GraphBuilder.getInterprocAttrs()) {
    auto Val = IPAttr.Node.Val;
    SetBuilder.add(Val);
    SetBuilder.addAttributesBelow(Val, IPAttr.Node.DerefLevel, IPAttr.Attr);
  }

  // Special handling for opaque external functions
  for (auto *Escape : GraphBuilder.getEscapedValues()) {
    SetBuilder.add(Escape);
    SetBuilder.noteAttributes(Escape, AttrEscaped);
    SetBuilder.addAttributesBelow(Escape, 1, AttrUnknown);
  }

  return FunctionInfo(*Fn, GraphBuilder.getReturnValues(), SetBuilder.build());
}
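
Note that the loop above only re-queues OtherValue when the StratifiedSets builder reports that the edge actually changed something; that is what makes this a fixpoint iteration that terminates rather than a plain graph walk. A small sketch of the same re-push-only-on-change discipline, propagating minimum labels over a toy graph (hypothetical names, illustration only):

#include <iostream>
#include <map>
#include <vector>

// Illustrative only: propagate the smallest label seen along edges until
// nothing changes. A node is re-queued only when its label actually improves,
// mirroring the "if (Added) Worklist.push_back(OtherValue)" step above.
int main() {
  // Edges of a small undirected graph: 0-1, 1-2, 2-3.
  std::map<int, std::vector<int>> Edges = {
      {0, {1}}, {1, {0, 2}}, {2, {1, 3}}, {3, {2}}};
  std::map<int, int> Label = {{0, 0}, {1, 1}, {2, 2}, {3, 3}};

  std::vector<int> Worklist = {0, 1, 2, 3};
  while (!Worklist.empty()) {
    int N = Worklist.back();
    Worklist.pop_back();
    for (int M : Edges[N]) {
      if (Label[N] < Label[M]) {
        Label[M] = Label[N];   // The edge improved M's label...
        Worklist.push_back(M); // ...so M's own edges must be revisited.
      }
    }
  }

  for (const auto &KV : Label)
    std::cout << "node " << KV.first << " -> label " << KV.second << "\n";
  return 0; // Every label converges to 0, the minimum in the component.
}
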
Example No. 9
  void print(raw_ostream &OS, FuncIdConversionHelper &FN,
             RootVector RootValues) {
    // Go through each of the roots, and traverse the call stack, producing the
    // aggregates as you go along. Remember these aggregates and stacks, and
    // show summary statistics about:
    //
    //   - Total number of unique stacks
    //   - Top 10 stacks by count
    //   - Top 10 stacks by aggregate duration
    SmallVector<std::pair<const StackTrieNode *, uint64_t>, 11>
        TopStacksByCount;
    SmallVector<std::pair<const StackTrieNode *, uint64_t>, 11> TopStacksBySum;
    auto greater_second =
        [](const std::pair<const StackTrieNode *, uint64_t> &A,
           const std::pair<const StackTrieNode *, uint64_t> &B) {
          return A.second > B.second;
        };
    uint64_t UniqueStacks = 0;
    for (const auto *N : RootValues) {
      SmallVector<const StackTrieNode *, 16> S;
      S.emplace_back(N);

      while (!S.empty()) {
        auto *Top = S.pop_back_val();

        // We only start printing the stack (by walking up the parent pointers)
        // when we get to a leaf function.
        if (!Top->ExtraData.TerminalDurations.empty()) {
          ++UniqueStacks;
          auto TopSum =
              std::accumulate(Top->ExtraData.TerminalDurations.begin(),
                              Top->ExtraData.TerminalDurations.end(), 0uLL);
          {
            auto E = std::make_pair(Top, TopSum);
            TopStacksBySum.insert(std::lower_bound(TopStacksBySum.begin(),
                                                   TopStacksBySum.end(), E,
                                                   greater_second),
                                  E);
            if (TopStacksBySum.size() == 11)
              TopStacksBySum.pop_back();
          }
          {
            auto E =
                std::make_pair(Top, Top->ExtraData.TerminalDurations.size());
            TopStacksByCount.insert(std::lower_bound(TopStacksByCount.begin(),
                                                     TopStacksByCount.end(), E,
                                                     greater_second),
                                    E);
            if (TopStacksByCount.size() == 11)
              TopStacksByCount.pop_back();
          }
        }
        for (const auto *C : Top->Callees)
          S.push_back(C);
      }
    }

    // Now print the statistics in the end.
    OS << "\n";
    OS << "Unique Stacks: " << UniqueStacks << "\n";
    OS << "Top 10 Stacks by leaf sum:\n\n";
    for (const auto &P : TopStacksBySum) {
      OS << "Sum: " << P.second << "\n";
      printStack(OS, P.first, FN);
    }
    OS << "\n";
    OS << "Top 10 Stacks by leaf count:\n\n";
    for (const auto &P : TopStacksByCount) {
      OS << "Count: " << P.second << "\n";
      printStack(OS, P.first, FN);
    }
    OS << "\n";
  }
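
Both top-10 lists above are maintained the same way: each new pair is inserted at its sorted position with std::lower_bound and a descending comparator, and the eleventh element is popped so only the ten largest survive. A minimal sketch of that bounded top-K pattern with hypothetical names (illustration only):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative only: keep the K largest values seen so far, sorted in
// descending order. The vector never holds more than K elements.
template <typename T>
static void recordTopK(std::vector<T> &Top, const T &Value, std::size_t K) {
  auto Pos = std::lower_bound(Top.begin(), Top.end(), Value,
                              [](const T &A, const T &B) { return A > B; });
  Top.insert(Pos, Value);
  if (Top.size() > K)
    Top.pop_back(); // Drop the smallest of the K+1 candidates.
}

int main() {
  std::vector<std::uint64_t> Top;
  for (std::uint64_t V : {5u, 1u, 9u, 7u, 3u})
    recordTopK(Top, V, 3);
  for (std::uint64_t V : Top)
    std::cout << V << ' '; // prints: 9 7 5
  std::cout << '\n';
  return 0;
}
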
Example No. 10
void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker) {
  assert(V->getType()->isPointerTy() && "Capture is for pointers only!");
  SmallVector<const Use *, Threshold> Worklist;
  SmallSet<const Use *, Threshold> Visited;
  int Count = 0;

  for (const Use &U : V->uses()) {
    // If there are lots of uses, conservatively say that the value
    // is captured to avoid taking too much compile time.
    if (Count++ >= Threshold)
      return Tracker->tooManyUses();

    if (!Tracker->shouldExplore(&U)) continue;
    Visited.insert(&U);
    Worklist.push_back(&U);
  }

  while (!Worklist.empty()) {
    const Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());
    V = U->get();

    switch (I->getOpcode()) {
    case Instruction::Call:
    case Instruction::Invoke: {
      CallSite CS(I);
      // Not captured if the callee is readonly, doesn't return a copy through
      // its return value and doesn't unwind (a readonly function can leak bits
      // by throwing an exception or not depending on the input value).
      if (CS.onlyReadsMemory() && CS.doesNotThrow() && I->getType()->isVoidTy())
        break;

      // Volatile operations effectively capture the memory location that they
      // load and store to.
      if (auto *MI = dyn_cast<MemIntrinsic>(I))
        if (MI->isVolatile())
          if (Tracker->captured(U))
            return;

      // Not captured if only passed via 'nocapture' arguments.  Note that
      // calling a function pointer does not in itself cause the pointer to
      // be captured.  This is a subtle point considering that (for example)
      // the callee might return its own address.  It is analogous to saying
      // that loading a value from a pointer does not cause the pointer to be
      // captured, even though the loaded value might be the pointer itself
      // (think of self-referential objects).
      CallSite::data_operand_iterator B =
        CS.data_operands_begin(), E = CS.data_operands_end();
      for (CallSite::data_operand_iterator A = B; A != E; ++A)
        if (A->get() == V && !CS.doesNotCapture(A - B))
          // The parameter is not marked 'nocapture' - captured.
          if (Tracker->captured(U))
            return;
      break;
    }
    case Instruction::Load:
      // Volatile loads make the address observable.
      if (cast<LoadInst>(I)->isVolatile())
        if (Tracker->captured(U))
          return;
      break;
    case Instruction::VAArg:
      // "va-arg" from a pointer does not cause it to be captured.
      break;
    case Instruction::Store:
      // Stored the pointer - conservatively assume it may be captured.
      // Volatile stores make the address observable.
      if (V == I->getOperand(0) || cast<StoreInst>(I)->isVolatile())
        if (Tracker->captured(U))
          return;
      break;
    case Instruction::AtomicRMW: {
      // atomicrmw conceptually includes both a load and store from
      // the same location.
      // As with a store, the location being accessed is not captured,
      // but the value being stored is.
      // Volatile stores make the address observable.
      auto *ARMWI = cast<AtomicRMWInst>(I);
      if (ARMWI->getValOperand() == V || ARMWI->isVolatile())
        if (Tracker->captured(U))
          return;
      break;
    }
    case Instruction::AtomicCmpXchg: {
      // cmpxchg conceptually includes both a load and store from
      // the same location.
      // As with a store, the location being accessed is not captured,
      // but the value being stored is.
      // Volatile stores make the address observable.
      auto *ACXI = cast<AtomicCmpXchgInst>(I);
      if (ACXI->getCompareOperand() == V || ACXI->getNewValOperand() == V ||
          ACXI->isVolatile())
        if (Tracker->captured(U))
          return;
      break;
    }
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      // The original value is not captured via this if the new value isn't.
      Count = 0;
      for (Use &UU : I->uses()) {
        // If there are lots of uses, conservatively say that the value
        // is captured to avoid taking too much compile time.
        if (Count++ >= Threshold)
          return Tracker->tooManyUses();

        if (Visited.insert(&UU).second)
          if (Tracker->shouldExplore(&UU))
            Worklist.push_back(&UU);
      }
      break;
    case Instruction::ICmp: {
      // Don't count comparisons of a no-alias return value against null as
      // captures. This allows us to ignore comparisons of malloc results
      // with null, for example.
      if (ConstantPointerNull *CPN =
          dyn_cast<ConstantPointerNull>(I->getOperand(1)))
        if (CPN->getType()->getAddressSpace() == 0)
          if (isNoAliasCall(V->stripPointerCasts()))
            break;
      // Comparison against value stored in global variable. Given the pointer
      // does not escape, its value cannot be guessed and stored separately in a
      // global variable.
      unsigned OtherIndex = (I->getOperand(0) == V) ? 1 : 0;
      auto *LI = dyn_cast<LoadInst>(I->getOperand(OtherIndex));
      if (LI && isa<GlobalVariable>(LI->getPointerOperand()))
        break;
      // Otherwise, be conservative. There are crazy ways to capture pointers
      // using comparisons.
      if (Tracker->captured(U))
        return;
      break;
    }
    default:
      // Something else - be conservative and say it is captured.
      if (Tracker->captured(U))
        return;
      break;
    }
  }

  // All uses examined.
}
Example No. 11
/// Walk the specified loop in the CFG (defined by all blocks dominated by the
/// specified header block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
void MachineLICM::HoistOutOfLoop(MachineDomTreeNode *HeaderN) {
  MachineBasicBlock *Preheader = getCurPreheader();
  if (!Preheader)
    return;

  SmallVector<MachineDomTreeNode*, 32> Scopes;
  SmallVector<MachineDomTreeNode*, 8> WorkList;
  DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> ParentMap;
  DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;

  // Perform a DFS walk to determine the order of visit.
  WorkList.push_back(HeaderN);
  while (!WorkList.empty()) {
    MachineDomTreeNode *Node = WorkList.pop_back_val();
    assert(Node && "Null dominator tree node?");
    MachineBasicBlock *BB = Node->getBlock();

    // If the header of the loop containing this basic block is a landing pad,
    // then don't try to hoist instructions out of this loop.
    const MachineLoop *ML = MLI->getLoopFor(BB);
    if (ML && ML->getHeader()->isEHPad())
      continue;

    // If this subregion is not in the top level loop at all, exit.
    if (!CurLoop->contains(BB))
      continue;

    Scopes.push_back(Node);
    const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
    unsigned NumChildren = Children.size();

    // Don't hoist things out of a large switch statement.  This often causes
    // code to be hoisted that wasn't going to be executed, and increases
    // register pressure in a situation where it's likely to matter.
    if (BB->succ_size() >= 25)
      NumChildren = 0;

    OpenChildren[Node] = NumChildren;
    // Add children in reverse order as then the next popped worklist node is
    // the first child of this node.  This means we ultimately traverse the
    // DOM tree in exactly the same order as if we'd recursed.
    for (int i = (int)NumChildren-1; i >= 0; --i) {
      MachineDomTreeNode *Child = Children[i];
      ParentMap[Child] = Node;
      WorkList.push_back(Child);
    }
  }

  if (Scopes.size() == 0)
    return;

  // Compute registers which are livein into the loop headers.
  RegSeen.clear();
  BackTrace.clear();
  InitRegPressure(Preheader);

  // Now perform LICM.
  for (MachineDomTreeNode *Node : Scopes) {
    MachineBasicBlock *MBB = Node->getBlock();

    EnterScope(MBB);

    // Process the block
    SpeculationState = SpeculateUnknown;
    for (MachineBasicBlock::iterator
         MII = MBB->begin(), E = MBB->end(); MII != E; ) {
      MachineBasicBlock::iterator NextMII = MII; ++NextMII;
      MachineInstr *MI = &*MII;
      if (!Hoist(MI, Preheader))
        UpdateRegPressure(MI);
      MII = NextMII;
    }

    // If it's a leaf node, it's done. Traverse upwards to pop ancestors.
    ExitScopeIfDone(Node, OpenChildren, ParentMap);
  }
}
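
Pushing children in reverse order is the detail that makes the explicit worklist visit blocks in the same order a recursive pre-order walk would: the first child ends up on top of the stack and is popped first. A short self-contained sketch of that trick on a small tree, with hypothetical names (illustration only):

#include <iostream>
#include <vector>

// Illustrative only: a tiny tree node.
struct TreeNode {
  char Name;
  std::vector<TreeNode *> Children;
};

int main() {
  TreeNode D{'D', {}}, E{'E', {}}, C{'C', {}};
  TreeNode B{'B', {&D, &E}};
  TreeNode A{'A', {&B, &C}};

  // Iterative pre-order DFS: push children in reverse so the first child is
  // popped next, matching the order a recursive visit would produce.
  std::vector<TreeNode *> Worklist{&A};
  while (!Worklist.empty()) {
    TreeNode *N = Worklist.back();
    Worklist.pop_back();
    std::cout << N->Name << ' ';
    for (auto I = N->Children.rbegin(), REnd = N->Children.rend(); I != REnd; ++I)
      Worklist.push_back(*I);
  }
  std::cout << '\n'; // prints: A B D E C
  return 0;
}
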
Example No. 12
bool ObjCARCContract::runOnFunction(Function &F) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  if (!Run)
    return false;

  Changed = false;
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  PA.setAA(&getAnalysis<AAResultsWrapperPass>().getAAResults());

  DenseMap<BasicBlock *, ColorVector> BlockColors;
  if (F.hasPersonalityFn() &&
      isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    BlockColors = colorEHFunclets(F);

  LLVM_DEBUG(llvm::dbgs() << "**** ObjCARC Contract ****\n");

  // Track whether it's ok to mark objc_storeStrong calls with the "tail"
  // keyword. Be conservative if the function has variadic arguments.
  // It seems that functions which "return twice" are also unsafe for the
  // "tail" argument, because they are setjmp, which could need to
  // return to an earlier stack state.
  bool TailOkForStoreStrongs =
      !F.isVarArg() && !F.callsFunctionThatReturnsTwice();

  // For ObjC library calls which return their argument, replace uses of the
  // argument with uses of the call return value, if it dominates the use. This
  // reduces register pressure.
  SmallPtrSet<Instruction *, 4> DependingInstructions;
  SmallPtrSet<const BasicBlock *, 4> Visited;
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E;) {
    Instruction *Inst = &*I++;

    LLVM_DEBUG(dbgs() << "Visiting: " << *Inst << "\n");

    // First try to peephole Inst. If there is nothing further we can do in
    // terms of undoing objc-arc-expand, process the next inst.
    if (tryToPeepholeInstruction(F, Inst, I, DependingInstructions, Visited,
                                 TailOkForStoreStrongs, BlockColors))
      continue;

    // Otherwise, try to undo objc-arc-expand.

    // Don't use GetArgRCIdentityRoot because we don't want to look through bitcasts
    // and such; to do the replacement, the argument must have type i8*.

    // Function for replacing uses of Arg dominated by Inst.
    auto ReplaceArgUses = [Inst, this](Value *Arg) {
      // If we're compiling bugpointed code, don't get in trouble.
      if (!isa<Instruction>(Arg) && !isa<Argument>(Arg))
        return;

      // Look through the uses of the pointer.
      for (Value::use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
           UI != UE; ) {
        // Increment UI now, because we may unlink its element.
        Use &U = *UI++;
        unsigned OperandNo = U.getOperandNo();

        // If the call's return value dominates a use of the call's argument
        // value, rewrite the use to use the return value. We check for
        // reachability here because an unreachable call is considered to
        // trivially dominate itself, which would lead us to rewriting its
        // argument in terms of its return value, which would lead to
        // infinite loops in GetArgRCIdentityRoot.
        if (!DT->isReachableFromEntry(U) || !DT->dominates(Inst, U))
          continue;

        Changed = true;
        Instruction *Replacement = Inst;
        Type *UseTy = U.get()->getType();
        if (PHINode *PHI = dyn_cast<PHINode>(U.getUser())) {
          // For PHI nodes, insert the bitcast in the predecessor block.
          unsigned ValNo = PHINode::getIncomingValueNumForOperand(OperandNo);
          BasicBlock *IncomingBB = PHI->getIncomingBlock(ValNo);
          if (Replacement->getType() != UseTy) {
            // A catchswitch is both a pad and a terminator, meaning a basic
            // block with a catchswitch has no insertion point. Keep going up
            // the dominator tree until we find a non-catchswitch.
            BasicBlock *InsertBB = IncomingBB;
            while (isa<CatchSwitchInst>(InsertBB->getFirstNonPHI())) {
              InsertBB = DT->getNode(InsertBB)->getIDom()->getBlock();
            }

            assert(DT->dominates(Inst, &InsertBB->back()) &&
                   "Invalid insertion point for bitcast");
            Replacement =
                new BitCastInst(Replacement, UseTy, "", &InsertBB->back());
          }

          // While we're here, rewrite all edges for this PHI, rather
          // than just one use at a time, to minimize the number of
          // bitcasts we emit.
          for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i)
            if (PHI->getIncomingBlock(i) == IncomingBB) {
              // Keep the UI iterator valid.
              if (UI != UE &&
                  &PHI->getOperandUse(
                      PHINode::getOperandNumForIncomingValue(i)) == &*UI)
                ++UI;
              PHI->setIncomingValue(i, Replacement);
            }
        } else {
          if (Replacement->getType() != UseTy)
            Replacement = new BitCastInst(Replacement, UseTy, "",
                                          cast<Instruction>(U.getUser()));
          U.set(Replacement);
        }
      }
    };


    Value *Arg = cast<CallInst>(Inst)->getArgOperand(0);
    Value *OrigArg = Arg;

    // TODO: Change this to a do-while.
    for (;;) {
      ReplaceArgUses(Arg);

      // If Arg is a no-op casted pointer, strip one level of casts and iterate.
      if (const BitCastInst *BI = dyn_cast<BitCastInst>(Arg))
        Arg = BI->getOperand(0);
      else if (isa<GEPOperator>(Arg) &&
               cast<GEPOperator>(Arg)->hasAllZeroIndices())
        Arg = cast<GEPOperator>(Arg)->getPointerOperand();
      else if (isa<GlobalAlias>(Arg) &&
               !cast<GlobalAlias>(Arg)->isInterposable())
        Arg = cast<GlobalAlias>(Arg)->getAliasee();
      else {
        // If Arg is a PHI node, get PHIs that are equivalent to it and replace
        // their uses.
        if (PHINode *PN = dyn_cast<PHINode>(Arg)) {
          SmallVector<Value *, 1> PHIList;
          getEquivalentPHIs(*PN, PHIList);
          for (Value *PHI : PHIList)
            ReplaceArgUses(PHI);
        }
        break;
      }
    }

    // Replace bitcast users of Arg that are dominated by Inst.
    SmallVector<BitCastInst *, 2> BitCastUsers;

    // Add all bitcast users of the function argument first.
    for (User *U : OrigArg->users())
      if (auto *BC = dyn_cast<BitCastInst>(U))
        BitCastUsers.push_back(BC);

    // Replace the bitcasts with the call return. Iterate until list is empty.
    while (!BitCastUsers.empty()) {
      auto *BC = BitCastUsers.pop_back_val();
      for (User *U : BC->users())
        if (auto *B = dyn_cast<BitCastInst>(U))
          BitCastUsers.push_back(B);

      ReplaceArgUses(BC);
    }
  }

  // If this function has no escaping allocas or suspicious vararg usage,
  // objc_storeStrong calls can be marked with the "tail" keyword.
  if (TailOkForStoreStrongs)
    for (CallInst *CI : StoreStrongCalls)
      CI->setTailCall();
  StoreStrongCalls.clear();

  return Changed;
}
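The bitcast-user loop just above is a small instance of a common worklist idiom: seed the list with the direct users of interest, then keep popping entries and pushing any newly discovered ones until the list drains. Below is a minimal stand-alone sketch of that idiom, assuming only the LLVM headers shown; the helper name collectTransitiveBitCastUsers is invented for illustration and is not part of this pass.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Gather every BitCastInst reachable through chains of bitcasts starting at V.
static SmallVector<BitCastInst *, 8> collectTransitiveBitCastUsers(Value *V) {
  SmallVector<BitCastInst *, 8> Found;
  SmallVector<BitCastInst *, 8> Worklist;
  // Seed the worklist with the direct bitcast users of V.
  for (User *U : V->users())
    if (auto *BC = dyn_cast<BitCastInst>(U))
      Worklist.push_back(BC);
  // Pop one bitcast at a time and queue any bitcasts of it.
  while (!Worklist.empty()) {
    BitCastInst *BC = Worklist.pop_back_val();
    Found.push_back(BC);
    for (User *U : BC->users())
      if (auto *Inner = dyn_cast<BitCastInst>(U))
        Worklist.push_back(Inner);
  }
  return Found;
}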
Example No. 13
const CodeGenRegister::SubRegMap &
CodeGenRegister::getSubRegs(CodeGenRegBank &RegBank) {
  // Only compute this map once.
  if (SubRegsComplete)
    return SubRegs;
  SubRegsComplete = true;

  std::vector<Record*> SubList = TheDef->getValueAsListOfDefs("SubRegs");
  std::vector<Record*> IdxList = TheDef->getValueAsListOfDefs("SubRegIndices");
  if (SubList.size() != IdxList.size())
    throw TGError(TheDef->getLoc(), "Register " + getName() +
                  " SubRegIndices doesn't match SubRegs");

  // First insert the direct subregs and make sure they are fully indexed.
  SmallVector<CodeGenSubRegIndex*, 8> Indices;
  for (unsigned i = 0, e = SubList.size(); i != e; ++i) {
    CodeGenRegister *SR = RegBank.getReg(SubList[i]);
    CodeGenSubRegIndex *Idx = RegBank.getSubRegIdx(IdxList[i]);
    Indices.push_back(Idx);
    if (!SubRegs.insert(std::make_pair(Idx, SR)).second)
      throw TGError(TheDef->getLoc(), "SubRegIndex " + Idx->getName() +
                    " appears twice in Register " + getName());
  }

  // Keep track of inherited subregs and how they can be reached.
  SmallPtrSet<CodeGenRegister*, 8> Orphans;

  // Clone inherited subregs and place duplicate entries in Orphans.
  // Here the order is important - earlier subregs take precedence.
  for (unsigned i = 0, e = SubList.size(); i != e; ++i) {
    CodeGenRegister *SR = RegBank.getReg(SubList[i]);
    const SubRegMap &Map = SR->getSubRegs(RegBank);

    // Add this as a super-register of SR now that all sub-registers are in the
    // list. This creates a topological ordering; the exact order depends on the
    // order in which getSubRegs is called on all registers.
    SR->SuperRegs.push_back(this);

    for (SubRegMap::const_iterator SI = Map.begin(), SE = Map.end(); SI != SE;
         ++SI) {
      if (!SubRegs.insert(*SI).second)
        Orphans.insert(SI->second);

      // Noop sub-register indexes are possible, so avoid duplicates.
      if (SI->second != SR)
        SI->second->SuperRegs.push_back(this);
    }
  }

  // Expand any composed subreg indices.
  // If dsub_2 has ComposedOf = [qsub_1, dsub_0], and this register has a
  // qsub_1 subreg, add a dsub_2 subreg.  Keep growing Indices and process
  // expanded subreg indices recursively.
  for (unsigned i = 0; i != Indices.size(); ++i) {
    CodeGenSubRegIndex *Idx = Indices[i];
    const CodeGenSubRegIndex::CompMap &Comps = Idx->getComposites();
    CodeGenRegister *SR = SubRegs[Idx];
    const SubRegMap &Map = SR->getSubRegs(RegBank);

    // Look at the possible compositions of Idx.
    // They may not all be supported by SR.
    for (CodeGenSubRegIndex::CompMap::const_iterator I = Comps.begin(),
           E = Comps.end(); I != E; ++I) {
      SubRegMap::const_iterator SRI = Map.find(I->first);
      if (SRI == Map.end())
        continue; // Idx + I->first doesn't exist in SR.
      // Add I->second as a name for the subreg SRI->second, assuming it is
      // orphaned, and the name isn't already used for something else.
      if (SubRegs.count(I->second) || !Orphans.erase(SRI->second))
        continue;
      // We found a new name for the orphaned sub-register.
      SubRegs.insert(std::make_pair(I->second, SRI->second));
      Indices.push_back(I->second);
    }
  }

  // Process the composites.
  ListInit *Comps = TheDef->getValueAsListInit("CompositeIndices");
  for (unsigned i = 0, e = Comps->size(); i != e; ++i) {
    DagInit *Pat = dynamic_cast<DagInit*>(Comps->getElement(i));
    if (!Pat)
      throw TGError(TheDef->getLoc(), "Invalid dag '" +
                    Comps->getElement(i)->getAsString() +
                    "' in CompositeIndices");
    DefInit *BaseIdxInit = dynamic_cast<DefInit*>(Pat->getOperator());
    if (!BaseIdxInit || !BaseIdxInit->getDef()->isSubClassOf("SubRegIndex"))
      throw TGError(TheDef->getLoc(), "Invalid SubClassIndex in " +
                    Pat->getAsString());
    CodeGenSubRegIndex *BaseIdx = RegBank.getSubRegIdx(BaseIdxInit->getDef());

    // Resolve list of subreg indices into R2.
    CodeGenRegister *R2 = this;
    for (DagInit::const_arg_iterator di = Pat->arg_begin(),
         de = Pat->arg_end(); di != de; ++di) {
      DefInit *IdxInit = dynamic_cast<DefInit*>(*di);
      if (!IdxInit || !IdxInit->getDef()->isSubClassOf("SubRegIndex"))
        throw TGError(TheDef->getLoc(), "Invalid SubClassIndex in " +
                      Pat->getAsString());
      CodeGenSubRegIndex *Idx = RegBank.getSubRegIdx(IdxInit->getDef());
      const SubRegMap &R2Subs = R2->getSubRegs(RegBank);
      SubRegMap::const_iterator ni = R2Subs.find(Idx);
      if (ni == R2Subs.end())
        throw TGError(TheDef->getLoc(), "Composite " + Pat->getAsString() +
                      " refers to bad index in " + R2->getName());
      R2 = ni->second;
    }

    // Insert composite index. Allow overriding inherited indices etc.
    SubRegs[BaseIdx] = R2;

    // R2 is no longer an orphan.
    Orphans.erase(R2);
  }

  // Now Orphans contains the inherited subregisters without a direct index.
  // Create inferred indexes for all missing entries.
  // Work backwards in the Indices vector in order to compose subregs bottom-up.
  // Consider this subreg sequence:
  //
  //   qsub_1 -> dsub_0 -> ssub_0
  //
  // The qsub_1 -> dsub_0 composition becomes dsub_2, so the ssub_0 register
  // can be reached in two different ways:
  //
  //   qsub_1 -> ssub_0
  //   dsub_2 -> ssub_0
  //
  // We pick the latter composition because another register may have [dsub_0,
  // dsub_1, dsub_2] subregs without necessarily having a qsub_1 subreg.  The
  // dsub_2 -> ssub_0 composition can be shared.
  while (!Indices.empty() && !Orphans.empty()) {
    CodeGenSubRegIndex *Idx = Indices.pop_back_val();
    CodeGenRegister *SR = SubRegs[Idx];
    const SubRegMap &Map = SR->getSubRegs(RegBank);
    for (SubRegMap::const_iterator SI = Map.begin(), SE = Map.end(); SI != SE;
         ++SI)
      if (Orphans.erase(SI->second))
        SubRegs[RegBank.getCompositeSubRegIndex(Idx, SI->first)] = SI->second;
  }

  // Initialize RegUnitList. A register with no subregisters creates its own
  // unit. Otherwise, it inherits all of its subregisters' units. Because
  // getSubRegs is called recursively, this processes the register hierarchy in
  // postorder.
  //
  // TODO: We currently assume all register units correspond to a named "leaf"
  // register. We should also unify register units for ad-hoc register
  // aliases. This can be done by iteratively merging units for aliasing
  // registers using a worklist.
  assert(RegUnits.empty() && "Should only initialize RegUnits once");
  if (SubRegs.empty()) {
    RegUnits.push_back(RegBank.newRegUnit());
  }
  else {
    for (SubRegMap::const_iterator I = SubRegs.begin(), E = SubRegs.end();
         I != E; ++I) {
      // Strangely a register may have itself as a subreg (self-cycle) e.g. XMM.
      CodeGenRegister *SR = I->second;
      if (SR == this) {
        if (RegUnits.empty())
          RegUnits.push_back(RegBank.newRegUnit());
        continue;
      }
      // Merge the subregister's units into this register's RegUnits.
      mergeRegUnits(RegUnits, SR->RegUnits);
    }
  }
  return SubRegs;
}
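The RegUnits initialization at the end of getSubRegs is a post-order computation: a leaf register mints a fresh unit, while a register with sub-registers merges the units of its children, which were filled in first by the recursive getSubRegs calls. A rough sketch of the same shape over an invented ToyReg type follows; mergeUnits is a simplified stand-in for mergeRegUnits and does not keep the lists sorted.

#include <algorithm>
#include <cstdio>
#include <vector>

// Toy register node: leaves mint their own unit, composites merge child units.
struct ToyReg {
  std::vector<ToyReg *> SubRegs;
  std::vector<unsigned> Units;
};

// Simplified stand-in for mergeRegUnits: append units not already present.
static void mergeUnits(std::vector<unsigned> &Dst,
                       const std::vector<unsigned> &Src) {
  for (unsigned U : Src)
    if (std::find(Dst.begin(), Dst.end(), U) == Dst.end())
      Dst.push_back(U);
}

// Post-order: initialize children first, then inherit their units.
static void assignUnits(ToyReg &R, unsigned &NextUnit) {
  if (!R.Units.empty())
    return; // already initialized (compute-once, like SubRegsComplete)
  if (R.SubRegs.empty()) {
    R.Units.push_back(NextUnit++); // leaf register gets a fresh unit
    return;
  }
  for (ToyReg *SR : R.SubRegs) {
    assignUnits(*SR, NextUnit);
    mergeUnits(R.Units, SR->Units);
  }
}

int main() {
  // Q0 = {D0, D1}, D0 = {S0, S1}, D1 = {S2, S3}.
  ToyReg S0, S1, S2, S3;
  ToyReg D0{{&S0, &S1}, {}}, D1{{&S2, &S3}, {}};
  ToyReg Q0{{&D0, &D1}, {}};
  unsigned NextUnit = 0;
  assignUnits(Q0, NextUnit);
  std::printf("Q0 covers %zu register units\n", Q0.Units.size()); // prints 4
  return 0;
}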
Example No. 14
// Builds the graph + StratifiedSets for a function.
CFLAAResult::FunctionInfo CFLAAResult::buildSetsFrom(Function *Fn) {
  NodeMapT Map;
  GraphT Graph;
  SmallVector<Value *, 4> ReturnedValues;

  buildGraphFrom(*this, Fn, ReturnedValues, Map, Graph);

  DenseMap<GraphT::Node, Value *> NodeValueMap;
  NodeValueMap.reserve(Map.size());
  for (const auto &Pair : Map)
    NodeValueMap.insert(std::make_pair(Pair.second, Pair.first));

  const auto findValueOrDie = [&NodeValueMap](GraphT::Node Node) {
    auto ValIter = NodeValueMap.find(Node);
    assert(ValIter != NodeValueMap.end());
    return ValIter->second;
  };

  StratifiedSetsBuilder<Value *> Builder;

  SmallVector<GraphT::Node, 16> Worklist;
  for (auto &Pair : Map) {
    Worklist.clear();

    auto *Value = Pair.first;
    Builder.add(Value);
    auto InitialNode = Pair.second;
    Worklist.push_back(InitialNode);
    while (!Worklist.empty()) {
      auto Node = Worklist.pop_back_val();
      auto *CurValue = findValueOrDie(Node);
      if (canSkipAddingToSets(CurValue))
        continue;

      Optional<StratifiedAttr> MaybeCurIndex = valueToAttrIndex(CurValue);
      if (MaybeCurIndex)
        Builder.noteAttributes(CurValue, *MaybeCurIndex);

      for (const auto &EdgeTuple : Graph.edgesFor(Node)) {
        auto Weight = std::get<0>(EdgeTuple);
        auto Label = Weight.first;
        auto &OtherNode = std::get<1>(EdgeTuple);
        auto *OtherValue = findValueOrDie(OtherNode);

        if (canSkipAddingToSets(OtherValue))
          continue;

        bool Added;
        switch (directionOfEdgeType(Label)) {
        case Level::Above:
          Added = Builder.addAbove(CurValue, OtherValue);
          break;
        case Level::Below:
          Added = Builder.addBelow(CurValue, OtherValue);
          break;
        case Level::Same:
          Added = Builder.addWith(CurValue, OtherValue);
          break;
        }

        auto Aliasing = Weight.second;
        if (MaybeCurIndex)
          Aliasing.set(*MaybeCurIndex);
        if (auto MaybeOtherIndex = valueToAttrIndex(OtherValue))
          Aliasing.set(*MaybeOtherIndex);
        Builder.noteAttributes(CurValue, Aliasing);
        Builder.noteAttributes(OtherValue, Aliasing);

        if (Added)
          Worklist.push_back(OtherNode);
      }
    }
  }

  // There are times when we end up with parameters not in our graph (e.g. if a
  // parameter is only used as the condition of a branch). Other bits of code depend on
  // things that were present during construction being present in the graph.
  // So, we add all present arguments here.
  for (auto &Arg : Fn->args()) {
    if (!Builder.add(&Arg))
      continue;

    auto Attrs = valueToAttrIndex(&Arg);
    if (Attrs.hasValue())
      Builder.noteAttributes(&Arg, *Attrs);
  }

  return FunctionInfo(Builder.build(), std::move(ReturnedValues));
}
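Map in buildGraphFrom goes from values to graph nodes, so the code above inverts it once up front and then uses findValueOrDie to map nodes back to values, asserting if a node was never registered. The same two-step pattern with standard containers is sketched below; all names are invented for illustration.

#include <cassert>
#include <string>
#include <unordered_map>

// Invert a value->node map once so nodes can be mapped back to values.
static std::unordered_map<int, std::string>
invertMap(const std::unordered_map<std::string, int> &ValueToNode) {
  std::unordered_map<int, std::string> NodeToValue;
  NodeToValue.reserve(ValueToNode.size());
  for (const auto &Pair : ValueToNode)
    NodeToValue.emplace(Pair.second, Pair.first);
  return NodeToValue;
}

// Fail loudly, like findValueOrDie above, if a node was never registered.
static const std::string &
lookupOrDie(const std::unordered_map<int, std::string> &NodeToValue, int Node) {
  auto It = NodeToValue.find(Node);
  assert(It != NodeToValue.end() && "node was never added to the map");
  return It->second;
}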
Example No. 15
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.push_back(std::make_pair(V, false));
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      Instruction *I = cast<Instruction>(U.getUser());

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.push_back(std::make_pair(I, IsOffset));
        continue;
      }
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
        ValuesToInspect.push_back(
            std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
        continue;
      }

      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (CS.isCallee(&U))
          continue;

        unsigned DataOpNo = CS.getDataOperandNo(&U);
        bool IsArgOperand = CS.isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && CS.isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}
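The walk above carries a (Value, IsOffset) pair through the worklist so that, by the time it reaches an interesting use, it already knows whether the pointer still points at the start of the alloca. Here is a trimmed-down sketch of that pair-carrying walk, assuming LLVM headers; isLoadedAtAnOffset is an invented name and it deliberately handles far fewer user kinds than the real helper.

#include <utility>
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Return true if some transitive use loads through V at a non-zero offset,
// following bitcasts and GEPs while carrying an "is offset" flag along.
static bool isLoadedAtAnOffset(Value *V) {
  SmallVector<std::pair<Value *, bool>, 8> Worklist;
  Worklist.push_back({V, false});
  while (!Worklist.empty()) {
    std::pair<Value *, bool> Item = Worklist.pop_back_val();
    for (User *U : Item.first->users()) {
      if (isa<LoadInst>(U)) {
        if (Item.second)
          return true;               // a load through an offset pointer
        continue;
      }
      if (isa<BitCastInst>(U)) {     // casts never change the offset
        Worklist.push_back({U, Item.second});
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        Worklist.push_back({U, Item.second || !GEP->hasAllZeroIndices()});
        continue;
      }
      // Every other user kind is ignored in this sketch.
    }
  }
  return false;
}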
Example No. 16
static bool verifyCTRBranch(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator I) {
  MachineBasicBlock::iterator BI = I;
  SmallSet<MachineBasicBlock *, 16>   Visited;
  SmallVector<MachineBasicBlock *, 8> Preds;
  bool CheckPreds;

  if (I == MBB->begin()) {
    Visited.insert(MBB);
    goto queue_preds;
  } else
    --I;

check_block:
  Visited.insert(MBB);
  if (I == MBB->end())
    goto queue_preds;

  CheckPreds = true;
  for (MachineBasicBlock::iterator IE = MBB->begin();; --I) {
    unsigned Opc = I->getOpcode();
    if (Opc == PPC::MTCTRloop || Opc == PPC::MTCTR8loop) {
      CheckPreds = false;
      break;
    }

    if (I != BI && clobbersCTR(I)) {
      DEBUG(dbgs() << "BB#" << MBB->getNumber() << " (" <<
                      MBB->getFullName() << ") instruction " << *I <<
                      " clobbers CTR, invalidating " << "BB#" <<
                      BI->getParent()->getNumber() << " (" <<
                      BI->getParent()->getFullName() << ") instruction " <<
                      *BI << "\n");
      return false;
    }

    if (I == IE)
      break;
  }

  if (!CheckPreds && Preds.empty())
    return true;

  if (CheckPreds) {
queue_preds:
    if (MachineFunction::iterator(MBB) == MBB->getParent()->begin()) {
      DEBUG(dbgs() << "Unable to find a MTCTR instruction for BB#" <<
                      BI->getParent()->getNumber() << " (" <<
                      BI->getParent()->getFullName() << ") instruction " <<
                      *BI << "\n");
      return false;
    }

    for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
         PIE = MBB->pred_end(); PI != PIE; ++PI)
      Preds.push_back(*PI);
  }

  do {
    MBB = Preds.pop_back_val();
    if (!Visited.count(MBB)) {
      I = MBB->getLastNonDebugInstr();
      goto check_block;
    }
  } while (!Preds.empty());

  return true;
}
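The goto-based control flow in verifyCTRBranch boils down to: scan the current block backwards, and if the defining instruction is not found, queue every not-yet-visited predecessor and repeat. Below is a goto-free sketch of just that queueing skeleton over an invented ToyBlock type; the per-instruction scan and the clobber check are omitted.

#include <unordered_set>
#include <vector>

// Toy block: predecessor list plus a flag standing in for "contains MTCTRloop".
struct ToyBlock {
  std::vector<ToyBlock *> Preds;
  bool DefinesCTR = false;
};

// Walking backwards from From, check that every path hits a defining block
// before running out of predecessors.
static bool definedOnAllPaths(ToyBlock *From) {
  std::unordered_set<ToyBlock *> Visited;
  std::vector<ToyBlock *> Worklist{From};
  while (!Worklist.empty()) {
    ToyBlock *BB = Worklist.back();
    Worklist.pop_back();
    if (!Visited.insert(BB).second)
      continue;                 // already examined this block
    if (BB->DefinesCTR)
      continue;                 // this path is satisfied
    if (BB->Preds.empty())
      return false;             // reached an entry block with no definition
    for (ToyBlock *Pred : BB->Preds)
      Worklist.push_back(Pred);
  }
  return true;
}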
Example No. 17
/// performStoreOnlyObjectElimination - Scan the graph of uses of the specified
/// object allocation.  If the object does not escape and is only stored to
/// (this happens when GVN and other optimizations forward-substitute the stored
/// values, eliminating all loads from the object), then zap the
/// object and all accesses related to it.
static bool performStoreOnlyObjectElimination(CallInst &Allocation,
                                              BasicBlock::iterator &BBI) {
  DtorKind DtorInfo = analyzeDestructor(Allocation.getArgOperand(0));

  // We can't delete the object if its destructor has side effects.
  if (DtorInfo != DtorKind::NoSideEffects)
    return false;

  // Do a depth first search exploring all of the uses of the object pointer,
  // following through casts, pointer adjustments etc.  If we find any loads or
  // any escape sites of the object, we give up.  If we succeed in walking the
  // entire graph of uses, we can remove the resultant set.
  SmallSetVector<Instruction*, 16> InvolvedInstructions;
  SmallVector<Instruction*, 16> Worklist;
  Worklist.push_back(&Allocation);

  // Stores - Keep track of all of the store instructions we see.
  SmallVector<StoreInst*, 16> Stores;

  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();

    // Insert the instruction into our InvolvedInstructions set.  If we have
    // already seen it, then don't reprocess all of the uses.
    if (!InvolvedInstructions.insert(I)) continue;

    // Okay, this is the first time we've seen this instruction, proceed.
    switch (classifyInstruction(*I)) {
    // These instructions should not reach here based on the pass ordering.
    // i.e. LLVMARCOpt -> LLVMContractOpt.
    case RT_RetainN:
    case RT_UnknownRetainN:
    case RT_BridgeRetainN:
    case RT_ReleaseN:
    case RT_UnknownReleaseN:
    case RT_BridgeReleaseN:
      llvm_unreachable("These are only created by LLVMARCContract !");
    case RT_AllocObject:
      // If this is a different swift_allocObject than we started with, then
      // there is some computation feeding into a size or alignment computation
      // that we have to keep... unless we can delete *that* entire object as
      // well.
      break;

    case RT_NoMemoryAccessed:
      // If no memory is accessed, then something is being done with the
      // pointer: maybe it is bitcast or GEP'd. Since there are no side effects,
      // it is perfectly fine to delete this instruction if all uses of the
      // instruction are also eliminable.

      if (I->mayHaveSideEffects() || isa<TerminatorInst>(I))
        return false;
      break;

    case RT_Release:
    case RT_Retain:
    case RT_FixLifetime:
    case RT_CheckUnowned:
      // It is perfectly fine to eliminate various retains and releases of this
      // object: we are zapping all accesses or none.
      break;

    // If this is an unknown instruction, we have more interesting things to
    // consider.
    case RT_Unknown:
    case RT_ObjCRelease:
    case RT_ObjCRetain:
    case RT_UnknownRetain:
    case RT_UnknownRelease:
    case RT_BridgeRetain:
    case RT_BridgeRelease:
    case RT_RetainUnowned:

      // Otherwise, this really is some unhandled instruction.  Bail out.
      return false;
    }

    // Okay, if we got here, the instruction can be eaten so-long as all of its
    // uses can be.  Scan through the uses and add them to the worklist for
    // recursive processing.
    for (auto UI = I->user_begin(), E = I->user_end(); UI != E; ++UI) {
      Instruction *User = cast<Instruction>(*UI);

      // Handle stores as a special case here: we want to make sure that the
      // object is being stored *to*, not itself being stored (which would be an
      // escape point).  Since stores themselves don't have any uses, we can
      // short-cut the classification scheme above.
      if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
        // If this is a store *to* the object, we can zap it.
        if (UI.getUse().getOperandNo() == StoreInst::getPointerOperandIndex()) {
          InvolvedInstructions.insert(SI);
          continue;
        }
        // Otherwise, using the object as a source (or size) is an escape.
        return false;
      }
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        // If this is a memset/memcpy/memmove *to* the object, we can zap it.
        if (UI.getUse().getOperandNo() == 0) {
          InvolvedInstructions.insert(MI);
          continue;
        }
        // Otherwise, using the object as a source (or size) is an escape.
        return false;
      }

      // Otherwise, normal instructions just go on the worklist for processing.
      Worklist.push_back(User);
    }
  }

  // Ok, we succeeded!  This means we can zap all of the instructions that use
  // the object.  One thing we have to be careful of is to make sure that we
  // don't invalidate "BBI" (the iterator the outer walk of the optimization
  // pass is using, and indicates the next instruction to process).  This would
  // happen if we delete the instruction it is pointing to.  Advance the
  // iterator if that would happen.
  while (InvolvedInstructions.count(&*BBI))
    ++BBI;

  // Zap all of the instructions.
  for (auto I : InvolvedInstructions) {
    if (!I->use_empty())
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
    I->eraseFromParent();
  }

  ++NumStoreOnlyObjectsEliminated;
  return true;
}
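The zapping loop at the end first rewrites any remaining uses to undef and only then erases the instructions; that ordering lets a batch of mutually referencing instructions be removed without tripping "uses remain" assertions. A minimal sketch of the pattern, assuming LLVM headers; eraseAll is an invented helper name.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Erase a batch of instructions that may still refer to one another: detach
// every remaining use by rewriting it to undef, then remove the instructions.
static void eraseAll(SmallVectorImpl<Instruction *> &Dead) {
  for (Instruction *I : Dead)
    if (!I->use_empty())
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
  for (Instruction *I : Dead)
    I->eraseFromParent();
}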
Example No. 18
void swift::ide::printSubmoduleInterface(
       Module *M,
       ArrayRef<StringRef> FullModuleName,
       ArrayRef<StringRef> GroupNames,
       ModuleTraversalOptions TraversalOptions,
       ASTPrinter &Printer,
       const PrintOptions &Options,
       const bool PrintSynthesizedExtensions) {
  auto AdjustedOptions = Options;
  adjustPrintOptions(AdjustedOptions);

  SmallVector<Decl *, 1> Decls;
  M->getDisplayDecls(Decls);

  auto &SwiftContext = M->getASTContext();
  auto &Importer =
      static_cast<ClangImporter &>(*SwiftContext.getClangModuleLoader());

  const clang::Module *InterestingClangModule = nullptr;

  SmallVector<ImportDecl *, 1> ImportDecls;
  llvm::DenseSet<const clang::Module *> ClangModulesForImports;
  SmallVector<Decl *, 1> SwiftDecls;
  llvm::DenseMap<const clang::Module *,
                 SmallVector<std::pair<Decl *, clang::SourceLocation>, 1>>
    ClangDecls;

  // Drop top-level module name.
  FullModuleName = FullModuleName.slice(1);

  InterestingClangModule = M->findUnderlyingClangModule();
  if (InterestingClangModule) {
    for (StringRef Name : FullModuleName) {
      InterestingClangModule = InterestingClangModule->findSubmodule(Name);
      if (!InterestingClangModule)
        return;
    }
  } else {
    assert(FullModuleName.empty());
  }

  // If we're printing recursively, find all of the submodules to print.
  if (InterestingClangModule) {
    if (TraversalOptions) {
      SmallVector<const clang::Module *, 8> Worklist;
      SmallPtrSet<const clang::Module *, 8> Visited;
      Worklist.push_back(InterestingClangModule);
      Visited.insert(InterestingClangModule);
      while (!Worklist.empty()) {
        const clang::Module *CM = Worklist.pop_back_val();
        if (!(TraversalOptions & ModuleTraversal::VisitHidden) &&
            CM->IsExplicit)
          continue;

        ClangDecls.insert({ CM, {} });

        // If we're supposed to visit submodules, add them now.
        if (TraversalOptions & ModuleTraversal::VisitSubmodules) {
          for (auto Sub = CM->submodule_begin(), SubEnd = CM->submodule_end();
               Sub != SubEnd; ++Sub) {
            if (Visited.insert(*Sub).second)
              Worklist.push_back(*Sub);
          }
        }
      }
    } else {
      ClangDecls.insert({ InterestingClangModule, {} });
    }
  }

  // Collect those submodules that are actually imported but have no import decls
  // in the module.
  llvm::SmallPtrSet<const clang::Module *, 16> NoImportSubModules;
  if (InterestingClangModule) {
    // Assume all submodules are missing.
    for (auto It = InterestingClangModule->submodule_begin();
         It != InterestingClangModule->submodule_end(); ++It) {
      NoImportSubModules.insert(*It);
    }
  }
  llvm::StringMap<std::vector<Decl*>> FileRangedDecls;
  // Separate the declarations that we are going to print into different
  // buckets.
  for (Decl *D : Decls) {

    // Skip declarations that are not accessible.
    if (auto *VD = dyn_cast<ValueDecl>(D)) {
      if (Options.AccessibilityFilter > Accessibility::Private &&
          VD->hasAccessibility() &&
          VD->getFormalAccess() < Options.AccessibilityFilter)
        continue;
    }

    auto ShouldPrintImport = [&](ImportDecl *ImportD) -> bool {
      if (!InterestingClangModule)
        return true;
      auto ClangMod = ImportD->getClangModule();
      if (!ClangMod)
        return true;
      if (!ClangMod->isSubModule())
        return true;
      if (ClangMod == InterestingClangModule)
        return false;
      // FIXME: const-ness on the clang API.
      return ClangMod->isSubModuleOf(
                          const_cast<clang::Module*>(InterestingClangModule));
    };

    if (auto ID = dyn_cast<ImportDecl>(D)) {
      if (ShouldPrintImport(ID)) {
        if (ID->getClangModule())
          // Erase those submodules that are not missing.
          NoImportSubModules.erase(ID->getClangModule());
        if (ID->getImportKind() == ImportKind::Module) {
          // Make sure we don't print duplicate imports, due to getting imports
          // for both a clang module and its overlay.
          if (auto *ClangMod = getUnderlyingClangModuleForImport(ID)) {
            auto P = ClangModulesForImports.insert(ClangMod);
            bool IsNew = P.second;
            if (!IsNew)
              continue;
          }
        }
        ImportDecls.push_back(ID);
      }
      continue;
    }

    auto addToClangDecls = [&](Decl *D) {
      assert(D->hasClangNode());
      auto CN = D->getClangNode();
      clang::SourceLocation Loc = CN.getLocation();

      auto *OwningModule = Importer.getClangOwningModule(CN);
      auto I = ClangDecls.find(OwningModule);
      if (I != ClangDecls.end()) {
        I->second.push_back({ D, Loc });
      }
    };

    if (D->hasClangNode()) {
      addToClangDecls(D);
      continue;
    }
    if (FullModuleName.empty()) {
      // If group name is given and the decl does not belong to the group, skip it.
      if (!GroupNames.empty()){
        if (auto Target = D->getGroupName()) {
          if (std::find(GroupNames.begin(), GroupNames.end(),
                        Target.getValue()) != GroupNames.end()) {
            FileRangedDecls.insert(std::make_pair(D->getSourceFileName().getValue(),
              std::vector<Decl*>())).first->getValue().push_back(D);
          }
        }
        continue;
      }
      // Add Swift decls if we are printing the top-level module.
      SwiftDecls.push_back(D);
    }
  }
  if (!GroupNames.empty()) {
    assert(SwiftDecls.empty());
    for (auto &Entry : FileRangedDecls) {
      auto &DeclsInFile = Entry.getValue();
      std::sort(DeclsInFile.begin(), DeclsInFile.end(),
                [](Decl* LHS, Decl *RHS) {
                  assert(LHS->getSourceOrder().hasValue());
                  assert(RHS->getSourceOrder().hasValue());
                  return LHS->getSourceOrder().getValue() <
                         RHS->getSourceOrder().getValue();
                });

      for (auto D : DeclsInFile) {
        SwiftDecls.push_back(D);
      }
    }
  }

  // Create the missing import decls and add to the collector.
  for (auto *SM : NoImportSubModules) {
    ImportDecls.push_back(createImportDecl(M->getASTContext(), M, SM, {}));
  }

  auto &ClangSourceManager = Importer.getClangASTContext().getSourceManager();

  // Sort imported declarations in source order *within a submodule*.
  for (auto &P : ClangDecls) {
    std::sort(P.second.begin(), P.second.end(),
              [&](std::pair<Decl *, clang::SourceLocation> LHS,
                  std::pair<Decl *, clang::SourceLocation> RHS) -> bool {
                return ClangSourceManager.isBeforeInTranslationUnit(LHS.second,
                                                                    RHS.second);
              });
  }

  // Sort Swift declarations so that we print them in a consistent order.
  std::sort(ImportDecls.begin(), ImportDecls.end(),
            [](ImportDecl *LHS, ImportDecl *RHS) -> bool {
    auto LHSPath = LHS->getFullAccessPath();
    auto RHSPath = RHS->getFullAccessPath();
    for (unsigned i = 0, e = std::min(LHSPath.size(), RHSPath.size()); i != e;
         i++) {
      if (int Ret = LHSPath[i].first.str().compare(RHSPath[i].first.str()))
        return Ret < 0;
    }
    return false;
  });

  // If the group name is specified, we sort them according to their source order,
  // which is the order preserved by getTopLevelDecls.
  if (GroupNames.empty()) {
    std::sort(SwiftDecls.begin(), SwiftDecls.end(),
      [&](Decl *LHS, Decl *RHS) -> bool {
        auto *LHSValue = dyn_cast<ValueDecl>(LHS);
        auto *RHSValue = dyn_cast<ValueDecl>(RHS);

        if (LHSValue && RHSValue) {
          StringRef LHSName = LHSValue->getName().str();
          StringRef RHSName = RHSValue->getName().str();
          if (int Ret = LHSName.compare(RHSName))
            return Ret < 0;
          // FIXME: this is not sufficient to establish a total order for overloaded
          // decls.
          return LHS->getKind() < RHS->getKind();
        }

        return LHS->getKind() < RHS->getKind();
      });
  }

  ASTPrinter *PrinterToUse = &Printer;

  ClangCommentPrinter RegularCommentPrinter(Printer, Importer);
  if (Options.PrintRegularClangComments)
    PrinterToUse = &RegularCommentPrinter;

  auto PrintDecl = [&](Decl *D) -> bool {
    ASTPrinter &Printer = *PrinterToUse;
    if (!shouldPrint(D, AdjustedOptions)) {
      Printer.callAvoidPrintDeclPost(D);
      return false;
    }
    if (auto Ext = dyn_cast<ExtensionDecl>(D)) {
      // Clang extensions (categories) are always printed in source order.
      // Swift extensions are printed with their associated type unless it's
      // a cross-module extension.
      if (!Ext->hasClangNode()) {
        auto ExtendedNominal = Ext->getExtendedType()->getAnyNominal();
        if (Ext->getModuleContext() == ExtendedNominal->getModuleContext())
          return false;
      }
    }
    std::unique_ptr<SynthesizedExtensionAnalyzer> pAnalyzer;
    if (auto NTD = dyn_cast<NominalTypeDecl>(D)) {
      if (PrintSynthesizedExtensions) {
        pAnalyzer.reset(new SynthesizedExtensionAnalyzer(NTD, AdjustedOptions));
        AdjustedOptions.shouldCloseNominal = !pAnalyzer->hasMergeGroup(
          SynthesizedExtensionAnalyzer::MergeGroupKind::MergableWithTypeDef);
      }
    }
    if (D->print(Printer, AdjustedOptions)) {
      if (AdjustedOptions.shouldCloseNominal)
        Printer << "\n";
      AdjustedOptions.shouldCloseNominal = true;
      if (auto NTD = dyn_cast<NominalTypeDecl>(D)) {
        std::queue<NominalTypeDecl *> SubDecls{{NTD}};

        while (!SubDecls.empty()) {
          auto NTD = SubDecls.front();
          SubDecls.pop();

          // Add sub-types of NTD.
          for (auto Sub : NTD->getMembers())
            if (auto N = dyn_cast<NominalTypeDecl>(Sub))
              SubDecls.push(N);

          if (!PrintSynthesizedExtensions) {
            // Print Ext and add sub-types of Ext.
            for (auto Ext : NTD->getExtensions()) {
              if (!shouldPrint(Ext, AdjustedOptions)) {
                Printer.callAvoidPrintDeclPost(Ext);
                continue;
              }
              if (Ext->hasClangNode())
                continue; // will be printed in its source location, see above.
              Printer << "\n";
              Ext->print(Printer, AdjustedOptions);
              Printer << "\n";
              for (auto Sub : Ext->getMembers())
                if (auto N = dyn_cast<NominalTypeDecl>(Sub))
                  SubDecls.push(N);
            }
            continue;
          }

          bool IsTopLevelDecl = D == NTD;

          // If the printed Decl is the top-level one, merge the constraint-free
          // extensions into the main body.
          if (IsTopLevelDecl) {
          // Print the part that should be merged with the type decl.
          pAnalyzer->forEachExtensionMergeGroup(
            SynthesizedExtensionAnalyzer::MergeGroupKind::MergableWithTypeDef,
            [&](ArrayRef<ExtensionAndIsSynthesized> Decls){
              for (auto ET : Decls) {
                AdjustedOptions.shouldOpenExtension = false;
                AdjustedOptions.shouldCloseExtension =
                  Decls.back().first == ET.first;
                if (ET.second)
                  AdjustedOptions.
                    initArchetypeTransformerForSynthesizedExtensions(NTD,
                                                               pAnalyzer.get());
                ET.first->print(Printer, AdjustedOptions);
                if (ET.second)
                  AdjustedOptions.
                    clearArchetypeTransformerForSynthesizedExtensions();
                if (AdjustedOptions.shouldCloseExtension)
                  Printer << "\n";
              }
          });
          }

          // If the printed Decl is not the top-level one, reset analyzer.
          if (!IsTopLevelDecl)
            pAnalyzer.reset(new SynthesizedExtensionAnalyzer(NTD, AdjustedOptions));

          // Print the rest as synthesized extensions.
          pAnalyzer->forEachExtensionMergeGroup(
            // For top-level decls, only constraint extensions are printed,
            // since the rest are merged into the main body.
            IsTopLevelDecl ?
              SynthesizedExtensionAnalyzer::MergeGroupKind::UnmergableWithTypeDef :
            // For sub-decls, all extensions should be printed.
              SynthesizedExtensionAnalyzer::MergeGroupKind::All,
            [&](ArrayRef<ExtensionAndIsSynthesized> Decls){
              for (auto ET : Decls) {
                AdjustedOptions.shouldOpenExtension =
                  Decls.front().first == ET.first;
                AdjustedOptions.shouldCloseExtension =
                  Decls.back().first == ET.first;
                if (AdjustedOptions.shouldOpenExtension)
                  Printer << "\n";
                if (ET.second)
                  AdjustedOptions.
                    initArchetypeTransformerForSynthesizedExtensions(NTD,
                                                               pAnalyzer.get());
                ET.first->print(Printer, AdjustedOptions);
                if (ET.second)
                  AdjustedOptions.
                    clearArchetypeTransformerForSynthesizedExtensions();
                if (AdjustedOptions.shouldCloseExtension)
                  Printer << "\n";
            }
          });
        }
      }
      return true;
    }
    return false;
  };

  // Imports from the stdlib are internal details that don't need to be exposed.
  if (!M->isStdlibModule()) {
    for (auto *D : ImportDecls)
      PrintDecl(D);
    Printer << "\n";
  }

  {
    using ModuleAndName = std::pair<const clang::Module *, std::string>;
    SmallVector<ModuleAndName, 8> ClangModules;
    for (auto P : ClangDecls) {
      ClangModules.push_back({ P.first, P.first->getFullModuleName() });
    }
    // Sort modules by name.
    std::sort(ClangModules.begin(), ClangModules.end(),
              [](const ModuleAndName &LHS, const ModuleAndName &RHS)
                -> bool {
                  return LHS.second < RHS.second;
              });

    for (auto CM : ClangModules) {
      for (auto DeclAndLoc : ClangDecls[CM.first])
        PrintDecl(DeclAndLoc.first);
    }
  }

  if (!(TraversalOptions & ModuleTraversal::SkipOverlay) ||
      !InterestingClangModule) {
    for (auto *D : SwiftDecls) {
      if (PrintDecl(D))
        Printer << "\n";
    }
  }
}
Example No. 19
/// Iteratively perform simplification on a worklist of users
/// of the specified induction variable. Each successive simplification may push
/// more users which may themselves be candidates for simplification.
///
/// This algorithm does not require IVUsers analysis. Instead, it simplifies
/// instructions in-place during analysis. Rather than rewriting induction
/// variables bottom-up from their users, it transforms a chain of IVUsers
/// top-down, updating the IR only when it encounters a clear optimization
/// opportunity.
///
/// Once DisableIVRewrite is default, LSR will be the only client of IVUsers.
///
void SimplifyIndvar::simplifyUsers(PHINode *CurrIV, IVVisitor *V) {
  if (!SE->isSCEVable(CurrIV->getType()))
    return;

  // Instructions processed by SimplifyIndvar for CurrIV.
  SmallPtrSet<Instruction*,16> Simplified;

  // Use-def pairs of IV users waiting to be processed for CurrIV.
  SmallVector<std::pair<Instruction*, Instruction*>, 8> SimpleIVUsers;

  // Push users of the current LoopPhi. In rare cases, pushIVUsers may be
  // called multiple times for the same LoopPhi. This is the proper thing to
  // do for loop header phis that use each other.
  pushIVUsers(CurrIV, L, Simplified, SimpleIVUsers);

  while (!SimpleIVUsers.empty()) {
    std::pair<Instruction*, Instruction*> UseOper =
      SimpleIVUsers.pop_back_val();
    Instruction *UseInst = UseOper.first;

    // Bypass back edges to avoid extra work.
    if (UseInst == CurrIV) continue;

    // Try to replace UseInst with a loop invariant before any other
    // simplifications.
    if (replaceIVUserWithLoopInvariant(UseInst))
      continue;

    Instruction *IVOperand = UseOper.second;
    for (unsigned N = 0; IVOperand; ++N) {
      assert(N <= Simplified.size() && "runaway iteration");

      Value *NewOper = foldIVUser(UseOper.first, IVOperand);
      if (!NewOper)
        break; // done folding
      IVOperand = dyn_cast<Instruction>(NewOper);
    }
    if (!IVOperand)
      continue;

    if (eliminateIVUser(UseOper.first, IVOperand)) {
      pushIVUsers(IVOperand, L, Simplified, SimpleIVUsers);
      continue;
    }

    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(UseOper.first)) {
      if ((isa<OverflowingBinaryOperator>(BO) &&
           strengthenOverflowingOperation(BO, IVOperand)) ||
          (isa<ShlOperator>(BO) && strengthenRightShift(BO, IVOperand))) {
        // re-queue uses of the now modified binary operator and fall
        // through to the checks that remain.
        pushIVUsers(IVOperand, L, Simplified, SimpleIVUsers);
      }
    }

    CastInst *Cast = dyn_cast<CastInst>(UseOper.first);
    if (V && Cast) {
      V->visitCast(Cast);
      continue;
    }
    if (isSimpleIVUser(UseOper.first, L, SE)) {
      pushIVUsers(UseOper.first, L, Simplified, SimpleIVUsers);
    }
  }
}
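The doc comment above captures the driving idea: simplify one user, and whenever that succeeds, push that user's own users so they get reconsidered. The same control structure is shown below on a toy fold-to-constant problem; every name is invented and nothing here uses the SimplifyIndvar APIs.

#include <vector>

// Toy IR node: it can be folded once all of its operands are constant.
struct ToyNode {
  std::vector<ToyNode *> Operands;
  std::vector<ToyNode *> Users;
  bool IsConstant = false;
};

// JustFolded has just become constant; give each of its users a chance to
// fold, and re-queue the users of every node that folds in turn.
static unsigned propagateFolds(ToyNode *JustFolded) {
  unsigned NumFolded = 0;
  std::vector<ToyNode *> Worklist(JustFolded->Users.begin(),
                                  JustFolded->Users.end());
  while (!Worklist.empty()) {
    ToyNode *N = Worklist.back();
    Worklist.pop_back();
    if (N->IsConstant)
      continue; // already folded on an earlier visit
    bool AllConstant = true;
    for (ToyNode *Op : N->Operands)
      AllConstant = AllConstant && Op->IsConstant;
    if (!AllConstant)
      continue; // a later operand fold will push this node again
    N->IsConstant = true;
    ++NumFolded;
    for (ToyNode *U : N->Users)
      Worklist.push_back(U);
  }
  return NumFolded;
}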
Example No. 20
void llvm::calculateClrEHStateNumbers(const Function *Fn,
                                      WinEHFuncInfo &FuncInfo) {
  // Return if it's already been done.
  if (!FuncInfo.EHPadStateMap.empty())
    return;

  // This numbering assigns one state number to each catchpad and cleanuppad.
  // It also computes two tree-like relations over states:
  // 1) Each state has a "HandlerParentState", which is the state of the next
  //    outer handler enclosing this state's handler (same as nearest ancestor
  //    per the ParentPad linkage on EH pads, but skipping over catchswitches).
  // 2) Each state has a "TryParentState", which:
  //    a) for a catchpad that's not the last handler on its catchswitch, is
  //       the state of the next catchpad on that catchswitch
  //    b) for all other pads, is the state of the pad whose try region is the
  //       next outer try region enclosing this state's try region.  The "try
  //       regions" are not present as such in the IR, but will be inferred
  //       based on the placement of invokes and pads which reach each other
  //       by exceptional exits
  // Catchswitches do not get their own states, but each gets mapped to the
  // state of its first catchpad.

  // Step one: walk down from outermost to innermost funclets, assigning each
  // catchpad and cleanuppad a state number.  Add an entry to the
  // ClrEHUnwindMap for each state, recording its HandlerParentState and
  // handler attributes.  Record the TryParentState as well for each catchpad
  // that's not the last on its catchswitch, but initialize all other entries'
  // TryParentStates to a sentinel -1 value that the next pass will update.

  // Seed a worklist with pads that have no parent.
  SmallVector<std::pair<const Instruction *, int>, 8> Worklist;
  for (const BasicBlock &BB : *Fn) {
    const Instruction *FirstNonPHI = BB.getFirstNonPHI();
    const Value *ParentPad;
    if (const auto *CPI = dyn_cast<CleanupPadInst>(FirstNonPHI))
      ParentPad = CPI->getParentPad();
    else if (const auto *CSI = dyn_cast<CatchSwitchInst>(FirstNonPHI))
      ParentPad = CSI->getParentPad();
    else
      continue;
    if (isa<ConstantTokenNone>(ParentPad))
      Worklist.emplace_back(FirstNonPHI, -1);
  }

  // Use the worklist to visit all pads, from outer to inner.  Record
  // HandlerParentState for all pads.  Record TryParentState only for catchpads
  // that aren't the last on their catchswitch (setting all other entries'
  // TryParentStates to an initial value of -1).  This loop is also responsible
  // for setting the EHPadStateMap entry for all catchpads, cleanuppads, and
  // catchswitches.
  while (!Worklist.empty()) {
    const Instruction *Pad;
    int HandlerParentState;
    std::tie(Pad, HandlerParentState) = Worklist.pop_back_val();

    if (const auto *Cleanup = dyn_cast<CleanupPadInst>(Pad)) {
      // Create the entry for this cleanup with the appropriate handler
      // properties.  Finally and fault handlers are distinguished by arity.
      ClrHandlerType HandlerType =
          (Cleanup->getNumArgOperands() ? ClrHandlerType::Fault
                                        : ClrHandlerType::Finally);
      int CleanupState = addClrEHHandler(FuncInfo, HandlerParentState, -1,
                                         HandlerType, 0, Pad->getParent());
      // Queue any child EH pads on the worklist.
      for (const User *U : Cleanup->users())
        if (const auto *I = dyn_cast<Instruction>(U))
          if (I->isEHPad())
            Worklist.emplace_back(I, CleanupState);
      // Remember this pad's state.
      FuncInfo.EHPadStateMap[Cleanup] = CleanupState;
    } else {
      // Walk the handlers of this catchswitch in reverse order since all but
      // the last need to set the following one as its TryParentState.
      const auto *CatchSwitch = cast<CatchSwitchInst>(Pad);
      int CatchState = -1, FollowerState = -1;
      SmallVector<const BasicBlock *, 4> CatchBlocks(CatchSwitch->handlers());
      for (auto CBI = CatchBlocks.rbegin(), CBE = CatchBlocks.rend();
           CBI != CBE; ++CBI, FollowerState = CatchState) {
        const BasicBlock *CatchBlock = *CBI;
        // Create the entry for this catch with the appropriate handler
        // properties.
        const auto *Catch = cast<CatchPadInst>(CatchBlock->getFirstNonPHI());
        uint32_t TypeToken = static_cast<uint32_t>(
            cast<ConstantInt>(Catch->getArgOperand(0))->getZExtValue());
        CatchState =
            addClrEHHandler(FuncInfo, HandlerParentState, FollowerState,
                            ClrHandlerType::Catch, TypeToken, CatchBlock);
        // Queue any child EH pads on the worklist.
        for (const User *U : Catch->users())
          if (const auto *I = dyn_cast<Instruction>(U))
            if (I->isEHPad())
              Worklist.emplace_back(I, CatchState);
        // Remember this catch's state.
        FuncInfo.EHPadStateMap[Catch] = CatchState;
      }
      // Associate the catchswitch with the state of its first catch.
      assert(CatchSwitch->getNumHandlers());
      FuncInfo.EHPadStateMap[CatchSwitch] = CatchState;
    }
  }

  // Step two: record the TryParentState of each state.  For cleanuppads that
  // don't have cleanuprets, we may need to infer this from their child pads,
  // so visit pads in descendant-most to ancestor-most order.
  for (auto Entry = FuncInfo.ClrEHUnwindMap.rbegin(),
            End = FuncInfo.ClrEHUnwindMap.rend();
       Entry != End; ++Entry) {
    const Instruction *Pad =
        Entry->Handler.get<const BasicBlock *>()->getFirstNonPHI();
    // For most pads, the TryParentState is the state associated with the
    // unwind dest of exceptional exits from it.
    const BasicBlock *UnwindDest;
    if (const auto *Catch = dyn_cast<CatchPadInst>(Pad)) {
      // If a catch is not the last in its catchswitch, its TryParentState is
      // the state associated with the next catch in the switch, even though
      // that's not the unwind dest of exceptions escaping the catch.  Those
      // cases were already assigned a TryParentState in the first pass, so
      // skip them.
      if (Entry->TryParentState != -1)
        continue;
      // Otherwise, get the unwind dest from the catchswitch.
      UnwindDest = Catch->getCatchSwitch()->getUnwindDest();
    } else {
      const auto *Cleanup = cast<CleanupPadInst>(Pad);
      UnwindDest = nullptr;
      for (const User *U : Cleanup->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          // Common and unambiguous case -- cleanupret indicates cleanup's
          // unwind dest.
          UnwindDest = CleanupRet->getUnwindDest();
          break;
        }

        // Get an unwind dest for the user
        const BasicBlock *UserUnwindDest = nullptr;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          UserUnwindDest = Invoke->getUnwindDest();
        } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(U)) {
          UserUnwindDest = CatchSwitch->getUnwindDest();
        } else if (auto *ChildCleanup = dyn_cast<CleanupPadInst>(U)) {
          int UserState = FuncInfo.EHPadStateMap[ChildCleanup];
          int UserUnwindState =
              FuncInfo.ClrEHUnwindMap[UserState].TryParentState;
          if (UserUnwindState != -1)
            UserUnwindDest = FuncInfo.ClrEHUnwindMap[UserUnwindState]
                                 .Handler.get<const BasicBlock *>();
        }

        // Not having an unwind dest for this user might indicate that it
        // doesn't unwind, so can't be taken as proof that the cleanup itself
        // may unwind to caller (see e.g. SimplifyUnreachable and
        // RemoveUnwindEdge).
        if (!UserUnwindDest)
          continue;

        // Now we have an unwind dest for the user, but we need to see if it
        // unwinds all the way out of the cleanup or if it stays within it.
        const Instruction *UserUnwindPad = UserUnwindDest->getFirstNonPHI();
        const Value *UserUnwindParent;
        if (auto *CSI = dyn_cast<CatchSwitchInst>(UserUnwindPad))
          UserUnwindParent = CSI->getParentPad();
        else
          UserUnwindParent =
              cast<CleanupPadInst>(UserUnwindPad)->getParentPad();

        // The unwind stays within the cleanup iff it targets a child of the
        // cleanup.
        if (UserUnwindParent == Cleanup)
          continue;

        // This unwind exits the cleanup, so its dest is the cleanup's dest.
        UnwindDest = UserUnwindDest;
        break;
      }
    }

    // Record the state of the unwind dest as the TryParentState.
    int UnwindDestState;

    // If UnwindDest is null at this point, either the pad in question can
    // be exited by unwind to caller, or it cannot be exited by unwind.  In
    // either case, reporting such cases as unwinding to caller is correct.
    // This can lead to EH tables that "look strange" -- if this pad is in
    // a parent funclet which has other children that do unwind to an enclosing
    // pad, the try region for this pad will be missing the "duplicate" EH
    // clause entries that you'd expect to see covering the whole parent.  That
    // should be benign, since the unwind never actually happens.  If it were
    // an issue, we could add a subsequent pass that pushes unwind dests down
    // from parents that have them to children that appear to unwind to caller.
    if (!UnwindDest) {
      UnwindDestState = -1;
    } else {
      UnwindDestState = FuncInfo.EHPadStateMap[UnwindDest->getFirstNonPHI()];
    }

    Entry->TryParentState = UnwindDestState;
  }

  // Step three: transfer information from pads to invokes.
  calculateStateNumbersForInvokes(Fn, FuncInfo);
}
Example No. 21
static unsigned scanFromBlock(const CFGBlock *Start,
                              llvm::BitVector &Reachable,
                              Preprocessor *PP,
                              bool IncludeSometimesUnreachableEdges) {
  unsigned count = 0;

  // Prep work queue
  SmallVector<const CFGBlock*, 32> WL;

  // The entry block may have already been marked reachable
  // by the caller.
  if (!Reachable[Start->getBlockID()]) {
    ++count;
    Reachable[Start->getBlockID()] = true;
  }

  WL.push_back(Start);

  // Find the reachable blocks from 'Start'.
  while (!WL.empty()) {
    const CFGBlock *item = WL.pop_back_val();

    // There are cases where we want to treat all successors as reachable.
    // The idea is that some "sometimes unreachable" code is not interesting,
    // and that we should forge ahead and explore those branches anyway.
    // This allows us to potentially uncover some "always unreachable" code
    // within the "sometimes unreachable" code.
    // Look at the successors and mark them reachable.
    Optional<bool> TreatAllSuccessorsAsReachable;
    if (!IncludeSometimesUnreachableEdges)
      TreatAllSuccessorsAsReachable = false;

    for (CFGBlock::const_succ_iterator I = item->succ_begin(),
         E = item->succ_end(); I != E; ++I) {
      const CFGBlock *B = *I;
      if (!B) do {
        const CFGBlock *UB = I->getPossiblyUnreachableBlock();
        if (!UB)
          break;

        if (!TreatAllSuccessorsAsReachable.hasValue()) {
          assert(PP);
          TreatAllSuccessorsAsReachable =
            shouldTreatSuccessorsAsReachable(item, *PP);
        }

        if (TreatAllSuccessorsAsReachable.getValue()) {
          B = UB;
          break;
        }
      }
      while (false);

      if (B) {
        unsigned blockID = B->getBlockID();
        if (!Reachable[blockID]) {
          Reachable.set(blockID);
          WL.push_back(B);
          ++count;
        }
      }
    }
  }
  return count;
}
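Stripped of the "sometimes unreachable" handling, scanFromBlock is an ordinary worklist reachability count: mark a block the first time it is seen, count it, and queue its successors. The same shape over a plain adjacency list, with all names invented:

#include <cstdio>
#include <vector>

// Count how many nodes are reachable from Start in a graph given as an
// adjacency list, marking each node in a bit vector exactly once.
static unsigned countReachable(const std::vector<std::vector<unsigned>> &Succs,
                               unsigned Start) {
  std::vector<bool> Reachable(Succs.size(), false);
  std::vector<unsigned> Worklist;
  Reachable[Start] = true;
  Worklist.push_back(Start);
  unsigned Count = 1;
  while (!Worklist.empty()) {
    unsigned Node = Worklist.back();
    Worklist.pop_back();
    for (unsigned Succ : Succs[Node]) {
      if (Reachable[Succ])
        continue;
      Reachable[Succ] = true;
      Worklist.push_back(Succ);
      ++Count;
    }
  }
  return Count;
}

int main() {
  // 0 -> 1 -> 2, with node 3 disconnected.
  std::vector<std::vector<unsigned>> Succs = {{1}, {2}, {}, {}};
  std::printf("%u\n", countReachable(Succs, 0)); // prints 3
  return 0;
}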
Example No. 22
void WinEHPrepare::cloneCommonBlocks(Function &F) {
  // We need to clone all blocks which belong to multiple funclets.  Values are
  // remapped throughout the funclet to propagate both the new instructions
  // *and* the new basic blocks themselves.
  for (auto &Funclets : FuncletBlocks) {
    BasicBlock *FuncletPadBB = Funclets.first;
    std::vector<BasicBlock *> &BlocksInFunclet = Funclets.second;
    Value *FuncletToken;
    if (FuncletPadBB == &F.getEntryBlock())
      FuncletToken = ConstantTokenNone::get(F.getContext());
    else
      FuncletToken = FuncletPadBB->getFirstNonPHI();

    std::vector<std::pair<BasicBlock *, BasicBlock *>> Orig2Clone;
    ValueToValueMapTy VMap;
    for (BasicBlock *BB : BlocksInFunclet) {
      ColorVector &ColorsForBB = BlockColors[BB];
      // We don't need to do anything if the block is monochromatic.
      size_t NumColorsForBB = ColorsForBB.size();
      if (NumColorsForBB == 1)
        continue;

      DEBUG_WITH_TYPE("winehprepare-coloring",
                      dbgs() << "  Cloning block \'" << BB->getName()
                              << "\' for funclet \'" << FuncletPadBB->getName()
                              << "\'.\n");

      // Create a new basic block and copy instructions into it!
      BasicBlock *CBB =
          CloneBasicBlock(BB, VMap, Twine(".for.", FuncletPadBB->getName()));
      // Insert the clone immediately after the original to ensure determinism
      // and to keep the same relative ordering of any funclet's blocks.
      CBB->insertInto(&F, BB->getNextNode());

      // Add basic block mapping.
      VMap[BB] = CBB;

      // Record delta operations that we need to perform to our color mappings.
      Orig2Clone.emplace_back(BB, CBB);
    }

    // If nothing was cloned, we're done cloning in this funclet.
    if (Orig2Clone.empty())
      continue;

    // Update our color mappings to reflect that one block has lost a color and
    // another has gained a color.
    for (auto &BBMapping : Orig2Clone) {
      BasicBlock *OldBlock = BBMapping.first;
      BasicBlock *NewBlock = BBMapping.second;

      BlocksInFunclet.push_back(NewBlock);
      ColorVector &NewColors = BlockColors[NewBlock];
      assert(NewColors.empty() && "A new block should only have one color!");
      NewColors.push_back(FuncletPadBB);

      DEBUG_WITH_TYPE("winehprepare-coloring",
                      dbgs() << "  Assigned color \'" << FuncletPadBB->getName()
                              << "\' to block \'" << NewBlock->getName()
                              << "\'.\n");

      BlocksInFunclet.erase(
          std::remove(BlocksInFunclet.begin(), BlocksInFunclet.end(), OldBlock),
          BlocksInFunclet.end());
      ColorVector &OldColors = BlockColors[OldBlock];
      OldColors.erase(
          std::remove(OldColors.begin(), OldColors.end(), FuncletPadBB),
          OldColors.end());

      DEBUG_WITH_TYPE("winehprepare-coloring",
                      dbgs() << "  Removed color \'" << FuncletPadBB->getName()
                              << "\' from block \'" << OldBlock->getName()
                              << "\'.\n");
    }

    // Loop over all of the instructions in this funclet, fixing up operand
    // references as we go.  This uses VMap to do all the hard work.
    for (BasicBlock *BB : BlocksInFunclet)
      // Loop over all instructions, fixing each one as we find it...
      for (Instruction &I : *BB)
        RemapInstruction(&I, VMap,
                         RF_IgnoreMissingLocals | RF_NoModuleLevelChanges);

    // Catchrets targeting cloned blocks need to be updated separately from
    // the loop above because they are not in the current funclet.
    SmallVector<CatchReturnInst *, 2> FixupCatchrets;
    for (auto &BBMapping : Orig2Clone) {
      BasicBlock *OldBlock = BBMapping.first;
      BasicBlock *NewBlock = BBMapping.second;

      FixupCatchrets.clear();
      for (BasicBlock *Pred : predecessors(OldBlock))
        if (auto *CatchRet = dyn_cast<CatchReturnInst>(Pred->getTerminator()))
          if (CatchRet->getCatchSwitchParentPad() == FuncletToken)
            FixupCatchrets.push_back(CatchRet);

      for (CatchReturnInst *CatchRet : FixupCatchrets)
        CatchRet->setSuccessor(NewBlock);
    }

    auto UpdatePHIOnClonedBlock = [&](PHINode *PN, bool IsForOldBlock) {
      unsigned NumPreds = PN->getNumIncomingValues();
      for (unsigned PredIdx = 0, PredEnd = NumPreds; PredIdx != PredEnd;
           ++PredIdx) {
        BasicBlock *IncomingBlock = PN->getIncomingBlock(PredIdx);
        bool EdgeTargetsFunclet;
        if (auto *CRI =
                dyn_cast<CatchReturnInst>(IncomingBlock->getTerminator())) {
          EdgeTargetsFunclet = (CRI->getCatchSwitchParentPad() == FuncletToken);
        } else {
          ColorVector &IncomingColors = BlockColors[IncomingBlock];
          assert(!IncomingColors.empty() && "Block not colored!");
          assert((IncomingColors.size() == 1 ||
                  llvm::all_of(IncomingColors,
                               [&](BasicBlock *Color) {
                                 return Color != FuncletPadBB;
                               })) &&
                 "Cloning should leave this funclet's blocks monochromatic");
          EdgeTargetsFunclet = (IncomingColors.front() == FuncletPadBB);
        }
        if (IsForOldBlock != EdgeTargetsFunclet)
          continue;
        PN->removeIncomingValue(IncomingBlock, /*DeletePHIIfEmpty=*/false);
        // Revisit the next entry.
        --PredIdx;
        --PredEnd;
      }
    };

    for (auto &BBMapping : Orig2Clone) {
      BasicBlock *OldBlock = BBMapping.first;
      BasicBlock *NewBlock = BBMapping.second;
      for (PHINode &OldPN : OldBlock->phis()) {
        UpdatePHIOnClonedBlock(&OldPN, /*IsForOldBlock=*/true);
      }
      for (PHINode &NewPN : NewBlock->phis()) {
        UpdatePHIOnClonedBlock(&NewPN, /*IsForOldBlock=*/false);
      }
    }

    // Check to see if SuccBB has PHI nodes. If so, we need to add entries to
    // the PHI nodes for NewBB now.
    for (auto &BBMapping : Orig2Clone) {
      BasicBlock *OldBlock = BBMapping.first;
      BasicBlock *NewBlock = BBMapping.second;
      for (BasicBlock *SuccBB : successors(NewBlock)) {
        for (PHINode &SuccPN : SuccBB->phis()) {
          // Ok, we have a PHI node.  Figure out what the incoming value was for
          // the OldBlock.
          int OldBlockIdx = SuccPN.getBasicBlockIndex(OldBlock);
          if (OldBlockIdx == -1)
            break;
          Value *IV = SuccPN.getIncomingValue(OldBlockIdx);

          // Remap the value if necessary.
          if (auto *Inst = dyn_cast<Instruction>(IV)) {
            ValueToValueMapTy::iterator I = VMap.find(Inst);
            if (I != VMap.end())
              IV = I->second;
          }

          SuccPN.addIncoming(IV, NewBlock);
        }
      }
    }

    for (ValueToValueMapTy::value_type VT : VMap) {
      // If there were values defined in BB that are used outside the funclet,
      // then we now have to update all uses of the value to use either the
      // original value, the cloned value, or some PHI derived value.  This can
      // require arbitrary PHI insertion, which we are prepared to do; clean
      // these up now.
      SmallVector<Use *, 16> UsesToRename;

      auto *OldI = dyn_cast<Instruction>(const_cast<Value *>(VT.first));
      if (!OldI)
        continue;
      auto *NewI = cast<Instruction>(VT.second);
      // Scan all uses of this instruction to see if it is used outside of its
      // funclet, and if so, record them in UsesToRename.
      for (Use &U : OldI->uses()) {
        Instruction *UserI = cast<Instruction>(U.getUser());
        BasicBlock *UserBB = UserI->getParent();
        ColorVector &ColorsForUserBB = BlockColors[UserBB];
        assert(!ColorsForUserBB.empty());
        if (ColorsForUserBB.size() > 1 ||
            *ColorsForUserBB.begin() != FuncletPadBB)
          UsesToRename.push_back(&U);
      }

      // If there are no uses outside the funclet, we're done with this
      // instruction.
      if (UsesToRename.empty())
        continue;

      // We found a use of OldI outside of the funclet.  Rename all uses of OldI
      // that are outside its funclet to be uses of the appropriate PHI node
      // etc.
      SSAUpdater SSAUpdate;
      SSAUpdate.Initialize(OldI->getType(), OldI->getName());
      SSAUpdate.AddAvailableValue(OldI->getParent(), OldI);
      SSAUpdate.AddAvailableValue(NewI->getParent(), NewI);

      while (!UsesToRename.empty())
        SSAUpdate.RewriteUseAfterInsertions(*UsesToRename.pop_back_val());
    }
  }
}
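
The tail of the example above first collects every use of OldI that lives outside the funclet and only then rewrites them through SSAUpdater. A minimal standalone sketch of that two-phase shape, with invented types and names (not LLVM's API):

#include <string>
#include <vector>

struct Use {
  int userRegion;        // which region the using instruction lives in
  std::string *slot;     // the operand slot that would be rewritten
};

// Phase 1: classify uses while walking them; phase 2: rewrite afterwards, so
// the use list is never mutated while it is being iterated.
void rewriteOutOfRegionUses(std::vector<Use> &uses, int homeRegion,
                            const std::string &replacement) {
  std::vector<Use *> toRename;
  for (Use &u : uses)
    if (u.userRegion != homeRegion)
      toRename.push_back(&u);
  for (Use *u : toRename)
    *u->slot = replacement;
}
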
Example No. 23
RequirementCheckResult TypeChecker::checkGenericArguments(
    DeclContext *dc, SourceLoc loc, SourceLoc noteLoc, Type owner,
    GenericSignature *genericSig, TypeSubstitutionFn substitutions,
    LookupConformanceFn conformances,
    UnsatisfiedDependency *unsatisfiedDependency,
    ConformanceCheckOptions conformanceOptions,
    GenericRequirementsCheckListener *listener,
    SubstOptions options) {
  bool valid = true;

  struct RequirementSet {
    ArrayRef<Requirement> Requirements;
    SmallVector<ParentConditionalConformance, 4> Parents;
  };

  SmallVector<RequirementSet, 8> pendingReqs;
  pendingReqs.push_back({genericSig->getRequirements(), {}});

  while (!pendingReqs.empty()) {
    auto current = pendingReqs.pop_back_val();

    for (const auto &rawReq : current.Requirements) {
      auto req = rawReq;
      if (current.Parents.empty()) {
        auto substed = rawReq.subst(substitutions, conformances, options);
        if (!substed) {
          // Another requirement will fail later; just continue.
          valid = false;
          continue;
        }

        req = *substed;
      }

      auto kind = req.getKind();
      Type rawFirstType = rawReq.getFirstType();
      Type firstType = req.getFirstType();
      Type rawSecondType, secondType;
      if (kind != RequirementKind::Layout) {
        rawSecondType = rawReq.getSecondType();
        secondType = req.getSecondType();
      }

      bool requirementFailure = false;
      if (listener && !listener->shouldCheck(kind, firstType, secondType))
        continue;

      Diag<Type, Type, Type> diagnostic;
      Diag<Type, Type, StringRef> diagnosticNote;

      switch (kind) {
      case RequirementKind::Conformance: {
        // Protocol conformance requirements.
        auto proto = secondType->castTo<ProtocolType>();
        // FIXME: This should track whether this should result in a private
        // or non-private dependency.
        // FIXME: Do we really need "used" at this point?
        // FIXME: Poor location information. How much better can we do here?
        // FIXME: This call should support listener to be able to properly
        //        diagnose problems with conformances.
        auto result =
            conformsToProtocol(firstType, proto->getDecl(), dc,
                               conformanceOptions, loc, unsatisfiedDependency);

        // Unsatisfied dependency case.
        auto status = result.getStatus();
        switch (status) {
        case RequirementCheckResult::Failure:
          // A failure at the top level is diagnosed elsewhere.
          if (current.Parents.empty())
            return status;

          diagnostic = diag::type_does_not_conform_owner;
          diagnosticNote = diag::type_does_not_inherit_or_conform_requirement;
          requirementFailure = true;
          break;
        case RequirementCheckResult::UnsatisfiedDependency:
        case RequirementCheckResult::SubstitutionFailure:
          // pass it on up.
          return status;
        case RequirementCheckResult::Success: {
          auto conformance = result.getConformance();
          // Report the conformance.
          if (listener && valid && current.Parents.empty()) {
            listener->satisfiedConformance(rawFirstType, firstType,
                                           conformance);
          }

          auto conditionalReqs = conformance.getConditionalRequirements();
          if (!conditionalReqs.empty()) {
            auto history = current.Parents;
            history.push_back({firstType, proto});
            pendingReqs.push_back({conditionalReqs, std::move(history)});
          }
          continue;
        }
        }

        // Failure needs to emit a diagnostic.
        break;
      }

      case RequirementKind::Layout: {
        // TODO: Statically check if the first type
        // conforms to the layout constraint, once we
        // support such static checks.
        continue;
      }

      case RequirementKind::Superclass:
        // Superclass requirements.
        if (!isSubclassOf(firstType, secondType, dc)) {
          diagnostic = diag::type_does_not_inherit;
          diagnosticNote = diag::type_does_not_inherit_or_conform_requirement;
          requirementFailure = true;
        }
        break;

      case RequirementKind::SameType:
        if (!firstType->isEqual(secondType)) {
          diagnostic = diag::types_not_equal;
          diagnosticNote = diag::types_not_equal_requirement;
          requirementFailure = true;
        }
        break;
      }

      if (!requirementFailure)
        continue;

      if (listener &&
          listener->diagnoseUnsatisfiedRequirement(rawReq, firstType,
                                                   secondType, current.Parents))
        return RequirementCheckResult::Failure;

      if (loc.isValid()) {
        // FIXME: Poor source-location information.
        diagnose(loc, diagnostic, owner, firstType, secondType);
        diagnose(noteLoc, diagnosticNote, rawFirstType, rawSecondType,
                 genericSig->gatherGenericParamBindingsText(
                     {rawFirstType, rawSecondType}, substitutions));

        ParentConditionalConformance::diagnoseConformanceStack(Diags, noteLoc,
                                                               current.Parents);
      }

      return RequirementCheckResult::Failure;
    }
  }

  if (valid)
    return RequirementCheckResult::Success;
  return RequirementCheckResult::SubstitutionFailure;
}
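
The pendingReqs loop above carries a Parents chain with every requirement set so that a failure deep inside a conditional conformance can be reported with its full context. A stripped-down standalone sketch of that worklist shape, using invented names rather than Swift's types:

#include <string>
#include <vector>

struct Item {
  std::string name;
  std::vector<std::string> parents;   // chain of items that introduced this one
};

// checkOne reports success and may append derived items that must also hold.
bool checkAll(std::vector<Item> pending,
              bool (*checkOne)(const Item &, std::vector<Item> &)) {
  while (!pending.empty()) {
    Item current = std::move(pending.back());
    pending.pop_back();

    std::vector<Item> derived;
    if (!checkOne(current, derived))
      return false;                   // current.parents holds the context chain

    for (Item &d : derived) {
      d.parents = current.parents;    // extend the chain for nested checks
      d.parents.push_back(current.name);
      pending.push_back(std::move(d));
    }
  }
  return true;
}
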
Example No. 24
bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
  DEBUG(dbgs() << "********** Register Stackifying **********\n"
                  "********** Function: "
               << MF.getName() << '\n');

  bool Changed = false;
  MachineRegisterInfo &MRI = MF.getRegInfo();
  WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
  const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
  const auto *TRI = MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo();
  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  LiveIntervals &LIS = getAnalysis<LiveIntervals>();

  // Walk the instructions from the bottom up. Currently we don't look past
  // block boundaries, and the blocks aren't ordered so the block visitation
  // order isn't significant, but we may want to change this in the future.
  for (MachineBasicBlock &MBB : MF) {
    // Don't use a range-based for loop, because we modify the list as we're
    // iterating over it and the end iterator may change.
    for (auto MII = MBB.rbegin(); MII != MBB.rend(); ++MII) {
      MachineInstr *Insert = &*MII;
      // Don't nest anything inside a phi.
      if (Insert->getOpcode() == TargetOpcode::PHI)
        break;

      // Don't nest anything inside an inline asm, because we don't have
      // constraints for $push inputs.
      if (Insert->getOpcode() == TargetOpcode::INLINEASM)
        break;

      // Iterate through the inputs in reverse order, since we'll be pulling
      // operands off the stack in LIFO order.
      bool AnyStackified = false;
      for (MachineOperand &Op : reverse(Insert->uses())) {
        // We're only interested in explicit virtual register operands.
        if (!Op.isReg() || Op.isImplicit() || !Op.isUse())
          continue;

        unsigned Reg = Op.getReg();

        // Only consider registers with a single definition.
        // TODO: Eventually we may relax this, to stackify phi transfers.
        MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
        if (!Def)
          continue;

        // Don't nest an INLINE_ASM def into anything, because we don't have
        // constraints for $pop outputs.
        if (Def->getOpcode() == TargetOpcode::INLINEASM)
          continue;

        // Don't nest PHIs inside of anything.
        if (Def->getOpcode() == TargetOpcode::PHI)
          continue;

        // Argument instructions represent live-in registers and not real
        // instructions.
        if (Def->getOpcode() == WebAssembly::ARGUMENT_I32 ||
            Def->getOpcode() == WebAssembly::ARGUMENT_I64 ||
            Def->getOpcode() == WebAssembly::ARGUMENT_F32 ||
            Def->getOpcode() == WebAssembly::ARGUMENT_F64)
          continue;

        if (MRI.hasOneUse(Reg) && Def->getParent() == &MBB &&
            IsSafeToMove(Def, Insert, AA, LIS, MRI)) {
          // A single-use def in the same block with no intervening memory or
          // register dependencies; move the def down and nest it with the
          // current instruction.
          // TODO: Stackify multiple-use values, taking advantage of set_local
          // returning its result.
          Changed = true;
          AnyStackified = true;
          MBB.splice(Insert, &MBB, Def);
          LIS.handleMove(Def);
          MFI.stackifyVReg(Reg);
          ImposeStackOrdering(Def);
          Insert = Def;
        } else if (Def->isAsCheapAsAMove() &&
                   TII->isTriviallyReMaterializable(Def, &AA)) {
          // A trivially cloneable instruction; clone it and nest the new copy
          // with the current instruction.
          Changed = true;
          AnyStackified = true;
          unsigned OldReg = Def->getOperand(0).getReg();
          unsigned NewReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
          TII->reMaterialize(MBB, Insert, NewReg, 0, Def, *TRI);
          Op.setReg(NewReg);
          MachineInstr *Clone =
              &*std::prev(MachineBasicBlock::instr_iterator(Insert));
          LIS.InsertMachineInstrInMaps(Clone);
          LIS.createAndComputeVirtRegInterval(NewReg);
          MFI.stackifyVReg(NewReg);
          ImposeStackOrdering(Clone);
          Insert = Clone;

          // If that was the last use of the original, delete the original.
          // Otherwise shrink the LiveInterval.
          if (MRI.use_empty(OldReg)) {
            SlotIndex Idx = LIS.getInstructionIndex(Def).getRegSlot();
            LIS.removePhysRegDefAt(WebAssembly::ARGUMENTS, Idx);
            LIS.removeVRegDefAt(LIS.getInterval(OldReg), Idx);
            LIS.removeInterval(OldReg);
            LIS.RemoveMachineInstrFromMaps(Def);
            Def->eraseFromParent();
          } else {
            LIS.shrinkToUses(&LIS.getInterval(OldReg));
          }
        }
      }
      if (AnyStackified)
        ImposeStackOrdering(&*MII);
    }
  }

  // If we used EXPR_STACK anywhere, add it to the live-in sets everywhere
  // so that it never looks like a use-before-def.
  if (Changed) {
    MF.getRegInfo().addLiveIn(WebAssembly::EXPR_STACK);
    for (MachineBasicBlock &MBB : MF)
      MBB.addLiveIn(WebAssembly::EXPR_STACK);
  }

#ifndef NDEBUG
  // Verify that pushes and pops are performed in LIFO order.
  SmallVector<unsigned, 0> Stack;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      for (MachineOperand &MO : reverse(MI.explicit_operands())) {
        if (!MO.isReg())
          continue;
        unsigned VReg = MO.getReg();

        // Don't stackify physregs like SP or FP.
        if (!TargetRegisterInfo::isVirtualRegister(VReg))
          continue;

        if (MFI.isVRegStackified(VReg)) {
          if (MO.isDef())
            Stack.push_back(VReg);
          else
            assert(Stack.pop_back_val() == VReg);
        }
      }
    }
    // TODO: Generalize this code to support keeping values on the stack across
    // basic block boundaries.
    assert(Stack.empty());
  }
#endif

  return Changed;
}
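
The #ifndef NDEBUG block at the end of this example checks that stackified defs and uses nest in LIFO order. The same check, reduced to a standalone sketch with invented names:

#include <cassert>
#include <vector>

struct Event {
  unsigned reg;   // virtual register
  bool isDef;     // true for a def (push), false for a use (pop)
};

// Defs push their register; each use must pop back the most recent def.
void verifyLifo(const std::vector<Event> &events) {
  std::vector<unsigned> stack;
  for (const Event &e : events) {
    if (e.isDef) {
      stack.push_back(e.reg);
    } else {
      assert(!stack.empty() && stack.back() == e.reg && "pops are not LIFO");
      stack.pop_back();
    }
  }
  assert(stack.empty() && "values left on the stack at block end");
}
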
Example No. 25
void StackAllocationPromoter::promoteAllocationToPhi() {
  DEBUG(llvm::dbgs() << "*** Placing Phis for : " << *ASI);

  // A list of blocks that will require new Phi values.
  BlockSet PhiBlocks;

  // The "piggy-bank" data-structure that we use for processing the dom-tree
  // bottom-up.
  NodePriorityQueue PQ;

  // Collect all of the stores into the AllocStack. We know that at this point
  // we have at most one store per block.
  for (auto UI = ASI->use_begin(), E = ASI->use_end(); UI != E; ++UI) {
    SILInstruction *II = UI->getUser();
    // We need to place Phis for this block.
    if (isa<StoreInst>(II)) {
      // If the block is in the dom tree (dominated by the entry block).
      if (DomTreeNode *Node = DT->getNode(II->getParent()))
        PQ.push(std::make_pair(Node, DomTreeLevels[Node]));
    }
  }

  DEBUG(llvm::dbgs() << "*** Found: " << PQ.size() << " Defs\n");

  // A list of nodes for which we already calculated the dominator frontier.
  llvm::SmallPtrSet<DomTreeNode *, 32> Visited;

  SmallVector<DomTreeNode *, 32> Worklist;

  // Scan all of the definitions in the function bottom-up using the priority
  // queue.
  while (!PQ.empty()) {
    DomTreeNodePair RootPair = PQ.top();
    PQ.pop();
    DomTreeNode *Root = RootPair.first;
    unsigned RootLevel = RootPair.second;

    // Walk all dom tree children of Root, inspecting their successors. Only
    // J-edges whose target level is at most Root's level are added to the
    // dominance frontier.
    Worklist.clear();
    Worklist.push_back(Root);

    while (!Worklist.empty()) {
      DomTreeNode *Node = Worklist.pop_back_val();
      SILBasicBlock *BB = Node->getBlock();

      // For all successors of the node:
      for (auto &Succ : BB->getSuccessors()) {
        DomTreeNode *SuccNode = DT->getNode(Succ);

        // Skip D-edges (edges that are dom-tree edges).
        if (SuccNode->getIDom() == Node)
          continue;

        // Ignore J-edges that point to nodes whose level is greater than
        // the root level.
        unsigned SuccLevel = DomTreeLevels[SuccNode];
        if (SuccLevel > RootLevel)
          continue;

        // Ignore visited nodes.
        if (!Visited.insert(SuccNode).second)
          continue;

        // If the new PHInode is not dominated by the allocation then it's dead.
        if (!DT->dominates(ASI->getParent(), SuccNode->getBlock()))
          continue;

        // If the new PHInode is properly dominated by the deallocation then it
        // is obviously a dead PHInode, so we don't need to insert it.
        if (DSI && DT->properlyDominates(DSI->getParent(),
                                         SuccNode->getBlock()))
          continue;

        // The successor block needs a new PHI node. If this is the first time
        // we mark it, the new PHI may require additional definitions, so add
        // its node to the PQ.
        if (PhiBlocks.insert(Succ).second)
          PQ.push(std::make_pair(SuccNode, SuccLevel));
      }

      // Add the children in the dom-tree to the worklist.
      for (auto CI = Node->begin(), CE = Node->end(); CI != CE; ++CI)
        if (!Visited.count(*CI))
          Worklist.push_back(*CI);
    }
  }

  DEBUG(llvm::dbgs() << "*** Found: " << PhiBlocks.size() << " new PHIs\n");
  NumPhiPlaced += PhiBlocks.size();

  // At this point we calculated the locations of all of the new Phi values.
  // Next, add the Phi values and promote all of the loads and stores into the
  // new locations.

  // Replace the dummy values with new block arguments.
  addBlockArguments(PhiBlocks);

  // Hook up the Phi nodes, loads, and debug_value_addr with incoming values.
  fixBranchesAndUses(PhiBlocks);

  DEBUG(llvm::dbgs() << "*** Finished placing Phis ***\n");
}
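
The inner loop of this example filters edges with the classic DJ-graph test: dominator-tree edges are skipped, and a J-edge only contributes to the dominance frontier when its target is no deeper than the root. A standalone sketch of just that predicate, with invented field names:

struct Node {
  Node *idom;       // immediate dominator in the dominator tree
  unsigned level;   // depth of this node in the dominator tree
};

// An edge node -> succ is a D-edge when node immediately dominates succ;
// otherwise it is a J-edge, and only J-edges whose target level is at most
// the root's level can be part of the root's dominance frontier.
bool contributesToFrontier(const Node *node, const Node *succ,
                           unsigned rootLevel) {
  if (succ->idom == node)
    return false;                  // D-edge: never part of the frontier
  return succ->level <= rootLevel;
}
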
Example No. 26
void PathDiagnosticConsumer::HandlePathDiagnostic(
    std::unique_ptr<PathDiagnostic> D) {
  if (!D || D->path.empty())
    return;

  // We need to flatten the locations (convert Stmt* to locations) because
  // the referenced statements may be freed by the time the diagnostics
  // are emitted.
  D->flattenLocations();

  // If the PathDiagnosticConsumer does not support diagnostics that
  // cross file boundaries, prune out such diagnostics now.
  if (!supportsCrossFileDiagnostics()) {
    // Verify that the entire path is from the same FileID.
    FileID FID;
    const SourceManager &SMgr = D->path.front()->getLocation().getManager();
    SmallVector<const PathPieces *, 5> WorkList;
    WorkList.push_back(&D->path);
    SmallString<128> buf;
    llvm::raw_svector_ostream warning(buf);
    warning << "warning: Path diagnostic report is not generated. Current "
            << "output format does not support diagnostics that cross file "
            << "boundaries. Refer to --analyzer-output for valid output "
            << "formats\n";

    while (!WorkList.empty()) {
      const PathPieces &path = *WorkList.pop_back_val();

      for (const auto &I : path) {
        const PathDiagnosticPiece *piece = I.get();
        FullSourceLoc L = piece->getLocation().asLocation().getExpansionLoc();

        if (FID.isInvalid()) {
          FID = SMgr.getFileID(L);
        } else if (SMgr.getFileID(L) != FID) {
          llvm::errs() << warning.str();
          return;
        }

        // Check the source ranges.
        ArrayRef<SourceRange> Ranges = piece->getRanges();
        for (const auto &I : Ranges) {
          SourceLocation L = SMgr.getExpansionLoc(I.getBegin());
          if (!L.isFileID() || SMgr.getFileID(L) != FID) {
            llvm::errs() << warning.str();
            return;
          }
          L = SMgr.getExpansionLoc(I.getEnd());
          if (!L.isFileID() || SMgr.getFileID(L) != FID) {
            llvm::errs() << warning.str();
            return;
          }
        }

        if (const auto *call = dyn_cast<PathDiagnosticCallPiece>(piece))
          WorkList.push_back(&call->path);
        else if (const auto *macro = dyn_cast<PathDiagnosticMacroPiece>(piece))
          WorkList.push_back(&macro->subPieces);
      }
    }

    if (FID.isInvalid())
      return; // FIXME: Emit a warning?
  }

  // Profile the node to see if we already have something matching it
  llvm::FoldingSetNodeID profile;
  D->Profile(profile);
  void *InsertPos = nullptr;

  if (PathDiagnostic *orig = Diags.FindNodeOrInsertPos(profile, InsertPos)) {
    // Keep the PathDiagnostic with the shorter path.
    // Note, the enclosing routine is called in deterministic order, so the
    // results will be consistent between runs (no reason to break ties if the
    // size is the same).
    const unsigned orig_size = orig->full_size();
    const unsigned new_size = D->full_size();
    if (orig_size <= new_size)
      return;

    assert(orig != D.get());
    Diags.RemoveNode(orig);
    delete orig;
  }

  Diags.InsertNode(D.release());
}
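
The FoldingSet logic at the end of this example keeps only the report with the shorter path when two diagnostics profile to the same key. A minimal standalone sketch of that policy using a std::map in place of a FoldingSet (invented names):

#include <map>
#include <memory>
#include <string>

struct Report {
  std::string key;     // profile of the diagnostic
  unsigned pathSize;   // length of the path; smaller is preferred
};

void insertReport(std::map<std::string, std::unique_ptr<Report>> &reports,
                  std::unique_ptr<Report> r) {
  auto it = reports.find(r->key);
  if (it != reports.end() && it->second->pathSize <= r->pathSize)
    return;                        // keep the existing, shorter (or equal) report
  std::string key = r->key;
  reports[key] = std::move(r);     // otherwise replace it
}
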
Example No. 27
void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating.  This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't get
  // invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);

  // Add all nodes in depth first order.
  SmallVector<SDNode*, 64> Worklist;
  SmallPtrSet<SDNode*, 64> Visited;
  Worklist.push_back(DAG->getRoot().getNode());
  Visited.insert(DAG->getRoot().getNode());

  SmallVector<SUnit*, 8> CallSUnits;
  while (!Worklist.empty()) {
    SDNode *NI = Worklist.pop_back_val();

    // Add all operands to the worklist unless they've already been added.
    for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
      if (Visited.insert(NI->getOperand(i).getNode()))
        Worklist.push_back(NI->getOperand(i).getNode());

    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;

    SUnit *NodeSUnit = newSUnit(NI);

    // See if anything is glued to this node; if so, add the glued nodes to
    // this SUnit.  Nodes can have at most one glue input and one glue output.
    // Glue is required to be the last operand and result of a node.

    // Scan up to find glued preds.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue) {
      N = N->getOperand(N->getNumOperands()-1).getNode();
      assert(N->getNodeId() == -1 && "Node already inserted!");
      N->setNodeId(NodeSUnit->NodeNum);
      if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
        NodeSUnit->isCall = true;
    }

    // Scan down to find any glued succs.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Glue) {
      SDValue GlueVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Glue result.
      bool HasGlueUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
           UI != E; ++UI)
        if (GlueVal.isOperandOf(*UI)) {
          HasGlueUse = true;
          assert(N->getNodeId() == -1 && "Node already inserted!");
          N->setNodeId(NodeSUnit->NodeNum);
          N = *UI;
          if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
            NodeSUnit->isCall = true;
          break;
        }
      if (!HasGlueUse) break;
    }

    if (NodeSUnit->isCall)
      CallSUnits.push_back(NodeSUnit);

    // Schedule zero-latency TokenFactor below any nodes that may increase the
    // schedule height. Otherwise, ancestors of the TokenFactor may appear to
    // have false stalls.
    if (NI->getOpcode() == ISD::TokenFactor)
      NodeSUnit->isScheduleLow = true;

    // If there are glue operands involved, N is now the bottom-most node
    // of the sequence of nodes that are glued together.
    // Update the SUnit.
    NodeSUnit->setNode(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NodeSUnit->NodeNum);

    // Compute NumRegDefsLeft. This must be done before AddSchedEdges.
    InitNumRegDefsLeft(NodeSUnit);

    // Assign the Latency field of NodeSUnit using target-provided information.
    computeLatency(NodeSUnit);
  }

  // Find all call operands.
  while (!CallSUnits.empty()) {
    SUnit *SU = CallSUnits.pop_back_val();
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->getOpcode() != ISD::CopyToReg)
        continue;
      SDNode *SrcN = SUNode->getOperand(2).getNode();
      if (isPassiveNode(SrcN)) continue;   // Not scheduled.
      SUnit *SrcSU = &SUnits[SrcN->getNodeId()];
      SrcSU->isCallOp = true;
    }
  }
}
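
BuildSchedUnits seeds its worklist with the DAG root and pushes operands only when they are first inserted into the Visited set, so each node is processed exactly once. A standalone sketch of that traversal with invented types:

#include <unordered_set>
#include <vector>

struct Node {
  std::vector<Node *> operands;
};

// Depth-first worklist walk: a node's operands are enqueued only the first
// time they are seen, so every reachable node is processed once.
template <typename Fn>
void visitAll(Node *root, Fn process) {
  std::vector<Node *> worklist{root};
  std::unordered_set<Node *> visited{root};
  while (!worklist.empty()) {
    Node *n = worklist.back();
    worklist.pop_back();
    for (Node *op : n->operands)
      if (visited.insert(op).second)
        worklist.push_back(op);
    process(n);
  }
}
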
Example No. 28
/// traceSiblingValue - Trace a value that is about to be spilled back to the
/// real defining instructions by looking through sibling copies. Always stay
/// within the range of OrigVNI so the registers are known to carry the same
/// value.
///
/// Determine if the value is defined by all reloads, so spilling isn't
/// necessary - the value is already in the stack slot.
///
/// Return a defining instruction that may be a candidate for rematerialization.
///
MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
                                               VNInfo *OrigVNI) {
  // Check if a cached value already exists.
  SibValueMap::iterator SVI;
  bool Inserted;
  tie(SVI, Inserted) =
    SibValues.insert(std::make_pair(UseVNI, SibValueInfo(UseReg, UseVNI)));
  if (!Inserted) {
    DEBUG(dbgs() << "Cached value " << PrintReg(UseReg) << ':'
                 << UseVNI->id << '@' << UseVNI->def << ' ' << SVI->second);
    return SVI->second.DefMI;
  }

  DEBUG(dbgs() << "Tracing value " << PrintReg(UseReg) << ':'
               << UseVNI->id << '@' << UseVNI->def << '\n');

  // List of (Reg, VNI) that have been inserted into SibValues, but need to be
  // processed.
  SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(UseReg, UseVNI));

  do {
    unsigned Reg;
    VNInfo *VNI;
    tie(Reg, VNI) = WorkList.pop_back_val();
    DEBUG(dbgs() << "  " << PrintReg(Reg) << ':' << VNI->id << '@' << VNI->def
                 << ":\t");

    // First check if this value has already been computed.
    SVI = SibValues.find(VNI);
    assert(SVI != SibValues.end() && "Missing SibValues entry");

    // Trace through PHI-defs created by live range splitting.
    if (VNI->isPHIDef()) {
      // Stop at original PHIs.  We don't know the value at the predecessors.
      if (VNI->def == OrigVNI->def) {
        DEBUG(dbgs() << "orig phi value\n");
        SVI->second.DefByOrigPHI = true;
        SVI->second.AllDefsAreReloads = false;
        propagateSiblingValue(SVI);
        continue;
      }

      // This is a PHI inserted by live range splitting.  We could trace the
      // live-out value from predecessor blocks, but that search can be very
      // expensive if there are many predecessors and many more PHIs as
      // generated by tail-dup when it sees an indirectbr.  Instead, look at
      // all the non-PHI defs that have the same value as OrigVNI.  They must
      // jointly dominate VNI->def.  This is not optimal since VNI may actually
      // be jointly dominated by a smaller subset of defs, so there is a chance
      // we will miss an AllDefsAreReloads optimization.

      // Separate all values dominated by OrigVNI into PHIs and non-PHIs.
      SmallVector<VNInfo*, 8> PHIs, NonPHIs;
      LiveInterval &LI = LIS.getInterval(Reg);
      LiveInterval &OrigLI = LIS.getInterval(Original);

      for (LiveInterval::vni_iterator VI = LI.vni_begin(), VE = LI.vni_end();
           VI != VE; ++VI) {
        VNInfo *VNI2 = *VI;
        if (VNI2->isUnused())
          continue;
        if (!OrigLI.containsOneValue() &&
            OrigLI.getVNInfoAt(VNI2->def) != OrigVNI)
          continue;
        if (VNI2->isPHIDef() && VNI2->def != OrigVNI->def)
          PHIs.push_back(VNI2);
        else
          NonPHIs.push_back(VNI2);
      }
      DEBUG(dbgs() << "split phi value, checking " << PHIs.size()
                   << " phi-defs, and " << NonPHIs.size()
                   << " non-phi/orig defs\n");

      // Create entries for all the PHIs.  Don't add them to the worklist; we
      // are processing all of them in one go here.
      for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
        SibValues.insert(std::make_pair(PHIs[i], SibValueInfo(Reg, PHIs[i])));

      // Add every PHI as a dependent of all the non-PHIs.
      for (unsigned i = 0, e = NonPHIs.size(); i != e; ++i) {
        VNInfo *NonPHI = NonPHIs[i];
        // Known value? Try an insertion.
        tie(SVI, Inserted) =
          SibValues.insert(std::make_pair(NonPHI, SibValueInfo(Reg, NonPHI)));
        // Add all the PHIs as dependents of NonPHI.
        for (unsigned pi = 0, pe = PHIs.size(); pi != pe; ++pi)
          SVI->second.Deps.push_back(PHIs[pi]);
        // This is the first time we see NonPHI, add it to the worklist.
        if (Inserted)
          WorkList.push_back(std::make_pair(Reg, NonPHI));
        else
          // Propagate to all inserted PHIs, not just VNI.
          propagateSiblingValue(SVI);
      }

      // Next work list item.
      continue;
    }

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Missing def");

    // Trace through sibling copies.
    if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
      if (isSibling(SrcReg)) {
        LiveInterval &SrcLI = LIS.getInterval(SrcReg);
        LiveRangeQuery SrcQ(SrcLI, VNI->def);
        assert(SrcQ.valueIn() && "Copy from non-existing value");
        // Check if this COPY kills its source.
        SVI->second.KillsSource = SrcQ.isKill();
        VNInfo *SrcVNI = SrcQ.valueIn();
        DEBUG(dbgs() << "copy of " << PrintReg(SrcReg) << ':'
                     << SrcVNI->id << '@' << SrcVNI->def
                     << " kill=" << unsigned(SVI->second.KillsSource) << '\n');
        // Known sibling source value? Try an insertion.
        tie(SVI, Inserted) = SibValues.insert(std::make_pair(SrcVNI,
                                                 SibValueInfo(SrcReg, SrcVNI)));
        // This is the first time we see Src, add it to the worklist.
        if (Inserted)
          WorkList.push_back(std::make_pair(SrcReg, SrcVNI));
        propagateSiblingValue(SVI, VNI);
        // Next work list item.
        continue;
      }
    }

    // Track reachable reloads.
    SVI->second.DefMI = MI;
    SVI->second.SpillMBB = MI->getParent();
    int FI;
    if (Reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) {
      DEBUG(dbgs() << "reload\n");
      propagateSiblingValue(SVI);
      // Next work list item.
      continue;
    }

    // Potential remat candidate.
    DEBUG(dbgs() << "def " << *MI);
    SVI->second.AllDefsAreReloads = false;
    propagateSiblingValue(SVI);
  } while (!WorkList.empty());

  // Look up the value we were looking for.  We already did this lookup at the
  // top of the function, but SibValues may have been invalidated.
  SVI = SibValues.find(UseVNI);
  assert(SVI != SibValues.end() && "Didn't compute requested info");
  DEBUG(dbgs() << "  traced to:\t" << SVI->second);
  return SVI->second.DefMI;
}
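
traceSiblingValue leans on one idiom throughout: try to insert a placeholder entry into SibValues, and use the returned bool to decide whether the pair still needs to be traced (enqueue it) or has already been seen (reuse the cached info). A standalone sketch of that cache-or-enqueue step with invented names:

#include <map>
#include <utility>
#include <vector>

struct Info {
  bool allDefsAreReloads = true;   // placeholder fields filled in later
};

using Key = std::pair<unsigned, unsigned>;   // (register, value-number id)

// Insert a placeholder for k. If it was not there before, the pair still has
// to be traced, so it also goes on the worklist; either way the caller gets a
// reference to the (possibly still pending) cached entry.
Info &lookupOrEnqueue(std::map<Key, Info> &cache, std::vector<Key> &worklist,
                      Key k) {
  auto [it, inserted] = cache.emplace(k, Info{});
  if (inserted)
    worklist.push_back(k);
  return it->second;
}
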
Example No. 29
bool SILValueOwnershipChecker::gatherUsers(
    SmallVectorImpl<BranchPropagatedUser> &lifetimeEndingUsers,
    SmallVectorImpl<BranchPropagatedUser> &nonLifetimeEndingUsers,
    SmallVectorImpl<BranchPropagatedUser> &implicitRegularUsers) {

  // See if Value is guaranteed. If we are guaranteed and not forwarding, then
  // we need to look through subobject uses for more uses. Otherwise, if we are
  // forwarding, we do not create any lifetime ending users/non lifetime ending
  // users since we verify against our base.
  auto ownershipKind = value.getOwnershipKind();
  bool isGuaranteed = ownershipKind == ValueOwnershipKind::Guaranteed;
  bool isOwned = ownershipKind == ValueOwnershipKind::Owned;

  if (isGuaranteed && isGuaranteedForwardingValue(value))
    return true;

  // Then gather up our initial list of users.
  SmallVector<Operand *, 8> users;
  std::copy(value->use_begin(), value->use_end(), std::back_inserter(users));

  auto addCondBranchToList = [](SmallVectorImpl<BranchPropagatedUser> &list,
                                CondBranchInst *cbi, unsigned operandIndex) {
    if (cbi->isConditionOperandIndex(operandIndex)) {
      list.emplace_back(cbi);
      return;
    }

    bool isTrueOperand = cbi->isTrueOperandIndex(operandIndex);
    list.emplace_back(cbi, isTrueOperand ? CondBranchInst::TrueIdx
                                         : CondBranchInst::FalseIdx);
  };

  bool foundError = false;
  while (!users.empty()) {
    Operand *op = users.pop_back_val();
    SILInstruction *user = op->getUser();

    // If this op is a type dependent operand, skip it. It is not interesting
    // from an ownership perspective.
    if (user->isTypeDependentOperand(*op))
      continue;

    bool isGuaranteedSubValue = false;
    if (isGuaranteed && isGuaranteedForwardingInst(op->getUser())) {
      isGuaranteedSubValue = true;
    }

    auto opOwnershipKindMap = op->getOwnershipKindMap(isGuaranteedSubValue);
    // If our ownership kind doesn't match, track that we found an error, emit
    // an error message optionally and then continue.
    if (!opOwnershipKindMap.canAcceptKind(ownershipKind)) {
      foundError = true;

      // If we did not support /any/ ownership kind, it means that we found a
      // conflicting answer so the kind map that was returned is the empty
      // map. Put out a more specific error here.
      if (!opOwnershipKindMap.data.any()) {
        handleError([&]() {
          llvm::errs() << "Function: '" << user->getFunction()->getName()
                       << "'\n"
                       << "Ill-formed SIL! Unable to compute ownership kind "
                          "map for user?!\n"
                       << "For terminator users, check that successors have "
                          "compatible ownership kinds.\n"
                       << "Value: " << op->get() << "User: "******"Operand Number: " << op->getOperandNumber() << '\n'
                       << "Conv: " << ownershipKind << "\n\n";
        });
        continue;
      }

      handleError([&]() {
        llvm::errs() << "Function: '" << user->getFunction()->getName() << "'\n"
                     << "Have operand with incompatible ownership?!\n"
                     << "Value: " << op->get() << "User: "******"Operand Number: " << op->getOperandNumber() << '\n'
                     << "Conv: " << ownershipKind << '\n'
                     << "OwnershipMap:\n"
                     << opOwnershipKindMap << '\n';
      });
      continue;
    }

    auto lifetimeConstraint =
        opOwnershipKindMap.getLifetimeConstraint(ownershipKind);
    if (lifetimeConstraint == UseLifetimeConstraint::MustBeInvalidated) {
      LLVM_DEBUG(llvm::dbgs() << "        Lifetime Ending User: " << *user);
      lifetimeEndingUsers.push_back(user);
    } else {
      LLVM_DEBUG(llvm::dbgs() << "        Regular User: " << *user);
      nonLifetimeEndingUsers.push_back(user);
    }

    // If our base value is not guaranteed, we do not need to visit subobjects.
    if (!isGuaranteed)
      continue;

    // If this user is not a guaranteed forwarding instruction, it is just a
    // normal use; there are no subobject uses to gather from it.
    if (!isGuaranteedForwardingInst(user))
      continue;

    // Otherwise this forwarding user's non-trivial results are guaranteed
    // subobjects; add their uses to the worklist as well.
    if (!user->getResults().empty()) {
      for (SILValue result : user->getResults()) {
        if (result.getOwnershipKind() == ValueOwnershipKind::Trivial)
          continue;

        // We must have a guaranteed subobject, so assert that the result is
        // guaranteed and add its users to our worklist.
        assert(result.getOwnershipKind() == ValueOwnershipKind::Guaranteed &&
               "Our value is guaranteed and this is a forwarding instruction. "
               "Should have guaranteed ownership as well.");
        copy(result->getUses(), std::back_inserter(users));
      }

      continue;
    }

    assert(user->getResults().empty());

    auto *ti = dyn_cast<TermInst>(user);
    if (!ti) {
      continue;
    }

    // Otherwise, if we have a terminator, add any end_borrow users as uses to
    // ensure that the subscope is completely enclosed within the super scope. We
    // require all of our arguments to be either trivial or guaranteed.
    for (auto &succ : ti->getSuccessors()) {
      auto *succBlock = succ.getBB();

      // If we do not have any arguments, then continue.
      if (succBlock->args_empty())
        continue;

      // Otherwise, make sure that all arguments are trivial or guaranteed. If
      // we fail, emit an error.
      //
      // TODO: We could ignore this error and emit a more specific error on the
      // actual terminator.
      for (auto *succArg : succBlock->getPhiArguments()) {
        // *NOTE* We do not emit an error here since we want to allow for more
        // specific errors to be found during use_verification.
        //
        // TODO: Add a flag that associates the terminator instruction with
        // needing to be verified. If it isn't verified appropriately, assert
        // when the verifier is destroyed.
        auto succArgOwnershipKind = succArg->getOwnershipKind();
        if (!succArgOwnershipKind.isTrivialOrCompatibleWith(ownershipKind)) {
          // This is where the error would go.
          continue;
        }

        // If we have a trivial value, just continue.
        if (succArgOwnershipKind == ValueOwnershipKind::Trivial)
          continue;

        // Otherwise add all end_borrow users for this BBArg to the
        // implicit regular user list. We know that BBArg must be
        // completely joint post-dominated by these users, so we use
        // them to ensure that all of BBArg's uses are completely
        // enclosed within the end_borrow of this argument.
        for (auto *op : succArg->getUses()) {
          if (auto *ebi = dyn_cast<EndBorrowInst>(op->getUser())) {
            implicitRegularUsers.push_back(ebi);
          }
        }
      }
    }
  }

  // Return true if we did not have an error and false if we did find an error.
  //
  // The reason why we use this extra variable is to make sure that when we are
  // testing, we print out all mismatching pairs rather than just the first.
  return !foundError;
}
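
The addCondBranchToList lambda near the top of the example above records, for each conditional-branch operand, whether it is the condition itself or an argument destined for the true or false successor. A standalone sketch of that classification, under the assumption that operand 0 is the condition and the true-block arguments come before the false-block arguments (invented names, not SIL's API):

#include <optional>

enum class Edge { True, False };

struct BranchUser {
  const void *branch;          // the branch instruction being recorded
  std::optional<Edge> edge;    // empty when the operand is the condition
};

// Assumed operand layout: [condition, true-block args..., false-block args...].
BranchUser classifyOperand(const void *branch, unsigned operandIndex,
                           unsigned numTrueArgs) {
  if (operandIndex == 0)
    return {branch, std::nullopt};
  bool feedsTrueBlock = operandIndex <= numTrueArgs;
  return {branch, feedsTrueBlock ? Edge::True : Edge::False};
}
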
void IndVarSimplify::RewriteIVExpressions(Loop *L, SCEVExpander &Rewriter) {
  SmallVector<WeakVH, 16> DeadInsts;

  // Rewrite all induction variable expressions in terms of the canonical
  // induction variable.
  //
  // If there were induction variables of other sizes or offsets, manually
  // add the offsets to the primary induction variable and cast, avoiding
  // the need for the code evaluation methods to insert induction variables
  // of different sizes.
  for (IVUsers::iterator UI = IU->begin(), E = IU->end(); UI != E; ++UI) {
    const SCEV *Stride = UI->getStride();
    Value *Op = UI->getOperandValToReplace();
    const Type *UseTy = Op->getType();
    Instruction *User = UI->getUser();

    // Compute the final addrec to expand into code.
    const SCEV *AR = IU->getReplacementExpr(*UI);

    // Evaluate the expression out of the loop, if possible.
    if (!L->contains(UI->getUser())) {
      const SCEV *ExitVal = SE->getSCEVAtScope(AR, L->getParentLoop());
      if (ExitVal->isLoopInvariant(L))
        AR = ExitVal;
    }

    // FIXME: It is an extremely bad idea to indvar substitute anything more
    // complex than affine induction variables.  Doing so will put expensive
    // polynomial evaluations inside of the loop, and the strength reduction pass
    // currently can only reduce affine polynomials.  For now just disable
    // indvar subst on anything more complex than an affine addrec, unless
    // it can be expanded to a trivial value.
    if (!AR->isLoopInvariant(L) && !Stride->isLoopInvariant(L))
      continue;

    // Determine the insertion point for this user. By default, insert
    // immediately before the user. The SCEVExpander class will automatically
    // hoist loop invariants out of the loop. For PHI nodes, there may be
    // multiple uses, so compute the nearest common dominator for the
    // incoming blocks.
    Instruction *InsertPt = User;
    if (PHINode *PHI = dyn_cast<PHINode>(InsertPt))
      for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i)
        if (PHI->getIncomingValue(i) == Op) {
          if (InsertPt == User)
            InsertPt = PHI->getIncomingBlock(i)->getTerminator();
          else
            InsertPt =
              DT->findNearestCommonDominator(InsertPt->getParent(),
                                             PHI->getIncomingBlock(i))
                    ->getTerminator();
        }

    // Now expand it into actual Instructions and patch it into place.
    Value *NewVal = Rewriter.expandCodeFor(AR, UseTy, InsertPt);

    // Patch the new value into place.
    if (Op->hasName())
      NewVal->takeName(Op);
    User->replaceUsesOfWith(Op, NewVal);
    UI->setOperandValToReplace(NewVal);
    DEBUG(dbgs() << "INDVARS: Rewrote IV '" << *AR << "' " << *Op << '\n'
                 << "   into = " << *NewVal << "\n");
    ++NumRemoved;
    Changed = true;

    // The old value may be dead now.
    DeadInsts.push_back(Op);
  }

  // Clear the rewriter cache, because values that are in the rewriter's cache
  // can be deleted in the loop below, causing the AssertingVH in the cache to
  // trigger.
  Rewriter.clear();
  // Now that we're done iterating through lists, clean up any instructions
  // which are now dead.
  while (!DeadInsts.empty())
    if (Instruction *Inst =
          dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val()))
      RecursivelyDeleteTriviallyDeadInstructions(Inst);
}
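
RewriteIVExpressions defers deletion: possibly-dead operands are only recorded in DeadInsts during the rewrite loop, and the actual cleanup happens after the rewriter cache has been cleared. A standalone sketch of that defer-then-delete shape with invented types:

#include <vector>

struct Inst {
  unsigned numUses = 0;
};

// Deleting eagerly inside the rewrite loop would invalidate the data the loop
// is still iterating over, so candidates are queued and only freed afterwards.
void deleteDeadCandidates(std::vector<Inst *> &deadCandidates) {
  while (!deadCandidates.empty()) {
    Inst *inst = deadCandidates.back();
    deadCandidates.pop_back();
    if (inst && inst->numUses == 0)
      delete inst;   // assumes the instructions were heap-allocated with new
  }
}
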