Example #1
AbstractFunctionDecl *SILDeclRef::getOverriddenWitnessTableEntry(
                                                 AbstractFunctionDecl *func) {
  if (!isa<ProtocolDecl>(func->getDeclContext()))
    return func;

  AbstractFunctionDecl *bestOverridden = nullptr;

  SmallVector<AbstractFunctionDecl *, 4> stack;
  SmallPtrSet<AbstractFunctionDecl *, 4> visited;
  stack.push_back(func);
  visited.insert(func);

  while (!stack.empty()) {
    auto current = stack.back();
    stack.pop_back();

    auto overriddenDecls = current->getOverriddenDecls();
    if (overriddenDecls.empty()) {
      // This declaration introduced a witness table entry. Determine whether
      // it is better than the best entry we've seen thus far.
      if (!bestOverridden ||
          ProtocolDecl::compare(
                        cast<ProtocolDecl>(current->getDeclContext()),
                        cast<ProtocolDecl>(bestOverridden->getDeclContext()))
            < 0) {
        bestOverridden = cast<AbstractFunctionDecl>(current);
      }

      continue;
    }

    // Add overridden declarations to the stack.
    for (auto overridden : overriddenDecls) {
      auto overriddenFunc = cast<AbstractFunctionDecl>(overridden);
      if (visited.insert(overriddenFunc).second)
        stack.push_back(overriddenFunc);
    }
  }

  return bestOverridden;
}
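Most of the snippets in this collection share one idiom: a depth-first walk
driven by an explicit stack plus a visited set, instead of recursion. Below is
a minimal, self-contained sketch of the pattern used above; Node and its
integer rank are hypothetical stand-ins for AbstractFunctionDecl and the
ProtocolDecl::compare ordering.

#include <unordered_set>
#include <vector>

struct Node {
  std::vector<Node *> overridden; // declarations this one overrides
  int rank = 0;                   // stand-in for the ProtocolDecl::compare order
};

// Walk the override graph depth-first and return the root declaration
// (one that overrides nothing) with the smallest rank.
Node *bestOverriddenRoot(Node *start) {
  Node *best = nullptr;
  std::vector<Node *> stack = {start};
  std::unordered_set<Node *> visited = {start};
  while (!stack.empty()) {
    Node *current = stack.back();
    stack.pop_back();
    if (current->overridden.empty()) {
      // A root: keep it if it beats the best candidate so far.
      if (!best || current->rank < best->rank)
        best = current;
      continue;
    }
    for (Node *next : current->overridden)
      if (visited.insert(next).second)
        stack.push_back(next);
  }
  return best;
}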
Example #2
bool CXXRecordDecl::forallBases(ForallBasesCallback *BaseMatches,
                                void *OpaqueData,
                                bool AllowShortCircuit) const {
  SmallVector<const CXXRecordDecl*, 8> Queue;

  const CXXRecordDecl *Record = this;
  bool AllMatches = true;
  while (true) {
    for (CXXRecordDecl::base_class_const_iterator
           I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) {
      const RecordType *Ty = I->getType()->getAs<RecordType>();
      if (!Ty) {
        if (AllowShortCircuit) return false;
        AllMatches = false;
        continue;
      }

      CXXRecordDecl *Base = 
            cast_or_null<CXXRecordDecl>(Ty->getDecl()->getDefinition());
      if (!Base) {
        if (AllowShortCircuit) return false;
        AllMatches = false;
        continue;
      }
      
      Queue.push_back(Base);
      if (!BaseMatches(Base, OpaqueData)) {
        if (AllowShortCircuit) return false;
        AllMatches = false;
        continue;
      }
    }

    if (Queue.empty()) break;
    Record = Queue.back(); // Despite the name, Queue is used as a stack (LIFO).
    Queue.pop_back();
  }

  return AllMatches;
}
Example #3
void NestedNameSpecifierLocBuilder::MakeTrivial(ASTContext &Context, 
                                                NestedNameSpecifier *Qualifier, 
                                                SourceRange R) {
  Representation = Qualifier;
  
  // Construct bogus (but well-formed) source information for the 
  // nested-name-specifier.
  BufferSize = 0;
  SmallVector<NestedNameSpecifier *, 4> Stack;
  for (NestedNameSpecifier *NNS = Qualifier; NNS; NNS = NNS->getPrefix())
    Stack.push_back(NNS);
  while (!Stack.empty()) {
    NestedNameSpecifier *NNS = Stack.back();
    Stack.pop_back();
    switch (NNS->getKind()) {
      case NestedNameSpecifier::Identifier:
      case NestedNameSpecifier::Namespace:
      case NestedNameSpecifier::NamespaceAlias:
        SaveSourceLocation(R.getBegin(), Buffer, BufferSize, BufferCapacity);
        break;
        
      case NestedNameSpecifier::TypeSpec:
      case NestedNameSpecifier::TypeSpecWithTemplate: {
        TypeSourceInfo *TSInfo =
            Context.getTrivialTypeSourceInfo(QualType(NNS->getAsType(), 0),
                                             R.getBegin());
        SavePointer(TSInfo->getTypeLoc().getOpaqueData(), Buffer, BufferSize, 
                    BufferCapacity);
        break;
      }
        
      case NestedNameSpecifier::Global:
        break;
    }
    
    // Save the location of the '::'.
    SaveSourceLocation(Stack.empty() ? R.getEnd() : R.getBegin(),
                       Buffer, BufferSize, BufferCapacity);
  }
}
Example #4
/// constructScopeNest - Assign DFS in/out numbers to Scope and all of its
/// descendants iteratively, using an explicit work stack.
void LexicalScopes::constructScopeNest(LexicalScope *Scope) {
  assert(Scope && "Unable to calculate scope dominance graph!");
  SmallVector<LexicalScope *, 4> WorkStack;
  WorkStack.push_back(Scope);
  unsigned Counter = 0;
  while (!WorkStack.empty()) {
    LexicalScope *WS = WorkStack.back();
    const SmallVectorImpl<LexicalScope *> &Children = WS->getChildren();
    bool visitedChildren = false;
    for (auto &ChildScope : Children)
      if (!ChildScope->getDFSOut()) {
        WorkStack.push_back(ChildScope);
        visitedChildren = true;
        ChildScope->setDFSIn(++Counter);
        break;
      }
    if (!visitedChildren) {
      WorkStack.pop_back();
      WS->setDFSOut(++Counter);
    }
  }
}
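The DFS in/out numbers assigned above reduce ancestor queries on the scope
tree to an interval check. A minimal sketch of that check follows; ScopeNums
is a hypothetical stand-in for the DFSIn/DFSOut pair that constructScopeNest
assigns (LexicalScope exposes an equivalent dominates query).

struct ScopeNums {
  unsigned DFSIn = 0;
  unsigned DFSOut = 0;
};

// A is an ancestor of B exactly when A's DFS interval encloses B's.
static bool dominates(const ScopeNums &A, const ScopeNums &B) {
  return A.DFSIn <= B.DFSIn && A.DFSOut >= B.DFSOut;
}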
Example #5
// Look through full copies and PHIs to get the set of non-copy MachineInstrs
// that can produce MI.
void A15SDOptimizer::elideCopiesAndPHIs(MachineInstr *MI,
                                        SmallVectorImpl<MachineInstr*> &Outs) {
  // Looking through PHIs may create loops so we need to track what
  // instructions we have visited before.
  std::set<MachineInstr *> Reached;
  SmallVector<MachineInstr *, 8> Front;
  Front.push_back(MI);
  while (!Front.empty()) {
    MI = Front.back();
    Front.pop_back();

    // If we have already explored this MachineInstr, ignore it.
    if (Reached.find(MI) != Reached.end())
      continue;
    Reached.insert(MI);
    if (MI->isPHI()) {
      // PHI operands are (value, predecessor-block) pairs following the def,
      // so step by two to visit each incoming value.
      for (unsigned I = 1, E = MI->getNumOperands(); I != E; I += 2) {
        unsigned Reg = MI->getOperand(I).getReg();
        if (!TRI->isVirtualRegister(Reg))
          continue;
        MachineInstr *NewMI = MRI->getVRegDef(Reg);
        if (!NewMI)
          continue;
        Front.push_back(NewMI);
      }
    } else if (MI->isFullCopy()) {
      if (!TRI->isVirtualRegister(MI->getOperand(1).getReg()))
        continue;
      MachineInstr *NewMI = MRI->getVRegDef(MI->getOperand(1).getReg());
      if (!NewMI)
        continue;
      Front.push_back(NewMI);
    } else {
      DEBUG(dbgs() << "Found partial copy" << *MI << "\n");
      Outs.push_back(MI);
    }
  }
}
Example #6
void ScheduleDAGSDNodes::dumpNode(const SUnit &SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dumpNodeName(SU);
  dbgs() << ": ";

  if (!SU.getNode()) {
    dbgs() << "PHYS REG COPY\n";
    return;
  }

  SU.getNode()->dump(DAG);
  dbgs() << "\n";
  SmallVector<SDNode *, 4> GluedNodes;
  for (SDNode *N = SU.getNode()->getGluedNode(); N; N = N->getGluedNode())
    GluedNodes.push_back(N);
  while (!GluedNodes.empty()) {
    dbgs() << "    ";
    GluedNodes.back()->dump(DAG);
    dbgs() << "\n";
    GluedNodes.pop_back();
  }
#endif
}
Example #7
void Module::markUnavailable(bool MissingRequirement) {
  if (!IsAvailable)
    return;

  SmallVector<Module *, 2> Stack;
  Stack.push_back(this);
  while (!Stack.empty()) {
    Module *Current = Stack.back();
    Stack.pop_back();

    if (!Current->IsAvailable)
      continue;

    Current->IsAvailable = false;
    Current->IsMissingRequirement |= MissingRequirement;
    for (submodule_iterator Sub = Current->submodule_begin(),
                            SubEnd = Current->submodule_end();
         Sub != SubEnd; ++Sub) {
      if ((*Sub)->IsAvailable)
        Stack.push_back(*Sub);
    }
  }
}
Example #8
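// Return true if V is a load or is transitively computed from one: walk the
// operand (use-def) graph with an explicit worklist, treating call results
// as opaque (see the comment below).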
static bool IsValueIntLoadRecursive(Value *V) {
  SmallVector<Value*, 4> Worklist;
  DenseSet<const Value *> VisitedValues;
  Worklist.push_back(V);
  VisitedValues.insert(V);

  while (!Worklist.empty()) {
    Value *CurValue = Worklist.back();
    Worklist.pop_back();

    if (!CanCheckValue(CurValue))
      continue;

    if (IsLoad(CurValue))
      return true;

    if (User *U = dyn_cast<User>(CurValue)) {
      // Do not walk through call instructions. The call arguments are not
      // necessarily directly related to the result, so it makes no sense to
      // cross-check them.
      //
      // Alternatively we could check call return values as if they were loads
      // expected to be datarando'd, but this may add false positives, so we
      // just ignore calls for now.
      if (isa<CallInst>(CurValue) ||
          isa<InvokeInst>(CurValue)) {
        continue;
      }

      for (Value *V : U->operands())
        if (VisitedValues.insert(V).second)
          Worklist.push_back(V);
    }
  }

  return false;
}
      bool Find(const TypedValueRegion *R) {
        QualType T = R->getValueType();
        if (const RecordType *RT = T->getAsStructureType()) {
          const RecordDecl *RD = RT->getDecl()->getDefinition();
          assert(RD && "Referred record has no definition");
          for (const auto *I : RD->fields()) {
            const FieldRegion *FR = MrMgr.getFieldRegion(I, R);
            FieldChain.push_back(I);
            T = I->getType();
            if (T->getAsStructureType()) {
              if (Find(FR))
                return true;
            } else {
              const SVal &V = StoreMgr.getBinding(store, loc::MemRegionVal(FR));
              if (V.isUndef())
                return true;
            }
            FieldChain.pop_back();
          }
        }

        return false;
      }
Example #10
/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it.  If that makes any of its operands
/// trivially dead, delete them too, recursively.
///
/// If DeadInst is specified, the vector is filled with the instructions that
/// are actually deleted.
void llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
                                      SmallVectorImpl<Instruction*> *DeadInst) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !I->use_empty() || !isInstructionTriviallyDead(I))
    return;
  
  SmallVector<Instruction*, 16> DeadInsts;
  DeadInsts.push_back(I);
  
  while (!DeadInsts.empty()) {
    I = DeadInsts.back();
    DeadInsts.pop_back();

    // If the client wanted to know, tell it about deleted instructions.
    if (DeadInst)
      DeadInst->push_back(I);
    
    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, 0);
      
      if (!OpV->use_empty()) continue;
    
      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI))
          DeadInsts.push_back(OpI);
    }
    
    I->eraseFromParent();
  }
}
Example #11
/// DeleteUnusedInstr - If an instruction with a tied register operand can
/// be safely deleted, just delete it.
bool
TwoAddressInstructionPass::DeleteUnusedInstr(MachineBasicBlock::iterator &mi,
                                             MachineBasicBlock::iterator &nmi,
                                             MachineFunction::iterator &mbbi,
                                             unsigned Dist) {
  // Check if the instruction has no side effects and if all its defs are dead.
  SmallVector<unsigned, 4> Kills;
  if (!isSafeToDelete(mi, TII, Kills))
    return false;

  // If this instruction kills some virtual registers, we need to
  // update the kill information. If it's not possible to do so,
  // then bail out.
  SmallVector<NewKill, 4> NewKills;
  if (!canUpdateDeletedKills(Kills, NewKills, &*mbbi, Dist))
    return false;

  if (LV) {
    while (!NewKills.empty()) {
      MachineInstr *NewKill = NewKills.back().second;
      unsigned Kill = NewKills.back().first.first;
      bool isDead = NewKills.back().first.second;
      NewKills.pop_back();
      if (LV->removeVirtualRegisterKilled(Kill, mi)) {
        if (isDead)
          LV->addVirtualRegisterDead(Kill, NewKill);
        else
          LV->addVirtualRegisterKilled(Kill, NewKill);
      }
    }
  }

  mbbi->erase(mi); // Nuke the old inst.
  mi = nmi;
  return true;
}
Example #12
bool Inliner::runOnSCC(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  AssumptionCacheTracker *ACT = &getAnalysis<AssumptionCacheTracker>();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  const TargetLibraryInfo *TLI = TLIP ? &TLIP->getTLI() : nullptr;
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();

  SmallPtrSet<Function*, 8> SCCFunctions;
  DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (F) SCCFunctions.insert(F);
    DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallSite, int>, 16> CallSites;
  
  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function*, int>, 8> InlineHistory;

  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (!F) continue;
    
    for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
        CallSite CS(cast<Value>(I));
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CS || isa<IntrinsicInst>(I))
          continue;
        
        // If this is a direct call to an external function, we can never inline
        // it.  If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (CS.getCalledFunction() && CS.getCalledFunction()->isDeclaration())
          continue;
        
        CallSites.push_back(std::make_pair(CS, -1));
      }
  }

  DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this function, exit early.
  if (CallSites.empty())
    return false;
  
  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);

  
  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, AA, ACT);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate over the outer loop because inlining functions can cause indirect
    // calls to become direct calls.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi].first;
      
      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size.  This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (isInstructionTriviallyDead(CS.getInstruction(), TLI)) {
        DEBUG(dbgs() << "    -> Deleting dead call: "
                     << *CS.getInstruction() << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        CG[Caller]->removeCallEdgeFor(CS);
        CS.getInstruction()->eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // We can only inline direct calls to non-declarations.
        if (!Callee || Callee->isDeclaration()) continue;
      
        // If this call site was obtained by inlining another function, verify
        // that the include path for the function did not include the callee
        // itself.  If so, we'd be recursively inlining the same function,
        // which would provide the same callsites, which would cause us to
        // infinitely inline.
        int InlineHistoryID = CallSites[CSi].second;
        if (InlineHistoryID != -1 &&
            InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
          continue;
        
        LLVMContext &CallerCtx = Caller->getContext();

        // Get DebugLoc to report. CS will be invalid after Inliner.
        DebugLoc DLoc = CS.getInstruction()->getDebugLoc();

        // If the policy determines that we should inline this function,
        // try to do so.
        if (!shouldInline(CS)) {
          emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
                                       Twine(Callee->getName() +
                                             " will not be inlined into " +
                                             Caller->getName()));
          continue;
        }

        // Attempt to inline the function.
        if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
                                  InlineHistoryID, InsertLifetime)) {
          emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
                                       Twine(Callee->getName() +
                                             " will not be inlined into " +
                                             Caller->getName()));
          continue;
        }
        ++NumInlined;

        // Report the inline decision.
        emitOptimizationRemark(
            CallerCtx, DEBUG_TYPE, *Caller, DLoc,
            Twine(Callee->getName() + " inlined into " + Caller->getName()));

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process.  They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

          for (unsigned i = 0, e = InlineInfo.InlinedCalls.size();
               i != e; ++i) {
            Value *Ptr = InlineInfo.InlinedCalls[i];
            CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
          }
        }
      }
      
      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&
          
          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet, this
          // could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        DEBUG(dbgs() << "    -> Deleting dead function: "
              << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];
        
        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();
        
        // Removing the node for callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list.  If possible, use 
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin()+CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}
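The inline-history guard above depends on a check that the callee does not
already appear on the chain of inlines that produced this call site. Below is
a sketch of how that walk can be written, reconstructed from the usage above
(the shipped LLVM helper may differ in detail).

// Each InlineHistory entry is (inlined function, index of the entry that
// produced it), so following the .second links visits every function on the
// inline path; -1 terminates the chain.
static bool InlineHistoryIncludes(
    Function *F, int InlineHistoryID,
    const SmallVectorImpl<std::pair<Function *, int>> &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}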
Example #13
void WebAssemblyMCInstLower::lower(const MachineInstr *MI,
                                   MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  const MCInstrDesc &Desc = MI->getDesc();
  for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = MI->getOperand(I);

    MCOperand MCOp;
    switch (MO.getType()) {
    default:
      MI->print(errs());
      llvm_unreachable("unknown operand type");
    case MachineOperand::MO_MachineBasicBlock:
      MI->print(errs());
      llvm_unreachable("MachineBasicBlock operand should have been rewritten");
    case MachineOperand::MO_Register: {
      // Ignore all implicit register operands.
      if (MO.isImplicit())
        continue;
      const WebAssemblyFunctionInfo &MFI =
          *MI->getParent()->getParent()->getInfo<WebAssemblyFunctionInfo>();
      unsigned WAReg = MFI.getWAReg(MO.getReg());
      MCOp = MCOperand::createReg(WAReg);
      break;
    }
    case MachineOperand::MO_Immediate:
      if (I < Desc.NumOperands) {
        const MCOperandInfo &Info = Desc.OpInfo[I];
        if (Info.OperandType == WebAssembly::OPERAND_TYPEINDEX) {
          MCSymbol *Sym = Printer.createTempSymbol("typeindex");

          SmallVector<wasm::ValType, 4> Returns;
          SmallVector<wasm::ValType, 4> Params;

          const MachineRegisterInfo &MRI =
              MI->getParent()->getParent()->getRegInfo();
          for (const MachineOperand &MO : MI->defs())
            Returns.push_back(getType(MRI.getRegClass(MO.getReg())));
          for (const MachineOperand &MO : MI->explicit_uses())
            if (MO.isReg())
              Params.push_back(getType(MRI.getRegClass(MO.getReg())));

          // call_indirect instructions have a callee operand at the end which
          // doesn't count as a param.
          if (WebAssembly::isCallIndirect(*MI))
            Params.pop_back();

          auto *WasmSym = cast<MCSymbolWasm>(Sym);
          auto Signature = make_unique<wasm::WasmSignature>(std::move(Returns),
                                                            std::move(Params));
          WasmSym->setSignature(Signature.get());
          Printer.addSignature(std::move(Signature));
          WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION);

          const MCExpr *Expr = MCSymbolRefExpr::create(
              WasmSym, MCSymbolRefExpr::VK_WASM_TYPEINDEX, Ctx);
          MCOp = MCOperand::createExpr(Expr);
          break;
        }
      }
      MCOp = MCOperand::createImm(MO.getImm());
      break;
    case MachineOperand::MO_FPImmediate: {
      // TODO: MC converts all floating point immediate operands to double.
      // This is fine for numeric values, but may cause NaNs to change bits.
      const ConstantFP *Imm = MO.getFPImm();
      if (Imm->getType()->isFloatTy())
        MCOp = MCOperand::createFPImm(Imm->getValueAPF().convertToFloat());
      else if (Imm->getType()->isDoubleTy())
        MCOp = MCOperand::createFPImm(Imm->getValueAPF().convertToDouble());
      else
        llvm_unreachable("unknown floating point immediate type");
      break;
    }
    case MachineOperand::MO_GlobalAddress:
      MCOp = lowerSymbolOperand(MO, GetGlobalAddressSymbol(MO));
      break;
    case MachineOperand::MO_ExternalSymbol:
      // The target flag indicates whether this is a symbol for a
      // variable or a function.
      assert(MO.getTargetFlags() == 0 &&
             "WebAssembly uses only symbol flags on ExternalSymbols");
      MCOp = lowerSymbolOperand(MO, GetExternalSymbolSymbol(MO));
      break;
    case MachineOperand::MO_MCSymbol:
      // This is currently used only for LSDA symbols (GCC_except_table),
      // because global addresses or other external symbols are handled above.
      assert(MO.getTargetFlags() == 0 &&
             "WebAssembly does not use target flags on MCSymbol");
      MCOp = lowerSymbolOperand(MO, MO.getMCSymbol());
      break;
    }

    OutMI.addOperand(MCOp);
  }

  if (!WasmKeepRegisters)
    removeRegisterOperands(MI, OutMI);
}
Example #14
/// EmitSchedule - Emit the machine code in scheduled order. Return the new
/// InsertPos and MachineBasicBlock that contains this insertion
/// point. ScheduleDAGSDNodes holds a BB pointer for convenience, but this does
/// not necessarily refer to returned BB. The emitter may split blocks.
MachineBasicBlock *ScheduleDAGSDNodes::
EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;
  DenseMap<SUnit*, unsigned> CopyVRBaseMap;
  SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
  SmallSet<unsigned, 8> Seen;
  bool HasDbg = DAG->hasDebugValues();

  // If this is the first BB, emit byval parameter dbg_value's.
  if (HasDbg && BB->getParent()->begin() == MachineFunction::iterator(BB)) {
    SDDbgInfo::DbgIterator PDI = DAG->ByvalParmDbgBegin();
    SDDbgInfo::DbgIterator PDE = DAG->ByvalParmDbgEnd();
    for (; PDI != PDE; ++PDI) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(*PDI, VRBaseMap);
      if (DbgMI)
        BB->insert(InsertPos, DbgMI);
    }
  }

  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      TII->insertNoop(*Emitter.getBlock(), InsertPos);
      continue;
    }

    // For pre-regalloc scheduling, create instructions corresponding to the
    // SDNode and any glued SDNodes and append them to the block.
    if (!SU->getNode()) {
      // Emit a copy.
      EmitPhysRegCopy(SU, CopyVRBaseMap, InsertPos);
      continue;
    }

    SmallVector<SDNode *, 4> GluedNodes;
    for (SDNode *N = SU->getNode()->getGluedNode(); N;
         N = N->getGluedNode())
      GluedNodes.push_back(N);
    while (!GluedNodes.empty()) {
      SDNode *N = GluedNodes.back();
      Emitter.EmitNode(GluedNodes.back(), SU->OrigNode != SU, SU->isCloned,
                       VRBaseMap);
      // Remember the source order of the inserted instruction.
      if (HasDbg)
        ProcessSourceNode(N, DAG, Emitter, VRBaseMap, Orders, Seen);
      GluedNodes.pop_back();
    }
    Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
                     VRBaseMap);
    // Remember the source order of the inserted instruction.
    if (HasDbg)
      ProcessSourceNode(SU->getNode(), DAG, Emitter, VRBaseMap, Orders,
                        Seen);
  }

  // Insert all the dbg_values which have not already been inserted in source
  // order sequence.
  if (HasDbg) {
    MachineBasicBlock::iterator BBBegin = BB->getFirstNonPHI();

    // Sort the source order instructions and use the order to insert debug
    // values.
    std::sort(Orders.begin(), Orders.end(), OrderSorter());

    SDDbgInfo::DbgIterator DI = DAG->DbgBegin();
    SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
    // Now emit the rest according to source order.
    unsigned LastOrder = 0;
    for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
      unsigned Order = Orders[i].first;
      MachineInstr *MI = Orders[i].second;
      // Insert all SDDbgValue's whose order(s) are before "Order".
      if (!MI)
        continue;
      for (; DI != DE &&
             (*DI)->getOrder() >= LastOrder && (*DI)->getOrder() < Order; ++DI) {
        if ((*DI)->isInvalidated())
          continue;
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI) {
          if (!LastOrder)
            // Insert to start of the BB (after PHIs).
            BB->insert(BBBegin, DbgMI);
          else {
            // Insert at the instruction, which may be in a different
            // block, if the block was split by a custom inserter.
            MachineBasicBlock::iterator Pos = MI;
            MI->getParent()->insert(llvm::next(Pos), DbgMI);
          }
        }
      }
      LastOrder = Order;
    }
    // Add trailing DbgValue's before the terminator. FIXME: May want to add
    // some of them before one or more conditional branches?
    SmallVector<MachineInstr*, 8> DbgMIs;
    while (DI != DE) {
      if (!(*DI)->isInvalidated())
        if (MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap))
          DbgMIs.push_back(DbgMI);
      ++DI;
    }

    MachineBasicBlock *InsertBB = Emitter.getBlock();
    MachineBasicBlock::iterator Pos = InsertBB->getFirstTerminator();
    InsertBB->insert(Pos, DbgMIs.begin(), DbgMIs.end());
  }

  InsertPos = Emitter.getInsertPos();
  return Emitter.getBlock();
}
Example #15
void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) {
  OwningPtr<PathDiagnostic> OwningD(D);
  
  if (!D || D->path.empty())
    return;
  
  // We need to flatten the locations (convert Stmt* to locations) because
  // the referenced statements may be freed by the time the diagnostics
  // are emitted.
  D->flattenLocations();

  // If the PathDiagnosticConsumer does not support diagnostics that
  // cross file boundaries, prune out such diagnostics now.
  if (!supportsCrossFileDiagnostics()) {
    // Verify that the entire path is from the same FileID.
    FileID FID;
    const SourceManager &SMgr = (*D->path.begin())->getLocation().getManager();
    SmallVector<const PathPieces *, 5> WorkList;
    WorkList.push_back(&D->path);

    while (!WorkList.empty()) {
      const PathPieces &path = *WorkList.back();
      WorkList.pop_back();

      for (PathPieces::const_iterator I = path.begin(), E = path.end();
           I != E; ++I) {
        const PathDiagnosticPiece *piece = I->getPtr();
        FullSourceLoc L = piece->getLocation().asLocation().getExpansionLoc();
      
        if (FID.isInvalid()) {
          FID = SMgr.getFileID(L);
        } else if (SMgr.getFileID(L) != FID)
          return; // FIXME: Emit a warning?
      
        // Check the source ranges.
        ArrayRef<SourceRange> Ranges = piece->getRanges();
        for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
                                             E = Ranges.end(); I != E; ++I) {
          SourceLocation L = SMgr.getExpansionLoc(I->getBegin());
          if (!L.isFileID() || SMgr.getFileID(L) != FID)
            return; // FIXME: Emit a warning?
          L = SMgr.getExpansionLoc(I->getEnd());
          if (!L.isFileID() || SMgr.getFileID(L) != FID)
            return; // FIXME: Emit a warning?
        }
        
        if (const PathDiagnosticCallPiece *call =
            dyn_cast<PathDiagnosticCallPiece>(piece)) {
          WorkList.push_back(&call->path);
        }
        else if (const PathDiagnosticMacroPiece *macro =
                 dyn_cast<PathDiagnosticMacroPiece>(piece)) {
          WorkList.push_back(&macro->subPieces);
        }
      }
    }
    
    if (FID.isInvalid())
      return; // FIXME: Emit a warning?
  }  

  // Profile the node to see if we already have something matching it.
  llvm::FoldingSetNodeID profile;
  D->Profile(profile);
  void *InsertPos = 0;

  if (PathDiagnostic *orig = Diags.FindNodeOrInsertPos(profile, InsertPos)) {
    // Keep the PathDiagnostic with the shorter path.
    // Note, the enclosing routine is called in deterministic order, so the
    // results will be consistent between runs (no reason to break ties if the
    // size is the same).
    const unsigned orig_size = orig->full_size();
    const unsigned new_size = D->full_size();
    if (orig_size <= new_size)
      return;

    assert(orig != D);
    Diags.RemoveNode(orig);
    delete orig;
  }
  
  Diags.InsertNode(OwningD.take());
}
Example #16
bool DeclContext::lookupQualified(Type type,
                                  DeclName member,
                                  NLOptions options,
                                  LazyResolver *typeResolver,
                                  SmallVectorImpl<ValueDecl *> &decls) const {
  using namespace namelookup;
  assert(decls.empty() && "additive lookup not supported");

  if (type->is<ErrorType>())
    return false;

  auto checkLookupCascading = [this, options]() -> Optional<bool> {
    switch (static_cast<unsigned>(options & NL_KnownDependencyMask)) {
    case 0:
      return isCascadingContextForLookup(/*excludeFunctions=*/false);
    case NL_KnownNonCascadingDependency:
      return false;
    case NL_KnownCascadingDependency:
      return true;
    case NL_KnownNoDependency:
      return None;
    default:
      // FIXME: Use llvm::CountPopulation_64 when that's declared constexpr.
      static_assert(__builtin_popcountll(NL_KnownDependencyMask) == 2,
                    "mask should only include four values");
      llvm_unreachable("mask only includes four values");
    }
  };

  // Look through lvalue and inout types.
  type = type->getLValueOrInOutObjectType();

  // Look through metatypes.
  if (auto metaTy = type->getAs<AnyMetatypeType>())
    type = metaTy->getInstanceType();

  // Look through DynamicSelf.
  if (auto dynamicSelf = type->getAs<DynamicSelfType>())
    type = dynamicSelf->getSelfType();

  // Look for module references.
  if (auto moduleTy = type->getAs<ModuleType>()) {
    Module *module = moduleTy->getModule();
    auto topLevelScope = getModuleScopeContext();
    if (module == topLevelScope->getParentModule()) {
      if (auto maybeLookupCascade = checkLookupCascading()) {
        recordLookupOfTopLevelName(topLevelScope, member,
                                   maybeLookupCascade.getValue());
      }
      lookupInModule(module, /*accessPath=*/{}, member, decls,
                     NLKind::QualifiedLookup, ResolutionKind::Overloadable,
                     typeResolver, topLevelScope);
    } else {
      // Note: This is a lookup into another module. Unless we're compiling
      // multiple modules at once, or if the other module re-exports this one,
      // it shouldn't be possible to have a dependency from that module on
      // anything in this one.

      // Perform the lookup in all imports of this module.
      forAllVisibleModules(this,
                           [&](const Module::ImportedModule &import) -> bool {
        if (import.second != module)
          return true;
        lookupInModule(import.second, import.first, member, decls,
                       NLKind::QualifiedLookup, ResolutionKind::Overloadable,
                       typeResolver, topLevelScope);
        // If we're able to do an unscoped lookup, we see everything. No need
        // to keep going.
        return !import.first.empty();
      });
    }

    llvm::SmallPtrSet<ValueDecl *, 4> knownDecls;
    decls.erase(std::remove_if(decls.begin(), decls.end(),
                               [&](ValueDecl *vd) -> bool {
      // If we're performing a type lookup, don't even attempt to validate
      // the decl if it's not a type.
      if ((options & NL_OnlyTypes) && !isa<TypeDecl>(vd))
        return true;

      return !knownDecls.insert(vd).second;
    }), decls.end());

    if (auto *debugClient = topLevelScope->getParentModule()->getDebugClient())
      filterForDiscriminator(decls, debugClient);
    
    return !decls.empty();
  }

  auto &ctx = getASTContext();
  if (!ctx.LangOpts.EnableAccessControl)
    options |= NL_IgnoreAccessibility;

  // The stack of nominal type declarations to visit, and the set of those
  // we have already visited.
  SmallVector<NominalTypeDecl *, 4> stack;
  llvm::SmallPtrSet<NominalTypeDecl *, 4> visited;

  // Handle nominal types.
  bool wantProtocolMembers = false;
  bool wantLookupInAllClasses = false;
  if (auto nominal = type->getAnyNominal()) {
    visited.insert(nominal);
    stack.push_back(nominal);
    
    wantProtocolMembers = (options & NL_ProtocolMembers) &&
                          !isa<ProtocolDecl>(nominal);

    // If we want dynamic lookup and we're searching in the
    // AnyObject protocol, note this for later.
    if (options & NL_DynamicLookup) {
      if (auto proto = dyn_cast<ProtocolDecl>(nominal)) {
        if (proto->isSpecificProtocol(KnownProtocolKind::AnyObject))
          wantLookupInAllClasses = true;
      }
    }
  }
  // Handle archetypes.
  else if (auto archetypeTy = type->getAs<ArchetypeType>()) {
    // Look in the protocols to which the archetype conforms (always).
    for (auto proto : archetypeTy->getConformsTo())
      if (visited.insert(proto).second)
        stack.push_back(proto);

    // If requested, look into the superclasses of this archetype.
    if (options & NL_VisitSupertypes) {
      if (auto superclassTy = archetypeTy->getSuperclass()) {
        if (auto superclassDecl = superclassTy->getAnyNominal()) {
          if (visited.insert(superclassDecl).second) {
            stack.push_back(superclassDecl);

            wantProtocolMembers = (options & NL_ProtocolMembers) &&
                                  !isa<ProtocolDecl>(superclassDecl);
          }
        }
      }
    }
  }
  // Handle protocol compositions.
  else if (auto compositionTy = type->getAs<ProtocolCompositionType>()) {
    SmallVector<ProtocolDecl *, 4> protocols;
    if (compositionTy->isExistentialType(protocols)) {
      for (auto proto : protocols) {
        if (visited.insert(proto).second) {
          stack.push_back(proto);

          // If we want dynamic lookup and this is the AnyObject
          // protocol, note this for later.
          if ((options & NL_DynamicLookup) &&
              proto->isSpecificProtocol(KnownProtocolKind::AnyObject))
            wantLookupInAllClasses = true;
        }
      }
    }
  }

  // Allow filtering of the visible declarations based on various
  // criteria.
  bool onlyCompleteObjectInits = false;
  auto isAcceptableDecl = [&](NominalTypeDecl *current, Decl *decl) -> bool {
    // If the decl is currently being type checked, then we have something
    // cyclic going on.  Instead of poking at parts that are potentially not
    // set up, just assume it is acceptable.  This will make sure we produce an
    // error later.
    if (decl->isBeingTypeChecked())
      return true;
    
    // Filter out designated initializers, if requested.
    if (onlyCompleteObjectInits) {
      if (auto ctor = dyn_cast<ConstructorDecl>(decl)) {
        if (!ctor->isInheritable())
          return false;
      } else {
        return false;
      }
    }

    // Ignore stub implementations.
    if (auto ctor = dyn_cast<ConstructorDecl>(decl)) {
      if (ctor->hasStubImplementation())
        return false;
    }

    // Check access.
    if (!(options & NL_IgnoreAccessibility))
      if (auto VD = dyn_cast<ValueDecl>(decl))
        return VD->isAccessibleFrom(this);

    return true;
  };

  ReferencedNameTracker *tracker = nullptr;
  if (auto containingSourceFile = dyn_cast<SourceFile>(getModuleScopeContext()))
    tracker = containingSourceFile->getReferencedNameTracker();

  bool isLookupCascading;
  if (tracker) {
    if (auto maybeLookupCascade = checkLookupCascading())
      isLookupCascading = maybeLookupCascade.getValue();
    else
      tracker = nullptr;
  }

  // Visit all of the nominal types we know about, discovering any others
  // we need along the way.
  while (!stack.empty()) {
    auto current = stack.back();
    stack.pop_back();

    if (tracker)
      tracker->addUsedMember({current, member.getBaseName()},
                             isLookupCascading);

    // Make sure we've resolved implicit constructors, if we need them.
    if (member.getBaseName() == ctx.Id_init && typeResolver)
      typeResolver->resolveImplicitConstructors(current);

    // Look for results within the current nominal type and its extensions.
    bool currentIsProtocol = isa<ProtocolDecl>(current);
    for (auto decl : current->lookupDirect(member)) {
      // If we're performing a type lookup, don't even attempt to validate
      // the decl if it's not a type.
      if ((options & NL_OnlyTypes) && !isa<TypeDecl>(decl))
        continue;

      // Resolve the declaration signature when we find the
      // declaration.
      if (typeResolver && !decl->isBeingTypeChecked()) {
        typeResolver->resolveDeclSignature(decl);

        if (!decl->hasType())
          continue;
      }

      if (isAcceptableDecl(current, decl))
        decls.push_back(decl);
    }

    // If we're not supposed to visit our supertypes, we're done.
    if ((options & NL_VisitSupertypes) == 0)
      continue;

    // Visit superclass.
    if (auto classDecl = dyn_cast<ClassDecl>(current)) {
      // If we're looking for initializers, only look at the superclass if the
      // current class permits inheritance. Even then, only find complete
      // object initializers.
      bool visitSuperclass = true;
      if (member.getBaseName() == ctx.Id_init) {
        if (classDecl->inheritsSuperclassInitializers(typeResolver))
          onlyCompleteObjectInits = true;
        else
          visitSuperclass = false;
      }

      if (visitSuperclass) {
        if (auto superclassType = classDecl->getSuperclass())
          if (auto superclassDecl = superclassType->getClassOrBoundGenericClass())
            if (visited.insert(superclassDecl).second)
              stack.push_back(superclassDecl);
      }
    }

    // If we're not looking at a protocol and we're not supposed to
    // visit the protocols that this type conforms to, skip the next
    // step.
    if (!wantProtocolMembers && !currentIsProtocol)
      continue;

    for (auto proto : current->getAllProtocols()) {
      if (visited.insert(proto).second) {
        stack.push_back(proto);
      }
    }

    // For a class, we don't need to visit the protocol members of the
    // superclass: that's already handled.
    if (isa<ClassDecl>(current))
      wantProtocolMembers = false;
  }

  // If we want to perform lookup into all classes, do so now.
  if (wantLookupInAllClasses) {
    if (tracker)
      tracker->addDynamicLookupName(member.getBaseName(), isLookupCascading);

    // Collect all of the visible declarations.
    SmallVector<ValueDecl *, 4> allDecls;
    forAllVisibleModules(this, [&](Module::ImportedModule import) {
      import.second->lookupClassMember(import.first, member, allDecls);
    });

    // For each declaration whose context is not something we've
    // already visited above, add it to the list of declarations.
    llvm::SmallPtrSet<ValueDecl *, 4> knownDecls;
    for (auto decl : allDecls) {
      // If we're performing a type lookup, don't even attempt to validate
      // the decl if it's not a type.
      if ((options & NL_OnlyTypes) && !isa<TypeDecl>(decl))
        continue;

      if (typeResolver && !decl->isBeingTypeChecked()) {
        typeResolver->resolveDeclSignature(decl);
        if (!decl->hasType())
          continue;
      }

      // If the declaration has an override, name lookup will also have
      // found the overridden method. Skip this declaration, because we
      // prefer the overridden method.
      if (decl->getOverriddenDecl())
        continue;

      auto dc = decl->getDeclContext();
      auto nominal = dyn_cast<NominalTypeDecl>(dc);
      if (!nominal) {
        auto ext = cast<ExtensionDecl>(dc);
        nominal = ext->getExtendedType()->getAnyNominal();
        assert(nominal && "Couldn't find nominal type?");
      }

      // If we didn't visit this nominal type above, add this
      // declaration to the list.
      if (!visited.count(nominal) && knownDecls.insert(decl).second &&
          isAcceptableDecl(nominal, decl))
        decls.push_back(decl);
    }
  }

  // If we're supposed to remove overridden declarations, do so now.
  if (options & NL_RemoveOverridden)
    removeOverriddenDecls(decls);

  // If we're supposed to remove shadowed/hidden declarations, do so now.
  Module *M = getParentModule();
  if (options & NL_RemoveNonVisible)
    removeShadowedDecls(decls, M, typeResolver);

  if (auto *debugClient = M->getDebugClient())
    filterForDiscriminator(decls, debugClient);

  // We're done. Report success/failure.
  return !decls.empty();
}
Example #17
void FinalOverriderCollector::Collect(const CXXRecordDecl *RD, 
                                      bool VirtualBase,
                                      const CXXRecordDecl *InVirtualSubobject,
                                      CXXFinalOverriderMap &Overriders) {
  unsigned SubobjectNumber = 0;
  if (!VirtualBase)
    SubobjectNumber
      = ++SubobjectCount[cast<CXXRecordDecl>(RD->getCanonicalDecl())];

  for (CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin(),
         BaseEnd = RD->bases_end(); Base != BaseEnd; ++Base) {
    if (const RecordType *RT = Base->getType()->getAs<RecordType>()) {
      const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(RT->getDecl());
      if (!BaseDecl->isPolymorphic())
        continue;

      if (Overriders.empty() && !Base->isVirtual()) {
        // There are no other overriders of virtual member functions,
        // so let the base class fill in our overriders for us.
        Collect(BaseDecl, false, InVirtualSubobject, Overriders);
        continue;
      }

      // Collect all of the overriders from the base class subobject
      // and merge them into the set of overriders for this class.
      // For virtual base classes, populate or use the cached virtual
      // overrides so that we do not walk the virtual base class (and
      // its base classes) more than once.
      CXXFinalOverriderMap ComputedBaseOverriders;
      CXXFinalOverriderMap *BaseOverriders = &ComputedBaseOverriders;
      if (Base->isVirtual()) {
        CXXFinalOverriderMap *&MyVirtualOverriders = VirtualOverriders[BaseDecl];
        BaseOverriders = MyVirtualOverriders;
        if (!MyVirtualOverriders) {
          MyVirtualOverriders = new CXXFinalOverriderMap;

          // Collect may cause VirtualOverriders to reallocate, invalidating the
          // MyVirtualOverriders reference. Set BaseOverriders to the right
          // value now.
          BaseOverriders = MyVirtualOverriders;

          Collect(BaseDecl, true, BaseDecl, *MyVirtualOverriders);
        }
      } else
        Collect(BaseDecl, false, InVirtualSubobject, ComputedBaseOverriders);

      // Merge the overriders from this base class into our own set of
      // overriders.
      for (CXXFinalOverriderMap::iterator OM = BaseOverriders->begin(),
                                          OMEnd = BaseOverriders->end();
           OM != OMEnd; ++OM) {
        const CXXMethodDecl *CanonOM
          = cast<CXXMethodDecl>(OM->first->getCanonicalDecl());
        Overriders[CanonOM].add(OM->second);
      }
    }
  }

  for (CXXRecordDecl::method_iterator M = RD->method_begin(),
                                      MEnd = RD->method_end();
       M != MEnd; ++M) {
    // We only care about virtual methods.
    if (!M->isVirtual())
      continue;

    CXXMethodDecl *CanonM = cast<CXXMethodDecl>(M->getCanonicalDecl());

    if (CanonM->begin_overridden_methods() ==
        CanonM->end_overridden_methods()) {
      // This is a new virtual function that does not override any
      // other virtual function. Add it to the map of virtual
      // functions for which we are tracking overriders.

      // C++ [class.virtual]p2:
      //   For convenience we say that any virtual function overrides itself.
      Overriders[CanonM].add(SubobjectNumber,
                             UniqueVirtualMethod(CanonM, SubobjectNumber,
                                                 InVirtualSubobject));
      continue;
    }

    // This virtual method overrides other virtual methods, so it does
    // not add any new slots into the set of overriders. Instead, we
    // replace entries in the set of overriders with the new
    // overrider. To do so, we dig down to the original virtual
    // functions using data recursion and update all of the methods it
    // overrides.
    typedef std::pair<CXXMethodDecl::method_iterator, 
                      CXXMethodDecl::method_iterator> OverriddenMethods;
    SmallVector<OverriddenMethods, 4> Stack;
    Stack.push_back(std::make_pair(CanonM->begin_overridden_methods(),
                                   CanonM->end_overridden_methods()));
    while (!Stack.empty()) {
      OverriddenMethods OverMethods = Stack.back();
      Stack.pop_back();

      for (; OverMethods.first != OverMethods.second; ++OverMethods.first) {
        const CXXMethodDecl *CanonOM
          = cast<CXXMethodDecl>((*OverMethods.first)->getCanonicalDecl());

        // C++ [class.virtual]p2:
        //   A virtual member function C::vf of a class object S is
        //   a final overrider unless the most derived class (1.8)
        //   of which S is a base class subobject (if any) declares
        //   or inherits another member function that overrides vf.
        //
        // Treating this object like the most derived class, we
        // replace any overrides from base classes with this
        // overriding virtual function.
        Overriders[CanonOM].replaceAll(
                               UniqueVirtualMethod(CanonM, SubobjectNumber,
                                                   InVirtualSubobject));

        if (CanonOM->begin_overridden_methods() ==
            CanonOM->end_overridden_methods())
          continue;

        // Continue recursion to the methods that this virtual method
        // overrides.
        Stack.push_back(std::make_pair(CanonOM->begin_overridden_methods(),
                                       CanonOM->end_overridden_methods()));
      }
    }

    // C++ [class.virtual]p2:
    //   For convenience we say that any virtual function overrides itself.
    Overriders[CanonM].add(SubobjectNumber,
                           UniqueVirtualMethod(CanonM, SubobjectNumber,
                                               InVirtualSubobject));
  }
}
Example #18
/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
/// This method attempts to keep the cache coherent using the reverse map.
void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
  if (NLDI != NonLocalDeps.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
         DI != DE; ++DI)
      if (Instruction *Inst = DI->second.getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDeps.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove it.
  //
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }
  
  // If we have any cached pointer dependencies on this instruction, remove
  // them.  If the instruction has non-pointer type, then it can't be a pointer
  // base.
  
  // Remove it from both the load info and the store info.  The instruction
  // can't be in either of these maps if it is non-pointer.
  if (isa<PointerType>(RemInst->getType())) {
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  }
  
  // Loop over all of the things that depend on the instruction we're removing.
  // 
  SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other values,
  // we need to replace its entry with a dirty version of the instruction after
  // it.  If RemInst is a terminator, we use a null dirty value.
  //
  // Using a dirty version of the instruction after RemInst saves having to scan
  // the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));
  
  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &ReverseDeps = ReverseDepIt->second;
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDeps.empty() && !isa<TerminatorInst>(RemInst) &&
           "Nothing can locally depend on a terminator");
    
    for (SmallPtrSet<Instruction*, 4>::iterator I = ReverseDeps.begin(),
         E = ReverseDeps.end(); I != E; ++I) {
      Instruction *InstDependingOnRemInst = *I;
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");
                        
      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;
      
      // Make sure to remember that new things depend on NewDirtyVal's instruction.
      assert(NewDirtyVal.getInst() && "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(), 
                                                InstDependingOnRemInst));
    }
    
    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }
  
  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &Set = ReverseDepIt->second;
    for (SmallPtrSet<Instruction*, 4>::iterator I = Set.begin(), E = Set.end();
         I != E; ++I) {
      assert(*I != RemInst && "Already removed NonLocalDep info for RemInst");
      
      PerInstNLInfo &INLD = NonLocalDeps[*I];
      // The information is now dirty!
      INLD.second = true;
      
      for (NonLocalDepInfo::iterator DI = INLD.first.begin(), 
           DE = INLD.first.end(); DI != DE; ++DI) {
        if (DI->second.getInst() != RemInst) continue;
        
        // Convert to a dirty entry for the subsequent instruction.
        DI->second = NewDirtyVal;
        
        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, *I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating 'Set'
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }
  
  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
    ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallPtrSet<void*, 4> &Set = ReversePtrDepIt->second;
    SmallVector<std::pair<Instruction*, ValueIsLoadPair>,8> ReversePtrDepsToAdd;
    
    for (SmallPtrSet<void*, 4>::iterator I = Set.begin(), E = Set.end();
         I != E; ++I) {
      ValueIsLoadPair P;
      P.setFromOpaqueValue(*I);
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");
      
      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].second;
      
      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].first = BBSkipFirstBlockPair();
      
      // Update any entries for RemInst to use the instruction after it.
      for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
           DI != DE; ++DI) {
        if (DI->second.getInst() != RemInst) continue;
        
        // Convert to a dirty entry for the subsequent instruction.
        DI->second = NewDirtyVal;
        
        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }
      
      // Re-sort the NonLocalDepInfo.  Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      std::sort(NLPDI.begin(), NLPDI.end());
    }
    
    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);
    
    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first]
        .insert(ReversePtrDepsToAdd.back().second.getOpaqueValue());
      ReversePtrDepsToAdd.pop_back();
    }
  }

  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
  AA->deleteValue(RemInst);
  DEBUG(verifyRemoved(RemInst));
}
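
The removal logic above leans on one idiom worth isolating: while scanning a
reverse-dependence set you must not mutate it, so additions are queued in a
side vector and applied once the scan is done.  A minimal standalone sketch of
that idiom using plain standard containers (ReverseDeps and ToAdd are
illustrative names, not the types used above):

#include <map>
#include <set>
#include <utility>
#include <vector>

int main() {
  std::map<int, std::set<int>> ReverseDeps = {{1, {2, 3}}, {2, {3}}};
  std::vector<std::pair<int, int>> ToAdd;

  // Scan 1's depender set; inserting into the sets while walking one would
  // be unsafe, so queue the new edges in ToAdd instead.
  for (int Dep : ReverseDeps[1])
    ToAdd.push_back({4, Dep});  // a new target, 4, inherits 1's dependers

  ReverseDeps.erase(1);

  // Only now is it safe to apply the queued updates.
  while (!ToAdd.empty()) {
    ReverseDeps[ToAdd.back().first].insert(ToAdd.back().second);
    ToAdd.pop_back();
  }
}
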
Example #19
/// Expand the arguments of a function-like macro so that we can quickly
/// return preexpanded tokens from Tokens.
void TokenLexer::ExpandFunctionArguments() {

  SmallVector<Token, 128> ResultToks;

  // Loop through 'Tokens', expanding them into ResultToks.  Keep
  // track of whether we change anything.  If not, no need to keep them.  If so,
  // we install the newly expanded sequence as the new 'Tokens' list.
  bool MadeChange = false;

  // NextTokGetsSpace - When this is true, the next token appended to the
  // output list will get a leading space, regardless of whether it had one to
  // begin with or not.  This is used for placemarker support.
  bool NextTokGetsSpace = false;

  for (unsigned i = 0, e = NumTokens; i != e; ++i) {
    // If we found the stringify operator, get the argument stringified.  The
    // preprocessor already verified that the following token is a macro name
    // when the #define was parsed.
    const Token &CurTok = Tokens[i];
    if (CurTok.is(tok::hash) || CurTok.is(tok::hashat)) {
      int ArgNo = Macro->getArgumentNum(Tokens[i+1].getIdentifierInfo());
      assert(ArgNo != -1 && "Token following # is not an argument?");

      SourceLocation ExpansionLocStart =
          getExpansionLocForMacroDefLoc(CurTok.getLocation());
      SourceLocation ExpansionLocEnd =
          getExpansionLocForMacroDefLoc(Tokens[i+1].getLocation());

      Token Res;
      if (CurTok.is(tok::hash))  // Stringify
        Res = ActualArgs->getStringifiedArgument(ArgNo, PP,
                                                 ExpansionLocStart,
                                                 ExpansionLocEnd);
      else {
        // 'charify': don't bother caching these.
        Res = MacroArgs::StringifyArgument(ActualArgs->getUnexpArgument(ArgNo),
                                           PP, true,
                                           ExpansionLocStart,
                                           ExpansionLocEnd);
      }

      // The stringified/charified string leading space flag gets set to match
      // the #/#@ operator.
      if (CurTok.hasLeadingSpace() || NextTokGetsSpace)
        Res.setFlag(Token::LeadingSpace);

      ResultToks.push_back(Res);
      MadeChange = true;
      ++i;  // Skip arg name.
      NextTokGetsSpace = false;
      continue;
    }

    // Otherwise, if this is not an argument token, just add the token to the
    // output buffer.
    IdentifierInfo *II = CurTok.getIdentifierInfo();
    int ArgNo = II ? Macro->getArgumentNum(II) : -1;
    if (ArgNo == -1) {
      // This isn't an argument, just add it.
      ResultToks.push_back(CurTok);

      if (NextTokGetsSpace) {
        ResultToks.back().setFlag(Token::LeadingSpace);
        NextTokGetsSpace = false;
      }
      continue;
    }

    // The argument is expanded in some way, so the result differs from the
    // input.
    MadeChange = true;

    // Otherwise, this is a use of the argument.  Find out if there is a paste
    // (##) operator before or after the argument.
    bool PasteBefore =
      !ResultToks.empty() && ResultToks.back().is(tok::hashhash);
    bool PasteAfter = i+1 != e && Tokens[i+1].is(tok::hashhash);

    // If it is not the LHS/RHS of a ## operator, we must pre-expand the
    // argument and substitute the expanded tokens into the result.  This is
    // C99 6.10.3.1p1.
    if (!PasteBefore && !PasteAfter) {
      const Token *ResultArgToks;

      // Only preexpand the argument if it could possibly need it.  This
      // avoids some work in common cases.
      const Token *ArgTok = ActualArgs->getUnexpArgument(ArgNo);
      if (ActualArgs->ArgNeedsPreexpansion(ArgTok, PP))
        ResultArgToks = &ActualArgs->getPreExpArgument(ArgNo, Macro, PP)[0];
      else
        ResultArgToks = ArgTok;  // Use non-preexpanded tokens.

      // If the arg token expanded into anything, append it.
      if (ResultArgToks->isNot(tok::eof)) {
        unsigned FirstResult = ResultToks.size();
        unsigned NumToks = MacroArgs::getArgLength(ResultArgToks);
        ResultToks.append(ResultArgToks, ResultArgToks+NumToks);

        // If the '##' came from expanding an argument, turn it into 'unknown'
        // to avoid pasting.
        for (unsigned i = FirstResult, e = ResultToks.size(); i != e; ++i) {
          Token &Tok = ResultToks[i];
          if (Tok.is(tok::hashhash))
            Tok.setKind(tok::unknown);
        }

        if (ExpandLocStart.isValid()) {
          updateLocForMacroArgTokens(CurTok.getLocation(),
                                     ResultToks.begin()+FirstResult,
                                     ResultToks.end());
        }

        // If any tokens were substituted from the argument, the whitespace
        // before the first token should match the whitespace of the arg
        // identifier.
        ResultToks[FirstResult].setFlagValue(Token::LeadingSpace,
                                             CurTok.hasLeadingSpace() ||
                                             NextTokGetsSpace);
        NextTokGetsSpace = false;
      } else {
        // If this is an empty argument, and if there was whitespace before the
        // formal token, make sure the next token gets whitespace before it.
        NextTokGetsSpace = CurTok.hasLeadingSpace();
      }
      continue;
    }

    // Okay, we have a token that is either the LHS or RHS of a paste (##)
    // argument.  It gets substituted as its non-pre-expanded tokens.
    const Token *ArgToks = ActualArgs->getUnexpArgument(ArgNo);
    unsigned NumToks = MacroArgs::getArgLength(ArgToks);
    if (NumToks) {  // Not an empty argument?
      // If this is the GNU ", ## __VA_ARGS__" extension, and we just learned
      // that __VA_ARGS__ expands to multiple tokens, avoid a pasting error when
      // the expander tries to paste ',' with the first token of the __VA_ARGS__
      // expansion.
      if (PasteBefore && ResultToks.size() >= 2 &&
          ResultToks[ResultToks.size()-2].is(tok::comma) &&
          (unsigned)ArgNo == Macro->getNumArgs()-1 &&
          Macro->isVariadic()) {
        // Remove the paste operator, report use of the extension.
        PP.Diag(ResultToks.back().getLocation(), diag::ext_paste_comma);
        ResultToks.pop_back();
      }

      ResultToks.append(ArgToks, ArgToks+NumToks);

      // If the '##' came from expanding an argument, turn it into 'unknown'
      // to avoid pasting.
      for (unsigned i = ResultToks.size() - NumToks, e = ResultToks.size();
             i != e; ++i) {
        Token &Tok = ResultToks[i];
        if (Tok.is(tok::hashhash))
          Tok.setKind(tok::unknown);
      }

      if (ExpandLocStart.isValid()) {
        updateLocForMacroArgTokens(CurTok.getLocation(),
                                   ResultToks.end()-NumToks, ResultToks.end());
      }

      // If this token (the macro argument) was supposed to get leading
      // whitespace, transfer this information onto the first token of the
      // expansion.
      //
      // Do not do this if the paste operator occurs before the macro argument,
      // as in "A ## MACROARG".  In valid code, the first token will get
      // smooshed onto the preceding one anyway (forming AMACROARG).  In
      // assembler-with-cpp mode, invalid pastes are allowed through: in this
      // case, we do not want the extra whitespace to be added.  For example,
      // we want ". ## foo" -> ".foo" not ". foo".
      if ((CurTok.hasLeadingSpace() || NextTokGetsSpace) &&
          !PasteBefore)
        ResultToks[ResultToks.size()-NumToks].setFlag(Token::LeadingSpace);

      NextTokGetsSpace = false;
      continue;
    }

    // If an empty argument is on the LHS or RHS of a paste, the standard (C99
    // 6.10.3.3p2,3) calls for a bunch of placemarker stuff to occur.  We
    // implement this by eating ## operators when a LHS or RHS expands to
    // empty.
    NextTokGetsSpace |= CurTok.hasLeadingSpace();
    if (PasteAfter) {
      // Discard the argument token and skip (don't copy to the expansion
      // buffer) the paste operator after it.
      NextTokGetsSpace |= Tokens[i+1].hasLeadingSpace();
      ++i;
      continue;
    }

    // If this is on the RHS of a paste operator, we've already copied the
    // paste operator to the ResultToks list.  Remove it.
    assert(PasteBefore && ResultToks.back().is(tok::hashhash));
    NextTokGetsSpace |= ResultToks.back().hasLeadingSpace();
    ResultToks.pop_back();

    // If this is the __VA_ARGS__ token, and if the argument wasn't provided,
    // and if the macro had at least one real argument, and if the token before
    // the ## was a comma, remove the comma.
    if ((unsigned)ArgNo == Macro->getNumArgs()-1 && // is __VA_ARGS__
        ActualArgs->isVarargsElidedUse() &&       // Argument elided.
        !ResultToks.empty() && ResultToks.back().is(tok::comma)) {
      // Never add a space, even if the comma, ##, or arg had a space.
      NextTokGetsSpace = false;
      // Remove the paste operator, report use of the extension.
      PP.Diag(ResultToks.back().getLocation(), diag::ext_paste_comma);
      ResultToks.pop_back();
      
      // If the comma was right after another paste (e.g. "X##,##__VA_ARGS__"),
      // then removal of the comma should produce a placemarker token (in C99
      // terms) which we model by popping off the previous ##, giving us a plain
      // "X" when __VA_ARGS__ is empty.
      if (!ResultToks.empty() && ResultToks.back().is(tok::hashhash))
        ResultToks.pop_back();
    }
    continue;
  }

  // If anything changed, install this as the new Tokens list.
  if (MadeChange) {
    assert(!OwnsTokens && "This would leak if we already own the token list");
    // This is deleted in the dtor.
    NumTokens = ResultToks.size();
    // The tokens will be added to Preprocessor's cache and will be removed
    // when this TokenLexer finishes lexing them.
    Tokens = PP.cacheMacroExpandedTokens(this, ResultToks);

    // The preprocessor cache of macro expanded tokens owns these tokens,
    // not us.
    OwnsTokens = false;
  }
}
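
One subtle branch above is the GNU ", ## __VA_ARGS__" extension: when the
variadic argument turns out to be empty, the paste operator and the comma
before it are both eaten.  A standalone sketch of just that step, with tokens
reduced to strings (this is not the preprocessor's real token model):

#include <cassert>
#include <string>
#include <vector>

int main() {
  // Result tokens so far for "F(x, ## __VA_ARGS__)" with empty __VA_ARGS__:
  std::vector<std::string> ResultToks = {"x", ",", "##"};

  // The RHS of the paste expanded to nothing, so drop the "##" ...
  assert(ResultToks.back() == "##");
  ResultToks.pop_back();

  // ... and since the token before it is a comma, elide that too
  // (the GNU ", ## __VA_ARGS__" extension).
  if (!ResultToks.empty() && ResultToks.back() == ",")
    ResultToks.pop_back();

  assert(ResultToks.size() == 1 && ResultToks[0] == "x");
}
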
Example #20
/// removeBlocks - Remove basic block DeadBB and all blocks dominated by DeadBB.
/// This routine is used to remove split condition's dead branch, dominated by
/// DeadBB. LiveBB dominates split condition's other branch.
void LoopIndexSplit::removeBlocks(BasicBlock *DeadBB, Loop *LP, 
                                  BasicBlock *LiveBB) {

  // First update DeadBB's dominance frontier. 
  SmallVector<BasicBlock *, 8> FrontierBBs;
  DominanceFrontier::iterator DeadBBDF = DF->find(DeadBB);
  if (DeadBBDF != DF->end()) {
    SmallVector<BasicBlock *, 8> PredBlocks;
    
    DominanceFrontier::DomSetType DeadBBSet = DeadBBDF->second;
    for (DominanceFrontier::DomSetType::iterator DeadBBSetI = DeadBBSet.begin(),
           DeadBBSetE = DeadBBSet.end(); DeadBBSetI != DeadBBSetE; ++DeadBBSetI) {
      BasicBlock *FrontierBB = *DeadBBSetI;
      FrontierBBs.push_back(FrontierBB);

      // Remove any PHI incoming edge from blocks dominated by DeadBB.
      PredBlocks.clear();
      for (pred_iterator PI = pred_begin(FrontierBB), PE = pred_end(FrontierBB);
           PI != PE; ++PI) {
        BasicBlock *P = *PI;
        if (P == DeadBB || DT->dominates(DeadBB, P))
          PredBlocks.push_back(P);
      }

      for (BasicBlock::iterator FBI = FrontierBB->begin(),
             FBE = FrontierBB->end(); FBI != FBE; ++FBI) {
        if (PHINode *PN = dyn_cast<PHINode>(FBI)) {
          for (SmallVector<BasicBlock *, 8>::iterator PI = PredBlocks.begin(),
                 PE = PredBlocks.end(); PI != PE; ++PI) {
            BasicBlock *P = *PI;
            PN->removeIncomingValue(P);
          }
        }
        else
          break;
      }      
    }
  }
  
  // Now remove DeadBB and all nodes dominated by DeadBB in df order.
  SmallVector<BasicBlock *, 32> WorkList;
  DomTreeNode *DN = DT->getNode(DeadBB);
  for (df_iterator<DomTreeNode*> DI = df_begin(DN),
         E = df_end(DN); DI != E; ++DI) {
    BasicBlock *BB = DI->getBlock();
    WorkList.push_back(BB);
    BB->replaceAllUsesWith(UndefValue::get(
                                       Type::getLabelTy(DeadBB->getContext())));
  }

  while (!WorkList.empty()) {
    BasicBlock *BB = WorkList.back(); WorkList.pop_back();
    LPM->deleteSimpleAnalysisValue(BB, LP);
    for(BasicBlock::iterator BBI = BB->begin(), BBE = BB->end(); 
        BBI != BBE; ) {
      Instruction *I = BBI;
      ++BBI;
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      LPM->deleteSimpleAnalysisValue(I, LP);
      I->eraseFromParent();
    }
    DT->eraseNode(BB);
    DF->removeBlock(BB);
    LI->removeBlock(BB);
    BB->eraseFromParent();
  }

  // Update Frontier BBs' dominator info.
  while (!FrontierBBs.empty()) {
    BasicBlock *FBB = FrontierBBs.back(); FrontierBBs.pop_back();
    BasicBlock *NewDominator = FBB->getSinglePredecessor();
    if (!NewDominator) {
      pred_iterator PI = pred_begin(FBB), PE = pred_end(FBB);
      NewDominator = *PI;
      ++PI;
      if (NewDominator != LiveBB) {
        for (; PI != PE; ++PI) {
          BasicBlock *P = *PI;
          if (P == LiveBB) {
            NewDominator = LiveBB;
            break;
          }
          NewDominator = DT->findNearestCommonDominator(NewDominator, P);
        }
      }
    }
    assert(NewDominator && "Unable to fix dominator info.");
    DT->changeImmediateDominator(FBB, NewDominator);
    DF->changeImmediateDominator(FBB, NewDominator, DT);
  }
}
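
removeBlocks is another two-phase worklist: it first records the whole subtree
dominated by DeadBB with a depth-first walk, then deletes in a separate pass,
since erasing nodes while the df_iterator is live would invalidate it.
Roughly, on a toy dominator tree (types and names invented for illustration):

#include <iostream>
#include <map>
#include <vector>

int main() {
  // A toy dominator tree: node -> children.
  std::map<int, std::vector<int>> DomTree =
      {{1, {2, 3}}, {2, {4}}, {3, {}}, {4, {}}};

  // Pass 1: collect node 2's subtree depth-first into a worklist.
  std::vector<int> WorkList, Stack = {2};
  while (!Stack.empty()) {
    int N = Stack.back();
    Stack.pop_back();
    WorkList.push_back(N);
    for (int C : DomTree[N])
      Stack.push_back(C);
  }

  // Pass 2: now it is safe to erase them.
  for (int N : WorkList) {
    DomTree.erase(N);
    std::cout << "erased " << N << '\n';  // erases 2, then 4
  }
}
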
Example #21
/// getNonLocalCallDependency - Perform a full dependency query for the
/// specified call, returning the set of blocks that the value is
/// potentially live across.  The returned set of results will include a
/// "NonLocal" result for all blocks where the value is live across.
///
/// This method assumes the instruction returns a "NonLocal" dependency
/// within its own block.
///
/// This returns a reference to an internal data structure that may be
/// invalidated on the next non-local query or when an instruction is
/// removed.  Clients must copy this data if they want it around longer than
/// that.
const MemoryDependenceAnalysis::NonLocalDepInfo &
MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
 "getNonLocalCallDependency should only be used on calls with non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
  NonLocalDepInfo &Cache = CacheP.first;

  /// DirtyBlocks - This is the set of blocks that need to be recomputed.  In
  /// the cached case, this can happen due to instructions being deleted etc. In
  /// the uncached case, this starts out as the set of predecessors we care
  /// about.
  SmallVector<BasicBlock*, 32> DirtyBlocks;
  
  if (!Cache.empty()) {
    // Okay, we have a cache entry.  If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      NumCacheNonLocal++;
      return Cache;
    }
    
    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
       I != E; ++I)
      if (I->second.isDirty())
        DirtyBlocks.push_back(I->first);
    
    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());
    
    ++NumCacheDirtyNonLocal;
    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
      DirtyBlocks.push_back(*PI);
    NumUncacheNonLocal++;
  }
  
  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);

  SmallPtrSet<BasicBlock*, 64> Visited;
  
  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));
  
  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();
    
    // Already processed this block?
    if (!Visited.insert(DirtyBB))
      continue;
    
    // Do a binary search to see if we already have an entry for this block in
    // the cache set.  If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry = 
      std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
                       std::make_pair(DirtyBB, MemDepResult()));
    if (Entry != Cache.begin() && prior(Entry)->first == DirtyBB)
      --Entry;
    
    MemDepResult *ExistingResult = 0;
    if (Entry != Cache.begin()+NumSortedEntries && 
        Entry->first == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->second.isDirty())
        continue;
      
      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &Entry->second;
    }
    
    // If the dirty entry has a pointer, start scanning from it so we don't have
    // to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getInst()) {
        ScanPos = Inst;
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }
    
    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;
    
    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall,ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found.  If this is the entry block of the function, it is
      // a clobber, otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getClobber(ScanPos);
    }
    
    // If we had a dirty entry for the block, update it.  Otherwise, just add
    // a new entry.
    if (ExistingResult)
      *ExistingResult = Dep;
    else
      Cache.push_back(std::make_pair(DirtyBB, Dep));
    
    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
    } else {
    
      // If the block *is* completely transparent to the load, we need to check
      // the predecessors of this block.  Add them to our worklist.
      for (BasicBlock **PI = PredCache->GetPreds(DirtyBB); *PI; ++PI)
        DirtyBlocks.push_back(*PI);
    }
  }
  
  return Cache;
}
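
The caching discipline above (keep the first NumSortedEntries of the vector
sorted, probe with std::upper_bound, then step back one slot to find an entry
that is already present) works on any sorted vector of pairs.  A small sketch
under those assumptions, with plain ints standing in for blocks and results:

#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>
#include <vector>

int main() {
  // (block id, cached result), sorted by block id.
  std::vector<std::pair<int, int>> Cache = {{1, 10}, {3, 30}, {7, 70}};

  int DirtyBB = 3;
  auto Entry = std::upper_bound(Cache.begin(), Cache.end(),
                                std::make_pair(DirtyBB, 0));
  // upper_bound may land just past an existing entry for DirtyBB; step back.
  if (Entry != Cache.begin() && std::prev(Entry)->first == DirtyBB)
    --Entry;

  if (Entry != Cache.end() && Entry->first == DirtyBB)
    Entry->second = 31;              // update the existing slot in place
  else
    Cache.push_back({DirtyBB, 31});  // append now, re-sort later

  assert(Cache[1].second == 31);
}
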
Example #22
SourceCompleteResult
ide::isSourceInputComplete(std::unique_ptr<llvm::MemoryBuffer> MemBuf) {
  SourceManager SM;
  auto BufferID = SM.addNewSourceBuffer(std::move(MemBuf));
  ParserUnit Parse(SM, BufferID);
  Parser &P = Parse.getParser();

  bool Done;
  do {
    P.parseTopLevel();
    Done = P.Tok.is(tok::eof);
  } while (!Done);

  SourceCompleteResult SCR;
  SCR.IsComplete = !P.isInputIncomplete();

  // Use the same code that was in the REPL code to track the indent level
  // for now. In the future we should get this from the Parser if possible.

  CharSourceRange entireRange = SM.getRangeForBuffer(BufferID);
  StringRef Buffer = SM.extractText(entireRange);
  const char *SourceStart = Buffer.data();
  const char *SourceEnd = Buffer.data() + Buffer.size();
  const char *LineStart = SourceStart;
  const char *LineSourceStart = nullptr;
  uint32_t LineIndent = 0;
  struct IndentInfo {
    StringRef Prefix;
    uint32_t Indent;
    IndentInfo(const char *s, size_t n, uint32_t i) :
      Prefix(s, n),
      Indent(i) {}
  };
  SmallVector<IndentInfo, 4> IndentInfos;
  for (const char *p = SourceStart; p < SourceEnd; ++p) {
    switch (*p) {
    case '\r':
    case '\n':
      LineIndent = 0;
      LineSourceStart = nullptr;
      LineStart = p + 1;
      break;

    case '"':
      p = skipStringInCode(p, SourceEnd);
      break;

    case '{':
    case '(':
    case '[':
      ++LineIndent;
      if (LineSourceStart == nullptr)
        IndentInfos.push_back(IndentInfo(LineStart,
                                         p - LineStart,
                                         LineIndent));
      else
        IndentInfos.push_back(IndentInfo(LineStart,
                                         LineSourceStart - LineStart,
                                         LineIndent));
      break;

    case '}':
    case ')':
    case ']':
      if (LineIndent > 0)
        --LineIndent;
      if (!IndentInfos.empty())
        IndentInfos.pop_back();
      break;

    default:
      if (LineSourceStart == nullptr && !isspace(*p))
        LineSourceStart = p;
      break;
    }
    if (*p == '\0')
      break;
  }
  if (!IndentInfos.empty()) {
    SCR.IndentPrefix = IndentInfos.back().Prefix.str();
    // Keep only the leading whitespace: erase everything from the first
    // non-space character onward.
    const size_t pos = SCR.IndentPrefix.find_first_not_of(" \t");
    if (pos != std::string::npos)
      SCR.IndentPrefix.erase(pos);
    SCR.IndentLevel = IndentInfos.back().Indent;
  }
  return SCR;
}
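
The indent tracking above is, at heart, a bracket stack over raw text.  A
reduced sketch that keeps only the stack (no string-literal skipping, no
prefix capture; Src is a made-up fragment):

#include <iostream>
#include <string>
#include <vector>

int main() {
  std::string Src = "if x {\n  foo(bar\n";
  std::vector<char> Stack;  // open brackets not yet closed
  for (char c : Src) {
    if (c == '{' || c == '(' || c == '[')
      Stack.push_back(c);
    else if (c == '}' || c == ')' || c == ']') {
      if (!Stack.empty())
        Stack.pop_back();
    }
  }
  // Two brackets still open: the input is incomplete at indent level 2.
  std::cout << "indent level: " << Stack.size() << '\n';
}
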
Example #23
/// lowerAcrossUnwindEdges - Find all variables which are alive across an unwind
/// edge and spill them.
void SjLjEHPrepare::lowerAcrossUnwindEdges(Function &F,
                                           ArrayRef<InvokeInst *> Invokes) {
  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator II = BB->begin(), IIE = BB->end(); II != IIE;
         ++II) {
      // Ignore obvious cases we don't have to handle. In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block. Ignore them quickly.
      Instruction *Inst = &*II;
      if (Inst->use_empty())
        continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->user_back())->getParent() == BB &&
          !isa<PHINode>(Inst->user_back()))
        continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F.begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      SmallVector<Instruction *, 16> Users;
      for (User *U : Inst->users()) {
        Instruction *UI = cast<Instruction>(U);
        if (UI->getParent() != BB || isa<PHINode>(UI))
          Users.push_back(UI);
      }

      // Find all of the blocks that this value is live in.
      SmallPtrSet<BasicBlock *, 64> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          DEBUG(dbgs() << "SJLJ Spill: " << *Inst << " around "
                       << UnwindBlock->getName() << "\n");
          NeedsSpill = true;
          break;
        }
      }

      // If we decided we need a spill, do it.
      // FIXME: Spilling this way is overkill, as it forces all uses of
      // the value to be reloaded from the stack slot, even those that aren't
      // in the unwind blocks. We should be more selective.
      if (NeedsSpill) {
        DemoteRegToStack(*Inst, true);
        ++NumSpilled;
      }
    }
  }

  // Go through the landing pads and remove any PHIs there.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
    LandingPadInst *LPI = UnwindBlock->getLandingPadInst();

    // Place PHIs into a set to avoid invalidating the iterator.
    SmallPtrSet<PHINode *, 8> PHIsToDemote;
    for (BasicBlock::iterator PN = UnwindBlock->begin(); isa<PHINode>(PN); ++PN)
      PHIsToDemote.insert(cast<PHINode>(PN));
    if (PHIsToDemote.empty())
      continue;

    // Demote the PHIs to the stack.
    for (PHINode *PN : PHIsToDemote)
      DemotePHIToStack(PN);

    // Move the landingpad instruction back to the top of the landing pad block.
    LPI->moveBefore(&UnwindBlock->front());
  }
}
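
The liveness question driving the spill decision, "which blocks is this value
live in?", is answered by walking from each use back toward the def and
collecting blocks into a set.  A standalone sketch with predecessors as a
plain map (MarkBlocksLiveIn here is a simplified stand-in for the real
helper):

#include <iostream>
#include <map>
#include <set>
#include <vector>

// Mark BB live-in, then keep walking predecessors until we hit a block that
// is already marked (the def block is seeded into LiveBBs by the caller).
static void MarkBlocksLiveIn(int BB,
                             const std::map<int, std::vector<int>> &Preds,
                             std::set<int> &LiveBBs) {
  if (!LiveBBs.insert(BB).second)
    return;  // already marked
  auto It = Preds.find(BB);
  if (It == Preds.end())
    return;
  for (int P : It->second)
    MarkBlocksLiveIn(P, Preds, LiveBBs);
}

int main() {
  // 1 -> 2 -> 3 -> 4, with the value defined in 1 and used in 4.
  std::map<int, std::vector<int>> Preds = {{2, {1}}, {3, {2}}, {4, {3}}};
  std::set<int> LiveBBs = {1};  // the def block
  MarkBlocksLiveIn(4, Preds, LiveBBs);
  std::cout << LiveBBs.size() << " live blocks\n";  // prints: 4 live blocks
}
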
Example #24
/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;

  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<MachineBasicBlock*, 16> LiveOut;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(li->reg);
       MachineInstr *UseMI = I.skipInstruction();) {
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
    LiveRangeQuery LRQ(*li, Idx);
    VNInfo *VNI = LRQ.valueIn();
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      DEBUG(dbgs() << Idx << '\t' << *UseMI
                   << "Warning: Instr claims to read non-existent value in "
                    << *li << '\n');
      continue;
    }
    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live interval with only minimal live segments per def.
  LiveInterval NewLI(li->reg, 0);
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    NewLI.addRange(LiveRange(VNI->def, VNI->def.getDeadSlot(), VNI));
  }

  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = getMBBFromIndex(Idx.getPrevSlot());
    SlotIndex BlockStart = getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
      (void)ExtVNI;
      assert(ExtVNI == VNI && "Unexpected existing value number");
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        if (!LiveOut.insert(*PI))
          continue;
        SlotIndex Stop = getMBBEndIdx(*PI);
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = li->getVNInfoBefore(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    NewLI.addRange(LiveRange(BlockStart, Idx, VNI));

    // Make sure VNI is live-out from the predecessors.
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
      if (!LiveOut.insert(*PI))
        continue;
      SlotIndex Stop = getMBBEndIdx(*PI);
      assert(li->getVNInfoBefore(Stop) == VNI &&
             "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }

  // Handle dead values.
  bool CanSeparate = false;
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
    assert(LII != NewLI.end() && "Missing live range for PHI");
    if (LII->end != VNI->def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      NewLI.removeRange(*LII);
      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
      CanSeparate = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(VNI->def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(li->reg, TRI);
      if (dead && MI->allDefsAreDead()) {
        DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }

  // Move the trimmed ranges back.
  li->ranges.swap(NewLI.ranges);
  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}
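
shrinkToUses rebuilds the interval as one minimal segment per def and then
grows it toward each use pulled off the worklist.  Reduced to integers, the
in-block extension step looks roughly like this (a sketch only; the real code
works in SlotIndex units and extends across blocks through predecessors):

#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>

int main() {
  int Def = 10;                                  // def slot
  std::vector<int> Uses = {12, 25, 17};          // use slots, same block
  std::pair<int, int> Segment = {Def, Def + 1};  // minimal "dead" segment
  for (int U : Uses)
    Segment.second = std::max(Segment.second, U);  // extend to reach each use
  // Prints the half-open range [10,25).
  std::cout << '[' << Segment.first << ',' << Segment.second << ")\n";
}
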
Example #25
// MI is known to be dead. Figure out what instructions
// are also made dead by this and mark them for removal.
void A15SDOptimizer::eraseInstrWithNoUses(MachineInstr *MI) {
  SmallVector<MachineInstr *, 8> Front;
  DeadInstr.insert(MI);

  DEBUG(dbgs() << "Deleting base instruction " << *MI << "\n");
  Front.push_back(MI);

  while (!Front.empty()) {
    MI = Front.back();
    Front.pop_back();

    // MI is already known to be dead. We need to see
    // if other instructions can also be removed.
    for (unsigned int i = 0; i < MI->getNumOperands(); ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if ((!MO.isReg()) || (!MO.isUse()))
        continue;
      unsigned Reg = MO.getReg();
      if (!TRI->isVirtualRegister(Reg))
        continue;
      MachineOperand *Op = MI->findRegisterDefOperand(Reg);

      if (!Op)
        continue;

      MachineInstr *Def = Op->getParent();

      // We don't need to do anything if we have already marked
      // this instruction as being dead.
      if (DeadInstr.find(Def) != DeadInstr.end())
        continue;

      // Check if all the uses of this instruction are marked as
      // dead. If so, we can also mark this instruction as being
      // dead.
      bool IsDead = true;
      for (unsigned int j = 0; j < Def->getNumOperands(); ++j) {
        MachineOperand &MODef = Def->getOperand(j);
        if ((!MODef.isReg()) || (!MODef.isDef()))
          continue;
        unsigned DefReg = MODef.getReg();
        if (!TRI->isVirtualRegister(DefReg)) {
          IsDead = false;
          break;
        }
        for (MachineRegisterInfo::use_instr_iterator
             II = MRI->use_instr_begin(Reg), EE = MRI->use_instr_end();
             II != EE; ++II) {
          // We don't care about self references.
          if (&*II == Def)
            continue;
          if (DeadInstr.find(&*II) == DeadInstr.end()) {
            IsDead = false;
            break;
          }
        }
      }

      if (!IsDead) continue;

      DEBUG(dbgs() << "Deleting instruction " << *Def << "\n");
      DeadInstr.insert(Def);
    }
  }
}
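
The cascade above generalizes to any def-use graph: an instruction becomes
dead once every user of its results is already dead, and each newly dead
instruction can unlock more.  A toy version over invented containers (no
MachineInstr machinery):

#include <iostream>
#include <map>
#include <set>
#include <vector>

int main() {
  // instruction -> instructions that use its result
  std::map<int, std::set<int>> Users = {{1, {3}}, {2, {3}}, {3, {}}};
  std::set<int> Dead = {3};  // 3 is known dead up front
  std::vector<int> Front = {3};

  while (!Front.empty()) {
    int I = Front.back();
    Front.pop_back();
    // Any producer feeding I whose users are now all dead dies too.
    for (auto &KV : Users) {
      if (Dead.count(KV.first) || !KV.second.count(I))
        continue;
      bool AllDead = true;
      for (int U : KV.second)
        if (!Dead.count(U)) { AllDead = false; break; }
      if (AllDead) {
        Dead.insert(KV.first);
        Front.push_back(KV.first);
      }
    }
  }
  std::cout << Dead.size() << " dead instructions\n";  // 3, then 1 and 2
}
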
Example #26
void UnwrappedLineParser::calculateBraceTypes() {
  // We'll parse forward through the tokens until we hit
  // a closing brace or eof - note that getNextToken() will
  // parse macros, so this will magically work inside macro
  // definitions, too.
  unsigned StoredPosition = Tokens->getPosition();
  unsigned Position = StoredPosition;
  FormatToken *Tok = FormatTok;
  // Keep a stack of positions of lbrace tokens. We will
  // update information about whether an lbrace starts a
  // braced init list or a different block during the loop.
  SmallVector<FormatToken *, 8> LBraceStack;
  assert(Tok->Tok.is(tok::l_brace));
  do {
    // Get the next non-comment token.
    FormatToken *NextTok;
    unsigned ReadTokens = 0;
    do {
      NextTok = Tokens->getNextToken();
      ++ReadTokens;
    } while (NextTok->is(tok::comment));

    switch (Tok->Tok.getKind()) {
    case tok::l_brace:
      LBraceStack.push_back(Tok);
      break;
    case tok::r_brace:
      if (!LBraceStack.empty()) {
        if (LBraceStack.back()->BlockKind == BK_Unknown) {
          bool ProbablyBracedList = false;
          if (Style.Language == FormatStyle::LK_Proto) {
            ProbablyBracedList = NextTok->isOneOf(tok::comma, tok::r_square);
          } else {
            // Using OriginalColumn to distinguish between ObjC methods and
            // binary operators is a bit hacky.
            bool NextIsObjCMethod = NextTok->isOneOf(tok::plus, tok::minus) &&
                                    NextTok->OriginalColumn == 0;

            // If there is a comma, semicolon or right paren after the closing
            // brace, we assume this is a braced initializer list.  Note that
            // regardless how we mark inner braces here, we will overwrite the
            // BlockKind later if we parse a braced list (where all blocks
            // inside are by default braced lists), or when we explicitly detect
            // blocks (for example while parsing lambdas).
            //
            // We exclude + and - as they can be ObjC visibility modifiers.
            ProbablyBracedList =
                NextTok->isOneOf(tok::comma, tok::semi, tok::period, tok::colon,
                                 tok::r_paren, tok::r_square, tok::l_brace,
                                 tok::l_paren) ||
                (NextTok->isBinaryOperator() && !NextIsObjCMethod);
          }
          if (ProbablyBracedList) {
            Tok->BlockKind = BK_BracedInit;
            LBraceStack.back()->BlockKind = BK_BracedInit;
          } else {
            Tok->BlockKind = BK_Block;
            LBraceStack.back()->BlockKind = BK_Block;
          }
        }
        LBraceStack.pop_back();
      }
      break;
    case tok::at:
    case tok::semi:
    case tok::kw_if:
    case tok::kw_while:
    case tok::kw_for:
    case tok::kw_switch:
    case tok::kw_try:
      if (!LBraceStack.empty())
        LBraceStack.back()->BlockKind = BK_Block;
      break;
    default:
      break;
    }
    Tok = NextTok;
    Position += ReadTokens;
  } while (Tok->Tok.isNot(tok::eof) && !LBraceStack.empty());
  // Assume other blocks for all unclosed opening braces.
  for (unsigned i = 0, e = LBraceStack.size(); i != e; ++i) {
    if (LBraceStack[i]->BlockKind == BK_Unknown)
      LBraceStack[i]->BlockKind = BK_Block;
  }

  FormatTok = Tokens->setPosition(StoredPosition);
}
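
The core heuristic, classifying an l_brace by the token that follows its
matching r_brace, can be shown on a flat token list.  A sketch with token
kinds as plain strings (the real classifier has many more cases and
language-specific rules):

#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> Toks = {"{", "1", ",", "2", "}", ";"};
  std::vector<std::size_t> LBraceStack;
  std::vector<std::string> Kind(Toks.size(), "BK_Unknown");

  for (std::size_t i = 0; i < Toks.size(); ++i) {
    if (Toks[i] == "{") {
      LBraceStack.push_back(i);
    } else if (Toks[i] == "}" && !LBraceStack.empty()) {
      // Peek at the token after the '}': a comma, semicolon, or closing
      // paren there suggests a braced initializer list rather than a block.
      std::string Next = i + 1 < Toks.size() ? Toks[i + 1] : "eof";
      bool BracedList = Next == "," || Next == ";" || Next == ")";
      Kind[LBraceStack.back()] = BracedList ? "BK_BracedInit" : "BK_Block";
      LBraceStack.pop_back();
    }
  }
  std::cout << Kind[0] << '\n';  // prints: BK_BracedInit
}
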
Example #27
/// ActOnEndOfTranslationUnit - This is called at the very end of the
/// translation unit when EOF is reached and all but the top-level scope is
/// popped.
void Sema::ActOnEndOfTranslationUnit() {
  assert(DelayedDiagnostics.getCurrentPool() == NULL
         && "reached end of translation unit with a pool attached?");

  // If code completion is enabled, don't perform any end-of-translation-unit
  // work.
  if (PP.isCodeCompletionEnabled())
    return;

  // Only complete translation units define vtables and perform implicit
  // instantiations.
  if (TUKind == TU_Complete) {
    DiagnoseUseOfUnimplementedSelectors();

    // If any dynamic classes have their key function defined within
    // this translation unit, then those vtables are considered "used" and must
    // be emitted.
    for (DynamicClassesType::iterator I = DynamicClasses.begin(ExternalSource),
                                      E = DynamicClasses.end();
         I != E; ++I) {
      assert(!(*I)->isDependentType() &&
             "Should not see dependent types here!");
      if (const CXXMethodDecl *KeyFunction = Context.getCurrentKeyFunction(*I)) {
        const FunctionDecl *Definition = 0;
        if (KeyFunction->hasBody(Definition))
          MarkVTableUsed(Definition->getLocation(), *I, true);
      }
    }

    // If DefineUsedVTables ends up marking any virtual member functions, it
    // might lead to more pending template instantiations, which we then need
    // to instantiate.
    DefineUsedVTables();

    // C++: Perform implicit template instantiations.
    //
    // FIXME: When we perform these implicit instantiations, we do not
    // carefully keep track of the point of instantiation (C++ [temp.point]).
    // This means that name lookup that occurs within the template
    // instantiation will always happen at the end of the translation unit,
    // so it will find some names that should not be found. Although this is
    // common behavior for C++ compilers, it is technically wrong. In the
    // future, we either need to be able to filter the results of name lookup
    // or we need to perform template instantiations earlier.
    PerformPendingInstantiations();
  }

  // Remove file scoped decls that turned out to be used.
  UnusedFileScopedDecls.erase(
      std::remove_if(UnusedFileScopedDecls.begin(ExternalSource, true),
                     UnusedFileScopedDecls.end(),
                     std::bind1st(std::ptr_fun(ShouldRemoveFromUnused), this)),
      UnusedFileScopedDecls.end());

  if (TUKind == TU_Prefix) {
    // Translation unit prefixes don't need any of the checking below.
    TUScope = 0;
    return;
  }

  // Check for #pragma weak identifiers that were never declared.
  // FIXME: This will cause diagnostics to be emitted in a non-deterministic
  // order!  Iterating over a DenseMap like this is bad.
  LoadExternalWeakUndeclaredIdentifiers();
  for (llvm::DenseMap<IdentifierInfo*,WeakInfo>::iterator
       I = WeakUndeclaredIdentifiers.begin(),
       E = WeakUndeclaredIdentifiers.end(); I != E; ++I) {
    if (I->second.getUsed()) continue;

    Diag(I->second.getLocation(), diag::warn_weak_identifier_undeclared)
      << I->first;
  }

  if (LangOpts.CPlusPlus11 &&
      Diags.getDiagnosticLevel(diag::warn_delegating_ctor_cycle,
                               SourceLocation())
        != DiagnosticsEngine::Ignored)
    CheckDelegatingCtorCycles();

  if (TUKind == TU_Module) {
    // If we are building a module, resolve all of the exported declarations
    // now.
    if (Module *CurrentModule = PP.getCurrentModule()) {
      ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();

      SmallVector<Module *, 2> Stack;
      Stack.push_back(CurrentModule);
      while (!Stack.empty()) {
        Module *Mod = Stack.back();
        Stack.pop_back();

        // Resolve the exported declarations and conflicts.
        // FIXME: Actually complain, once we figure out how to teach the
        // diagnostic client to deal with complaints in the module map at this
        // point.
        ModMap.resolveExports(Mod, /*Complain=*/false);
        ModMap.resolveConflicts(Mod, /*Complain=*/false);

        // Queue the submodules, so their exports will also be resolved.
        for (Module::submodule_iterator Sub = Mod->submodule_begin(),
                                     SubEnd = Mod->submodule_end();
             Sub != SubEnd; ++Sub) {
          Stack.push_back(*Sub);
        }
      }
    }

    // Modules don't need any of the checking below.
    TUScope = 0;
    return;
  }

  // C99 6.9.2p2:
  //   A declaration of an identifier for an object that has file
  //   scope without an initializer, and without a storage-class
  //   specifier or with the storage-class specifier static,
  //   constitutes a tentative definition. If a translation unit
  //   contains one or more tentative definitions for an identifier,
  //   and the translation unit contains no external definition for
  //   that identifier, then the behavior is exactly as if the
  //   translation unit contains a file scope declaration of that
  //   identifier, with the composite type as of the end of the
  //   translation unit, with an initializer equal to 0.
  llvm::SmallSet<VarDecl *, 32> Seen;
  for (TentativeDefinitionsType::iterator
            T = TentativeDefinitions.begin(ExternalSource),
         TEnd = TentativeDefinitions.end();
       T != TEnd; ++T) {
    VarDecl *VD = (*T)->getActingDefinition();

    // If the tentative definition was completed, getActingDefinition() returns
    // null. If we've already seen this variable before, insert()'s second
    // return value is false.
    if (VD == 0 || VD->isInvalidDecl() || !Seen.insert(VD))
      continue;

    if (const IncompleteArrayType *ArrayT
        = Context.getAsIncompleteArrayType(VD->getType())) {
      if (RequireCompleteType(VD->getLocation(),
                              ArrayT->getElementType(),
                              diag::err_tentative_def_incomplete_type_arr)) {
        VD->setInvalidDecl();
        continue;
      }

      // Set the length of the array to 1 (C99 6.9.2p5).
      Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
      llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true);
      QualType T = Context.getConstantArrayType(ArrayT->getElementType(),
                                                One, ArrayType::Normal, 0);
      VD->setType(T);
    } else if (RequireCompleteType(VD->getLocation(), VD->getType(),
                                   diag::err_tentative_def_incomplete_type))
      VD->setInvalidDecl();

    CheckCompleteVariableDeclaration(VD);

    // Notify the consumer that we've completed a tentative definition.
    if (!VD->isInvalidDecl())
      Consumer.CompleteTentativeDefinition(VD);
  }

  // If there were errors, disable 'unused' warnings since they will mostly be
  // noise.
  if (!Diags.hasErrorOccurred()) {
    // Output warning for unused file scoped decls.
    for (UnusedFileScopedDeclsType::iterator
           I = UnusedFileScopedDecls.begin(ExternalSource),
           E = UnusedFileScopedDecls.end(); I != E; ++I) {
      if (ShouldRemoveFromUnused(this, *I))
        continue;

      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
        const FunctionDecl *DiagD;
        if (!FD->hasBody(DiagD))
          DiagD = FD;
        if (DiagD->isDeleted())
          continue; // Deleted functions are supposed to be unused.
        if (DiagD->isReferenced()) {
          if (isa<CXXMethodDecl>(DiagD))
            Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
                  << DiagD->getDeclName();
          else {
            if (FD->getStorageClassAsWritten() == SC_Static &&
                !FD->isInlineSpecified() &&
                !SourceMgr.isFromMainFile(
                   SourceMgr.getExpansionLoc(FD->getLocation())))
              Diag(DiagD->getLocation(), diag::warn_unneeded_static_internal_decl)
                << DiagD->getDeclName();
            else
              Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
                   << /*function*/0 << DiagD->getDeclName();
          }
        } else {
          Diag(DiagD->getLocation(),
               isa<CXXMethodDecl>(DiagD) ? diag::warn_unused_member_function
                                         : diag::warn_unused_function)
                << DiagD->getDeclName();
        }
      } else {
        const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
        if (!DiagD)
          DiagD = cast<VarDecl>(*I);
        if (DiagD->isReferenced()) {
          Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
                << /*variable*/1 << DiagD->getDeclName();
        } else {
          Diag(DiagD->getLocation(), diag::warn_unused_variable)
                << DiagD->getDeclName();
        }
      }
    }

    if (ExternalSource)
      ExternalSource->ReadUndefinedButUsed(UndefinedButUsed);
    checkUndefinedButUsed(*this);
  }

  if (Diags.getDiagnosticLevel(diag::warn_unused_private_field,
                               SourceLocation())
        != DiagnosticsEngine::Ignored) {
    RecordCompleteMap RecordsComplete;
    RecordCompleteMap MNCComplete;
    for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(),
         E = UnusedPrivateFields.end(); I != E; ++I) {
      const NamedDecl *D = *I;
      const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
      if (RD && !RD->isUnion() &&
          IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
        Diag(D->getLocation(), diag::warn_unused_private_field)
              << D->getDeclName();
      }
    }
  }

  // Check we've noticed that we're no longer parsing the initializer for every
  // variable. If we miss cases, then at best we have a performance issue and
  // at worst a rejects-valid bug.
  assert(ParsingInitForAutoVars.empty() &&
         "Didn't unmark var as having its initializer parsed");

  TUScope = 0;
}
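
Two small idioms recur in the function above: a Seen set whose insert()
result deduplicates a list in one pass (the tentative-definition loop), and
an explicit stack for visiting a module and all of its submodules.  The dedup
half in isolation (ints stand in for VarDecls):

#include <iostream>
#include <set>
#include <vector>

int main() {
  std::vector<int> TentativeDefs = {1, 2, 1, 3, 2};
  std::set<int> Seen;
  for (int VD : TentativeDefs) {
    // insert().second is false when this decl was already handled.
    if (!Seen.insert(VD).second)
      continue;
    std::cout << "completing tentative definition " << VD << '\n';
  }
}
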
Example #28
/// SplitCriticalEdge - If this edge is a critical edge, insert a new node to
/// split the critical edge.  This will update DominatorTree and
/// DominatorFrontier information if it is available, thus calling this pass
/// will not invalidate either of them. This returns the new block if the edge
/// was split, null otherwise.
///
/// If MergeIdenticalEdges is true (not the default), *all* edges from TI to the
/// specified successor will be merged into the same critical edge block.  
/// This is most commonly interesting with switch instructions, which may 
/// have many edges to any one destination.  This ensures that all edges to that
/// dest go to one block instead of each going to a different block, but isn't 
/// the standard definition of a "critical edge".
///
/// It is invalid to call this function on a critical edge that starts at an
/// IndirectBrInst.  Splitting these edges will almost always create an invalid
/// program because the address of the new block won't be the one that is jumped
/// to.
///
BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
                                    Pass *P, bool MergeIdenticalEdges) {
  if (!isCriticalEdge(TI, SuccNum, MergeIdenticalEdges)) return 0;
  
  assert(!isa<IndirectBrInst>(TI) &&
         "Cannot split critical edge from IndirectBrInst");
  
  BasicBlock *TIBB = TI->getParent();
  BasicBlock *DestBB = TI->getSuccessor(SuccNum);

  // Create a new basic block, linking it into the CFG.
  BasicBlock *NewBB = BasicBlock::Create(TI->getContext(),
                      TIBB->getName() + "." + DestBB->getName() + "_crit_edge");
  // Create our unconditional branch.
  BranchInst::Create(DestBB, NewBB);

  // Branch to the new block, breaking the edge.
  TI->setSuccessor(SuccNum, NewBB);

  // Insert the block into the function... right after the block TI lives in.
  Function &F = *TIBB->getParent();
  Function::iterator FBBI = TIBB;
  F.getBasicBlockList().insert(++FBBI, NewBB);
  
  // If there are any PHI nodes in DestBB, we need to update them so that they
  // merge incoming values from NewBB instead of from TIBB.
  if (PHINode *APHI = dyn_cast<PHINode>(DestBB->begin())) {
    // This conceptually does:
    //  foreach (PHINode *PN in DestBB)
    //    PN->setIncomingBlock(PN->getIncomingBlock(TIBB), NewBB);
    // but is optimized for two cases.
    
    if (APHI->getNumIncomingValues() <= 8) {  // Small # preds case.
      unsigned BBIdx = 0;
      for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
        // We no longer enter through TIBB, now we come in through NewBB.
        // Revector exactly one entry in the PHI node that used to come from
        // TIBB to come from NewBB.
        PHINode *PN = cast<PHINode>(I);
        
        // Reuse the previous value of BBIdx if it lines up.  In cases where we
        // have multiple phi nodes with *lots* of predecessors, this is a speed
        // win because we don't have to scan the PHI looking for TIBB.  This
        // happens because the BB list of PHI nodes are usually in the same
        // order.
        if (PN->getIncomingBlock(BBIdx) != TIBB)
          BBIdx = PN->getBasicBlockIndex(TIBB);
        PN->setIncomingBlock(BBIdx, NewBB);
      }
    } else {
      // However, the foreach loop is slow for blocks with lots of predecessors
      // because PHINode::getIncomingBlock is O(n) in # preds.  Instead, walk
      // the user list of TIBB to find the PHI nodes.
      SmallPtrSet<PHINode*, 16> UpdatedPHIs;
    
      for (Value::use_iterator UI = TIBB->use_begin(), E = TIBB->use_end();
           UI != E; ) {
        Value::use_iterator Use = UI++;
        if (PHINode *PN = dyn_cast<PHINode>(Use)) {
          // Remove one entry from each PHI.
          if (PN->getParent() == DestBB && UpdatedPHIs.insert(PN))
            PN->setOperand(Use.getOperandNo(), NewBB);
        }
      }
    }
  }
   
  // If there are any other edges from TIBB to DestBB, update those to go
  // through the split block, making those edges non-critical as well (and
  // reducing the number of phi entries in the DestBB if relevant).
  if (MergeIdenticalEdges) {
    for (unsigned i = SuccNum+1, e = TI->getNumSuccessors(); i != e; ++i) {
      if (TI->getSuccessor(i) != DestBB) continue;
      
      // Remove an entry for TIBB from DestBB phi nodes.
      DestBB->removePredecessor(TIBB);
      
      // We found another edge to DestBB, go to NewBB instead.
      TI->setSuccessor(i, NewBB);
    }
  }

  // If we don't have a pass object, we can't update anything...
  if (P == 0) return NewBB;
  
  DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>();
  DominanceFrontier *DF = P->getAnalysisIfAvailable<DominanceFrontier>();
  LoopInfo *LI = P->getAnalysisIfAvailable<LoopInfo>();
  ProfileInfo *PI = P->getAnalysisIfAvailable<ProfileInfo>();
  
  // If we have nothing to update, just return.
  if (DT == 0 && DF == 0 && LI == 0 && PI == 0)
    return NewBB;

  // Now update analysis information.  Since the only predecessor of NewBB is
  // the TIBB, TIBB clearly dominates NewBB.  TIBB usually doesn't dominate
  // anything, as there are other successors of DestBB.  However, if all other
  // predecessors of DestBB are already dominated by DestBB (e.g. DestBB is a
  // loop header) then NewBB dominates DestBB.
  SmallVector<BasicBlock*, 8> OtherPreds;

  // If there is a PHI in the block, loop over predecessors with it, which is
  // faster than iterating pred_begin/end.
  if (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (PN->getIncomingBlock(i) != NewBB)
        OtherPreds.push_back(PN->getIncomingBlock(i));
  } else {
    for (pred_iterator I = pred_begin(DestBB), E = pred_end(DestBB);
         I != E; ++I)
      if (*I != NewBB)
        OtherPreds.push_back(*I);
  }
  
  bool NewBBDominatesDestBB = true;
  
  // Should we update DominatorTree information?
  if (DT) {
    DomTreeNode *TINode = DT->getNode(TIBB);

    // The new block is not the immediate dominator for any other nodes, but
    // TINode is the immediate dominator for the new node.
    //
    if (TINode) {       // Don't break unreachable code!
      DomTreeNode *NewBBNode = DT->addNewBlock(NewBB, TIBB);
      DomTreeNode *DestBBNode = 0;
     
      // If NewBBDominatesDestBB hasn't been computed yet, do so with DT.
      if (!OtherPreds.empty()) {
        DestBBNode = DT->getNode(DestBB);
        while (!OtherPreds.empty() && NewBBDominatesDestBB) {
          if (DomTreeNode *OPNode = DT->getNode(OtherPreds.back()))
            NewBBDominatesDestBB = DT->dominates(DestBBNode, OPNode);
          OtherPreds.pop_back();
        }
        OtherPreds.clear();
      }
      
      // If NewBBDominatesDestBB, then NewBB dominates DestBB, otherwise it
      // doesn't dominate anything.
      if (NewBBDominatesDestBB) {
        if (!DestBBNode) DestBBNode = DT->getNode(DestBB);
        DT->changeImmediateDominator(DestBBNode, NewBBNode);
      }
    }
  }

  // Should we update DominanceFrontier information?
  if (DF) {
    // If NewBBDominatesDestBB hasn't been computed yet, do so with DF.
    if (!OtherPreds.empty()) {
      // FIXME: IMPLEMENT THIS!
      llvm_unreachable("Requiring domfrontiers but not idom/domtree/domset."
                       " not implemented yet!");
    }
    
    // Since the new block is dominated by its only predecessor TIBB,
    // it cannot be in any block's dominance frontier.  If NewBB dominates
    // DestBB, its dominance frontier is the same as DestBB's, otherwise it is
    // just {DestBB}.
    if (NewBBDominatesDestBB) {
      DominanceFrontier::iterator I = DF->find(DestBB);
      if (I != DF->end()) {
        DF->addBasicBlock(NewBB, I->second);
        
        if (I->second.count(DestBB)) {
          // However NewBB's frontier does not include DestBB.
          DominanceFrontier::iterator NF = DF->find(NewBB);
          DF->removeFromFrontier(NF, DestBB);
        }
      }
      else
        DF->addBasicBlock(NewBB, DominanceFrontier::DomSetType());
    } else {
      DominanceFrontier::DomSetType NewDFSet;
      NewDFSet.insert(DestBB);
      DF->addBasicBlock(NewBB, NewDFSet);
    }
  }
  
  // Update LoopInfo if it is around.
  if (LI) {
    if (Loop *TIL = LI->getLoopFor(TIBB)) {
      // If one or the other blocks were not in a loop, the new block is not
      // either, and thus LI doesn't need to be updated.
      if (Loop *DestLoop = LI->getLoopFor(DestBB)) {
        if (TIL == DestLoop) {
          // Both in the same loop, the NewBB joins loop.
          DestLoop->addBasicBlockToLoop(NewBB, LI->getBase());
        } else if (TIL->contains(DestLoop)) {
          // Edge from an outer loop to an inner loop.  Add to the outer loop.
          TIL->addBasicBlockToLoop(NewBB, LI->getBase());
        } else if (DestLoop->contains(TIL)) {
          // Edge from an inner loop to an outer loop.  Add to the outer loop.
          DestLoop->addBasicBlockToLoop(NewBB, LI->getBase());
        } else {
          // Edge from two loops with no containment relation.  Because these
          // are natural loops, we know that the destination block must be the
          // header of its loop (adding a branch into a loop elsewhere would
          // create an irreducible loop).
          assert(DestLoop->getHeader() == DestBB &&
                 "Should not create irreducible loops!");
          if (Loop *P = DestLoop->getParentLoop())
            P->addBasicBlockToLoop(NewBB, LI->getBase());
        }
      }
      // If TIBB is in a loop and DestBB is outside of that loop, split the
      // other exit blocks of the loop that also have predecessors outside
      // the loop, to maintain a LoopSimplify guarantee.
      if (!TIL->contains(DestBB) &&
          P->mustPreserveAnalysisID(LoopSimplifyID)) {
        assert(!TIL->contains(NewBB) &&
               "Split point for loop exit is contained in loop!");

        // Update LCSSA form in the newly created exit block.
        if (P->mustPreserveAnalysisID(LCSSAID)) {
          SmallVector<BasicBlock *, 1> OrigPred;
          OrigPred.push_back(TIBB);
          CreatePHIsForSplitLoopExit(OrigPred, NewBB, DestBB);
        }

        // For each exit block of the loop...
        SmallVector<BasicBlock *, 4> ExitBlocks;
        TIL->getExitBlocks(ExitBlocks);
        for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
          // Collect all the preds that are inside the loop, and note
          // whether there are any preds outside the loop.
          SmallVector<BasicBlock *, 4> Preds;
          bool HasPredOutsideOfLoop = false;
          BasicBlock *Exit = ExitBlocks[i];
          for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit);
               I != E; ++I)
            if (TIL->contains(*I))
              Preds.push_back(*I);
            else
              HasPredOutsideOfLoop = true;
          // If there are any preds not in the loop, we'll need to split
          // the edges. The Preds.empty() check is needed because a block
          // may appear multiple times in the list. We can't use
          // getUniqueExitBlocks above because that depends on LoopSimplify
          // form, which we're in the process of restoring!
          if (!Preds.empty() && HasPredOutsideOfLoop) {
            BasicBlock *NewExitBB =
              SplitBlockPredecessors(Exit, Preds.data(), Preds.size(),
                                     "split", P);
            if (P->mustPreserveAnalysisID(LCSSAID))
              CreatePHIsForSplitLoopExit(Preds, NewExitBB, Exit);
          }
        }
      }
      // LCSSA form was updated above for the case where LoopSimplify is
      // available, which means that all predecessors of loop exit blocks
      // are within the loop. Without LoopSimplify form, it would be
      // necessary to insert a new phi.
      assert((!P->mustPreserveAnalysisID(LCSSAID) ||
              P->mustPreserveAnalysisID(LoopSimplifyID)) &&
             "SplitCriticalEdge doesn't know how to update LCCSA form "
             "without LoopSimplify!");
    }
  }

  // Update ProfileInfo if it is around.
  if (PI)
    PI->splitEdge(TIBB, DestBB, NewBB, MergeIdenticalEdges);

  return NewBB;
}
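
// A minimal usage sketch for the function above, assuming the classic
// SplitCriticalEdge(TI, SuccNum, Pass*) entry point and the isCriticalEdge
// helper of this LLVM era; the pass class name is hypothetical.
bool BreakCriticalEdgesSketch::runOnFunction(Function &F) {
  bool Changed = false;
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    TerminatorInst *TI = BB->getTerminator();
    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      if (isCriticalEdge(TI, i))
        // Passing the pass pointer lets SplitCriticalEdge update DT/DF/LI.
        Changed |= SplitCriticalEdge(TI, i, this) != 0;
  }
  return Changed;
}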
Exemple #29
0
/// OptimizeMemoryInst - Load and Store Instructions often have
/// addressing modes that can do significant amounts of computation.  As such,
/// instruction selection will try to get the load or store to do as much
/// computation as possible for the program.  The problem is that isel can only
/// see within a single block.  As such, we sink as much legal addressing mode
/// stuff into the block as possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands.
bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        Type *AccessTy) {
    Value *Repl = Addr;

    // Try to collapse single-value PHI nodes.  This is necessary to undo
    // unprofitable PRE transformations.
    SmallVector<Value*, 8> worklist;
    SmallPtrSet<Value*, 16> Visited;
    worklist.push_back(Addr);

    // Use a worklist to iteratively look through PHI nodes, and ensure that
    // the addressing modes obtained from the non-PHI roots of the graph
    // are all equivalent.
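    // Consensus tracks the representative non-PHI root found so far; it stays
    // null until a root is matched and is reset to null on any disagreement.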
    Value *Consensus = 0;
    unsigned NumUsesConsensus = 0;
    bool IsNumUsesConsensusValid = false;
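    // AddrMode is the addressing mode computed for the consensus root, and
    // AddrModeInsts lists the instructions that mode would require sinking.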
    SmallVector<Instruction*, 16> AddrModeInsts;
    ExtAddrMode AddrMode;
    while (!worklist.empty()) {
        Value *V = worklist.back();
        worklist.pop_back();

        // Break use-def graph loops.
        if (!Visited.insert(V)) {
            Consensus = 0;
            break;
        }

        // For a PHI node, push all of its incoming values.
        if (PHINode *P = dyn_cast<PHINode>(V)) {
            for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i)
                worklist.push_back(P->getIncomingValue(i));
            continue;
        }

        // For non-PHIs, determine the addressing mode being computed.
        SmallVector<Instruction*, 16> NewAddrModeInsts;
        ExtAddrMode NewAddrMode =
            AddressingModeMatcher::Match(V, AccessTy, MemoryInst,
                                         NewAddrModeInsts, *TLI);

        // This check is broken into two cases with very similar code to avoid using
        // getNumUses() as much as possible. Some values have a lot of uses, so
        // calling getNumUses() unconditionally caused a significant compile-time
        // regression.
        if (!Consensus) {
            Consensus = V;
            AddrMode = NewAddrMode;
            AddrModeInsts = NewAddrModeInsts;
            continue;
        } else if (NewAddrMode == AddrMode) {
            if (!IsNumUsesConsensusValid) {
                NumUsesConsensus = Consensus->getNumUses();
                IsNumUsesConsensusValid = true;
            }

            // Ensure that the obtained addressing mode is equivalent to that obtained
            // for all other roots of the PHI traversal.  Also, when choosing one
            // such root as representative, select the one with the most uses in order
            // to keep the cost modeling heuristics in AddressingModeMatcher
            // applicable.
            unsigned NumUses = V->getNumUses();
            if (NumUses > NumUsesConsensus) {
                Consensus = V;
                NumUsesConsensus = NumUses;
                AddrModeInsts = NewAddrModeInsts;
            }
            continue;
        }

        Consensus = 0;
        break;
    }

    // If the addressing mode couldn't be determined, or if multiple different
    // ones were determined, bail out now.
    if (!Consensus) return false;

    // Check to see if any of the instructions subsumed by this addr mode are
    // non-local to I's BB.
    bool AnyNonLocal = false;
    for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
        if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
            AnyNonLocal = true;
            break;
        }
    }

    // If all the instructions matched are already in this BB, don't do anything.
    if (!AnyNonLocal) {
        DEBUG(dbgs() << "CGP: Found      local addrmode: " << AddrMode << "\n");
        return false;
    }

    // Insert this computation right after this user.  Since our caller is
    // scanning from the top of the BB to the bottom, any reuse of the expr is
    // guaranteed to happen later.
    IRBuilder<> Builder(MemoryInst);

    // Now that we've determined the addressing expression we want to use and
    // know that we have to sink it into this block, check to see if we have
    // already done this for some other load/store instr in this block.  If
    // so, reuse the computation.
    Value *&SunkAddr = SunkAddrs[Addr];
    if (SunkAddr) {
        DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
              << *MemoryInst);
        if (SunkAddr->getType() != Addr->getType())
            SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
    } else {
        DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
              << *MemoryInst);
        Type *IntPtrTy =
            TLI->getTargetData()->getIntPtrType(AccessTy->getContext());

        Value *Result = 0;

        // Start with the base register. Do this first so that subsequent address
        // matching finds it last, which will prevent it from trying to match it
        // as the scaled value in case it happens to be a mul. That would be
        // problematic if we've sunk a different mul for the scale, because then
        // we'd end up sinking both muls.
        if (AddrMode.BaseReg) {
            Value *V = AddrMode.BaseReg;
            if (V->getType()->isPointerTy())
                V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
            if (V->getType() != IntPtrTy)
                V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
            Result = V;
        }

        // Add the scale value.
        if (AddrMode.Scale) {
            Value *V = AddrMode.ScaledReg;
            if (V->getType() == IntPtrTy) {
                // V already has the desired type; nothing to do.
            } else if (V->getType()->isPointerTy()) {
                V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
            } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                       cast<IntegerType>(V->getType())->getBitWidth()) {
                V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
            } else {
                V = Builder.CreateSExt(V, IntPtrTy, "sunkaddr");
            }
            if (AddrMode.Scale != 1)
                V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                                      "sunkaddr");
            if (Result)
                Result = Builder.CreateAdd(Result, V, "sunkaddr");
            else
                Result = V;
        }

        // Add in the BaseGV if present.
        if (AddrMode.BaseGV) {
            Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
            if (Result)
                Result = Builder.CreateAdd(Result, V, "sunkaddr");
            else
                Result = V;
        }

        // Add in the Base Offset if present.
        if (AddrMode.BaseOffs) {
            Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
            if (Result)
                Result = Builder.CreateAdd(Result, V, "sunkaddr");
            else
                Result = V;
        }

        if (Result == 0)
            SunkAddr = Constant::getNullValue(Addr->getType());
        else
            SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
    }

    MemoryInst->replaceUsesOfWith(Repl, SunkAddr);

    // If we have no uses, recursively delete the value and all dead instructions
    // using it.
    if (Repl->use_empty()) {
        // This can cause recursive deletion, which can invalidate our iterator.
        // Use a WeakVH to hold onto it in case this happens.
        WeakVH IterHandle(CurInstIterator);
        BasicBlock *BB = CurInstIterator->getParent();

        RecursivelyDeleteTriviallyDeadInstructions(Repl);

        if (IterHandle != CurInstIterator) {
            // If the iterator instruction was recursively deleted, start over
            // at the beginning of the block.
            CurInstIterator = BB->begin();
            SunkAddrs.clear();
        } else {
            // This address is now available for reassignment, so erase the table
            // entry; we don't want to match some completely different instruction.
            SunkAddrs[Addr] = 0;
        }
    }
    ++NumMemoryInsts;
    return true;
}
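
// For reference, the address the sinking code above materializes is,
// conceptually, SunkAddr = BaseReg + ScaledReg * Scale + BaseGV + BaseOffs,
// with every term optional. A standalone sketch of that arithmetic follows
// (illustrative names, not LLVM API); absent terms contribute zero.
#include <cstdint>

static uint64_t sunkAddress(uint64_t BaseReg, uint64_t ScaledReg,
                            int64_t Scale, uint64_t BaseGV, int64_t BaseOffs) {
  return BaseReg + ScaledReg * static_cast<uint64_t>(Scale) + BaseGV +
         static_cast<uint64_t>(BaseOffs);
}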
bool AArch64RedundantCopyElimination::optimizeBlock(MachineBasicBlock *MBB) {
  // Check if the current basic block has a single predecessor.
  if (MBB->pred_size() != 1)
    return false;

  // Check if the predecessor has two successors, implying the block ends in a
  // conditional branch.
  MachineBasicBlock *PredMBB = *MBB->pred_begin();
  if (PredMBB->succ_size() != 2)
    return false;

  MachineBasicBlock::iterator CondBr = PredMBB->getLastNonDebugInstr();
  if (CondBr == PredMBB->end())
    return false;

  // Keep track of the earliest point in PredMBB where kill markers need to
  // be removed if a COPY is removed.
  MachineBasicBlock::iterator FirstUse;
  // After calling knownRegValInBlock, FirstUse will either point to a CBZ/CBNZ
  // or a compare (i.e., SUBS).  In the latter case, we must take care when
  // updating FirstUse when scanning for COPY instructions.  In particular, if
  // there's a COPY in between the compare and the branch, the COPY should not
  // update FirstUse.
  bool SeenFirstUse = false;
  // Registers that contain a known value at the start of MBB.
  SmallVector<RegImm, 4> KnownRegs;

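  // Walk backward from the conditional branch (starting with the branch
  // itself) through PredMBB's terminators, looking for an instruction, such
  // as a CBZ/CBNZ or a compare, that implies known register values in MBB.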
  MachineBasicBlock::iterator Itr = std::next(CondBr);
  do {
    --Itr;

    if (!knownRegValInBlock(*Itr, MBB, KnownRegs, FirstUse))
      continue;

    // Reset the clobber list.
    OptBBClobberedRegs.reset();

    // Look backward in PredMBB for COPYs from the known reg to find other
    // registers that are known to be a constant value.
    for (auto PredI = Itr;; --PredI) {
      if (FirstUse == PredI)
        SeenFirstUse = true;

      if (PredI->isCopy()) {
        MCPhysReg CopyDstReg = PredI->getOperand(0).getReg();
        MCPhysReg CopySrcReg = PredI->getOperand(1).getReg();
        for (auto &KnownReg : KnownRegs) {
          if (OptBBClobberedRegs[KnownReg.Reg])
            continue;
          // If we have X = COPY Y, and Y is known to hold some value, then X
          // now holds the same value.
          if (CopySrcReg == KnownReg.Reg && !OptBBClobberedRegs[CopyDstReg]) {
            KnownRegs.push_back(RegImm(CopyDstReg, KnownReg.Imm));
            if (SeenFirstUse)
              FirstUse = PredI;
            break;
          }
          // If we have X = COPY Y, and X is known to hold some value, then Y
          // must have held the same value.
          if (CopyDstReg == KnownReg.Reg && !OptBBClobberedRegs[CopySrcReg]) {
            KnownRegs.push_back(RegImm(CopySrcReg, KnownReg.Imm));
            if (SeenFirstUse)
              FirstUse = PredI;
            break;
          }
        }
      }

      // Stop if we get to the beginning of PredMBB.
      if (PredI == PredMBB->begin())
        break;

      trackRegDefs(*PredI, OptBBClobberedRegs, TRI);
      // Stop if all of the known regs have been clobbered.
      if (all_of(KnownRegs, [&](RegImm KnownReg) {
            return OptBBClobberedRegs[KnownReg.Reg];
          }))
        break;
    }
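    // Only the first instruction that yields known register values is
    // processed; the backward scan above has already propagated them through
    // any COPYs.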
    break;

  } while (Itr != PredMBB->begin() && Itr->isTerminator());

  // We haven't found any registers with a known value; time to bail out.
  if (KnownRegs.empty())
    return false;

  bool Changed = false;
  // UsedKnownRegs is the set of KnownRegs that have had uses added to MBB.
  SmallSetVector<unsigned, 4> UsedKnownRegs;
  MachineBasicBlock::iterator LastChange = MBB->begin();
  // Remove redundant copy/move instructions unless KnownReg is modified.
  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;) {
    MachineInstr *MI = &*I;
    ++I;
    bool RemovedMI = false;
    bool IsCopy = MI->isCopy();
    bool IsMoveImm = MI->isMoveImmediate();
    if (IsCopy || IsMoveImm) {
      MCPhysReg DefReg = MI->getOperand(0).getReg();
      MCPhysReg SrcReg = IsCopy ? MI->getOperand(1).getReg() : 0;
      int64_t SrcImm = IsMoveImm ? MI->getOperand(1).getImm() : 0;
      if (!MRI->isReserved(DefReg) &&
          ((IsCopy && (SrcReg == AArch64::XZR || SrcReg == AArch64::WZR)) ||
           IsMoveImm)) {
        for (RegImm &KnownReg : KnownRegs) {
          if (KnownReg.Reg != DefReg &&
              !TRI->isSuperRegister(DefReg, KnownReg.Reg))
            continue;

          // For a copy, the known value must be zero.
          if (IsCopy && KnownReg.Imm != 0)
            continue;

          if (IsMoveImm) {
            // For a move immediate, the known immediate must match the source
            // immediate.
            if (KnownReg.Imm != SrcImm)
              continue;

            // Don't remove a move immediate that implicitly defines the upper
            // bits when only the lower 32 bits are known.
            MCPhysReg CmpReg = KnownReg.Reg;
            if (any_of(MI->implicit_operands(), [CmpReg](MachineOperand &O) {
                  return !O.isDead() && O.isReg() && O.isDef() &&
                         O.getReg() != CmpReg;
                }))
              continue;
          }

          if (IsCopy)
            DEBUG(dbgs() << "Remove redundant Copy : " << *MI);
          else
            DEBUG(dbgs() << "Remove redundant Move : " << *MI);

          MI->eraseFromParent();
          Changed = true;
          LastChange = I;
          NumCopiesRemoved++;
          UsedKnownRegs.insert(KnownReg.Reg);
          RemovedMI = true;
          break;
        }
      }
    }

    // Skip to the next instruction if we removed the COPY/MovImm.
    if (RemovedMI)
      continue;

    // Remove any regs the MI clobbers from the KnownConstRegs set.
    for (unsigned RI = 0; RI < KnownRegs.size();)
      if (MI->modifiesRegister(KnownRegs[RI].Reg, TRI)) {
        std::swap(KnownRegs[RI], KnownRegs[KnownRegs.size() - 1]);
        KnownRegs.pop_back();
        // Don't increment RI since we need to now check the swapped-in
        // KnownRegs[RI].
      } else {
        ++RI;
      }

    // Continue until the KnownRegs set is empty.
    if (KnownRegs.empty())
      break;
  }

  if (!Changed)
    return false;

  // Add newly used regs to the block's live-in list if they aren't there
  // already.
  for (MCPhysReg KnownReg : UsedKnownRegs)
    if (!MBB->isLiveIn(KnownReg))
      MBB->addLiveIn(KnownReg);

  // Clear kills in the range where changes were made.  This is conservative,
  // but should be okay since kill markers are being phased out.
  DEBUG(dbgs() << "Clearing kill flags.\n\tFirstUse: " << *FirstUse
               << "\tLastChange: " << *LastChange);
  for (MachineInstr &MMI : make_range(FirstUse, PredMBB->end()))
    MMI.clearKillInfo();
  for (MachineInstr &MMI : make_range(MBB->begin(), LastChange))
    MMI.clearKillInfo();

  return true;
}
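
// The KnownRegs maintenance loop above uses the swap-with-back-and-pop idiom
// for O(1) unordered erasure from a vector. A generic standalone sketch of
// the same idiom (the helper name is illustrative):
#include <utility>
#include <vector>

template <typename T, typename Pred>
static void eraseUnorderedIf(std::vector<T> &Vec, Pred ShouldErase) {
  for (typename std::vector<T>::size_type I = 0; I < Vec.size();) {
    if (ShouldErase(Vec[I])) {
      // Overwrite slot I with the last element, then shrink: O(1) per erase.
      std::swap(Vec[I], Vec.back());
      Vec.pop_back();
      // Don't advance I; the swapped-in element must be examined too.
    } else {
      ++I;
    }
  }
}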