Code Example #1
File: Scalarizer.cpp Project: julaiti-graphsql/llvm
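// Scalarize a GEP that produces a vector of pointers: scatter the base and
// index operands into per-lane values, build one scalar GEP per lane, then
// gather the per-lane results back into a single vector value.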
bool Scalarizer::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  VectorType *VT = dyn_cast<VectorType>(GEPI.getType());
  if (!VT)
    return false;

  IRBuilder<> Builder(&GEPI);
  unsigned NumElems = VT->getNumElements();
  unsigned NumIndices = GEPI.getNumIndices();

  Scatterer Base = scatter(&GEPI, GEPI.getOperand(0));

  SmallVector<Scatterer, 8> Ops;
  Ops.resize(NumIndices);
  for (unsigned I = 0; I < NumIndices; ++I)
    Ops[I] = scatter(&GEPI, GEPI.getOperand(I + 1));

  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    SmallVector<Value *, 8> Indices;
    Indices.resize(NumIndices);
    for (unsigned J = 0; J < NumIndices; ++J)
      Indices[J] = Ops[J][I];
    Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices,
                               GEPI.getName() + ".i" + Twine(I));
    if (GEPI.isInBounds())
      if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
        NewGEPI->setIsInBounds();
  }
  gather(&GEPI, Res);
  return true;
}
Code Example #2
/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
///
void PEI::replaceFrameIndices(MachineFunction &Fn) {
  if (!Fn.getFrameInfo()->hasStackObjects()) return; // Nothing to do?

  // Store SPAdj at exit of a basic block.
  SmallVector<int, 8> SPState;
  SPState.resize(Fn.getNumBlockIDs());
  SmallPtrSet<MachineBasicBlock*, 8> Reachable;

  // Iterate over the reachable blocks in DFS order.
  for (df_ext_iterator<MachineFunction*, SmallPtrSet<MachineBasicBlock*, 8> >
       DFI = df_ext_begin(&Fn, Reachable), DFE = df_ext_end(&Fn, Reachable);
       DFI != DFE; ++DFI) {
    int SPAdj = 0;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      SPAdj = SPState[StackPred->getNumber()];
    }
    MachineBasicBlock *BB = *DFI;
    replaceFrameIndices(BB, Fn, SPAdj);
    SPState[BB->getNumber()] = SPAdj;
  }

  // Handle the unreachable blocks.
  for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
    if (Reachable.count(BB))
      // Already handled in DFS traversal.
      continue;
    int SPAdj = 0;
    replaceFrameIndices(BB, Fn, SPAdj);
  }
}
Code Example #3
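// Create the new subranges and remap each value number of every subrange to
// the live interval chosen by its connected-component equivalence class.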
void RenameIndependentSubregs::distribute(const IntEqClasses &Classes,
    const SmallVectorImpl<SubRangeInfo> &SubRangeInfos,
    const SmallVectorImpl<LiveInterval*> &Intervals) const {
  unsigned NumClasses = Classes.getNumClasses();
  SmallVector<unsigned, 8> VNIMapping;
  SmallVector<LiveInterval::SubRange*, 8> SubRanges;
  BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator();
  for (const SubRangeInfo &SRInfo : SubRangeInfos) {
    LiveInterval::SubRange &SR = *SRInfo.SR;
    unsigned NumValNos = SR.valnos.size();
    VNIMapping.clear();
    VNIMapping.reserve(NumValNos);
    SubRanges.clear();
    SubRanges.resize(NumClasses-1, nullptr);
    for (unsigned I = 0; I < NumValNos; ++I) {
      const VNInfo &VNI = *SR.valnos[I];
      unsigned LocalID = SRInfo.ConEQ.getEqClass(&VNI);
      unsigned ID = Classes[LocalID + SRInfo.Index];
      VNIMapping.push_back(ID);
      if (ID > 0 && SubRanges[ID-1] == nullptr)
        SubRanges[ID-1] = Intervals[ID]->createSubRange(Allocator, SR.LaneMask);
    }
    DistributeRange(SR, SubRanges.data(), VNIMapping);
  }
}
Code Example #4
File: ComputationNode.cpp Project: Soukiy/CNTK
// binary zip operation, e.g. Plus
// If allowBroadcast then one can be a sub-dimension of the other (if layout then only for rows, otherwise for cols, too).
// This also helpfully resizes the children if not yet sized.
void ComputationNodeBase::ValidateBinaryZip(bool isFinalValidationPass, bool allowBroadcast)
{
    assert(m_inputs.size() == 2);
    ComputationNodeBase::Validate(isFinalValidationPass);
    InferMBLayoutFromInputsForStandardCase(isFinalValidationPass);

    ValidateInferBinaryInputDims();

    if (isFinalValidationPass)
        ValidateMBLayout(Input(0), Input(1));

    // result has tensor shape with dimensions being the max over both
    let shape0 = GetInputSampleLayout(0);
    let shape1 = GetInputSampleLayout(1);
    SmallVector<size_t> dims = shape0.GetDims();
    if (shape1.GetRank() > dims.size())
        dims.resize(shape1.GetRank(), 1); // pad with ones

    // If the rank of [0] is higher, then we only need to take the max over the rank of [1].
    // If the rank of [1] is higher, then we have padded [0] to equal length.
    for (size_t k = 0; k < shape1.GetRank(); k++)
    {
        size_t dim1 = shape1[k];
        // BUGBUG: We must consider the allowBroadcast flag here.
        if (dims[k] <= 1 && dim1 != 0)                     // is [0] broadcasting (1) or unspecified (0)?
            dims[k] = dim1;                                // then use dimension we broadcast to
        else if (dim1 <= 1 && dims[k] != 0)                // if [1] is broadcasting or unspecified
            ;                                              // then dims is already correct
        else if (isFinalValidationPass && dim1 != dims[k]) // no broadcasting or unspecified: they must match
            InvalidArgument("%ls: Input dimensions [%s] and [%s] are not compatible.",
                            NodeDescription().c_str(), string(shape0).c_str(), string(shape1).c_str());
    }

    SetDims(TensorShape(dims), HasMBLayout());
}
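For example, zipping sample layouts [3 x 1] and [3 x 4] broadcasts the second dimension of input [0], so the resulting shape is [3 x 4].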
Code Example #5
// Topologically sorts the basic blocks in the function and writes the ordering
// into the supplied unique vector. The back and incoming edges must have been
// computed first.
void LiveIRVariables::computeTopologicalOrdering(Function &F,
                                       UniqueVector<BasicBlock *> &Ordering) {
  assert(IncomingEdges.size() == F.size() &&
         "Incoming edges not computed yet!");

  SmallVector<unsigned, 256> ProcessedIncomingEdges;
  ProcessedIncomingEdges.resize(F.size(), 0);

  SmallVector<BasicBlock *, 256> WorkList;
  WorkList.push_back(&F.getEntryBlock());

  while (!WorkList.empty()) {
    BasicBlock *BB = WorkList.back();
    WorkList.pop_back();

    DEBUG(dbgs() << "Assigning topological order " << Ordering.size());
    DEBUG(dbgs() << " to basic block with DFS order ");
    DEBUG(dbgs() << (DFSOrdering.idFor(BB) - 1) << "\n");

    Ordering.insert(BB);

    for (succ_iterator SI = succ_begin(BB),
                       SE = succ_end(BB); SI != SE; ++SI) {
      if (BackEdges.count(std::make_pair(BB, *SI)))
        continue;

      unsigned DFSID = DFSOrdering.idFor(*SI) - 1;
      unsigned ProcessedEdges = ++ProcessedIncomingEdges[DFSID];
      if (ProcessedEdges == IncomingEdges[DFSID])
        WorkList.push_back(*SI);
    }
  }
}
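This is a Kahn-style topological sort: a successor is pushed onto the work list, and hence appended to the ordering, only once all of its non-back-edge incoming edges have been processed.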
Code Example #6
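// Join Directory and Filename into a single null-terminated path string.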
std::string DebugIR::getPath() {
  SmallVector<char, 16> Path;
  sys::path::append(Path, Directory, Filename);
  Path.resize(Filename.size() + Directory.size() + 2);
  Path[Filename.size() + Directory.size() + 1] = '\0';
  return std::string(Path.data());
}
Code Example #7
/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
void PEI::replaceFrameIndices(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
  if (!TFI.needsFrameIndexResolution(MF)) return;

  // Store SPAdj at exit of a basic block.
  SmallVector<int, 8> SPState;
  SPState.resize(MF.getNumBlockIDs());
  df_iterator_default_set<MachineBasicBlock*> Reachable;

  // Iterate over the reachable blocks in DFS order.
  for (auto DFI = df_ext_begin(&MF, Reachable), DFE = df_ext_end(&MF, Reachable);
       DFI != DFE; ++DFI) {
    int SPAdj = 0;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      SPAdj = SPState[StackPred->getNumber()];
    }
    MachineBasicBlock *BB = *DFI;
    replaceFrameIndices(BB, MF, SPAdj);
    SPState[BB->getNumber()] = SPAdj;
  }

  // Handle the unreachable blocks.
  for (auto &BB : MF) {
    if (Reachable.count(&BB))
      // Already handled in DFS traversal.
      continue;
    int SPAdj = 0;
    replaceFrameIndices(&BB, MF, SPAdj);
  }
}
Code Example #8
llvm::Constant* CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral* E) {
    //assert(!E->getType()->isPointerType() && "Strings are always arrays");

    // TEMP only handle 1 byte per char
    SmallString<64> Str(E->getValue());
    Str.resize(E->getByteLength());
    //return llvm::ConstantDataArray::getString(context, Str, false);
    return llvm::ConstantDataArray::getString(context, Str, true); // true: append terminating 0

#if 0
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    if (E->getCharByteWidth() == 1) {
        SmallString<64> Str(E->getString());

        // Resize the string to the right size, which is indicated by its type.
        const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
        Str.resize(CAT->getSize().getZExtValue());
        return llvm::ConstantDataArray::getString(VMContext, Str, false);
    }

    llvm::ArrayType *AType =
        cast<llvm::ArrayType>(getTypes().ConvertType(E->getType()));
    llvm::Type *ElemTy = AType->getElementType();
    unsigned NumElements = AType->getNumElements();

    // Wide strings have either 2-byte or 4-byte elements.
    if (ElemTy->getPrimitiveSizeInBits() == 16) {
        SmallVector<uint16_t, 32> Elements;
        Elements.reserve(NumElements);

        for(unsigned i = 0, e = E->getLength(); i != e; ++i)
            Elements.push_back(E->getCodeUnit(i));
        Elements.resize(NumElements);
        return llvm::ConstantDataArray::get(VMContext, Elements);
    }

    assert(ElemTy->getPrimitiveSizeInBits() == 32);
    SmallVector<uint32_t, 32> Elements;
    Elements.reserve(NumElements);

    for(unsigned i = 0, e = E->getLength(); i != e; ++i)
        Elements.push_back(E->getCodeUnit(i));
    Elements.resize(NumElements);
    return llvm::ConstantDataArray::get(VMContext, Elements);
#endif
}
Code Example #9
File: PrologEpilogInserter.cpp Project: r0mai/llvm
/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
///
void PEI::replaceFrameIndices(MachineFunction &Fn) {
  const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
  if (!TFI.needsFrameIndexResolution(Fn)) return;

  MachineModuleInfo &MMI = Fn.getMMI();
  const Function *F = Fn.getFunction();
  const Function *ParentF = MMI.getWinEHParent(F);
  unsigned FrameReg;
  if (F == ParentF) {
    WinEHFuncInfo &FuncInfo = MMI.getWinEHFuncInfo(Fn.getFunction());
    // FIXME: This should be unconditional but we have bugs in the preparation
    // pass.
    if (FuncInfo.UnwindHelpFrameIdx != INT_MAX)
      FuncInfo.UnwindHelpFrameOffset = TFI.getFrameIndexReferenceFromSP(
          Fn, FuncInfo.UnwindHelpFrameIdx, FrameReg);
    for (WinEHTryBlockMapEntry &TBME : FuncInfo.TryBlockMap) {
      for (WinEHHandlerType &H : TBME.HandlerArray) {
        unsigned UnusedReg;
        if (H.CatchObj.FrameIndex == INT_MAX)
          H.CatchObj.FrameOffset = INT_MAX;
        else
          H.CatchObj.FrameOffset =
              TFI.getFrameIndexReference(Fn, H.CatchObj.FrameIndex, UnusedReg);
      }
    }
  }

  // Store SPAdj at exit of a basic block.
  SmallVector<int, 8> SPState;
  SPState.resize(Fn.getNumBlockIDs());
  SmallPtrSet<MachineBasicBlock*, 8> Reachable;

  // Iterate over the reachable blocks in DFS order.
  for (auto DFI = df_ext_begin(&Fn, Reachable), DFE = df_ext_end(&Fn, Reachable);
       DFI != DFE; ++DFI) {
    int SPAdj = 0;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      SPAdj = SPState[StackPred->getNumber()];
    }
    MachineBasicBlock *BB = *DFI;
    replaceFrameIndices(BB, Fn, SPAdj);
    SPState[BB->getNumber()] = SPAdj;
  }

  // Handle the unreachable blocks.
  for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
    if (Reachable.count(BB))
      // Already handled in DFS traversal.
      continue;
    int SPAdj = 0;
    replaceFrameIndices(BB, Fn, SPAdj);
  }
}
Code Example #10
SmallVector<uint8_t, 64>
GetShadowBytes(const SmallVectorImpl<ASanStackVariableDescription> &Vars,
               const ASanStackFrameLayout &Layout) {
  assert(Vars.size() > 0);
  SmallVector<uint8_t, 64> SB;
  SB.clear();
  const size_t Granularity = Layout.Granularity;
  SB.resize(Vars[0].Offset / Granularity, kAsanStackLeftRedzoneMagic);
  for (const auto &Var : Vars) {
    SB.resize(Var.Offset / Granularity, kAsanStackMidRedzoneMagic);

    SB.resize(SB.size() + Var.Size / Granularity, 0);
    if (Var.Size % Granularity)
      SB.push_back(Var.Size % Granularity);
  }
  SB.resize(Layout.FrameSize / Granularity, kAsanStackRightRedzoneMagic);
  return SB;
}
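A minimal standalone sketch of the same layout computation, with a simplified Var struct and made-up magic-byte values standing in for the real ASan definitions (all names here are assumptions, not the upstream API):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Simplified stand-ins for the real ASan types and constants (assumptions).
struct Var { size_t Offset, Size; };
const uint8_t kLeft = 0xf1, kMid = 0xf2, kRight = 0xf3;

int main() {
  const size_t Granularity = 8, FrameSize = 64;
  std::vector<Var> Vars = {{32, 10}}; // one 10-byte variable at offset 32
  std::vector<uint8_t> SB;
  SB.resize(Vars[0].Offset / Granularity, kLeft);    // left redzone
  for (const Var &V : Vars) {
    SB.resize(V.Offset / Granularity, kMid);         // mid redzone between vars
    SB.resize(SB.size() + V.Size / Granularity, 0);  // fully addressable granules
    if (V.Size % Granularity)
      SB.push_back(V.Size % Granularity);            // partially addressable granule
  }
  SB.resize(FrameSize / Granularity, kRight);        // right redzone
  for (uint8_t B : SB)
    std::printf("%02x ", B);                         // prints: f1 f1 f1 f1 00 02 f3 f3
  return 0;
}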
Code Example #11
File: ShadowStackGC.cpp Project: 7heaven/softart
Constant *ShadowStackGC::GetFrameMap(Function &F) {
  // doInitialization creates the abstract type of this value.
  Type *VoidPtr = Type::getInt8PtrTy(F.getContext());

  // Truncate the ShadowStackDescriptor if some metadata is null.
  unsigned NumMeta = 0;
  SmallVector<Constant*, 16> Metadata;
  for (unsigned I = 0; I != Roots.size(); ++I) {
    Constant *C = cast<Constant>(Roots[I].first->getArgOperand(1));
    if (!C->isNullValue())
      NumMeta = I + 1;
    Metadata.push_back(ConstantExpr::getBitCast(C, VoidPtr));
  }
  Metadata.resize(NumMeta);

  Type *Int32Ty = Type::getInt32Ty(F.getContext());
  
  Constant *BaseElts[] = {
    ConstantInt::get(Int32Ty, Roots.size(), false),
    ConstantInt::get(Int32Ty, NumMeta, false),
  };

  Constant *DescriptorElts[] = {
    ConstantStruct::get(FrameMapTy, BaseElts),
    ConstantArray::get(ArrayType::get(VoidPtr, NumMeta), Metadata)
  };

  Type *EltTys[] = { DescriptorElts[0]->getType(), DescriptorElts[1]->getType() };
  StructType *STy = StructType::create(EltTys, "gc_map."+utostr(NumMeta));
  
  Constant *FrameMap = ConstantStruct::get(STy, DescriptorElts);

  // FIXME: Is this actually dangerous as WritingAnLLVMPass.html claims? Seems
  //        that, short of multithreaded LLVM, it should be safe; all that is
  //        necessary is that a simple Module::iterator loop not be invalidated.
  //        Appending to the GlobalVariable list is safe in that sense.
  //
  //        All of the output passes emit globals last. The ExecutionEngine
  //        explicitly supports adding globals to the module after
  //        initialization.
  //
  //        Still, if it isn't deemed acceptable, then this transformation needs
  //        to be a ModulePass (which means it cannot be in the 'llc' pipeline
  //        (which uses a FunctionPassManager (which segfaults (not asserts) if
  //        provided a ModulePass))).
  Constant *GV = new GlobalVariable(*F.getParent(), FrameMap->getType(), true,
                                    GlobalVariable::InternalLinkage,
                                    FrameMap, "__gc_" + F.getName());

  Constant *GEPIndices[2] = {
                          ConstantInt::get(Type::getInt32Ty(F.getContext()), 0),
                          ConstantInt::get(Type::getInt32Ty(F.getContext()), 0)
                          };
  return ConstantExpr::getGetElementPtr(GV, GEPIndices);
}
Code Example #12
File: PredictableMemOpt.cpp Project: asdfeng/swift
/// promoteDestroyAddr - DestroyAddr is a composed operation merging
/// load+strong_release.  If the implicit load's value is available, explode it.
///
/// Note that we handle the general case of a destroy_addr of a piece of the
/// memory object, not just destroy_addrs of the entire thing.
///
bool AllocOptimize::promoteDestroyAddr(DestroyAddrInst *DAI) {
  SILValue Address = DAI->getOperand();
  
  // We cannot promote destroys of address-only types, because we can't expose
  // the load.
  SILType LoadTy = Address.getType().getObjectType();
  if (LoadTy.isAddressOnly(Module))
    return false;
  
  // If the box has escaped at this instruction, we can't safely promote the
  // load.
  if (hasEscapedAt(DAI))
    return false;
  
  // Compute the access path down to the field so we can determine precise
  // def/use behavior.
  unsigned FirstElt = computeSubelement(Address, TheMemory);
  assert(FirstElt != ~0U && "destroy within enum projection is not valid");
  unsigned NumLoadSubElements = getNumSubElements(LoadTy, Module);
  
  // Set up the bitvector of elements being demanded by the load.
  llvm::SmallBitVector RequiredElts(NumMemorySubElements);
  RequiredElts.set(FirstElt, FirstElt+NumLoadSubElements);
  
  SmallVector<std::pair<SILValue, unsigned>, 8> AvailableValues;
  AvailableValues.resize(NumMemorySubElements);
  
  // Find out if we have any available values.  If no bits are demanded, we
  // trivially succeed. This can happen when there is a load of an empty struct.
  if (NumLoadSubElements != 0) {
    computeAvailableValues(DAI, RequiredElts, AvailableValues);
    
    // If some value is not available at this load point, then we fail.
    for (unsigned i = FirstElt, e = FirstElt+NumLoadSubElements; i != e; ++i)
      if (!AvailableValues[i].first.isValid())
        return false;
  }
  
  // Aggregate together all of the subelements into something that has the same
  // type as the load did, and emit (smaller) loads for any subelements that were
  // not available.
  auto NewVal =
  AggregateAvailableValues(DAI, LoadTy, Address, AvailableValues, FirstElt);
  
  ++NumDestroyAddrPromoted;
  
  DEBUG(llvm::dbgs() << "  *** Promoting destroy_addr: " << *DAI << "\n");
  DEBUG(llvm::dbgs() << "      To value: " << *NewVal.getDef() << "\n");
  
  SILBuilderWithScope(DAI).emitReleaseValueOperation(DAI->getLoc(), NewVal);
  DAI->eraseFromParent();
  return true;
}
Code Example #13
void MatcherGen::EmitResultCode() {
  // Patterns that match nodes with (potentially multiple) chain inputs have to
  // merge them together into a token factor.  This informs the generated code
  // what all the chained nodes are.
  if (!MatchedChainNodes.empty())
    AddMatcher(new EmitMergeInputChainsMatcher
               (MatchedChainNodes.data(), MatchedChainNodes.size()));

  // Codegen the root of the result pattern, capturing the resulting values.
  SmallVector<unsigned, 8> Ops;
  EmitResultOperand(Pattern.getDstPattern(), Ops);

  // At this point, we have however many values the result pattern produces.
  // However, the input pattern might not need all of these.  If there are
  // excess values at the end (such as implicit defs of condition codes etc)
  // just lop them off.  This doesn't need to worry about glue or chains, just
  // explicit results.
  //
  unsigned NumSrcResults = Pattern.getSrcPattern()->getNumTypes();

  // If the pattern also has (implicit) results, count them as well.
  if (!Pattern.getDstRegs().empty()) {
    // If the root came from an implicit def in the instruction handling stuff,
    // don't re-add it.
    Record *HandledReg = 0;
    const TreePatternNode *DstPat = Pattern.getDstPattern();
    if (!DstPat->isLeaf() && DstPat->getOperator()->isSubClassOf("Instruction")) {
      const CodeGenTarget &CGT = CGP.getTargetInfo();
      CodeGenInstruction &II = CGT.getInstruction(DstPat->getOperator());

      if (II.HasOneImplicitDefWithKnownVT(CGT) != MVT::Other)
        HandledReg = II.ImplicitDefs[0];
    }

    for (unsigned i = 0; i != Pattern.getDstRegs().size(); ++i) {
      Record *Reg = Pattern.getDstRegs()[i];
      if (!Reg->isSubClassOf("Register") || Reg == HandledReg) continue;
      ++NumSrcResults;
    }
  }

  assert(Ops.size() >= NumSrcResults && "Didn't provide enough results");
  Ops.resize(NumSrcResults);

  // If the matched pattern covers nodes which define a glue result, emit a node
  // that tells the matcher about them so that it can update their results.
  if (!MatchedGlueResultNodes.empty())
    AddMatcher(new MarkGlueResultsMatcher(MatchedGlueResultNodes.data(),
                                          MatchedGlueResultNodes.size()));

  AddMatcher(new CompleteMatchMatcher(Ops.data(), Ops.size(), Pattern));
}
Code Example #14
File: vSSA.cpp Project: dtzWill/ecosoc
/*
 * Renaming uses of V to uses of sigma
 * The rule of renaming is:
 *   - All uses of V in the dominator tree of sigma(V) are renamed, except for the sigma itself, of course
 *   - Uses of V in the dominance frontier of sigma(V) are renamed iff they are in PHI nodes (maybe this always happens)
 */
void vSSA::renameUsesToSigma(Value *V, PHINode *sigma)
{
	DEBUG(dbgs() << "VSSA: Renaming uses of " << *V << " to " << *sigma << "\n");
	BasicBlock *BB_next = sigma->getParent();

	// Get the dominance frontier of the successor
	DominanceFrontier::iterator DF_BB = DF_->find(BB_next);
	
	// This vector of Instruction* points to the uses of V.
	// This auxiliary vector of pointers is used because the use_iterators are invalidated when we do the renaming
	SmallVector<Instruction*, 25> usepointers;
	unsigned i = 0, n = V->getNumUses();
	usepointers.resize(n);
	
	for (Value::use_iterator uit = V->use_begin(), uend = V->use_end(); uit != uend; ++uit, ++i)
		usepointers[i] = dyn_cast<Instruction>(*uit);
	
	for (i = 0; i < n; ++i) {
		if (usepointers[i] ==  NULL) {
			continue;
		}
		if (usepointers[i] == sigma) {
			continue;
		}
		/* if (isa<GetElementPtrInst>(usepointers[i])) {
			continue;
		} */
		
		BasicBlock *BB_user = usepointers[i]->getParent();
		
		// Check if the use is in the dominator tree of sigma(V)
		if (DT_->dominates(BB_next, BB_user)){
			usepointers[i]->replaceUsesOfWith(V, sigma);
		}
		// Check if the use is in the dominance frontier of sigma(V)
		else if ((DF_BB != DF_->end()) && (DF_BB->second.find(BB_user) != DF_BB->second.end())) {
			// Check if the user is a PHI node (it has to be, but only for precaution)
			if (PHINode *phi = dyn_cast<PHINode>(usepointers[i])) {
				for (unsigned i = 0, e = phi->getNumIncomingValues(); i < e; ++i) {
					Value *operand = phi->getIncomingValue(i);
					
					if (operand != V)
						continue;
					
					if (DT_->dominates(BB_next, phi->getIncomingBlock(i))) {
						phi->setIncomingValue(i, sigma);
					}
				}
			}
		}
	}
}
Code Example #15
File: TimingSource.cpp Project: LGZ-T/llvm-prof
StringRef LmbenchTiming::getName(EnumTy IG)
{
   static SmallVector<std::string,NumGroups> InstGroupNames;
   if (InstGroupNames.empty()) {
      InstGroupNames.resize(NumGroups);
      auto& n = InstGroupNames;
      std::vector<std::pair<EnumTy,StringRef>> bits = {
         {Integer, "I"}, {I64, "I64"}, {Float,"F"}, {Double,"D"}
      }, ops = {{Add,"Add"},{Mul, "Mul"}, {Div, "Div"}, {Mod, "Mod"}};
      for(auto bit : bits){
         for(auto op : ops)
            n[bit.first|op.first] = (bit.second+op.second).str();
      }
   }
   return InstGroupNames[IG];
}
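A usage sketch, assuming the enum bits compose as in the table above and that getName can be called as shown (both are assumptions, since only this function is visible):

// (Float | Mul) indexes the "F" + "Mul" entry built above, i.e. "FMul".
StringRef Name = LmbenchTiming::getName(
    LmbenchTiming::EnumTy(LmbenchTiming::Float | LmbenchTiming::Mul));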
Code Example #16
File: IRMover.cpp Project: yxsamliu/llvm
void TypeMapTy::linkDefinedTypeBodies() {
  SmallVector<Type *, 16> Elements;
  for (StructType *SrcSTy : SrcDefinitionsToResolve) {
    StructType *DstSTy = cast<StructType>(MappedTypes[SrcSTy]);
    assert(DstSTy->isOpaque());

    // Map the body of the source type over to a new body for the dest type.
    Elements.resize(SrcSTy->getNumElements());
    for (unsigned I = 0, E = Elements.size(); I != E; ++I)
      Elements[I] = get(SrcSTy->getElementType(I));

    DstSTy->setBody(Elements, SrcSTy->isPacked());
    DstStructTypesSet.switchToNonOpaque(DstSTy);
  }
  SrcDefinitionsToResolve.clear();
  DstResolvedOpaqueTypes.clear();
}
Code Example #17
File: SafeStackColoring.cpp Project: CSI-LLVM/llvm
void StackColoring::calculateLiveIntervals() {
  for (auto IT : BlockLiveness) {
    BasicBlock *BB = IT.getFirst();
    BlockLifetimeInfo &BlockInfo = IT.getSecond();
    unsigned BBStart, BBEnd;
    std::tie(BBStart, BBEnd) = BlockInstRange[BB];

    BitVector Started, Ended;
    Started.resize(NumAllocas);
    Ended.resize(NumAllocas);
    SmallVector<unsigned, 8> Start;
    Start.resize(NumAllocas);

    // LiveIn ranges start at the first instruction.
    for (unsigned AllocaNo = 0; AllocaNo < NumAllocas; ++AllocaNo) {
      if (BlockInfo.LiveIn.test(AllocaNo)) {
        Started.set(AllocaNo);
        Start[AllocaNo] = BBStart;
      }
    }

    for (auto &It : BBMarkers[BB]) {
      unsigned InstNo = It.first;
      bool IsStart = It.second.IsStart;
      unsigned AllocaNo = It.second.AllocaNo;

      if (IsStart) {
        assert(!Started.test(AllocaNo));
        Started.set(AllocaNo);
        Ended.reset(AllocaNo);
        Start[AllocaNo] = InstNo;
      } else {
        assert(!Ended.test(AllocaNo));
        if (Started.test(AllocaNo)) {
          LiveRanges[AllocaNo].AddRange(Start[AllocaNo], InstNo);
          Started.reset(AllocaNo);
        }
        Ended.set(AllocaNo);
      }
    }

    for (unsigned AllocaNo = 0; AllocaNo < NumAllocas; ++AllocaNo)
      if (Started.test(AllocaNo))
        LiveRanges[AllocaNo].AddRange(Start[AllocaNo], BBEnd);
  }
}
Code Example #18
File: Cpu0MCInstLower.cpp Project: ThomsonTan/lbd
// Lower ".cpload $reg" to
//  "lui   $gp, %hi(_gp_disp)"
//  "addiu $gp, $gp, %lo(_gp_disp)"
//  "addu  $gp, $gp, $t9"
void Cpu0MCInstLower::LowerCPLOAD(SmallVector<MCInst, 4>& MCInsts) {
  MCOperand GPReg = MCOperand::CreateReg(Cpu0::GP);
  MCOperand T9Reg = MCOperand::CreateReg(Cpu0::T9);
  StringRef SymName("_gp_disp");
  const MCSymbol *Sym = Ctx->GetOrCreateSymbol(SymName);
  const MCSymbolRefExpr *MCSym;

  MCSym = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_Cpu0_ABS_HI, *Ctx);
  MCOperand SymHi = MCOperand::CreateExpr(MCSym);
  MCSym = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_Cpu0_ABS_LO, *Ctx);
  MCOperand SymLo = MCOperand::CreateExpr(MCSym);

  MCInsts.resize(3);

  CreateMCInst(MCInsts[0], Cpu0::LUi, GPReg, SymHi);
  CreateMCInst(MCInsts[1], Cpu0::ADDiu, GPReg, GPReg, SymLo);
  CreateMCInst(MCInsts[2], Cpu0::ADD, GPReg, GPReg, T9Reg);
} // lbd document - mark - LowerCPLOAD
Code Example #19
/// ReadProfilingBlock - Read the number of entries in the next profiling data
/// packet and then accumulate the entries into 'Data'.
static void ReadProfilingBlock(const char *ToolName, FILE *F,
                               bool ShouldByteSwap,
                               SmallVector<unsigned, 32> &Data) {
  // Read the number of entries...
  unsigned NumEntries = ReadProfilingNumEntries(ToolName, F, ShouldByteSwap);

  // Read in the data.
  SmallVector<unsigned, 8> TempSpace(NumEntries);
  ReadProfilingData<unsigned>(ToolName, F, TempSpace.data(), NumEntries);

  // Make sure we have enough space ...
  if (Data.size() < NumEntries)
    Data.resize(NumEntries, ProfileDataLoader::Uncounted);

  // Accumulate the data we just read into the existing data.
  for (unsigned i = 0; i < NumEntries; ++i) {
    unsigned Entry = ShouldByteSwap ? ByteSwap_32(TempSpace[i]) : TempSpace[i];
    Data[i] = AddCounts(Entry, Data[i]);
  }
}
Code Example #20
// Lower ".cprestore offset" to "sw $gp, offset($sp)".
void MipsMCInstLower::LowerCPRESTORE(int64_t Offset,
                                     SmallVector<MCInst, 4>& MCInsts) {
  assert(isInt<32>(Offset) && (Offset >= 0) &&
         "Imm operand of .cprestore must be a non-negative 32-bit value.");

  MCOperand SPReg = MCOperand::CreateReg(Mips::SP), BaseReg = SPReg;
  MCOperand GPReg = MCOperand::CreateReg(Mips::GP);

  if (!isInt<16>(Offset)) {
    unsigned Hi = ((Offset + 0x8000) >> 16) & 0xffff;
    Offset &= 0xffff;
    MCOperand ATReg = MCOperand::CreateReg(Mips::AT);
    BaseReg = ATReg;

    // lui   at,hi
    // addu  at,at,sp
    MCInsts.resize(2);
    CreateMCInst(MCInsts[0], Mips::LUi, ATReg, MCOperand::CreateImm(Hi));
    CreateMCInst(MCInsts[1], Mips::ADDu, ATReg, ATReg, SPReg);
  }
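  // Hypothetical completion sketch: the snippet above is truncated before the
  // final store is emitted.  Assuming the CreateMCInst helper and operand
  // order used above (the real source may differ), emit "sw $gp, offset($base)".
  MCInst Sw;
  CreateMCInst(Sw, Mips::SW, GPReg, BaseReg, MCOperand::CreateImm(Offset));
  MCInsts.push_back(Sw);
}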
Code Example #21
File: LinkModules.cpp Project: groue/llvm
/// linkDefinedTypeBodies - Produce a body for an opaque type in the dest
/// module from a type definition in the source module.
void TypeMapTy::linkDefinedTypeBodies() {
  SmallVector<Type*, 16> Elements;
  SmallString<16> TmpName;
  
  // Note that processing entries in this loop (calling 'get') can add new
  // entries to the SrcDefinitionsToResolve vector.
  while (!SrcDefinitionsToResolve.empty()) {
    StructType *SrcSTy = SrcDefinitionsToResolve.pop_back_val();
    StructType *DstSTy = cast<StructType>(MappedTypes[SrcSTy]);
    
    // TypeMap is a many-to-one mapping, if there were multiple types that
    // provide a body for DstSTy then previous iterations of this loop may have
    // already handled it.  Just ignore this case.
    if (!DstSTy->isOpaque()) continue;
    assert(!SrcSTy->isOpaque() && "Not resolving a definition?");
    
    // Map the body of the source type over to a new body for the dest type.
    Elements.resize(SrcSTy->getNumElements());
    for (unsigned i = 0, e = Elements.size(); i != e; ++i)
      Elements[i] = getImpl(SrcSTy->getElementType(i));
    
    DstSTy->setBody(Elements, SrcSTy->isPacked());
    
    // If DstSTy has no name or has a longer name than STy, then viciously steal
    // STy's name.
    if (!SrcSTy->hasName()) continue;
    StringRef SrcName = SrcSTy->getName();
    
    if (!DstSTy->hasName() || DstSTy->getName().size() > SrcName.size()) {
      TmpName.insert(TmpName.end(), SrcName.begin(), SrcName.end());
      SrcSTy->setName("");
      DstSTy->setName(TmpName.str());
      TmpName.clear();
    }
  }
  
  DstResolvedOpaqueTypes.clear();
}
Code Example #22
// binary zip operation, e.g. Plus
// If allowBroadcast then one can be a sub-dimension of the other (if layout then only for rows, otherwise for cols, too).
// This also helpfully resizes the children if not yet sized.
void ComputationNodeBase::ValidateBinaryZip(bool isFinalValidationPass, bool allowBroadcast)
{
    assert(m_inputs.size() == 2);
    ComputationNodeBase::Validate(isFinalValidationPass);
    InferMBLayoutFromInputsForStandardCase(isFinalValidationPass);

    ValidateInferBinaryInputDims();

    if (isFinalValidationPass &&
        Input(0)->GetMBLayout() != Input(1)->GetMBLayout() && Input(0)->HasMBLayout() && Input(1)->HasMBLayout())
    {
        LogicError("%ls: Minibatch layouts are not the same between arguments and might get out of sync during runtime. If this is by design, use ReconcileDynamicAxis() to forward layouts between nodes.", NodeDescription().c_str());
    }

    // result has tensor shape with dimensions being the max over both
    let shape0 = GetInputSampleLayout(0);
    let shape1 = GetInputSampleLayout(1);
    SmallVector<size_t> dims = shape0.GetDims();
    if (shape1.GetRank() > dims.size())
        dims.resize(shape1.GetRank(), 1); // pad with ones

    // If the rank of [0] is higher, then we only need to take the max over the rank of [1].
    // If the rank of [1] is higher, then we have padded [0] to equal length.
    for (size_t k = 0; k < shape1.GetRank(); k++)
    {
        size_t dim1 = shape1[k];
        // BUGBUG: We must consider the allowBroadcast flag here.
        if (dims[k] == 1)                                  // is [0] broadcasting?
            dims[k] = dim1;                                // then use dimension we broadcast to
        else if (dim1 == 1)                                // if [1] is broadcasting
            ;                                              // dims is already correct
        else if (isFinalValidationPass && dim1 != dims[k]) // no broadcasting: they must match
            InvalidArgument("%ls: Input dimensions [%s] and [%s] are not compatible.",
                            NodeDescription().c_str(), string(shape0).c_str(), string(shape1).c_str());
    }

    SetDims(TensorShape(dims), HasMBLayout());
}
Code Example #23
bool Regex::match(StringRef String, SmallVectorImpl<StringRef> *Matches){
  unsigned nmatch = Matches ? preg->re_nsub+1 : 0;

  // pmatch needs to have at least one element.
  SmallVector<llvm_regmatch_t, 8> pm;
  pm.resize(nmatch > 0 ? nmatch : 1);
  pm[0].rm_so = 0;
  pm[0].rm_eo = String.size();

  int rc = llvm_regexec(preg, String.data(), nmatch, pm.data(), REG_STARTEND);

  if (rc == REG_NOMATCH)
    return false;
  if (rc != 0) {
    // regexec can fail due to invalid pattern or running out of memory.
    error = rc;
    return false;
  }

  // There was a match.

  if (Matches) { // match position requested
    Matches->clear();
    
    for (unsigned i = 0; i != nmatch; ++i) {
      if (pm[i].rm_so == -1) {
        // this group didn't match
        Matches->push_back(StringRef());
        continue;
      }
      assert(pm[i].rm_eo >= pm[i].rm_so);
      Matches->push_back(StringRef(String.data()+pm[i].rm_so,
                                   pm[i].rm_eo-pm[i].rm_so));
    }
  }

  return true;
}
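A hypothetical usage sketch (the pattern and input are illustrative, not from the source):

llvm::Regex RE("a(b+)c");
SmallVector<StringRef, 4> Matches;
if (RE.match("xxabbbcyy", &Matches)) {
  // Matches[0] is the whole match, "abbbc"; Matches[1] is group 1, "bbb".
}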
Code Example #24
// Lower ".cprestore offset" to "sw $gp, offset($sp)".
void MipsMCInstLower::LowerCPRESTORE(const MachineInstr *MI,
                                     SmallVector<MCInst, 4>& MCInsts) {
  const MachineOperand &MO = MI->getOperand(0);
  assert(MO.isImm() && "CPRESTORE's operand must be an immediate.");
  unsigned Offset = MO.getImm(), Reg = Mips::SP;
  MCInst Sw;

  if (Offset >= 0x8000) {
    unsigned Hi = (Offset >> 16) + ((Offset & 0x8000) != 0); 
    Offset &= 0xffff;
    Reg = Mips::AT;

    // lui   at,hi
    // addu  at,at,sp
    MCInsts.resize(2);
    MCInsts[0].setOpcode(Mips::LUi);
    MCInsts[0].addOperand(MCOperand::CreateReg(Mips::AT));
    MCInsts[0].addOperand(MCOperand::CreateImm(Hi));
    MCInsts[1].setOpcode(Mips::ADDu);
    MCInsts[1].addOperand(MCOperand::CreateReg(Mips::AT));
    MCInsts[1].addOperand(MCOperand::CreateReg(Mips::AT));
    MCInsts[1].addOperand(MCOperand::CreateReg(Mips::SP));
  }
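  // Hypothetical completion sketch: the snippet above is truncated before the
  // final store, "sw $gp, offset($reg)", is emitted (the real source may differ).
  Sw.setOpcode(Mips::SW);
  Sw.addOperand(MCOperand::CreateReg(Mips::GP));
  Sw.addOperand(MCOperand::CreateReg(Reg));
  Sw.addOperand(MCOperand::CreateImm(Offset));
  MCInsts.push_back(Sw);
}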
Code Example #25
File: Cpu0MCInstLower.cpp Project: Kobe771/lbd
// Lower ".cprestore offset" to "st $gp, offset($sp)".
void Cpu0MCInstLower::LowerCPRESTORE(int64_t Offset,
                                     SmallVector<MCInst, 4>& MCInsts) {
  assert(isInt<32>(Offset) && (Offset >= 0) &&
         "Imm operand of .cprestore must be a non-negative 32-bit value.");

  MCOperand SPReg = MCOperand::CreateReg(Cpu0::SP), BaseReg = SPReg;
  MCOperand GPReg = MCOperand::CreateReg(Cpu0::GP);
  MCOperand ZEROReg = MCOperand::CreateReg(Cpu0::ZERO);

  if (!isInt<16>(Offset)) {
    unsigned Hi = ((Offset + 0x8000) >> 16) & 0xffff;
    Offset &= 0xffff;
    MCOperand ATReg = MCOperand::CreateReg(Cpu0::AT);
    BaseReg = ATReg;

    // addiu   at,zero,hi
    // shl     at,at,16
    // add     at,at,sp
    MCInsts.resize(3);
    CreateMCInst(MCInsts[0], Cpu0::ADDiu, ATReg, ZEROReg, MCOperand::CreateImm(Hi));
    CreateMCInst(MCInsts[1], Cpu0::SHL, ATReg, ATReg, MCOperand::CreateImm(16));
    CreateMCInst(MCInsts[2], Cpu0::ADD, ATReg, ATReg, SPReg);
  }
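  // Hypothetical completion sketch: the snippet above is truncated before the
  // final store is emitted.  Assuming the CreateMCInst helper, a Cpu0::ST
  // opcode, and the operand order used above (the real source may differ),
  // emit "st $gp, offset($base)".
  MCInst St;
  CreateMCInst(St, Cpu0::ST, GPReg, BaseReg, MCOperand::CreateImm(Offset));
  MCInsts.push_back(St);
}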
Code Example #26
QualType
ClassTemplateDecl::getInjectedClassNameSpecialization() {
  Common *CommonPtr = getCommonPtr();
  if (!CommonPtr->InjectedClassNameType.isNull())
    return CommonPtr->InjectedClassNameType;

  // C++0x [temp.dep.type]p2:
  //  The template argument list of a primary template is a template argument 
  //  list in which the nth template argument has the value of the nth template
  //  parameter of the class template. If the nth template parameter is a 
  //  template parameter pack (14.5.3), the nth template argument is a pack 
  //  expansion (14.5.3) whose pattern is the name of the template parameter 
  //  pack.
  ASTContext &Context = getASTContext();
  TemplateParameterList *Params = getTemplateParameters();
  SmallVector<TemplateArgument, 16> TemplateArgs;
  TemplateArgs.resize(Params->size());
  GenerateInjectedTemplateArgs(getASTContext(), Params, TemplateArgs.data());
  CommonPtr->InjectedClassNameType
    = Context.getTemplateSpecializationType(TemplateName(this),
                                            &TemplateArgs[0],
                                            TemplateArgs.size());
  return CommonPtr->InjectedClassNameType;
}
Code Example #27
File: StackColoring.cpp Project: erikjv/llvm
void StackColoring::calculateLiveIntervals(unsigned NumSlots) {
  SmallVector<SlotIndex, 16> Starts;
  SmallVector<SlotIndex, 16> Finishes;

  // For each block, find which slots are active within this block
  // and update the live intervals.
  for (MachineFunction::iterator MBB = MF->begin(), MBBe = MF->end();
       MBB != MBBe; ++MBB) {
    Starts.clear();
    Starts.resize(NumSlots);
    Finishes.clear();
    Finishes.resize(NumSlots);

    // Create the interval for the basic blocks with lifetime markers in them.
    for (SmallVectorImpl<MachineInstr*>::const_iterator it = Markers.begin(),
         e = Markers.end(); it != e; ++it) {
      const MachineInstr *MI = *it;
      if (MI->getParent() != MBB)
        continue;

      assert((MI->getOpcode() == TargetOpcode::LIFETIME_START ||
              MI->getOpcode() == TargetOpcode::LIFETIME_END) &&
             "Invalid Lifetime marker");

      bool IsStart = MI->getOpcode() == TargetOpcode::LIFETIME_START;
      const MachineOperand &Mo = MI->getOperand(0);
      int Slot = Mo.getIndex();
      assert(Slot >= 0 && "Invalid slot");

      SlotIndex ThisIndex = Indexes->getInstructionIndex(MI);

      if (IsStart) {
        if (!Starts[Slot].isValid() || Starts[Slot] > ThisIndex)
          Starts[Slot] = ThisIndex;
      } else {
        if (!Finishes[Slot].isValid() || Finishes[Slot] < ThisIndex)
          Finishes[Slot] = ThisIndex;
      }
    }

    // Create the interval of the blocks that we previously found to be 'alive'.
    BlockLifetimeInfo &MBBLiveness = BlockLiveness[MBB];
    for (int pos = MBBLiveness.LiveIn.find_first(); pos != -1;
         pos = MBBLiveness.LiveIn.find_next(pos)) {
      Starts[pos] = Indexes->getMBBStartIdx(MBB);
    }
    for (int pos = MBBLiveness.LiveOut.find_first(); pos != -1;
         pos = MBBLiveness.LiveOut.find_next(pos)) {
      Finishes[pos] = Indexes->getMBBEndIdx(MBB);
    }

    for (unsigned i = 0; i < NumSlots; ++i) {
      assert(Starts[i].isValid() == Finishes[i].isValid() && "Unmatched range");
      if (!Starts[i].isValid())
        continue;

      assert(Starts[i] && Finishes[i] && "Invalid interval");
      VNInfo *ValNum = Intervals[i]->getValNumInfo(0);
      SlotIndex S = Starts[i];
      SlotIndex F = Finishes[i];
      if (S < F) {
        // We have a single consecutive region.
        Intervals[i]->addSegment(LiveInterval::Segment(S, F, ValNum));
      } else {
        // We have two non-consecutive regions. This happens when
        // LIFETIME_START appears after the LIFETIME_END marker.
        SlotIndex NewStart = Indexes->getMBBStartIdx(MBB);
        SlotIndex NewFin = Indexes->getMBBEndIdx(MBB);
        Intervals[i]->addSegment(LiveInterval::Segment(NewStart, F, ValNum));
        Intervals[i]->addSegment(LiveInterval::Segment(S, NewFin, ValNum));
      }
    }
  }
}
Code Example #28
void MatcherGen::
EmitResultInstructionAsOperand(const TreePatternNode *N,
                               SmallVectorImpl<unsigned> &OutputOps) {
  Record *Op = N->getOperator();
  const CodeGenTarget &CGT = CGP.getTargetInfo();
  CodeGenInstruction &II = CGT.getInstruction(Op);
  const DAGInstruction &Inst = CGP.getInstruction(Op);

  bool isRoot = N == Pattern.getDstPattern();

  // TreeHasOutGlue - True if this tree has glue.
  bool TreeHasInGlue = false, TreeHasOutGlue = false;
  if (isRoot) {
    const TreePatternNode *SrcPat = Pattern.getSrcPattern();
    TreeHasInGlue = SrcPat->TreeHasProperty(SDNPOptInGlue, CGP) ||
                    SrcPat->TreeHasProperty(SDNPInGlue, CGP);

    // FIXME2: this is checking the entire pattern, not just the node in
    // question, doing this just for the root seems like a total hack.
    TreeHasOutGlue = SrcPat->TreeHasProperty(SDNPOutGlue, CGP);
  }

  // NumResults - This is the number of results produced by the instruction in
  // the "outs" list.
  unsigned NumResults = Inst.getNumResults();

  // Number of operands we know the output instruction must have. If it is
  // variadic, we could have more operands.
  unsigned NumFixedOperands = II.Operands.size();

  SmallVector<unsigned, 8> InstOps;

  // Loop over all of the fixed operands of the instruction pattern, emitting
  // code to fill them all in. The node 'N' usually has number children equal to
  // the number of input operands of the instruction.  However, in cases where
  // there are predicate operands for an instruction, we need to fill in the
  // 'execute always' values. Match up the node operands to the instruction
  // operands to do this.
  unsigned ChildNo = 0;
  for (unsigned InstOpNo = NumResults, e = NumFixedOperands;
       InstOpNo != e; ++InstOpNo) {
    // Determine what to emit for this operand.
    Record *OperandNode = II.Operands[InstOpNo].Rec;
    if (OperandNode->isSubClassOf("OperandWithDefaultOps") &&
        !CGP.getDefaultOperand(OperandNode).DefaultOps.empty()) {
      // This is a predicate or optional def operand; emit the
      // 'default ops' operands.
      const DAGDefaultOperand &DefaultOp
        = CGP.getDefaultOperand(OperandNode);
      for (unsigned i = 0, e = DefaultOp.DefaultOps.size(); i != e; ++i)
        EmitResultOperand(DefaultOp.DefaultOps[i].get(), InstOps);
      continue;
    }

    // Otherwise this is a normal operand or a predicate operand without
    // 'execute always'; emit it.

    // For operands with multiple sub-operands we may need to emit
    // multiple child patterns to cover them all.  However, ComplexPattern
    // children may themselves emit multiple MI operands.
    unsigned NumSubOps = 1;
    if (OperandNode->isSubClassOf("Operand")) {
      DagInit *MIOpInfo = OperandNode->getValueAsDag("MIOperandInfo");
      if (unsigned NumArgs = MIOpInfo->getNumArgs())
        NumSubOps = NumArgs;
    }

    unsigned FinalNumOps = InstOps.size() + NumSubOps;
    while (InstOps.size() < FinalNumOps) {
      const TreePatternNode *Child = N->getChild(ChildNo);
      unsigned BeforeAddingNumOps = InstOps.size();
      EmitResultOperand(Child, InstOps);
      assert(InstOps.size() > BeforeAddingNumOps && "Didn't add any operands");

      // If the operand is an instruction and it produced multiple results, just
      // take the first one.
      if (!Child->isLeaf() && Child->getOperator()->isSubClassOf("Instruction"))
        InstOps.resize(BeforeAddingNumOps+1);

      ++ChildNo;
    }
  }

  // If this is a variadic output instruction (i.e. REG_SEQUENCE), we can't
  // expand suboperands, use default operands, or other features determined from
  // the CodeGenInstruction after the fixed operands, which were handled
  // above. Emit the remaining instructions implicitly added by the use for
  // variable_ops.
  if (II.Operands.isVariadic) {
    for (unsigned I = ChildNo, E = N->getNumChildren(); I < E; ++I)
      EmitResultOperand(N->getChild(I), InstOps);
  }

  // If this node has input glue or explicitly specified input physregs, we
  // need to add chained and glued copyfromreg nodes and materialize the glue
  // input.
  if (isRoot && !PhysRegInputs.empty()) {
    // Emit all of the CopyToReg nodes for the input physical registers.  These
    // occur in patterns like (mul:i8 AL:i8, GR8:i8:$src).
    for (unsigned i = 0, e = PhysRegInputs.size(); i != e; ++i)
      AddMatcher(new EmitCopyToRegMatcher(PhysRegInputs[i].second,
                                          PhysRegInputs[i].first));
    // Even if the node has no other glue inputs, the resultant node must be
    // glued to the CopyFromReg nodes we just generated.
    TreeHasInGlue = true;
  }

  // Result order: node results, chain, glue

  // Determine the result types.
  SmallVector<MVT::SimpleValueType, 4> ResultVTs;
  for (unsigned i = 0, e = N->getNumTypes(); i != e; ++i)
    ResultVTs.push_back(N->getSimpleType(i));

  // If this is the root instruction of a pattern that has physical registers in
  // its result pattern, add output VTs for them.  For example, X86 has:
  //   (set AL, (mul ...))
  // This also handles implicit results like:
  //   (implicit EFLAGS)
  if (isRoot && !Pattern.getDstRegs().empty()) {
    // If the root came from an implicit def in the instruction handling stuff,
    // don't re-add it.
    Record *HandledReg = nullptr;
    if (II.HasOneImplicitDefWithKnownVT(CGT) != MVT::Other)
      HandledReg = II.ImplicitDefs[0];

    for (Record *Reg : Pattern.getDstRegs()) {
      if (!Reg->isSubClassOf("Register") || Reg == HandledReg) continue;
      ResultVTs.push_back(getRegisterValueType(Reg, CGT));
    }
  }

  // If this is the root of the pattern and the pattern we're matching includes
  // a node that is variadic, mark the generated node as variadic so that it
  // gets the excess operands from the input DAG.
  int NumFixedArityOperands = -1;
  if (isRoot &&
      Pattern.getSrcPattern()->NodeHasProperty(SDNPVariadic, CGP))
    NumFixedArityOperands = Pattern.getSrcPattern()->getNumChildren();

  // If this is the root node and multiple matched nodes in the input pattern
  // have MemRefs in them, have the interpreter collect them and plop them onto
  // this node. If there is just one node with MemRefs, leave them on that node
  // even if it is not the root.
  //
  // FIXME3: This is actively incorrect for result patterns with multiple
  // memory-referencing instructions.
  bool PatternHasMemOperands =
    Pattern.getSrcPattern()->TreeHasProperty(SDNPMemOperand, CGP);

  bool NodeHasMemRefs = false;
  if (PatternHasMemOperands) {
    unsigned NumNodesThatLoadOrStore =
      numNodesThatMayLoadOrStore(Pattern.getDstPattern(), CGP);
    bool NodeIsUniqueLoadOrStore = mayInstNodeLoadOrStore(N, CGP) &&
                                   NumNodesThatLoadOrStore == 1;
    NodeHasMemRefs =
      NodeIsUniqueLoadOrStore || (isRoot && (mayInstNodeLoadOrStore(N, CGP) ||
                                             NumNodesThatLoadOrStore != 1));
  }

  // Determine whether we need to attach a chain to this node.
  bool NodeHasChain = false;
  if (Pattern.getSrcPattern()->TreeHasProperty(SDNPHasChain, CGP)) {
    // For some instructions, we were able to infer from the pattern whether
    // they should have a chain.  Otherwise, attach the chain to the root.
    //
    // FIXME2: This is extremely dubious for several reasons, not the least of
    // which it gives special status to instructions with patterns that Pat<>
    // nodes can't duplicate.
    if (II.hasChain_Inferred)
      NodeHasChain = II.hasChain;
    else
      NodeHasChain = isRoot;
    // Instructions which load and store from memory should have a chain,
    // regardless of whether they happen to have a pattern saying so.
    if (II.hasCtrlDep || II.mayLoad || II.mayStore || II.canFoldAsLoad ||
        II.hasSideEffects)
      NodeHasChain = true;
  }

  assert((!ResultVTs.empty() || TreeHasOutGlue || NodeHasChain) &&
         "Node has no result");

  AddMatcher(new EmitNodeMatcher(II.Namespace.str()+"::"+II.TheDef->getName().str(),
                                 ResultVTs, InstOps,
                                 NodeHasChain, TreeHasInGlue, TreeHasOutGlue,
                                 NodeHasMemRefs, NumFixedArityOperands,
                                 NextRecordedOperandNo));

  // The non-chain and non-glue results of the newly emitted node get recorded.
  for (unsigned i = 0, e = ResultVTs.size(); i != e; ++i) {
    if (ResultVTs[i] == MVT::Other || ResultVTs[i] == MVT::Glue) break;
    OutputOps.push_back(NextRecordedOperandNo++);
  }
}
Code Example #29
File: PredictableMemOpt.cpp Project: asdfeng/swift
/// At this point, we know that this element satisfies the definitive init
/// requirements, so we can try to promote loads to enable SSA-based dataflow
/// analysis.  We know that accesses to this element only access this element,
/// cross element accesses have been scalarized.
///
/// This returns true if the load has been removed from the program.
///
bool AllocOptimize::promoteLoad(SILInstruction *Inst) {
  // Note that we intentionally don't support forwarding of weak pointers,
  // because the underlying value may be deallocated at any time.  We would
  // have to prove that something in this function is holding the weak value
  // live across the promoted region, and that isn't desired for a stable
  // diagnostics pass like this one.
  
  // We only handle load and copy_addr right now.
  if (auto CAI = dyn_cast<CopyAddrInst>(Inst)) {
    // If this is a CopyAddr, verify that the element type is loadable.  If not,
    // we can't explode to a load.
    if (!CAI->getSrc().getType().isLoadable(Module))
      return false;
  } else if (!isa<LoadInst>(Inst))
    return false;
  
  // If the box has escaped at this instruction, we can't safely promote the
  // load.
  if (hasEscapedAt(Inst))
    return false;
  
  SILType LoadTy = Inst->getOperand(0).getType().getObjectType();
  
  // If this is a load/copy_addr from a struct field that we want to promote,
  // compute the access path down to the field so we can determine precise
  // def/use behavior.
  unsigned FirstElt = computeSubelement(Inst->getOperand(0), TheMemory);
  
  // If this is a load from within an enum projection, we can't promote it since
  // we don't track subelements in a type that could be changing.
  if (FirstElt == ~0U)
    return false;
  
  unsigned NumLoadSubElements = getNumSubElements(LoadTy, Module);
  
  // Set up the bitvector of elements being demanded by the load.
  llvm::SmallBitVector RequiredElts(NumMemorySubElements);
  RequiredElts.set(FirstElt, FirstElt+NumLoadSubElements);
  
  SmallVector<std::pair<SILValue, unsigned>, 8> AvailableValues;
  AvailableValues.resize(NumMemorySubElements);
  
  // Find out if we have any available values.  If no bits are demanded, we
  // trivially succeed. This can happen when there is a load of an empty struct.
  if (NumLoadSubElements != 0) {
    computeAvailableValues(Inst, RequiredElts, AvailableValues);
    
    // If there are no values available at this load point, then we fail to
    // promote this load and there is nothing to do.
    bool AnyAvailable = false;
    for (unsigned i = FirstElt, e = i+NumLoadSubElements; i != e; ++i)
      if (AvailableValues[i].first.isValid()) {
        AnyAvailable = true;
        break;
      }
    
    if (!AnyAvailable)
      return false;
  }
  
  // Ok, we have some available values.  If we have a copy_addr, explode it now,
  // exposing the load operation within it.  Subsequent optimization passes will
  // see the load and propagate the available values into it.
  if (auto *CAI = dyn_cast<CopyAddrInst>(Inst)) {
    explodeCopyAddr(CAI);
    
    // This is removing the copy_addr, but explodeCopyAddr takes care of
    // removing the instruction from Uses for us, so we return false.
    return false;
  }
  
  // Aggregate together all of the subelements into something that has the same
  // type as the load did, and emit (smaller) loads for any subelements that were
  // not available.
  auto NewVal = AggregateAvailableValues(Inst, LoadTy, Inst->getOperand(0),
                                         AvailableValues, FirstElt);
  
  ++NumLoadPromoted;
  
  // Simply replace the load.
  assert(isa<LoadInst>(Inst));
  DEBUG(llvm::dbgs() << "  *** Promoting load: " << *Inst << "\n");
  DEBUG(llvm::dbgs() << "      To value: " << *NewVal.getDef() << "\n");
  
  SILValue(Inst, 0).replaceAllUsesWith(NewVal);
  SILValue Addr = Inst->getOperand(0);
  Inst->eraseFromParent();
  if (auto *AddrI = dyn_cast<SILInstruction>(Addr))
    recursivelyDeleteTriviallyDeadInstructions(AddrI);
  return true;
}
Code Example #30
void MatcherGen::
EmitResultInstructionAsOperand(const TreePatternNode *N,
                               SmallVectorImpl<unsigned> &OutputOps) {
  Record *Op = N->getOperator();
  const CodeGenTarget &CGT = CGP.getTargetInfo();
  CodeGenInstruction &II = CGT.getInstruction(Op);
  const DAGInstruction &Inst = CGP.getInstruction(Op);

  // If we can, get the pattern for the instruction we're generating.  We derive
  // a variety of information from this pattern, such as whether it has a chain.
  //
  // FIXME2: This is extremely dubious for several reasons, not the least of
  // which it gives special status to instructions with patterns that Pat<>
  // nodes can't duplicate.
  const TreePatternNode *InstPatNode = GetInstPatternNode(Inst, N);

  // NodeHasChain - Whether the instruction node we're creating takes chains.
  bool NodeHasChain = InstPatNode &&
                      InstPatNode->TreeHasProperty(SDNPHasChain, CGP);

  bool isRoot = N == Pattern.getDstPattern();

  // TreeHasOutGlue - True if this tree has glue.
  bool TreeHasInGlue = false, TreeHasOutGlue = false;
  if (isRoot) {
    const TreePatternNode *SrcPat = Pattern.getSrcPattern();
    TreeHasInGlue = SrcPat->TreeHasProperty(SDNPOptInGlue, CGP) ||
                    SrcPat->TreeHasProperty(SDNPInGlue, CGP);

    // FIXME2: this is checking the entire pattern, not just the node in
    // question, doing this just for the root seems like a total hack.
    TreeHasOutGlue = SrcPat->TreeHasProperty(SDNPOutGlue, CGP);
  }

  // NumResults - This is the number of results produced by the instruction in
  // the "outs" list.
  unsigned NumResults = Inst.getNumResults();

  // Loop over all of the operands of the instruction pattern, emitting code
  // to fill them all in.  The node 'N' usually has number children equal to
  // the number of input operands of the instruction.  However, in cases
  // where there are predicate operands for an instruction, we need to fill
  // in the 'execute always' values.  Match up the node operands to the
  // instruction operands to do this.
  SmallVector<unsigned, 8> InstOps;
  for (unsigned ChildNo = 0, InstOpNo = NumResults, e = II.Operands.size();
       InstOpNo != e; ++InstOpNo) {

    // Determine what to emit for this operand.
    Record *OperandNode = II.Operands[InstOpNo].Rec;
    if ((OperandNode->isSubClassOf("PredicateOperand") ||
         OperandNode->isSubClassOf("OptionalDefOperand")) &&
        !CGP.getDefaultOperand(OperandNode).DefaultOps.empty()) {
      // This is a predicate or optional def operand; emit the
      // 'default ops' operands.
      const DAGDefaultOperand &DefaultOp
        = CGP.getDefaultOperand(OperandNode);
      for (unsigned i = 0, e = DefaultOp.DefaultOps.size(); i != e; ++i)
        EmitResultOperand(DefaultOp.DefaultOps[i], InstOps);
      continue;
    }

    const TreePatternNode *Child = N->getChild(ChildNo);

    // Otherwise this is a normal operand or a predicate operand without
    // 'execute always'; emit it.
    unsigned BeforeAddingNumOps = InstOps.size();
    EmitResultOperand(Child, InstOps);
    assert(InstOps.size() > BeforeAddingNumOps && "Didn't add any operands");

    // If the operand is an instruction and it produced multiple results, just
    // take the first one.
    if (!Child->isLeaf() && Child->getOperator()->isSubClassOf("Instruction"))
      InstOps.resize(BeforeAddingNumOps+1);

    ++ChildNo;
  }

  // If this node has input glue or explicitly specified input physregs, we
  // need to add chained and glued copyfromreg nodes and materialize the glue
  // input.
  if (isRoot && !PhysRegInputs.empty()) {
    // Emit all of the CopyToReg nodes for the input physical registers.  These
    // occur in patterns like (mul:i8 AL:i8, GR8:i8:$src).
    for (unsigned i = 0, e = PhysRegInputs.size(); i != e; ++i)
      AddMatcher(new EmitCopyToRegMatcher(PhysRegInputs[i].second,
                                          PhysRegInputs[i].first));
    // Even if the node has no other glue inputs, the resultant node must be
    // glued to the CopyFromReg nodes we just generated.
    TreeHasInGlue = true;
  }

  // Result order: node results, chain, glue

  // Determine the result types.
  SmallVector<MVT::SimpleValueType, 4> ResultVTs;
  for (unsigned i = 0, e = N->getNumTypes(); i != e; ++i)
    ResultVTs.push_back(N->getType(i));

  // If this is the root instruction of a pattern that has physical registers in
  // its result pattern, add output VTs for them.  For example, X86 has:
  //   (set AL, (mul ...))
  // This also handles implicit results like:
  //   (implicit EFLAGS)
  if (isRoot && !Pattern.getDstRegs().empty()) {
    // If the root came from an implicit def in the instruction handling stuff,
    // don't re-add it.
    Record *HandledReg = 0;
    if (II.HasOneImplicitDefWithKnownVT(CGT) != MVT::Other)
      HandledReg = II.ImplicitDefs[0];

    for (unsigned i = 0; i != Pattern.getDstRegs().size(); ++i) {
      Record *Reg = Pattern.getDstRegs()[i];
      if (!Reg->isSubClassOf("Register") || Reg == HandledReg) continue;
      ResultVTs.push_back(getRegisterValueType(Reg, CGT));
    }
  }

  // If this is the root of the pattern and the pattern we're matching includes
  // a node that is variadic, mark the generated node as variadic so that it
  // gets the excess operands from the input DAG.
  int NumFixedArityOperands = -1;
  if (isRoot &&
      (Pattern.getSrcPattern()->NodeHasProperty(SDNPVariadic, CGP)))
    NumFixedArityOperands = Pattern.getSrcPattern()->getNumChildren();

  // If this is the root node and multiple matched nodes in the input pattern
  // have MemRefs in them, have the interpreter collect them and plop them onto
  // this node. If there is just one node with MemRefs, leave them on that node
  // even if it is not the root.
  //
  // FIXME3: This is actively incorrect for result patterns with multiple
  // memory-referencing instructions.
  bool PatternHasMemOperands =
    Pattern.getSrcPattern()->TreeHasProperty(SDNPMemOperand, CGP);

  bool NodeHasMemRefs = false;
  if (PatternHasMemOperands) {
    unsigned NumNodesThatLoadOrStore =
      numNodesThatMayLoadOrStore(Pattern.getDstPattern(), CGP);
    bool NodeIsUniqueLoadOrStore = mayInstNodeLoadOrStore(N, CGP) &&
                                   NumNodesThatLoadOrStore == 1;
    NodeHasMemRefs =
      NodeIsUniqueLoadOrStore || (isRoot && (mayInstNodeLoadOrStore(N, CGP) ||
                                             NumNodesThatLoadOrStore != 1));
  }

  assert((!ResultVTs.empty() || TreeHasOutGlue || NodeHasChain) &&
         "Node has no result");

  AddMatcher(new EmitNodeMatcher(II.Namespace+"::"+II.TheDef->getName(),
                                 ResultVTs.data(), ResultVTs.size(),
                                 InstOps.data(), InstOps.size(),
                                 NodeHasChain, TreeHasInGlue, TreeHasOutGlue,
                                 NodeHasMemRefs, NumFixedArityOperands,
                                 NextRecordedOperandNo));

  // The non-chain and non-glue results of the newly emitted node get recorded.
  for (unsigned i = 0, e = ResultVTs.size(); i != e; ++i) {
    if (ResultVTs[i] == MVT::Other || ResultVTs[i] == MVT::Glue) break;
    OutputOps.push_back(NextRecordedOperandNo++);
  }
}