Code example #1
File: Value.cpp Project: CAFxX/llvm-1
unsigned Value::getPointerDereferenceableBytes(bool &CanBeNull) const {
  assert(getType()->isPointerTy() && "must be pointer");

  unsigned DerefBytes = 0;
  CanBeNull = false;
  if (const Argument *A = dyn_cast<Argument>(this)) {
    DerefBytes = A->getDereferenceableBytes();
    if (DerefBytes == 0) {
      DerefBytes = A->getDereferenceableOrNullBytes();
      CanBeNull = true;
    }
  } else if (auto CS = ImmutableCallSite(this)) {
    DerefBytes = CS.getDereferenceableBytes(0);
    if (DerefBytes == 0) {
      DerefBytes = CS.getDereferenceableOrNullBytes(0);
      CanBeNull = true;
    }
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      DerefBytes = CI->getLimitedValue();
    }
    if (DerefBytes == 0) {
      if (MDNode *MD =
              LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
        DerefBytes = CI->getLimitedValue();
      }
      CanBeNull = true;
    }
  }
  return DerefBytes;
}
Code example #2
File: Value.cpp Project: crabtw/llvm
uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
                                               bool &CanBeNull) const {
  assert(getType()->isPointerTy() && "must be pointer");

  uint64_t DerefBytes = 0;
  CanBeNull = false;
  if (const Argument *A = dyn_cast<Argument>(this)) {
    DerefBytes = A->getDereferenceableBytes();
    if (DerefBytes == 0 && (A->hasByValAttr() || A->hasStructRetAttr())) {
      Type *PT = cast<PointerType>(A->getType())->getElementType();
      if (PT->isSized())
        DerefBytes = DL.getTypeStoreSize(PT);
    }
    if (DerefBytes == 0) {
      DerefBytes = A->getDereferenceableOrNullBytes();
      CanBeNull = true;
    }
  } else if (auto CS = ImmutableCallSite(this)) {
    DerefBytes = CS.getDereferenceableBytes(AttributeList::ReturnIndex);
    if (DerefBytes == 0) {
      DerefBytes = CS.getDereferenceableOrNullBytes(AttributeList::ReturnIndex);
      CanBeNull = true;
    }
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      DerefBytes = CI->getLimitedValue();
    }
    if (DerefBytes == 0) {
      if (MDNode *MD =
              LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
        DerefBytes = CI->getLimitedValue();
      }
      CanBeNull = true;
    }
  } else if (auto *AI = dyn_cast<AllocaInst>(this)) {
    if (!AI->isArrayAllocation()) {
      DerefBytes = DL.getTypeStoreSize(AI->getAllocatedType());
      CanBeNull = false;
    }
  } else if (auto *GV = dyn_cast<GlobalVariable>(this)) {
    if (GV->getValueType()->isSized() && !GV->hasExternalWeakLinkage()) {
      // TODO: Don't outright reject hasExternalWeakLinkage but set the
      // CanBeNull flag.
      DerefBytes = DL.getTypeStoreSize(GV->getValueType());
      CanBeNull = false;
    }
  }
  return DerefBytes;
}
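A minimal caller sketch for the DataLayout-aware overload above (hedged: V, Ty, and DL stand in for a pointer-typed value, the type being loaded, and the module's DataLayout; the signature matches the version quoted here):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"

// Sketch: true when a load of Ty through V is known not to trap for lack of
// dereferenceable bytes. Alignment is deliberately not checked here.
static bool knownDereferenceableFor(const llvm::Value *V, llvm::Type *Ty,
                                    const llvm::DataLayout &DL) {
  bool CanBeNull = false;
  uint64_t DerefBytes = V->getPointerDereferenceableBytes(DL, CanBeNull);
  return DerefBytes >= DL.getTypeStoreSize(Ty) && !CanBeNull;
}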
Code example #3
// Propagate existing explicit probabilities from either profile data or
// 'expect' intrinsic processing.
bool BranchProbabilityAnalysis::calcMetadataWeights(BasicBlock *BB) {
  TerminatorInst *TI = BB->getTerminator();
  if (TI->getNumSuccessors() == 1)
    return false;
  if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
    return false;

  MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
  if (!WeightsNode)
    return false;

  // Ensure there are weights for all of the successors. Note that the first
  // operand to the metadata node is a name, not a weight.
  if (WeightsNode->getNumOperands() != TI->getNumSuccessors() + 1)
    return false;

  // Build up the final weights that will be used in a temporary buffer, but
  // don't add them until all weights are present. Each weight value is clamped
  // to [1, getMaxWeightFor(BB)].
  uint32_t WeightLimit = getMaxWeightFor(BB);
  SmallVector<uint32_t, 2> Weights;
  Weights.reserve(TI->getNumSuccessors());
  for (unsigned i = 1, e = WeightsNode->getNumOperands(); i != e; ++i) {
    ConstantInt *Weight = dyn_cast<ConstantInt>(WeightsNode->getOperand(i));
    if (!Weight)
      return false;
    Weights.push_back(
      std::max<uint32_t>(1, Weight->getLimitedValue(WeightLimit)));
  }
  assert(Weights.size() == TI->getNumSuccessors() && "Checked above");
  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
    BP->setEdgeWeight(BB, TI->getSuccessor(i), Weights[i]);

  return true;
}
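For context, a hedged sketch of how the !prof branch-weights metadata consumed above is typically attached; the helper name and the concrete weights are placeholders:

#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"

// Sketch: attach the kind of !prof node read above. Operand 0 of the node is the
// "branch_weights" name string; the remaining operands carry one weight per
// successor, which is exactly what the operand-count check above verifies.
static void attachBranchWeights(llvm::BranchInst *Br, uint32_t Taken,
                                uint32_t NotTaken) {
  llvm::MDBuilder MDB(Br->getContext());
  Br->setMetadata(llvm::LLVMContext::MD_prof,
                  MDB.createBranchWeights(Taken, NotTaken));
}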
Code example #4
File: IRParser.cpp Project: orcc/jade
CSDFMoC* IRParser::parseCSDF(MDNode* csdfNode){
    CSDFMoC* csfMoC;

    //Get number of phases
    ConstantInt* value = cast<ConstantInt>(csdfNode->getOperand(0));
    int phasesNb = value->getValue().getLimitedValue();

    if (phasesNb == 1){
        csfMoC = new SDFMoC(actor);
    }else{
        csfMoC = new CSDFMoC(actor);
    }

    // Parse patterns
    Pattern* ip = parsePattern(inputs, csdfNode->getOperand(1));
    Pattern* op = parsePattern(outputs, csdfNode->getOperand(2));

    csfMoC->setInputPattern(ip);
    csfMoC->setOutputPattern(op);
    csfMoC->setNumberOfPhases(value->getLimitedValue());

    // Parse actions
    parseCSDFActions(cast<MDNode>(csdfNode->getOperand(3)), csfMoC);

    return csfMoC;
}
Code example #5
File: IRParser.cpp Project: orcc/jade
Type* IRParser::parseType(MDNode* node){

    //Get size of the element
    ConstantInt* typeSize = cast<ConstantInt>(node->getOperand(0));
    Type* type = (Type*)IntegerType::get(Context, typeSize->getLimitedValue());

    Value* val = node->getNumOperands() > 1 ? node->getOperand(1) : NULL;

    //if operand 2 is not null, the element is a list
    if (val != NULL){
        for (unsigned i = 1 ; i != node->getNumOperands(); ++i){
            ConstantInt* size = cast<ConstantInt>(node->getOperand(i));
            type = ArrayType::get(type, size->getLimitedValue());
        }
    }

    return type;
}
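Examples #4 and #5 come from an older LLVM (pre-3.6) in which MDNode operands were still Values, so cast<ConstantInt>(node->getOperand(0)) compiles directly. On newer LLVM the operands are Metadata, and the mdconst helpers already seen in examples #1, #2 and #6 are needed. A hedged sketch of the equivalent size extraction on such a version:

#include "llvm/IR/Constants.h"
#include "llvm/IR/Metadata.h"

// Sketch: read the size stored in the first metadata operand when operands are
// Metadata rather than Value (LLVM 3.6 and later).
static uint64_t readSizeOperand(const llvm::MDNode *node) {
  if (auto *CI = llvm::mdconst::dyn_extract<llvm::ConstantInt>(node->getOperand(0)))
    return CI->getLimitedValue();
  return 0; // the operand did not wrap a ConstantInt
}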
Code example #6
File: Loads.cpp Project: AlexDenisov/llvm
static bool isDereferenceableFromAttribute(const Value *BV, APInt Offset,
        Type *Ty, const DataLayout &DL,
        const Instruction *CtxI,
        const DominatorTree *DT,
        const TargetLibraryInfo *TLI) {
    assert(Offset.isNonNegative() && "offset can't be negative");
    assert(Ty->isSized() && "must be sized");

    APInt DerefBytes(Offset.getBitWidth(), 0);
    bool CheckForNonNull = false;
    if (const Argument *A = dyn_cast<Argument>(BV)) {
        DerefBytes = A->getDereferenceableBytes();
        if (!DerefBytes.getBoolValue()) {
            DerefBytes = A->getDereferenceableOrNullBytes();
            CheckForNonNull = true;
        }
    } else if (auto CS = ImmutableCallSite(BV)) {
        DerefBytes = CS.getDereferenceableBytes(0);
        if (!DerefBytes.getBoolValue()) {
            DerefBytes = CS.getDereferenceableOrNullBytes(0);
            CheckForNonNull = true;
        }
    } else if (const LoadInst *LI = dyn_cast<LoadInst>(BV)) {
        if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
            ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
            DerefBytes = CI->getLimitedValue();
        }
        if (!DerefBytes.getBoolValue()) {
            if (MDNode *MD =
                        LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
                ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
                DerefBytes = CI->getLimitedValue();
            }
            CheckForNonNull = true;
        }
    }

    if (DerefBytes.getBoolValue())
        if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty)))
            if (!CheckForNonNull || isKnownNonNullAt(BV, CtxI, DT, TLI))
                return true;

    return false;
}
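A hedged sketch of how a caller could produce the (BV, Offset) pair this helper expects, by accumulating in-bounds constant GEP offsets from the queried pointer; this mirrors what the surrounding Loads.cpp code does, but is written as an illustrative standalone helper:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"

// Sketch: split V into a base value plus the accumulated constant byte offset
// from in-bounds GEPs; the pair can then be handed to the helper above.
static const llvm::Value *splitBaseAndOffset(const llvm::Value *V,
                                             const llvm::DataLayout &DL,
                                             llvm::APInt &Offset) {
  Offset = llvm::APInt(DL.getPointerTypeSizeInBits(V->getType()), 0);
  return V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
}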
Code example #7
void AssertionSiteInstrumenter::ParseAssertionLocation(
  Location *Loc, CallInst *Call) {

  assert(Call->getCalledFunction()->getName() == INLINE_ASSERTION);

  if (Call->getNumArgOperands() < 4)
    panic("TESLA assertion must have at least 4 arguments");

  // The filename (argument 1) should be a global variable.
  GlobalVariable *NameVar =
    dyn_cast<GlobalVariable>(Call->getOperand(1)->stripPointerCasts());

  ConstantDataArray *A;
  if (!NameVar ||
      !(A = dyn_cast_or_null<ConstantDataArray>(NameVar->getInitializer()))) {
    Call->dump();
    panic("unable to parse filename from TESLA assertion");
  }

  *Loc->mutable_filename() = A->getAsString();


  // The line and counter values should be constant integers.
  ConstantInt *Line = dyn_cast<ConstantInt>(Call->getOperand(2));
  if (!Line) {
    Call->getOperand(2)->dump();
    panic("assertion line must be a constant int");
  }

  Loc->set_line(Line->getLimitedValue(INT_MAX));

  ConstantInt *Count = dyn_cast<ConstantInt>(Call->getOperand(3));
  if (!Count) {
    Call->getOperand(3)->dump();
    panic("assertion count must be a constant int");
  }

  Loc->set_counter(Count->getLimitedValue(INT_MAX));
}
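Both getLimitedValue(INT_MAX) calls above rely on its clamping behaviour: the zero-extended value is returned unchanged when it is no larger than the limit, otherwise the limit itself is returned. A small illustrative sketch (the function and values are made up for the demonstration):

#include <climits>
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"

static void demoLimitedValue(llvm::LLVMContext &Ctx) {
  llvm::IntegerType *I64 = llvm::Type::getInt64Ty(Ctx);
  llvm::ConstantInt *Small = llvm::ConstantInt::get(I64, 42);
  llvm::ConstantInt *Huge = llvm::ConstantInt::get(I64, 1ULL << 40);
  uint64_t A = Small->getLimitedValue(INT_MAX); // 42: under the limit, returned as-is
  uint64_t B = Huge->getLimitedValue(INT_MAX);  // INT_MAX: clamped to the limit
  (void)A;
  (void)B;
}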
Code example #8
File: Value.cpp Project: crabtw/llvm
unsigned Value::getPointerAlignment(const DataLayout &DL) const {
  assert(getType()->isPointerTy() && "must be pointer");

  unsigned Align = 0;
  if (auto *GO = dyn_cast<GlobalObject>(this)) {
    // Don't make any assumptions about function pointer alignment. Some
    // targets use the LSBs to store additional information.
    if (isa<Function>(GO))
      return 0;
    Align = GO->getAlignment();
    if (Align == 0) {
      if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
        Type *ObjectType = GVar->getValueType();
        if (ObjectType->isSized()) {
          // If the object is defined in the current Module, we'll be giving
          // it the preferred alignment. Otherwise, we have to assume that it
          // may only have the minimum ABI alignment.
          if (GVar->isStrongDefinitionForLinker())
            Align = DL.getPreferredAlignment(GVar);
          else
            Align = DL.getABITypeAlignment(ObjectType);
        }
      }
    }
  } else if (const Argument *A = dyn_cast<Argument>(this)) {
    Align = A->getParamAlignment();

    if (!Align && A->hasStructRetAttr()) {
      // An sret parameter has at least the ABI alignment of the return type.
      Type *EltTy = cast<PointerType>(A->getType())->getElementType();
      if (EltTy->isSized())
        Align = DL.getABITypeAlignment(EltTy);
    }
  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(this)) {
    Align = AI->getAlignment();
    if (Align == 0) {
      Type *AllocatedType = AI->getAllocatedType();
      if (AllocatedType->isSized())
        Align = DL.getPrefTypeAlignment(AllocatedType);
    }
  } else if (auto CS = ImmutableCallSite(this))
    Align = CS.getAttributes().getRetAlignment();
  else if (const LoadInst *LI = dyn_cast<LoadInst>(this))
    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      Align = CI->getLimitedValue();
    }

  return Align;
}
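A hedged caller sketch for the alignment query above (Required is in bytes; a result of 0 from getPointerAlignment means no alignment could be inferred; the unsigned return type matches the version quoted here):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"

// Sketch: check whether V is known to be aligned to at least Required bytes.
static bool hasKnownAlignment(const llvm::Value *V, const llvm::DataLayout &DL,
                              unsigned Required) {
  unsigned Align = V->getPointerAlignment(DL); // 0 means "unknown"
  return Align != 0 && Align >= Required;
}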
Code example #9
File: x86_64_systemv.cpp Project: EgoIncarnate/fcd
bool CallingConvention_x86_64_systemv::analyzeFunction(ParameterRegistry &registry, CallInformation &callInfo, Function &function)
{
	// TODO: Look at called functions to find hidden parameters/return values
	
	if (md::isPrototype(function))
	{
		return false;
	}
	
	TargetInfo& targetInfo = registry.getTargetInfo();
	
	// We always need rip and rsp.
	callInfo.addParameter(ValueInformation::IntegerRegister, targetInfo.registerNamed("rip"));
	callInfo.addParameter(ValueInformation::IntegerRegister, targetInfo.registerNamed("rsp"));
	
	// Identify register GEPs.
	// (assume x86 regs as first parameter)
	assert(function.arg_size() == 1);
	Argument* regs = function.arg_begin();
	auto pointerType = dyn_cast<PointerType>(regs->getType());
	assert(pointerType != nullptr && pointerType->getTypeAtIndex(int(0))->getStructName() == "struct.x86_regs");
	
	unordered_multimap<const TargetRegisterInfo*, GetElementPtrInst*> geps;
	for (auto& use : regs->uses())
	{
		if (GetElementPtrInst* gep = dyn_cast<GetElementPtrInst>(use.getUser()))
		if (const TargetRegisterInfo* regName = targetInfo.registerInfo(*gep))
		{
			geps.insert({regName, gep});
		}
	}
	
	// Look at temporary registers that are read before they are written
	MemorySSA& mssa = *registry.getMemorySSA(function);
	for (const char* name : parameterRegisters)
	{
		const TargetRegisterInfo* smallReg = targetInfo.registerNamed(name);
		const TargetRegisterInfo* regInfo = targetInfo.largestOverlappingRegister(*smallReg);
		auto range = geps.equal_range(regInfo);
		
		vector<Instruction*> addresses;
		for (auto iter = range.first; iter != range.second; ++iter)
		{
			addresses.push_back(iter->second);
		}
		
		for (size_t i = 0; i < addresses.size(); ++i)
		{
			Instruction* addressInst = addresses[i];
			for (auto& use : addressInst->uses())
			{
				if (auto load = dyn_cast<LoadInst>(use.getUser()))
				{
					MemoryAccess* parent = mssa.getMemoryAccess(load)->getDefiningAccess();
					if (mssa.isLiveOnEntryDef(parent))
					{
						// register argument!
						callInfo.addParameter(ValueInformation::IntegerRegister, regInfo);
					}
				}
				else if (auto cast = dyn_cast<CastInst>(use.getUser()))
				{
					if (cast->getType()->isPointerTy())
					{
						addresses.push_back(cast);
					}
				}
			}
		}
	}
	
	// Does the function refer to values at an offset above the initial rsp value?
	// Assume that rsp is known to be preserved.
	auto spRange = geps.equal_range(targetInfo.getStackPointer());
	for (auto iter = spRange.first; iter != spRange.second; ++iter)
	{
		auto* gep = iter->second;
		// Find all uses of reference to sp register
		for (auto& use : gep->uses())
		{
			if (auto load = dyn_cast<LoadInst>(use.getUser()))
			{
				// Find uses above +8 (since +0 is the return address)
				for (auto& use : load->uses())
				{
					ConstantInt* offset = nullptr;
					if (match(use.get(), m_Add(m_Value(), m_ConstantInt(offset))))
					{
						make_signed<decltype(offset->getLimitedValue())>::type intOffset = offset->getLimitedValue();
						if (intOffset > 8)
						{
							// memory argument!
							callInfo.addParameter(ValueInformation::Stack, intOffset);
						}
					}
				}
			}
		}
	}
	
	// Are we using return registers?
	vector<const TargetRegisterInfo*> usedReturns;
	usedReturns.reserve(2);
	
	for (const char* name : returnRegisters)
	{
		const TargetRegisterInfo* regInfo = targetInfo.registerNamed(name);
		auto range = geps.equal_range(regInfo);
		for (auto iter = range.first; iter != range.second; ++iter)
		{
			bool hasStore = any_of(iter->second->use_begin(), iter->second->use_end(), [](Use& use)
			{
				return isa<StoreInst>(use.getUser());
			});
			
			if (hasStore)
			{
				usedReturns.push_back(regInfo);
				break;
			}
		}
	}
	
	for (const TargetRegisterInfo* reg : ipaFindUsedReturns(registry, function, usedReturns))
	{
		// return value!
		callInfo.addReturn(ValueInformation::IntegerRegister, reg);
	}
	
	return true;
}
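One detail worth calling out in the stack-argument scan above: getLimitedValue() returns a uint64_t, and the make_signed<decltype(...)>::type spelling merely names its signed counterpart. A hedged, more direct equivalent:

#include <cstdint>
#include "llvm/IR/Constants.h"

// Sketch: the signed stack offset read from a matched ConstantInt; equivalent to
// the make_signed<decltype(offset->getLimitedValue())>::type line above.
static int64_t signedStackOffset(const llvm::ConstantInt *Offset) {
  return static_cast<int64_t>(Offset->getLimitedValue());
}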
Code example #10
File: SimplifyGEP.cpp Project: brills/pfpa
//
// Method: runOnModule()
//
// Description:
//  Entry point for this LLVM pass.
//  Find all GEPs, and simplify them.
//
// Inputs:
//  M - A reference to the LLVM module to transform
//
// Outputs:
//  M - The transformed LLVM module.
//
// Return value:
//  true  - The module was modified.
//  false - The module was not modified.
//
bool SimplifyGEP::runOnModule(Module& M) {
  TD = &getAnalysis<TargetData>();
  preprocess(M);
  for (Module::iterator F = M.begin(); F != M.end(); ++F){
    for (Function::iterator B = F->begin(), FE = F->end(); B != FE; ++B) {      
      for (BasicBlock::iterator I = B->begin(), BE = B->end(); I != BE; I++) {
        if(!(isa<GetElementPtrInst>(I)))
          continue;
        GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
        Value *PtrOp = GEP->getOperand(0);
        Value *StrippedPtr = PtrOp->stripPointerCasts();
        // Check if the GEP base pointer is enclosed in a cast
        if (StrippedPtr != PtrOp) {
          const PointerType *StrippedPtrTy = cast<PointerType>(StrippedPtr->getType());
          bool HasZeroPointerIndex = false;
          if (ConstantInt *C = dyn_cast<ConstantInt>(GEP->getOperand(1)))
            HasZeroPointerIndex = C->isZero();
          // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
          // into     : GEP [10 x i8]* X, i32 0, ...
          //
          // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
          //           into     : GEP i8* X, ...
          // 
          // This occurs when the program declares an array extern like "int X[];"
          if (HasZeroPointerIndex) {
            const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
            if (const ArrayType *CATy =
                dyn_cast<ArrayType>(CPTy->getElementType())) {
              // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
              if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
                // -> GEP i8* X, ...
                SmallVector<Value*, 8> Idx(GEP->idx_begin()+1, GEP->idx_end());
                GetElementPtrInst *Res =
                  GetElementPtrInst::Create(StrippedPtr, Idx, GEP->getName(), GEP);
                Res->setIsInBounds(GEP->isInBounds());
                GEP->replaceAllUsesWith(Res);
                continue;
              }

              if (const ArrayType *XATy =
                  dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
                // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
                if (CATy->getElementType() == XATy->getElementType()) {
                  // -> GEP [10 x i8]* X, i32 0, ...
                  // At this point, we know that the cast source type is a pointer
                  // to an array of the same type as the destination pointer
                  // array.  Because the array type is never stepped over (there
                  // is a leading zero) we can fold the cast into this GEP.
                  GEP->setOperand(0, StrippedPtr);
                  continue;
                }
              }
            }   
          } else if (GEP->getNumOperands() == 2) {
            // Transform things like:
            // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
            // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
            Type *SrcElTy = StrippedPtrTy->getElementType();
            Type *ResElTy = cast<PointerType>(PtrOp->getType())->getElementType();
            if (TD && SrcElTy->isArrayTy() &&
                TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
                TD->getTypeAllocSize(ResElTy)) {
              Value *Idx[2];
              Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP->getContext()));
              Idx[1] = GEP->getOperand(1);
              Value *NewGEP = GetElementPtrInst::Create(StrippedPtr, Idx,
                                                        GEP->getName(), GEP);
              // V and GEP are both pointer types --> BitCast
              GEP->replaceAllUsesWith(new BitCastInst(NewGEP, GEP->getType(), GEP->getName(), GEP));
              continue;
            }

            // Transform things like:
            // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
            //   (where tmp = 8*tmp2) into:
            // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast

            if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) {
              uint64_t ArrayEltSize =
                TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());

              // Check to see if "tmp" is a scale by a multiple of ArrayEltSize.  We
              // allow either a mul, shift, or constant here.
              Value *NewIdx = 0;
              ConstantInt *Scale = 0;
              if (ArrayEltSize == 1) {
                NewIdx = GEP->getOperand(1);
                Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
              } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(1))) {
                NewIdx = ConstantInt::get(CI->getType(), 1);
                Scale = CI;
              } else if (Instruction *Inst =dyn_cast<Instruction>(GEP->getOperand(1))){
                if (Inst->getOpcode() == Instruction::Shl &&
                    isa<ConstantInt>(Inst->getOperand(1))) {
                  ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
                  uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
                  Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
                                           1ULL << ShAmtVal);
                  NewIdx = Inst->getOperand(0);
                } else if (Inst->getOpcode() == Instruction::Mul &&
                           isa<ConstantInt>(Inst->getOperand(1))) {
                  Scale = cast<ConstantInt>(Inst->getOperand(1));
                  NewIdx = Inst->getOperand(0);
                }
              }

              // If the index will be to exactly the right offset with the scale taken
              // out, perform the transformation. Note, we don't know whether Scale is
              // signed or not. We'll use unsigned version of division/modulo
              // operation after making sure Scale doesn't have the sign bit set.
              if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
                  Scale->getZExtValue() % ArrayEltSize == 0) {
                Scale = ConstantInt::get(Scale->getType(),
                                         Scale->getZExtValue() / ArrayEltSize);
                if (Scale->getZExtValue() != 1) {
                  Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
                                                             false /*ZExt*/);
                  NewIdx = BinaryOperator::Create(BinaryOperator::Mul, NewIdx, C, "idxscale");
                }

                // Insert the new GEP instruction.
                Value *Idx[2];
                Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP->getContext()));
                Idx[1] = NewIdx;
                Value *NewGEP = GetElementPtrInst::Create(StrippedPtr, Idx,
                                                          GEP->getName(), GEP);
                GEP->replaceAllUsesWith(new BitCastInst(NewGEP, GEP->getType(), GEP->getName(), GEP));
                continue;
              }
            }
          }
        }
      }
    }
  }

  return true;
}
Code example #11
/// Return true if we can evaluate the specified expression tree if the vector
/// elements were shuffled in a different order.
static bool CanEvaluateShuffled(Value *V, ArrayRef<int> Mask,
                                unsigned Depth = 5) {
  // We can always reorder the elements of a constant.
  if (isa<Constant>(V))
    return true;

  // We won't reorder vector arguments. No IPO here.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Two users may expect different orders of the elements. Don't try it.
  if (!I->hasOneUse())
    return false;

  if (Depth == 0) return false;

  switch (I->getOpcode()) {
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::GetElementPtr: {
      for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
        if (!CanEvaluateShuffled(I->getOperand(i), Mask, Depth-1))
          return false;
      }
      return true;
    }
    case Instruction::InsertElement: {
      ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(2));
      if (!CI) return false;
      int ElementNumber = CI->getLimitedValue();

      // Verify that 'CI' does not occur twice in Mask. A single 'insertelement'
      // can't put an element into multiple indices.
      bool SeenOnce = false;
      for (int i = 0, e = Mask.size(); i != e; ++i) {
        if (Mask[i] == ElementNumber) {
          if (SeenOnce)
            return false;
          SeenOnce = true;
        }
      }
      return CanEvaluateShuffled(I->getOperand(0), Mask, Depth-1);
    }
  }
  return false;
}
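The InsertElement case above reduces to the rule that the inserted lane may be referenced at most once by Mask. A standalone sketch of just that check (the helper name is hypothetical, not part of InstCombine):

#include "llvm/ADT/ArrayRef.h"

static bool laneReferencedAtMostOnce(llvm::ArrayRef<int> Mask, int ElementNumber) {
  bool SeenOnce = false;
  for (int M : Mask) {
    if (M != ElementNumber)
      continue;
    if (SeenOnce)
      return false; // a single insertelement cannot supply two shuffled lanes
    SeenOnce = true;
  }
  return true;
}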
Code example #12
/// CanEvaluateShifted - See if we can compute the specified value, but shifted
/// logically to the left or right by some number of bits.  This should return
/// true if the expression can be computed for the same cost as the current
/// expression tree.  This is used to eliminate extraneous shifting from things
/// like:
///      %C = shl i128 %A, 64
///      %D = shl i128 %B, 96
///      %E = or i128 %C, %D
///      %F = lshr i128 %E, 64
/// where the client will ask if E can be computed shifted right by 64-bits.  If
/// this succeeds, the GetShiftedValue function will be called to produce the
/// value.
static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
                               InstCombiner &IC) {
  // We can always evaluate constants shifted.
  if (isa<Constant>(V))
    return true;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // If this is the opposite shift, we can directly reuse the input of the shift
  // if the needed bits are already zero in the input.  This allows us to reuse
  // the value which means that we don't care if the shift has multiple uses.
  //  TODO:  Handle opposite shift by exact value.
  ConstantInt *CI = nullptr;
  if ((isLeftShift && match(I, m_LShr(m_Value(), m_ConstantInt(CI)))) ||
      (!isLeftShift && match(I, m_Shl(m_Value(), m_ConstantInt(CI))))) {
    if (CI->getZExtValue() == NumBits) {
      // TODO: Check that the input bits are already zero with MaskedValueIsZero
#if 0
      // If this is a truncate of a logical shr, we can truncate it to a smaller
      // lshr iff we know that the bits we would otherwise be shifting in are
      // already zeros.
      uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      if (MaskedValueIsZero(I->getOperand(0),
            APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
          CI->getLimitedValue(BitWidth) < BitWidth) {
        return CanEvaluateTruncated(I->getOperand(0), Ty);
      }
#endif

    }
  }

  // We can't mutate something that has multiple uses: doing so would
  // require duplicating the instruction in general, which isn't profitable.
  if (!I->hasOneUse()) return false;

  switch (I->getOpcode()) {
  default: return false;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // Bitwise operators can all be arbitrarily evaluated shifted.
    return CanEvaluateShifted(I->getOperand(0), NumBits, isLeftShift, IC) &&
           CanEvaluateShifted(I->getOperand(1), NumBits, isLeftShift, IC);

  case Instruction::Shl: {
    // We can often fold the shift into shifts-by-a-constant.
    CI = dyn_cast<ConstantInt>(I->getOperand(1));
    if (!CI) return false;

    // We can always fold shl(c1)+shl(c2) -> shl(c1+c2).
    if (isLeftShift) return true;

    // We can always turn shl(c)+shr(c) -> and(c2).
    if (CI->getValue() == NumBits) return true;

    unsigned TypeWidth = I->getType()->getScalarSizeInBits();

    // We can turn shl(c1)+shr(c2) -> shl(c3)+and(c4), but it isn't
    // profitable unless we know the and'd out bits are already zero.
    if (CI->getZExtValue() > NumBits) {
      unsigned LowBits = TypeWidth - CI->getZExtValue();
      if (MaskedValueIsZero(I->getOperand(0),
                       APInt::getLowBitsSet(TypeWidth, NumBits) << LowBits))
        return true;
    }

    return false;
  }
  case Instruction::LShr: {
    // We can often fold the shift into shifts-by-a-constant.
    CI = dyn_cast<ConstantInt>(I->getOperand(1));
    if (!CI) return false;

    // We can always fold lshr(c1)+lshr(c2) -> lshr(c1+c2).
    if (!isLeftShift) return true;

    // We can always turn lshr(c)+shl(c) -> and(c2).
    if (CI->getValue() == NumBits) return true;

    unsigned TypeWidth = I->getType()->getScalarSizeInBits();

    // We can always turn lshr(c1)+shl(c2) -> lshr(c3)+and(c4), but it isn't
    // profitable unless we know the and'd out bits are already zero.
    if (CI->getValue().ult(TypeWidth) && CI->getZExtValue() > NumBits) {
      unsigned LowBits = CI->getZExtValue() - NumBits;
      if (MaskedValueIsZero(I->getOperand(0),
                          APInt::getLowBitsSet(TypeWidth, NumBits) << LowBits))
        return true;
    }

    return false;
  }
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    return CanEvaluateShifted(SI->getTrueValue(), NumBits, isLeftShift, IC) &&
           CanEvaluateShifted(SI->getFalseValue(), NumBits, isLeftShift, IC);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!CanEvaluateShifted(PN->getIncomingValue(i), NumBits, isLeftShift,IC))
        return false;
    return true;
  }
  }
}
Code example #13
/// PrepareMonoLSDA - Collect information needed by EmitMonoLSDA
///
///   This function collects information available only during EndFunction which is needed
/// by EmitMonoLSDA and stores it into EHFrameInfo. It is the same as the
/// beginning of EmitExceptionTable.
///
void DwarfMonoException::PrepareMonoLSDA(FunctionEHFrameInfo *EHFrameInfo) {
  const std::vector<const GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
  const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
  const MachineFunction *MF = Asm->MF;

  // Sort the landing pads in order of their type ids.  This is used to fold
  // duplicate actions.
  SmallVector<const LandingPadInfo *, 64> LandingPads;
  LandingPads.reserve(PadInfos.size());

  for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
    LandingPads.push_back(&PadInfos[i]);

  std::sort(LandingPads.begin(), LandingPads.end(),
            [](const LandingPadInfo *L, const LandingPadInfo *R) {
              return L->TypeIds < R->TypeIds;
            });

  // Invokes and nounwind calls have entries in PadMap (due to being bracketed
  // by try-range labels when lowered).  Ordinary calls do not, so appropriate
  // try-ranges for them need be deduced when using DWARF exception handling.
  RangeMapType PadMap;
  for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
    const LandingPadInfo *LandingPad = LandingPads[i];
    for (unsigned j = 0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
      MCSymbol *BeginLabel = LandingPad->BeginLabels[j];
      assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!");
      PadRange P = { i, j };
      PadMap[BeginLabel] = P;
    }
  }

  // Compute the call-site table.
  SmallVector<MonoCallSiteEntry, 64> CallSites;

  MCSymbol *LastLabel = 0;
  for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
        I != E; ++I) {
    for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
          MI != E; ++MI) {
      if (!MI->isLabel()) {
        continue;
      }

      MCSymbol *BeginLabel = MI->getOperand(0).getMCSymbol();
      assert(BeginLabel && "Invalid label!");

      RangeMapType::iterator L = PadMap.find(BeginLabel);

      if (L == PadMap.end())
        continue;

      PadRange P = L->second;
      const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];

      assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
              "Inconsistent landing pad map!");

      // Mono emits one landing pad for each CLR exception clause,
      // and the type info contains the clause index
      assert (LandingPad->TypeIds.size() == 1);
      assert (LandingPad->LandingPadLabel);

      LastLabel = LandingPad->EndLabels[P.RangeIndex];
      MonoCallSiteEntry Site = {BeginLabel, LastLabel,
                                LandingPad->LandingPadLabel,
                                LandingPad->TypeIds [0]};

      assert(Site.BeginLabel && Site.EndLabel && Site.PadLabel &&
              "Invalid landing pad!");

	  // FIXME: This doesn't work because it includes ranges outside clauses
#if 0
      // Try to merge with the previous call-site.
      if (CallSites.size()) {
        MonoCallSiteEntry &Prev = CallSites.back();
        if (Site.PadLabel == Prev.PadLabel && Site.TypeID == Prev.TypeID) {
          // Extend the range of the previous entry.
          Prev.EndLabel = Site.EndLabel;
          continue;
        }
      }
#endif

      // Otherwise, create a new call-site.
      CallSites.push_back(Site);
    }
  }

  //
  // Compute a mapping from method names to their AOT method index
  //
  if (FuncIndexes.size () == 0) {
    const Module *m = MMI->getModule ();
    NamedMDNode *indexes = m->getNamedMetadata ("mono.function_indexes");
    if (indexes) {
      for (unsigned int i = 0; i < indexes->getNumOperands (); ++i) {
        MDNode *n = indexes->getOperand (i);
        MDString *s = (MDString*)n->getOperand (0);
        ConstantInt *idx = (ConstantInt*)n->getOperand (1);
        FuncIndexes.GetOrCreateValue (s->getString (), (int)idx->getLimitedValue () + 1);
      }
    }
  }

  MonoEHFrameInfo *MonoEH = &EHFrameInfo->MonoEH;

  // Save information for EmitMonoLSDA
  MonoEH->MF = Asm->MF;
  MonoEH->FunctionNumber = Asm->getFunctionNumber();
  MonoEH->CallSites.insert(MonoEH->CallSites.begin(), CallSites.begin(), CallSites.end());
  MonoEH->TypeInfos = TypeInfos;
  MonoEH->PadInfos = PadInfos;
  MonoEH->MonoMethodIdx = FuncIndexes.lookup (Asm->MF->getFunction ()->getName ()) - 1;
  //outs()<<"A:"<<Asm->MF->getFunction()->getName() << " " << MonoEH->MonoMethodIdx << "\n";

  int ThisSlot = Asm->MF->getMonoInfo()->getThisStackSlot();

  if (ThisSlot != -1) {
    unsigned FrameReg;
    MonoEH->ThisOffset = Asm->MF->getTarget ().getSubtargetImpl ()->getFrameLowering ()->getFrameIndexReference (*Asm->MF, ThisSlot, FrameReg);
    MonoEH->FrameReg = Asm->MF->getTarget ().getSubtargetImpl ()->getRegisterInfo ()->getDwarfRegNum (FrameReg, true);
  } else {
    MonoEH->FrameReg = -1;
  }
}