Example #1
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    SmallPtrSetImpl<const Value *> &Visited) {
  // Already visited?  Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
    return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

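  // If the pointer itself reports dereferenceable bytes (e.g. from a
  // dereferenceable(N) or dereferenceable_or_null(N) attribute, or from a
  // sufficiently sized alloca or global), we can answer the query directly.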
  bool CheckForNonNull = false;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull));
  if (KnownDerefBytes.getBoolValue()) {
    if (KnownDerefBytes.uge(Size))
      if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT))
        return isAligned(V, Align, DL);
  }

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.
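    // For example, with Align == 8, a Base dereferenceable for 24 bytes and
    // aligned to 8, Offset == 8, and Size == 8: Offset + Size == 16 <= 24,
    // and 8 divides Offset, so the access is in bounds and 8-byte aligned.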

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Align, Offset + Size.sextOrTrunc(Offset.getBitWidth()),
        DL, CtxI, DT, Visited);
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(
        RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  if (auto CS = ImmutableCallSite(V))
    if (auto *RP = getArgumentAliasingToReturnedPointer(CS))
      return isDereferenceableAndAlignedPointer(RP, Align, Size, DL, CtxI, DT,
                                                Visited);

  // If we don't know, assume the worst.
  return false;
}
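
In LLVM proper, this static helper is reached through a public overload that seeds the Visited set. A minimal sketch of such a wrapper follows; the signature mirrors the entry point declared in llvm/Analysis/Loads.h, but treat it as an illustration rather than the exact upstream code.

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
                                              const APInt &Size,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  // A fresh set per query: the recursion above uses it only to break cycles.
  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT,
                                              Visited);
}
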
/*
 * function evaluateNode
 *
 * Given a node, this function computes the abstract state of the node
 * considering the abstract state of the node's predecessors and the
 * semantics of the node.
 */
Range llvm::RangeAnalysis::evaluateNode(GraphNode* Node) {

	Range result;

	/*
	 * VarNode: constants generate constant ranges;
	 * otherwise, the output is the union of the predecessors' states.
	 */
	if(VarNode* VN = dyn_cast<VarNode>(Node)){
		Value* val = VN->getValue();

		if (ConstantInt* CI = dyn_cast<ConstantInt>(val)){
			APInt value = CI->getValue();
			value = value.sextOrTrunc(MAX_BIT_INT);
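			// A constant c is abstracted as the singleton interval [c, c].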
			result = Range(value,value);
		} else {
			result = getUnionOfPredecessors(Node);
		}

	}
	/*
	 * Nodes that do not modify the data: PHI, Sigma, MemNode
	 * >> just forward the union of the predecessors' states.
	 */
	else if (isa<MemNode>(Node) || isa<SigmaOpNode>(Node) ||
	         isa<PHIOpNode>(Node) ||
	         (isa<OpNode>(Node) &&
	          cast<OpNode>(Node)->getOpCode() == Instruction::PHI)) {

		result = getUnionOfPredecessors(Node);
	}
	/*
	 * CallNodes: We do not know the output
	 * >> [-inf, +inf]
	 */
	else if (isa<CallNode>(Node)) {

		result = Range(Min, Max);
	}
	/*
	 * Binary operators: we will do the abstract interpretation
	 * to generate the result
	 */
	else if (BinaryOpNode* BOP = dyn_cast<BinaryOpNode>(Node)) {

		GraphNode* Op1 = BOP->getOperand(0);
		GraphNode* Op2 = BOP->getOperand(1);

		result = abstractInterpretation(out_state[Op1], out_state[Op2], BOP->getBinaryOperator());
	}
	/*
	 * Unary operators: we will do the abstract interpretation
	 * to generate the result
	 */
	else if (UnaryOpNode* UOP = dyn_cast<UnaryOpNode>(Node)) {

		GraphNode* Op = UOP->getIncomingNode(0);

		result = abstractInterpretation(out_state[Op], UOP->getUnaryInstruction());
	}
	/*
	 * Generic operations: treat individually case by case
	 */
	else if (OpNode* OP = dyn_cast<OpNode>(Node)) {

		if (Instruction* I = OP->getOperation()) {

			switch (I->getOpcode()) {
			case Instruction::Store: {
				// Scope the declaration so the case label remains
				// valid if more opcodes are handled later.
				GraphNode* Op = OP->getIncomingNode(0);
				result = abstractInterpretation(out_state[Op], I);
				break;
			}
			}
		}

	} else {
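		// Unhandled node kinds keep the default (unknown) range.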
		result = Range();
	}

	return result;
}
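
evaluateNode is meant to be driven to a fixed point by iterating over the graph. A minimal driver sketch follows, assuming a hypothetical nodes() accessor over all graph nodes and a Range inequality operator (both are illustrative, not part of the code above); a production analysis would also add widening so that growing ranges still converge.

void llvm::RangeAnalysis::solve() {

	bool Changed = true;

	// Round-robin iteration: re-evaluate every node until no out-state
	// changes anymore.
	while (Changed) {
		Changed = false;

		for (GraphNode* Node : nodes()) {	// hypothetical accessor

			Range New = evaluateNode(Node);

			if (New != out_state[Node]) {	// assumes Range::operator!=
				out_state[Node] = New;
				Changed = true;
			}
		}
	}
}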