void InstructionReplace::phase2(llvm::Module& M)
{
	for(llvm::Module::iterator F = M.begin(), ME = M.end(); F != ME; ++F) {
		// Skip functions the TaggedData analysis has not marked; the check
		// depends only on F, so do it once per function rather than per block.
		TaggedData& td = getAnalysis<TaggedData>(*F);
		if(!td.functionMarked(F)) { continue; }
		for(llvm::Function::iterator BB = F->begin(), FE = F->end(); BB != FE; ++BB) {
			for(llvm::BasicBlock::iterator i = BB->begin(); i != BB->end(); ++i) {
				NoCryptoFA::InstructionMetadata* md = NoCryptoFA::known[i];
				if(!md->hasBeenMasked) { continue; }
				// Walk the users of this masked instruction; if any user has no
				// masked values of its own, the masking of i must be undone.
				for(Instruction::use_iterator u = i->use_begin(); u != i->use_end(); ++u) {
					Instruction* utilizzatore = cast<Instruction>(u.getUse().getUser());
					NoCryptoFA::InstructionMetadata* usemd = NoCryptoFA::known[utilizzatore];
					if(usemd->MaskedValues.empty()) {
						Unmask(i);
					}
				}
			}
			}
		}
	}
}
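
The examples on this page all walk a value's use list with use_begin()/use_end() and recover the using instruction from the Use object. Below is a minimal, self-contained sketch of that pattern, written against the older iterator API the first two examples use (the Use is reached through the iterator's getUse()); the helper name is illustrative, and the header paths assume the pre-3.3 layout (newer trees move them under llvm/IR/):

#include "llvm/Instruction.h"
#include "llvm/Value.h"
#include "llvm/Support/Casting.h"

// Illustrative helper: return true if every user of I lives in the same basic
// block as I. Users of an Instruction are themselves Instructions, which is
// why a plain cast<Instruction>, as in phase2() above, is safe here.
static bool allUsersAreLocal(llvm::Instruction* I) {
	for(llvm::Instruction::use_iterator u = I->use_begin(), ue = I->use_end(); u != ue; ++u) {
		llvm::Instruction* userInst = llvm::cast<llvm::Instruction>(u.getUse().getUser());
		if(userInst->getParent() != I->getParent())
			return false;
	}
	return true;
}
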
Example #2
void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker) {
  assert(V->getType()->isPointerTy() && "Capture is for pointers only!");
  SmallVector<Use*, Threshold> Worklist;
  SmallSet<Use*, Threshold> Visited;
  int Count = 0;

  for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
       UI != UE; ++UI) {
    // If there are lots of uses, conservatively say that the value
    // is captured to avoid taking too much compile time.
    if (Count++ >= Threshold)
      return Tracker->tooManyUses();

    Use *U = &UI.getUse();
    if (!Tracker->shouldExplore(U)) continue;
    Visited.insert(U);
    Worklist.push_back(U);
  }

  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());
    V = U->get();

    switch (I->getOpcode()) {
    case Instruction::Call:
    case Instruction::Invoke: {
      CallSite CS(I);
      // Not captured if the callee is readonly, doesn't return a copy through
      // its return value and doesn't unwind (a readonly function can leak bits
      // by throwing an exception or not depending on the input value).
      if (CS.onlyReadsMemory() && CS.doesNotThrow() && I->getType()->isVoidTy())
        break;

      // Not captured if only passed via 'nocapture' arguments.  Note that
      // calling a function pointer does not in itself cause the pointer to
      // be captured.  This is a subtle point considering that (for example)
      // the callee might return its own address.  It is analogous to saying
      // that loading a value from a pointer does not cause the pointer to be
      // captured, even though the loaded value might be the pointer itself
      // (think of self-referential objects).
      CallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
      for (CallSite::arg_iterator A = B; A != E; ++A)
        if (A->get() == V && !CS.doesNotCapture(A - B))
          // The parameter is not marked 'nocapture' - captured.
          if (Tracker->captured(U))
            return;
      break;
    }
    case Instruction::Load:
      // Loading from a pointer does not cause it to be captured.
      break;
    case Instruction::VAArg:
      // "va-arg" from a pointer does not cause it to be captured.
      break;
    case Instruction::Store:
      if (V == I->getOperand(0))
        // Stored the pointer - conservatively assume it may be captured.
        if (Tracker->captured(U))
          return;
      // Storing to the pointee does not cause the pointer to be captured.
      break;
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
      // The original value is not captured via this if the new value isn't.
      for (Instruction::use_iterator UI = I->use_begin(), UE = I->use_end();
           UI != UE; ++UI) {
        Use *U = &UI.getUse();
        if (Visited.insert(U))
          if (Tracker->shouldExplore(U))
            Worklist.push_back(U);
      }
      break;
    case Instruction::ICmp:
      // Don't count comparisons of a no-alias return value against null as
      // captures. This allows us to ignore comparisons of malloc results
      // with null, for example.
      if (ConstantPointerNull *CPN =
          dyn_cast<ConstantPointerNull>(I->getOperand(1)))
        if (CPN->getType()->getAddressSpace() == 0)
          if (isNoAliasCall(V->stripPointerCastsSafe()))
            break;
      // Otherwise, be conservative. There are crazy ways to capture pointers
      // using comparisons.
      if (Tracker->captured(U))
        return;
      break;
    default:
      // Something else - be conservative and say it is captured.
      if (Tracker->captured(U))
        return;
      break;
    }
  }

  // All uses examined.
}
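
The routine above reports its findings through the CaptureTracker callbacks it invokes (tooManyUses, shouldExplore, captured) rather than returning a result directly. A minimal sketch of a tracker that simply records whether any potentially capturing use was reported, assuming the LLVM 3.x CaptureTracker interface declared in llvm/Analysis/CaptureTracking.h:

#include "llvm/Analysis/CaptureTracking.h"

namespace {
  // Sketch only: remembers whether the traversal reported any capture.
  struct RecordingTracker : public llvm::CaptureTracker {
    bool Captured;
    RecordingTracker() : Captured(false) {}

    // The use count hit the threshold: be conservative and treat as captured.
    virtual void tooManyUses() { Captured = true; }

    // Explore every use; a real client could prune uninteresting ones here.
    virtual bool shouldExplore(llvm::Use *U) { return true; }

    // A capturing use was found; returning true stops the traversal early.
    virtual bool captured(llvm::Use *U) {
      Captured = true;
      return true;
    }
  };
}

// Typical call, with Ptr being the pointer value of interest:
//   RecordingTracker CT;
//   llvm::PointerMayBeCaptured(Ptr, &CT);
//   if (CT.Captured) { /* the pointer may escape */ }
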
Example #3
File: Shadows.cpp  Project: smowton/llpe
// Create shadow information for function F, including top-sorting its blocks to give them indices and thus
// a sensible order for specialisation.
ShadowFunctionInvar* LLPEAnalysisPass::getFunctionInvarInfo(Function& F) {

  // Already described?
  DenseMap<Function*, ShadowFunctionInvar*>::iterator findit = functionInfo.find(&F);
  if(findit != functionInfo.end())
    return findit->second;

  // Beware! This LoopInfo instance and whatever Loop objects come from it are only alive until
  // the next call to getAnalysis. Therefore the ShadowLoopInvar objects we make here
  // must mirror all information we're interested in from the Loops.
  LoopInfo* LI = &getAnalysis<LoopInfo>(F);

  ShadowFunctionInvar* RetInfoP = new ShadowFunctionInvar();
  functionInfo[&F] = RetInfoP;
  ShadowFunctionInvar& RetInfo = *RetInfoP;

  // Top-sort all blocks, including those in child loops. Thanks to trickery in createTopOrderingFrom,
  // instead of giving all loop blocks an equal topsort value due to the latch-edge cycle,
  // we order the header first, then the loop body in topological order ignoring the latch, then its exit blocks.
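  // (For example, with edges entry -> header, header -> body, body -> header (the latch)
  //  and header -> exit, the net order after the reverse below is: entry, header, body, exit.)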
  std::vector<BasicBlock*> TopOrderedBlocks;
  SmallSet<BasicBlock*, 8> VisitedBlocks;

  createTopOrderingFrom(&F.getEntryBlock(), TopOrderedBlocks, VisitedBlocks, LI, /* loop = */ 0);

  // Reverse, since the topsort gives a bottom-up ordering.
  std::reverse(TopOrderedBlocks.begin(), TopOrderedBlocks.end());

  // Assign indices to each BB and instruction (IIndices is useful because otherwise we would have
  // to walk the instruction list to get from an instruction to its index).

  DenseMap<BasicBlock*, uint32_t> BBIndices;
  DenseMap<Instruction*, uint32_t> IIndices;

  for(uint32_t i = 0; i < TopOrderedBlocks.size(); ++i) {

    BasicBlock* BB = TopOrderedBlocks[i];

    BBIndices[BB] = i;
    
    uint32_t j;
    BasicBlock::iterator it, endit;
    for(j = 0, it = BB->begin(), endit = BB->end(); it != endit; ++it, ++j) {

      IIndices[it] = j;

    }

  }

  // Create shadow block objects:
  ShadowBBInvar* FShadowBlocks = new ShadowBBInvar[TopOrderedBlocks.size()];

  for(uint32_t i = 0; i < TopOrderedBlocks.size(); ++i) {

    BasicBlock* BB = TopOrderedBlocks[i];
    ShadowBBInvar& SBB = FShadowBlocks[i];
    
    SBB.F = &RetInfo;
    SBB.idx = i;
    SBB.BB = BB;
    // True loop scope will be computed later, but by default...
    SBB.outerScope = 0;
    SBB.naturalScope = 0;

    const Loop* BBScope =  LI->getLoopFor(BB);

    // Find successor block indices:

    succ_iterator SI = succ_begin(BB), SE = succ_end(BB);
    uint32_t succSize = std::distance(SI, SE);
    SBB.succIdxs = ImmutableArray<uint32_t>(new uint32_t[succSize], succSize);

    for(uint32_t j = 0; SI != SE; ++SI, ++j) {

      SBB.succIdxs[j] = BBIndices[*SI];

    }

    // Find predecessor block indices:

    pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
    uint32_t predSize = std::distance(PI, PE);
    SBB.predIdxs = ImmutableArray<uint32_t>(new uint32_t[predSize], predSize);
    
    for(uint32_t j = 0; PI != PE; ++PI, ++j) {

      SBB.predIdxs[j] = BBIndices[*PI];
      
      if(SBB.predIdxs[j] > i) {

	if((!BBScope) || SBB.BB != BBScope->getHeader()) {

	  errs() << "Warning: block " << SBB.BB->getName() << " in " << F.getName() << " has predecessor " << (*PI)->getName() << " that comes after it topologically, but this is not a loop header. The program is not in well-nested natural loop form.\n";

	}

      }

    }

    // Find instruction def/use indices:
    ShadowInstructionInvar* insts = new ShadowInstructionInvar[BB->size()];

    BasicBlock::iterator BI = BB->begin(), BE = BB->end();
    for(uint32_t j = 0; BI != BE; ++BI, ++j) {

      Instruction* I = BI;
      ShadowInstructionInvar& SI = insts[j];

      SI.idx = j;
      SI.parent = &SBB;
      SI.I = I;
      
      // Get operands indices:
      uint32_t NumOperands;
      ShadowInstIdx* operandIdxs;
      if(PHINode* PN = dyn_cast<PHINode>(I)) {

	NumOperands = PN->getNumIncomingValues();
	operandIdxs = new ShadowInstIdx[NumOperands];
	uint32_t* incomingBBs = new uint32_t[NumOperands];

	for(unsigned k = 0, kend = PN->getNumIncomingValues(); k != kend; ++k) {

	  if(Instruction* OpI = dyn_cast<Instruction>(PN->getIncomingValue(k)))
	    operandIdxs[k] = ShadowInstIdx(BBIndices[OpI->getParent()], IIndices[OpI]);
	  else if(GlobalVariable* OpGV = const_cast<GlobalVariable*>(getGlobalVar(PN->getIncomingValue(k))))
	    operandIdxs[k] = ShadowInstIdx(INVALID_BLOCK_IDX, getShadowGlobalIndex(OpGV));
	  else
	    operandIdxs[k] = ShadowInstIdx();
	  incomingBBs[k] = BBIndices[PN->getIncomingBlock(k)];

	}

	SI.operandBBs = ImmutableArray<uint32_t>(incomingBBs, NumOperands);

      }
      else {

	NumOperands = I->getNumOperands();
	operandIdxs = new ShadowInstIdx[NumOperands];

	for(unsigned k = 0, kend = I->getNumOperands(); k != kend; ++k) {
	  
	  if(Instruction* OpI = dyn_cast<Instruction>(I->getOperand(k)))
	    operandIdxs[k] = ShadowInstIdx(BBIndices[OpI->getParent()], IIndices[OpI]);
	  else if(GlobalVariable* OpGV = const_cast<GlobalVariable*>(getGlobalVar(I->getOperand(k))))
	    operandIdxs[k] = ShadowInstIdx(INVALID_BLOCK_IDX, getShadowGlobalIndex(OpGV));
	  else if(BasicBlock* OpBB = dyn_cast<BasicBlock>(I->getOperand(k)))
	    operandIdxs[k] = ShadowInstIdx(BBIndices[OpBB], INVALID_INSTRUCTION_IDX);
	  else
	    operandIdxs[k] = ShadowInstIdx();

	}

      }

      SI.operandIdxs = ImmutableArray<ShadowInstIdx>(operandIdxs, NumOperands);

      // Get user indices:
      unsigned nUsers = std::distance(I->use_begin(), I->use_end());

      ShadowInstIdx* userIdxs = new ShadowInstIdx[nUsers];

      Instruction::use_iterator UI;
      unsigned k;
      for(k = 0, UI = I->use_begin(); k != nUsers; ++k, ++UI) {

	if(Instruction* UserI = dyn_cast<Instruction>(UI->getUser())) {

	  userIdxs[k] = ShadowInstIdx(BBIndices[UserI->getParent()], IIndices[UserI]);

	}
	else {

	  userIdxs[k] = ShadowInstIdx();
	  
	}

      }

      SI.userIdxs = ImmutableArray<ShadowInstIdx>(userIdxs, nUsers);

    }

    SBB.insts = ImmutableArray<ShadowInstructionInvar>(insts, BB->size());

  }

  RetInfo.BBs = ImmutableArray<ShadowBBInvar>(FShadowBlocks, TopOrderedBlocks.size());

  // Get user info for arguments:

  ShadowArgInvar* Args = new ShadowArgInvar[F.arg_size()];

  Function::arg_iterator AI = F.arg_begin();
  uint32_t i = 0;
  for(; i != F.arg_size(); ++i, ++AI) {

    Argument* A = AI;
    ShadowArgInvar& SArg = Args[i];
    SArg.A = A;
      
    unsigned j = 0;
    Argument::use_iterator UI = A->use_begin(), UE = A->use_end();

    uint32_t nUsers = std::distance(UI, UE);
    ShadowInstIdx* Users = new ShadowInstIdx[nUsers];

    for(; UI != UE; ++UI, ++j) {

      Value* UsedV = UI->getUser();
      if(Instruction* UsedI = dyn_cast<Instruction>(UsedV)) {

	Users[j] = ShadowInstIdx(BBIndices[UsedI->getParent()], IIndices[UsedI]);

      }
      else {

	Users[j] = ShadowInstIdx();

      }

    }

    SArg.userIdxs = ImmutableArray<ShadowInstIdx>(Users, nUsers);

  }

  RetInfo.Args = ImmutableArray<ShadowArgInvar>(Args, F.arg_size());

  // Populate the map from loop headers to header indices. Thanks to the topological sort,
  // each loop consists of its header block plus L->getBlocks().size() further contiguous blocks,
  // which makes the is-this-block-in-the-loop test easy to compute.
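  // (In other words, checking whether a block lies inside a given loop reduces to a range
  //  check of its index against the loop's contiguous block-index range, rather than
  //  consulting Loop::contains().)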

  DominatorTree* thisDT = DTs[&F];

  for(LoopInfo::iterator it = LI->begin(), it2 = LI->end(); it != it2; ++it) {
    ShadowLoopInvar* newL = getLoopInfo(&RetInfo, BBIndices, *it, thisDT, 0);
    RetInfo.TopLevelLoops.push_back(newL);
  }

  // Count the alloca instructions at the start of the function; this controls how large
  // the std::vector that represents the frame will be when it is initialised.
  RetInfo.frameSize = 0;
  for(BasicBlock::iterator it = F.getEntryBlock().begin(), itend = F.getEntryBlock().end(); it != itend && isa<AllocaInst>(it); ++it)
    ++RetInfo.frameSize;
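  // (For example, an entry block beginning "alloca; alloca; store; ..." gives frameSize == 2:
  //  only the leading run of allocas is counted.)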

  // "&& RootIA" checks whether we're inside the initial context creation, in which case we should
  // allocate a frame whether or not main can ever allocate, to avoid the frame index underflowing
  // in some circumstances.
  if((!RetInfo.frameSize) && RootIA) {

    // Magic value indicating the function will never alloca anything and we can skip all frame processing.
    RetInfo.frameSize = -1;

    for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E && RetInfo.frameSize == -1; ++I) {
      
      if(isa<AllocaInst>(*I))
	RetInfo.frameSize = 0;

    }
      

  }

  return RetInfoP;

}