void IntTest::pbzip2_like(Module &M) {
  TestBanner X("pbzip2-like");

  vector<StoreInst *> writes;
  Function *f_rand = M.getFunction("rand");
  assert(f_rand);
  Function *f_producer = M.getFunction("_Z8producerPv.SLICER");
  assert(f_producer);
  // Search along the CFG. We need to make sure reads and writes are in
  // a consistent order.
  for (Function::iterator bb = f_producer->begin();
       bb != f_producer->end(); ++bb) {
    for (BasicBlock::iterator ins = bb->begin(); ins != bb->end(); ++ins) {
      if (CallInst *ci = dyn_cast<CallInst>(ins)) {
        if (ci->getCalledFunction() == f_rand) {
          for (BasicBlock::iterator j = bb->begin(); j != bb->end(); ++j) {
            if (StoreInst *si = dyn_cast<StoreInst>(j))
              writes.push_back(si);
          }
        }
      }
    }
  }
  errs() << "=== writes ===\n";
  for (size_t i = 0; i < writes.size(); ++i)
    errs() << *writes[i] << "\n";

  vector<LoadInst *> reads;
  Function *f_consumer = M.getFunction("_Z8consumerPv.SLICER");
  assert(f_consumer);
  for (Function::iterator bb = f_consumer->begin();
       bb != f_consumer->end(); ++bb) {
    for (BasicBlock::iterator ins = bb->begin(); ins != bb->end(); ++ins) {
      if (ins->getOpcode() == Instruction::Add &&
          ins->getType()->isIntegerTy(8)) {
        LoadInst *li = dyn_cast<LoadInst>(ins->getOperand(0));
        assert(li);
        reads.push_back(li);
      }
    }
  }
  errs() << "=== reads ===\n";
  for (size_t i = 0; i < reads.size(); ++i)
    errs() << *reads[i] << "\n";

  assert(writes.size() == reads.size());
  AliasAnalysis &AA = getAnalysis<AdvancedAlias>();
  for (size_t i = 0; i < writes.size(); ++i) {
    for (size_t j = i + 1; j < reads.size(); ++j) {
      errs() << "i = " << i << ", j = " << j << "... ";
      AliasAnalysis::AliasResult res = AA.alias(
          writes[i]->getPointerOperand(),
          reads[j]->getPointerOperand());
      assert(res == AliasAnalysis::NoAlias);
      print_pass(errs());
    }
  }
}
// TODO: Ideally we should share Inliner's InlineCost Analysis code.
// For now use a simplified version. The returned 'InlineCost' will be used
// to estimate the size cost as well as runtime cost of the BB.
int PartialInlinerImpl::computeBBInlineCost(BasicBlock *BB) {
  int InlineCost = 0;
  const DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    switch (I->getOpcode()) {
    case Instruction::BitCast:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::Alloca:
      continue;
    case Instruction::GetElementPtr:
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        continue;
    default:
      break;
    }

    IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(I);
    if (IntrInst) {
      if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start ||
          IntrInst->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(I)) {
      InlineCost += getCallsiteCost(CallSite(CI), DL);
      continue;
    }

    if (InvokeInst *II = dyn_cast<InvokeInst>(I)) {
      InlineCost += getCallsiteCost(CallSite(II), DL);
      continue;
    }

    if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
      InlineCost += (SI->getNumCases() + 1) * InlineConstants::InstrCost;
      continue;
    }
    InlineCost += InlineConstants::InstrCost;
  }
  return InlineCost;
}
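// Hedged companion sketch: aggregating the per-block cost above over a
// candidate outline region, bailing out once a budget is exceeded. The
// helper name, the ArrayRef-of-blocks shape, and the threshold comparison
// are illustrative assumptions, not PartialInliner's actual driver.
static bool regionIsCheapToOutline(PartialInlinerImpl &PI,
                                   ArrayRef<BasicBlock *> OutlineRegion,
                                   int Threshold) {
  int Cost = 0;
  for (BasicBlock *BB : OutlineRegion) {
    Cost += PI.computeBBInlineCost(BB);
    if (Cost > Threshold) // stop early once over budget
      return false;
  }
  return true;
}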
bool EraseUclibcFiniPass::runOnModule(Module &M) {
  Function *f = M.getFunction("tern_exit");
  Function *fini = M.getFunction("__uClibc_fini");
  if (!f || !fini)
    return false;
  for (Function::iterator b = f->begin(), be = f->end(); b != be; ++b) {
    for (BasicBlock::iterator i = b->begin(), ie = b->end(); i != ie; ++i) {
      if (i->getOpcode() == Instruction::Call) {
        CallInst *ci = dyn_cast<CallInst>(i);
        assert(ci);
        if (ci->getCalledFunction() == fini) {
          i->eraseFromParent();
          return true;
        }
      }
    }
  }
  return false;
}
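// The pass above returns right after the erase, which conveniently sidesteps
// iterator invalidation. If the call could appear more than once, the loop
// needs the usual erase-safe idiom of advancing the iterator before erasing.
// A minimal sketch; the helper name is an assumption:
static bool eraseAllCallsTo(Function *F, Function *Callee) {
  bool Changed = false;
  for (Function::iterator b = F->begin(), be = F->end(); b != be; ++b) {
    for (BasicBlock::iterator i = b->begin(), ie = b->end(); i != ie; ) {
      CallInst *ci = dyn_cast<CallInst>(&*i++); // step past before erasing
      if (ci && ci->getCalledFunction() == Callee) {
        ci->eraseFromParent();
        Changed = true;
      }
    }
  }
  return Changed;
}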
// bypassSlowDivision - This optimization identifies DIV instructions that can
// be profitably bypassed and carried out with a shorter, faster divide.
bool llvm::bypassSlowDivision(Function &F,
                              Function::iterator &I,
                              const DenseMap<unsigned int, unsigned int> &BypassWidths) {
  DivCacheTy DivCache;

  bool MadeChange = false;
  for (BasicBlock::iterator J = I->begin(); J != I->end(); ++J) {
    // Get instruction details
    unsigned Opcode = J->getOpcode();
    bool UseDivOp = Opcode == Instruction::SDiv || Opcode == Instruction::UDiv;
    bool UseRemOp = Opcode == Instruction::SRem || Opcode == Instruction::URem;
    bool UseSignedOp = Opcode == Instruction::SDiv ||
                       Opcode == Instruction::SRem;

    // Only optimize div or rem ops
    if (!UseDivOp && !UseRemOp)
      continue;

    // Skip division on vector types; only optimize integer instructions
    if (!J->getType()->isIntegerTy())
      continue;

    // Get bitwidth of div/rem instruction
    IntegerType *T = cast<IntegerType>(J->getType());
    unsigned int bitwidth = T->getBitWidth();

    // Continue if bitwidth is not bypassed
    DenseMap<unsigned int, unsigned int>::const_iterator BI =
        BypassWidths.find(bitwidth);
    if (BI == BypassWidths.end())
      continue;

    // Get type for div/rem instruction with bypass bitwidth
    IntegerType *BT = IntegerType::get(J->getContext(), BI->second);

    MadeChange |= reuseOrInsertFastDiv(F, I, J, BT, UseDivOp,
                                       UseSignedOp, DivCache);
  }

  return MadeChange;
}
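// Hedged usage sketch: how a target might drive bypassSlowDivision over a
// whole function. The 32 -> 8 mapping mirrors the X86 slow-divide use case
// (carry out a 32-bit div/rem with an 8-bit one when the operands fit); the
// wrapper itself and its name are assumptions, not this file's code.
static bool bypassAllSlowDivision(Function &F) {
  DenseMap<unsigned int, unsigned int> BypassWidths;
  BypassWidths[32] = 8; // bypass 32-bit div/rem with an 8-bit divide
  bool Changed = false;
  for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
    Changed |= llvm::bypassSlowDivision(F, I, BypassWidths);
  return Changed;
}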
/// Determine whether the instructions in this range may be safely and cheaply
/// speculated. This is not an important enough situation to develop complex
/// heuristics. We handle a single arithmetic instruction along with any type
/// conversions.
static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
                                  BasicBlock::iterator End) {
  bool seenIncrement = false;
  for (BasicBlock::iterator I = Begin; I != End; ++I) {
    if (!isSafeToSpeculativelyExecute(I))
      return false;

    if (isa<DbgInfoIntrinsic>(I))
      continue;

    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::GetElementPtr:
      // GEPs are cheap if all indices are constant.
      if (!cast<GEPOperator>(I)->hasAllConstantIndices())
        return false;
      // fall-thru to increment case
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
      if (seenIncrement)
        return false;
      seenIncrement = true;
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // ignore type conversions
      break;
    }
  }
  return true;
}
void TaskDebugBranchCheck::addFunctionSummaries(BasicBlock* from_bb,
                                                BasicBlock* to_bb,
                                                Instruction* first_inst) {
  //errs() << "++++++++++++++++++++DETECTED BRANCHES++++++++++++++++++++++\n";
  //errs() << "BB 1 First inst: " << *(from_bb->getFirstNonPHI()) << "\n";
  //TerminatorInst *TInst = from_bb->getTerminator();
  //errs() << "BB 1 Last inst: " << *TInst << "\n\n";
  //errs() << "BB 2 First inst: " << *(to_bb->getFirstNonPHI()) << "\n";
  //TInst = to_bb->getTerminator();
  //errs() << "BB 2 Last inst: " << *TInst << "\n\n";
  //errs() << "++++++++++++++++++++DETECTED BRANCHES++++++++++++++++++++++\n";

  std::vector<Value*> locks_acq;
  std::vector<Value*> locks_rel;
  bool startInst = false;

  for (BasicBlock::iterator i = from_bb->begin(); i != from_bb->end(); ++i) {
    if (startInst == false) {
      if (first_inst == dyn_cast<Instruction>(i)) {
        startInst = true;
      } else {
        continue;
      }
    }

    switch (i->getOpcode()) {
      case Instruction::Call: {
        CallInst* callInst = dyn_cast<CallInst>(i);
        if (callInst->getCalledFunction() != NULL) {
          if (callInst->getCalledFunction() == lockAcquire) {
            locks_acq.push_back(callInst->getArgOperand(1));
          } else if (callInst->getCalledFunction() == lockRelease) {
            locks_rel.push_back(callInst->getArgOperand(1));
          }
        }
        break;
      }
      case Instruction::Load: {
        //errs() << "LOAD INST " << *i << "\n";
        Value* op_l = i->getOperand(0);
        if (hasAnnotation(i, op_l, "check_av", 1)) {
          Constant* read =
              ConstantInt::get(Type::getInt32Ty(to_bb->getContext()), 0);
          instrument_access(to_bb->getFirstNonPHI(), op_l, read,
                            locks_acq, locks_rel);
        }
        break;
      }
      case Instruction::Store: {
        //errs() << "STR INST " << *i << "\n";
        Value* op_s = i->getOperand(1);
        if (hasAnnotation(i, op_s, "check_av", 1)) {
          Constant* write =
              ConstantInt::get(Type::getInt32Ty(to_bb->getContext()), 1);
          instrument_access(to_bb->getFirstNonPHI(), op_s, write,
                            locks_acq, locks_rel);
        }
        break;
      }
    }
  }
}
bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
        // Inline ASM is okay, unless it clobbers the ctr register.
        InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
        for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
          InlineAsm::ConstraintInfo &C = CIV[i];
          if (C.Type != InlineAsm::isInput)
            for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
              if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
                return true;
        }

        continue;
      }

      if (!TM)
        return true;
      const TargetLowering *TLI = TM->getTargetLowering();

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
                       !defined(setjmp_undefined_for_msvc)
#  pragma push_macro("setjmp")
#  undef setjmp
#  define setjmp_undefined_for_msvc
#endif

          case Intrinsic::setjmp:

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
  // let's return it to _setjmp state
#  pragma pop_macro("setjmp")
#  undef setjmp_undefined_for_msvc
#endif

          case Intrinsic::longjmp:

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
          case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:     Opcode = ISD::FROUND;     break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc::Func Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc::copysign:
          case LibFunc::copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc::copysignl:
            return true;
          case LibFunc::fabs:
          case LibFunc::fabsf:
          case LibFunc::fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc::sqrt:
          case LibFunc::sqrtf:
          case LibFunc::sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc::floor:
          case LibFunc::floorf:
          case LibFunc::floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc::nearbyint:
          case LibFunc::nearbyintf:
          case LibFunc::nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc::ceil:
          case LibFunc::ceilf:
          case LibFunc::ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc::rint:
          case LibFunc::rintf:
          case LibFunc::rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc::round:
          case LibFunc::roundf:
          case LibFunc::roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc::trunc:
          case LibFunc::truncf:
          case LibFunc::truncl:
            Opcode = ISD::FTRUNC; break;
          }

          MVT VTy =
            TLI->getSimpleValueType(CI->getArgOperand(0)->getType(), true);
          if (VTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, VTy))
            continue;
          else if (VTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, VTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_f128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          (TT.isArch32Bit() &&
           (CI->getSrcTy()->getScalarType()->isIntegerTy(64) ||
            CI->getDestTy()->getScalarType()->isIntegerTy(64))
          ))
        return true;
    } else if (TT.isArch32Bit() &&
               J->getType()->getScalarType()->isIntegerTy(64) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (!TM)
        return true;
      const TargetLowering *TLI = TM->getTargetLowering();

      if (TLI->supportJumpTables() &&
          SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }
  }

  return false;
}
// Insert code at the end of a basic block.
void BytesFlops::insert_end_bb_code (Module* module, StringRef function_name,
                                     int& must_clear,
                                     BasicBlock::iterator& insert_before) {
  // Keep track of how the basic block terminated.
  Instruction& inst = *insert_before;
  unsigned int opcode = inst.getOpcode();   // Terminator instruction's opcode
  LLVMContext& globctx = module->getContext();
  int bb_end_type;
  switch (opcode) {
    case Instruction::IndirectBr:
    case Instruction::Switch:
      bb_end_type = BF_END_BB_DYNAMIC;
      increment_global_array(insert_before, terminator_var,
                             ConstantInt::get(globctx, APInt(64, bb_end_type)),
                             one);
      break;

    case Instruction::Br:
      if (dyn_cast<BranchInst>(&inst)->isConditional()) {
        bb_end_type = BF_END_BB_DYNAMIC;
        static_cond_brs++;
      }
      else
        bb_end_type = BF_END_BB_STATIC;
      increment_global_array(insert_before, terminator_var,
                             ConstantInt::get(globctx, APInt(64, bb_end_type)),
                             one);
      break;

    default:
      break;
  }
  increment_global_array(insert_before, terminator_var,
                         ConstantInt::get(globctx, APInt(64, BF_END_BB_ANY)),
                         one);

  // If we're instrumenting every basic block, insert calls to
  // bf_accumulate_bb_tallies() and bf_report_bb_tallies().
  if (InstrumentEveryBB) {
    callinst_create(accum_bb_tallies, insert_before);
    callinst_create(report_bb_tallies, insert_before);
  }

  // If we're instrumenting by function, insert a call to
  // bf_assoc_counters_with_func() at the end of the basic block.
  if (TallyByFunction) {
    vector<Value*> arg_list;
    arg_list.push_back(map_func_name_to_arg(module, function_name));
    callinst_create(assoc_counts_with_func, arg_list, insert_before);
  }

  // Reset all of our counter variables.
  if (InstrumentEveryBB || TallyByFunction) {
    if (must_clear & CLEAR_LOADS) {
      new StoreInst(zero, load_var, false, insert_before);
      new StoreInst(zero, load_inst_var, false, insert_before);
    }
    if (must_clear & CLEAR_STORES) {
      new StoreInst(zero, store_var, false, insert_before);
      new StoreInst(zero, store_inst_var, false, insert_before);
    }
    if (must_clear & CLEAR_FLOPS)
      new StoreInst(zero, flop_var, false, insert_before);
    if (must_clear & CLEAR_FP_BITS)
      new StoreInst(zero, fp_bits_var, false, insert_before);
    if (must_clear & CLEAR_OPS)
      new StoreInst(zero, op_var, false, insert_before);
    if (must_clear & CLEAR_OP_BITS)
      new StoreInst(zero, op_bits_var, false, insert_before);
    if (must_clear & CLEAR_MEM_TYPES) {
      // Zero out the entire array.
      LoadInst* mem_insts_addr = new LoadInst(mem_insts_var, "mi", false,
                                              insert_before);
      mem_insts_addr->setAlignment(8);
      LLVMContext& globctx = module->getContext();
      CastInst* mem_insts_cast =
        new BitCastInst(mem_insts_addr,
                        PointerType::get(IntegerType::get(globctx, 8), 0),
                        "miv", insert_before);
      static ConstantInt* zero_8bit =
        ConstantInt::get(globctx, APInt(8, 0));
      static ConstantInt* mem_insts_size =
        ConstantInt::get(globctx, APInt(64, NUM_MEM_INSTS*sizeof(uint64_t)));
      static ConstantInt* mem_insts_align =
        ConstantInt::get(globctx, APInt(32, sizeof(uint64_t)));
      static ConstantInt* zero_1bit =
        ConstantInt::get(globctx, APInt(1, 0));
      std::vector<Value*> func_args;
      func_args.push_back(mem_insts_cast);
      func_args.push_back(zero_8bit);
      func_args.push_back(mem_insts_size);
      func_args.push_back(mem_insts_align);
      func_args.push_back(zero_1bit);
      callinst_create(memset_intrinsic, func_args, insert_before);
    }
    if (TallyInstMix) {
      // If we're tallying instructions we don't need a must_clear
      // bit to tell us that an instruction was executed.  We always
      // need to zero out the entire array.
      LoadInst* tally_insts_addr = new LoadInst(inst_mix_histo_var, "ti",
                                                false, insert_before);
      tally_insts_addr->setAlignment(8);
      LLVMContext& globctx = module->getContext();
      CastInst* tally_insts_cast =
        new BitCastInst(tally_insts_addr,
                        PointerType::get(IntegerType::get(globctx, 8), 0),
                        "miv", insert_before);
      static ConstantInt* zero_8bit =
        ConstantInt::get(globctx, APInt(8, 0));
      static uint64_t totalInstCount = uint64_t(Instruction::OtherOpsEnd);
      static ConstantInt* tally_insts_size =
        ConstantInt::get(globctx, APInt(64, totalInstCount*sizeof(uint64_t)));
      static ConstantInt* tally_insts_align =
        ConstantInt::get(globctx, APInt(32, sizeof(uint64_t)));
      static ConstantInt* zero_1bit =
        ConstantInt::get(globctx, APInt(1, 0));
      std::vector<Value*> func_args;
      func_args.push_back(tally_insts_cast);
      func_args.push_back(zero_8bit);
      func_args.push_back(tally_insts_size);
      func_args.push_back(tally_insts_align);
      func_args.push_back(zero_1bit);
      callinst_create(memset_intrinsic, func_args, insert_before);
    }
    insert_zero_array_code(module, terminator_var, BF_END_BB_NUM,
                           insert_before);
    insert_zero_array_code(module, mem_intrinsics_var, BF_NUM_MEM_INTRIN,
                           insert_before);
    must_clear = 0;
  }

  // If we're instrumenting every basic block, insert a call to
  // bf_reset_bb_tallies().
  if (InstrumentEveryBB)
    callinst_create(reset_bb_tallies, insert_before);

  // If we're instrumenting by call stack, insert a call to
  // bf_pop_function() at every return from the function.
  if (TrackCallStack && insert_before->getOpcode() == Instruction::Ret)
    callinst_create(pop_function, insert_before);
}
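// The hand-assembled five-argument memset calls above predate IRBuilder's
// convenience helper. A minimal alternative sketch (not Byfl's actual code)
// that zeroes a byte array via the same llvm.memset intrinsic; the helper
// name is an assumption, and the unsigned-alignment overload is the one from
// pre-MaybeAlign LLVM versions:
#include "llvm/IR/IRBuilder.h"

static void zeroByteArray(llvm::Value *BytePtr, uint64_t NumBytes,
                          llvm::Instruction *InsertBefore) {
  llvm::IRBuilder<> Builder(InsertBefore);
  // Emits llvm.memset(BytePtr, 0, NumBytes) with alignment 8, non-volatile.
  Builder.CreateMemSet(BytePtr, Builder.getInt8(0), NumBytes, 8);
}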
void MutationGen::genMutationFile(Function & F){
  int index = 0;
  for(Function::iterator FI = F.begin(); FI != F.end(); ++FI){
    BasicBlock *BB = FI;

#if NEED_LOOP_INFO
    bool isLoop = LI->getLoopFor(BB);
#endif

    for(BasicBlock::iterator BI = BB->begin(); BI != BB->end(); ++BI, index++){
      unsigned opc = BI->getOpcode();
      if( !((opc >= 14 && opc <= 31) || opc == 34 || opc == 52 || opc == 55) ){
        // omit alloca and getelementptr
        continue;
      }
      int idxtmp = index;

#if NEED_LOOP_INFO
      if(isLoop){
        assert(idxtmp != 0);
        idxtmp = 0 - idxtmp;
      }
#endif

      switch(opc){
        case Instruction::Add:
        case Instruction::Sub:
        case Instruction::Mul:
        case Instruction::UDiv:
        case Instruction::SDiv:
        case Instruction::URem:
        case Instruction::SRem:{
          // TODO: add for i1, i8. Support i32 and i64 first
          if(! (BI->getType()->isIntegerTy(32) ||
                BI->getType()->isIntegerTy(64))){
            continue;
          }
          genLVR(BI, F.getName(), idxtmp);
          genUOI(BI, F.getName(), idxtmp);
          genROV(BI, F.getName(), idxtmp);
          genABV(BI, F.getName(), idxtmp);
          genAOR(BI, F.getName(), idxtmp);
          break;
        }
        case Instruction::ICmp:{
          if(! (BI->getOperand(0)->getType()->isIntegerTy(32) ||
                BI->getOperand(0)->getType()->isIntegerTy(64)) ){
            continue;
          }
          genLVR(BI, F.getName(), idxtmp);
          genUOI(BI, F.getName(), idxtmp);
          genROV(BI, F.getName(), idxtmp);
          genABV(BI, F.getName(), idxtmp);
          genROR(BI, F.getName(), idxtmp);
          break;
        }
        case Instruction::Shl:
        case Instruction::LShr:
        case Instruction::AShr:
        case Instruction::And:
        case Instruction::Or:
        case Instruction::Xor:{
          // TODO: add for i1, i8. Support i32 and i64 first
          if(! (BI->getType()->isIntegerTy(32) ||
                BI->getType()->isIntegerTy(64))){
            continue;
          }
          genLVR(BI, F.getName(), idxtmp);
          genUOI(BI, F.getName(), idxtmp);
          genROV(BI, F.getName(), idxtmp);
          genABV(BI, F.getName(), idxtmp);
          genLOR(BI, F.getName(), idxtmp);
          break;
        }
        case Instruction::Call: {
          CallInst* call = cast<CallInst>(BI);
          // TODO: omit calls through function pointers
          if(call->getCalledFunction() == NULL){
            continue;
          }
          /*Value* callee = dyn_cast<Value>(&*(call->op_end() - 1));
          if(callee->getType()->isPointerTy()){
            continue;
          }*/
          StringRef name = call->getCalledFunction()->getName();
          if(name.startswith("llvm")){ // omit llvm intrinsic functions
            continue;
          }
          // TODO: add for omitting i8. Support i32 and i64 first
          if(! ( isSupportedType(BI->getType()) ||
                 BI->getType()->isVoidTy() ) ){
            continue;
          }
          genLVR(BI, F.getName(), idxtmp);
          genUOI(BI, F.getName(), idxtmp);
          genROV(BI, F.getName(), idxtmp);
          genABV(BI, F.getName(), idxtmp);
          genSTDCall(BI, F.getName(), idxtmp);
          break;
        }
        case Instruction::Store:{
          auto addr = BI->op_begin() + 1; // the pointer operand of the store
          if( ! (dyn_cast<LoadInst>(&*addr) ||
                 dyn_cast<AllocaInst>(&*addr) ||
                 dyn_cast<Constant>(&*addr) ||
                 dyn_cast<GetElementPtrInst>(&*addr) ) ){
            continue;
          }
          // TODO: add for i8
          Value* tobestore = dyn_cast<Value>(BI->op_begin());
          if(! isSupportedType(tobestore->getType())){
            continue;
          }
          genLVR(BI, F.getName(), idxtmp);
          genUOI(BI, F.getName(), idxtmp);
          genABV(BI, F.getName(), idxtmp);
          genSTDStore(BI, F.getName(), idxtmp);
          break;
        }
        case Instruction::GetElementPtr:{
          // TODO
          break;
        }
        default:
          break;
      }
    }
  }
  ofresult.flush();
}
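// genMutationFile() leans on an isSupportedType() helper that the snippet
// does not define. Given the "support i32 and i64 first" TODOs above, a
// plausible definition is the one below; this is an assumption about the
// project's helper, not its actual code.
static bool isSupportedType(Type *t) {
  return t->isIntegerTy(32) || t->isIntegerTy(64);
}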
void ArrayObfs::ArrObfuscate ( Function *F ) {
  // Iterate the whole Function
  Function *f = F;
  for ( Function::iterator bb = f->begin(); bb != f->end(); ++bb ) {
    for ( BasicBlock::iterator inst = bb->begin(); inst != bb->end(); ) {
      if ( inst->getOpcode() == 29 ) {  // getelementptr
        //errs() << "INST : " << *inst << "\n";

        GetElementPtrInst *Ary = dyn_cast<GetElementPtrInst>(&*inst);
        Value *ptrVal = Ary->getOperand(0);
        Type *type = ptrVal->getType();

        unsigned numOfOprand = Ary->getNumOperands();
        unsigned lastOprand = numOfOprand - 1;

        // Check Type Array
        if ( PointerType *ptrType = dyn_cast<PointerType>( type ) ) {
          Type *elementType = ptrType->getElementType();
          if ( elementType->isArrayTy() ) {
            // Skip if Index is a Variable
            if ( dyn_cast<ConstantInt>( Ary->getOperand( lastOprand ) ) ) {
              // Do Real Stuff
              Value *oprand = Ary->getOperand( lastOprand );
              Value *basePtr = Ary->getOperand( 0 );
              APInt offset = dyn_cast<ConstantInt>(oprand)->getValue();
              Value *prevPtr = basePtr;

              // Enter a Loop to Perform Random Obfuscation
              unsigned cnt = 100;

              // Prolog : Clone the Original Inst
              unsigned ObfsIdx = cryptoutils->get_uint64_t() & 0xffff;
              Value *newOprand = ConstantInt::get( oprand->getType(), ObfsIdx );
              Instruction *gep = inst->clone();
              gep->setOperand( lastOprand, newOprand );
              gep->setOperand( 0, prevPtr );
              gep->insertBefore( inst );
              prevPtr = gep;
              offset = offset - ObfsIdx;

              // Create a Global Variable to Avoid Optimization
              Module *M = f->getParent();
              Constant *initGV = ConstantInt::get( prevPtr->getType(), 0 );
              GlobalVariable *gv =
                  new GlobalVariable( *M, prevPtr->getType(), false,
                                      GlobalValue::CommonLinkage, initGV );

              while ( cnt-- ) {
                // Iteratively Generate Obfuscated Code
                switch( cryptoutils->get_uint64_t() & 7 ) {
                  // Random Indexing Obfuscation
                  case 0 :
                  case 1 :
                  case 2 : {
                    //errs() << "=> Random Index \n";

                    // Create Obfuscated New Operand in ConstantInt Type
                    unsigned ObfsIdx = cryptoutils->get_uint64_t() & 0xffff;
                    Value *newOprand =
                        ConstantInt::get( oprand->getType(), ObfsIdx );

                    // Create GetElementPtrInst Instruction
                    GetElementPtrInst *gep =
                        GetElementPtrInst::Create( prevPtr, newOprand, "", inst );

                    // Set prevPtr
                    prevPtr = gep;
                    //errs() << "Created : " << *prevPtr << "\n";

                    offset = offset - ObfsIdx;
                    break;
                  }

                  // Ptr Dereference
                  case 3 :
                  case 4 : {
                    //errs() << "=> Ptr Dereference \n";

                    Module *M = f->getParent();
                    Value *ONE =
                        ConstantInt::get( Type::getInt32Ty( M->getContext() ), 1 );
                    Value *tmp = new AllocaInst( prevPtr->getType(), ONE, "", inst );

                    new StoreInst( prevPtr, tmp, inst );
                    prevPtr = new LoadInst( tmp, "", inst );
                    break;
                  }

                  // Ptr Value Transform
                  case 5 :
                  case 6 :
                  case 7 : {
                    //errs() << "=> Ptr Value Trans \n";

                    unsigned RandNum = cryptoutils->get_uint64_t();
                    Value *ObfsVal =
                        ConstantInt::get( prevPtr->getType(), RandNum );
                    BinaryOperator *op =
                        BinaryOperator::Create( Instruction::FAdd, prevPtr,
                                                ObfsVal, "", inst );
                    new StoreInst( prevPtr, gv, inst );
                    BinaryOperator::Create( Instruction::FSub, gv, ObfsVal,
                                            "", inst );
                    prevPtr = new LoadInst( gv, "", inst );
                    break;
                  }
                }
              }

              // Epilog : Fix the Original Indexing
              {
                Value *fixOprand = ConstantInt::get( oprand->getType(), offset );
                // Refine the Last Instruction
                GetElementPtrInst *gep =
                    GetElementPtrInst::Create( prevPtr, fixOprand, "", inst );

                // Fix the Relationship
                inst->replaceAllUsesWith( gep );

                // Finally : Unlink This Instruction From Parent
                Instruction *DI = inst++;
                //errs() << "user_back : " << *(DI->user_back()) << "\n";
                DI->removeFromParent();
              }
            }  // End : Variable Index
            else { inst++; }
          }  // End : Check Array Type
          else { inst++; }
        }  // End : Check Pointer Type
        else { inst++; }
      }  // End : Check Opcode GetElementPtr
      else { inst++; }
    }
  }
  ++ArrayMod;
}
bool Substitution::substitute(Function *f) {
  Function *tmp = f;

  // Loop for the number of times we run the pass on the function
  int times = ObfTimes;
  do {
    for (Function::iterator bb = tmp->begin(); bb != tmp->end(); ++bb) {
      for (BasicBlock::iterator inst = bb->begin(); inst != bb->end(); ++inst) {
        if (inst->isBinaryOp()) {
          switch (inst->getOpcode()) {
          case BinaryOperator::Add:
            // case BinaryOperator::FAdd:
            // Substitute with random add operation
            (this->*funcAdd[llvm::cryptoutils->get_range(NUMBER_ADD_SUBST)])(
                cast<BinaryOperator>(inst));
            ++Add;
            break;
          case BinaryOperator::Sub:
            // case BinaryOperator::FSub:
            // Substitute with random sub operation
            (this->*funcSub[llvm::cryptoutils->get_range(NUMBER_SUB_SUBST)])(
                cast<BinaryOperator>(inst));
            ++Sub;
            break;
          case BinaryOperator::Mul:
          case BinaryOperator::FMul:
            //++Mul;
            break;
          case BinaryOperator::UDiv:
          case BinaryOperator::SDiv:
          case BinaryOperator::FDiv:
            //++Div;
            break;
          case BinaryOperator::URem:
          case BinaryOperator::SRem:
          case BinaryOperator::FRem:
            //++Rem;
            break;
          case Instruction::Shl:
            //++Shi;
            break;
          case Instruction::LShr:
            //++Shi;
            break;
          case Instruction::AShr:
            //++Shi;
            break;
          case Instruction::And:
            (this->*funcAnd[llvm::cryptoutils->get_range(2)])(
                cast<BinaryOperator>(inst));
            ++And;
            break;
          case Instruction::Or:
            (this->*funcOr[llvm::cryptoutils->get_range(2)])(
                cast<BinaryOperator>(inst));
            ++Or;
            break;
          case Instruction::Xor:
            (this->*funcXor[llvm::cryptoutils->get_range(2)])(
                cast<BinaryOperator>(inst));
            ++Xor;
            break;
          default:
            break;
          } // End switch
        }   // End isBinaryOp
      }     // End for basic block
    }       // End for function
  } while (--times > 0); // for times
  return false;
}
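// One plausible entry for the funcAdd table dispatched above: rewrite a + b
// as a - (0 - b). A minimal sketch of the substitution shape only; the real
// pass draws one of NUMBER_ADD_SUBST such identities at random, and this
// particular body is an assumption, not the pass's actual code.
void Substitution::addNeg(BinaryOperator *bo) {
  // neg = 0 - b
  BinaryOperator *neg = BinaryOperator::CreateNeg(bo->getOperand(1), "", bo);
  // a - neg  ==  a - (0 - b)  ==  a + b
  BinaryOperator *sub =
      BinaryOperator::Create(Instruction::Sub, bo->getOperand(0), neg, "", bo);
  bo->replaceAllUsesWith(sub);
}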
/// Determine whether the instructions in this range may be safely and cheaply
/// speculated. This is not an important enough situation to develop complex
/// heuristics. We handle a single arithmetic instruction along with any type
/// conversions.
static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
                                  BasicBlock::iterator End, Loop *L) {
  bool seenIncrement = false;
  bool MultiExitLoop = false;

  if (!L->getExitingBlock())
    MultiExitLoop = true;

  for (BasicBlock::iterator I = Begin; I != End; ++I) {
    if (!isSafeToSpeculativelyExecute(I))
      return false;

    if (isa<DbgInfoIntrinsic>(I))
      continue;

    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::GetElementPtr:
      // GEPs are cheap if all indices are constant.
      if (!cast<GEPOperator>(I)->hasAllConstantIndices())
        return false;
      // fall-thru to increment case
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr: {
      Value *IVOpnd = !isa<Constant>(I->getOperand(0))
                          ? I->getOperand(0)
                          : !isa<Constant>(I->getOperand(1))
                                ? I->getOperand(1)
                                : nullptr;
      if (!IVOpnd)
        return false;

      // If increment operand is used outside of the loop, this speculation
      // could cause extra live range interference.
      if (MultiExitLoop) {
        for (User *UseI : IVOpnd->users()) {
          auto *UserInst = cast<Instruction>(UseI);
          if (!L->contains(UserInst))
            return false;
        }
      }

      if (seenIncrement)
        return false;
      seenIncrement = true;
      break;
    }
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // ignore type conversions
      break;
    }
  }
  return true;
}
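// Hedged caller sketch: how a rotation-style transform might gate itself on
// the predicate above, scanning a latch up to (but not including) its
// terminator. The wrapper name and wiring are assumptions; only the
// shouldSpeculateInstrs signature comes from the snippet.
static bool canSpeculateLatch(Loop *L) {
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;
  BasicBlock::iterator End(Latch->getTerminator());
  return shouldSpeculateInstrs(Latch->begin(), End, L);
}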
bool PPCCTRLoops::mightUseCTR(BasicBlock *BB) {
  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to ppc_is_decremented_ctr_nonzero, or ppc_mtctr
          // we're definitely using CTR.
          case Intrinsic::ppc_is_decremented_ctr_nonzero:
          case Intrinsic::ppc_mtctr:
            return true;

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
                       !defined(setjmp_undefined_for_msvc)
#  pragma push_macro("setjmp")
#  undef setjmp
#  define setjmp_undefined_for_msvc
#endif

          case Intrinsic::setjmp:

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
  // let's return it to _setjmp state
#  pragma pop_macro("setjmp")
#  undef setjmp_undefined_for_msvc
#endif

          case Intrinsic::longjmp:

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::sqrt:               Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:              Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:               Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:              Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:               Opcode = ISD::FRINT;      break;
          case Intrinsic::nearbyint:          Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:              Opcode = ISD::FROUND;     break;
          case Intrinsic::minnum:             Opcode = ISD::FMINNUM;    break;
          case Intrinsic::maxnum:             Opcode = ISD::FMAXNUM;    break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO;      break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO;      break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          EVT EVTy =
              TLI->getValueType(*DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_f128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM->isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM->isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM->isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM->isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
    if (J->getOpcode() == Instruction::FRem)
      return true;

    if (STI->useSoftFloat()) {
      switch(J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(*TM, Operand))
        return true;
  }

  return false;
}
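// The newer mightUseCTR above delegates the inline-asm check to an
// asmClobbersCTR helper. A sketch of that helper, reconstructed from the
// inline constraint scan in the older mightUseCTR earlier in this section
// (so the logic matches, modulo API drift between LLVM versions):
static bool asmClobbersCTR(InlineAsm *IA) {
  InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
  for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
    InlineAsm::ConstraintInfo &C = CIV[i];
    if (C.Type != InlineAsm::isInput)
      for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
        if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
          return true;
  }
  return false;
}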
bool klee::PhiCleanerPass::runOnFunction(Function &f) {
  bool changed = false;

  for (Function::iterator b = f.begin(), be = f.end(); b != be; ++b) {
    BasicBlock::iterator it = b->begin();

    if (it->getOpcode() == Instruction::PHI) {
      PHINode *reference = cast<PHINode>(it);

      std::set<Value*> phis;
      phis.insert(reference);

      unsigned numBlocks = reference->getNumIncomingValues();
      for (++it; isa<PHINode>(*it); ++it) {
        PHINode *pi = cast<PHINode>(it);

        assert(numBlocks == pi->getNumIncomingValues());

        // see if it is out of order
        unsigned i;
        for (i = 0; i < numBlocks; i++)
          if (pi->getIncomingBlock(i) != reference->getIncomingBlock(i))
            break;

        if (i != numBlocks) {
          std::vector<Value*> values;
          values.resize(numBlocks); // resize, not reserve: operator[] on a
                                    // merely reserved vector is undefined
          for (unsigned i = 0; i < numBlocks; i++)
            values[i] =
                pi->getIncomingValueForBlock(reference->getIncomingBlock(i));
          for (unsigned i = 0; i < numBlocks; i++) {
            pi->setIncomingBlock(i, reference->getIncomingBlock(i));
            pi->setIncomingValue(i, values[i]);
          }
          changed = true;
        }

        // see if it uses any previously defined phi nodes
        for (i = 0; i < numBlocks; i++) {
          Value *value = pi->getIncomingValue(i);

          if (phis.find(value) != phis.end()) {
            // fix by making a "move" at the end of the incoming block
            // to a new temporary, which is thus known not to be a phi
            // result. we could be somewhat more efficient about this
            // by sharing temps and by reordering phi instructions so
            // this isn't completely necessary, but in the end this is
            // just a pathological case which does not occur very
            // often.
            Instruction *tmp =
                new BitCastInst(value, value->getType(),
                                value->getName() + ".phiclean",
                                pi->getIncomingBlock(i)->getTerminator());
            pi->setIncomingValue(i, tmp);
            changed = true; // only count an actual rewrite as a change
          }
        }

        phis.insert(pi);
      }
    }
  }

  return changed;
}
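// Hedged companion sketch: a checker for the invariant PhiCleanerPass
// establishes, i.e. every PHI in a block lists its incoming blocks in the
// same order as the block's first PHI. The helper name is an assumption;
// something like this is handy in unit tests for the pass.
static bool phiIncomingOrderIsCanonical(BasicBlock &bb) {
  PHINode *reference = dyn_cast<PHINode>(&*bb.begin());
  if (!reference)
    return true; // no PHI nodes at all

  for (BasicBlock::iterator it = bb.begin();
       PHINode *pi = dyn_cast<PHINode>(&*it); ++it)
    for (unsigned i = 0, e = reference->getNumIncomingValues(); i != e; ++i)
      if (pi->getIncomingBlock(i) != reference->getIncomingBlock(i))
        return false;
  return true;
}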
BinaryExprPtr SPGen::encodeInst(BasicBlock *p, std::vector<BasicBlock *> &pcds,
                                const ExprPtr psi, const ExprPtr gamma,
                                BasicBlock::iterator it) {
  BasicBlock *currentBB = it->getParent();
  assert(currentBB && "current basic block is null");

  // std::cout << "\nInstruction" << std::endl;
  // it->print(outs());
  // std::string s = p ? p->getName().str() : "null";
  // std::string postDomName = !pcds.empty() ? pcds.back()->getName().str()
  //                                         : "null";
  // std::cout << "CURRENT BB: " << currentBB->getName().str()
  //           << "PREVIOUS BB: " << s
  //           << "Common: " << postDomName << std::endl;

  unsigned opcode = it->getOpcode();
  if (opcode != Instruction::PHI && !pcds.empty() && pcds.back() == currentBB)
    return Expression::mkBinExpr(Expression::mkTrue(), Expression::mkTrue());

  BinaryExprPtr enc;
  switch (opcode) {
    case Instruction::Br: {
      enc = encodeBr(pcds, psi, gamma, cast<BranchInst>(it));
      break;
    }
    case Instruction::Call: {
      // Check if this call was inserted by the VarsForArrays pass. If so,
      // only its name and its first parameter's name are relevant.
      CallInst *call = dyn_cast<CallInst>(it);
      Function *f = call->getCalledFunction();
      if (f == array_aux_1 || f == array_aux_2) {
        Value *param = call->getArgOperand(0);
        context->setOldArrayName(param);
        if (f == array_aux_2) {
          context->setNewArrayName(call);
        }
        enc = Expression::mkBinExpr(Expression::mkTrue(),
                                    Expression::mkTrue());
      } else {
        enc = Expression::mkBinExpr(Expression::mkTrue(),
                                    encodeAnn(psi, gamma, cast<CallInst>(it)));
      }
      break;
    }
    case Instruction::Ret: {
      enc = Expression::mkBinExpr(Expression::mkTrue(), Expression::mkTrue());
      break;
    }
    case Instruction::Unreachable: {
      enc = Expression::mkBinExpr(Expression::mkTrue(), Expression::mkTrue());
      break;
    }
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr: {
      ExprPtr e = encoder.encode(cast<BinaryOperator>(it));
      enc = Expression::mkBinExpr(e, Expression::mkTrue());
      break;
    }
    case Instruction::ICmp: {
      ExprPtr e = encoder.encode(cast<ICmpInst>(it));
      enc = Expression::mkBinExpr(e, Expression::mkTrue());
      break;
    }
    case Instruction::SExt: {
      ExprPtr e = encoder.encode(cast<SExtInst>(it));
      enc = Expression::mkBinExpr(e, Expression::mkTrue());
      break;
    }
    case Instruction::ZExt: {
      ExprPtr e = encoder.encode(cast<ZExtInst>(it));
      enc = Expression::mkBinExpr(e, Expression::mkTrue());
      break;
    }
    case Instruction::Select: {
      ExprPtr e = encoder.encode(cast<SelectInst>(it));
      enc = Expression::mkBinExpr(e, Expression::mkTrue());
      break;
    }
    case Instruction::PHI: {
      assert(p && "trying to encode a phi instruction with no previous block\n");
      ExprPtr e = encoder.encode(cast<PHINode>(it), p);
      enc = Expression::mkBinExpr(e, Expression::mkTrue());
      break;
    }
    case Instruction::Switch: {
      assert(0 && "switch not implemented yet");
      // ExprPtr e = encoder.encode(cast<SwitchInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::Alloca: {
      ExprPtr e = encodeArray(cast<AllocaInst>(it));
      enc = Expression::mkBinExpr(e, Expression::mkTrue());
      break;
    }
    case Instruction::Store: {
      ExprPtr e = encodeArray(cast<StoreInst>(it));
      enc = Expression::mkBinExpr(e, Expression::mkTrue());
      break;
    }
    case Instruction::Load: {
      ExprPtr e = encodeArray(cast<LoadInst>(it));
      enc = Expression::mkBinExpr(e, Expression::mkTrue());
      break;
    }
    case Instruction::GetElementPtr: {
      ExprPtr e = encodeArray(cast<GetElementPtrInst>(it));
      enc = Expression::mkBinExpr(e, Expression::mkTrue());
      break;
    }
    case Instruction::PtrToInt: {
      assert(0 && "PtrToInt not implemented yet");
      // ExprPtr e = encoder.encode(cast<PtrToIntInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::VAArg: {
      assert(0 && "VAArg not implemented yet");
      // ExprPtr e = encoder.encode(cast<VAArgInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::Invoke: {
      assert(0 && "Invoke not implemented yet");
      // ExprPtr e = encoder.encode(cast<InvokeInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::Trunc: {
      assert(0 && "Trunc not implemented yet");
      // ExprPtr e = encoder.encode(cast<TruncInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::FPTrunc: {
      assert(0 && "FPTrunc not implemented yet");
      // ExprPtr e = encoder.encode(cast<FPTruncInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::FPExt: {
      assert(0 && "FPExt not implemented yet");
      // ExprPtr e = encoder.encode(cast<FPExtInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::UIToFP: {
      assert(0 && "UIToFP not implemented yet");
      // ExprPtr e = encoder.encode(cast<UIToFPInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::SIToFP: {
      assert(0 && "SIToFP not implemented yet");
      // ExprPtr e = encoder.encode(cast<SIToFPInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::FPToUI: {
      assert(0 && "FPToUI not implemented yet");
      // ExprPtr e = encoder.encode(cast<FPToUIInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::FPToSI: {
      assert(0 && "FPToSI not implemented yet");
      // ExprPtr e = encoder.encode(cast<FPToSIInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::IntToPtr: {
      assert(0 && "IntToPtr not implemented yet");
      // ExprPtr e = encoder.encode(cast<IntToPtrInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::BitCast: {
      assert(0 && "BitCast not implemented yet");
      // ExprPtr e = encoder.encode(cast<BitCastInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::FCmp: {
      assert(0 && "FCmp not implemented yet");
      // ExprPtr e = encoder.encode(cast<FCmpInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::ExtractElement: {
      assert(0 && "ExtractElement not implemented yet");
      // ExprPtr e = encoder.encode(cast<ExtractElementInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::InsertElement: {
      assert(0 && "InsertElement not implemented yet");
      // ExprPtr e = encoder.encode(cast<InsertElementInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::ShuffleVector: {
      assert(0 && "ShuffleVector not implemented yet");
      // ExprPtr e = encoder.encode(cast<ShuffleVectorInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::ExtractValue: {
      assert(0 && "ExtractValue not implemented yet");
      // ExprPtr e = encoder.encode(cast<ExtractValueInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    case Instruction::InsertValue: {
      assert(0 && "InsertValue not implemented yet");
      // ExprPtr e = encoder.encode(cast<InsertValueInst>(it));
      // ExprPtr impl = ExprUtils::mkImpl(pi.encode(), e);
      // vcs.addToE(impl);
      // psi2.add(impl);
      break;
    }
    default:
      std::cout << "UNREACHABLE OPCODE: " << it->getOpcode() << std::endl;
      llvm_unreachable("Illegal opcode!");
  }

  ++it;
  if (it != currentBB->end()) {
    ExprPtr psi2 = ExprUtils::mkAnd(psi, enc->getExpr1());
    ExprPtr gamma2 = ExprUtils::mkAnd(gamma, enc->getExpr2());
    BinaryExprPtr enc2 = encodeInst(p, pcds, psi2, gamma2, it);
    return Expression::mkBinExpr(
        ExprUtils::mkAnd(enc->getExpr1(), enc2->getExpr1()),
        ExprUtils::mkAnd(enc->getExpr2(), enc2->getExpr2()));
  } else
    return enc;
}
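// Hedged usage sketch: encodeInst threads psi/gamma through a block one
// instruction at a time, so encoding a whole block plausibly starts from
// trivially-true conjuncts at the block's first instruction. The wrapper
// below is an assumption about SPGen's entry point, not its actual code.
BinaryExprPtr SPGen::encodeBB(BasicBlock *prev,
                              std::vector<BasicBlock *> &pcds,
                              BasicBlock *bb) {
  return encodeInst(prev, pcds, Expression::mkTrue(), Expression::mkTrue(),
                    bb->begin());
}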