/* ===================================================================== */

int main(int argc, char *argv[])
{
    if (PIN_Init(argc, argv)) return Usage();

    // Single-thread profile enabled?
    if (KnobSingleThread.Value())
    {
        bimodal.SetSingleOSThreadID(KnobSingleThread.Value());
        gshare.SetSingleOSThreadID(KnobSingleThread.Value());
        simpleindirect.SetSingleOSThreadID(KnobSingleThread.Value());
        vpcsimpleindirect.SetSingleOSThreadID(KnobSingleThread.Value());
        targetcache.SetSingleOSThreadID(KnobSingleThread.Value());
    }

    // Activate the predictors.
    if (!KnobDisableBimodal.Value())        bimodal.Activate();
    if (!KnobDisableGshare.Value())         gshare.Activate();
    if (!KnobDisableSimpleIndirect.Value()) simpleindirect.Activate();
    if (!KnobDisableVPCIndirect.Value())    vpcsimpleindirect.Activate();
    if (!KnobDisableTargetCache.Value())    targetcache.Activate();

    // Activate the icount.
    if (!KnobDisableICount.Value())         icount.Activate();

    outfile = new ofstream(KnobOutputFile.Value().c_str());

    PIN_AddFiniFunction(Fini, 0);
    PIN_StartProgram();
}
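// The knobs referenced above are declared elsewhere in the tool. A minimal sketch of
// what those declarations might look like (the types are inferred from usage; the
// switch names and defaults here are hypothetical, not taken from the original):
KNOB<UINT32> KnobSingleThread(KNOB_MODE_WRITEONCE, "pintool", "tid", "0",
                              "profile only this OS thread id (0 = all threads)");
KNOB<BOOL> KnobDisableBimodal(KNOB_MODE_WRITEONCE, "pintool", "no_bimodal", "0",
                              "disable the bimodal predictor");
KNOB<string> KnobOutputFile(KNOB_MODE_WRITEONCE, "pintool", "o", "predictors.out",
                            "output file name");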
VOID VerifyFpContext(ADDRINT pcval, CONTEXT *context)
{
    //printf ("fpContextFromXsave %x FPSTATE_SIZE %d FPSTATE_ALIGNMENT %d\n",
    //        fpContextFromXsave, FPSTATE_SIZE, FPSTATE_ALIGNMENT);
    //fflush (stdout);
    Do_Xsave(fpContextFromXsave);

    //printf ("fpContextFromFxsave %x\n", fpContextFromFxsave);
    //fflush (stdout);
    Do_Fxsave(fpContextFromFxsave);

    PIN_SaveContext(context, &contextFromPin);

    FPSTATE *fpContextFromPin = reinterpret_cast<FPSTATE *>(
        (reinterpret_cast<ADDRINT>(fpContextSpaceForFpConextFromPin) + (FPSTATE_ALIGNMENT - 1))
        & (-1 * FPSTATE_ALIGNMENT));

    unsigned char *ptr = (reinterpret_cast<unsigned char *>(fpContextFromPin)) + sizeof(FXSAVE);

    // Set values after the FXSAVE part of the FP context - to verify that the
    // deprecated call to PIN_GetContextFPState does NOT change these.
    memset(ptr, 0xa5, sizeof(FPSTATE) - sizeof(FXSAVE));

    PIN_GetContextFPState(&contextFromPin, reinterpret_cast<void *>(fpContextFromPin));
    for (int i = 0; i < sizeof(FPSTATE) - sizeof(FXSAVE); i++, ptr++)
    {
        if (*ptr != 0xa5)
        {
            printf("**** ERROR: value set after FXSAVE part in deprecated PIN_GetContextFPState *ptr = %x (i %d)\n",
                   *ptr, i);
            exit(-1);
        }
    }

    PIN_GetContextFPState(&contextFromPin, fpContextFromPin);
    ptr = (reinterpret_cast<unsigned char *>(fpContextFromPin)) + sizeof(FXSAVE);

    FPSTATE *fpContextFromPin1 = reinterpret_cast<FPSTATE *>(
        (reinterpret_cast<ADDRINT>(fpContextSpaceForFpConextFromPin1) + (FPSTATE_ALIGNMENT - 1))
        & (-1 * FPSTATE_ALIGNMENT));
    PIN_GetContextFPState(&contextFromPin, fpContextFromPin1);

    // Set values after the FXSAVE part of the FP context - to verify that the
    // deprecated call to PIN_SetContextFPState does NOT change these.
    unsigned char *ptr1 = (reinterpret_cast<unsigned char *>(fpContextFromPin1)) + sizeof(FXSAVE);
    memset(ptr1, 0xa5, sizeof(FPSTATE) - sizeof(FXSAVE));

    PIN_SetContextFPState(&contextFromPin, reinterpret_cast<const void *>(fpContextFromPin1));
    PIN_GetContextFPState(&contextFromPin, fpContextFromPin1);
    if (memcmp(ptr1, ptr, sizeof(FPSTATE) - sizeof(FXSAVE)) != 0)
    {
        printf("**** ERROR: value set after FXSAVE part in deprecated PIN_SetContextFPState\n");
        exit(-1);
    }

    if (!CompareFpContext(fpContextFromPin, fpContextFromFxsave, FALSE))
    {
        fprintf(log_inl, "***ERROR in fxsave fp context\n");
        printf("***ERROR in fxsave fp context see file %s\n", KnobOutputFile.Value().c_str());
        fflush(stdout);
        string s = disassemble((pcval), (pcval) + 15);
        fprintf(log_inl, " %s\n", s.c_str());
        exit(-1);
    }
}
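// The two reinterpret_cast expressions above implement the same align-up idiom on the
// raw buffers. A small helper (a sketch, not part of the original tool) makes the intent
// explicit; FPSTATE_ALIGNMENT is assumed to be a power of two, as XSAVE/FXSAVE require:
static inline FPSTATE *AlignFpStateBuffer(unsigned char *raw)
{
    // Round the raw pointer up to the next FPSTATE_ALIGNMENT boundary.
    ADDRINT aligned = (reinterpret_cast<ADDRINT>(raw) + (FPSTATE_ALIGNMENT - 1))
                      & ~static_cast<ADDRINT>(FPSTATE_ALIGNMENT - 1);
    return reinterpret_cast<FPSTATE *>(aligned);
}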
int main(int argc, char *argv[])
{
    PIN_Init(argc, argv);
    PIN_InitSymbols();

    RegThreadInfo = PIN_ClaimToolRegister();
    if (RegThreadInfo == REG_INVALID())
    {
        std::cout << "Out of tool registers" << std::endl;
        PIN_ExitProcess(1);
    }

    // Get the test type and initialize the corresponding lock variable.
    //
    TestType = GetTestType(KnobTest.Value());
    switch (TestType)
    {
      case TEST_NONE:
        std::cout << "Must specify a test to run with the '-test' knob" << std::endl;
        PIN_ExitProcess(1);
        break;
      case TEST_INVALID:
        std::cout << "Invalid test name: " << KnobTest.Value() << std::endl;
        PIN_ExitProcess(1);
        break;
      case TEST_LOCK_INTEGRITY:
      case TEST_LOCK_STRESS:
        PIN_InitLock(&Lock);
        break;
      case TEST_MUTEX_INTEGRITY:
      case TEST_MUTEX_STRESS:
      case TEST_MUTEX_TRYSTRESS:
        PIN_MutexInit(&Mutex);
        break;
      case TEST_WRITER_INTEGRITY:
      case TEST_WRITER_STRESS:
      case TEST_WRITER_TRYSTRESS:
      case TEST_READER_STRESS:
      case TEST_READER_TRYSTRESS:
      case TEST_RW_INTEGRITY:
      case TEST_RW_STRESS:
      case TEST_RW_TRYSTRESS:
        PIN_RWMutexInit(&RWMutex);
        break;
      case TEST_SEMAPHORE:
        PIN_SemaphoreInit(&Sem1);
        PIN_SemaphoreInit(&Sem2);
        PIN_SemaphoreSet(&Sem1);
        PIN_MutexInit(&Mutex);
        break;
      case TEST_TRYLOCKS:
        PIN_MutexInit(&Mutex);
        PIN_RWMutexInit(&RWMutex);
        PIN_SemaphoreInit(&Sem1);
        break;
      default:
        ASSERTX(0);
    }

    PIN_AddThreadStartFunction(OnThreadStart, 0);
    PIN_AddThreadFiniFunction(OnThreadFini, 0);
    RTN_AddInstrumentFunction(InstrumentRtn, 0);
    PIN_AddFiniFunction(OnExit, 0);

    PIN_StartProgram();
    return 0;
}
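// GetTestType() is assumed to map the '-test' knob string onto the TEST_* values used
// in the switch above. A minimal sketch; the enum type name TEST_TYPE and the accepted
// strings are placeholders, not taken from the original:
static TEST_TYPE GetTestType(const std::string &name)
{
    if (name.empty())              return TEST_NONE;
    if (name == "lock_integrity")  return TEST_LOCK_INTEGRITY;
    if (name == "lock_stress")     return TEST_LOCK_STRESS;
    if (name == "mutex_integrity") return TEST_MUTEX_INTEGRITY;
    if (name == "semaphore")       return TEST_SEMAPHORE;
    if (name == "trylocks")        return TEST_TRYLOCKS;
    // ... the remaining TEST_* names would be handled the same way ...
    return TEST_INVALID;
}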
VOID Trace(TRACE trace, VOID *v)
{
    const BOOL print_args = KnobPrintArgs.Value();

    for (BBL bbl = TRACE_BblHead(trace); BBL_Valid(bbl); bbl = BBL_Next(bbl))
    {
        INS tail = BBL_InsTail(bbl);

        if (INS_IsCall(tail))
        {
            if (INS_IsDirectBranchOrCall(tail))
            {
                const ADDRINT target = INS_DirectBranchOrCallTargetAddress(tail);
                if (print_args)
                {
                    INS_InsertPredicatedCall(tail, IPOINT_BEFORE, AFUNPTR(do_call_args),
                                             IARG_PTR, Target2String(target),
                                             IARG_G_ARG0_CALLER, IARG_END);
                }
                else
                {
                    INS_InsertPredicatedCall(tail, IPOINT_BEFORE, AFUNPTR(do_call),
                                             IARG_PTR, Target2String(target), IARG_END);
                }
            }
            else
            {
                if (print_args)
                {
                    INS_InsertCall(tail, IPOINT_BEFORE, AFUNPTR(do_call_args_indirect),
                                   IARG_BRANCH_TARGET_ADDR, IARG_BRANCH_TAKEN,
                                   IARG_G_ARG0_CALLER, IARG_END);
                }
                else
                {
                    INS_InsertCall(tail, IPOINT_BEFORE, AFUNPTR(do_call_indirect),
                                   IARG_BRANCH_TARGET_ADDR, IARG_BRANCH_TAKEN, IARG_END);
                }
            }
        }
        else
        {
            // Sometimes code is not in an image.
            RTN rtn = TRACE_Rtn(trace);

            // Also track stub jumps into shared libraries (e.g. through the .plt).
            if (RTN_Valid(rtn) && !INS_IsDirectBranchOrCall(tail) && ".plt" == SEC_Name(RTN_Sec(rtn)))
            {
                if (print_args)
                {
                    INS_InsertCall(tail, IPOINT_BEFORE, AFUNPTR(do_call_args_indirect),
                                   IARG_BRANCH_TARGET_ADDR, IARG_BRANCH_TAKEN,
                                   IARG_G_ARG0_CALLER, IARG_END);
                }
                else
                {
                    INS_InsertCall(tail, IPOINT_BEFORE, AFUNPTR(do_call_indirect),
                                   IARG_BRANCH_TARGET_ADDR, IARG_BRANCH_TAKEN, IARG_END);
                }
            }
        }
    }
}
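// The analysis routines used above are defined elsewhere in the tool. A sketch of
// do_call_indirect, assuming do_call accepts the string produced by Target2String
// (the body is a guess, but the parameters match IARG_BRANCH_TARGET_ADDR and
// IARG_BRANCH_TAKEN as passed above):
VOID do_call_indirect(ADDRINT target, BOOL taken)
{
    if (!taken) return;              // skip conditional/predicated calls that did not execute
    do_call(Target2String(target));  // resolve the target symbol and reuse the direct-call path
}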
VOID instrumentRoutine(RTN rtn, VOID *v)
{
    // Only instrument valid routines that belong to the main executable
    // (check validity before opening the routine).
    if (!RTN_Valid(rtn) || !IMG_IsMainExecutable(IMG_FindByAddress(RTN_Address(rtn))))
    {
        return;
    }
    RTN_Open(rtn);

    fprintf(stderr, ">>>>>>>>>>>>>>%s<<<<<<<<<<<<<<<\n", RTN_Name(rtn).c_str());

    vector<IFR_BasicBlock> bblist = vector<IFR_BasicBlock>();
    hash_map<ADDRINT, IFR_BasicBlock> blocks = hash_map<ADDRINT, IFR_BasicBlock>();
    findBlocks(rtn, bblist, blocks);

    hash_map<ADDRINT, set<ADDRINT> > pred = hash_map<ADDRINT, set<ADDRINT> >();
    computePredecessors(rtn, bblist, pred);
    if (KnobPred.Value() == true)
    {
        for (vector<IFR_BasicBlock>::iterator i = bblist.begin(); i != bblist.end(); i++)
        {
            fprintf(stderr, "Predecessors to %p:\n\t", i->getEntryAddr());
            for (set<ADDRINT>::iterator pi = pred[i->getEntryAddr()].begin();
                 pi != pred[i->getEntryAddr()].end(); pi++)
            {
                fprintf(stderr, "%p ", *pi);
            }
            fprintf(stderr, "\n");
        }
    }

    hash_map<ADDRINT, set<ADDRINT> > dom = hash_map<ADDRINT, set<ADDRINT> >();
    computeDominators(rtn, bblist, pred, dom);
    if (KnobDom.Value() == true)
    {
        for (vector<IFR_BasicBlock>::iterator i = bblist.begin(); i != bblist.end(); i++)
        {
            fprintf(stderr, "Dominators of %p:\n\t", i->getEntryAddr());
            for (set<ADDRINT>::iterator di = dom[i->getEntryAddr()].begin();
                 di != dom[i->getEntryAddr()].end(); di++)
            {
                fprintf(stderr, "%p ", *di);
            }
            fprintf(stderr, "\n");
        }
    }

    hash_map<ADDRINT, ADDRINT> idom = hash_map<ADDRINT, ADDRINT>();
    computeIDoms(bblist, dom, idom);
    if (KnobIDom.Value() == true)
    {
        for (vector<IFR_BasicBlock>::iterator i = bblist.begin(); i != bblist.end(); i++)
        {
            fprintf(stderr, "IDom of %p: %p\n", i->getEntryAddr(), idom[i->getEntryAddr()]);
        }
    }

    hash_map<ADDRINT, set<ADDRINT> > df = hash_map<ADDRINT, set<ADDRINT> >();
    computeDominanceFrontiers(bblist, pred, dom, idom, df);
    if (KnobDF.Value() == true)
    {
        for (vector<IFR_BasicBlock>::iterator i = bblist.begin(); i != bblist.end(); i++)
        {
            fprintf(stderr, "DF of %p:\n\t", i->getEntryAddr());
            for (set<ADDRINT>::iterator di = df[i->getEntryAddr()].begin();
                 di != df[i->getEntryAddr()].end(); di++)
            {
                fprintf(stderr, "%p ", *di);
            }
            fprintf(stderr, "\n");
        }
        fprintf(stderr, "\n");
    }

    hash_map<ADDRINT, hash_map<unsigned, vector<IFR_MemoryRef> > > memrefs =
        hash_map<ADDRINT, hash_map<unsigned, vector<IFR_MemoryRef> > >();
    computeMemoryReferences(bblist, memrefs);
    if (KnobSSA.Value() == true)
    {
        for (hash_map<ADDRINT, hash_map<unsigned, vector<IFR_MemoryRef> > >::iterator i = memrefs.begin();
             i != memrefs.end(); i++)
        {
            cerr << "Block " << hex << i->first << dec << endl;
            for (hash_map<unsigned, vector<IFR_MemoryRef> >::iterator j = i->second.begin();
                 j != i->second.end(); j++)
            {
                cerr << "\tIns" << j->first << ": ";
                for (vector<IFR_MemoryRef>::iterator k = j->second.begin(); k != j->second.end(); k++)
                {
                    printMemRef(*k);
                    cerr << ",";
                }
                cerr << endl;
            }
        }
    }

    if (KnobBlocks.Value() == true)
    {
        for (std::vector<IFR_BasicBlock>::iterator i = bblist.begin(); i != bblist.end(); i++)
        {
            i->print();
            fprintf(stderr, "-------------------------------------\n");
        }
    }

    RTN_Close(rtn);
}
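// computeIDoms() is defined elsewhere in the tool. A sketch of the classic set-based
// derivation it presumably performs (container types reuse the ones above; this is an
// illustration, not the original implementation): the immediate dominator of a block is
// the strict dominator that is itself dominated by every other strict dominator.
void computeIDomsSketch(vector<IFR_BasicBlock> &bblist,
                        hash_map<ADDRINT, set<ADDRINT> > &dom,
                        hash_map<ADDRINT, ADDRINT> &idom)
{
    for (vector<IFR_BasicBlock>::iterator b = bblist.begin(); b != bblist.end(); b++)
    {
        ADDRINT baddr = b->getEntryAddr();
        set<ADDRINT> &bdoms = dom[baddr];

        for (set<ADDRINT>::iterator cand = bdoms.begin(); cand != bdoms.end(); cand++)
        {
            if (*cand == baddr) continue;  // only strict dominators are candidates

            bool dominatedByAllOthers = true;
            for (set<ADDRINT>::iterator other = bdoms.begin(); other != bdoms.end(); other++)
            {
                if (*other == baddr || *other == *cand) continue;
                if (dom[*cand].count(*other) == 0)  // *other does not dominate the candidate
                {
                    dominatedByAllOthers = false;
                    break;
                }
            }
            if (dominatedByAllOthers)
            {
                idom[baddr] = *cand;
                break;
            }
        }
    }
}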
VOID emit_bbl_stats_sorted(THREADID tid)
{
    thread_data_t *tdata = get_tls(tid); // dynamic counts

    // Need to lock here because the instrumentation might be resizing (and thus
    // reallocating) statsList when it does a push_back.
    PIN_GetLock(&bbl_list_lock, tid + 1);
    UINT32 limit = tdata->size();
    if (limit > statsList.size()) limit = statsList.size();

    BBL_SORT_STATS *icounts = new BBL_SORT_STATS[limit];
    UINT64 thread_total = 0;
    for (UINT32 i = 0; i < limit; i++)
    {
        BBLSTATS *b = statsList[i];
        if (b)
        {
            UINT32 bcount = tdata->block_counts[i];
            icounts[i]._icount = bcount * b->_ninst;
            icounts[i]._pc = b->_pc;
            icounts[i]._executions = bcount;
            icounts[i]._nbytes = b->_nbytes;
            thread_total += icounts[i]._icount;
        }
    }
    PIN_ReleaseLock(&bbl_list_lock);

    qsort(icounts, limit, sizeof(BBL_SORT_STATS), qsort_compare_fn);

    PIN_GetLock(&lock, tid + 1); // for output
    *out << "# EMIT_STATS TOP BLOCKS " << stat_dump_count << " FOR TID " << tid << endl;

    if (limit > KnobTopBlocks.Value()) limit = KnobTopBlocks.Value();

    UINT64 t = 0;
    for (UINT32 i = 0; i < limit; i++)
    {
        t += icounts[i]._icount;
        *out << "BLOCK: "       << setw(5) << i
             << " PC: "         << hex << setfill('0') << setw(sizeof(ADDRINT) * 2) << icounts[i]._pc
                                << setfill(' ') << dec
             << " ICOUNT: "     << setw(9) << icounts[i]._icount
             << " EXECUTIONS: " << setw(9) << icounts[i]._executions
             << " #BYTES: "     << setw(2) << icounts[i]._nbytes
             << " %: "          << setw(5) << setprecision(3) << 100.0 * icounts[i]._icount / thread_total
             << " cumltv%: "    << setw(5) << setprecision(3) << 100.0 * t / thread_total
             << endl;
#if defined(TARGET_IA32) || defined(TARGET_IA32E)
        if (KnobShowDisassembly)
        {
            string s = disassemble(icounts[i]._pc, icounts[i]._pc + icounts[i]._nbytes);
            *out << s << endl;
        }
#endif
    }
    *out << "# END_STATS" << endl;
    PIN_ReleaseLock(&lock);

    delete[] icounts;
}
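// qsort_compare_fn is defined elsewhere in the tool; to produce a "top blocks" report
// it presumably sorts by dynamic instruction count in descending order. A sketch:
static int qsort_compare_fn(const void *a, const void *b)
{
    const BBL_SORT_STATS *sa = static_cast<const BBL_SORT_STATS *>(a);
    const BBL_SORT_STATS *sb = static_cast<const BBL_SORT_STATS *>(b);
    if (sa->_icount > sb->_icount) return -1;  // larger counts sort first
    if (sa->_icount < sb->_icount) return 1;
    return 0;
}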
VOID Fini(INT32 code, VOID *v)
{
    TraceFile.open(KnobOutputFile.Value().c_str());
    TraceFile << alloc_count << endl;
    TraceFile.close();
}
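// alloc_count is assumed to be a global counter that the tool's instrumentation bumps
// on every allocation it observes. A sketch of instrumentation that could feed it; the
// hooked routine name "malloc", the Image callback name, and the counting scheme are
// assumptions for illustration, not taken from the original:
static UINT64 alloc_count = 0;   // assumed declaration of the counter printed in Fini

VOID CountAlloc() { alloc_count++; }

VOID Image(IMG img, VOID *v)
{
    RTN mallocRtn = RTN_FindByName(img, "malloc");
    if (RTN_Valid(mallocRtn))
    {
        RTN_Open(mallocRtn);
        RTN_InsertCall(mallocRtn, IPOINT_BEFORE, AFUNPTR(CountAlloc), IARG_END);
        RTN_Close(mallocRtn);
    }
}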
namespace pintool {

  //! Pin options: -script
  KNOB<std::string> KnobPythonModule(KNOB_MODE_WRITEONCE, "pintool", "script", "", "Python script");

  //! Lock / Unlock InsertCall
  Trigger analysisTrigger = Trigger();

  //! Snapshot engine
  Snapshot snapshot = Snapshot();


  /* Switch lock */
  static void toggleWrapper(bool flag) {
    PIN_LockClient();
    tracer::pintool::analysisTrigger.update(flag);
    PIN_UnlockClient();
  }


  /* Callback before instruction processing */
  static void callbackBefore(triton::arch::Instruction* tritonInst, triton::uint8* addr, triton::uint32 size, CONTEXT* ctx, THREADID threadId) {
    /* Some configurations must be applied before processing */
    tracer::pintool::callbacks::preProcessing(tritonInst, threadId);

    if (!tracer::pintool::analysisTrigger.getState() || threadId != tracer::pintool::options::targetThreadId)
      /* Analysis locked */
      return;

    /* Mutex */
    PIN_LockClient();

    /* Update CTX */
    tracer::pintool::context::lastContext = ctx;

    /* Setup Triton information */
    tritonInst->clear();
    tritonInst->setOpcode(addr, size);
    tritonInst->setAddress(reinterpret_cast<triton::__uint>(addr));
    tritonInst->setThreadId(reinterpret_cast<triton::uint32>(threadId));

    /* Disassemble the instruction */
    tracer::pintool::api.disassembly(*tritonInst);

    /* Execute the Python callback before the IR processing */
    if (tracer::pintool::context::mustBeExecuted == false)
      tracer::pintool::callbacks::beforeIRProc(tritonInst);
    else
      tracer::pintool::context::mustBeExecuted = false;

    /* Check if we must execute a new context */
    if (tracer::pintool::context::mustBeExecuted == true) {
      tritonInst->clear();
      tracer::pintool::context::executeContext();
    }

    /* Synchronize glitches between the Pintool and libTriton */
    tracer::pintool::context::synchronizeContext();

    /* Process the IR and spread the taint only if at least one of the two engines is enabled */
    if (tracer::pintool::api.isTaintEngineEnabled() || tracer::pintool::api.isSymbolicEngineEnabled())
      tracer::pintool::api.buildSemantics(*tritonInst);

    /* Execute the Python callback */
    if (tracer::pintool::context::mustBeExecuted == false)
      tracer::pintool::callbacks::before(tritonInst);

    /* Check if we must restore the snapshot */
    if (tracer::pintool::snapshot.mustBeRestored() == true) {
      tritonInst->clear();
      tracer::pintool::snapshot.restoreSnapshot(ctx);
    }

    /* Some configurations must be applied after processing */
    tracer::pintool::callbacks::postProcessing(tritonInst, threadId);

    /* Mutex */
    PIN_UnlockClient();
  }


  /* Callback after instruction processing */
  static void callbackAfter(triton::arch::Instruction* tritonInst, CONTEXT* ctx, THREADID threadId) {
    if (!tracer::pintool::analysisTrigger.getState() || threadId != tracer::pintool::options::targetThreadId)
      /* Analysis locked */
      return;

    /* Mutex */
    PIN_LockClient();

    /* Update CTX */
    tracer::pintool::context::lastContext = ctx;

    /* Execute the Python callback */
    tracer::pintool::callbacks::after(tritonInst);

    /* Some configurations must be applied after processing */
    tracer::pintool::callbacks::postProcessing(tritonInst, threadId);

    /* Clear the instruction information because of Pin's cache */
    tritonInst->clear();

    /* Check if we must execute a new context */
    if (tracer::pintool::context::mustBeExecuted == true)
      tracer::pintool::context::executeContext();

    /* Check if we must restore the snapshot */
    if (tracer::pintool::snapshot.mustBeRestored() == true)
      tracer::pintool::snapshot.restoreSnapshot(ctx);

    /* Mutex */
    PIN_UnlockClient();
  }


  /* Save the memory access into the Triton instruction */
  static void saveMemoryAccess(triton::arch::Instruction* tritonInst, triton::__uint addr, triton::uint32 size) {
    /* Mutex */
    PIN_LockClient();
    triton::uint512 value = tracer::pintool::context::getCurrentMemoryValue(addr, size);
    tracer::pintool::api.setConcreteMemoryValue(triton::arch::MemoryAccess(addr, size), value);
    /* Mutex */
    PIN_UnlockClient();
  }


  /* Callback to save bytes for the snapshot engine */
  static void callbackSnapshot(triton::__uint mem, triton::uint32 writeSize) {
    if (!tracer::pintool::analysisTrigger.getState())
      /* Analysis locked */
      return;

    /* If the snapshot is not enabled we don't save the memory */
    if (tracer::pintool::snapshot.isLocked())
      return;

    /* Mutex */
    PIN_LockClient();

    for (triton::uint32 i = 0; i < writeSize; i++)
      tracer::pintool::snapshot.addModification(mem + i, *(reinterpret_cast<triton::uint8*>(mem + i)));

    /* Mutex */
    PIN_UnlockClient();
  }


  /* Callback at a routine entry */
  static void callbackRoutineEntry(CONTEXT* ctx, THREADID threadId, PyObject* callback) {
    if (!tracer::pintool::analysisTrigger.getState() || threadId != tracer::pintool::options::targetThreadId)
      /* Analysis locked */
      return;

    /* Mutex lock */
    PIN_LockClient();

    /* Update CTX */
    tracer::pintool::context::lastContext = ctx;

    /* Execute the Python callback */
    tracer::pintool::callbacks::routine(threadId, callback);

    /* Mutex unlock */
    PIN_UnlockClient();
  }


  /* Callback at a routine exit */
  static void callbackRoutineExit(CONTEXT* ctx, THREADID threadId, PyObject* callback) {
    if (!tracer::pintool::analysisTrigger.getState() || threadId != tracer::pintool::options::targetThreadId)
      /* Analysis locked */
      return;

    /* Mutex lock */
    PIN_LockClient();

    /* Update CTX */
    tracer::pintool::context::lastContext = ctx;

    /* Execute the Python callback */
    tracer::pintool::callbacks::routine(threadId, callback);

    /* Mutex unlock */
    PIN_UnlockClient();
  }


  /* Callback at the end of the execution */
  static void callbackFini(int, VOID *) {
    /* Execute the Python callback */
    tracer::pintool::callbacks::fini();
  }


  /* Callback at a syscall entry */
  static void callbackSyscallEntry(unsigned int threadId, CONTEXT* ctx, SYSCALL_STANDARD std, void* v) {
    if (!tracer::pintool::analysisTrigger.getState() || threadId != tracer::pintool::options::targetThreadId)
      /* Analysis locked */
      return;

    /* Mutex */
    PIN_LockClient();

    /* Update CTX */
    tracer::pintool::context::lastContext = ctx;

    /* Execute the Python callback */
    tracer::pintool::callbacks::syscallEntry(threadId, std);

    /* Mutex */
    PIN_UnlockClient();
  }


  /* Callback at a syscall exit */
  static void callbackSyscallExit(unsigned int threadId, CONTEXT* ctx, SYSCALL_STANDARD std, void* v) {
    if (!tracer::pintool::analysisTrigger.getState() || threadId != tracer::pintool::options::targetThreadId)
      /* Analysis locked */
      return;

    /* Mutex */
    PIN_LockClient();

    /* Update CTX */
    tracer::pintool::context::lastContext = ctx;

    /* Execute the Python callback */
    tracer::pintool::callbacks::syscallExit(threadId, std);

    /* Mutex */
    PIN_UnlockClient();
  }


  /*
   * Callback when an image is loaded.
   * This callback must be called even outside the range analysis.
   */
  static void callbackImageLoad(IMG img) {
    /* Mutex */
    PIN_LockClient();

    /* Collect image information */
    std::string imagePath = IMG_Name(img);
    triton::__uint imageBase = IMG_LowAddress(img);
    triton::__uint imageSize = (IMG_HighAddress(img) + 1) - imageBase;

    /* Execute the Python callback */
    tracer::pintool::callbacks::imageLoad(imagePath, imageBase, imageSize);

    /* Mutex */
    PIN_UnlockClient();
  }


  /* Callback when a signal occurs */
  static bool callbackSignals(unsigned int threadId, int sig, CONTEXT* ctx, bool hasHandler, const EXCEPTION_INFO* pExceptInfo, void* v) {
    /* Mutex */
    PIN_LockClient();

    /* Update CTX */
    tracer::pintool::context::lastContext = ctx;

    /* Execute the Python callback */
    tracer::pintool::callbacks::signals(threadId, sig);

    /* Mutex */
    PIN_UnlockClient();

    /*
     * We must exit. If you don't want to exit,
     * you must use the restoreSnapshot() function.
     */
    exit(0);

    return true;
  }


  /* Image instrumentation */
  static void IMG_Instrumentation(IMG img, void *v) {
    /* Lock / Unlock the analysis from an entry point */
    if (tracer::pintool::options::startAnalysisFromEntry) {
      tracer::pintool::options::startAnalysisFromEntry = false;
      /* IMG_LoadOffset(img) + IMG_Entry(img) for PIE binaries (see #524) */
      tracer::pintool::options::startAnalysisFromAddress.insert(IMG_LoadOffset(img) + IMG_Entry(img));
    }

    /* Lock / Unlock the analysis from a symbol */
    if (tracer::pintool::options::startAnalysisFromSymbol != nullptr) {
      RTN targetRTN = RTN_FindByName(img, tracer::pintool::options::startAnalysisFromSymbol);
      if (RTN_Valid(targetRTN)) {
        RTN_Open(targetRTN);
        RTN_InsertCall(targetRTN, IPOINT_BEFORE, (AFUNPTR) toggleWrapper, IARG_BOOL, true, IARG_END);
        RTN_InsertCall(targetRTN, IPOINT_AFTER,  (AFUNPTR) toggleWrapper, IARG_BOOL, false, IARG_END);
        RTN_Close(targetRTN);
      }
    }

    /* Callback on routine entry */
    std::map<const char *, PyObject *>::iterator it;
    for (it = tracer::pintool::options::callbackRoutineEntry.begin(); it != tracer::pintool::options::callbackRoutineEntry.end(); it++) {
      RTN targetRTN = RTN_FindByName(img, it->first);
      if (RTN_Valid(targetRTN)) {
        RTN_Open(targetRTN);
        RTN_InsertCall(targetRTN, IPOINT_BEFORE, (AFUNPTR)callbackRoutineEntry, IARG_CONTEXT, IARG_THREAD_ID, IARG_PTR, it->second, IARG_END);
        RTN_Close(targetRTN);
      }
    }

    /* Callback on routine exit */
    for (it = tracer::pintool::options::callbackRoutineExit.begin(); it != tracer::pintool::options::callbackRoutineExit.end(); it++) {
      RTN targetRTN = RTN_FindByName(img, it->first);
      if (RTN_Valid(targetRTN)) {
        RTN_Open(targetRTN);
        RTN_InsertCall(targetRTN, IPOINT_AFTER, (AFUNPTR)callbackRoutineExit, IARG_CONTEXT, IARG_THREAD_ID, IARG_PTR, it->second, IARG_END);
        RTN_Close(targetRTN);
      }
    }

    /*
     * Callback when a new image is loaded.
     * This callback must be called even outside the range analysis.
     */
    if (IMG_Valid(img))
      tracer::pintool::callbackImageLoad(img);
  }


  /* Check if the analysis must be unlocked */
  static bool checkUnlockAnalysis(triton::__uint address) {
    if (tracer::pintool::options::targetThreadId != -1)
      return false;

    /* Unlock the analysis at the entry point from a symbol */
    if (tracer::pintool::options::startAnalysisFromSymbol != nullptr) {
      if ((RTN_FindNameByAddress(address) == tracer::pintool::options::startAnalysisFromSymbol)) {
        tracer::pintool::options::targetThreadId = PIN_ThreadId();
        tracer::pintool::toggleWrapper(true);
        return true;
      }
    }

    /* Unlock the analysis at the entry point from an address */
    else if (tracer::pintool::options::startAnalysisFromAddress.find(address) != tracer::pintool::options::startAnalysisFromAddress.end()) {
      tracer::pintool::options::targetThreadId = PIN_ThreadId();
      tracer::pintool::toggleWrapper(true);
      return true;
    }

    /* Unlock the analysis at the entry point from an offset */
    else if (tracer::pintool::options::startAnalysisFromOffset.find(tracer::pintool::getInsOffset(address)) != tracer::pintool::options::startAnalysisFromOffset.end()) {
      tracer::pintool::options::targetThreadId = PIN_ThreadId();
      tracer::pintool::toggleWrapper(true);
      return true;
    }

    return false;
  }


  /* Check if the instruction is blacklisted */
  static bool instructionBlacklisted(triton::__uint address) {
    std::list<const char *>::iterator it;
    for (it = tracer::pintool::options::imageBlacklist.begin(); it != tracer::pintool::options::imageBlacklist.end(); it++) {
      if (strstr(tracer::pintool::getImageName(address).c_str(), *it))
        return true;
    }
    return false;
  }


  /* Check if the instruction is whitelisted */
  static bool instructionWhitelisted(triton::__uint address) {
    std::list<const char *>::iterator it;

    /* If there is no whitelist -> jit everything */
    if (tracer::pintool::options::imageWhitelist.empty())
      return true;

    for (it = tracer::pintool::options::imageWhitelist.begin(); it != tracer::pintool::options::imageWhitelist.end(); it++) {
      if (strstr(tracer::pintool::getImageName(address).c_str(), *it))
        return true;
    }

    return false;
  }


  /* Trace instrumentation */
  static void TRACE_Instrumentation(TRACE trace, VOID *v) {
    for (BBL bbl = TRACE_BblHead(trace); BBL_Valid(bbl); bbl = BBL_Next(bbl)) {
      for (INS ins = BBL_InsHead(bbl); INS_Valid(ins); ins = INS_Next(ins)) {

        /* Check if the analysis must be unlocked */
        tracer::pintool::checkUnlockAnalysis(INS_Address(ins));

        if (!tracer::pintool::analysisTrigger.getState())
          /* Analysis locked */
          continue;

        if (tracer::pintool::instructionBlacklisted(INS_Address(ins)) == true || tracer::pintool::instructionWhitelisted(INS_Address(ins)) == false)
          /* Instruction blacklisted */
          continue;

        /* Prepare the Triton instruction */
        triton::arch::Instruction* tritonInst = new triton::arch::Instruction();

        /* Save memory read1 information */
        if (INS_IsMemoryRead(ins)) {
          INS_InsertCall(ins, IPOINT_BEFORE, (AFUNPTR)saveMemoryAccess,
            IARG_PTR, tritonInst,
            IARG_MEMORYREAD_EA,
            IARG_MEMORYREAD_SIZE,
            IARG_END);
        }

        /* Save memory read2 information */
        if (INS_HasMemoryRead2(ins)) {
          INS_InsertCall(ins, IPOINT_BEFORE, (AFUNPTR)saveMemoryAccess,
            IARG_PTR, tritonInst,
            IARG_MEMORYREAD2_EA,
            IARG_MEMORYREAD_SIZE,
            IARG_END);
        }

        /* Callback before */
        INS_InsertCall(ins, IPOINT_BEFORE, (AFUNPTR)callbackBefore,
          IARG_PTR, tritonInst,
          IARG_INST_PTR,
          IARG_UINT32, INS_Size(ins),
          IARG_CONTEXT,
          IARG_THREAD_ID,
          IARG_END);

        /* Callback after */
        /* The post-syscall context must be caught with INSERT_POINT.SYSCALL_EXIT */
        if (INS_IsSyscall(ins) == false) {
          IPOINT where = IPOINT_AFTER;
          if (INS_HasFallThrough(ins) == false)
            where = IPOINT_TAKEN_BRANCH;
          INS_InsertCall(ins, where, (AFUNPTR)callbackAfter, IARG_PTR, tritonInst, IARG_CONTEXT, IARG_THREAD_ID, IARG_END);
        }

        /* I/O memory monitoring for the snapshot */
        if (INS_OperandCount(ins) > 1 && INS_MemoryOperandIsWritten(ins, 0)) {
          INS_InsertCall(ins, IPOINT_BEFORE, (AFUNPTR)callbackSnapshot,
            IARG_MEMORYOP_EA, 0,
            IARG_UINT32, INS_MemoryWriteSize(ins),
            IARG_END);
        }
      }
    }
  }


  /* Usage function */
  static triton::sint32 Usage() {
    std::cerr << KNOB_BASE::StringKnobSummary() << std::endl;
    return -1;
  }


  //! The pintool's entry point
  int main(int argc, char *argv[]) {
    PIN_InitSymbols();
    PIN_SetSyntaxIntel();
    if (PIN_Init(argc, argv))
      return Usage();

    /* Init the Triton module */
    triton::bindings::python::inittriton();

    /* Define the Triton architecture */
    if (sizeof(void*) == QWORD_SIZE)
      tracer::pintool::api.setArchitecture(triton::arch::ARCH_X86_64);
    else
      tracer::pintool::api.setArchitecture(triton::arch::ARCH_X86);

    /* During the execution, provide concrete values only if Triton needs them - cf #376, #632 and #645 */
    tracer::pintool::api.addCallback(tracer::pintool::context::needConcreteRegisterValue);
    tracer::pintool::api.addCallback(tracer::pintool::context::needConcreteMemoryValue);

    /* Image callback */
    IMG_AddInstrumentFunction(IMG_Instrumentation, nullptr);

    /* Instruction callback */
    TRACE_AddInstrumentFunction(TRACE_Instrumentation, nullptr);

    /* End instrumentation callback */
    PIN_AddFiniFunction(callbackFini, nullptr);

    /* Syscall entry callback */
    PIN_AddSyscallEntryFunction(callbackSyscallEntry, nullptr);

    /* Syscall exit callback */
    PIN_AddSyscallExitFunction(callbackSyscallExit, nullptr);

    /* Signals callback */
    PIN_InterceptSignal(SIGHUP,  callbackSignals, nullptr);
    PIN_InterceptSignal(SIGINT,  callbackSignals, nullptr);
    PIN_InterceptSignal(SIGQUIT, callbackSignals, nullptr);
    PIN_InterceptSignal(SIGILL,  callbackSignals, nullptr);
    PIN_InterceptSignal(SIGABRT, callbackSignals, nullptr);
    PIN_InterceptSignal(SIGFPE,  callbackSignals, nullptr);
    PIN_InterceptSignal(SIGKILL, callbackSignals, nullptr);
    PIN_InterceptSignal(SIGSEGV, callbackSignals, nullptr);
    PIN_InterceptSignal(SIGPIPE, callbackSignals, nullptr);
    PIN_InterceptSignal(SIGALRM, callbackSignals, nullptr);
    PIN_InterceptSignal(SIGTERM, callbackSignals, nullptr);
    PIN_InterceptSignal(SIGBUS,  callbackSignals, nullptr);

    /* Execute the Pin Python bindings */
    tracer::pintool::initBindings(argc, argv);
    tracer::pintool::execScript(KnobPythonModule.Value().c_str());

    return 0;
  }
};
VOID Trace(TRACE trace, VOID *v)
{
    static UINT32 basic_blocks = 0;

    const BOOL accurate_handling_of_predicates = KnobProfilePredicated.Value();
    ADDRINT pc = TRACE_Address(trace);
    ADDRINT start_pc = pc;

    UINT32 new_blocks = 0;
    for (BBL bbl = TRACE_BblHead(trace); BBL_Valid(bbl); bbl = BBL_Next(bbl))
    {
        const INS head = BBL_InsHead(bbl);
        if (!INS_Valid(head)) continue;
        new_blocks++;
    }

    TRACE_InsertCall(trace, IPOINT_BEFORE, AFUNPTR(validate_bbl_count),
                     IARG_THREAD_ID, IARG_UINT32, basic_blocks + new_blocks, IARG_END);

    for (BBL bbl = TRACE_BblHead(trace); BBL_Valid(bbl); bbl = BBL_Next(bbl))
    {
        const INS head = BBL_InsHead(bbl);
        if (!INS_Valid(head)) continue;

        // Summarize the stats for the bbl in a 0-terminated list.
        // This is done at instrumentation time.
        const UINT32 n = IndexStringLength(bbl, 1);

        // stats is an array of index types. We later multiply it by the
        // dynamic count for a block.
        stat_index_t *const stats = new stat_index_t[n + 1];
        stat_index_t *const stats_end = stats + (n + 1);
        stat_index_t *curr = stats;
        UINT32 ninsts = 0;

        for (INS ins = head; INS_Valid(ins); ins = INS_Next(ins))
        {
            unsigned int instruction_size = INS_Size(ins);

            // This checks for x86-specific opcodes.
            CheckForSpecialMarkers(ins, pc, instruction_size);

            // Count the number of times a predicated instruction is actually executed.
            // This is expensive and hence disabled by default.
            if (INS_IsPredicated(ins) && accurate_handling_of_predicates)
            {
                INS_InsertPredicatedCall(ins, IPOINT_BEFORE, AFUNPTR(docount_predicated_true),
                                         IARG_UINT32, INS_GetIndex(ins), IARG_THREAD_ID, IARG_END);
            }

            if (KnobMapToFile)
            {
                INT32 line;
                string filename;
                PIN_GetSourceLocation(pc, NULL, &line, &filename);
                if (!filename.empty())
                    *out << "MAPADDR 0x" << hex << pc << " " << dec << line << " " << filename << endl;
            }

            curr = INS_GenerateIndexString(ins, curr, 1);
            if (measurement == measure_opcode)
                curr = INS_GenerateIndexFMA(ins, curr);

            pc = pc + instruction_size;
            ninsts++;
        }

        // stats terminator
        *curr++ = 0;
        ASSERTX(curr == stats_end);

        // Insert instrumentation to count the number of times the bbl is executed.
        BBLSTATS *bblstats = new BBLSTATS(stats, start_pc, ninsts, pc - start_pc);
        INS_InsertCall(head, IPOINT_BEFORE, AFUNPTR(docount_bbl), IARG_FAST_ANALYSIS_CALL,
                       IARG_UINT32, basic_blocks, IARG_THREAD_ID, IARG_END);

        // Remember the counter and stats so we can compute a summary at the end.
        basic_blocks++;
        PIN_GetLock(&bbl_list_lock, 1);
        statsList.push_back(bblstats);
        PIN_ReleaseLock(&bbl_list_lock);
    }
}
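// docount_bbl is the per-block analysis routine referenced above. Given the per-thread
// block_counts vector used in emit_bbl_stats_sorted, a sketch of it could be (the body
// is an assumption; validate_bbl_count is presumably what grows the vector beforehand):
VOID PIN_FAST_ANALYSIS_CALL docount_bbl(UINT32 block_id, THREADID tid)
{
    thread_data_t *tdata = get_tls(tid);
    tdata->block_counts[block_id] += 1;  // one more execution of this basic block
}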
VOID save_instrumentation_infos()
{
    /// basic_blocks_info section
    json_t *bbls_info = json_object();
    json_t *bbls_list = json_array();

    // unique_count field
    json_object_set_new(bbls_info, "unique_count", json_integer(basic_blocks_info.size()));
    // list field
    json_object_set_new(bbls_info, "list", bbls_list);
    for (BASIC_BLOCKS_INFO_T::const_iterator it = basic_blocks_info.begin(); it != basic_blocks_info.end(); ++it)
    {
        json_t *bbl_info = json_object();
        json_object_set_new(bbl_info, "address", json_integer(it->first));
        json_object_set_new(bbl_info, "nbins", json_integer(it->second));
        json_array_append_new(bbls_list, bbl_info);
    }

    /// blacklisted_modules section
    json_t *blacklisted_modules = json_object();
    json_t *modules_list = json_array();

    // unique_count field
    json_object_set_new(blacklisted_modules, "unique_count", json_integer(modules_blacklisted.size()));
    // list field
    json_object_set_new(blacklisted_modules, "list", modules_list);
    for (MODULE_BLACKLIST_T::const_iterator it = modules_blacklisted.begin(); it != modules_blacklisted.end(); ++it)
    {
        json_t *mod_info = json_object();
        json_object_set_new(mod_info, "path", json_string(it->first.c_str()));
        json_object_set_new(mod_info, "low_address", json_integer(it->second.first));
        json_object_set_new(mod_info, "high_address", json_integer(it->second.second));
        json_array_append_new(modules_list, mod_info);
    }

    /// modules section
    json_t *modules = json_object();
    json_t *modules_list_ = json_array();

    // unique_count field
    json_object_set_new(modules, "unique_count", json_integer(module_list.size()));
    // list field
    json_object_set_new(modules, "list", modules_list_);
    for (MODULE_BLACKLIST_T::const_iterator it = module_list.begin(); it != module_list.end(); ++it)
    {
        json_t *mod_info = json_object();
        json_object_set_new(mod_info, "path", json_string(it->first.c_str()));
        json_object_set_new(mod_info, "low_address", json_integer(it->second.first));
        json_object_set_new(mod_info, "high_address", json_integer(it->second.second));
        json_array_append_new(modules_list_, mod_info);
    }

    /// Building the tree
    json_t *root = json_object();
    json_object_set_new(root, "basic_blocks_info", bbls_info);
    json_object_set_new(root, "blacklisted_modules", blacklisted_modules);
    json_object_set_new(root, "modules", modules);

    /// Writing the report
    FILE *f = fopen(KnobOutputPath.Value().c_str(), "w");
    json_dumpf(root, f, JSON_COMPACT | JSON_ENSURE_ASCII);
    fclose(f);
}
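// The container types used above are defined elsewhere; a sketch of what they might
// look like, inferred purely from how the iterators are used in this function:
typedef std::map<ADDRINT, UINT32> BASIC_BLOCKS_INFO_T;                          // bbl address -> instruction count
typedef std::map<std::string, std::pair<ADDRINT, ADDRINT> > MODULE_BLACKLIST_T; // module path -> (low address, high address)
typedef MODULE_BLACKLIST_T MODULE_LIST_T;                                       // the full module list shares the same layout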
static VOID ImageLoad(IMG img, VOID *v)
{
    if (IMG_IsMainExecutable(img))
    {
        // Instrument ChangeRegs
        RTN changeRegsRtn = RTN_FindByName(img, "ChangeRegs");
        assert(RTN_Valid(changeRegsRtn));
        RTN_Open(changeRegsRtn);
        REGSET regset = GetTestRegset();
        for (INS ins = RTN_InsHead(changeRegsRtn); INS_Valid(ins); ins = INS_Next(ins))
        {
            REG reg = INS_RegW(ins, 0);
            if (REGSET_Contains(regset, reg))
            {
                if (KnobTestReference.Value() == "default")
                {
                    INS_InsertCall(ins, IPOINT_AFTER, AFUNPTR(ChangeRegAfter),
                                   IARG_UINT32, reg, IARG_REG_REFERENCE, reg, IARG_END);
                }
                else if (KnobTestReference.Value() == "const")
                {
                    INS_InsertCall(ins, IPOINT_AFTER, AFUNPTR(ChangeRegAfter),
                                   IARG_UINT32, reg, IARG_REG_CONST_REFERENCE, reg, IARG_END);
                }
                else
                {
                    OutFile << "ERROR: Unknown reference requested for testing: "
                            << KnobTestReference.Value() << endl;
                    PIN_ExitApplication(2); // never returns
                }
            }
        }
        RTN_Close(changeRegsRtn);

        // When using the regular (R/W) reference, also check for correct modification of the registers.
        if (KnobTestReference.Value() == "default")
        {
            // Find the application's modified values in memory
            RTN SaveAppPointersRtn = RTN_FindByName(img, "SaveAppPointers");
            assert(RTN_Valid(SaveAppPointersRtn));
            RTN_Open(SaveAppPointersRtn);
            RTN_InsertCall(SaveAppPointersRtn, IPOINT_BEFORE, AFUNPTR(ToolSaveAppPointers),
                           IARG_FUNCARG_ENTRYPOINT_VALUE, 0,
                           IARG_FUNCARG_ENTRYPOINT_VALUE, 1,
                           IARG_FUNCARG_ENTRYPOINT_VALUE, 2,
                           IARG_FUNCARG_ENTRYPOINT_VALUE, 3,
                           IARG_END);
            RTN_Close(SaveAppPointersRtn);

            // Instrument SaveRegsToMem
            RTN SaveRegsToMemRtn = RTN_FindByName(img, "SaveRegsToMem");
            assert(RTN_Valid(SaveRegsToMemRtn));
            RTN_Open(SaveRegsToMemRtn);
            RTN_InsertCall(SaveRegsToMemRtn, IPOINT_AFTER, AFUNPTR(CheckToolModifiedValues),
                           IARG_CONTEXT, IARG_PTR, &OutFile, IARG_END);
            RTN_Close(SaveRegsToMemRtn);
        }
    }
}
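// ChangeRegAfter is defined elsewhere in the test. IARG_REG_REFERENCE hands the analysis
// routine a writable PIN_REGISTER*, so a sketch of its expected shape is below; the
// actual modification performed is a guess, and with IARG_REG_CONST_REFERENCE the same
// signature applies but the contents must only be read:
VOID ChangeRegAfter(UINT32 reg, PIN_REGISTER *regRef)
{
    // Hypothetical modification that CheckToolModifiedValues would later verify.
    regRef->qword[0] += 1;
}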
void ConfigureTool()
{
    Config *config = Config::getInstance();
    config->INTER_WRITESET_ANALYSIS_ENABLE = KnobInterWriteSetAnalysis.Value();
    config->ANTIEVASION_MODE = KnobAntiEvasion.Value();
    config->ANTIEVASION_MODE_INS_PATCHING = KnobAntiEvasionINSpatcher.Value();
    config->ANTIEVASION_MODE_SREAD = KnobAntiEvasionSuspiciousRead.Value();
    config->ANTIEVASION_MODE_SWRITE = KnobAntiEvasionSuspiciousWrite.Value();
    config->UNPACKING_MODE = KnobUnpacking.Value();
    config->ADVANCED_IAT_FIX = KnobAdvancedIATFixing.Value();
    config->POLYMORPHIC_CODE_PATCH = KnobPolymorphicCodePatch.Value();
    config->NULLIFY_UNK_IAT_ENTRY = KnobNullyfyUnknownIATEntry.Value();

    if (KnobInterWriteSetAnalysis.Value() > 1 &&
        KnobInterWriteSetAnalysis.Value() <= Config::MAX_JUMP_INTER_WRITE_SET_ANALYSIS)
    {
        config->WRITEINTERVAL_MAX_NUMBER_JMP = KnobInterWriteSetAnalysis.Value();
    }
    else
    {
        MYWARN("Invalid number of jumps to track, set to default value: 2\n");
        config->WRITEINTERVAL_MAX_NUMBER_JMP = 2; // fall back to the default of 2 on an invalid value
    }
}
int main(int argc, char *argv[])
{
    // Initialize the PIN library. Print a help message if -h(elp) is specified
    // on the command line or if the command line is invalid.
    PIN_InitSymbols();
    if (PIN_Init(argc, argv))
    {
        return Usage();
    }

    // Register functions to be called to instrument traces.
    // PIN_AddSyscallEntryFunction(SyscallEntryF, 0);
    // PIN_AddSyscallExitFunction(SyscallExitF, 0);
    // TRACE_AddInstrumentFunction(TraceIns, 0);
    // INS_AddInstrumentFunction(Instruction, 0);
    PIN_AddFollowChildProcessFunction(FollowChild, 0);
    IMG_AddInstrumentFunction(rtnInst, 0);
    // INS_AddInstrumentFunction(InstructionProp, 0); // for function summary
    TRACE_AddInstrumentFunction(Trace, 0);

    // Register function to be called when the application exits.
    PIN_AddFiniFunction(Fini, 0);

    cerr << "===============================================" << endl;
    cerr << "This application is instrumented by MyPinTool" << endl;
    if (!KnobOutputFile.Value().empty())
    {
        cerr << "See file " << KnobOutputFile.Value() << " for analysis results" << endl;
        string fileName = KnobOutputFile.Value();
        out.open(fileName.c_str());
        out << hex;
    }
    cerr << "===============================================" << endl;

    // Start the program; never returns.
    PIN_StartProgram();

    return 0;
}
VOID Fini(int code, VOID *v)
{
    std::ofstream out(KnobOutputFile.Value().c_str());

    // print D-cache profile
    // @todo what does this print
    out << "PIN:MEMLATENCIES 1.0. 0x0\n";
    out << "#\n"
           "# DCACHE stats\n"
           "#\n";

    out << "## distribution of retention time in # of memory accesses\n";
    std::map<UINT32, UINT64>::iterator I = g_hInterval.begin(), E = g_hInterval.end();
    for (; I != E; ++I)
    {
        out << I->first << ":\t" << I->second << ":\t" << I->second / (double)g_nTotalInterval << "\n";
    }
    out << "###" << endl;
    out << "TotalInterval:\t" << g_nTotalInterval << endl;
    out << "TotalWrite:\t" << g_nTotalWrite << endl;
    out << "TotalWriteLong:\t" << g_nTotalWriteL << endl;
    out << "TotalWriteZero:\t" << g_hInterval[0] << endl;
    out << "TotalCycles:\t" << g_nClock << "\n";

    // output locality info
    // 1) store it into a histogram
    std::map<UINT32, UINT32> hInterval;
    UINT count = 0;
    UINT countL = 0;
    std::map<ADDRINT, bool>::iterator K1 = g_hWriteInstIsL.begin(), K1E = g_hWriteInstIsL.end();
    for (; K1 != K1E; ++K1)
    {
        //cerr << hex << K1->first << ":" << dec << K1->second;
        if (!K1->second)
        {
            ++count;
        }
        else
        {
            ++countL;
            if (count != 0)
            {
                //cerr << "===" << count << endl;
                UINT32 order = OrderNum(count, 1);
                ++hInterval[order];
            }
            count = 0;
        }
        //cerr << endl;
    }

    // 2) dump locality info
    out << "#######locality info, total write insts: " << countL << " / " << g_hWriteInstIsL.size()
        << " ##########" << endl;
    std::map<UINT32, UINT32>::iterator K = hInterval.begin(), KE = hInterval.end();
    for (; K != KE; ++K)
    {
        out << K->first << ":\t" << K->second << endl;
    }
    out.close();

    std::ofstream out1("longWrite.txt");
    std::map<ADDRINT, UINT64>::iterator J = g_hWriteInstL.begin(), JE = g_hWriteInstL.end();
    for (; J != JE; ++J)
    {
        out1 << J->first << "\t" << J->second << endl;
    }
    out1.close();
}
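// OrderNum() is defined elsewhere; since it turns a run length into a histogram bucket,
// a plausible sketch is an order-of-magnitude bucketizer. The base-10 bucketing and the
// meaning of the second parameter as a scale factor are assumptions for illustration:
static UINT32 OrderNum(UINT32 value, UINT32 scale)
{
    UINT32 order = 0;
    for (UINT32 v = value / scale; v > 0; v /= 10)  // count the decimal digits of value/scale
        ++order;
    return order;
}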
VOID Fini(int, VOID *v)
{
    string filename;
    std::ofstream out;

    // dump the insmix profile
    filename = KnobOutputFile.Value();
    if (KnobPid)
    {
        filename += "." + decstr(getpid_portable());
    }
    out.open(filename.c_str());
    out << "INSMIX 1.0 0\n";

    DumpStats(out, GlobalStatsStatic, false, 0, "$static-counts");
    out << endl;

    // dynamic counts
    sort(statsList.begin(), statsList.end(), CompareLess);
    statsList.push_back(0); // add terminator marker

    STATS DynamicRtn;
    UINT32 rtn_num = 0;
    for (vector<const BBLSTATS*>::iterator bi = statsList.begin(); bi != statsList.end(); bi++)
    {
        const BBLSTATS *b = (*bi);

        if (b == 0 || rtn_num != b->_rtn_num)
        {
            if (rtn_num > 0 && KnobProfileRoutines)
            {
                DumpStats(out, DynamicRtn, false, 0,
                          "$rtn-counts " + longstr(rtn_num, rtn_table[rtn_num]->_name) +
                          " at " + hexstr(rtn_table[rtn_num]->_address));
                out << "#" << endl;
            }
            if (b != 0)
            {
                rtn_num = b->_rtn_num;
                DynamicRtn.Clear();
            }
            else
            {
                break;
            }
        }

        for (const UINT16 *stats = b->_stats; *stats; stats++)
        {
            ASSERT(*stats < MAX_INDEX, "bad index " + decstr(*stats) + " at " + hexstr(b->_addr) + "\n");
            DynamicRtn.unpredicated[*stats] += b->_counter;
            GlobalStatsDynamic.unpredicated[*stats] += b->_counter;
        }
    }

    DumpStats(out, GlobalStatsDynamic, KnobProfilePredicated, 0, "$dynamic-counts");
    out << "# $eof" << endl;
    out.close();

    // dump the bblcnt profile
    filename = KnobOutput2File.Value();
    if (KnobPid)
    {
        filename += "." + decstr(getpid_portable());
    }
    out.open(filename.c_str());
    out << "BBLCOUNT 1.0 0\n";
    for (vector<const BBLSTATS*>::iterator bi = statsList.begin(); bi != statsList.end(); bi++)
    {
        const BBLSTATS *b = (*bi);
        if (b == 0) break; // sentinel
        out << "0x" << hex << b->_addr << " " << dec << b->_counter << " "
            << b->_numins << " " << b->_size << endl;
    }
    out << "# $eof" << endl;
    out.close();
}
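// CompareLess is the sort predicate used above. The per-routine accumulation loop relies
// on blocks of the same routine being adjacent after the sort, so a sketch would be
// (ordering purely by routine id; any tie-breaking in the original is not shown):
static bool CompareLess(const BBLSTATS *s1, const BBLSTATS *s2)
{
    return s1->_rtn_num < s2->_rtn_num;
}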
VOID Trace(TRACE trace, VOID *v)
{
    if (KnobNoSharedLibs.Value() &&
        IMG_Type(SEC_Img(RTN_Sec(TRACE_Rtn(trace)))) == IMG_TYPE_SHAREDLIB)
        return;

    RTN rtn = TRACE_Rtn(trace);
    ADDRINT rtn_address;
    string rtn_name;  // keep a copy so the name stays valid for strcpy below
    UINT32 rtn_num;
    if (!RTN_Valid(rtn))
    {
        //cerr << "Cannot find valid RTN for trace at address " << TRACE_Address(trace);
        rtn_address = 0;
        rtn_name = "UNKNOWN";
        rtn_num = 0;
    }
    else
    {
        rtn_num = RTN_Id(rtn);
        rtn_address = RTN_Address(rtn);
        rtn_name = RTN_Name(rtn);
    }

    map<UINT32, RTN_TABLE_ENTRY *>::const_iterator it = rtn_table.find(rtn_num);
    if (it == rtn_table.end())
    {
        char *str = new char[rtn_name.size() + 1];
        strcpy(str, rtn_name.c_str());
        RTN_TABLE_ENTRY *rtn_table_entry = new RTN_TABLE_ENTRY(rtn_address, str);
        rtn_table[rtn_num] = rtn_table_entry;
    }

    const BOOL accurate_handling_of_predicates = KnobProfilePredicated.Value();

    for (BBL bbl = TRACE_BblHead(trace); BBL_Valid(bbl); bbl = BBL_Next(bbl))
    {
        // Summarize the stats for the bbl in a 0-terminated list.
        // This is done at instrumentation time.
        const UINT32 n = IndexStringLength(bbl, 1);

        UINT16 *const stats = new UINT16[n + 1];
        UINT16 *const stats_end = stats + (n + 1);
        UINT16 *curr = stats;
        UINT32 numins = 0;
        UINT32 size = 0;

        for (INS ins = BBL_InsHead(bbl); INS_Valid(ins); ins = INS_Next(ins))
        {
            if ((INS_IsMemoryRead(ins) || INS_IsMemoryWrite(ins)) && !INS_IsStandardMemop(ins))
                continue;

            numins += 1;
            size += INS_Size(ins);

            // Count the number of times a predicated instruction is actually executed.
            // This is expensive and hence disabled by default.
            if (INS_IsPredicated(ins) && accurate_handling_of_predicates)
            {
                INS_InsertPredicatedCall(ins, IPOINT_BEFORE, AFUNPTR(docount), IARG_FAST_ANALYSIS_CALL,
                                         IARG_PTR, &(GlobalStatsDynamic.predicated_true[INS_Opcode(ins)]),
                                         IARG_END);
            }

            curr = INS_GenerateIndexString(ins, curr, 1);
        }

        // string terminator
        *curr++ = 0;
        ASSERTX(curr == stats_end);

        // Insert instrumentation to count the number of times the bbl is executed.
        BBLSTATS *bblstats = new BBLSTATS(stats, INS_Address(BBL_InsHead(bbl)), rtn_num, size, numins);
        INS_InsertCall(BBL_InsHead(bbl), IPOINT_BEFORE, AFUNPTR(docount), IARG_FAST_ANALYSIS_CALL,
                       IARG_PTR, &(bblstats->_counter), IARG_END);

        // Remember the counter and stats so we can compute a summary at the end.
        statsList.push_back(bblstats);
    }
}
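// docount is the analysis routine used above; it bumps the counter whose address was
// baked in at instrumentation time. A sketch, assuming the counter fields are UINT64:
VOID PIN_FAST_ANALYSIS_CALL docount(UINT64 *counter)
{
    (*counter)++;
}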