/* Add a data-write event for instruction 'inode'.  If the immediately
   preceding event is a read of the same size, at the same address, from
   the same instruction, upgrade it in place to a modify (Event_Dm)
   instead of queueing a separate write. */
static void addEvent_Dw ( CgState* cgs, InstrInfo* inode,
                          Int datasize, IRAtom* ea )
{
   Event* evt;
   tl_assert(isIRAtom(ea));
   tl_assert(datasize >= 1 && datasize <= MIN_LINE_SIZE);

   /* Is it possible to merge this write with the preceding read?
      Only form a pointer to the previous slot when one actually exists:
      the original code computed &events[events_used-1] unconditionally,
      which is undefined behaviour when events_used == 0 (pointer one
      before the start of the array), even though it was never
      dereferenced in that case. */
   if (cgs->events_used > 0) {
      Event* lastEvt = &cgs->events[cgs->events_used-1];
      if (lastEvt->ekind == Event_Dr
          && lastEvt->datasize == datasize
          && lastEvt->inode == inode
          && eqIRAtom(lastEvt->dataEA, ea))
      {
         lastEvt->ekind = Event_Dm;
         return;
      }
   }

   /* No.  Add as normal. */
   if (cgs->events_used == N_EVENTS)
      flushEvents(cgs);
   tl_assert(cgs->events_used >= 0 && cgs->events_used < N_EVENTS);
   evt = &cgs->events[cgs->events_used];
   evt->ekind    = Event_Dw;
   evt->inode    = inode;
   evt->datasize = datasize;
   evt->dataEA   = ea;
   cgs->events_used++;
}
/* Add an ordinary write event.  Try to merge it with an immediately
   preceding ordinary (unguarded) read event of the same size to the
   same address, upgrading that read to a modify (Event_Dm). */
static void addEvent_Dw ( IRSB* sb, IRAtom* daddr, Int dsize )
{
   Event* evt;
   tl_assert(clo_trace_mem);
   tl_assert(isIRAtom(daddr));
   tl_assert(dsize >= 1 && dsize <= MAX_DSIZE);

   // Is it possible to merge this write with the preceding read?
   // Only look at the previous slot when one exists: the original code
   // formed &events[-1] when events_used == 0, which is undefined
   // behaviour even though that pointer was never dereferenced.
   if (events_used > 0) {
      Event* lastEvt = &events[events_used-1];
      if (lastEvt->ekind == Event_Dr
          && lastEvt->size  == dsize
          && lastEvt->guard == NULL
          && eqIRAtom(lastEvt->addr, daddr))
      {
         lastEvt->ekind = Event_Dm;
         return;
      }
   }

   // No.  Add as normal.
   if (events_used == N_EVENTS)
      flushEvents(sb);
   tl_assert(events_used >= 0 && events_used < N_EVENTS);
   evt = &events[events_used];
   evt->ekind = Event_Dw;
   evt->size  = dsize;
   evt->addr  = daddr;
   evt->guard = NULL;
   events_used++;
}
/* Add a write event tagged with the current function name.  The name is
   copied into tool-heap storage because 'fnname' points to a shared
   buffer that is overwritten before the event is consumed. */
static void addEvent_Dw ( IRSB* sb, char *fnname, IRAtom* daddr, Int dsize )
{
   Event* evt;
   char*  buf;

   tl_assert(clo_trace_mem);
   tl_assert(isIRAtom(daddr));
   tl_assert(dsize >= 1 && dsize <= MAX_DSIZE);

   /* Size the copy to the actual name length: the original allocated a
      fixed 100 bytes and VG_(strcpy)'d into it, overflowing the buffer
      for any symbol name of 100+ characters. */
   buf = (char *)VG_(malloc)("addEvent_Dw",
                             (VG_(strlen)(fnname)+1)*sizeof(char));
   tl_assert(buf!=NULL);
   VG_(strcpy)(buf,fnname);

   /* Merging of a preceding same-address read into an Event_Dm is
      intentionally disabled in this tool (the upstream lackey code did
      merge; this variant records reads and writes separately). */

   /* Flush BEFORE the capacity check: the original had flushing
      commented out entirely (only a dead "//if (events_used ==
      N_EVENTS) flushEvents(sb);" after the increment), so the
      (N_EVENTS+1)-th event in a superblock tripped the tl_assert
      below.  Flushing up front matches the sibling addEvent_* helpers
      and keeps the buffer within bounds. */
   if (events_used == N_EVENTS)
      flushEvents(sb);
   tl_assert(events_used >= 0 && events_used < N_EVENTS);
   evt = &events[events_used];
   evt->ekind  = Event_Dw;
   evt->size   = dsize;
   evt->addr   = daddr;
   evt->fnname = buf;
   events_used++;
}
// // RawWin32Mouse::resume // void RawWin32Mouse::resume() { mActive = true; flushEvents(); registerMouseDevice(); registerWindowProc(); }
// Replace the currently loaded world with 'nodes'.  The scene stores
// (and previously deleted, so presumably owns — confirm caller
// contract) the namespace and URL documents passed in.  Old bindable
// stacks, pending events and existing nodes are torn down before the
// new nodes are installed, then initial set_bind events are sent to the
// first background/fog/navigationInfo/viewpoint nodes, and registered
// callbacks are notified.
void VrmlScene::replaceWorld( VrmlMFNode &nodes, VrmlNamespace *ns,
                              Doc *url, Doc *urlLocal )
{
  theSystem->debug("replaceWorld( url %s )\n", url->url());

  // Release the previous namespace/URL state and install the new one.
  delete d_namespace;
  delete d_url;
  delete d_urlLocal;
  d_namespace = ns;
  d_url = url;
  d_urlLocal = urlLocal;

  // Clear bindable stacks.
  bindableRemoveAll( d_backgroundStack );
  bindableRemoveAll( d_fogStack );
  bindableRemoveAll( d_navigationInfoStack );
  bindableRemoveAll( d_viewpointStack );

  // Get rid of current world: pending events, nodes.
  flushEvents();
  d_nodes.removeChildren();

  // Do this to set the relative URL
  d_nodes.addToScene( (VrmlScene *) this, urlDoc()->url() );

  // Add the nodes to a Group and put the group in the scene.
  // This will load EXTERNPROTOs and Inlines.
  d_nodes.addChildren( nodes );

  // Send initial set_binds to bindable nodes
  double timeNow = theSystem->time();
  VrmlSFBool flag(true);
  VrmlNode *bindable = 0; // compiler warning

  if (d_backgrounds->size() > 0 &&
      (bindable = d_backgrounds->front()) != 0)
    bindable->eventIn( timeNow, "set_bind", &flag );

  if (d_fogs->size() > 0 &&
      (bindable = d_fogs->front()) != 0)
    bindable->eventIn( timeNow, "set_bind", &flag );

  if (d_navigationInfos->size() > 0 &&
      (bindable = d_navigationInfos->front()) != 0)
    bindable->eventIn( timeNow, "set_bind", &flag );

  if (d_viewpoints->size() > 0 &&
      (bindable = d_viewpoints->front()) != 0)
    bindable->eventIn( timeNow, "set_bind", &flag );

  // Notify anyone interested that the world has changed
  doCallbacks( REPLACE_WORLD );

  setModified();
}
/* Queue an instruction-read event for 'inode', making room in the
   event buffer first if it is full. */
static void addEvent_Ir ( CgState* cgs, InstrInfo* inode )
{
   Event* ev;
   if (cgs->events_used == N_EVENTS)
      flushEvents(cgs);
   tl_assert(cgs->events_used >= 0 && cgs->events_used < N_EVENTS);
   ev = &cgs->events[cgs->events_used++];
   ev->ekind    = Event_Ir;
   ev->inode    = inode;
   ev->datasize = 0;
   ev->dataEA   = NULL;  /* paranoia */
}
static void addEvent_Ir ( IRSB* sb, IRAtom* iaddr, UInt isize ) { Event* evt; tl_assert( (VG_MIN_INSTR_SZB <= isize && isize <= VG_MAX_INSTR_SZB) || VG_CLREQ_SZB == isize ); if (events_used == N_EVENTS) flushEvents(sb); tl_assert(events_used >= 0 && events_used < N_EVENTS); evt = &events[events_used]; evt->ekind = Event_Ir; evt->addr = iaddr; evt->size = isize; events_used++; }
/* Queue a data-read event of 'dsize' bytes at address 'daddr',
   flushing the event buffer first when it is full. */
static void addEvent_Dr ( IRSB* sb, IRAtom* daddr, Int dsize )
{
   Event* ev;
   tl_assert(isIRAtom(daddr));
   tl_assert(dsize >= 1 && dsize <= MAX_DSIZE);
   if (events_used == N_EVENTS)
      flushEvents(sb);
   tl_assert(events_used >= 0 && events_used < N_EVENTS);
   ev = &events[events_used++];
   ev->ekind = Event_Dr;
   ev->addr  = daddr;
   ev->size  = dsize;
}
/* Emit a dirty call to trace_fnentry() for function 'fnname'.  Pending
   memory events are flushed first so the trace stays in program order.
   The name is copied into tool-heap storage because 'fnname' points to
   a caller-owned buffer that may be reused before the dirty call runs
   at execution time; the copy is deliberately never freed — its address
   is baked into the translated code. */
static void addEvent_FnEntry ( IRSB* sb, char *fnname)
{
   IRExpr** argv;
   IRDirty* di;
   /* Size the copy to the actual name length: the original allocated a
      fixed 100 bytes and VG_(strcpy)'d into it, overflowing for symbol
      names of 100+ characters. */
   char *buf = (char *)VG_(malloc)("addEvent_FnEntry",
                                   (VG_(strlen)(fnname)+1)*sizeof(char));
   tl_assert(buf!=NULL);
   VG_(strcpy)(buf,fnname);
   argv = mkIRExprVec_1( mkIRExpr_HWord( (HWord) buf ));
   di   = unsafeIRDirty_0_N( /*regparms*/1,
                             "trace_fnentry",
                             VG_(fnptr_to_fnentry)( trace_fnentry ),
                             argv );
   if(events_used > 0)
      flushEvents(sb);
   addStmtToIRSB( sb, IRStmt_Dirty(di) );
}
/* Queue a data-read event of 'datasize' bytes at 'ea' for instruction
   'inode', flushing the event buffer first when it is full. */
static void addEvent_Dr ( CgState* cgs, InstrInfo* inode, Int datasize,
                          IRAtom* ea )
{
   Event* ev;
   tl_assert(isIRAtom(ea));
   tl_assert(datasize >= 1 && datasize <= MIN_LINE_SIZE);
   if (cgs->events_used == N_EVENTS)
      flushEvents(cgs);
   tl_assert(cgs->events_used >= 0 && cgs->events_used < N_EVENTS);
   ev = &cgs->events[cgs->events_used++];
   ev->ekind    = Event_Dr;
   ev->inode    = inode;
   ev->datasize = datasize;
   ev->dataEA   = ea;
}
/* Add a guarded write event.  Unlike plain writes, guarded writes are
   never merged with a preceding read; the guard expression is carried
   along with the event. */
static void addEvent_Dw_guarded ( IRSB* sb, IRAtom* daddr, Int dsize,
                                  IRAtom* guard )
{
   Event* ev;
   tl_assert(clo_trace_mem);
   tl_assert(isIRAtom(daddr));
   tl_assert(dsize >= 1 && dsize <= MAX_DSIZE);
   if (events_used == N_EVENTS)
      flushEvents(sb);
   tl_assert(events_used >= 0 && events_used < N_EVENTS);
   ev = &events[events_used++];
   ev->ekind = Event_Dw;
   ev->size  = dsize;
   ev->addr  = daddr;
   ev->guard = guard;
}
// WARNING: If you aren't interested in instruction reads, you can omit the // code that adds calls to trace_instr() in flushEvents(). However, you // must still call this function, addEvent_Ir() -- it is necessary to add // the Ir events to the events list so that merging of paired load/store // events into modify events works correctly. static void addEvent_Ir ( IRSB* sb, IRAtom* iaddr, UInt isize ) { /* Not tracking modifies. So Excluded this function */ return; Event* evt; tl_assert(clo_trace_mem); tl_assert( (VG_MIN_INSTR_SZB <= isize && isize <= VG_MAX_INSTR_SZB) || VG_CLREQ_SZB == isize ); if (events_used == N_EVENTS) flushEvents(sb); tl_assert(events_used >= 0 && events_used < N_EVENTS); evt = &events[events_used]; evt->ekind = Event_Ir; evt->addr = iaddr; evt->size = isize; evt->fnname = "Instr"; events_used++; }
/* Emit a dirty call to trace_regw() recording a write of 'tmp_val' to
   guest register offset 'reg_no' inside function 'fnname'.  Pending
   memory events are flushed first so the trace stays in program order.
   The name is copied to tool-heap storage because 'fnname' points to a
   caller-owned buffer reused before the dirty call runs; the copy is
   deliberately never freed — its address is baked into the translated
   code. */
static void addEvent_RegW ( IRSB* sb, char *fnname, Int reg_no,
                            IRAtom* tmp_val)
{
   IRExpr** argv;
   IRDirty* di;
   tl_assert(clo_trace_mem);
   tl_assert(isIRAtom(tmp_val));
   /* Size the copy to the actual name length: the original allocated a
      fixed 100 bytes and VG_(strcpy)'d into it, overflowing for symbol
      names of 100+ characters. */
   char *buf = (char *)VG_(malloc)("addEvent_RegW",
                                   (VG_(strlen)(fnname)+1)*sizeof(char));
   tl_assert(buf!=NULL);
   VG_(strcpy)(buf,fnname);
   argv = mkIRExprVec_3( mkIRExpr_HWord( (HWord) buf ),
                         mkIRExpr_HWord( reg_no ),
                         tmp_val );
   di   = unsafeIRDirty_0_N( /*regparms*/3,
                             "trace_regw",
                             VG_(fnptr_to_fnentry)( trace_regw ),
                             argv );
   if(events_used > 0)
      flushEvents(sb);
   addStmtToIRSB( sb, IRStmt_Dirty(di) );
}
/* Add a read event tagged with the current function name.  The name is
   copied into tool-heap storage because 'fnname' points to a shared
   buffer that is overwritten before the event is consumed. */
static void addEvent_Dr ( IRSB* sb, char *fnname, IRAtom* daddr, Int dsize )
{
   Event* evt;
   char*  buf;

   tl_assert(clo_trace_mem);
   tl_assert(isIRAtom(daddr));
   tl_assert(dsize >= 1 && dsize <= MAX_DSIZE);

   /* Size the copy to the actual name length: the original allocated a
      fixed 100 bytes and VG_(strcpy)'d into it, overflowing the buffer
      for any symbol name of 100+ characters. */
   buf = (char *)VG_(malloc)("addEvent_Dr",
                             (VG_(strlen)(fnname)+1)*sizeof(char));
   tl_assert(buf!=NULL);
   VG_(strcpy)(buf,fnname);

   /* Flush BEFORE the capacity check: the original had only a dead
      "//if (events_used == N_EVENTS) flushEvents(sb);" after the
      increment, so the (N_EVENTS+1)-th event in a superblock tripped
      the tl_assert below.  Flushing up front matches the sibling
      addEvent_* helpers and keeps the buffer within bounds. */
   if (events_used == N_EVENTS)
      flushEvents(sb);
   tl_assert(events_used >= 0 && events_used < N_EVENTS);
   evt = &events[events_used];
   evt->ekind  = Event_Dr;
   evt->addr   = daddr;
   evt->size   = dsize;
   evt->fnname = buf;
   events_used++;
}
/* Instrument one basic block for cache simulation: walk the input IR,
   build an InstrInfo per guest instruction, queue Ir/Dr/Dw/Dm events,
   and flush them (as calls to the simulator helpers) at block exits and
   when the event buffer fills.  Returns the instrumented copy of the
   block. */
static IRBB* cg_instrument ( IRBB* bbIn, VexGuestLayout* layout,
                             Addr64 orig_addr_noredir, VexGuestExtents* vge,
                             IRType gWordTy, IRType hWordTy )
{
   Int        i, isize;
   IRStmt*    st;
   Addr64     cia; /* address of current insn */
   CgState    cgs;
   IRTypeEnv* tyenv = bbIn->tyenv;
   InstrInfo* curr_inode = NULL;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up BB, including copying of the where-next stuff. */
   cgs.bbOut = emptyIRBB();
   cgs.bbOut->tyenv = dopyIRTypeEnv(tyenv);
   tl_assert( isIRAtom(bbIn->next) );
   cgs.bbOut->next = dopyIRExpr(bbIn->next);
   cgs.bbOut->jumpkind = bbIn->jumpkind;

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRBB( cgs.bbOut, bbIn->stmts[i] );
      i++;
   }

   // Get the first statement, and initial cia from it
   tl_assert(bbIn->stmts_used > 0);
   tl_assert(i < bbIn->stmts_used);
   st = bbIn->stmts[i];
   tl_assert(Ist_IMark == st->tag);
   cia = st->Ist.IMark.addr;

   // Set up running state and get block info
   cgs.events_used = 0;
   cgs.bbInfo = get_BB_info(bbIn, (Addr)orig_addr_noredir);
   cgs.bbInfo_i = 0;

   if (DEBUG_CG)
      VG_(printf)("\n\n---------- cg_instrument ----------\n");

   // Traverse the block, initialising inodes, adding events and flushing as
   // necessary.
   for (/*use current i*/; i < bbIn->stmts_used; i++) {

      st = bbIn->stmts[i];
      tl_assert(isFlatIRStmt(st));

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_MFence:
            // Not memory-relevant; copied verbatim below.
            break;

         case Ist_IMark:
            cia   = st->Ist.IMark.addr;
            isize = st->Ist.IMark.len;

            // If Vex fails to decode an instruction, the size will be zero.
            // Pretend otherwise.
            if (isize == 0) isize = VG_MIN_INSTR_SZB;

            // Sanity-check size.
            tl_assert( (VG_MIN_INSTR_SZB <= isize
                        && isize <= VG_MAX_INSTR_SZB)
                     || VG_CLREQ_SZB == isize );

            // Get space for and init the inode, record it as the current one.
            // Subsequent Dr/Dw/Dm events from the same instruction will
            // also use it.
            curr_inode = setup_InstrInfo(&cgs, cia, isize);

            addEvent_Ir( &cgs, curr_inode );
            break;

         case Ist_Tmp: {
            IRExpr* data = st->Ist.Tmp.data;
            if (data->tag == Iex_Load) {
               IRExpr* aexpr = data->Iex.Load.addr;
               // Note also, endianness info is ignored.  I guess
               // that's not interesting.
               addEvent_Dr( &cgs, curr_inode,
                            sizeofIRType(data->Iex.Load.ty), aexpr );
            }
            break;
         }

         case Ist_Store: {
            IRExpr* data  = st->Ist.Store.data;
            IRExpr* aexpr = st->Ist.Store.addr;
            addEvent_Dw( &cgs, curr_inode,
                         sizeofIRType(typeOfIRExpr(tyenv, data)), aexpr );
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               // Large (eg. 28B, 108B, 512B on x86) data-sized
               // instructions will be done inaccurately, but they're
               // very rare and this avoids errors from hitting more
               // than two cache lines in the simulation.
               if (dataSize > MIN_LINE_SIZE)
                  dataSize = MIN_LINE_SIZE;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                  addEvent_Dr( &cgs, curr_inode, dataSize, d->mAddr );
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                  addEvent_Dw( &cgs, curr_inode, dataSize, d->mAddr );
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         case Ist_Exit:
            /* We may never reach the next statement, so need to flush
               all outstanding transactions now. */
            flushEvents( &cgs );
            break;

         default:
            tl_assert(0);
            break;
      }

      /* Copy the original statement */
      addStmtToIRBB( cgs.bbOut, st );

      if (DEBUG_CG) {
         ppIRStmt(st);
         VG_(printf)("\n");
      }
   }

   /* At the end of the bb.  Flush outstandings. */
   flushEvents( &cgs );

   /* done.  stay sane ... */
   tl_assert(cgs.bbInfo_i == cgs.bbInfo->n_instrs);

   if (DEBUG_CG) {
      VG_(printf)( "goto {");
      ppIRJumpKind(bbIn->jumpkind);
      VG_(printf)( "} ");
      ppIRExpr( bbIn->next );
      VG_(printf)( "}\n");
   }

   return cgs.bbOut;
}
// // ISDL12KeyboardInputDevice::reset // void ISDL12KeyboardInputDevice::reset() { flushEvents(); }
// // ISDL12MouseInputDevice::reset // void ISDL12MouseInputDevice::reset() { flushEvents(); center(); }
// // ISDL12JoystickInputDevice::reset // void ISDL12JoystickInputDevice::reset() { flushEvents(); }
/* Instrument one superblock.  Depending on command-line options this
   adds: per-SB / per-statement / per-instruction / per-Jcc counters
   (clo_basic_counts), SB address tracing (clo_trace_sbs), detailed
   per-type load/store/ALU counters (clo_detailed_counts), and queued
   memory-access events that are flushed into trace calls
   (clo_trace_mem).  Returns the instrumented copy of sbIn. */
static IRSB* lk_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             VexArchInfo* archinfo_host,
                             IRType gWordTy, IRType hWordTy )
{
   IRDirty*   di;
   Int        i;
   IRSB*      sbOut;
   HChar      fnname[100];
   IRTypeEnv* tyenv = sbIn->tyenv;
   Addr       iaddr = 0, dst;
   UInt       ilen = 0;
   Bool       condition_inverted = False;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up SB */
   sbOut = deepCopyIRSBExceptStmts(sbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( sbOut, sbIn->stmts[i] );
      i++;
   }

   if (clo_basic_counts) {
      /* Count this superblock. */
      di = unsafeIRDirty_0_N( 0, "add_one_SB_entered",
                                 VG_(fnptr_to_fnentry)( &add_one_SB_entered ),
                                 mkIRExprVec_0() );
      addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
   }

   if (clo_trace_sbs) {
      /* Print this superblock's address. */
      di = unsafeIRDirty_0_N( 0, "trace_superblock",
                                 VG_(fnptr_to_fnentry)( &trace_superblock ),
                                 mkIRExprVec_1( mkIRExpr_HWord( vge->base[0] ) ) );
      addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
   }

   if (clo_trace_mem) {
      // Event buffer is per-superblock; start empty.
      events_used = 0;
   }

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      if (clo_basic_counts) {
         /* Count one VEX statement. */
         di = unsafeIRDirty_0_N( 0, "add_one_IRStmt",
                                    VG_(fnptr_to_fnentry)( &add_one_IRStmt ),
                                    mkIRExprVec_0() );
         addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
      }

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_MBE:
            // Not memory-relevant; copy through unchanged.
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_IMark:
            if (clo_basic_counts) {
               /* Needed to be able to check for inverted condition in Ist_Exit */
               iaddr = st->Ist.IMark.addr;
               ilen  = st->Ist.IMark.len;

               /* Count guest instruction. */
               di = unsafeIRDirty_0_N( 0, "add_one_guest_instr",
                                          VG_(fnptr_to_fnentry)( &add_one_guest_instr ),
                                          mkIRExprVec_0() );
               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );

               /* An unconditional branch to a known destination in the
                * guest's instructions can be represented, in the IRSB to
                * instrument, by the VEX statements that are the
                * translation of that known destination. This feature is
                * called 'SB chasing' and can be influenced by command
                * line option --vex-guest-chase-thresh.
                *
                * To get an accurate count of the calls to a specific
                * function, taking SB chasing into account, we need to
                * check for each guest instruction (Ist_IMark) if it is
                * the entry point of a function.
                */
               tl_assert(clo_fnname);
               tl_assert(clo_fnname[0]);
               if (VG_(get_fnname_if_entry)(st->Ist.IMark.addr,
                                            fnname, sizeof(fnname))
                   && 0 == VG_(strcmp)(fnname, clo_fnname)) {
                  di = unsafeIRDirty_0_N(
                          0, "add_one_func_call",
                             VG_(fnptr_to_fnentry)( &add_one_func_call ),
                             mkIRExprVec_0() );
                  addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
               }
            }
            if (clo_trace_mem) {
               // WARNING: do not remove this function call, even if you
               // aren't interested in instruction reads. See the comment
               // above the function itself for more detail.
               addEvent_Ir( sbOut,
                            mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                            st->Ist.IMark.len );
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_WrTmp:
            // Add a call to trace_load() if --trace-mem=yes.
            if (clo_trace_mem) {
               IRExpr* data = st->Ist.WrTmp.data;
               if (data->tag == Iex_Load) {
                  addEvent_Dr( sbOut, data->Iex.Load.addr,
                               sizeofIRType(data->Iex.Load.ty) );
               }
            }
            if (clo_detailed_counts) {
               IRExpr* expr = st->Ist.WrTmp.data;
               IRType  type = typeOfIRExpr(sbOut->tyenv, expr);
               tl_assert(type != Ity_INVALID);
               switch (expr->tag) {
                  case Iex_Load:
                     instrument_detail( sbOut, OpLoad, type, NULL/*guard*/ );
                     break;
                  case Iex_Unop:
                  case Iex_Binop:
                  case Iex_Triop:
                  case Iex_Qop:
                  case Iex_ITE:
                     instrument_detail( sbOut, OpAlu, type, NULL/*guard*/ );
                     break;
                  default:
                     break;
               }
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Store: {
            IRExpr* data = st->Ist.Store.data;
            IRType  type = typeOfIRExpr(tyenv, data);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
               addEvent_Dw( sbOut, st->Ist.Store.addr,
                            sizeofIRType(type) );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpStore, type, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_StoreG: {
            IRStoreG* sg   = st->Ist.StoreG.details;
            IRExpr*   data = sg->data;
            IRType    type = typeOfIRExpr(tyenv, data);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
               addEvent_Dw_guarded( sbOut, sg->addr,
                                    sizeofIRType(type), sg->guard );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpStore, type, sg->guard );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LoadG: {
            IRLoadG* lg       = st->Ist.LoadG.details;
            IRType   type     = Ity_INVALID; /* loaded type */
            IRType   typeWide = Ity_INVALID; /* after implicit widening */
            typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
               addEvent_Dr_guarded( sbOut, lg->addr,
                                    sizeofIRType(type), lg->guard );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpLoad, type, lg->guard );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Dirty: {
            if (clo_trace_mem) {
               Int      dsize;
               IRDirty* d = st->Ist.Dirty.details;
               if (d->mFx != Ifx_None) {
                  // This dirty helper accesses memory.  Collect the details.
                  tl_assert(d->mAddr != NULL);
                  tl_assert(d->mSize != 0);
                  dsize = d->mSize;
                  if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                     addEvent_Dr( sbOut, d->mAddr, dsize );
                  if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                     addEvent_Dw( sbOut, d->mAddr, dsize );
               } else {
                  tl_assert(d->mAddr == NULL);
                  tl_assert(d->mSize == 0);
               }
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_CAS: {
            /* We treat it as a read and a write of the location.  I
               think that is the same behaviour as it was before IRCAS
               was introduced, since prior to that point, the Vex
               front ends would translate a lock-prefixed instruction
               into a (normal) read followed by a (normal) write. */
            Int    dataSize;
            IRType dataTy;
            IRCAS* cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
            dataSize = sizeofIRType(dataTy);
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */
            if (clo_trace_mem) {
               addEvent_Dr( sbOut, cas->addr, dataSize );
               addEvent_Dw( sbOut, cas->addr, dataSize );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
               if (cas->dataHi != NULL) /* dcas */
                  instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
               instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
               if (cas->dataHi != NULL) /* dcas */
                  instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LLSC: {
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
               if (clo_trace_mem) {
                  addEvent_Dr( sbOut, st->Ist.LLSC.addr,
                                      sizeofIRType(dataTy) );
                  /* flush events before LL, helps SC to succeed */
                  flushEvents(sbOut);
               }
               if (clo_detailed_counts)
                  instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
            } else {
               /* SC */
               dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
               if (clo_trace_mem)
                  addEvent_Dw( sbOut, st->Ist.LLSC.addr,
                                      sizeofIRType(dataTy) );
               if (clo_detailed_counts)
                  instrument_detail( sbOut, OpStore, dataTy,
                                     NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Exit:
            if (clo_basic_counts) {
               // The condition of a branch was inverted by VEX if a taken
               // branch is in fact a fall trough according to client address
               tl_assert(iaddr != 0);
               dst = (sizeof(Addr) == 4) ? st->Ist.Exit.dst->Ico.U32 :
                                           st->Ist.Exit.dst->Ico.U64;
               condition_inverted = (dst == iaddr + ilen);

               /* Count Jcc */
               if (!condition_inverted)
                  di = unsafeIRDirty_0_N( 0, "add_one_Jcc",
                                             VG_(fnptr_to_fnentry)( &add_one_Jcc ),
                                             mkIRExprVec_0() );
               else
                  di = unsafeIRDirty_0_N( 0, "add_one_inverted_Jcc",
                                             VG_(fnptr_to_fnentry)(
                                                &add_one_inverted_Jcc ),
                                             mkIRExprVec_0() );
               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
            }
            if (clo_trace_mem) {
               // The side exit may leave the SB; emit pending events first.
               flushEvents(sbOut);
            }

            addStmtToIRSB( sbOut, st );      // Original statement

            if (clo_basic_counts) {
               /* Count non-taken Jcc */
               if (!condition_inverted)
                  di = unsafeIRDirty_0_N( 0, "add_one_Jcc_untaken",
                                             VG_(fnptr_to_fnentry)(
                                                &add_one_Jcc_untaken ),
                                             mkIRExprVec_0() );
               else
                  di = unsafeIRDirty_0_N( 0, "add_one_inverted_Jcc_untaken",
                                             VG_(fnptr_to_fnentry)(
                                                &add_one_inverted_Jcc_untaken ),
                                             mkIRExprVec_0() );
               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
            }
            break;

         default:
            ppIRStmt(st);
            tl_assert(0);
      }
   }

   if (clo_basic_counts) {
      /* Count this basic block. */
      di = unsafeIRDirty_0_N( 0, "add_one_SB_completed",
                                 VG_(fnptr_to_fnentry)( &add_one_SB_completed ),
                                 mkIRExprVec_0() );
      addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
   }

   if (clo_trace_mem) {
      /* At the end of the sbIn.  Flush outstandings. */
      flushEvents(sbOut);
   }

   return sbOut;
}
/* This is copied mostly verbatim from lackey */
/* Instrument one superblock: queue typed read/write events (flushed
   into trace calls) for every memory-touching statement, plus
   instruction events when clo_trace_instrs is set.  Returns the
   instrumented copy of sbIn. */
static IRSB* mv_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             VexArchInfo* archinfo_host,
                             IRType gWordTy, IRType hWordTy )
{
   Int        i;
   IRSB*      sbOut;
   IRTypeEnv* tyenv = sbIn->tyenv;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   //ppIRSB(sbIn);

   /* Set up SB */
   sbOut = deepCopyIRSBExceptStmts(sbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( sbOut, sbIn->stmts[i] );
      i++;
   }

   // Event buffer is per-superblock; start empty.
   events_used = 0;

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_MBE:
            // Not memory-relevant; copy through unchanged.
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_IMark:
            // Presumably resets the load/store -> modify merge window at
            // instruction boundaries — confirm against addEvent_Dw.
            canCreateModify = False;
            if (clo_trace_instrs) {
               addEvent_Ir( sbOut,
                            mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                            st->Ist.IMark.len );
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_WrTmp:
            {
               IRExpr* data = st->Ist.WrTmp.data;
               if (data->tag == Iex_Load) {
                  addEvent_Dr( sbOut, data->Iex.Load.addr,
                               sizeofIRType(data->Iex.Load.ty),
                               IRTypeToMVType(data->Iex.Load.ty) );
               }
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Store:
            {
               IRExpr* data = st->Ist.Store.data;
               addEvent_Dw( sbOut, st->Ist.Store.addr,
                            sizeofIRType(typeOfIRExpr(tyenv, data)),
                            IRTypeToMVType(typeOfIRExpr(tyenv, data)) );
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Dirty: {
            Int      dsize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               // This dirty helper accesses memory.  Collect the details.
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dsize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                  addEvent_Dr( sbOut, d->mAddr, dsize, MV_DataInt32 );
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                  addEvent_Dw( sbOut, d->mAddr, dsize, MV_DataInt32 );
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_CAS: {
            /* We treat it as a read and a write of the location.  I
               think that is the same behaviour as it was before IRCAS
               was introduced, since prior to that point, the Vex
               front ends would translate a lock-prefixed instruction
               into a (normal) read followed by a (normal) write. */
            Int    dataSize;
            IRType dataTy;
            IRCAS* cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
            dataSize = sizeofIRType(dataTy);
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */
            addEvent_Dr( sbOut, cas->addr, dataSize,
                         IRTypeToMVType(dataTy) );
            addEvent_Dw( sbOut, cas->addr, dataSize,
                         IRTypeToMVType(dataTy) );
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LLSC: {
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
               addEvent_Dr( sbOut, st->Ist.LLSC.addr,
                            sizeofIRType(dataTy),
                            IRTypeToMVType(dataTy) );
            } else {
               /* SC */
               dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
               addEvent_Dw( sbOut, st->Ist.LLSC.addr,
                            sizeofIRType(dataTy),
                            IRTypeToMVType(dataTy) );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Exit:
            // Control may leave the superblock here; emit pending
            // events before the conditional exit.
            flushEvents(sbOut);
            addStmtToIRSB( sbOut, st );      // Original statement
            break;

         default:
            tl_assert(0);
      }
   }

   /* At the end of the sbIn.  Flush outstandings. */
   flushEvents(sbOut);

   return sbOut;
}
// Called on each beat: flush queued events into the given JS runtime,
// then flush pending state updates (in that order — events first).
void EventQueue::onBeat(jsi::Runtime &runtime) const {
  flushEvents(runtime);
  flushStateUpdates();
}
/* Instrument one superblock.  Statements are instrumented only while
   'shouldInterpret' is set, which is toggled at each IMark by comparing
   the instruction's source file against clo_filename.  Returns the
   instrumented copy of sbIn.
   NOTE(review): locals di, type, dst, ilen and condition_inverted are
   declared but never used here — likely leftovers from the lackey code
   this was derived from. */
static IRSB* el_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             IRType gWordTy, IRType hWordTy )
{
   IRDirty*   di;
   Int        i;
   IRSB*      sbOut;
   Char       fnname[100];
   IRType     type;
   IRTypeEnv* tyenv = sbIn->tyenv;
   Addr       iaddr = 0, dst;
   UInt       ilen = 0;
   Bool       condition_inverted = False;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up SB */
   sbOut = deepCopyIRSBExceptStmts(sbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( sbOut, sbIn->stmts[i] );
      i++;
   }

   // Event buffer is per-superblock; start empty.
   events_used = 0;

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_MBE:
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_IMark:
            // Store the last seen address
            lastAddress = st->Ist.IMark.addr;
            addEvent_Ir( sbOut,
                         mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                         st->Ist.IMark.len );
            // Enable instrumentation only for instructions originating
            // from the file named by clo_filename.
            VG_(get_filename)(lastAddress, (char*) g_buff1, kBuffSize);
            if(VG_(strcmp)(g_buff1, clo_filename) == 0) {
               shouldInterpret = 1;
               ppIRStmt(st);
               VG_(printf)("\n");
            } else {
               shouldInterpret = 0;
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_WrTmp:
            // Add a call to trace_load() if --trace-mem=yes.
            {
               if(shouldInterpret) {
                  IRExpr* data = st->Ist.WrTmp.data;
                  if (data->tag == Iex_Load) {
                     addEvent_Dr( sbOut, data->Iex.Load.addr,
                                  sizeofIRType(data->Iex.Load.ty) );
                  }
               }
               addStmtToIRSB( sbOut, st );
               break;
            }

         case Ist_Store: {
            if(shouldInterpret) {
               IRExpr* data = st->Ist.Store.data;
               addEvent_Dw( sbOut, st->Ist.Store.addr,
                            sizeofIRType(typeOfIRExpr(tyenv, data)) );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Dirty: {
            if(shouldInterpret) {
               Int dsize;
               IRDirty* d = st->Ist.Dirty.details;
               if (d->mFx != Ifx_None) {
                  // This dirty helper accesses memory.  Collect the details.
                  tl_assert(d->mAddr != NULL);
                  tl_assert(d->mSize != 0);
                  dsize = d->mSize;
                  if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                     addEvent_Dr( sbOut, d->mAddr, dsize );
                  if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                     addEvent_Dw( sbOut, d->mAddr, dsize );
               } else {
                  tl_assert(d->mAddr == NULL);
                  tl_assert(d->mSize == 0);
               }
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_CAS: {
            if(shouldInterpret) {
               /* Treated as a read and a write of the location. */
               Int    dataSize;
               IRType dataTy;
               IRCAS* cas = st->Ist.CAS.details;
               tl_assert(cas->addr != NULL);
               tl_assert(cas->dataLo != NULL);
               dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
               dataSize = sizeofIRType(dataTy);
               if (cas->dataHi != NULL)
                  dataSize *= 2; /* since it's a doubleword-CAS */
               addEvent_Dr( sbOut, cas->addr, dataSize );
               addEvent_Dw( sbOut, cas->addr, dataSize );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LLSC: {
            if(shouldInterpret) {
               IRType dataTy;
               if (st->Ist.LLSC.storedata == NULL) {
                  /* LL */
                  dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
                  addEvent_Dr( sbOut, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
               } else {
                  /* SC */
                  dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
                  addEvent_Dw( sbOut, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
               }
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Exit:
            // NOTE(review): this empty shouldInterpret block is dead;
            // the flush below runs unconditionally.
            if(shouldInterpret) {
            }
            flushEvents(sbOut);
            addStmtToIRSB( sbOut, st );      // Original statement
            break;

         default:
            tl_assert(0);
      }
   }

   /* At the end of the sbIn.  Flush outstandings. */
   flushEvents(sbOut);

   return sbOut;
}
// Set the progress bar to an absolute fraction 'status' (expected in
// [0,1] by set_fraction — TODO confirm callers), then flush pending
// GUI events so the change is painted immediately.
void Progress::update (double status)
{
    win_.getProgressBar()->set_fraction (status);
    flushEvents ();
}
// Indeterminate-progress variant: pulse the progress bar, then flush
// pending GUI events so the change is painted immediately.
void Progress::update ()
{
    win_.getProgressBar()->pulse ();
    flushEvents ();
}
/* Instrument one superblock.  Tracing is switched on globally
   (clo_trace_mem = True) the first time an instruction at the entry of
   "main" is seen; from then on loads, stores, SP/reg-20 register writes
   and function entries/exits are traced, tagged with the enclosing
   function name held in the shared global_fnname buffer.
   NOTE(review): Ist_AbiHint/Ist_PutI/Ist_MBE statements are NOT copied
   into sbOut here (bare 'break'), unlike the other instrument routines
   — confirm this is intentional.  Also 'imarkst' is only assigned in
   the Ist_IMark case but read after the loop for Ijk_Ret blocks;
   confirm every such block contains at least one IMark. */
static IRSB* sh_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             IRType gWordTy, IRType hWordTy )
{
   IRDirty*   di;
   Int        i;
   IRSB*      sbOut;
   IRType     type;
   IRTypeEnv* tyenv = sbIn->tyenv;
   IRStmt*    imarkst;
   char *fnname = global_fnname;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up SB */
   sbOut = deepCopyIRSBExceptStmts(sbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( sbOut, sbIn->stmts[i] );
      i++;
   }

   if (clo_trace_mem) {
      // Event buffer is per-superblock; start empty.
      events_used = 0;
   }

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      /* Prettyprint All IR statements Starting from main that valgrind
         has generated */
      /*if(clo_trace_mem){
         ppIRStmt(st);
         VG_(printf)("\n");
      } */

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_PutI:
         case Ist_MBE:
            break;

         case Ist_Put:
            if(clo_trace_mem){
               Int reg_no = st->Ist.Put.offset;
               // Only the stack pointer and guest offset 20 are traced.
               if(reg_no == layout->offset_SP || reg_no == 20){
                  IRExpr* data = st->Ist.Put.data;
                  if(data->tag == Iex_RdTmp){
                     /* Add registerwrite instrumentation to IRSBout */
                     addEvent_RegW( sbOut, fnname, reg_no, data);
                  }
               }
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_IMark:
            imarkst = st;
            if (VG_(get_fnname_if_entry)( st->Ist.IMark.addr, fnname, 100)){
               //VG_(printf)("-- %s --\n",fnname);
               if(0 == VG_(strcmp)(fnname, "main")) {
                  // Entering main: switch tracing on for the rest of
                  // the run and emit a debug marker.
                  di = unsafeIRDirty_0_N(
                          0, "trace_debug",
                             VG_(fnptr_to_fnentry)(trace_debug),
                             mkIRExprVec_0() );
                  addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
                  //VG_(printf)("SP:%d\n",layout->offset_SP);
                  clo_trace_mem = True;
               }
               if(clo_trace_mem){
                  addEvent_FnEntry(sbOut, fnname);
               }
            }
            if (clo_trace_mem) {
               // WARNING: do not remove this function call, even if you
               // aren't interested in instruction reads. See the comment
               // above the function itself for more detail.
               addEvent_Ir( sbOut,
                            mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                            st->Ist.IMark.len );
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_WrTmp:
            // Add a call to trace_load() if --trace-mem=yes.
            if (clo_trace_mem) {
               IRExpr* data = st->Ist.WrTmp.data;
               if (data->tag == Iex_Load) {
                  addEvent_Dr( sbOut, fnname, data->Iex.Load.addr,
                               sizeofIRType(data->Iex.Load.ty) );
               }
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Store:
            if (clo_trace_mem) {
               IRExpr* data = st->Ist.Store.data;
               addEvent_Dw( sbOut, fnname, st->Ist.Store.addr,
                            sizeofIRType(typeOfIRExpr(tyenv, data)) );
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Dirty: {
            if (clo_trace_mem) {
               Int      dsize;
               IRDirty* d = st->Ist.Dirty.details;
               if (d->mFx != Ifx_None) {
                  // This dirty helper accesses memory.  Collect the details.
                  tl_assert(d->mAddr != NULL);
                  tl_assert(d->mSize != 0);
                  dsize = d->mSize;
                  if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                     addEvent_Dr( sbOut, fnname, d->mAddr, dsize );
                  if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                     addEvent_Dw( sbOut, fnname, d->mAddr, dsize );
               } else {
                  tl_assert(d->mAddr == NULL);
                  tl_assert(d->mSize == 0);
               }
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_CAS: {
            /* We treat it as a read and a write of the location.  I
               think that is the same behaviour as it was before IRCAS
               was introduced, since prior to that point, the Vex
               front ends would translate a lock-prefixed instruction
               into a (normal) read followed by a (normal) write. */
            Int    dataSize;
            IRType dataTy;
            IRCAS* cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
            dataSize = sizeofIRType(dataTy);
            if (clo_trace_mem) {
               addEvent_Dr( sbOut, fnname, cas->addr, dataSize );
               addEvent_Dw( sbOut, fnname, cas->addr, dataSize );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LLSC: {
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
               if (clo_trace_mem)
                  addEvent_Dr( sbOut, fnname, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
            } else {
               /* SC */
               dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
               if (clo_trace_mem)
                  addEvent_Dw( sbOut, fnname, st->Ist.LLSC.addr,
                               sizeofIRType(dataTy) );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Exit:
            if (clo_trace_mem) {
               // The side exit may leave the SB; emit pending events first.
               flushEvents(sbOut);
            }
            addStmtToIRSB( sbOut, st );      // Original statement
            break;

         default:
            tl_assert(0);
      }
   }

   if(clo_trace_mem){
      if(sbIn->jumpkind == Ijk_Ret){
         // Block ends in a return: record the exit from the function
         // containing the last IMark.
         VG_(get_fnname)(imarkst->Ist.IMark.addr, fnname, 100);
         addEvent_FnExit(sbOut,fnname);
      }
   }

   if (clo_trace_mem) {
      /* At the end of the sbIn.  Flush outstandings. */
      flushEvents(sbOut);
   }

   return sbOut;
}
/**
 * \brief flushes all the input events
 *
 * Clears pending key state, queued commands and queued events, in that
 * order.
 */
void CInput::flushAll()
{
    flushKeys();
    flushCommands();
    flushEvents();
}