Example #1
/* add stmt to a bb */
static /*inline*/ void stmt ( HChar cat, PCEnv* pce, IRStmt* st ) {
   if (pce->trace) {
      VG_(printf)("  %c: ", cat);
      ppIRStmt(st);
      VG_(printf)("\n");
   }
   addStmtToIRSB(pce->sb, st);
}
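A minimal usage sketch for the helper above (an assumption, not code from the original tool): every statement the instrumenter builds is routed through stmt() so that it is traced when pce->trace is set and then appended to the output superblock. IRStmt_NoOp() stands in for whatever statement the pass actually generates, and the category character is arbitrary.

/* Hypothetical caller: emit one generated statement via the helper,
   tagging it with a single-character category for the trace output. */
static void emit_example ( PCEnv* pce )
{
   stmt( 'I', pce, IRStmt_NoOp() );
}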
Example #2
static VG_REGPARM(3) void trace_op(IRStmt *expr, UInt one, UInt two)
{
   if (spreadTaint(expr->Ist.WrTmp.data)) {
      IRExpr* dest = IRExpr_RdTmp(expr->Ist.WrTmp.tmp);
      VG_(addToXA)(tainted, dest);
      VG_(printf)("TAINTED: ");
      ppIRStmt(expr);
      VG_(printf)("\n");
   } 
}
Example #3
static VG_REGPARM(2) void trace_load(Addr addr, SizeT size, IRStmt *expr)
{
   if (addr == taint || spreadTaint(expr->Ist.WrTmp.data)) {
      IRExpr* dest = IRExpr_RdTmp(expr->Ist.WrTmp.tmp);
      VG_(addToXA)(tainted, dest);
      VG_(printf)("TAINTED: ");
      ppIRStmt(expr);
      VG_(printf)("\n");
   } 
}
Example #4
static VG_REGPARM(2) void trace_store(Addr addr, SizeT size, IRStmt *expr, Addr last)
{
   if (addr == taint) {
      taint = 1;
   } else if (spreadTaint(expr->Ist.Store.data)) {
      IRExpr* val = NULL;
      if (VG_(lookupFM)(kvStore, NULL, (UWord*)&val, expr->Ist.Store.addr->Iex.RdTmp.tmp)) {
         VG_(sortXA)(tainted);
         VG_(addToXA)(tainted, val);
         VG_(printf)("TAINTED: ");
         ppIRStmt(expr);
         VG_(printf)("\n");
      } 
   }
}
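The three helpers above are run-time callbacks; they are reached from the instrumented IR via dirty calls. The sketch below shows the usual wiring pattern with unsafeIRDirty_0_N (an assumed reconstruction, not code from the original tool): the address expression, the access size and a heap-allocated clone of the statement are marshalled into an argument vector, and the regparm count passed to unsafeIRDirty_0_N should agree with the helper's VG_REGPARM annotation, so a three-argument helper would normally be declared and called with regparm 3.

/* Hypothetical wiring for a trace_load-style helper; 'sbOut' and 'clone'
   are assumed to be the output superblock and a heap copy of the
   statement that remains valid at run time. */
static void attach_trace_load ( IRSB* sbOut, IRExpr* addr, Int size,
                                IRStmt* clone )
{
   IRExpr** argv = mkIRExprVec_3( addr,
                                  mkIRExpr_HWord( size ),
                                  mkIRExpr_HWord( (HWord)clone ) );
   IRDirty* di   = unsafeIRDirty_0_N( 3, "trace_load",
                                      VG_(fnptr_to_fnentry)( &trace_load ),
                                      argv );
   addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
}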
Example #5
static
IRBB* cg_instrument ( IRBB* bbIn, VexGuestLayout* layout, 
                      Addr64 orig_addr_noredir, VexGuestExtents* vge,
                      IRType gWordTy, IRType hWordTy )
{
   Int        i, isize;
   IRStmt*    st;
   Addr64     cia; /* address of current insn */
   CgState    cgs;
   IRTypeEnv* tyenv = bbIn->tyenv;
   InstrInfo* curr_inode = NULL;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up BB, including copying of the where-next stuff. */
   cgs.bbOut           = emptyIRBB();
   cgs.bbOut->tyenv    = dopyIRTypeEnv(tyenv);
   tl_assert( isIRAtom(bbIn->next) );
   cgs.bbOut->next     = dopyIRExpr(bbIn->next);
   cgs.bbOut->jumpkind = bbIn->jumpkind;

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRBB( cgs.bbOut, bbIn->stmts[i] );
      i++;
   }

   // Get the first statement, and initial cia from it
   tl_assert(bbIn->stmts_used > 0);
   tl_assert(i < bbIn->stmts_used);
   st = bbIn->stmts[i];
   tl_assert(Ist_IMark == st->tag);
   cia = st->Ist.IMark.addr;

   // Set up running state and get block info
   cgs.events_used = 0;
   cgs.bbInfo      = get_BB_info(bbIn, (Addr)orig_addr_noredir);
   cgs.bbInfo_i    = 0;

   if (DEBUG_CG)
      VG_(printf)("\n\n---------- cg_instrument ----------\n");

   // Traverse the block, initialising inodes, adding events and flushing as
   // necessary.
   for (/*use current i*/; i < bbIn->stmts_used; i++) {

      st = bbIn->stmts[i];
      tl_assert(isFlatIRStmt(st));

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_MFence:
            break;

         case Ist_IMark:
            cia   = st->Ist.IMark.addr;
            isize = st->Ist.IMark.len;

            // If Vex fails to decode an instruction, the size will be zero.
            // Pretend otherwise.
            if (isize == 0) isize = VG_MIN_INSTR_SZB;

            // Sanity-check size.
            tl_assert( (VG_MIN_INSTR_SZB <= isize && isize <= VG_MAX_INSTR_SZB)
                     || VG_CLREQ_SZB == isize );

            // Get space for and init the inode, record it as the current one.
            // Subsequent Dr/Dw/Dm events from the same instruction will 
            // also use it.
            curr_inode = setup_InstrInfo(&cgs, cia, isize);

            addEvent_Ir( &cgs, curr_inode );
            break;

         case Ist_Tmp: {
            IRExpr* data = st->Ist.Tmp.data;
            if (data->tag == Iex_Load) {
               IRExpr* aexpr = data->Iex.Load.addr;
               // Note also, endianness info is ignored.  I guess
               // that's not interesting.
               addEvent_Dr( &cgs, curr_inode, sizeofIRType(data->Iex.Load.ty), 
                                  aexpr );
            }
            break;
         }

         case Ist_Store: {
            IRExpr* data  = st->Ist.Store.data;
            IRExpr* aexpr = st->Ist.Store.addr;
            addEvent_Dw( &cgs, curr_inode, 
                         sizeofIRType(typeOfIRExpr(tyenv, data)), aexpr );
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               // Large (eg. 28B, 108B, 512B on x86) data-sized
               // instructions will be done inaccurately, but they're
               // very rare and this avoids errors from hitting more
               // than two cache lines in the simulation.
               if (dataSize > MIN_LINE_SIZE)
                  dataSize = MIN_LINE_SIZE;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                  addEvent_Dr( &cgs, curr_inode, dataSize, d->mAddr );
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                  addEvent_Dw( &cgs, curr_inode, dataSize, d->mAddr );
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         case Ist_Exit:
            /* We may never reach the next statement, so need to flush
               all outstanding transactions now. */
            flushEvents( &cgs );
            break;

         default:
            tl_assert(0);
            break;
      }

      /* Copy the original statement */
      addStmtToIRBB( cgs.bbOut, st );

      if (DEBUG_CG) {
         ppIRStmt(st);
         VG_(printf)("\n");
      }
   }

   /* At the end of the bb.  Flush outstandings. */
   flushEvents( &cgs );

   /* done.  stay sane ... */
   tl_assert(cgs.bbInfo_i == cgs.bbInfo->n_instrs);

   if (DEBUG_CG) {
      VG_(printf)( "goto {");
      ppIRJumpKind(bbIn->jumpkind);
      VG_(printf)( "} ");
      ppIRExpr( bbIn->next );
      VG_(printf)( "}\n");
   }

   return cgs.bbOut;
}
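cg_instrument only shows the producer side: addEvent_Ir/Dr/Dw append to a small per-superblock event queue held in CgState, and flushEvents drains that queue by emitting dirty-helper calls into cgs.bbOut. The definitions below are a simplified sketch of what such a queue could look like; the names and the capacity are assumptions, not the actual Cachegrind declarations.

#define N_EVENTS 16   /* assumed queue capacity */

typedef enum { Ev_Ir, Ev_Dr, Ev_Dw } EventKind;

typedef struct {
   EventKind  ekind;
   InstrInfo* inode;     /* instruction the event belongs to */
   Int        datasize;  /* bytes accessed; 0 for Ev_Ir */
   IRExpr*    dataEA;    /* effective-address expression; NULL for Ev_Ir */
} Event;

/* CgState would then carry
      Event events[N_EVENTS];
      Int   events_used;
   and flushEvents(&cgs) would walk events[0 .. events_used-1], emit one
   dirty call per event (or per mergeable pair), and reset events_used. */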
Example #6
static
void collectStatementInfo(IRTypeEnv* tyenv, IRBB* bbOut, IRStmt* st,
                          Addr* instrAddr, UInt* instrLen,
                          IRExpr** loadAddrExpr, IRExpr** storeAddrExpr,
                          UInt* dataSize, IRType hWordTy)
{
    CLG_ASSERT(isFlatIRStmt(st));

    switch (st->tag) {
    case Ist_NoOp:
        break;

    case Ist_AbiHint:
        /* ABI hints aren't interesting.  Ignore. */
        break;

    case Ist_IMark:
        /* st->Ist.IMark.addr is a 64-bit int.  ULong_to_Ptr casts this
           to the host's native pointer type; if that is 32 bits then it
           discards the upper 32 bits.  If we are cachegrinding on a
           32-bit host then we are also ensured that the guest word size
           is 32 bits, due to the assertion in cg_instrument that the
           host and guest word sizes must be the same.  Hence
           st->Ist.IMark.addr will have been derived from a 32-bit guest
           code address and truncation of it is safe.  I believe this
           assignment should be correct for both 32- and 64-bit
           machines. */
        *instrAddr = (Addr)ULong_to_Ptr(st->Ist.IMark.addr);
        *instrLen =        st->Ist.IMark.len;
        break;

    case Ist_Tmp: {
        IRExpr* data = st->Ist.Tmp.data;
        if (data->tag == Iex_Load) {
            IRExpr* aexpr = data->Iex.Load.addr;
            CLG_ASSERT( isIRAtom(aexpr) );
            // Note also, endianness info is ignored.  I guess that's not
            // interesting.
            // XXX: repe cmpsb does two loads... the first one is ignored here!
            //tl_assert( NULL == *loadAddrExpr );          // XXX: ???
            *loadAddrExpr = aexpr;
            *dataSize = sizeofIRType(data->Iex.Load.ty);
        }
        break;
    }

    case Ist_Store: {
        IRExpr* data  = st->Ist.Store.data;
        IRExpr* aexpr = st->Ist.Store.addr;
        CLG_ASSERT( isIRAtom(aexpr) );
        if ( NULL == *storeAddrExpr ) {
            /* this is a kludge: ignore all except the first store from
               an instruction. */
            *storeAddrExpr = aexpr;
            *dataSize = sizeofIRType(typeOfIRExpr(tyenv, data));
        }
        break;
    }

    case Ist_Dirty: {
        IRDirty* d = st->Ist.Dirty.details;
        if (d->mFx != Ifx_None) {
            /* This dirty helper accesses memory.  Collect the
               details. */
            CLG_ASSERT(d->mAddr != NULL);
            CLG_ASSERT(d->mSize != 0);
            *dataSize = d->mSize;
            if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                *loadAddrExpr = d->mAddr;
            if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                *storeAddrExpr = d->mAddr;
        } else {
            CLG_ASSERT(d->mAddr == NULL);
            CLG_ASSERT(d->mSize == 0);
        }
        break;
    }

    case Ist_Put:
    case Ist_PutI:
    case Ist_MFence:
    case Ist_Exit:
        break;

    default:
        VG_(printf)("\n");
        ppIRStmt(st);
        VG_(printf)("\n");
        VG_(tool_panic)("Callgrind: unhandled IRStmt");
    }
}
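Because collectStatementInfo only writes the out-parameters that are relevant to the statement it is given, a caller has to reset them before every call. A minimal caller sketch follows (assumed and simplified; the real Callgrind driver loop does considerably more):

static void processOneStmt ( IRTypeEnv* tyenv, IRBB* bbOut, IRStmt* st,
                             IRType hWordTy )
{
    Addr    instrAddr     = 0;
    UInt    instrLen      = 0, dataSize = 0;
    IRExpr* loadAddrExpr  = NULL;
    IRExpr* storeAddrExpr = NULL;

    collectStatementInfo( tyenv, bbOut, st, &instrAddr, &instrLen,
                          &loadAddrExpr, &storeAddrExpr, &dataSize, hWordTy );

    /* At this point loadAddrExpr/storeAddrExpr describe any memory access
       made by st, and dataSize gives its width in bytes. */
}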
Example #7
static
IRSB* lk_instrument ( VgCallbackClosure* closure,
                      IRSB* sbIn,
                      VexGuestLayout* layout,
                      VexGuestExtents* vge,
                      VexArchInfo* archinfo_host,
                      IRType gWordTy, IRType hWordTy )
{
   IRDirty*   di;
   Int        i;
   IRSB*      sbOut;
   HChar      fnname[100];
   IRTypeEnv* tyenv = sbIn->tyenv;
   Addr       iaddr = 0, dst;
   UInt       ilen = 0;
   Bool       condition_inverted = False;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up SB */
   sbOut = deepCopyIRSBExceptStmts(sbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( sbOut, sbIn->stmts[i] );
      i++;
   }

   if (clo_basic_counts) {
      /* Count this superblock. */
      di = unsafeIRDirty_0_N( 0, "add_one_SB_entered",
                                 VG_(fnptr_to_fnentry)( &add_one_SB_entered ),
                                 mkIRExprVec_0() );
      addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
   }

   if (clo_trace_sbs) {
      /* Print this superblock's address. */
      di = unsafeIRDirty_0_N(
              0, "trace_superblock",
              VG_(fnptr_to_fnentry)( &trace_superblock ),
              mkIRExprVec_1( mkIRExpr_HWord( vge->base[0] ) )
           );
      addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
   }

   if (clo_trace_mem) {
      events_used = 0;
   }

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      if (clo_basic_counts) {
         /* Count one VEX statement. */
         di = unsafeIRDirty_0_N( 0, "add_one_IRStmt",
                                    VG_(fnptr_to_fnentry)( &add_one_IRStmt ),
                                    mkIRExprVec_0() );
         addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
      }

      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_MBE:
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_IMark:
            if (clo_basic_counts) {
               /* Needed to be able to check for inverted condition in Ist_Exit */
               iaddr = st->Ist.IMark.addr;
               ilen  = st->Ist.IMark.len;

               /* Count guest instruction. */
               di = unsafeIRDirty_0_N( 0, "add_one_guest_instr",
                                          VG_(fnptr_to_fnentry)( &add_one_guest_instr ),
                                          mkIRExprVec_0() );
               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );

               /* An unconditional branch to a known destination in the
                * guest's instructions can be represented, in the IRSB to
                * instrument, by the VEX statements that are the
                * translation of that known destination. This feature is
                * called 'SB chasing' and can be influenced by command
                * line option --vex-guest-chase-thresh.
                *
                * To get an accurate count of the calls to a specific
                * function, taking SB chasing into account, we need to
                * check for each guest instruction (Ist_IMark) if it is
                * the entry point of a function.
                */
               tl_assert(clo_fnname);
               tl_assert(clo_fnname[0]);
               if (VG_(get_fnname_if_entry)(st->Ist.IMark.addr,
                                            fnname, sizeof(fnname))
                   && 0 == VG_(strcmp)(fnname, clo_fnname)) {
                  di = unsafeIRDirty_0_N(
                          0, "add_one_func_call",
                             VG_(fnptr_to_fnentry)( &add_one_func_call ),
                             mkIRExprVec_0() );
                  addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
               }
            }
            if (clo_trace_mem) {
               // WARNING: do not remove this function call, even if you
               // aren't interested in instruction reads.  See the comment
               // above the function itself for more detail.
               addEvent_Ir( sbOut, mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                            st->Ist.IMark.len );
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_WrTmp:
            // Add a call to trace_load() if --trace-mem=yes.
            if (clo_trace_mem) {
               IRExpr* data = st->Ist.WrTmp.data;
               if (data->tag == Iex_Load) {
                  addEvent_Dr( sbOut, data->Iex.Load.addr,
                               sizeofIRType(data->Iex.Load.ty) );
               }
            }
            if (clo_detailed_counts) {
               IRExpr* expr = st->Ist.WrTmp.data;
               IRType  type = typeOfIRExpr(sbOut->tyenv, expr);
               tl_assert(type != Ity_INVALID);
               switch (expr->tag) {
                  case Iex_Load:
                    instrument_detail( sbOut, OpLoad, type, NULL/*guard*/ );
                     break;
                  case Iex_Unop:
                  case Iex_Binop:
                  case Iex_Triop:
                  case Iex_Qop:
                  case Iex_ITE:
                     instrument_detail( sbOut, OpAlu, type, NULL/*guard*/ );
                     break;
                  default:
                     break;
               }
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Store: {
            IRExpr* data = st->Ist.Store.data;
            IRType  type = typeOfIRExpr(tyenv, data);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
               addEvent_Dw( sbOut, st->Ist.Store.addr,
                            sizeofIRType(type) );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpStore, type, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_StoreG: {
            IRStoreG* sg   = st->Ist.StoreG.details;
            IRExpr*   data = sg->data;
            IRType    type = typeOfIRExpr(tyenv, data);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
               addEvent_Dw_guarded( sbOut, sg->addr,
                                    sizeofIRType(type), sg->guard );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpStore, type, sg->guard );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LoadG: {
            IRLoadG* lg       = st->Ist.LoadG.details;
            IRType   type     = Ity_INVALID; /* loaded type */
            IRType   typeWide = Ity_INVALID; /* after implicit widening */
            typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
            tl_assert(type != Ity_INVALID);
            if (clo_trace_mem) {
               addEvent_Dr_guarded( sbOut, lg->addr,
                                    sizeofIRType(type), lg->guard );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpLoad, type, lg->guard );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Dirty: {
            if (clo_trace_mem) {
               Int      dsize;
               IRDirty* d = st->Ist.Dirty.details;
               if (d->mFx != Ifx_None) {
                  // This dirty helper accesses memory.  Collect the details.
                  tl_assert(d->mAddr != NULL);
                  tl_assert(d->mSize != 0);
                  dsize = d->mSize;
                  if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                     addEvent_Dr( sbOut, d->mAddr, dsize );
                  if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                     addEvent_Dw( sbOut, d->mAddr, dsize );
               } else {
                  tl_assert(d->mAddr == NULL);
                  tl_assert(d->mSize == 0);
               }
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_CAS: {
            /* We treat it as a read and a write of the location.  I
               think that is the same behaviour as it was before IRCAS
               was introduced, since prior to that point, the Vex
               front ends would translate a lock-prefixed instruction
               into a (normal) read followed by a (normal) write. */
            Int    dataSize;
            IRType dataTy;
            IRCAS* cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
            dataSize = sizeofIRType(dataTy);
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */
            if (clo_trace_mem) {
               addEvent_Dr( sbOut, cas->addr, dataSize );
               addEvent_Dw( sbOut, cas->addr, dataSize );
            }
            if (clo_detailed_counts) {
               instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
               if (cas->dataHi != NULL) /* dcas */
                  instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
               instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
               if (cas->dataHi != NULL) /* dcas */
                  instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_LLSC: {
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);
               if (clo_trace_mem) {
                  addEvent_Dr( sbOut, st->Ist.LLSC.addr,
                                      sizeofIRType(dataTy) );
                  /* flush events before LL, helps SC to succeed */
                  flushEvents(sbOut);
	       }
               if (clo_detailed_counts)
                  instrument_detail( sbOut, OpLoad, dataTy, NULL/*guard*/ );
            } else {
               /* SC */
               dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);
               if (clo_trace_mem)
                  addEvent_Dw( sbOut, st->Ist.LLSC.addr,
                                      sizeofIRType(dataTy) );
               if (clo_detailed_counts)
                  instrument_detail( sbOut, OpStore, dataTy, NULL/*guard*/ );
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Exit:
            if (clo_basic_counts) {
               // The condition of a branch was inverted by VEX if a taken
               // branch is in fact a fall-through according to the client address
               tl_assert(iaddr != 0);
               dst = (sizeof(Addr) == 4) ? st->Ist.Exit.dst->Ico.U32 :
                                           st->Ist.Exit.dst->Ico.U64;
               condition_inverted = (dst == iaddr + ilen);

               /* Count Jcc */
               if (!condition_inverted)
                  di = unsafeIRDirty_0_N( 0, "add_one_Jcc",
                                          VG_(fnptr_to_fnentry)( &add_one_Jcc ),
                                          mkIRExprVec_0() );
               else
                  di = unsafeIRDirty_0_N( 0, "add_one_inverted_Jcc",
                                          VG_(fnptr_to_fnentry)(
                                             &add_one_inverted_Jcc ),
                                          mkIRExprVec_0() );

               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
            }
            if (clo_trace_mem) {
               flushEvents(sbOut);
            }

            addStmtToIRSB( sbOut, st );      // Original statement

            if (clo_basic_counts) {
               /* Count non-taken Jcc */
               if (!condition_inverted)
                  di = unsafeIRDirty_0_N( 0, "add_one_Jcc_untaken",
                                          VG_(fnptr_to_fnentry)(
                                             &add_one_Jcc_untaken ),
                                          mkIRExprVec_0() );
               else
                  di = unsafeIRDirty_0_N( 0, "add_one_inverted_Jcc_untaken",
                                          VG_(fnptr_to_fnentry)(
                                             &add_one_inverted_Jcc_untaken ),
                                          mkIRExprVec_0() );

               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
            }
            break;

         default:
            ppIRStmt(st);
            tl_assert(0);
      }
   }

   if (clo_basic_counts) {
      /* Count this basic block. */
      di = unsafeIRDirty_0_N( 0, "add_one_SB_completed",
                                 VG_(fnptr_to_fnentry)( &add_one_SB_completed ),
                                 mkIRExprVec_0() );
      addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
   }

   if (clo_trace_mem) {
      /* At the end of the sbIn.  Flush outstandings. */
      flushEvents(sbOut);
   }

   return sbOut;
}
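The addEvent_* calls above only queue events; flushEvents later turns each queued event into a dirty call to a small run-time helper that does the actual printing. A sketch of such a helper follows (the signature and output format follow the usual Lackey pattern but should be treated as assumptions, and it is unrelated to the trace_load of Example #3):

static VG_REGPARM(2) void trace_load ( Addr addr, SizeT size )
{
   VG_(printf)(" L %08lx,%lu\n", addr, size);
}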
Example #8
static
IRSB* el_instrument ( VgCallbackClosure* closure,
                      IRSB* sbIn,
                      VexGuestLayout* layout,
                      VexGuestExtents* vge,
                      IRType gWordTy, IRType hWordTy )
{
    IRDirty*   di;
    Int        i;
    IRSB*      sbOut;
    Char       fnname[100];
    IRType     type;
    IRTypeEnv* tyenv = sbIn->tyenv;
    Addr       iaddr = 0, dst;
    UInt       ilen = 0;
    Bool       condition_inverted = False;

    if (gWordTy != hWordTy) {
        /* We don't currently support this case. */
        VG_(tool_panic)("host/guest word size mismatch");
    }

    /* Set up SB */
    sbOut = deepCopyIRSBExceptStmts(sbIn);

    // Copy verbatim any IR preamble preceding the first IMark
    i = 0;
    while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
        addStmtToIRSB( sbOut, sbIn->stmts[i] );
        i++;
    }

    events_used = 0;


    for (/*use current i*/; i < sbIn->stmts_used; i++) {
        IRStmt* st = sbIn->stmts[i];
        if (!st || st->tag == Ist_NoOp) continue;



        switch (st->tag) {
        case Ist_NoOp:
        case Ist_AbiHint:
        case Ist_Put:
        case Ist_PutI:
        case Ist_MBE:
            addStmtToIRSB( sbOut, st );
            break;

        case Ist_IMark:

            // Store the last seen address
            lastAddress = st->Ist.IMark.addr;

            addEvent_Ir( sbOut, mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
                         st->Ist.IMark.len );

            VG_(get_filename)(lastAddress, (char*) g_buff1, kBuffSize);

            if(VG_(strcmp)(g_buff1, clo_filename) == 0) {
                shouldInterpret = 1;
                ppIRStmt(st);
                VG_(printf)("\n");
            } else {
                shouldInterpret = 0;
            }

            addStmtToIRSB( sbOut, st );
            break;

        case Ist_WrTmp:
            // Add a call to trace_load() if --trace-mem=yes.
        {
            if(shouldInterpret) {
                IRExpr* data = st->Ist.WrTmp.data;
                if (data->tag == Iex_Load) {
                    addEvent_Dr( sbOut, data->Iex.Load.addr,
                                 sizeofIRType(data->Iex.Load.ty) );
                }

            }
            addStmtToIRSB( sbOut, st );
            break;
        }
        case Ist_Store:
        {
            if(shouldInterpret) {
                IRExpr* data  = st->Ist.Store.data;
                addEvent_Dw( sbOut, st->Ist.Store.addr,
                             sizeofIRType(typeOfIRExpr(tyenv, data)) );


            }
            addStmtToIRSB( sbOut, st );
            break;
        }
        case Ist_Dirty:
        {

            if(shouldInterpret) {
                Int      dsize;
                IRDirty* d = st->Ist.Dirty.details;
                if (d->mFx != Ifx_None) {
                    // This dirty helper accesses memory.  Collect the details.
                    tl_assert(d->mAddr != NULL);
                    tl_assert(d->mSize != 0);
                    dsize = d->mSize;
                    if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
                        addEvent_Dr( sbOut, d->mAddr, dsize );
                    if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
                        addEvent_Dw( sbOut, d->mAddr, dsize );
                } else {
                    tl_assert(d->mAddr == NULL);
                    tl_assert(d->mSize == 0);
                }
            }

            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_CAS: {
            if(shouldInterpret) {
                Int    dataSize;
                IRType dataTy;
                IRCAS* cas = st->Ist.CAS.details;
                tl_assert(cas->addr != NULL);
                tl_assert(cas->dataLo != NULL);
                dataTy   = typeOfIRExpr(tyenv, cas->dataLo);
                dataSize = sizeofIRType(dataTy);
                if (cas->dataHi != NULL)
                    dataSize *= 2; /* since it's a doubleword-CAS */

                addEvent_Dr( sbOut, cas->addr, dataSize );
                addEvent_Dw( sbOut, cas->addr, dataSize );

            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_LLSC: {
            if(shouldInterpret) {
                IRType dataTy;
                if (st->Ist.LLSC.storedata == NULL) {
                    /* LL */
                    dataTy = typeOfIRTemp(tyenv, st->Ist.LLSC.result);

                    addEvent_Dr( sbOut, st->Ist.LLSC.addr,
                                 sizeofIRType(dataTy) );

                } else {
                    /* SC */
                    dataTy = typeOfIRExpr(tyenv, st->Ist.LLSC.storedata);

                    addEvent_Dw( sbOut, st->Ist.LLSC.addr,
                                 sizeofIRType(dataTy) );

                }
            }
            addStmtToIRSB( sbOut, st );
            break;
        }

        case Ist_Exit:
            if(shouldInterpret) {

            }

            flushEvents(sbOut);
            addStmtToIRSB( sbOut, st );      // Original statement
            break;

        default:
            tl_assert(0);
        }
    }


    /* At the end of the sbIn.  Flush outstandings. */
    flushEvents(sbOut);


    return sbOut;
}
Example #9
static 
Bool handleOneStatement(IRTypeEnv* tyenv, IRBB* bbOut, IRStmt* st, IRStmt* st2,
                        Addr* instrAddr, UInt* instrLen,
                        IRExpr** loadAddrExpr, IRExpr** storeAddrExpr,
                        UInt* dataSize)
{
   tl_assert(isFlatIRStmt(st));

   switch (st->tag) {
   case Ist_NoOp:
   case Ist_AbiHint:
   case Ist_Put:
   case Ist_PutI:
   case Ist_MFence:
      break;

   case Ist_Exit: {
      // This is a conditional jump.  Most of the time, we want to add the
      // instrumentation before it, to ensure it gets executed.  Eg, (1) if
      // this conditional jump is just before an IMark:
      //
      //   t108 = Not1(t107)
      //   [add instrumentation here]
      //   if (t108) goto {Boring} 0x3A96637D:I32
      //   ------ IMark(0x3A966370, 7) ------
      //
      // or (2) if this conditional jump is the last thing before the
      // block-ending unconditional jump:
      //
      //   t111 = Not1(t110)
      //   [add instrumentation here]
      //   if (t111) goto {Boring} 0x3A96637D:I32
      //   goto {Boring} 0x3A966370:I32
      //
      // One case (3) where we want the instrumentation after the conditional
      // jump is when the conditional jump is for an x86 REP instruction:
      //
      //   ------ IMark(0x3A967F13, 2) ------
      //   t1 = GET:I32(4)
      //   t6 = CmpEQ32(t1,0x0:I32) 
      //   if (t6) goto {Boring} 0x3A967F15:I32    # ignore this cond jmp
      //   t7 = Sub32(t1,0x1:I32)
      //   PUT(4) = t7
      //   ...
      //   t56 = Not1(t55)
      //   [add instrumentation here]
      //   if (t56) goto {Boring} 0x3A967F15:I32
      //
      // Therefore, we return true if the next statement is an IMark, or if
      // there is no next statement (which matches case (2), as the final
      // unconditional jump is not represented in the IRStmt list).
      //
      // Note that this approach won't do in the long run for supporting
      // PPC, but it's good enough for x86/AMD64 for the 3.0.X series.
      if (NULL == st2 || Ist_IMark == st2->tag)
         return True;
      else
         return False;
   }

   case Ist_IMark:
      /* st->Ist.IMark.addr is a 64-bit int.  ULong_to_Ptr casts this
         to the host's native pointer type; if that is 32 bits then it
         discards the upper 32 bits.  If we are cachegrinding on a
         32-bit host then we are also ensured that the guest word size
         is 32 bits, due to the assertion in cg_instrument that the
         host and guest word sizes must be the same.  Hence
         st->Ist.IMark.addr will have been derived from a 32-bit guest
         code address and truncation of it is safe.  I believe this
         assignment should be correct for both 32- and 64-bit
         machines. */
      *instrAddr = (Addr)ULong_to_Ptr(st->Ist.IMark.addr);
      *instrLen =        st->Ist.IMark.len;
      break;

   case Ist_Tmp: {
      IRExpr* data = st->Ist.Tmp.data;
      if (data->tag == Iex_Load) {
         IRExpr* aexpr = data->Iex.Load.addr;
         tl_assert( isIRAtom(aexpr) );
         // Note also, endianness info is ignored.  I guess that's not
         // interesting.
         // XXX: repe cmpsb does two loads... the first one is ignored here!
         //tl_assert( NULL == *loadAddrExpr );          // XXX: ???
         *loadAddrExpr = aexpr;
         *dataSize = sizeofIRType(data->Iex.Load.ty);
      }
      break;
   }
      
   case Ist_Store: {
      IRExpr* data  = st->Ist.Store.data;
      IRExpr* aexpr = st->Ist.Store.addr;
      tl_assert( isIRAtom(aexpr) );
      tl_assert( NULL == *storeAddrExpr );          // XXX: ???
      *storeAddrExpr = aexpr;
      *dataSize = sizeofIRType(typeOfIRExpr(tyenv, data));
      break;
   }
   
   case Ist_Dirty: {
      IRDirty* d = st->Ist.Dirty.details;
      if (d->mFx != Ifx_None) {
         /* This dirty helper accesses memory.  Collect the
            details. */
         tl_assert(d->mAddr != NULL);
         tl_assert(d->mSize != 0);
         *dataSize = d->mSize;
         if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify)
            *loadAddrExpr = d->mAddr;
         if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify)
            *storeAddrExpr = d->mAddr;
      } else {
         tl_assert(d->mAddr == NULL);
         tl_assert(d->mSize == 0);
      }
      break;
   }

   default:
      VG_(printf)("\n");
      ppIRStmt(st);
      VG_(printf)("\n");
      VG_(tool_panic)("Cachegrind: unhandled IRStmt");
   }

   return False;
}
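A sketch of how the Bool result would typically drive the caller (assumed and simplified): the accumulated per-instruction instrumentation is emitted as soon as handleOneStatement signals that it is needed, i.e. just before the conditional exit in cases (1) and (2) of the comment above.

static void instrumentPair ( IRTypeEnv* tyenv, IRBB* bbOut,
                             IRStmt* st, IRStmt* st2 )
{
   Addr    instrAddr = 0;
   UInt    instrLen = 0, dataSize = 0;
   IRExpr* loadAddrExpr = NULL;
   IRExpr* storeAddrExpr = NULL;

   Bool instrumentNow = handleOneStatement( tyenv, bbOut, st, st2,
                                            &instrAddr, &instrLen,
                                            &loadAddrExpr, &storeAddrExpr,
                                            &dataSize );
   if (instrumentNow) {
      /* emit the counter/cache-sim calls for the current instruction
         here, so they execute even when the conditional exit is taken */
   }
   addStmtToIRBB( bbOut, st );   /* copy the original statement */
}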
Example #10
static void iselStmt ( ISelEnv* env, IRStmt* stmt )
{
   if (vex_traceflags & VEX_TRACE_VCODE) {
      vex_printf("\n-- ");
      ppIRStmt(stmt);
      vex_printf("\n");
   }
   switch (stmt->tag) {

   /* --------- STORE --------- */
   /* little-endian write to memory */
   case Ist_Store: {
       HReg   reg;
       IRType tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
       IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
       IREndness end = stmt->Ist.Store.end;

       if (tya != Ity_I32 || end != Iend_LE) 
          goto stmt_fail;

       reg = iselIntExpr_R(env, stmt->Ist.Store.data);

       if (tyd == Ity_I8) {
	   ARMAMode2* am2 = iselIntExpr_AMode2(env, stmt->Ist.Store.addr);
	   addInstr(env, ARMInstr_StoreB(reg,am2));
	   return;
       }
       if (tyd == Ity_I16) {
	   ARMAMode3* am3 = iselIntExpr_AMode3(env, stmt->Ist.Store.addr);
	   addInstr(env, ARMInstr_StoreH(reg,am3));
	   return;
       }
       if (tyd == Ity_I32) {
	   ARMAMode2* am2 = iselIntExpr_AMode2(env, stmt->Ist.Store.addr);
	   addInstr(env, ARMInstr_StoreW(reg,am2));
	   return;
       }       
       /* unhandled store type: fall out to stmt_fail */
       break;
   }

   /* --------- PUT --------- */
   /* write guest state, fixed offset */
   case Ist_Put: {
       IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Put.data);
       HReg reg = iselIntExpr_R(env, stmt->Ist.Put.data);

       // CAB: This anywhere near right?!
       if (tyd == Ity_I32) {
	   ARMAMode2* am2 = ARMAMode2_RI(GET_BP_REG(), stmt->Ist.Put.offset);
	   addInstr(env, ARMInstr_StoreW(reg, am2));
	   return;
       }
       if (tyd == Ity_I16) {
	   ARMAMode3* am3 = ARMAMode3_RI(GET_BP_REG(), stmt->Ist.Put.offset);
	   addInstr(env, ARMInstr_StoreH(reg, am3));
	   return;
       }
       if (tyd == Ity_I8) {
	   ARMAMode2* am2 = ARMAMode2_RI(GET_BP_REG(), stmt->Ist.Put.offset);
	   addInstr(env, ARMInstr_StoreB(reg, am2));
	   return;
       }
// CAB: Ity_I32, Ity_I16 ?
       break;
   }

   /* --------- Indexed PUT --------- */
   /* write guest state, run-time offset */
   case Ist_PutI: {
      ARMAMode2* am2
	   = genGuestArrayOffset(
	       env, stmt->Ist.PutI.descr, 
	       stmt->Ist.PutI.ix, stmt->Ist.PutI.bias );
       
       IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.PutI.data);
       
       if (tyd == Ity_I8) {
	   HReg reg = iselIntExpr_R(env, stmt->Ist.PutI.data);
	   addInstr(env, ARMInstr_StoreB(reg, am2));
	   return;
       }
// CAB: Ity_I32, Ity_I16 ?
       break;
   }

   /* --------- TMP --------- */
   /* assign value to temporary */
   case Ist_WrTmp: {
      IRTemp tmp = stmt->Ist.WrTmp.tmp;
      IRType ty = typeOfIRTemp(env->type_env, tmp);

      if (ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8) {
         ARMAMode1* am = iselIntExpr_AMode1(env, stmt->Ist.WrTmp.data);
         HReg dst = lookupIRTemp(env, tmp);
         addInstr(env, ARMInstr_DPInstr1(ARMalu_MOV,dst,am));
         return;
      }

// CAB: Ity_I1 ?

      break;
   }

   /* --------- Call to DIRTY helper --------- */
   /* call complex ("dirty") helper function */
   case Ist_Dirty: {
     //IRType   retty;
       IRDirty* d = stmt->Ist.Dirty.details;
       Bool     passBBP = False;

      if (d->nFxState == 0)
         vassert(!d->needsBBP);

      passBBP = toBool(d->nFxState > 0 && d->needsBBP);

      /* Marshal args, do the call, clear stack. */
      doHelperCall( env, passBBP, d->guard, d->cee, d->args );

      /* Now figure out what to do with the returned value, if any. */
      if (d->tmp == IRTemp_INVALID)
	  /* No return value.  Nothing to do. */
	  return;
      
      //retty = typeOfIRTemp(env->type_env, d->tmp);

// CAB: ?     if (retty == Ity_I64) {

#if 0
      if (retty == Ity_I32 || retty == Ity_I16 || retty == Ity_I8) {
         /* The returned value is in %eax.  Park it in the register
            associated with tmp. */
         HReg dst = lookupIRTemp(env, d->tmp);
         addInstr(env, mk_iMOVsd_RR(hregX86_EAX(),dst) );
         return;
      }
#endif
      break;
   }

   /* --------- EXIT --------- */
   /* conditional exit from BB */
   case Ist_Exit: {
      ARMBranchDest* dst;
      ARMCondCode cc;
      if (stmt->Ist.Exit.dst->tag != Ico_U32)
         vpanic("isel_arm: Ist_Exit: dst is not a 32-bit value");

      // CAB: Where does jumpkind fit in ?
      // stmt->Ist.Exit.jk

      dst = iselIntExpr_BD(env, IRExpr_Const(stmt->Ist.Exit.dst));
      cc  = iselCondCode(env,stmt->Ist.Exit.guard);
      addInstr(env, ARMInstr_Branch(cc, dst));
      return;
   }

   default: break;
   }
  stmt_fail:
   ppIRStmt(stmt);
   vpanic("iselStmt");
}
Example #11
/* Inject IR stmts depending on the data provided in the control
   block iricb. */
void
vex_inject_ir(IRSB *irsb, IREndness endian)
{
   IRExpr *data, *rounding_mode, *opnd1, *opnd2, *opnd3, *opnd4;

   rounding_mode = NULL;
   if (iricb.rounding_mode != NO_ROUNDING_MODE) {
      rounding_mode = mkU32(iricb.rounding_mode);
   }

   switch (iricb.num_operands) {
   case 1:
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
      if (rounding_mode)
         data = binop(iricb.op, rounding_mode, opnd1);
      else
         data = unop(iricb.op, opnd1);
      break;

   case 2:
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
      /* HACK, compiler warning ‘opnd2’ may be used uninitialized */
      opnd2 = opnd1;

      /* immediate_index = 0  immediate value is not used.
       * immediate_index = 2  opnd2 is an immediate value.
       */
      vassert(iricb.immediate_index == 0 || iricb.immediate_index == 2);

      if (iricb.immediate_index == 2) {
         vassert((iricb.t_opnd2 == Ity_I8) || (iricb.t_opnd2 == Ity_I16)
                 || (iricb.t_opnd2 == Ity_I32));

         /* Interpret the memory as an ULong. */
         if (iricb.immediate_type == Ity_I8) {
            opnd2 = mkU8(*((ULong *)iricb.opnd2));
         } else if (iricb.immediate_type == Ity_I16) {
            opnd2 = mkU16(*((ULong *)iricb.opnd2));
         } else if (iricb.immediate_type == Ity_I32) {
            opnd2 = mkU32(*((ULong *)iricb.opnd2));
         }
      } else {
         opnd2 = load(endian, iricb.t_opnd2, iricb.opnd2);
      }

      if (rounding_mode)
         data = triop(iricb.op, rounding_mode, opnd1, opnd2);
      else
         data = binop(iricb.op, opnd1, opnd2);
      break;

   case 3:
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
      opnd2 = load(endian, iricb.t_opnd2, iricb.opnd2);
      /* HACK, compiler warning ‘opnd3’ may be used uninitialized */
      opnd3 = opnd2;

      /* immediate_index = 0  immediate value is not used.
       * immediate_index = 3  opnd3 is an immediate value.
       */
      vassert(iricb.immediate_index == 0 || iricb.immediate_index == 3);

      if (iricb.immediate_index == 3) {
         vassert((iricb.t_opnd3 == Ity_I8) || (iricb.t_opnd3 == Ity_I16)
                 || (iricb.t_opnd3 == Ity_I32));

         if (iricb.immediate_type == Ity_I8) {
            opnd3 = mkU8(*((ULong *)iricb.opnd3));
         } else if (iricb.immediate_type == Ity_I16) {
            opnd3 = mkU16(*((ULong *)iricb.opnd3));
         } else if (iricb.immediate_type == Ity_I32) {
            opnd3 = mkU32(*((ULong *)iricb.opnd3));
         }
      } else {
         opnd3 = load(endian, iricb.t_opnd3, iricb.opnd3);
      }
      if (rounding_mode)
         data = qop(iricb.op, rounding_mode, opnd1, opnd2, opnd3);
      else
         data = triop(iricb.op, opnd1, opnd2, opnd3);
      break;

   case 4:
      vassert(rounding_mode == NULL);
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
      opnd2 = load(endian, iricb.t_opnd2, iricb.opnd2);
      opnd3 = load(endian, iricb.t_opnd3, iricb.opnd3);
      /* HACK, compiler warning ‘opnd4’ may be used uninitialized */
      opnd4 = opnd3;

      /* immediate_index = 0  immediate value is not used.
       * immediate_index = 4  opnd4 is an immediate value.
       */
      vassert(iricb.immediate_index == 0 || iricb.immediate_index == 4);

      if (iricb.immediate_index == 4) {
         vassert((iricb.t_opnd4 == Ity_I8) || (iricb.t_opnd4 == Ity_I16)
                 || (iricb.t_opnd4 == Ity_I32));

         if (iricb.immediate_type == Ity_I8) {
            opnd4 = mkU8(*((ULong *)iricb.opnd4));
         } else if (iricb.immediate_type == Ity_I16) {
            opnd4 = mkU16(*((ULong *)iricb.opnd4));
         } else if (iricb.immediate_type == Ity_I32) {
            opnd4 = mkU32(*((ULong *)iricb.opnd4));
         }
      } else {
         opnd4 = load(endian, iricb.t_opnd4, iricb.opnd4);
      }
      data = qop(iricb.op, opnd1, opnd2, opnd3, opnd4);
      break;

   default:
      vpanic("unsupported operator");
   }

   store(irsb, endian, iricb.result, data);

   if (0) {
      vex_printf("BEGIN inject\n");
      if (iricb.t_result == Ity_I1 || sizeofIRType(iricb.t_result) <= 8) {
         ppIRStmt(irsb->stmts[irsb->stmts_used - 1]);
      } else if (sizeofIRType(iricb.t_result) == 16) {
         ppIRStmt(irsb->stmts[irsb->stmts_used - 2]);
         vex_printf("\n");
         ppIRStmt(irsb->stmts[irsb->stmts_used - 1]);
      }
      vex_printf("\nEND inject\n");
   }
}
Example #12
static
IRSB* tt_instrument ( VgCallbackClosure* closure,
                      IRSB* bb,
                      const VexGuestLayout* layout, 
                      const VexGuestExtents* vge,
                      const VexArchInfo* archinfo_host,
                      IRType gWordTy, IRType hWordTy )
{
   Int        i;
   IRSB*      sbOut;
   IRTypeEnv* tyenv = bb->tyenv;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up SB */
   sbOut = deepCopyIRSBExceptStmts(bb);

   // TODO: move this into the tool's init-data-structures hook (cf. helgrind/hg_main)
   tainted = 
      VG_(newXA)(VG_(malloc), "tt_tainted", VG_(free), sizeof(IRExpr));
   VG_(setCmpFnXA)(tainted, cmp_tainted_by_reg);

   kvStore =
      VG_(newFM)(VG_(malloc), "ss_kvstore", VG_(free), cmp_key_by_reg);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bb->stmts_used && bb->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( sbOut, bb->stmts[i] );
      i++;
   }

   // Make sure there are no temp writes in the uninstrumented preamble
   for (int j=0; j<i; j++) {
      if (bb->stmts[j]->tag == Ist_WrTmp) {
         VG_(tool_panic)("Wrote to a temporary");
      }
   }

   tl_assert(bb->stmts[i]->tag == Ist_IMark);

   // Iterate over remaining stmts
   for (/*use current i*/; i < bb->stmts_used; i++) {
      IRStmt* st = bb->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      IRStmt* clone = deepMallocIRStmt(st);

      if (0) {
         ppIRStmt(st);
         VG_(printf)("\n");
      }

      switch (st->tag) {
         case Ist_WrTmp: {
            IRExpr* data = st->Ist.WrTmp.data;
            VG_(addToFM)(kvStore, st->Ist.WrTmp.tmp, (UWord) data);
            switch (data->tag) {
               case Iex_Load:
                  mkCall(sbOut, Event_Load, clone, data->Iex.Load.addr);
                  break;
               case Iex_Binop:
               case Iex_Unop:
                  mkCall(sbOut, Event_Op, clone, NULL);
                  break;
               default:
                  /* VG_(tool_panic)("Unfinished"): other expression
                     kinds are not handled yet */
                  break;
            }
            addStmtToIRSB( sbOut, st );
            break;
         }

         case Ist_Store: {
            IRType  type = typeOfIRExpr(tyenv, st->Ist.Store.data);
            tl_assert(type != Ity_INVALID);
            mkCall(sbOut, Event_Store, clone, st->Ist.Store.addr);
            addStmtToIRSB( sbOut, st );
            break;
         }

         default:
            addStmtToIRSB( sbOut, st );
      }
   }

   return sbOut;
}
Example #13
IRBB* bb_to_IR ( /*OUT*/VexGuestExtents* vge,
                 /*IN*/ void*            callback_opaque,
                 /*IN*/ DisOneInstrFn    dis_instr_fn,
                 /*IN*/ UChar*           guest_code,
                 /*IN*/ Addr64           guest_IP_bbstart,
                 /*IN*/ Bool             (*chase_into_ok)(void*,Addr64),
                 /*IN*/ Bool             host_bigendian,
                 /*IN*/ VexArch          arch_guest,
                 /*IN*/ VexArchInfo*     archinfo_guest,
                 /*IN*/ IRType           guest_word_type,
                 /*IN*/ Bool             do_self_check,
                 /*IN*/ Bool             (*preamble_function)(void*,IRBB*),
                 /*IN*/ Int              offB_TISTART,
                 /*IN*/ Int              offB_TILEN )
{
   Long       delta;
   Int        i, n_instrs, first_stmt_idx;
   Bool       resteerOK, need_to_put_IP, debug_print;
   DisResult  dres;
   IRStmt*    imark;
   static Int n_resteers = 0;
   Int        d_resteers = 0;
   Int        selfcheck_idx = 0;
   IRBB*      irbb;
   Addr64     guest_IP_curr_instr;
   IRConst*   guest_IP_bbstart_IRConst = NULL;

   Bool (*resteerOKfn)(void*,Addr64) = NULL;

   debug_print = toBool(vex_traceflags & VEX_TRACE_FE);

   /* Note: for adler32 to work without % operation for the self
      check, need to limit length of stuff it scans to 5552 bytes.
      Therefore limiting the max bb len to 100 insns seems generously
      conservative. */

   /* check sanity .. */
   vassert(sizeof(HWord) == sizeof(void*));
   vassert(vex_control.guest_max_insns >= 1);
   vassert(vex_control.guest_max_insns < 100);
   vassert(vex_control.guest_chase_thresh >= 0);
   vassert(vex_control.guest_chase_thresh < vex_control.guest_max_insns);
   vassert(guest_word_type == Ity_I32 || guest_word_type == Ity_I64);

   /* Start a new, empty extent. */
   vge->n_used  = 1;
   vge->base[0] = guest_IP_bbstart;
   vge->len[0]  = 0;

   /* And a new IR BB to dump the result into. */
   irbb = emptyIRBB();

   /* Delta keeps track of how far along the guest_code array we have
      so far gone. */
   delta    = 0;
   n_instrs = 0;

   /* Guest addresses as IRConsts.  Used in the two self-checks
      generated. */
   if (do_self_check) {
      guest_IP_bbstart_IRConst
         = guest_word_type==Ity_I32 
              ? IRConst_U32(toUInt(guest_IP_bbstart))
              : IRConst_U64(guest_IP_bbstart);
   }

   /* If asked to make a self-checking translation, leave 5 spaces
      in which to put the check statements.  We'll fill them in later
      when we know the length and adler32 of the area to check. */
   if (do_self_check) {
      selfcheck_idx = irbb->stmts_used;
      addStmtToIRBB( irbb, IRStmt_NoOp() );
      addStmtToIRBB( irbb, IRStmt_NoOp() );
      addStmtToIRBB( irbb, IRStmt_NoOp() );
      addStmtToIRBB( irbb, IRStmt_NoOp() );
      addStmtToIRBB( irbb, IRStmt_NoOp() );
   }

   /* If the caller supplied a function to add its own preamble, use
      it now. */
   if (preamble_function) {
      Bool stopNow = preamble_function( callback_opaque, irbb );
      if (stopNow) {
         /* The callback has completed the IR block without any guest
            insns being disassembled into it, so just return it at
            this point, even if a self-check was requested - as there
            is nothing to self-check.  The five self-check no-ops will
            still be in place, but they are harmless. */
         return irbb;
      }
   }

   /* Process instructions. */
   while (True) {
      vassert(n_instrs < vex_control.guest_max_insns);

      /* Regardless of what chase_into_ok says, is chasing permissible
         at all right now?  Set resteerOKfn accordingly. */
      resteerOK 
         = toBool(
              n_instrs < vex_control.guest_chase_thresh
              /* If making self-checking translations, don't chase
                 .. it makes the checks too complicated.  We only want
                 to scan just one sequence of bytes in the check, not
                 a whole bunch. */
              && !do_self_check
              /* we can't afford to have a resteer once we're on the
                 last extent slot. */
              && vge->n_used < 3
           );

      resteerOKfn
         = resteerOK ? chase_into_ok : const_False;

      /* This is the IP of the instruction we're just about to deal
         with. */
      guest_IP_curr_instr = guest_IP_bbstart + delta;

      /* This is the irbb statement array index of the first stmt in
         this insn.  That will always be the instruction-mark
         descriptor. */
      first_stmt_idx = irbb->stmts_used;

      /* Add an instruction-mark statement.  We won't know until after
         disassembling the instruction how long it is, so just put in a
         zero length and we'll fix it up later. */
      addStmtToIRBB( irbb, IRStmt_IMark( guest_IP_curr_instr, 0 ));

      /* for the first insn, the dispatch loop will have set
         %IP, but for all the others we have to do it ourselves. */
      need_to_put_IP = toBool(n_instrs > 0);

      /* Finally, actually disassemble an instruction. */
      dres = dis_instr_fn ( irbb,
                            need_to_put_IP,
                            resteerOKfn,
                            callback_opaque,
                            guest_code,
                            delta,
                            guest_IP_curr_instr,
                            arch_guest,
                            archinfo_guest,
                            host_bigendian );

      /* stay sane ... */
      vassert(dres.whatNext == Dis_StopHere
              || dres.whatNext == Dis_Continue
              || dres.whatNext == Dis_Resteer);
      vassert(dres.len >= 0 && dres.len <= 20);
      if (dres.whatNext != Dis_Resteer)
         vassert(dres.continueAt == 0);

      /* Fill in the insn-mark length field. */
      vassert(first_stmt_idx >= 0 && first_stmt_idx < irbb->stmts_used);
      imark = irbb->stmts[first_stmt_idx];
      vassert(imark);
      vassert(imark->tag == Ist_IMark);
      vassert(imark->Ist.IMark.len == 0);
      imark->Ist.IMark.len = toUInt(dres.len);

      /* Print the resulting IR, if needed. */
      if (vex_traceflags & VEX_TRACE_FE) {
         for (i = first_stmt_idx; i < irbb->stmts_used; i++) {
            vex_printf("              ");
            ppIRStmt(irbb->stmts[i]);
            vex_printf("\n");
         }
      }

      /* If dis_instr_fn terminated the BB at this point, check it
	 also filled in the irbb->next field. */
      if (dres.whatNext == Dis_StopHere) {
         vassert(irbb->next != NULL);
         if (debug_print) {
            vex_printf("              ");
            vex_printf( "goto {");
            ppIRJumpKind(irbb->jumpkind);
            vex_printf( "} ");
            ppIRExpr( irbb->next );
            vex_printf( "\n");
         }
      }

      /* Update the VexGuestExtents we are constructing. */
      /* Since vex_control.guest_max_insns is required to be < 100 and
         each insn is at most 20 bytes long, the maximum possible extent
         length is 100 * 20 == 2000, so this limit of 5000 is comfortably
         conservative. */
      vassert(vge->len[vge->n_used-1] < 5000);
      vge->len[vge->n_used-1] 
         = toUShort(toUInt( vge->len[vge->n_used-1] + dres.len ));
      n_instrs++;
      if (debug_print) 
         vex_printf("\n");

      /* Advance delta (inconspicuous but very important :-) */
      delta += (Long)dres.len;

      switch (dres.whatNext) {
         case Dis_Continue:
            vassert(irbb->next == NULL);
            if (n_instrs < vex_control.guest_max_insns) {
               /* keep going */
            } else {
               /* We have to stop. */
               irbb->next 
                  = IRExpr_Const(
                       guest_word_type == Ity_I32
                          ? IRConst_U32(toUInt(guest_IP_bbstart+delta))
                          : IRConst_U64(guest_IP_bbstart+delta)
                    );
               goto done;
            }
            break;
         case Dis_StopHere:
            vassert(irbb->next != NULL);
            goto done;
         case Dis_Resteer:
            /* Check that we actually allowed a resteer .. */
            vassert(resteerOK);
            vassert(irbb->next == NULL);
            /* figure out a new delta to continue at. */
            vassert(resteerOKfn(callback_opaque,dres.continueAt));
            delta = dres.continueAt - guest_IP_bbstart;
            /* we now have to start a new extent slot. */
            vge->n_used++;
            vassert(vge->n_used <= 3);
            vge->base[vge->n_used-1] = dres.continueAt;
            vge->len[vge->n_used-1] = 0;
            n_resteers++;
            d_resteers++;
            if (0 && (n_resteers & 0xFF) == 0)
            vex_printf("resteer[%d,%d] to 0x%llx (delta = %lld)\n",
                       n_resteers, d_resteers,
                       dres.continueAt, delta);
            break;
         default:
            vpanic("bb_to_IR");
      }
   }
   /*NOTREACHED*/
   vassert(0);

  done:
   /* We're done.  The only thing that might need attending to is that
      a self-checking preamble may need to be created. */
   if (do_self_check) {

      UInt     len2check, adler32;
      IRTemp   tistart_tmp, tilen_tmp;

      vassert(vge->n_used == 1);
      len2check = vge->len[0];
      if (len2check == 0) 
         len2check = 1;

     adler32 = genericg_compute_adler32( (HWord)guest_code, len2check );

     /* Set TISTART and TILEN.  These will describe to the despatcher
        the area of guest code to invalidate should we exit with a
        self-check failure. */

     tistart_tmp = newIRTemp(irbb->tyenv, guest_word_type);
     tilen_tmp   = newIRTemp(irbb->tyenv, guest_word_type);

     irbb->stmts[selfcheck_idx+0]
        = IRStmt_Tmp(tistart_tmp, IRExpr_Const(guest_IP_bbstart_IRConst) );

     irbb->stmts[selfcheck_idx+1]
        = IRStmt_Tmp(tilen_tmp,
                     guest_word_type==Ity_I32 
                        ? IRExpr_Const(IRConst_U32(len2check)) 
                        : IRExpr_Const(IRConst_U64(len2check))
          );

     irbb->stmts[selfcheck_idx+2]
        = IRStmt_Put( offB_TISTART, IRExpr_Tmp(tistart_tmp) );

     irbb->stmts[selfcheck_idx+3]
        = IRStmt_Put( offB_TILEN, IRExpr_Tmp(tilen_tmp) );

     irbb->stmts[selfcheck_idx+4]
        = IRStmt_Exit( 
             IRExpr_Binop( 
                Iop_CmpNE32, 
                mkIRExprCCall( 
                   Ity_I32, 
                   2/*regparms*/, 
                   "genericg_compute_adler32",
#if defined(__powerpc__) && defined(__powerpc64__)
                   (void*)((ULong*)(&genericg_compute_adler32))[0],
#else
                   &genericg_compute_adler32,
#endif
                   mkIRExprVec_2( 
                      mkIRExpr_HWord( (HWord)guest_code ), 
                      mkIRExpr_HWord( (HWord)len2check )
                   )
                ),
                IRExpr_Const(IRConst_U32(adler32))
             ),
             Ijk_TInval,
             guest_IP_bbstart_IRConst
          );
   }

   return irbb;
}
Example #14
/* Inject IR stmts depending on the data provided in the control
   block iricb. */
void
vex_inject_ir(IRSB *irsb, IREndness endian)
{
   IRExpr *data, *rounding_mode, *opnd1, *opnd2, *opnd3, *opnd4;

   rounding_mode = NULL;
   if (iricb.rounding_mode != NO_ROUNDING_MODE) {
      rounding_mode = mkU32(iricb.rounding_mode);
   }

   switch (iricb.num_operands) {
   case 1:
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
      if (rounding_mode)
         data = binop(iricb.op, rounding_mode, opnd1);
      else
         data = unop(iricb.op, opnd1);
      break;

   case 2:
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);

      if (iricb.shift_amount_is_immediate) {
         // This implies that the IROp is a shift op
         vassert(iricb.t_opnd2 == Ity_I8);
         opnd2 = mkU8(*((Char *)iricb.opnd2));
      } else {
         opnd2 = load(endian, iricb.t_opnd2, iricb.opnd2);
      }

      if (rounding_mode)
         data = triop(iricb.op, rounding_mode, opnd1, opnd2);
      else
         data = binop(iricb.op, opnd1, opnd2);
      break;

   case 3:
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
      opnd2 = load(endian, iricb.t_opnd2, iricb.opnd2);
      opnd3 = load(endian, iricb.t_opnd3, iricb.opnd3);
      if (rounding_mode)
         data = qop(iricb.op, rounding_mode, opnd1, opnd2, opnd3);
      else
         data = triop(iricb.op, opnd1, opnd2, opnd3);
      break;

   case 4:
      vassert(rounding_mode == NULL);
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
      opnd2 = load(endian, iricb.t_opnd2, iricb.opnd2);
      opnd3 = load(endian, iricb.t_opnd3, iricb.opnd3);
      opnd4 = load(endian, iricb.t_opnd4, iricb.opnd4);
      data = qop(iricb.op, opnd1, opnd2, opnd3, opnd4);
      break;

   default:
      vpanic("unsupported operator");
   }

   store(irsb, endian, iricb.result, data);

   if (0) {
      vex_printf("BEGIN inject\n");
      if (iricb.t_result == Ity_I1 || sizeofIRType(iricb.t_result) <= 8) {
         ppIRStmt(irsb->stmts[irsb->stmts_used - 1]);
      } else if (sizeofIRType(iricb.t_result) == 16) {
         ppIRStmt(irsb->stmts[irsb->stmts_used - 2]);
         vex_printf("\n");
         ppIRStmt(irsb->stmts[irsb->stmts_used - 1]);
      }
      vex_printf("\nEND inject\n");
   }
}
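Both vex_inject_ir variants read an external control block named iricb. Its shape can be reconstructed from the fields they reference; the struct below is that reconstruction (field types and ordering are assumptions, not the actual VEX declaration):

typedef struct {
   IROp   op;                         /* operation to inject */
   UInt   num_operands;               /* 1 .. 4 */
   UInt   rounding_mode;              /* or NO_ROUNDING_MODE */
   UInt   immediate_index;            /* 0, or index of the immediate operand */
   IRType immediate_type;             /* Ity_I8/I16/I32 when an immediate is used */
   Bool   shift_amount_is_immediate;  /* consulted by the second variant */
   IRType t_opnd1, t_opnd2, t_opnd3, t_opnd4;   /* operand types */
   IRType t_result;                   /* result type */
   HWord  opnd1, opnd2, opnd3, opnd4; /* addresses of the operand buffers */
   HWord  result;                     /* address of the result buffer */
} IRICB_sketch;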