Example #1
0
static
void addConstMemStoreStmt( IRBB* bbOut, UWord addr, UInt val, IRType hWordTy)
{
    IRConst* addrC;

    /* Build an address constant whose width matches the host word type;
       the stored value itself is always a 32-bit constant. */
    if (hWordTy == Ity_I32)
        addrC = IRConst_U32( addr );
    else
        addrC = IRConst_U64( addr );

    addStmtToIRBB( bbOut,
                   IRStmt_Store(CLGEndness,
                                IRExpr_Const(addrC),
                                IRExpr_Const(IRConst_U32(val)) ));
}
static void
store_aux(IRSB *irsb, IREndness endian, IRExpr *addr, IRExpr *data)
{
   IRType ty = typeOfIRExpr(irsb->tyenv, data);

   /* The insn selectors do not support writing a DFP value to memory.
      Reinterpret the DFP value as an integer and store that instead. */
   if (ty == Ity_D64) {
      data = unop(Iop_ReinterpD64asI64, data);
      ty = typeOfIRExpr(irsb->tyenv, data);
   }

   /* A single bit cannot be stored; widen it into a 32-bit container.
      See also load_aux. */
   if (ty == Ity_I1)
      data = unop(Iop_1Uto32, data);

   stmt(irsb, IRStmt_Store(endian, addr, data));
}
Example #3
0
/* Return a deep copy of the IR statement 's': every sub-expression is
   duplicated via pyvex_deepCopyIRExpr (and the PutI/CAS/Dirty detail
   structures via their dedicated deep-copy helpers), while plain scalar
   fields (offsets, lengths, temporaries, jump kinds, events) are copied
   by value.  Panics on an unrecognized statement tag. */
IRStmt* pyvex_deepCopyIRStmt ( IRStmt* s )
{
   switch (s->tag) {
      case Ist_NoOp:
         return IRStmt_NoOp();
      case Ist_AbiHint:
         return IRStmt_AbiHint(pyvex_deepCopyIRExpr(s->Ist.AbiHint.base),
                               s->Ist.AbiHint.len,
                               pyvex_deepCopyIRExpr(s->Ist.AbiHint.nia));
      case Ist_IMark:
         return IRStmt_IMark(s->Ist.IMark.addr,
                             s->Ist.IMark.len,
                             s->Ist.IMark.delta);
      case Ist_Put: 
         return IRStmt_Put(s->Ist.Put.offset, 
                           pyvex_deepCopyIRExpr(s->Ist.Put.data));
      case Ist_PutI: 
         return IRStmt_PutI(pyvex_deepCopyIRPutI(s->Ist.PutI.details));
      case Ist_WrTmp:
         return IRStmt_WrTmp(s->Ist.WrTmp.tmp,
                             pyvex_deepCopyIRExpr(s->Ist.WrTmp.data));
      case Ist_Store: 
         return IRStmt_Store(s->Ist.Store.end,
                             pyvex_deepCopyIRExpr(s->Ist.Store.addr),
                             pyvex_deepCopyIRExpr(s->Ist.Store.data));
      case Ist_CAS:
         return IRStmt_CAS(pyvex_deepCopyIRCAS(s->Ist.CAS.details));
      case Ist_LLSC:
         /* storedata is NULL for a load-linked; only copy it for a
            store-conditional. */
         return IRStmt_LLSC(s->Ist.LLSC.end,
                            s->Ist.LLSC.result,
                            pyvex_deepCopyIRExpr(s->Ist.LLSC.addr),
                            s->Ist.LLSC.storedata
                               ? pyvex_deepCopyIRExpr(s->Ist.LLSC.storedata)
                               : NULL);
      case Ist_Dirty: 
         return IRStmt_Dirty(pyvex_deepCopyIRDirty(s->Ist.Dirty.details));
      case Ist_MBE:
         return IRStmt_MBE(s->Ist.MBE.event);
      case Ist_Exit: 
         return IRStmt_Exit(pyvex_deepCopyIRExpr(s->Ist.Exit.guard),
                            s->Ist.Exit.jk,
                            pyvex_deepCopyIRConst(s->Ist.Exit.dst),
                            s->Ist.Exit.offsIP);
      /* NOTE(review): no cases for Ist_LoadG/Ist_StoreG here; if this VEX
         version defines them, such statements will hit the panic below —
         confirm against the libvex_ir.h this is built against. */
      default: 
         vpanic("pyvex_deepCopyIRStmt");
   }
}
Example #4
0
/* This version of flushEvents avoids callbacks entirely, except when the
   number of outstanding events is enough to be flushed - in which case a
   call to flush_data() is made.  In all other cases, events are handled by
   creating IR to encode and store the memory access information to the
   array of outstanding events.

   Emits IR into 'sb' that, at run time:
     1. calls flush_data() only if theEntries + size would exceed
        theMaxEntries (guarded dirty call);
     2. appends one MV_TraceAddr record per event in events[start..start+size)
        to the buffer at theBlock, each record holding the access address
        followed by a packed type/size/thread word;
     3. bumps theEntries by 'size'.

   NOTE(review): the pointer-to-ULong casts and Iop_*64 address arithmetic
   assume a 64-bit host — confirm this tool is 64-bit only. */
static void flushEventsRange(IRSB* sb, Int start, Int size)
{
    // Conditionally call the flush method if there's not enough room for
    // all the new events.  This may flush an incomplete block.
    IRExpr *entries_addr = mkU64((ULong)&theEntries);
    IRExpr *entries = load(ENDIAN, Ity_I32, entries_addr);

    IRExpr *max_entries_addr = mkU64((ULong)&theMaxEntries);
    IRExpr *max_entries = load(ENDIAN, Ity_I32, max_entries_addr);

    // Dirty call to flush_data(), executed only when the guard below holds.
    IRDirty*   di =
        unsafeIRDirty_0_N(0,
            "flush_data", VG_(fnptr_to_fnentry)( flush_data ),
            mkIRExprVec_0() );

    // Guard: theMaxEntries < theEntries + size  (i.e. not enough room).
    di->guard =
        binop(Iop_CmpLT32S, max_entries,
                binop(Iop_Add32, entries, mkU32(size)));

    addStmtToIRSB( sb, IRStmt_Dirty(di) );

    // Reload entries since it might have been changed by the callback
    entries = load(ENDIAN, Ity_I32, entries_addr);

    // Initialize the first address where we'll write trace information.
    // This will be advanced in the loop.
    // addr = theBlock + entries * sizeof(MV_TraceAddr)
    IRExpr *addr =
        binop(Iop_Add64,
                load(ENDIAN, Ity_I64, mkU64((ULong)&theBlock)),
                unop(Iop_32Uto64,
                    binop(Iop_Mul32, entries, mkU32(sizeof(MV_TraceAddr)))));

    // Grab the thread id
    // NOTE(review): this IRExpr tree (like 'entries' above) is reused in
    // several emitted statements; presumably a later flattening pass makes
    // the shared subtrees legal — verify against the tool's pipeline.
    IRExpr *thread = load(ENDIAN, Ity_I32, mkU64((ULong)&theThread));

    Int        i;
    for (i = start; i < start+size; i++) {

        Event*     ev = &events[i];

        // Pack the event kind into the shifted-type field.
        uint32 type = 0;
        switch (ev->ekind) {
            case Event_Ir:
                type = MV_ShiftedInstr;
                break;
            case Event_Dr:
                type = MV_ShiftedRead;
                break;
            case Event_Dw:
            case Event_Dm:
                type = MV_ShiftedWrite;
                break;
            default:
                tl_assert(0);
        }

        // Fold in the data type and access size fields (compile-time
        // constants for this event).
        type |= ev->type << MV_DataShift;
        type |= ((uint32)ev->size << MV_SizeShift);

        // Construct the address and store it
        // Run-time word = static type/size bits OR'd with the thread id.
        IRExpr *data = binop(Iop_Or32, mkU32(type), thread);

        IRStmt *store;

        // First word of the record: the accessed address (ev->addr is an
        // IRExpr computed at instrumentation time, evaluated at run time).
        store = IRStmt_Store(ENDIAN, addr, ev->addr);
        addStmtToIRSB( sb, store );

        // Advance to the type
        addr = binop(Iop_Add64, addr, mkU64(sizeof(uint64)));

        store = IRStmt_Store(ENDIAN, addr, data);
        addStmtToIRSB( sb, store );

        // Advance to the next entry
        // (remaining record size after the uint64 address field)
        addr = binop(Iop_Add64, addr, mkU64(sizeof(MV_TraceAddr)-sizeof(uint64)));
    }

    // Store the new entry count
    IRStmt *entries_store =
        IRStmt_Store(ENDIAN, entries_addr,
                binop(Iop_Add32, entries, mkU32(size)));

    addStmtToIRSB( sb, entries_store );
}