Example #1
/* See if wv is contained within wsu.  If so, deallocate wv and return
   the index of the already-present copy.  If not, add wv to both the
   vec2ix and ix2vec mappings and return its index. 
*/
static WordSet add_or_dealloc_WordVec( WordSetU* wsu, WordVec* wv_new )
{
   Bool     have;
   WordVec* wv_old;
   Word/*Set*/ ix_old = -1;
   /* Really WordSet, but we need something that can safely be cast to
      a Word* in the lookupFM.  Making it WordSet (which is 32 bits)
      causes failures on a 64-bit platform. */
   tl_assert(wv_new->owner == wsu);
   have = HG_(lookupFM)( wsu->vec2ix, 
                         (Word*)&wv_old, (Word*)&ix_old,
                         (Word)wv_new );
   if (have) {
      tl_assert(wv_old != wv_new);
      tl_assert(wv_old);
      tl_assert(wv_old->owner == wsu);
      tl_assert(ix_old < wsu->ix2vec_used);
      tl_assert(wsu->ix2vec[ix_old] == wv_old);
      delete_WV( wv_new );
      return (WordSet)ix_old;
   } else {
      ensure_ix2vec_space( wsu );
      tl_assert(wsu->ix2vec);
      tl_assert(wsu->ix2vec_used < wsu->ix2vec_size);
      wsu->ix2vec[wsu->ix2vec_used] = wv_new;
      HG_(addToFM)( wsu->vec2ix, (Word)wv_new, (Word)wsu->ix2vec_used );
      if (0) VG_(printf)("aodW %d\n", (Int)wsu->ix2vec_used );
      wsu->ix2vec_used++;
      tl_assert(wsu->ix2vec_used <= wsu->ix2vec_size);
      return (WordSet)(wsu->ix2vec_used - 1);
   }
}
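The add-or-dealloc pattern above is a classic interning scheme: vec2ix maps a vector's contents to its index, and ix2vec maps indices back, so each distinct vector is stored exactly once. Below is a minimal standalone sketch of the same idea; it uses a linear scan where the real code uses HG_(lookupFM), and every name in it is illustrative rather than Valgrind API.

#include <stdlib.h>
#include <string.h>

/* Hypothetical intern table; it owns every vector it has accepted. */
typedef struct { size_t n; long* words; } Vec;
typedef struct { Vec** ix2vec; size_t used, size; } InternTab;

/* Return the index of a vector equal to 'v', adding 'v' if absent.
   If an equal vector is already present, 'v' is freed, mirroring
   add_or_dealloc_WordVec above. */
static size_t intern_or_free ( InternTab* t, Vec* v )
{
   for (size_t i = 0; i < t->used; i++) {
      Vec* old = t->ix2vec[i];
      if (old->n == v->n
          && memcmp(old->words, v->words, v->n * sizeof(long)) == 0) {
         free(v->words);            /* duplicate: deallocate it ... */
         free(v);
         return i;                  /* ... and hand back the old index */
      }
   }
   if (t->used == t->size) {        /* grow, like ensure_ix2vec_space */
      t->size = t->size ? 2 * t->size : 4;
      t->ix2vec = realloc(t->ix2vec, t->size * sizeof(Vec*));
   }
   t->ix2vec[t->used] = v;
   return t->used++;
}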
Example #2
void HG_(record_error_LockOrder)(
    Thread*     thr,
    Lock*       shouldbe_earlier_lk,
    Lock*       shouldbe_later_lk,
    ExeContext* shouldbe_earlier_ec,
    ExeContext* shouldbe_later_ec,
    ExeContext* actual_earlier_ec
)
{
    XError xe;
    tl_assert( HG_(is_sane_Thread)(thr) );
    tl_assert(HG_(clo_track_lockorders));
    init_XError(&xe);
    xe.tag = XE_LockOrder;
    xe.XE.LockOrder.thr       = thr;
    xe.XE.LockOrder.shouldbe_earlier_lk
        = mk_LockP_from_LockN(shouldbe_earlier_lk,
                              False/*!allowed_to_be_invalid*/);
    xe.XE.LockOrder.shouldbe_earlier_ec = shouldbe_earlier_ec;
    xe.XE.LockOrder.shouldbe_later_lk
        = mk_LockP_from_LockN(shouldbe_later_lk,
                              False/*!allowed_to_be_invalid*/);
    xe.XE.LockOrder.shouldbe_later_ec   = shouldbe_later_ec;
    xe.XE.LockOrder.actual_earlier_ec   = actual_earlier_ec;
    // FIXME: tid vs thr
    tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
    tl_assert( thr->coretid != VG_INVALID_THREADID );
    VG_(maybe_record_error)( thr->coretid,
                             XE_LockOrder, 0, NULL, &xe );
}
Example #3
void HG_(record_error_Race) ( Thread* thr,
                              Addr data_addr, Int szB, Bool isWrite,
                              Thread* h1_ct,
                              ExeContext* h1_ct_segstart,
                              ExeContext* h1_ct_mbsegendEC )
{
    XError xe;
    tl_assert( HG_(is_sane_Thread)(thr) );

#  if defined(VGO_linux)
    /* Skip any races on locations apparently in GOTPLT sections.  This
       is said to be caused by ld.so poking PLT table entries (or
       whatever) when it writes the resolved address of a dynamically
       linked routine into the table when the routine is called for the
       first time. */
    {
        VgSectKind sect = VG_(DebugInfo_sect_kind)( NULL, data_addr );
        if (0) VG_(printf)("XXXXXXXXX RACE on %#lx %s\n",
                               data_addr, VG_(pp_SectKind)(sect));
        /* SectGOTPLT is required on ???-linux */
        if (sect == Vg_SectGOTPLT) return;
        /* SectPLT is required on ppc32/64-linux */
        if (sect == Vg_SectPLT) return;
        /* SectGOT is required on arm-linux */
        if (sect == Vg_SectGOT) return;
    }
#  endif

    init_XError(&xe);
    xe.tag = XE_Race;
    xe.XE.Race.data_addr   = data_addr;
    xe.XE.Race.szB         = szB;
    xe.XE.Race.isWrite     = isWrite;
    xe.XE.Race.thr         = thr;
    tl_assert(isWrite == False || isWrite == True);
    tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
    /* Skip on the detailed description of the raced-on address at this
       point; it's expensive.  Leave it for the update_extra function
       if we ever make it that far. */
    xe.XE.Race.data_addrinfo.tag = Addr_Undescribed;
    // FIXME: tid vs thr
    // Skip on any of the conflicting-access info at this point.
    // It's expensive to obtain, and this error is more likely than
    // not to be discarded.  We'll fill these fields in in
    // HG_(update_extra) just above, assuming the error ever makes
    // it that far (unlikely).
    xe.XE.Race.h2_ct_accSzB = 0;
    xe.XE.Race.h2_ct_accIsW = False;
    xe.XE.Race.h2_ct_accEC  = NULL;
    xe.XE.Race.h2_ct        = NULL;
    tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
    tl_assert( thr->coretid != VG_INVALID_THREADID );

    xe.XE.Race.h1_ct              = h1_ct;
    xe.XE.Race.h1_ct_mbsegstartEC = h1_ct_segstart;
    xe.XE.Race.h1_ct_mbsegendEC   = h1_ct_mbsegendEC;

    VG_(maybe_record_error)( thr->coretid,
                             XE_Race, data_addr, NULL, &xe );
}
Example #4
static Word lock_unique_cmp ( UWord lk1W, UWord lk2W )
{
   Lock* lk1 = (Lock*)lk1W;
   Lock* lk2 = (Lock*)lk2W;
   tl_assert( HG_(is_sane_LockNorP)(lk1) );
   tl_assert( HG_(is_sane_LockNorP)(lk2) );
   if (lk1->unique < lk2->unique) return -1;
   if (lk1->unique > lk2->unique) return 1;
   return 0;
}
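Three-way comparators like this (negative, zero, positive) are what VG_(newFM)'s ordered map requires, and ordering by the stable unique field rather than by pointer keeps the order deterministic. A standalone illustration of the same contract driving C's qsort follows; the Lock struct is a stand-in holding only the field the comparator touches.

#include <stdio.h>
#include <stdlib.h>

typedef struct { unsigned long unique; } Lock;   /* stand-in */

/* Same shape as lock_unique_cmp: order by the stable 'unique' field,
   never by pointer value, so the ordering is deterministic. */
static int cmp_by_unique ( const void* a, const void* b )
{
   const Lock* l1 = *(Lock* const*)a;
   const Lock* l2 = *(Lock* const*)b;
   if (l1->unique < l2->unique) return -1;
   if (l1->unique > l2->unique) return 1;
   return 0;
}

int main ( void )
{
   Lock a = { 3 }, b = { 1 }, c = { 2 };
   Lock* locks[3] = { &a, &b, &c };
   qsort(locks, 3, sizeof(Lock*), cmp_by_unique);
   for (int i = 0; i < 3; i++)
      printf("%lu\n", locks[i]->unique);   /* prints 1, 2, 3 */
   return 0;
}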
Example #5
void HG_(record_error_UnlockBogus) ( Thread* thr, Addr lock_ga )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   init_XError(&xe);
   xe.tag = XE_UnlockBogus;
   xe.XE.UnlockBogus.thr     = thr;
   xe.XE.UnlockBogus.lock_ga = lock_ga;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockBogus, 0, NULL, &xe );
}
Example #6
void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockUnlocked;
   xe.XE.UnlockUnlocked.thr  = thr;
   xe.XE.UnlockUnlocked.lock = mk_LockP_from_LockN(lk);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockUnlocked, 0, NULL, &xe );
}
Example #7
void HG_(record_error_Misc) ( Thread* thr, HChar* errstr )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_Misc;
   xe.XE.Misc.thr    = thr;
   xe.XE.Misc.errstr = string_table_strdup(errstr);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_Misc, 0, NULL, &xe );
}
Example #8
void HG_(describe_addr) ( Addr a, /*OUT*/AddrInfo* ai )
{
   tl_assert(ai->tag == Addr_Undescribed);

   /* hctxt/haddr/hszB describe the addr if it is a heap block. */
   ExeContext* hctxt;
   Addr        haddr;
   SizeT       hszB;

   /* First, see if it's in any heap block.  Unfortunately this
      means a linear search through all allocated heap blocks.  The
      assertion says that if it's detected as a heap block, then we
      must have an allocation context for it, since all heap blocks
      should have an allocation context. */
   Bool is_heapblock
      = HG_(mm_find_containing_block)( 
           &hctxt,
           &haddr,
           &hszB,
           a
        );
   if (is_heapblock) {
      tl_assert(is_heapblock == (hctxt != NULL));
      ai->tag = Addr_Block;
      ai->Addr.Block.block_kind = Block_Mallocd;
      ai->Addr.Block.block_desc = "block";
      ai->Addr.Block.block_szB  = hszB;
      ai->Addr.Block.rwoffset   = (Word)(a) - (Word)(haddr);
      ai->Addr.Block.allocated_at = hctxt;
      ai->Addr.Block.freed_at = VG_(null_ExeContext)();
   } else {
      /* No block found. Search a non-heap block description. */
      VG_(describe_addr) (a, ai);
   }
}
Example #9
/* Announce 'lk'. */
static void announce_LockP ( Lock* lk )
{
    tl_assert(lk);
    if (lk == Lock_INVALID)
        return; /* Can't be announced -- we know nothing about it. */
    tl_assert(lk->magic == LockP_MAGIC);

    if (VG_(clo_xml)) {
        if (lk->appeared_at) {
            emit( "  <auxwhat>Lock at %p was first observed</auxwhat>\n",
                  (void*)lk->guestaddr );
            VG_(pp_ExeContext)( lk->appeared_at );
        }

    } else {
        if (lk->appeared_at) {
            VG_(umsg)( " Lock at %p was first observed\n",
                       (void*)lk->guestaddr );
            VG_(pp_ExeContext)( lk->appeared_at );
        } else {
            VG_(umsg)( " Lock at %p : no stacktrace for first observation\n",
                       (void*)lk->guestaddr );
        }
        HG_(get_and_pp_addrdescr) (lk->guestaddr);
        VG_(umsg)("\n");
    }
}
Example #10
void HG_(record_error_Misc_w_aux) ( Thread* thr, const HChar* errstr,
                                    const HChar* auxstr, ExeContext* auxctx )
{
    XError xe;
    tl_assert( HG_(is_sane_Thread)(thr) );
    tl_assert(errstr);
    init_XError(&xe);
    xe.tag = XE_Misc;
    xe.XE.Misc.thr    = thr;
    xe.XE.Misc.errstr = string_table_strdup(errstr);
    xe.XE.Misc.auxstr = auxstr ? string_table_strdup(auxstr) : NULL;
    xe.XE.Misc.auxctx = auxctx;
    // FIXME: tid vs thr
    tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
    tl_assert( thr->coretid != VG_INVALID_THREADID );
    VG_(maybe_record_error)( thr->coretid,
                             XE_Misc, 0, NULL, &xe );
}
Example #11
void HG_(deleteWordSetU) ( WordSetU* wsu )
{
   void (*dealloc)(void*) = wsu->dealloc;
   tl_assert(wsu->vec2ix);
   HG_(deleteFM)( wsu->vec2ix, delete_WV_for_FM, NULL/*val-finalizer*/ );
   if (wsu->ix2vec)
      dealloc(wsu->ix2vec);
   dealloc(wsu);
}
Example #12
void HG_(record_error_UnlockForeign) ( Thread* thr,
                                       Thread* owner, Lock* lk )
{
    XError xe;
    tl_assert( HG_(is_sane_Thread)(thr) );
    tl_assert( HG_(is_sane_Thread)(owner) );
    tl_assert( HG_(is_sane_LockN)(lk) );
    init_XError(&xe);
    xe.tag = XE_UnlockForeign;
    xe.XE.UnlockForeign.thr   = thr;
    xe.XE.UnlockForeign.owner = owner;
    xe.XE.UnlockForeign.lock
        = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
    // FIXME: tid vs thr
    tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
    tl_assert( thr->coretid != VG_INVALID_THREADID );
    VG_(maybe_record_error)( thr->coretid,
                             XE_UnlockForeign, 0, NULL, &xe );
}
Example #13
void HG_(record_error_PthAPIerror) ( Thread* thr, HChar* fnname, 
                                     Word err, HChar* errstr )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(fnname);
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_PthAPIerror;
   xe.XE.PthAPIerror.thr    = thr;
   xe.XE.PthAPIerror.fnname = string_table_strdup(fnname);
   xe.XE.PthAPIerror.err    = err;
   xe.XE.PthAPIerror.errstr = string_table_strdup(errstr);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_PthAPIerror, 0, NULL, &xe );
}
Example #14
void HG_(describe_addr) ( Addr a, /*OUT*/AddrInfo* ai )
{
   tl_assert(ai->tag == Addr_Undescribed);

   /* hctxt/tnr/haddr/hszB describe the addr if it is a heap block. */
   ExeContext* hctxt;
   UInt        tnr;
   Addr        haddr;
   SizeT       hszB;

   /* First, see if it's in any heap block.  Unfortunately this
      means a linear search through all allocated heap blocks.  The
      assertion says that if it's detected as a heap block, then we
      must have an allocation context for it, since all heap blocks
      should have an allocation context. */
   Bool is_heapblock
      = HG_(mm_find_containing_block)( 
           &hctxt,
           &tnr,
           &haddr,
           &hszB,
           a
        );
   if (is_heapblock) {
      tl_assert(is_heapblock == (hctxt != NULL));
      ai->tag = Addr_Block;
      ai->Addr.Block.block_kind = Block_Mallocd;
      ai->Addr.Block.block_desc = "block";
      ai->Addr.Block.block_szB  = hszB;
      ai->Addr.Block.rwoffset   = (Word)(a) - (Word)(haddr);
      ai->Addr.Block.allocated_at = hctxt;
      VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
      ai->Addr.Block.alloc_tinfo.tnr = tnr;
      ai->Addr.Block.freed_at = VG_(null_ExeContext)();
   } else {
      /* No block found. Search a non-heap block description. */
      VG_(describe_addr) (a, ai);

      /* In case ai contains a tid, set tnr to the corresponding helgrind
         thread number. */
      if (ai->tag == Addr_Stack) {
         Thread* thr = get_admin_threads();

         tl_assert(ai->Addr.Stack.tinfo.tid);
         while (thr) {
            if (thr->coretid == ai->Addr.Stack.tinfo.tid) {
               ai->Addr.Stack.tinfo.tnr = thr->errmsg_index;
               break;
            }
            thr = thr->admin;
         }
      }
   }
}
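As the comment in both versions of HG_(describe_addr) notes, HG_(mm_find_containing_block) amounts to a linear search over all live heap blocks. A hedged sketch of such a containment test over a flat array follows; the Block record is assumed for illustration and is not Helgrind's actual bookkeeping.

#include <stddef.h>

typedef unsigned long Addr;
typedef struct { Addr payload; size_t szB; } Block;

/* Return the block whose payload range [payload, payload+szB)
   contains 'a', or NULL if none does.  O(n) in the number of live
   blocks, which is one reason the expensive address description is
   deferred until HG_(update_extra). */
static const Block* find_containing_block ( const Block* blocks,
                                            size_t n, Addr a )
{
   for (size_t i = 0; i < n; i++) {
      if (a >= blocks[i].payload
          && a - blocks[i].payload < blocks[i].szB)
         return &blocks[i];
   }
   return NULL;
}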
Example #15
static HChar* string_table_strdup ( const HChar* str ) {
    HChar* copy = NULL;
    HG_(stats__string_table_queries)++;
    if (!str)
        str = "(null)";
    if (!string_table) {
        string_table = VG_(newFM)( HG_(zalloc), "hg.sts.1",
                                   HG_(free), string_table_cmp );
    }
    if (VG_(lookupFM)( string_table,
                       NULL, (UWord*)&copy, (UWord)str )) {
        tl_assert(copy);
        if (0) VG_(printf)("string_table_strdup: %p -> %p\n", str, copy );
        return copy;
    } else {
        copy = HG_(strdup)("hg.sts.2", str);
        VG_(addToFM)( string_table, (UWord)copy, (UWord)copy );
        return copy;
    }
}
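string_table_strdup interns strings so that equal error strings share one canonical copy, which is what lets HG_(eq_Error) compare Misc/PthAPIerror strings cheaply. A standalone sketch of the lookup-else-copy pattern, with a fixed-size linear table standing in for the FM (illustrative only):

#include <stdlib.h>
#include <string.h>

#define TABLE_CAP 256

static char*  table[TABLE_CAP];
static size_t table_used = 0;

/* Return the canonical copy of 'str', copying it on first sight.
   NULL is mapped to "(null)", as in string_table_strdup.  If the
   table is full we still return a private copy, just uninterned. */
static char* intern_str ( const char* str )
{
   if (!str) str = "(null)";
   for (size_t i = 0; i < table_used; i++)
      if (strcmp(table[i], str) == 0)
         return table[i];               /* already interned */
   char* copy = strdup(str);
   if (copy && table_used < TABLE_CAP)
      table[table_used++] = copy;
   return copy;
}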
Example #16
static Bool is_sane_Bag_of_Threads ( WordBag* bag )
{
   Thread* thr;
   Word    count;
   VG_(initIterBag)( bag );
   while (VG_(nextIterBag)( bag, (Word*)&thr, &count )) {
      if (count < 1) return False;
      if (!HG_(is_sane_Thread)(thr)) return False;
   }
   VG_(doneIterBag)( bag );
   return True;
}
Example #17
void HG_(print_access) (StackTrace ips, UInt n_ips,
                        Thr* thr_a,
                        Addr  ga,
                        SizeT SzB,
                        Bool  isW,
                        WordSetID locksHeldW )
{
    Thread* threadp;

    threadp = libhb_get_Thr_hgthread( thr_a );
    tl_assert(threadp);
    if (!threadp->announced) {
        /* This is for interactive use. We announce the thread if needed,
           but reset it to not announced afterwards, because we want
           the thread to be announced on the error output/log if needed. */
        announce_one_thread (threadp);
        threadp->announced = False;
    }

    VG_(printf) ("%s of size %d at %p by thread #%d",
                 isW ? "write" : "read",
                 (int)SzB, (void*)ga, threadp->errmsg_index);
    if (threadp->coretid == VG_INVALID_THREADID)
        VG_(printf)(" tid (exited)\n");
    else
        VG_(printf)(" tid %u\n", threadp->coretid);
    {
        Lock** locksHeldW_P;
        locksHeldW_P = enumerate_WordSet_into_LockP_vector(
                           HG_(get_univ_lsets)(),
                           locksHeldW,
                           True/*allowed_to_be_invalid*/
                       );
        show_LockP_summary_textmode( locksHeldW_P, "" );
        HG_(free) (locksHeldW_P);
    }
    VG_(pp_StackTrace) (ips, n_ips);
    VG_(printf) ("\n");
}
Example #18
static Lock* mk_LockP_from_LockN ( Lock* lkn )
{
   Lock* lkp = NULL;
   HG_(stats__LockN_to_P_queries)++;
   tl_assert( HG_(is_sane_LockN)(lkn) );
   if (!map_LockN_to_P) {
      map_LockN_to_P = VG_(newFM)( HG_(zalloc), "hg.mLPfLN.1",
                                   HG_(free), lock_unique_cmp );
      tl_assert(map_LockN_to_P);
   }
   if (!VG_(lookupFM)( map_LockN_to_P, NULL, (Word*)&lkp, (Word)lkn)) {
      lkp = HG_(zalloc)( "hg.mLPfLN.2", sizeof(Lock) );
      *lkp = *lkn;
      lkp->admin = NULL;
      lkp->magic = LockP_MAGIC;
      /* Forget about the bag of lock holders - don't copy that.
         Also, acquired_at should be NULL whenever heldBy is, and vice
         versa.  Also forget about the associated libhb synch object. */
      lkp->heldW  = False;
      lkp->heldBy = NULL;
      lkp->acquired_at = NULL;
      lkp->hbso = NULL;
      VG_(addToFM)( map_LockN_to_P, (Word)lkp, (Word)lkp );
   }
   tl_assert( HG_(is_sane_LockP)(lkp) );
   return lkp;
}
Example #19
void HG_(record_error_LockOrder)(
        Thread* thr, Addr before_ga, Addr after_ga,
        ExeContext* before_ec, ExeContext* after_ec 
     )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   if (!HG_(clo_track_lockorders))
      return;
   init_XError(&xe);
   xe.tag = XE_LockOrder;
   xe.XE.LockOrder.thr       = thr;
   xe.XE.LockOrder.before_ga = before_ga;
   xe.XE.LockOrder.before_ec = before_ec;
   xe.XE.LockOrder.after_ga  = after_ga;
   xe.XE.LockOrder.after_ec  = after_ec;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_LockOrder, 0, NULL, &xe );
}
Example #20
Bool HG_(get_and_pp_addrdescr) (Addr addr)
{
   Bool ret;
   AddrInfo glai;

   glai.tag = Addr_Undescribed;
   HG_(describe_addr) (addr, &glai);
   VG_(pp_addrinfo) (addr, &glai);
   ret = glai.tag != Addr_Unknown;

   VG_(clear_addrinfo) (&glai);

   return ret;
}
Example #21
/* Given a normal Lock (LockN), convert it to a persistent Lock
   (LockP).  In some cases the LockN could be invalid (if it's been
   freed), so we enquire, in hg_main.c's admin_locks list, whether it
   is in fact valid.  If allowed_to_be_invalid is True, then it's OK
   for the LockN to be invalid, in which case Lock_INVALID is
   returned.  In all other cases, we insist that the LockN is a valid
   lock, and return its corresponding LockP.

   Why can LockNs sometimes be invalid?  Because they are harvested
   from locksets that are attached to the OldRef info for conflicting
   threads.  By the time we detect a race, some of the elements of
   the lockset may have been destroyed by the client, in which case
   the corresponding Lock structures we maintain will have been freed.

   So we check that each LockN is a member of the admin_locks double
   linked list of all Lock structures.  That stops us prodding around
   in potentially freed-up Lock structures.  However, it's not quite a
   proper check: if a new Lock has been reallocated at the same
   address as one which was previously freed, we'll wind up copying
   the new one as the basis for the LockP, which is completely bogus
   because it is unrelated to the previous Lock that lived there.
   Let's hope that doesn't happen too often.
*/
static Lock* mk_LockP_from_LockN ( Lock* lkn,
                                   Bool allowed_to_be_invalid )
{
    Lock* lkp = NULL;
    HG_(stats__LockN_to_P_queries)++;

    /* First off, let's do some sanity checks.  If
       allowed_to_be_invalid is False, we _must_ be able to find 'lkn'
       in admin_locks; else we must assert.  If it is True, it's OK for
       it not to be findable, but in that case we must return
       Lock_INVALID right away. */
    Lock* lock_list = HG_(get_admin_locks)();
    while (lock_list) {
        if (lock_list == lkn)
            break;
        lock_list = lock_list->admin_next;
    }
    if (lock_list == NULL) {
        /* We didn't find it.  That possibility has to be OK'd by the
           caller. */
        tl_assert(allowed_to_be_invalid);
        return Lock_INVALID;
    }

    /* So we must be looking at a valid LockN. */
    tl_assert( HG_(is_sane_LockN)(lkn) );

    if (!map_LockN_to_P) {
        map_LockN_to_P = VG_(newFM)( HG_(zalloc), "hg.mLPfLN.1",
                                     HG_(free), lock_unique_cmp );
    }
    if (!VG_(lookupFM)( map_LockN_to_P, NULL, (UWord*)&lkp, (UWord)lkn)) {
        lkp = HG_(zalloc)( "hg.mLPfLN.2", sizeof(Lock) );
        *lkp = *lkn;
        lkp->admin_next = NULL;
        lkp->admin_prev = NULL;
        lkp->magic = LockP_MAGIC;
        /* Forget about the bag of lock holders - don't copy that.
           Also, acquired_at should be NULL whenever heldBy is, and vice
           versa.  Also forget about the associated libhb synch object. */
        lkp->heldW  = False;
        lkp->heldBy = NULL;
        lkp->acquired_at = NULL;
        lkp->hbso = NULL;
        VG_(addToFM)( map_LockN_to_P, (UWord)lkp, (UWord)lkp );
    }
    tl_assert( HG_(is_sane_LockP)(lkp) );
    return lkp;
}
Example #22
Bool HG_(eq_Error) ( VgRes not_used, Error* e1, Error* e2 )
{
   XError *xe1, *xe2;

   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   xe1 = (XError*)VG_(get_error_extra)(e1);
   xe2 = (XError*)VG_(get_error_extra)(e2);
   tl_assert(xe1);
   tl_assert(xe2);

   switch (VG_(get_error_kind)(e1)) {
      case XE_Race:
         return xe1->XE.Race.szB == xe2->XE.Race.szB
                && xe1->XE.Race.isWrite == xe2->XE.Race.isWrite
                && (HG_(clo_cmp_race_err_addrs)
                       ? xe1->XE.Race.data_addr == xe2->XE.Race.data_addr
                       : True);
      case XE_UnlockUnlocked:
         return xe1->XE.UnlockUnlocked.thr == xe2->XE.UnlockUnlocked.thr
                && xe1->XE.UnlockUnlocked.lock == xe2->XE.UnlockUnlocked.lock;
      case XE_UnlockForeign:
         return xe1->XE.UnlockForeign.thr == xe2->XE.UnlockForeign.thr
                && xe1->XE.UnlockForeign.owner == xe2->XE.UnlockForeign.owner
                && xe1->XE.UnlockForeign.lock == xe2->XE.UnlockForeign.lock;
      case XE_UnlockBogus:
         return xe1->XE.UnlockBogus.thr == xe2->XE.UnlockBogus.thr
                && xe1->XE.UnlockBogus.lock_ga == xe2->XE.UnlockBogus.lock_ga;
      case XE_PthAPIerror:
         return xe1->XE.PthAPIerror.thr == xe2->XE.PthAPIerror.thr
                && 0==VG_(strcmp)(xe1->XE.PthAPIerror.fnname,
                                  xe2->XE.PthAPIerror.fnname)
                && xe1->XE.PthAPIerror.err == xe2->XE.PthAPIerror.err;
      case XE_LockOrder:
         return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr;
      case XE_Misc:
         return xe1->XE.Misc.thr == xe2->XE.Misc.thr
                && 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
      default:
         tl_assert(0);
   }

   /*NOTREACHED*/
   tl_assert(0);
}
Example #23
/* Announce (that is, print the point-of-creation) of 'thr'.  Only do
   this once, as we only want to see these announcements once per
   thread.  Returned Bool indicates whether or not an announcement was
   made.
*/
static Bool announce_one_thread ( Thread* thr )
{
    tl_assert(HG_(is_sane_Thread)(thr));
    tl_assert(thr->errmsg_index >= 1);
    if (thr->announced)
        return False;

    if (VG_(clo_xml)) {

        VG_(printf_xml)("<announcethread>\n");
        VG_(printf_xml)("  <hthreadid>%d</hthreadid>\n", thr->errmsg_index);
        if (thr->errmsg_index == 1) {
            tl_assert(thr->created_at == NULL);
            VG_(printf_xml)("  <isrootthread></isrootthread>\n");
        } else {
            tl_assert(thr->created_at != NULL);
            VG_(pp_ExeContext)( thr->created_at );
        }
        VG_(printf_xml)("</announcethread>\n\n");

    } else {

        VG_(umsg)("---Thread-Announcement----------"
                  "--------------------------------" "\n");
        VG_(umsg)("\n");

        if (thr->errmsg_index == 1) {
            tl_assert(thr->created_at == NULL);
            VG_(message)(Vg_UserMsg,
                         "Thread #%d is the program's root thread\n",
                         thr->errmsg_index);
        } else {
            tl_assert(thr->created_at != NULL);
            VG_(message)(Vg_UserMsg, "Thread #%d was created\n",
                         thr->errmsg_index);
            VG_(pp_ExeContext)( thr->created_at );
        }
        VG_(message)(Vg_UserMsg, "\n");

    }

    thr->announced = True;
    return True;
}
Example #24
WordSetU* HG_(newWordSetU) ( void* (*alloc_nofail)( SizeT ),
                             void  (*dealloc)(void*),
                             Word  cacheSize )
{
   WordSetU* wsu;
   WordVec*  empty;

   wsu          = alloc_nofail( sizeof(WordSetU) );
   VG_(memset)( wsu, 0, sizeof(WordSetU) );
   wsu->alloc   = alloc_nofail;
   wsu->dealloc = dealloc;
   wsu->vec2ix  = HG_(newFM)( alloc_nofail, dealloc, cmp_WordVecs_for_FM );
   wsu->ix2vec_used = 0;
   wsu->ix2vec_size = 0;
   wsu->ix2vec      = NULL;
   WCache_INIT(wsu->cache_addTo,     cacheSize);
   WCache_INIT(wsu->cache_delFrom,   cacheSize);
   WCache_INIT(wsu->cache_intersect, cacheSize);
   WCache_INIT(wsu->cache_minus,     cacheSize);
   empty = new_WV_of_size( wsu, 0 );
   wsu->empty = add_or_dealloc_WordVec( wsu, empty );

   return wsu;
}
Example #25
Bool HG_(isSubsetOf) ( WordSetU* wsu, WordSet small, WordSet big )
{
   wsu->n_isSubsetOf++;
   return small == HG_(intersectWS)( wsu, small, big );
}
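The subset test leans on the identity small ⊆ big ⟺ small ∩ big = small; because intersectWS returns an interned set, the equality is a cheap set-ID comparison. The same identity, demonstrated on plain bitmask sets:

#include <assert.h>

typedef unsigned long BitSet;

/* 'small' is a subset of 'big' iff intersecting changes nothing. */
static int is_subset_of ( BitSet small, BitSet big )
{
   return small == (small & big);
}

int main ( void )
{
   assert( is_subset_of(0x05, 0x0F) );   /* {0,2} within {0,1,2,3} */
   assert( !is_subset_of(0x11, 0x0F) );  /* {0,4} is not */
   return 0;
}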
Example #26
void HG_(record_error_Misc) ( Thread* thr, const HChar* errstr )
{
    HG_(record_error_Misc_w_aux)(thr, errstr, NULL, NULL);
}
Example #27
void HG_(pp_Error) ( Error* err )
{
   const Bool xml = VG_(clo_xml); /* a shorthand, that's all */

   XError *xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   switch (VG_(get_error_kind)(err)) {

   case XE_Misc: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );

      if (xml) {

         emit( "  <kind>Misc</kind>\n");
         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: %s</text>\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Misc.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit( "Thread #%d: %s\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }
      break;
   }

   case XE_LockOrder: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.LockOrder.thr ) );

      if (xml) {

         emit( "  <kind>LockOrder</kind>\n");
         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: lock order \"%p before %p\" "
                    "violated</text>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.before_ga,
               (void*)xe->XE.LockOrder.after_ga );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.before_ec && xe->XE.LockOrder.after_ec) {
            emit( "  <auxwhat>Required order was established by "
                  "acquisition of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.before_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.before_ec );
            emit( "  <auxwhat>followed by a later acquisition "
                  "of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.after_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.after_ec );
         }

      } else {

         emit( "Thread #%d: lock order \"%p before %p\" violated\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.before_ga,
               (void*)xe->XE.LockOrder.after_ga );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.before_ec && xe->XE.LockOrder.after_ec) {
            emit( "  Required order was established by "
                  "acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.before_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.before_ec );
            emit( "  followed by a later acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.after_ga );
            VG_(pp_ExeContext)( xe->XE.LockOrder.after_ec );
         }

      }

      break;
   }

   case XE_PthAPIerror: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.PthAPIerror.thr ) );

      if (xml) {

         emit( "  <kind>PthAPIerror</kind>\n");
         emit( "  <xwhat>\n" );
         emit_no_f_c(
            "    <text>Thread #%d's call to %t failed</text>\n",
            (Int)xe->XE.PthAPIerror.thr->errmsg_index,
            xe->XE.PthAPIerror.fnname );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.PthAPIerror.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         emit( "  <what>with error code %ld (%s)</what>\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit_no_f_c( "Thread #%d's call to %t failed\n",
                      (Int)xe->XE.PthAPIerror.thr->errmsg_index,
                      xe->XE.PthAPIerror.fnname );
         emit( "   with error code %ld (%s)\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }

      break;
   }

   case XE_UnlockBogus: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockBogus.thr ) );

      if (xml) {

         emit( "  <kind>UnlockBogus</kind>\n");
         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked an invalid "
                    "lock at %p</text>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit( "Thread #%d unlocked an invalid lock at %p\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }

      break;
   }

   case XE_UnlockForeign: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockForeign.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.owner ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.thr ) );

      if (xml) {

         emit( "  <kind>UnlockForeign</kind>\n");
         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked lock at %p "
                    "currently held by thread #%d</text>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.UnlockForeign.lock->appeared_at) {
            emit( "  <auxwhat>Lock at %p was first observed</auxwhat>\n",
                  (void*)xe->XE.UnlockForeign.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockForeign.lock->appeared_at );
         }

      } else {

         emit( "Thread #%d unlocked lock at %p "
               "currently held by thread #%d\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.UnlockForeign.lock->appeared_at) {
            emit( "  Lock at %p was first observed\n",
                  (void*)xe->XE.UnlockForeign.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockForeign.lock->appeared_at );
         }

      }

      break;
   }

   case XE_UnlockUnlocked: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockUnlocked.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockUnlocked.thr ) );

      if (xml) {

         emit( "  <kind>UnlockUnlocked</kind>\n");
         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked a "
                    "not-locked lock at %p</text>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.UnlockUnlocked.lock->appeared_at) {
            emit( "  <auxwhat>Lock at %p was first observed</auxwhat>\n",
                  (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockUnlocked.lock->appeared_at );
         }

      } else {

         emit( "Thread #%d unlocked a not-locked lock at %p\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.UnlockUnlocked.lock->appeared_at) {
            emit( "  Lock at %p was first observed\n",
                  (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
            VG_(pp_ExeContext)( xe->XE.UnlockUnlocked.lock->appeared_at );
         }

      }

      break;
   }

   case XE_Race: {
      Addr      err_ga;
      HChar*    what;
      Int       szB;
      what      = xe->XE.Race.isWrite ? "write" : "read";
      szB       = xe->XE.Race.szB;
      err_ga = VG_(get_error_address)(err);

      tl_assert( HG_(is_sane_Thread)( xe->XE.Race.thr ));
      if (xe->XE.Race.h2_ct)
         tl_assert( HG_(is_sane_Thread)( xe->XE.Race.h2_ct ));

      if (xml) {

         /* ------ XML ------ */
         emit( "  <kind>Race</kind>\n" );
         emit( "  <xwhat>\n" );
         emit( "    <text>Possible data race during %s of size %d "
                    "at %#lx by thread #%d</text>\n",
              what, szB, err_ga, (Int)xe->XE.Race.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Race.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous %s of size %d "
                            "by thread #%d</text>\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n", 
                  xe->XE.Race.h2_ct->errmsg_index);
            emit("  </xauxwhat>\n");
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous access "
                  "by thread #%d, after</text>\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n", 
                  xe->XE.Race.h1_ct->errmsg_index );
            emit("  </xauxwhat>\n");
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( "  <auxwhat>(the start of the thread)</auxwhat>\n" );
            }
            emit( "  <auxwhat>but before</auxwhat>\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( "  <auxwhat>(the end of the the thread)</auxwhat>\n" );
            }
         }

      } else {

         /* ------ Text ------ */
         emit( "Possible data race during %s of size %d "
               "at %#lx by thread #%d\n",
               what, szB, err_ga, (Int)xe->XE.Race.thr->errmsg_index );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            emit( " This conflicts with a previous %s of size %d "
                  "by thread #%d\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( " This conflicts with a previous access by thread #%d, "
                  "after\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( "   (the start of the thread)\n" );
            }
            emit( " but before\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( "   (the end of the the thread)\n" );
            }
         }

      }

      /* If we have a better description of the address, show it.
         Note that in XML mode, it will already be nicely wrapped up
         in tags, either <auxwhat> or <xauxwhat>, so we can just emit
         it verbatim. */
      if (xe->XE.Race.descr1)
         emit( "%s%s\n", xml ? "  " : " ",
                         (HChar*)VG_(indexXA)( xe->XE.Race.descr1, 0 ) );
      if (xe->XE.Race.descr2)
         emit( "%s%s\n", xml ? "  " : " ",
                         (HChar*)VG_(indexXA)( xe->XE.Race.descr2, 0 ) );

      break; /* case XE_Race */
   } /* case XE_Race */

   default:
      tl_assert(0);
   } /* switch (VG_(get_error_kind)(err)) */
}
Example #28
void HG_(pp_Error) ( const Error* err )
{
    const Bool xml = VG_(clo_xml); /* a shorthand, that's all */

    if (!xml) {
        VG_(umsg)("--------------------------------"
                  "--------------------------------" "\n");
        VG_(umsg)("\n");
    }

    XError *xe = (XError*)VG_(get_error_extra)(err);
    tl_assert(xe);

    if (xml)
        emit( "  <kind>%s</kind>\n", HG_(get_error_name)(err));

    switch (VG_(get_error_kind)(err)) {

    case XE_Misc: {
        tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );

        if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d: %s</text>\n",
                  (Int)xe->XE.Misc.thr->errmsg_index,
                  xe->XE.Misc.errstr );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.Misc.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (xe->XE.Misc.auxstr) {
                emit("  <auxwhat>%s</auxwhat>\n", xe->XE.Misc.auxstr);
                if (xe->XE.Misc.auxctx)
                    VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
            }

        } else {

            emit( "Thread #%d: %s\n",
                  (Int)xe->XE.Misc.thr->errmsg_index,
                  xe->XE.Misc.errstr );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (xe->XE.Misc.auxstr) {
                emit(" %s\n", xe->XE.Misc.auxstr);
                if (xe->XE.Misc.auxctx)
                    VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
            }

        }
        break;
    }

    case XE_LockOrder: {
        tl_assert( HG_(is_sane_Thread)( xe->XE.LockOrder.thr ) );

        if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d: lock order \"%p before %p\" "
                  "violated</text>\n",
                  (Int)xe->XE.LockOrder.thr->errmsg_index,
                  (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr,
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.LockOrder.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (xe->XE.LockOrder.shouldbe_earlier_ec
                    && xe->XE.LockOrder.shouldbe_later_ec) {
                emit( "  <auxwhat>Required order was established by "
                      "acquisition of lock at %p</auxwhat>\n",
                      (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
                VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
                emit( "  <auxwhat>followed by a later acquisition "
                      "of lock at %p</auxwhat>\n",
                      (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
                VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
            }
            announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
            announce_LockP ( xe->XE.LockOrder.shouldbe_later_lk );

        } else {

            emit( "Thread #%d: lock order \"%p before %p\" violated\n",
                  (Int)xe->XE.LockOrder.thr->errmsg_index,
                  (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr,
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
            emit( "\n" );
            emit( "Observed (incorrect) order is: "
                  "acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr);
            if (xe->XE.LockOrder.actual_earlier_ec) {
                VG_(pp_ExeContext)(xe->XE.LockOrder.actual_earlier_ec);
            } else {
                emit("   (stack unavailable)\n");
            }
            emit( "\n" );
            emit(" followed by a later acquisition of lock at %p\n",
                 (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr);
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (xe->XE.LockOrder.shouldbe_earlier_ec
                    && xe->XE.LockOrder.shouldbe_later_ec) {
                emit("\n");
                emit( "Required order was established by "
                      "acquisition of lock at %p\n",
                      (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
                VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
                emit( "\n" );
                emit( " followed by a later acquisition of lock at %p\n",
                      (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
                VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
            }
            emit("\n");
            announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
            announce_LockP ( xe->XE.LockOrder.shouldbe_later_lk );

        }

        break;
    }

    case XE_PthAPIerror: {
        tl_assert( HG_(is_sane_Thread)( xe->XE.PthAPIerror.thr ) );

        if (xml) {

            emit( "  <xwhat>\n" );
            emit(
                "    <text>Thread #%d's call to %pS failed</text>\n",
                (Int)xe->XE.PthAPIerror.thr->errmsg_index,
                xe->XE.PthAPIerror.fnname );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.PthAPIerror.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            emit( "  <what>with error code %ld (%s)</what>\n",
                  xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

        } else {

            emit( "Thread #%d's call to %pS failed\n",
                  (Int)xe->XE.PthAPIerror.thr->errmsg_index,
                  xe->XE.PthAPIerror.fnname );
            emit( "   with error code %ld (%s)\n",
                  xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

        }

        break;
    }

    case XE_UnlockBogus: {
        tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockBogus.thr ) );

        if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d unlocked an invalid "
                  "lock at %p</text>\n",
                  (Int)xe->XE.UnlockBogus.thr->errmsg_index,
                  (void*)xe->XE.UnlockBogus.lock_ga );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.UnlockBogus.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

        } else {

            emit( "Thread #%d unlocked an invalid lock at %p\n",
                  (Int)xe->XE.UnlockBogus.thr->errmsg_index,
                  (void*)xe->XE.UnlockBogus.lock_ga );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

        }

        break;
    }

    case XE_UnlockForeign: {
        tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockForeign.lock ) );
        tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.owner ) );
        tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.thr ) );

        if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d unlocked lock at %p "
                  "currently held by thread #%d</text>\n",
                  (Int)xe->XE.UnlockForeign.thr->errmsg_index,
                  (void*)xe->XE.UnlockForeign.lock->guestaddr,
                  (Int)xe->XE.UnlockForeign.owner->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.UnlockForeign.thr->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.UnlockForeign.owner->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            announce_LockP ( xe->XE.UnlockForeign.lock );

        } else {

            emit( "Thread #%d unlocked lock at %p "
                  "currently held by thread #%d\n",
                  (Int)xe->XE.UnlockForeign.thr->errmsg_index,
                  (void*)xe->XE.UnlockForeign.lock->guestaddr,
                  (Int)xe->XE.UnlockForeign.owner->errmsg_index );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            announce_LockP ( xe->XE.UnlockForeign.lock );

        }

        break;
    }

    case XE_UnlockUnlocked: {
        tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockUnlocked.lock ) );
        tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockUnlocked.thr ) );

        if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d unlocked a "
                  "not-locked lock at %p</text>\n",
                  (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
                  (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.UnlockUnlocked.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            announce_LockP ( xe->XE.UnlockUnlocked.lock);

        } else {

            emit( "Thread #%d unlocked a not-locked lock at %p\n",
                  (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
                  (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            announce_LockP ( xe->XE.UnlockUnlocked.lock);

        }

        break;
    }

    case XE_Race: {
        Addr      err_ga;
        const HChar* what;
        Int       szB;
        what      = xe->XE.Race.isWrite ? "write" : "read";
        szB       = xe->XE.Race.szB;
        err_ga = VG_(get_error_address)(err);

        tl_assert( HG_(is_sane_Thread)( xe->XE.Race.thr ));
        if (xe->XE.Race.h2_ct)
            tl_assert( HG_(is_sane_Thread)( xe->XE.Race.h2_ct ));

        if (xml) {

            /* ------ XML ------ */
            emit( "  <xwhat>\n" );
            emit( "    <text>Possible data race during %s of size %d "
                  "at %p by thread #%d</text>\n",
                  what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.Race.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

            if (xe->XE.Race.h2_ct) {
                tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
                emit( "  <xauxwhat>\n");
                emit( "    <text>This conflicts with a previous %s of size %d "
                      "by thread #%d</text>\n",
                      xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                      xe->XE.Race.h2_ct_accSzB,
                      xe->XE.Race.h2_ct->errmsg_index );
                emit( "    <hthreadid>%d</hthreadid>\n",
                      xe->XE.Race.h2_ct->errmsg_index);
                emit("  </xauxwhat>\n");
                VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
            }

            if (xe->XE.Race.h1_ct) {
                emit( "  <xauxwhat>\n");
                emit( "    <text>This conflicts with a previous access "
                      "by thread #%d, after</text>\n",
                      xe->XE.Race.h1_ct->errmsg_index );
                emit( "    <hthreadid>%d</hthreadid>\n",
                      xe->XE.Race.h1_ct->errmsg_index );
                emit("  </xauxwhat>\n");
                if (xe->XE.Race.h1_ct_mbsegstartEC) {
                    VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
                } else {
                    emit( "  <auxwhat>(the start of the thread)</auxwhat>\n" );
                }
                emit( "  <auxwhat>but before</auxwhat>\n" );
                if (xe->XE.Race.h1_ct_mbsegendEC) {
                    VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
                } else {
                    emit( "  <auxwhat>(the end of the thread)</auxwhat>\n" );
                }
            }

        } else {

            /* ------ Text ------ */
            announce_combined_LockP_vecs( xe->XE.Race.locksHeldW,
                                          xe->XE.Race.h2_ct_locksHeldW );

            emit( "Possible data race during %s of size %d "
                  "at %p by thread #%d\n",
                  what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );

            tl_assert(xe->XE.Race.locksHeldW);
            show_LockP_summary_textmode( xe->XE.Race.locksHeldW, "" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

            if (xe->XE.Race.h2_ct) {
                tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
                tl_assert(xe->XE.Race.h2_ct_locksHeldW);
                emit( "\n" );
                emit( "This conflicts with a previous %s of size %d "
                      "by thread #%d\n",
                      xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                      xe->XE.Race.h2_ct_accSzB,
                      xe->XE.Race.h2_ct->errmsg_index );
                show_LockP_summary_textmode( xe->XE.Race.h2_ct_locksHeldW, "" );
                VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
            }

            if (xe->XE.Race.h1_ct) {
                emit( " This conflicts with a previous access by thread #%d, "
                      "after\n",
                      xe->XE.Race.h1_ct->errmsg_index );
                if (xe->XE.Race.h1_ct_mbsegstartEC) {
                    VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
                } else {
                    emit( "   (the start of the thread)\n" );
                }
                emit( " but before\n" );
                if (xe->XE.Race.h1_ct_mbsegendEC) {
                    VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
                } else {
                    emit( "   (the end of the thread)\n" );
                }
            }

        }
        VG_(pp_addrinfo) (err_ga, &xe->XE.Race.data_addrinfo);
        break; /* case XE_Race */
    } /* case XE_Race */

    default:
        tl_assert(0);
    } /* switch (VG_(get_error_kind)(err)) */
}
Example #29
WordSet HG_(singletonWS) ( WordSetU* wsu, Word w )
{
   return HG_(doubletonWS)( wsu, w, w );
}
Example #30
/* Updates the copy with address info if necessary. */
UInt HG_(update_extra) ( Error* err )
{
   XError* xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);
   //if (extra != NULL && Undescribed == extra->addrinfo.akind) {
   //   describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
   //}

   if (xe->tag == XE_Race) {

      /* See if we can come up with a source level description of the
         raced-upon address.  This is potentially expensive, which is
         why it's only done at the update_extra point, not when the
         error is initially created. */
      static Int xxx = 0;
      xxx++;
      if (0)
         VG_(printf)("HG_(update_extra): "
                     "%d conflicting-event queries\n", xxx);
      tl_assert(!xe->XE.Race.descr1);
      tl_assert(!xe->XE.Race.descr2);

      xe->XE.Race.descr1
         = VG_(newXA)( HG_(zalloc), "hg.update_extra.Race.descr1",
                       HG_(free), sizeof(HChar) );
      xe->XE.Race.descr2
         = VG_(newXA)( HG_(zalloc), "hg.update_extra.Race.descr2",
                       HG_(free), sizeof(HChar) );

      (void) VG_(get_data_description)( xe->XE.Race.descr1,
                                        xe->XE.Race.descr2,
                                        xe->XE.Race.data_addr );

      /* If there's nothing in descr1/2, free it.  Why is it safe to
         do VG_(indexXA) at zero here?  Because
         VG_(get_data_description) guarantees to zero terminate
         descr1/2 regardless of the outcome of the call.  So there's
         always at least one element in each XA after the call.
      */
      if (0 == VG_(strlen)( VG_(indexXA)( xe->XE.Race.descr1, 0 ))) {
         VG_(deleteXA)( xe->XE.Race.descr1 );
         xe->XE.Race.descr1 = NULL;
      }
      if (0 == VG_(strlen)( VG_(indexXA)( xe->XE.Race.descr2, 0 ))) {
         VG_(deleteXA)( xe->XE.Race.descr2 );
         xe->XE.Race.descr2 = NULL;
      }

      /* And poke around in the conflicting-event map, to see if we
         can rustle up a plausible-looking conflicting memory access
         to show. */
      if (HG_(clo_history_level) >= 2) { 
         Thr* thrp = NULL;
         ExeContext* wherep = NULL;
         Addr  acc_addr = xe->XE.Race.data_addr;
         Int   acc_szB  = xe->XE.Race.szB;
         Thr*  acc_thr  = xe->XE.Race.thr->hbthr;
         Bool  acc_isW  = xe->XE.Race.isWrite;
         SizeT conf_szB = 0;
         Bool  conf_isW = False;
         tl_assert(!xe->XE.Race.h2_ct_accEC);
         tl_assert(!xe->XE.Race.h2_ct);
         if (libhb_event_map_lookup(
                &wherep, &thrp, &conf_szB, &conf_isW,
                acc_thr, acc_addr, acc_szB, acc_isW )) {
            Thread* threadp;
            tl_assert(wherep);
            tl_assert(thrp);
            threadp = libhb_get_Thr_opaque( thrp );
            tl_assert(threadp);
            xe->XE.Race.h2_ct_accEC  = wherep;
            xe->XE.Race.h2_ct        = threadp;
            xe->XE.Race.h2_ct_accSzB = (Int)conf_szB;
            xe->XE.Race.h2_ct_accIsW = conf_isW;
         }
      }

      // both NULL or both non-NULL
      tl_assert( (!!xe->XE.Race.h2_ct) == (!!xe->XE.Race.h2_ct_accEC) );
   }

   return sizeof(XError);
}