Example 1
/** Called before pthread_rwlock_wrlock() is invoked. If a data structure for
 *  the client-side object was not yet created, do this now. Also check whether
 *  an attempt is made to lock recursively a synchronization object that must
 *  not be locked recursively.
 */
void rwlock_pre_wrlock(const Addr rwlock, const SizeT size)
{
  struct rwlock_info* p;

  p = rwlock_get(rwlock);

  if (s_trace_rwlock)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] pre_rwlock_wrlock  0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 rwlock);
  }

  if (p == 0)
  {
    p = rwlock_get_or_allocate(rwlock, size);
  }

  tl_assert(p);

  if (rwlock_is_wrlocked_by(p, thread_get_running_tid()))
  {
    RwlockErrInfo REI = { p->a1 };
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            RwlockErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "Recursive writer locking not allowed",
                            &REI);
  }
}
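
A minimal client program (a usage sketch, not part of DRD itself) that exercises this check: the second pthread_rwlock_wrlock() call from the same thread makes rwlock_pre_wrlock() report "Recursive writer locking not allowed" when the program runs under DRD.

#include <pthread.h>

int main(void)
{
  pthread_rwlock_t lock;

  pthread_rwlock_init(&lock, NULL);
  pthread_rwlock_wrlock(&lock);
  /* Second write lock by the same thread: DRD reports the error before
   * the call proceeds (without DRD this call simply deadlocks). */
  pthread_rwlock_wrlock(&lock);
  return 0;
}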
Example 2
/** Called before pthread_rwlock_rdlock() is invoked. If a data structure for
 *  the client-side object was not yet created, do this now. Also check whether
 *  an attempt is made to lock recursively a synchronization object that must
 *  not be locked recursively.
 */
void rwlock_pre_rdlock(const Addr rwlock, const SizeT size)
{
  struct rwlock_info* p;

  p = rwlock_get_or_allocate(rwlock, size);

  tl_assert(p);

  if (s_trace_rwlock)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] pre_rwlock_rdlock  0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 rwlock);
  }

  if (rwlock_is_wrlocked_by(p, thread_get_running_tid()))
  {
    VG_(message)(Vg_UserMsg,
                 "reader-writer lock 0x%lx is already locked for"
                 " writing by calling thread",
                 p->a1);
  }
}
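
A companion sketch for the read-lock path: read-locking an rwlock that the calling thread has already write-locked triggers the message above (again assuming the program runs under DRD).

#include <pthread.h>

int main(void)
{
  pthread_rwlock_t lock;

  pthread_rwlock_init(&lock, NULL);
  pthread_rwlock_wrlock(&lock);
  /* rwlock_pre_rdlock() detects that this thread already holds the
   * write lock and prints the "already locked for writing" message. */
  pthread_rwlock_rdlock(&lock);
  return 0;
}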
Example 3
/**
 * Update rwlock_info state when locking the pthread_rwlock_t rwlock.
 * Note: this function must be called after pthread_rwlock_wrlock() has been
 * called, or a race condition is triggered!
 */
void rwlock_post_wrlock(const Addr rwlock, const Bool took_lock)
{
  const DrdThreadId drd_tid = thread_get_running_tid();
  struct rwlock_info* p;
  struct rwlock_thread_info* q;

  p = rwlock_get(rwlock);

  if (s_trace_rwlock)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] post_rwlock_wrlock 0x%lx",
                 VG_(get_running_tid)(),
                 drd_tid,
                 rwlock);
  }

  if (! p || ! took_lock)
    return;

  q = lookup_or_insert_node(p->thread_info, drd_tid);
  tl_assert(q->writer_nesting_count == 0);
  q->writer_nesting_count++;
  tl_assert(q->writer_nesting_count == 1);
  rwlock_combine_other_vc(p, drd_tid);
  thread_new_segment(drd_tid);
}
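
For orientation, a hypothetical sketch of how the pre/post hooks bracket the real call and how took_lock is derived; DRD's actual intercepts communicate through client requests (see the request handler further below), so the wrapper name here is illustrative only.

/* Hypothetical intercept sketch, not DRD's real wrapper code. */
int intercepted_pthread_rwlock_wrlock(pthread_rwlock_t* rwlock)
{
  int ret;
  rwlock_pre_wrlock((Addr)rwlock, sizeof(*rwlock)); /* before the call */
  ret = pthread_rwlock_wrlock(rwlock);              /* may block */
  rwlock_post_wrlock((Addr)rwlock, ret == 0);       /* took_lock iff success */
  return ret;
}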
Example 4
static void drd_trace_mem_access(const Addr addr, const SizeT size,
                                 const BmAccessTypeT access_type)
{
  if (drd_is_any_traced(addr, addr + size))
  {
    char vc[80];
    vc_snprint(vc, sizeof(vc), thread_get_vc(thread_get_running_tid()));
    VG_(message)(Vg_UserMsg,
                 "%s 0x%lx size %ld (vg %d / drd %d / vc %s)",
                 access_type == eLoad
                 ? "load "
                 : access_type == eStore
                 ? "store"
                 : access_type == eStart
                 ? "start"
                 : access_type == eEnd
                 ? "end  "
                 : "????",
                 addr,
                 size,
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 vc);
    VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(),
                               VG_(clo_backtrace_size));
    tl_assert(DrdThreadIdToVgThreadId(thread_get_running_tid())
              == VG_(get_running_tid)());
  }
}
Example 5
/** Called before pthread_mutex_lock() is invoked. If a data structure for
 *  the client-side object was not yet created, do this now. Also check whether
 *  an attempt is made to lock recursively a synchronization object that must
 *  not be locked recursively.
 */
void mutex_pre_lock(const Addr mutex, const SizeT size, MutexT mutex_type)
{
  struct mutex_info* p;

  p = mutex_get(mutex);

  if (s_trace_mutex)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] pre_mutex_lock  %s 0x%lx rc %d owner %d",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 p ? mutex_get_typename(p) : "(?)",
                 mutex,
                 p ? p->recursion_count : 0,
                 p ? p->owner : VG_INVALID_THREADID);
  }

  if (mutex_type == mutex_type_invalid_mutex)
  {
    GenericErrInfo GEI;
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            GenericErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "Not a mutex",
                            &GEI);
    return;
  }

  if (p == 0)
  {
    p = mutex_init(mutex, size, mutex_type);
  }

  tl_assert(p);

  if (p->owner == thread_get_running_tid()
      && p->recursion_count >= 1
      && mutex_type != mutex_type_recursive_mutex)
  {
    MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            MutexErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "Recursive locking not allowed",
                            &MEI);
  }
}
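
A usage sketch for this check (assumptions: run under DRD, and PTHREAD_MUTEX_ERRORCHECK maps to a non-recursive mutex type): the second lock attempt makes mutex_pre_lock() report "Recursive locking not allowed", and thanks to the error-checking type the call itself returns EDEADLK instead of deadlocking.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int main(void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t mutex;

  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
  pthread_mutex_init(&mutex, &attr);

  pthread_mutex_lock(&mutex);
  if (pthread_mutex_lock(&mutex) == EDEADLK)  /* also flagged by DRD */
    fprintf(stderr, "second lock refused, as expected\n");
  pthread_mutex_unlock(&mutex);
  return 0;
}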
Example 6
/* Assumption: stacks grow downward.                                       */
static void drd_stop_using_mem_stack(const Addr a, const SizeT len)
{
    thread_set_stack_min(thread_get_running_tid(),
                         a + len - VG_STACK_REDZONE_SZB);
    drd_stop_using_mem(a - VG_STACK_REDZONE_SZB, len + VG_STACK_REDZONE_SZB,
                       True);
}
Example 7
/** Called after sem_destroy(). */
void semaphore_destroy(const Addr semaphore)
{
  struct semaphore_info* p;

  if (s_trace_semaphore)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] semaphore_destroy 0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 semaphore);
  }

  p = semaphore_get(semaphore);

  if (p == 0)
  {
    GenericErrInfo GEI;
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            GenericErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "Not a semaphore",
                            &GEI);
    return;
  }

  clientobj_remove(semaphore, ClientSemaphore);
}
Example 8
/* Assumption: stacks grow downward.                                     */
static __inline__
void drd_start_using_mem_stack(const Addr a, const SizeT len)
{
  thread_set_stack_min(thread_get_running_tid(), a - VG_STACK_REDZONE_SZB);
  drd_start_using_mem(a - VG_STACK_REDZONE_SZB, 
                      len + VG_STACK_REDZONE_SZB);
}
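
A worked example of the redzone arithmetic above, with hypothetical numbers:

/* Assuming VG_STACK_REDZONE_SZB == 16: a push of 0x100 bytes moves the
 * stack pointer from 0x7000 down to 0x6f00, so Valgrind reports
 * a = 0x6f00 and len = 0x100.  drd_start_using_mem_stack() then records
 * stack_min = 0x6ef0 and starts tracking [0x6ef0, 0x7000), i.e. the new
 * frame plus the redzone below it.  The matching
 * drd_stop_using_mem_stack() call sets stack_min back to the new stack
 * pointer minus the redzone (a + len - 16). */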
Example 9
/** Call this function whenever a thread is no longer using the memory
 *  [ a1, a2 [, e.g. because of a call to free() or a stack pointer
 *  increase.
 */
void thread_stop_using_mem(const Addr a1, const Addr a2)
{
  DrdThreadId other_user;
  unsigned i;

  /* For all threads, mark the range [ a1, a2 [ as no longer in use. */
  other_user = DRD_INVALID_THREADID;
  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* p;
    for (p = s_threadinfo[i].first; p; p = p->next)
    {
      if (other_user == DRD_INVALID_THREADID
          && i != s_drd_running_tid)
      {
        if (UNLIKELY(bm_test_and_clear(p->bm, a1, a2)))
        {
          other_user = i;
        }
        continue;
      }
      bm_clear(p->bm, a1, a2);
    }
  }

  /* If any other thread had accessed memory in [ a1, a2 [, update the */
  /* danger set. */
  if (other_user != DRD_INVALID_THREADID
      && bm_has_any_access(s_danger_set, a1, a2))
  {
    thread_compute_danger_set(&s_danger_set, thread_get_running_tid());
  }
}
Example 10
/** Deallocate the memory that was allocated by rwlock_initialize(). */
static void rwlock_cleanup(struct rwlock_info* p)
{
  struct rwlock_thread_info* q;

  tl_assert(p);

  if (s_trace_rwlock)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] rwlock_destroy   0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 p->a1);
  }

  if (rwlock_is_locked(p))
  {
    RwlockErrInfo REI = { p->a1 };
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            RwlockErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "Destroying locked rwlock",
                            &REI);
  }

  VG_(OSetGen_ResetIter)(p->thread_info);
  while ((q = VG_(OSetGen_Next)(p->thread_info)))
  {
    vc_cleanup(&q->vc);
  }
  VG_(OSetGen_Destroy)(p->thread_info);
}
Example 11
/** Called before pthread_rwlock_init(). */
struct rwlock_info*
rwlock_pre_init(const Addr rwlock, const SizeT size)
{
  struct rwlock_info* p;

  if (s_trace_rwlock)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] rwlock_init      %s 0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 rwlock);
  }

  p = rwlock_get(rwlock);

  if (p)
  {
    const ThreadId vg_tid = VG_(get_running_tid)();
    RwlockErrInfo REI
      = { p->a1 };
    VG_(maybe_record_error)(vg_tid,
                            RwlockErr,
                            VG_(get_IP)(vg_tid),
                            "Reader-writer lock reinitialization",
                            &REI);
    return p;
  }

  p = rwlock_get_or_allocate(rwlock, size);

  return p;
}
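
A client-side sketch that triggers this report under DRD:

#include <pthread.h>

int main(void)
{
  pthread_rwlock_t lock;

  pthread_rwlock_init(&lock, NULL);
  /* rwlock_pre_init() finds an existing rwlock_info for this address and
   * reports "Reader-writer lock reinitialization". */
  pthread_rwlock_init(&lock, NULL);
  return 0;
}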
Example 12
/** Called before pthread_cond_init(). */
void cond_pre_init(const Addr cond, const SizeT size)
{
  struct cond_info* p;

  if (s_trace_cond)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] cond_init 0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 cond);
  }

  tl_assert(size > 0);

  p = cond_get(cond);

  if (p)
  {
    CondErrInfo cei = { .cond = cond };
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            CondErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "initialized twice",
                            &cei);
  }

  p = cond_get_or_allocate(cond, size);
}
Example 13
static void drd_report_race(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type)
{
  DataRaceErrInfo drei;

  drei.tid  = thread_get_running_tid();
  drei.addr = addr;
  drei.size = size;
  drei.access_type = access_type;
  VG_(maybe_record_error)(VG_(get_running_tid)(),
                          DataRaceErr,
                          VG_(get_IP)(VG_(get_running_tid)()),
                          "Conflicting accesses",
                          &drei);
}
Example 14
VG_REGPARM(2) void drd_trace_store(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
  /* This consistency check is compiled out by default for performance
   * reasons. */
  tl_assert(thread_get_running_tid()
            == VgThreadIdToDrdThreadId(VG_(get_running_tid)()));
#endif

  if (running_thread_is_recording()
      && (s_drd_check_stack_accesses || ! thread_address_on_stack(addr))
      && bm_access_store_triggers_conflict(addr, addr + size)
      && ! drd_is_suppressed(addr, addr + size))
  {
    drd_report_race(addr, size, eStore);
  }
}
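
A classic racy program (illustrative sketch): both workers perform unsynchronized stores to the same global, so bm_access_store_triggers_conflict() fires and drd_report_race() is called with eStore when the program runs under DRD.

#include <pthread.h>
#include <stdio.h>

static int s_counter;  /* shared and unsynchronized on purpose */

static void* worker(void* arg)
{
  int i;
  (void)arg;
  for (i = 0; i < 1000; i++)
    s_counter++;       /* racy load + store */
  return NULL;
}

int main(void)
{
  pthread_t t1, t2;

  pthread_create(&t1, NULL, worker, NULL);
  pthread_create(&t2, NULL, worker, NULL);
  pthread_join(t1, NULL);
  pthread_join(t2, NULL);
  printf("count = %d\n", s_counter);
  return 0;
}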
Example 15
/**
 * Update rwlock_info state when unlocking the pthread_rwlock_t rwlock.
 * Note: this function must be called before pthread_rwlock_unlock() is
 * called, or a race condition is triggered!
 * @param rwlock Pointer to the pthread_rwlock_t data structure in client
 *        address space.
 */
void rwlock_pre_unlock(const Addr rwlock)
{
  const DrdThreadId drd_tid = thread_get_running_tid();
  const ThreadId vg_tid = VG_(get_running_tid)();
  const VectorClock* const vc = thread_get_vc(drd_tid);
  struct rwlock_info* const p = rwlock_get(rwlock);
  struct rwlock_thread_info* q;

  if (s_trace_rwlock && p != 0)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] rwlock_unlock    0x%lx",
                 vg_tid,
                 drd_tid,
                 rwlock);
  }

  if (p == 0 || ! rwlock_is_locked_by(p, drd_tid))
  {
    RwlockErrInfo REI = { rwlock };  /* do not dereference p: it may be 0 */
    VG_(maybe_record_error)(vg_tid,
                            RwlockErr,
                            VG_(get_IP)(vg_tid),
                            "Reader-writer lock not locked by calling thread",
                            &REI);
    return;
  }
  tl_assert(p);
  q = lookup_or_insert_node(p->thread_info, drd_tid);
  tl_assert(q);
  if (q->reader_nesting_count > 0)
    q->reader_nesting_count--;
  else if (q->writer_nesting_count > 0)
    q->writer_nesting_count--;
  else
    tl_assert(False);

  if (q->reader_nesting_count == 0 && q->writer_nesting_count == 0)
  {
    /* This pthread_rwlock_unlock() call really unlocks the rwlock. Save the */
    /* current vector clock of the thread such that it is available when  */
    /* this rwlock is locked again.                                        */
    vc_assign(&q->vc, vc);

    thread_new_segment(drd_tid);
  }
}
Example 16
/**
 * Update mutex_info state when locking the pthread_mutex_t mutex.
 * Note: this function must be called after pthread_mutex_lock() has been
 * called, or a race condition is triggered!
 */
void mutex_post_lock(const Addr mutex, const Bool took_lock)
{
  const DrdThreadId drd_tid = thread_get_running_tid();
  struct mutex_info* p;

  p = mutex_get(mutex);

  if (s_trace_mutex)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] post_mutex_lock %s 0x%lx rc %d owner %d",
                 VG_(get_running_tid)(),
                 drd_tid,
                 p ? mutex_get_typename(p) : "(?)",
                 mutex,
                 p ? p->recursion_count : 0,
                 p ? p->owner : VG_INVALID_THREADID);
  }

  if (! p || ! took_lock)
    return;

  if (p->recursion_count == 0)
  {
    /* Capture the previous owner before overwriting it, so that the
     * happens-before relation with the last unlock can be established. */
    const DrdThreadId last_owner = p->owner;

    if (last_owner != drd_tid && last_owner != DRD_INVALID_THREADID)
      thread_combine_vc2(drd_tid, mutex_get_last_vc(mutex));
    thread_new_segment(drd_tid);

    p->owner = drd_tid;
    s_mutex_lock_count++;
  }
  else if (p->owner != drd_tid)
  {
    VG_(message)(Vg_UserMsg,
                 "The impossible happened: mutex 0x%lx is locked"
                 " simultaneously by two threads (recursion count %d,"
                 " owners %d and %d) !",
                 p->a1, p->recursion_count, p->owner, drd_tid);
    p->owner = drd_tid;
  }
  p->recursion_count++;
}
Example 17
/** Called before sem_wait(). */
void semaphore_pre_wait(const Addr semaphore)
{
  struct semaphore_info* p;

  p = semaphore_get_or_allocate(semaphore);
  if (s_trace_semaphore)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] semaphore_pre_wait 0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 semaphore);
  }
  tl_assert(p);
  tl_assert(p->waiters >= 0);
  p->waiters++;
  tl_assert(p->waiters > 0);
}
Example 18
/** Called before pthread_mutex_init(). */
struct mutex_info*
mutex_init(const Addr mutex, const SizeT size, const MutexT mutex_type)
{
  struct mutex_info* p;

  if (s_trace_mutex)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] mutex_init      %s 0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 mutex_type_name(mutex_type),
                 mutex);
  }

  if (mutex_type == mutex_type_invalid_mutex)
  {
    GenericErrInfo GEI;
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            GenericErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "Not a mutex",
                            &GEI);
    return 0;
  }

  p = mutex_get(mutex);
  if (p)
  {
    const ThreadId vg_tid = VG_(get_running_tid)();
    MutexErrInfo MEI
      = { p->a1, p->recursion_count, p->owner };
    VG_(maybe_record_error)(vg_tid,
                            MutexErr,
                            VG_(get_IP)(vg_tid),
                            "Mutex reinitialization",
                            &MEI);
    return p;
  }
  p = mutex_get_or_allocate(mutex, size, mutex_type);

  return p;
}
Example 19
/** Called before sem_post(). */
void semaphore_pre_post(const DrdThreadId tid, const Addr semaphore)
{
  struct semaphore_info* p;

  if (s_trace_semaphore)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] semaphore_post 0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 semaphore);
  }
  p = semaphore_get_or_allocate(semaphore);
  p->value++;
  if (p->value == 1)
  {
    p->last_sem_post_tid = tid;
    thread_new_segment(tid);
    thread_get_latest_segment(&p->last_sem_post_segment, tid);
  }
}
Example 20
/** Called before sem_init(). */
struct semaphore_info* semaphore_init(const Addr semaphore,
                                      const Word pshared, const UWord value)
{
  struct semaphore_info* p;

  if (s_trace_semaphore)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] semaphore_init 0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 semaphore);
  }
  if (semaphore_get(semaphore))
  {
    // To do: print an error message that a semaphore is being reinitialized.
  }
  p = semaphore_get_or_allocate(semaphore);
  p->value = value;
  return p;
}
Example 21
/** Called after sem_wait() finished.
 *  @note Do not rely on the value of 'waited' -- some glibc versions do
 *        not set it correctly.
 */
void semaphore_post_wait(const DrdThreadId tid, const Addr semaphore,
                         const Bool waited)
{
  struct semaphore_info* p;

  p = semaphore_get(semaphore);
  if (s_trace_semaphore)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] semaphore_post_wait 0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 semaphore);
  }
  tl_assert(p);
  tl_assert(p->waiters > 0);
  p->waiters--;
  tl_assert(p->waiters >= 0);
  tl_assert(p->value >= 0);
  if (p->value == 0)
  {
    SemaphoreErrInfo sei = { semaphore };
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            SemaphoreErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "Invalid semaphore",
                            &sei);
    return;
  }
  p->value--;
  tl_assert(p->value >= 0);
  if (p->last_sem_post_tid != tid
      && p->last_sem_post_tid != DRD_INVALID_THREADID)
  {
    tl_assert(p->last_sem_post_segment);
    thread_combine_vc2(tid, &p->last_sem_post_segment->vc);
  }
  thread_new_segment(tid);
}
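
A sketch of the ordering these two hooks establish: sem_post() hands the poster's vector clock to the next sem_wait() caller via last_sem_post_segment, so the read of s_data below is ordered after the write and DRD reports no race.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static int s_data;
static sem_t s_sem;

static void* producer(void* arg)
{
  (void)arg;
  s_data = 42;        /* happens before the post */
  sem_post(&s_sem);   /* semaphore_pre_post(): new segment, vc published */
  return NULL;
}

int main(void)
{
  pthread_t t;

  sem_init(&s_sem, 0, 0);
  pthread_create(&t, NULL, producer, NULL);
  sem_wait(&s_sem);        /* semaphore_post_wait(): combines the poster's vc */
  printf("%d\n", s_data);  /* ordered read: no race reported */
  pthread_join(t, NULL);
  sem_destroy(&s_sem);
  return 0;
}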
Example 22
/** Deallocate the memory that was allocated by mutex_initialize(). */
static void mutex_cleanup(struct mutex_info* p)
{
  if (s_trace_mutex)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] mutex_destroy   %s 0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 mutex_get_typename(p),
                 p->a1);
  }

  if (mutex_is_locked(p))
  {
    MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            MutexErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "Destroying locked mutex",
                            &MEI);
  }

  vc_cleanup(&p->vc);
}
Example 23
VG_REGPARM(2) void drd_trace_store(Addr addr, SizeT size)
{
    Segment* sg;

#if 0
    /* The assert below has been commented out because of performance reasons.*/
    tl_assert(thread_get_running_tid()
              == VgThreadIdToDrdThreadId(VG_(get_running_tid)()));
#endif

    if (! running_thread_is_recording())
        return;

    if (range_any_is_traced(addr, size))
    {
        drd_trace_mem_access(addr, size, eStore);
    }
    sg = running_thread_get_segment();
    bm_access_range_store(sg->bm, addr, addr + size);
    if (bm_store_has_conflict_with(thread_get_danger_set(), addr, addr + size))
    {
        drd_report_race(addr, size, eStore);
    }
}
Example 24
/** Called after pthread_cond_destroy(). */
void cond_post_destroy(const Addr cond)
{
  struct cond_info* p;

  if (s_trace_cond)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] cond_destroy 0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 cond);
  }

  p = cond_get(cond);
  if (p == 0)
  {
    CondErrInfo cei = { .cond = cond };
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            CondErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "not a condition variable",
                            &cei);
    return;
  }

  /* Remove the client-side object, as in semaphore_destroy() above. */
  clientobj_remove(cond, ClientCondvar);
}
Example 25
/**
 * Update mutex_info state when unlocking the pthread_mutex_t mutex.
 * Note: this function must be called before pthread_mutex_unlock() is
 * called, or a race condition is triggered!
 * @param mutex Pointer to the pthread_mutex_t data structure in client
 *        address space.
 * @param mutex_type Type of the mutex being unlocked.
 */
void mutex_unlock(const Addr mutex, const MutexT mutex_type)
{
  const DrdThreadId drd_tid = thread_get_running_tid();
  const ThreadId vg_tid = VG_(get_running_tid)();
  const VectorClock* const vc = thread_get_vc(drd_tid);
  struct mutex_info* const p = mutex_get(mutex);

  if (s_trace_mutex)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] mutex_unlock    %s 0x%lx rc %d",
                 vg_tid,
                 drd_tid,
                 p ? mutex_get_typename(p) : "?",
                 mutex,
                 p ? p->recursion_count : 0,
                 p ? p->owner : 0);
  }

  if (p == 0 || mutex_type == mutex_type_invalid_mutex)
  {
    GenericErrInfo GEI;
    VG_(maybe_record_error)(vg_tid,
                            GenericErr,
                            VG_(get_IP)(vg_tid),
                            "Not a mutex",
                            &GEI);
    return;
  }

  if (p->owner == DRD_INVALID_THREADID)
  {
    MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
    VG_(maybe_record_error)(vg_tid,
                            MutexErr,
                            VG_(get_IP)(vg_tid),
                            "Mutex not locked",
                            &MEI);
    return;
  }

  tl_assert(p);
  if (p->mutex_type != mutex_type)
  {
    VG_(message)(Vg_UserMsg, "??? mutex %p: type changed from %d into %d",
	         p->a1, p->mutex_type, mutex_type);
  }
  tl_assert(p->mutex_type == mutex_type);
  tl_assert(p->owner != DRD_INVALID_THREADID);

  if (p->owner != drd_tid || p->recursion_count <= 0)
  {
    MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
    VG_(maybe_record_error)(vg_tid,
                            MutexErr,
                            VG_(get_IP)(vg_tid),
                            "Mutex not locked by calling thread",
                            &MEI);
    return;
  }
  tl_assert(p->recursion_count > 0);
  p->recursion_count--;
  tl_assert(p->recursion_count >= 0);

  if (p->recursion_count == 0)
  {
    /* This pthread_mutex_unlock() call really unlocks the mutex. Save the */
    /* current vector clock of the thread such that it is available when  */
    /* this mutex is locked again.                                        */
    vc_assign(&p->vc, vc);

    thread_new_segment(drd_tid);
  }
}
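
Usage sketch for the "Mutex not locked by calling thread" path: a thread that unlocks a mutex owned by another thread trips the check above when run under DRD (POSIX leaves this case undefined for default mutexes).

#include <pthread.h>

static pthread_mutex_t s_mutex = PTHREAD_MUTEX_INITIALIZER;

static void* worker(void* arg)
{
  (void)arg;
  /* This thread does not own s_mutex: mutex_unlock() records the
   * "Mutex not locked by calling thread" error. */
  pthread_mutex_unlock(&s_mutex);
  return NULL;
}

int main(void)
{
  pthread_t t;

  pthread_mutex_lock(&s_mutex);
  pthread_create(&t, NULL, worker, NULL);
  pthread_join(t, NULL);
  pthread_mutex_unlock(&s_mutex);
  return 0;
}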
Example 26
static Bool drd_handle_client_request(ThreadId vg_tid, UWord* arg, UWord* ret)
{
  UWord result = 0;
  const DrdThreadId drd_tid = thread_get_running_tid();

  tl_assert(vg_tid == VG_(get_running_tid)());
  tl_assert(VgThreadIdToDrdThreadId(vg_tid) == drd_tid);

  switch (arg[0])
  {
  case VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID:
    result = vg_tid;
    break;

  case VG_USERREQ__DRD_GET_DRD_THREAD_ID:
    result = drd_tid;
    break;

  case VG_USERREQ__DRD_START_SUPPRESSION:
    drd_start_suppression(arg[1], arg[1] + arg[2], "client");
    break;

  case VG_USERREQ__DRD_FINISH_SUPPRESSION:
    drd_finish_suppression(arg[1], arg[1] + arg[2]);
    break;

  case VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK:
  {
    const Addr topmost_sp = highest_used_stack_address(vg_tid);
#if 0
    UInt nframes;
    const UInt n_ips = 20;
    Addr ips[n_ips], sps[n_ips], fps[n_ips];
    Char desc[128];
    unsigned i;

    nframes = VG_(get_StackTrace)(vg_tid, ips, n_ips, sps, fps, 0);

    VG_(message)(Vg_DebugMsg, "thread %d/%d", vg_tid, drd_tid);
    for (i = 0; i < nframes; i++)
    {
      VG_(describe_IP)(ips[i], desc, sizeof(desc));
      VG_(message)(Vg_DebugMsg, "[%2d] sp 0x%09lx fp 0x%09lx ip %s",
                   i, sps[i], fps[i], desc);
    }
#endif
    thread_set_stack_startup(drd_tid, VG_(get_SP)(vg_tid));
    drd_start_suppression(topmost_sp, VG_(thread_get_stack_max)(vg_tid),
                          "stack top");
    break;
  }

  case VG_USERREQ__DRD_START_NEW_SEGMENT:
    thread_new_segment(PtThreadIdToDrdThreadId(arg[1]));
    break;

  case VG_USERREQ__DRD_START_TRACE_ADDR:
    drd_start_tracing_address_range(arg[1], arg[1] + arg[2]);
    break;

  case VG_USERREQ__DRD_STOP_TRACE_ADDR:
    drd_stop_tracing_address_range(arg[1], arg[1] + arg[2]);
    break;

  case VG_USERREQ__DRD_STOP_RECORDING:
    thread_stop_recording(drd_tid);
    break;

  case VG_USERREQ__DRD_START_RECORDING:
    thread_start_recording(drd_tid);
    break;

  case VG_USERREQ__SET_PTHREADID:
    // pthread_self() returns 0 for programs not linked with libpthread.so.
    if (arg[1] != INVALID_POSIX_THREADID)
      thread_set_pthreadid(drd_tid, arg[1]);
    break;

  case VG_USERREQ__SET_JOINABLE:
    thread_set_joinable(PtThreadIdToDrdThreadId(arg[1]), (Bool)arg[2]);
    break;

  case VG_USERREQ__POST_THREAD_JOIN:
    tl_assert(arg[1]);
    drd_post_thread_join(drd_tid,
                         PtThreadIdToDrdThreadId(arg[1]));
    break;

  case VG_USERREQ__PRE_THREAD_CANCEL:
    tl_assert(arg[1]);
    drd_pre_thread_cancel(drd_tid, PtThreadIdToDrdThreadId(arg[1]));
    break;

  case VG_USERREQ__POST_THREAD_CANCEL:
    tl_assert(arg[1]);
    drd_post_thread_cancel(drd_tid, PtThreadIdToDrdThreadId(arg[1]), arg[2]);
    break;

  case VG_USERREQ__PRE_MUTEX_INIT:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_pre_mutex_init(arg[1], arg[2]);
    break;

  case VG_USERREQ__POST_MUTEX_INIT:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_MUTEX_DESTROY:
    thread_enter_synchr(drd_tid);
    break;

  case VG_USERREQ__POST_MUTEX_DESTROY:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_post_mutex_destroy(arg[1], arg[2]);
    break;

  case VG_USERREQ__PRE_MUTEX_LOCK:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_pre_mutex_lock(arg[1], arg[2], arg[3]);
    break;

  case VG_USERREQ__POST_MUTEX_LOCK:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_post_mutex_lock(arg[1], arg[2]);
    break;

  case VG_USERREQ__PRE_MUTEX_UNLOCK:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_pre_mutex_unlock(arg[1], arg[2]);
    break;

  case VG_USERREQ__POST_MUTEX_UNLOCK:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_spin_init_or_unlock(arg[1]);
    break;

  case VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_COND_INIT:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_pre_cond_init(arg[1]);
    break;

  case VG_USERREQ__POST_COND_INIT:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_COND_DESTROY:
    thread_enter_synchr(drd_tid);
    break;

  case VG_USERREQ__POST_COND_DESTROY:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_post_cond_destroy(arg[1]);
    break;

  case VG_USERREQ__PRE_COND_WAIT:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_pre_cond_wait(arg[1], arg[2], arg[3]);
    break;

  case VG_USERREQ__POST_COND_WAIT:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_post_cond_wait(arg[1], arg[2], arg[3]);
    break;

  case VG_USERREQ__PRE_COND_SIGNAL:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_pre_cond_signal(arg[1]);
    break;

  case VG_USERREQ__POST_COND_SIGNAL:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_COND_BROADCAST:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_pre_cond_broadcast(arg[1]);
    break;

  case VG_USERREQ__POST_COND_BROADCAST:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_SEM_INIT:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_semaphore_init(arg[1], arg[2], arg[3]);
    break;

  case VG_USERREQ__POST_SEM_INIT:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_SEM_DESTROY:
    thread_enter_synchr(drd_tid);
    break;

  case VG_USERREQ__POST_SEM_DESTROY:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_semaphore_destroy(arg[1]);
    break;

  case VG_USERREQ__PRE_SEM_WAIT:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_semaphore_pre_wait(drd_tid, arg[1]);
    break;

  case VG_USERREQ__POST_SEM_WAIT:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_semaphore_post_wait(drd_tid, arg[1], arg[2]);
    break;

  case VG_USERREQ__PRE_SEM_POST:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_semaphore_pre_post(drd_tid, arg[1]);
    break;

  case VG_USERREQ__POST_SEM_POST:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_semaphore_post_post(drd_tid, arg[1], arg[2]);
    break;

  case VG_USERREQ__PRE_BARRIER_INIT:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_barrier_init(arg[1], arg[2], arg[3], arg[4]);
    break;

  case VG_USERREQ__POST_BARRIER_INIT:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_BARRIER_DESTROY:
    thread_enter_synchr(drd_tid);
    break;

  case VG_USERREQ__POST_BARRIER_DESTROY:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_barrier_destroy(arg[1], arg[2]);
    break;

  case VG_USERREQ__PRE_BARRIER_WAIT:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_barrier_pre_wait(drd_tid, arg[1], arg[2]);
    break;

  case VG_USERREQ__POST_BARRIER_WAIT:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_barrier_post_wait(drd_tid, arg[1], arg[2], arg[3]);
    break;

  case VG_USERREQ__PRE_RWLOCK_INIT:
    rwlock_pre_init(arg[1]);
    break;

  case VG_USERREQ__POST_RWLOCK_DESTROY:
    rwlock_post_destroy(arg[1]);
    break;

  case VG_USERREQ__PRE_RWLOCK_RDLOCK:
    if (thread_enter_synchr(drd_tid) == 0)
      rwlock_pre_rdlock(arg[1]);
    break;

  case VG_USERREQ__POST_RWLOCK_RDLOCK:
    if (thread_leave_synchr(drd_tid) == 0)
      rwlock_post_rdlock(arg[1], arg[2]);
    break;

  case VG_USERREQ__PRE_RWLOCK_WRLOCK:
    if (thread_enter_synchr(drd_tid) == 0)
      rwlock_pre_wrlock(arg[1]);
    break;

  case VG_USERREQ__POST_RWLOCK_WRLOCK:
    if (thread_leave_synchr(drd_tid) == 0)
      rwlock_post_wrlock(arg[1], arg[2]);
    break;

  case VG_USERREQ__PRE_RWLOCK_UNLOCK:
    if (thread_enter_synchr(drd_tid) == 0)
      rwlock_pre_unlock(arg[1]);
    break;
      
  case VG_USERREQ__POST_RWLOCK_UNLOCK:
    thread_leave_synchr(drd_tid);
    break;

  default:
    VG_(message)(Vg_DebugMsg, "Unrecognized client request 0x%lx 0x%lx",
                 arg[0], arg[1]);
    tl_assert(0);
    return False;
  }

  *ret = result;
  return True;
}
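
The PRE/POST cases above rely on thread_enter_synchr() returning the nesting level before entering and thread_leave_synchr() the level after leaving, so that only the outermost request of a nested pair does the real work. A minimal sketch of that assumed discipline (hypothetical code, not DRD's actual implementation):

/* Hypothetical per-thread nesting counters; the real functions live in
 * DRD's thread module. */
static int s_synchr_nesting[256 /* assumed thread-table size */];

/* Returns the nesting level *before* entering: the outermost PRE_*
 * request sees 0 and performs the real work. */
static int thread_enter_synchr_sketch(int drd_tid)
{
  return s_synchr_nesting[drd_tid]++;
}

/* Returns the nesting level *after* leaving: the outermost POST_*
 * request sees 0 and performs the real work. */
static int thread_leave_synchr_sketch(int drd_tid)
{
  return --s_synchr_nesting[drd_tid];
}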
Example 27
static Bool drd_handle_client_request(ThreadId tid, UWord* arg, UWord* ret)
{
   UWord result = 0;

   switch (arg[0])
   {
   case VG_USERREQ__GET_THREAD_SELF:
      result = tid;
      break;

   case VG_USERREQ__SET_THREAD_NAME:
      thread_set_name_fmt(VgThreadIdToDrdThreadId(VG_(get_running_tid)()),
                          (char*)arg[1], arg[2]);
      break;

   case VG_USERREQ__DRD_START_SUPPRESSION:
      drd_start_suppression(arg[1], arg[1] + arg[2], "client");
      break;

   case VG_USERREQ__DRD_FINISH_SUPPRESSION:
      drd_finish_suppression(arg[1], arg[1] + arg[2]);
      break;

   case VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK:
      thread_set_stack_startup(thread_get_running_tid(),
                               VG_(get_SP)(VG_(get_running_tid)()));
      break;

   case VG_USERREQ__DRD_START_NEW_SEGMENT:
      thread_new_segment(PtThreadIdToDrdThreadId(arg[1]));
      break;

   case VG_USERREQ__DRD_START_RECORDING:
      thread_start_recording(PtThreadIdToDrdThreadId(arg[1]));
      break;

   case VG_USERREQ__DRD_STOP_RECORDING:
      thread_stop_recording(PtThreadIdToDrdThreadId(arg[1]));
      break;

   case VG_USERREQ__SET_PTHREADID:
      thread_set_pthreadid(thread_get_running_tid(), arg[1]);
      break;

   case VG_USERREQ__SET_JOINABLE:
      thread_set_joinable(PtThreadIdToDrdThreadId(arg[1]), (Bool)arg[2]);
      break;

   case VG_USERREQ__POST_THREAD_JOIN:
      tl_assert(arg[1]);
      drd_post_thread_join(thread_get_running_tid(),
                           PtThreadIdToDrdThreadId(arg[1]));
      break;

   case VG_USERREQ__PRE_MUTEX_INIT:
      drd_pre_mutex_init(arg[1], arg[2], arg[3]);
      break;

   case VG_USERREQ__POST_MUTEX_DESTROY:
      drd_post_mutex_destroy(arg[1], arg[2]);
      break;

   case VG_USERREQ__PRE_PTHREAD_MUTEX_LOCK:
      drd_pre_mutex_lock(thread_get_running_tid(), arg[1], arg[2], arg[3]);
      break;

   case VG_USERREQ__POST_PTHREAD_MUTEX_LOCK:
      drd_post_mutex_lock(thread_get_running_tid(), arg[1], arg[2], arg[3]);
      break;

   case VG_USERREQ__PRE_PTHREAD_MUTEX_UNLOCK:
      drd_pre_mutex_unlock(thread_get_running_tid(), arg[1], arg[3]);
      break;

   case VG_USERREQ__SPIN_INIT_OR_UNLOCK:
      drd_spin_init_or_unlock(arg[1], arg[2]);
      break;

   case VG_USERREQ__POST_PTHREAD_COND_INIT:
      drd_post_cond_init(arg[1], arg[2]);
      break;

   case VG_USERREQ__PRE_PTHREAD_COND_DESTROY:
      drd_pre_cond_destroy(arg[1]);
      break;

   case VG_USERREQ__PRE_PTHREAD_COND_WAIT:
      drd_pre_cond_wait(arg[1]/*cond*/, arg[2]/*cond_size*/, arg[3]/*mutex*/);
      break;

   case VG_USERREQ__POST_PTHREAD_COND_WAIT:
      drd_post_cond_wait(arg[1]/*cond*/, arg[3]/*mutex*/,
                         arg[4]/*mutex_size*/);
      break;

   case VG_USERREQ__PRE_PTHREAD_COND_SIGNAL:
      drd_pre_cond_signal(arg[1]);
      break;

   case VG_USERREQ__PRE_PTHREAD_COND_BROADCAST:
      drd_pre_cond_broadcast(arg[1]);
      break;

   default:
      VG_(message)(Vg_DebugMsg, "Unrecognized client request 0x%lx 0x%lx",
                   arg[0], arg[1]);
      tl_assert(0);
      return False;
   }

   *ret = result;
   return True;
}