Code example #1
File: drd_rwlock.c  Project: svn2github/valgrind-3
/**
 * Update rwlock_info state when locking the pthread_rwlock_t rwlock.
 * Note: this function must be called after pthread_rwlock_wrlock() has been
 * called, or a race condition is triggered !
 */
void rwlock_post_wrlock(const Addr rwlock, const Bool took_lock)
{
  const DrdThreadId drd_tid = thread_get_running_tid();
  struct rwlock_info* p;
  struct rwlock_thread_info* q;

  p = rwlock_get(rwlock);

  if (s_trace_rwlock)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] post_rwlock_wrlock 0x%lx",
                 VG_(get_running_tid)(),
                 drd_tid,
                 rwlock);
  }

  if (! p || ! took_lock)
    return;

  q = lookup_or_insert_node(p->thread_info, thread_get_running_tid());
  tl_assert(q->writer_nesting_count == 0);
  q->writer_nesting_count++;
  tl_assert(q->writer_nesting_count == 1);
  rwlock_combine_other_vc(p, drd_tid);
  thread_new_segment(drd_tid);
}
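A note on the ordering constraint in the comment above: the post hook mutates per-lock bookkeeping that only the new owner may touch, so it must run after the lock has actually been acquired. A minimal standalone sketch of a wrapper that preserves this ordering (hypothetical hook names, not the DRD API):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for DRD's pre/post callbacks. */
static void hook_pre_wrlock(pthread_rwlock_t* rwlock)
{
  printf("pre_wrlock  %p\n", (void*)rwlock);
}

static void hook_post_wrlock(pthread_rwlock_t* rwlock, int took_lock)
{
  printf("post_wrlock %p took_lock=%d\n", (void*)rwlock, took_lock);
}

static int traced_wrlock(pthread_rwlock_t* rwlock)
{
  int ret;

  hook_pre_wrlock(rwlock);
  ret = pthread_rwlock_wrlock(rwlock);
  /* Run the post hook only once the lock is held; running it before the
     acquisition would let two contending threads update the lock's
     bookkeeping concurrently -- the race the comment above warns about. */
  hook_post_wrlock(rwlock, ret == 0);
  return ret;
}

int main(void)
{
  pthread_rwlock_t lock;

  pthread_rwlock_init(&lock, NULL);
  traced_wrlock(&lock);
  pthread_rwlock_unlock(&lock);
  pthread_rwlock_destroy(&lock);
  return 0;
}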
Code example #2
File: drd_main.c  Project: svn2github/valgrind-3
/* after thread drd_joiner joined thread drd_joinee.                         */
void drd_post_thread_join(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
{
  tl_assert(IsValidDrdThreadId(drd_joiner));
  tl_assert(IsValidDrdThreadId(drd_joinee));

  thread_new_segment(drd_joiner);
  thread_combine_vc(drd_joiner, drd_joinee);
  thread_new_segment(drd_joinee);

  if (s_drd_trace_fork_join)
  {
    const ThreadId joiner = DrdThreadIdToVgThreadId(drd_joiner);
    const ThreadId joinee = DrdThreadIdToVgThreadId(drd_joinee);
    const unsigned msg_size = 256;
    char* msg;

    msg = VG_(malloc)("drd.main.dptj.1", msg_size);
    tl_assert(msg);
    VG_(snprintf)(msg, msg_size,
                  "drd_post_thread_join joiner = %d/%d, joinee = %d/%d",
                  joiner, drd_joiner, joinee, drd_joinee);
    if (joiner)
    {
      VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
                    ", new vc: ");
      vc_snprint(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
                 thread_get_vc(drd_joiner));
    }
    VG_(message)(Vg_DebugMsg, "%s", msg);
    VG_(free)(msg);
  }

  if (! s_drd_check_stack_accesses)
  {
    drd_finish_suppression(thread_get_stack_max(drd_joinee)
                           - thread_get_stack_size(drd_joinee),
                           thread_get_stack_max(drd_joinee));
  }
  thread_delete(drd_joinee);
  mutex_thread_delete(drd_joinee);
  cond_thread_delete(drd_joinee);
  semaphore_thread_delete(drd_joinee);
  barrier_thread_delete(drd_joinee);
}
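thread_combine_vc() is the step that turns the join into a happens-before edge: the joiner's vector clock absorbs the joinee's. A minimal sketch of the element-wise maximum such a merge performs (a fixed-size array stands in for DRD's VectorClock; illustration only):

#include <assert.h>

#define NTHREADS 4

/* Combine: joiner[i] = max(joiner[i], joinee[i]) for all i. After the
   merge, everything the joinee did happens-before everything the joiner
   does next. */
static void vc_combine(unsigned joiner[NTHREADS],
                       const unsigned joinee[NTHREADS])
{
  for (int i = 0; i < NTHREADS; i++)
    if (joinee[i] > joiner[i])
      joiner[i] = joinee[i];
}

int main(void)
{
  unsigned joiner[NTHREADS] = { 3, 0, 1, 0 };
  unsigned joinee[NTHREADS] = { 1, 5, 1, 2 };

  vc_combine(joiner, joinee);
  assert(joiner[0] == 3 && joiner[1] == 5 && joiner[2] == 1 && joiner[3] == 2);
  return 0;
}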
Code example #3
File: drd_rwlock.c  Project: svn2github/valgrind-3
/**
 * Update rwlock_info state when unlocking the pthread_rwlock_t rwlock.
 * Note: this function must be called before pthread_rwlock_unlock() is called,
 * or a race condition is triggered !
 * @param rwlock Pointer to the pthread_rwlock_t data structure in the client
 *               address space.
 */
void rwlock_pre_unlock(const Addr rwlock)
{
  const DrdThreadId drd_tid = thread_get_running_tid();
  const ThreadId vg_tid = VG_(get_running_tid)();
  const VectorClock* const vc = thread_get_vc(drd_tid);
  struct rwlock_info* const p = rwlock_get(rwlock);
  struct rwlock_thread_info* q;

  if (s_trace_rwlock && p != 0)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] rwlock_unlock    0x%lx",
                 vg_tid,
                 drd_tid,
                 rwlock);
  }

  if (p == 0 || ! rwlock_is_locked_by(p, drd_tid))
  {
    /* Report the client address of the rwlock; fall back to the argument
       when p == 0 so that no null pointer is dereferenced. */
    RwlockErrInfo REI = { p ? p->a1 : rwlock };
    VG_(maybe_record_error)(vg_tid,
                            RwlockErr,
                            VG_(get_IP)(vg_tid),
                            "Reader-writer lock not locked by calling thread",
                            &REI);
    return;
  }
  tl_assert(p);
  q = lookup_or_insert_node(p->thread_info, drd_tid);
  tl_assert(q);
  if (q->reader_nesting_count > 0)
    q->reader_nesting_count--;
  else if (q->writer_nesting_count > 0)
    q->writer_nesting_count--;
  else
    tl_assert(False);

  if (q->reader_nesting_count == 0 && q->writer_nesting_count == 0)
  {
    /* This pthread_rwlock_unlock() call really unlocks the rwlock. Save
       the current vector clock of the thread such that it is available
       when this rwlock is locked again. */
    vc_assign(&q->vc, vc);

    thread_new_segment(drd_tid);
  }
}
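Note that the function above publishes a new vector clock only when both nesting counts reach zero, so recursively acquired read locks release their happens-before state exactly once. A standalone sketch of that counting rule (hypothetical struct, not the DRD types):

#include <assert.h>
#include <stdbool.h>

struct nesting { int readers; int writers; };

/* Returns true when this unlock really releases the lock, i.e. when
   both nesting counts have dropped to zero. */
static bool pre_unlock(struct nesting* q)
{
  if (q->readers > 0)
    q->readers--;
  else if (q->writers > 0)
    q->writers--;
  else
    assert(false);               /* unlock without a matching lock */
  return q->readers == 0 && q->writers == 0;
}

int main(void)
{
  struct nesting q = { 2, 0 };   /* two recursive read locks */

  assert(!pre_unlock(&q));       /* first unlock: lock still held */
  assert(pre_unlock(&q));        /* second unlock: really released */
  return 0;
}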
Code example #4
File: drd_mutex.c  Project: svn2github/valgrind-3
/**
 * Update mutex_info state when locking the pthread_mutex_t mutex.
 * Note: this function must be called after pthread_mutex_lock() has been
 * called, or a race condition is triggered !
 */
void mutex_post_lock(const Addr mutex, const Bool took_lock)
{
  const DrdThreadId drd_tid = thread_get_running_tid();
  struct mutex_info* p;

  p = mutex_get(mutex);

  if (s_trace_mutex)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] post_mutex_lock %s 0x%lx rc %d owner %d",
                 VG_(get_running_tid)(),
                 drd_tid,
                 p ? mutex_get_typename(p) : "(?)",
                 mutex,
                 p ? p->recursion_count : 0,
                 p ? p->owner : VG_INVALID_THREADID);
  }

  if (! p || ! took_lock)
    return;

  if (p->recursion_count == 0)
  {
    /* Read the previous owner before overwriting it: it is needed below
       to decide whether a happens-before edge must be created. */
    const DrdThreadId last_owner = p->owner;

    if (last_owner != drd_tid && last_owner != DRD_INVALID_THREADID)
      thread_combine_vc2(drd_tid, mutex_get_last_vc(mutex));
    thread_new_segment(drd_tid);

    p->owner = drd_tid;
    s_mutex_lock_count++;
  }
  else if (p->owner != drd_tid)
  {
    VG_(message)(Vg_UserMsg,
                 "The impossible happened: mutex 0x%lx is locked"
                 " simultaneously by two threads (recursion count %d,"
                 " owners %d and %d) !",
                 p->a1, p->recursion_count, p->owner, drd_tid);
    p->owner = drd_tid;
  }
  p->recursion_count++;
}
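Only the 0-to-1 transition of the recursion count imports the previous owner's clock; recursive re-locks by the same thread add no ordering. A compact sketch of that rule (plain ints instead of DRD's types; hypothetical):

#include <assert.h>

#define INVALID_TID (-1)

struct mutex_state { int owner; int recursion_count; };

/* Returns 1 when the caller must combine with the previous owner's
   saved vector clock, i.e. on a real (non-recursive) acquisition
   following an unlock by a different thread. */
static int post_lock(struct mutex_state* m, int tid)
{
  int combine = 0;

  if (m->recursion_count == 0)
  {
    /* Read the previous owner before overwriting it. */
    combine = (m->owner != tid && m->owner != INVALID_TID);
    m->owner = tid;
  }
  m->recursion_count++;
  return combine;
}

int main(void)
{
  struct mutex_state m = { INVALID_TID, 0 };

  assert(post_lock(&m, 1) == 0);   /* first ever lock: nothing to combine */
  assert(post_lock(&m, 1) == 0);   /* recursive re-lock: no ordering */
  m.recursion_count = 0;           /* pretend thread 1 unlocked twice */
  assert(post_lock(&m, 2) == 1);   /* thread 2 combines with thread 1 */
  return 0;
}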
Code example #5
File: drd_main.c  Project: svn2github/valgrind-3
static
void drd_pre_thread_create(const ThreadId creator, const ThreadId created)
{
  const DrdThreadId drd_creator = VgThreadIdToDrdThreadId(creator);
  tl_assert(created != VG_INVALID_THREADID);
  thread_pre_create(drd_creator, created);
  if (IsValidDrdThreadId(drd_creator))
  {
    thread_new_segment(drd_creator);
  }
  if (s_drd_trace_fork_join)
  {
    VG_(message)(Vg_DebugMsg,
                 "drd_pre_thread_create creator = %d/%d, created = %d",
                 creator, drd_creator, created);
  }
}
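The thread_new_segment(drd_creator) call records that everything the creator did before pthread_create() happens-before the new thread's first action, which is why handing initialized data to a child thread is not flagged as a race. A client-side illustration:

#include <pthread.h>
#include <stdio.h>

static int config;               /* written before the thread exists */

static void* child(void* arg)
{
  (void)arg;
  /* Reading config is safe: thread creation orders it, which is the
     edge that thread_new_segment(drd_creator) records above. */
  printf("%d\n", config);
  return NULL;
}

int main(void)
{
  pthread_t tid;

  config = 7;                    /* happens-before pthread_create() */
  pthread_create(&tid, NULL, child, NULL);
  pthread_join(tid, NULL);
  return 0;
}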
Code example #6
File: drd_semaphore.c  Project: svn2github/valgrind-3
/** Called before sem_post(). */
void semaphore_pre_post(const DrdThreadId tid, const Addr semaphore)
{
  struct semaphore_info* p;

  if (s_trace_semaphore)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] semaphore_post 0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 semaphore);
  }
  p = semaphore_get_or_allocate(semaphore);
  p->value++;
  if (p->value == 1)
  {
    p->last_sem_post_tid = tid;
    thread_new_segment(tid);
    thread_get_latest_segment(&p->last_sem_post_segment, tid);
  }
}
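The segment created here at sem_post() is what a later sem_wait() in example #7 combines with, making the classic producer/consumer handoff race-free in DRD's eyes. A minimal client program exercising that edge (standard POSIX calls only):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t sem;
static int shared;               /* handed off through the semaphore */

static void* producer(void* arg)
{
  (void)arg;
  shared = 42;                   /* write happens-before sem_post() */
  sem_post(&sem);
  return NULL;
}

int main(void)
{
  pthread_t tid;

  sem_init(&sem, 0, 0);
  pthread_create(&tid, NULL, producer, NULL);
  sem_wait(&sem);                /* combines with the poster's clock */
  printf("%d\n", shared);        /* ordered read: no race reported */
  pthread_join(tid, NULL);
  sem_destroy(&sem);
  return 0;
}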
Code example #7
File: drd_semaphore.c  Project: svn2github/valgrind-3
/** Called after sem_wait() finished.
 *  @note Do not rely on the value of 'waited' -- some glibc versions do
 *        not set it correctly.
 */
void semaphore_post_wait(const DrdThreadId tid, const Addr semaphore,
                         const Bool waited)
{
  struct semaphore_info* p;

  p = semaphore_get(semaphore);
  if (s_trace_semaphore)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] semaphore_post_wait 0x%lx",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 semaphore);
  }
  tl_assert(p);
  tl_assert(p->waiters > 0);
  p->waiters--;
  tl_assert(p->waiters >= 0);
  tl_assert(p->value >= 0);
  if (p->value == 0)
  {
    SemaphoreErrInfo sei = { semaphore };
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            SemaphoreErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "Invalid semaphore",
                            &sei);
    return;
  }
  p->value--;
  tl_assert(p->value >= 0);
  if (p->last_sem_post_tid != tid
      && p->last_sem_post_tid != DRD_INVALID_THREADID)
  {
    tl_assert(p->last_sem_post_segment);
    thread_combine_vc2(tid, &p->last_sem_post_segment->vc);
  }
  thread_new_segment(tid);
}
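Note the asymmetry above: the waiter combines with the poster's saved segment only when a different thread performed the post, since a thread needs no ordering with itself. A compact sketch of that decision (plain ints, hypothetical):

#include <assert.h>
#include <stdbool.h>

#define INVALID_TID (-1)

struct sem_state { int value; int last_post_tid; };

/* Returns true when the waiter must combine with the last poster's
   saved segment, mirroring semaphore_post_wait() above. */
static bool post_wait(struct sem_state* s, int tid)
{
  assert(s->value > 0);          /* otherwise: "Invalid semaphore" */
  s->value--;
  return s->last_post_tid != tid && s->last_post_tid != INVALID_TID;
}

int main(void)
{
  struct sem_state s = { 1, 2 };

  assert(post_wait(&s, 1));      /* posted by thread 2: combine */
  s.value = 1;
  s.last_post_tid = 1;
  assert(!post_wait(&s, 1));     /* self-post: no ordering needed */
  return 0;
}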
Code example #8
static Bool drd_handle_client_request(ThreadId tid, UWord* arg, UWord* ret)
{
   UWord result = 0;

   switch (arg[0])
   {
   case VG_USERREQ__GET_THREAD_SELF:
      result = tid;
      break;

   case VG_USERREQ__SET_THREAD_NAME:
      thread_set_name_fmt(VgThreadIdToDrdThreadId(VG_(get_running_tid)()),
                          (char*)arg[1], arg[2]);
      break;

   case VG_USERREQ__DRD_START_SUPPRESSION:
      drd_start_suppression(arg[1], arg[1] + arg[2], "client");
      break;

   case VG_USERREQ__DRD_FINISH_SUPPRESSION:
      drd_finish_suppression(arg[1], arg[1] + arg[2]);
      break;

   case VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK:
      thread_set_stack_startup(thread_get_running_tid(),
                               VG_(get_SP)(VG_(get_running_tid)()));
      break;

   case VG_USERREQ__DRD_START_NEW_SEGMENT:
      thread_new_segment(PtThreadIdToDrdThreadId(arg[1]));
      break;

   case VG_USERREQ__DRD_START_RECORDING:
      thread_start_recording(PtThreadIdToDrdThreadId(arg[1]));
      break;

   case VG_USERREQ__DRD_STOP_RECORDING:
      thread_stop_recording(PtThreadIdToDrdThreadId(arg[1]));
      break;

   case VG_USERREQ__SET_PTHREADID:
      thread_set_pthreadid(thread_get_running_tid(), arg[1]);
      break;

   case VG_USERREQ__SET_JOINABLE:
      thread_set_joinable(PtThreadIdToDrdThreadId(arg[1]), (Bool)arg[2]);
      break;

   case VG_USERREQ__POST_THREAD_JOIN:
      tl_assert(arg[1]);
      drd_post_thread_join(thread_get_running_tid(),
                           PtThreadIdToDrdThreadId(arg[1]));
      break;

   case VG_USERREQ__PRE_MUTEX_INIT:
      drd_pre_mutex_init(arg[1], arg[2], arg[3]);
      break;

   case VG_USERREQ__POST_MUTEX_DESTROY:
      drd_post_mutex_destroy(arg[1], arg[2]);
      break;

   case VG_USERREQ__PRE_PTHREAD_MUTEX_LOCK:
      drd_pre_mutex_lock(thread_get_running_tid(), arg[1], arg[2], arg[3]);
      break;

   case VG_USERREQ__POST_PTHREAD_MUTEX_LOCK:
      drd_post_mutex_lock(thread_get_running_tid(), arg[1], arg[2], arg[3]);
      break;

   case VG_USERREQ__PRE_PTHREAD_MUTEX_UNLOCK:
      drd_pre_mutex_unlock(thread_get_running_tid(), arg[1], arg[3]);
      break;

   case VG_USERREQ__SPIN_INIT_OR_UNLOCK:
      drd_spin_init_or_unlock(arg[1], arg[2]);
      break;

   case VG_USERREQ__POST_PTHREAD_COND_INIT:
      drd_post_cond_init(arg[1], arg[2]);
      break;

   case VG_USERREQ__PRE_PTHREAD_COND_DESTROY:
      drd_pre_cond_destroy(arg[1]);
      break;

   case VG_USERREQ__PRE_PTHREAD_COND_WAIT:
      drd_pre_cond_wait(arg[1]/*cond*/, arg[2]/*cond_size*/, arg[3]/*mutex*/);
      break;

   case VG_USERREQ__POST_PTHREAD_COND_WAIT:
      drd_post_cond_wait(arg[1]/*cond*/, arg[3]/*mutex*/,
                         arg[4]/*mutex_size*/);
      break;

   case VG_USERREQ__PRE_PTHREAD_COND_SIGNAL:
      drd_pre_cond_signal(arg[1]);
      break;

   case VG_USERREQ__PRE_PTHREAD_COND_BROADCAST:
      drd_pre_cond_broadcast(arg[1]);
      break;

   default:
      VG_(message)(Vg_DebugMsg, "Unrecognized client request 0x%lx 0x%lx",
                   arg[0], arg[1]);
      tl_assert(0);
      return False;
   }

   *ret = result;
   return True;
}
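Client requests arrive as a word array: arg[0] selects the operation and the remaining words are operands, with address/length pairs encoded as arg[1] and arg[2]. A stripped-down, self-contained sketch of the same dispatch pattern (hypothetical request codes and plain C types, not Valgrind's):

#include <stdio.h>

enum { REQ_GET_SELF = 1, REQ_START_SUPPRESSION = 2 };

/* Returns 1 when the request was recognized, mirroring the handler's
   True/False contract; *ret carries the result word. */
static int handle_request(unsigned long tid, const unsigned long* arg,
                          unsigned long* ret)
{
  unsigned long result = 0;

  switch (arg[0])
  {
  case REQ_GET_SELF:
    result = tid;
    break;

  case REQ_START_SUPPRESSION:
    /* arg[1] = start address, arg[2] = length, as in the handler above. */
    printf("suppress [0x%lx, 0x%lx)\n", arg[1], arg[1] + arg[2]);
    break;

  default:
    return 0;                    /* not ours: let another tool handle it */
  }
  *ret = result;
  return 1;
}

int main(void)
{
  unsigned long arg[3] = { REQ_START_SUPPRESSION, 0x1000, 0x100 };
  unsigned long ret = 0;

  return handle_request(7, arg, &ret) ? 0 : 1;
}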
Code example #9
File: drd_mutex.c  Project: svn2github/valgrind-3
/**
 * Update mutex_info state when unlocking the pthread_mutex_t mutex.
 * Note: this function must be called before pthread_mutex_unlock() is called,
 * or a race condition is triggered !
 * @param mutex      Pointer to the pthread_mutex_t data structure in the
 *                   client address space.
 * @param mutex_type Type of the mutex being unlocked.
 */
void mutex_unlock(const Addr mutex, const MutexT mutex_type)
{
  const DrdThreadId drd_tid = thread_get_running_tid();
  const ThreadId vg_tid = VG_(get_running_tid)();
  const VectorClock* const vc = thread_get_vc(drd_tid);
  struct mutex_info* const p = mutex_get(mutex);

  if (s_trace_mutex)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] mutex_unlock    %s 0x%lx rc %d",
                 vg_tid,
                 drd_tid,
                 p ? mutex_get_typename(p) : "?",
                 mutex,
                 p ? p->recursion_count : 0,
                 p ? p->owner : 0);
  }

  if (p == 0 || mutex_type == mutex_type_invalid_mutex)
  {
    GenericErrInfo GEI;
    VG_(maybe_record_error)(vg_tid,
                            GenericErr,
                            VG_(get_IP)(vg_tid),
                            "Not a mutex",
                            &GEI);
    return;
  }

  if (p->owner == DRD_INVALID_THREADID)
  {
    MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
    VG_(maybe_record_error)(vg_tid,
                            MutexErr,
                            VG_(get_IP)(vg_tid),
                            "Mutex not locked",
                            &MEI);
    return;
  }

  tl_assert(p);
  if (p->mutex_type != mutex_type)
  {
    VG_(message)(Vg_UserMsg, "??? mutex %p: type changed from %d into %d",
	         p->a1, p->mutex_type, mutex_type);
  }
  tl_assert(p->mutex_type == mutex_type);
  tl_assert(p->owner != DRD_INVALID_THREADID);

  if (p->owner != drd_tid || p->recursion_count <= 0)
  {
    MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
    VG_(maybe_record_error)(vg_tid,
                            MutexErr,
                            VG_(get_IP)(vg_tid),
                            "Mutex not locked by calling thread",
                            &MEI);
    return;
  }
  tl_assert(p->recursion_count > 0);
  p->recursion_count--;
  tl_assert(p->recursion_count >= 0);

  if (p->recursion_count == 0)
  {
    /* This pthread_mutex_unlock() call really unlocks the mutex. Save
       the current vector clock of the thread such that it is available
       when this mutex is locked again. */
    vc_assign(&p->vc, vc);

    thread_new_segment(drd_tid);
  }
}
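The vc_assign() at the final unlock is the other half of the edge built in example #4: the next thread to lock the mutex combines with the clock saved here, ordering itself after everything the previous owner did. A client-side program relying on exactly that edge:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
static int counter;

static void* worker(void* arg)
{
  (void)arg;
  pthread_mutex_lock(&mu);     /* combines with the last unlocker's clock */
  counter++;                   /* ordered with the other thread's access */
  pthread_mutex_unlock(&mu);   /* saves this thread's clock for the next */
  return NULL;
}

int main(void)
{
  pthread_t t1, t2;

  pthread_create(&t1, NULL, worker, NULL);
  pthread_create(&t2, NULL, worker, NULL);
  pthread_join(t1, NULL);
  pthread_join(t2, NULL);
  printf("%d\n", counter);     /* always 2: each increment is ordered */
  return 0;
}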
Code example #10
File: drd_clientreq.c  Project: svn2github/valgrind-3
static Bool drd_handle_client_request(ThreadId vg_tid, UWord* arg, UWord* ret)
{
  UWord result = 0;
  const DrdThreadId drd_tid = thread_get_running_tid();

  tl_assert(vg_tid == VG_(get_running_tid)());
  tl_assert(VgThreadIdToDrdThreadId(vg_tid) == drd_tid);

  switch (arg[0])
  {
  case VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID:
    result = vg_tid;
    break;

  case VG_USERREQ__DRD_GET_DRD_THREAD_ID:
    result = drd_tid;
    break;

  case VG_USERREQ__DRD_START_SUPPRESSION:
    drd_start_suppression(arg[1], arg[1] + arg[2], "client");
    break;

  case VG_USERREQ__DRD_FINISH_SUPPRESSION:
    drd_finish_suppression(arg[1], arg[1] + arg[2]);
    break;

  case VG_USERREQ__DRD_SUPPRESS_CURRENT_STACK:
  {
    const Addr topmost_sp = highest_used_stack_address(vg_tid);
#if 0
    UInt nframes;
    const UInt n_ips = 20;
    Addr ips[n_ips], sps[n_ips], fps[n_ips];
    Char desc[128];
    unsigned i;

    nframes = VG_(get_StackTrace)(vg_tid, ips, n_ips, sps, fps, 0);

    VG_(message)(Vg_DebugMsg, "thread %d/%d", vg_tid, drd_tid);
    for (i = 0; i < nframes; i++)
    {
      VG_(describe_IP)(ips[i], desc, sizeof(desc));
      VG_(message)(Vg_DebugMsg, "[%2d] sp 0x%09lx fp 0x%09lx ip %s",
                   i, sps[i], fps[i], desc);
    }
#endif
    thread_set_stack_startup(drd_tid, VG_(get_SP)(vg_tid));
    drd_start_suppression(topmost_sp, VG_(thread_get_stack_max)(vg_tid),
                          "stack top");
    break;
  }

  case VG_USERREQ__DRD_START_NEW_SEGMENT:
    thread_new_segment(PtThreadIdToDrdThreadId(arg[1]));
    break;

  case VG_USERREQ__DRD_START_TRACE_ADDR:
    drd_start_tracing_address_range(arg[1], arg[1] + arg[2]);
    break;

  case VG_USERREQ__DRD_STOP_TRACE_ADDR:
    drd_stop_tracing_address_range(arg[1], arg[1] + arg[2]);
    break;

  case VG_USERREQ__DRD_STOP_RECORDING:
    thread_stop_recording(drd_tid);
    break;

  case VG_USERREQ__DRD_START_RECORDING:
    thread_start_recording(drd_tid);
    break;

  case VG_USERREQ__SET_PTHREADID:
    // pthread_self() returns 0 for programs not linked with libpthread.so.
    if (arg[1] != INVALID_POSIX_THREADID)
      thread_set_pthreadid(drd_tid, arg[1]);
    break;

  case VG_USERREQ__SET_JOINABLE:
    thread_set_joinable(PtThreadIdToDrdThreadId(arg[1]), (Bool)arg[2]);
    break;

  case VG_USERREQ__POST_THREAD_JOIN:
    tl_assert(arg[1]);
    drd_post_thread_join(drd_tid,
                         PtThreadIdToDrdThreadId(arg[1]));
    break;

  case VG_USERREQ__PRE_THREAD_CANCEL:
    tl_assert(arg[1]);
    drd_pre_thread_cancel(drd_tid, PtThreadIdToDrdThreadId(arg[1]));
    break;

  case VG_USERREQ__POST_THREAD_CANCEL:
    tl_assert(arg[1]);
    drd_post_thread_cancel(drd_tid, PtThreadIdToDrdThreadId(arg[1]), arg[2]);
    break;

  case VG_USERREQ__PRE_MUTEX_INIT:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_pre_mutex_init(arg[1], arg[2]);
    break;

  case VG_USERREQ__POST_MUTEX_INIT:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_MUTEX_DESTROY:
    thread_enter_synchr(drd_tid);
    break;

  case VG_USERREQ__POST_MUTEX_DESTROY:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_post_mutex_destroy(arg[1], arg[2]);
    break;

  case VG_USERREQ__PRE_MUTEX_LOCK:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_pre_mutex_lock(arg[1], arg[2], arg[3]);
    break;

  case VG_USERREQ__POST_MUTEX_LOCK:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_post_mutex_lock(arg[1], arg[2]);
    break;

  case VG_USERREQ__PRE_MUTEX_UNLOCK:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_pre_mutex_unlock(arg[1], arg[2]);
    break;

  case VG_USERREQ__POST_MUTEX_UNLOCK:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_spin_init_or_unlock(arg[1]);
    break;

  case VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_COND_INIT:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_pre_cond_init(arg[1]);
    break;

  case VG_USERREQ__POST_COND_INIT:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_COND_DESTROY:
    thread_enter_synchr(drd_tid);
    break;

  case VG_USERREQ__POST_COND_DESTROY:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_post_cond_destroy(arg[1]);
    break;

  case VG_USERREQ__PRE_COND_WAIT:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_pre_cond_wait(arg[1], arg[2], arg[3]);
    break;

  case VG_USERREQ__POST_COND_WAIT:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_post_cond_wait(arg[1], arg[2], arg[3]);
    break;

  case VG_USERREQ__PRE_COND_SIGNAL:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_pre_cond_signal(arg[1]);
    break;

  case VG_USERREQ__POST_COND_SIGNAL:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_COND_BROADCAST:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_pre_cond_broadcast(arg[1]);
    break;

  case VG_USERREQ__POST_COND_BROADCAST:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_SEM_INIT:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_semaphore_init(arg[1], arg[2], arg[3]);
    break;

  case VG_USERREQ__POST_SEM_INIT:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_SEM_DESTROY:
    thread_enter_synchr(drd_tid);
    break;

  case VG_USERREQ__POST_SEM_DESTROY:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_semaphore_destroy(arg[1]);
    break;

  case VG_USERREQ__PRE_SEM_WAIT:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_semaphore_pre_wait(drd_tid, arg[1]);
    break;

  case VG_USERREQ__POST_SEM_WAIT:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_semaphore_post_wait(drd_tid, arg[1], arg[2]);
    break;

  case VG_USERREQ__PRE_SEM_POST:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_semaphore_pre_post(drd_tid, arg[1]);
    break;

  case VG_USERREQ__POST_SEM_POST:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_semaphore_post_post(drd_tid, arg[1], arg[2]);
    break;

  case VG_USERREQ__PRE_BARRIER_INIT:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_barrier_init(arg[1], arg[2], arg[3], arg[4]);
    break;

  case VG_USERREQ__POST_BARRIER_INIT:
    thread_leave_synchr(drd_tid);
    break;

  case VG_USERREQ__PRE_BARRIER_DESTROY:
    thread_enter_synchr(drd_tid);
    break;

  case VG_USERREQ__POST_BARRIER_DESTROY:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_barrier_destroy(arg[1], arg[2]);
    break;

  case VG_USERREQ__PRE_BARRIER_WAIT:
    if (thread_enter_synchr(drd_tid) == 0)
      drd_barrier_pre_wait(drd_tid, arg[1], arg[2]);
    break;

  case VG_USERREQ__POST_BARRIER_WAIT:
    if (thread_leave_synchr(drd_tid) == 0)
      drd_barrier_post_wait(drd_tid, arg[1], arg[2], arg[3]);
    break;

  case VG_USERREQ__PRE_RWLOCK_INIT:
    rwlock_pre_init(arg[1]);
    break;

  case VG_USERREQ__POST_RWLOCK_DESTROY:
    rwlock_post_destroy(arg[1]);
    break;

  case VG_USERREQ__PRE_RWLOCK_RDLOCK:
    if (thread_enter_synchr(drd_tid) == 0)
      rwlock_pre_rdlock(arg[1]);
    break;

  case VG_USERREQ__POST_RWLOCK_RDLOCK:
    if (thread_leave_synchr(drd_tid) == 0)
      rwlock_post_rdlock(arg[1], arg[2]);
    break;

  case VG_USERREQ__PRE_RWLOCK_WRLOCK:
    if (thread_enter_synchr(drd_tid) == 0)
      rwlock_pre_wrlock(arg[1]);
    break;

  case VG_USERREQ__POST_RWLOCK_WRLOCK:
    if (thread_leave_synchr(drd_tid) == 0)
      rwlock_post_wrlock(arg[1], arg[2]);
    break;

  case VG_USERREQ__PRE_RWLOCK_UNLOCK:
    if (thread_enter_synchr(drd_tid) == 0)
      rwlock_pre_unlock(arg[1]);
    break;

  case VG_USERREQ__POST_RWLOCK_UNLOCK:
    thread_leave_synchr(drd_tid);
    break;

  default:
    VG_(message)(Vg_DebugMsg, "Unrecognized client request 0x%lx 0x%lx",
                 arg[0], arg[1]);
    tl_assert(0);
    return False;
  }

  *ret = result;
  return True;
}
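The thread_enter_synchr()/thread_leave_synchr() pairs threaded through this handler make DRD ignore synchronization performed inside another intercepted primitive (for instance, the mutex re-lock inside pthread_cond_wait()): only the outermost level, where the counter is zero, is processed. A sketch of that depth-counter idiom (hypothetical names; the counter is per-thread in DRD but global here):

#include <assert.h>
#include <stdio.h>

static int synchr_nesting;       /* per-thread in DRD; global here */

/* Returns the nesting level before entering, matching the pattern
   "if (thread_enter_synchr(...) == 0) process();" used above. */
static int enter_synchr(void) { return synchr_nesting++; }

static int leave_synchr(void)
{
  assert(synchr_nesting > 0);
  return --synchr_nesting;
}

static void lock_mutex(void)
{
  if (enter_synchr() == 0)
    printf("outermost lock: processed\n");
  else
    printf("nested lock: ignored\n");
  leave_synchr();
}

static void cond_wait(void)
{
  if (enter_synchr() == 0)
    printf("cond_wait: processed\n");
  lock_mutex();                  /* the re-lock inside cond_wait is nested */
  leave_synchr();
}

int main(void)
{
  lock_mutex();                  /* processed */
  cond_wait();                   /* cond_wait processed, inner lock ignored */
  return 0;
}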