Example #1
// pthread_rwlock_init
PTH_FUNC(int, pthreadZurwlockZuinit, // pthread_rwlock_init
              pthread_rwlock_t *rwl,
              pthread_rwlockattr_t* attr)
{
   int    ret;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   if (TRACE_PTH_FNS) {
      fprintf(stderr, "<< pthread_rwl_init %p", rwl); fflush(stderr);
   }

   CALL_FN_W_WW(ret, fn, rwl,attr);

   if (ret == 0 /*success*/) {
      DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,
                  pthread_rwlock_t*,rwl);
   } else { 
      DO_PthAPIerror( "pthread_rwlock_init", ret );
   }

   if (TRACE_PTH_FNS) {
      fprintf(stderr, " :: rwl_init -> %d >>\n", ret);
   }
   return ret;
}
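
All of these wrappers follow the same pattern from Valgrind's public valgrind.h API: declare the replacement under an encoded name, fetch the original function with VALGRIND_GET_ORIG_FN, and invoke it through the CALL_FN_* macro matching its signature (CALL_FN_W_WW here: word-sized result, two word-sized arguments). A minimal sketch of that pattern, assuming a hypothetical int foo(int, int) defined in the main executable:

#include <stdio.h>
#include <valgrind/valgrind.h>

/* Hypothetical target: int foo(int, int) in an object with no soname
   (the "NONE" pattern), e.g. the main executable. */
int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int a, int b)
{
   int    r;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);            /* look up the real foo */
   fprintf(stderr, "before foo(%d, %d)\n", a, b);
   CALL_FN_W_WW(r, fn, a, b);           /* word result, two word-sized args */
   fprintf(stderr, "after foo -> %d\n", r);
   return r;
}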
Example #2
// pthread_join
PTH_FUNC(int, pthreadZujoin, // pthread_join
              pthread_t thread, void** value_pointer)
{
   int ret;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   if (TRACE_PTH_FNS) {
      fprintf(stderr, "<< pthread_join wrapper"); fflush(stderr);
   }

   CALL_FN_W_WW(ret, fn, thread,value_pointer);

   /* At least with NPTL as the thread library, this is safe because
      NPTL guarantees that the thread being joined is completely gone
      before pthread_join (the original) returns. */
   if (ret == 0 /*success*/) {
      DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_JOIN_POST, pthread_t,thread);
   } else { 
      DO_PthAPIerror( "pthread_join", ret );
   }

   if (TRACE_PTH_FNS) {
      fprintf(stderr, " :: pth_join -> %d >>\n", ret);
   }
   return ret;
}
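
For reference, the client-side call path this wrapper intercepts is plain pthread_create/pthread_join; the names below are illustrative only:

#include <pthread.h>

static void* worker(void* arg)
{
   (void)arg;
   return (void*)42;                /* value handed back to the joiner */
}

int run_and_join(void)
{
   pthread_t tid;
   void* result = NULL;
   if (pthread_create(&tid, NULL, worker, NULL) != 0)
      return -1;
   pthread_join(tid, &result);      /* on success the wrapper reports JOIN_POST */
   return (int)(long)result;        /* 42 */
}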
Example #3
// pthread_mutex_timedlock.  Identical logic to pthread_mutex_trylock.
PTH_FUNC(int, pthreadZumutexZutimedlock, // pthread_mutex_timedlock
	 pthread_mutex_t *mutex,
         void* timeout)
{
   int    ret;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   if (TRACE_PTH_FNS) {
      fprintf(stderr, "<< pthread_mxtimedlock %p %p", mutex, timeout); 
      fflush(stderr);
   }

   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,
                pthread_mutex_t*,mutex, long,1/*isTryLock-ish*/);

   CALL_FN_W_WW(ret, fn, mutex,timeout);

   /* There's a hole here: libpthread now knows the lock is locked,
      but the tool doesn't, so some other thread could run and detect
      that the lock has been acquired by someone (this thread).  Does
      this matter?  Not sure, but I don't think so. */

   if (ret == 0 /*success*/) {
      DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,
                  pthread_mutex_t*,mutex);
   } else { 
      if (ret != ETIMEDOUT)
         DO_PthAPIerror( "pthread_mutex_timedlock", ret );
   }

   if (TRACE_PTH_FNS) {
      fprintf(stderr, " :: mxtimedlock -> %d >>\n", ret);
   }
   return ret;
}
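
The DO_CREQ_v_W / DO_CREQ_v_WW helpers used throughout these Helgrind wrappers are thin shims over the generic client-request mechanism in valgrind.h. A rough sketch of how such helpers can be built (the real definitions live in Helgrind's hg_intercepts.c and may differ in detail; the type parameters only document the expected argument types):

#include <valgrind/valgrind.h>

/* Sketch only: pack the arguments into machine words and issue a
   client request that has no result. Not the verbatim Helgrind macros. */
#define DO_CREQ_v_W(_creq, _ty1, _arg1)                               \
   do {                                                               \
      long _w1 = (long)(_arg1);                                       \
      VALGRIND_DO_CLIENT_REQUEST_STMT((_creq), _w1, 0, 0, 0, 0);      \
   } while (0)

#define DO_CREQ_v_WW(_creq, _ty1, _arg1, _ty2, _arg2)                 \
   do {                                                               \
      long _w1 = (long)(_arg1);                                       \
      long _w2 = (long)(_arg2);                                       \
      VALGRIND_DO_CLIENT_REQUEST_STMT((_creq), _w1, _w2, 0, 0, 0);    \
   } while (0)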
Example #4
// pthread_mutex_init
PTH_FUNC(int, pthreadZumutexZuinit, // pthread_mutex_init
              pthread_mutex_t *mutex,
              pthread_mutexattr_t* attr)
{
   int    ret;
   long   mbRec;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   if (TRACE_PTH_FNS) {
      fprintf(stderr, "<< pthread_mxinit %p", mutex); fflush(stderr);
   }

   mbRec = 0;
   if (attr) {
      int ty, zzz;
      zzz = pthread_mutexattr_gettype(attr, &ty);
      if (zzz == 0 && ty == PTHREAD_MUTEX_RECURSIVE)
         mbRec = 1;
   }

   CALL_FN_W_WW(ret, fn, mutex,attr);

   if (ret == 0 /*success*/) {
      DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,
                   pthread_mutex_t*,mutex, long,mbRec);
   } else { 
      DO_PthAPIerror( "pthread_mutex_init", ret );
   }

   if (TRACE_PTH_FNS) {
      fprintf(stderr, " :: mxinit -> %d >>\n", ret);
   }
   return ret;
}
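
The mbRec flag above is driven entirely by the attribute the client passes in. A plain-pthreads sketch of the client code that takes the recursive branch (no Valgrind headers needed):

#include <pthread.h>

/* With an attribute of type PTHREAD_MUTEX_RECURSIVE, the wrapper above
   passes mbRec = 1 to the tool. */
int make_recursive_mutex(pthread_mutex_t *mx)
{
   pthread_mutexattr_t attr;
   int rc = pthread_mutexattr_init(&attr);
   if (rc != 0)
      return rc;
   pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
   rc = pthread_mutex_init(mx, &attr);   /* intercepted by the wrapper above */
   pthread_mutexattr_destroy(&attr);
   return rc;
}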
Example #5
// TODO: Can we do additional checks?
CUresult I_WRAP_SONAME_FNNAME_ZZ(libcudaZdsoZa, cuArrayGetDescriptor)(CUDA_ARRAY_DESCRIPTOR *pArrayDescriptor, CUarray hArray) {
   OrigFn         fn;
   CUresult       result;
   CUcontext      ctx = NULL;
   cgCtxListType  *ctxNode;
   cgArrListType  *node;
   
   VALGRIND_GET_ORIG_FN(fn);
   cgLock();
   CALL_FN_W_WW(result, fn, pArrayDescriptor, hArray);
   
   // Determine context of current thread ..
   cgGetCtx(&ctx);
   // .. locate the respective ctx node ..
   ctxNode = cgFindCtx(ctx);
   // .. and finally locate the array in the context's list of arrays.
   node = cgFindArr(ctxNode, hArray);
   
   if (result == CUDA_SUCCESS && !node) {
      VALGRIND_PRINTF("cuArrayGetDescriptor returned successfully, but array not found\n");
      VALGRIND_PRINTF_BACKTRACE("   in cudagrind's internal list. Reason: Unknown\n");
   } else if (result != CUDA_SUCCESS && node) {
      VALGRIND_PRINTF("cuArrayGetDescriptor returned with error code: %d,\n", result);
      VALGRIND_PRINTF_BACKTRACE("   but array is found in cudagrind's internal list.\n");
   } else if (result != CUDA_SUCCESS) {
      VALGRIND_PRINTF("cuArrayGetDescriptor returned with error code: %d,\n", result);
      VALGRIND_PRINTF_BACKTRACE("   possible reason: Wrong context or array not previously created.\n");
   }
   
   cgUnlock();
   return result;
}
Example #6
// pthread_cond_wait
PTH_FUNC(int, pthreadZucondZuwaitZAZa, // pthread_cond_wait@*
              pthread_cond_t* cond, pthread_mutex_t* mutex)
{
   int ret;
   OrigFn fn;
   unsigned long mutex_is_valid;

   VALGRIND_GET_ORIG_FN(fn);

   if (TRACE_PTH_FNS) {
      fprintf(stderr, "<< pthread_cond_wait %p %p", cond, mutex);
      fflush(stderr);
   }

   /* Tell the tool a cond-wait is about to happen, so it can check
      for bogus argument values.  In return it tells us whether it
      thinks the mutex is valid or not. */
   DO_CREQ_W_WW(mutex_is_valid,
                _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE,
                pthread_cond_t*,cond, pthread_mutex_t*,mutex);
   assert(mutex_is_valid == 1 || mutex_is_valid == 0);

   /* Tell the tool we're about to drop the mutex.  This reflects the
      fact that in a cond_wait, we show up holding the mutex, and the
      call atomically drops the mutex and waits for the cv to be
      signalled. */
   if (mutex_is_valid) {
      DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,
                  pthread_mutex_t*,mutex);
   }

   CALL_FN_W_WW(ret, fn, cond,mutex);

   /* these conditionals look stupid, but compare w/ same logic for
      pthread_cond_timedwait below */
   if (ret == 0 && mutex_is_valid) {
      /* and now we have the mutex again */
      DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,
                  pthread_mutex_t*,mutex);
   }

   if (ret == 0 && mutex_is_valid) {
      DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_COND_WAIT_POST,
                   pthread_cond_t*,cond, pthread_mutex_t*,mutex);
   }

   if (ret != 0) {
      DO_PthAPIerror( "pthread_cond_wait", ret );
   }

   if (TRACE_PTH_FNS) {
      fprintf(stderr, " cowait -> %d >>\n", ret);
   }

   return ret;
}
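
The UNLOCK_PRE/LOCK_POST pair mirrors what the client code does implicitly: it enters pthread_cond_wait holding the mutex, and the call atomically releases and later re-acquires it. A standard client-side wait loop, with illustrative names:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
static bool ready = false;

void wait_until_ready(void)
{
   pthread_mutex_lock(&mx);
   while (!ready)                    /* re-check: spurious wakeups are allowed */
      pthread_cond_wait(&cv, &mx);   /* drops mx while waiting, re-locks on return */
   pthread_mutex_unlock(&mx);
}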
Example #7
static __always_inline
int pthread_rwlock_init_intercept(pthread_rwlock_t* rwlock,
                                  const pthread_rwlockattr_t* attr)
{
   int   ret;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_RWLOCK_INIT,
                                   rwlock, 0, 0, 0, 0);
   CALL_FN_W_WW(ret, fn, rwlock, attr);
   return ret;
}
Example #8
UInt I_WRAP_SONAME_FNNAME_ZU(NONE,fn_2) ( UInt a1, UInt a2 )
{
   UInt r;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   printf("fn_2  wrapper pre ( %d, %d )\n", (int)a1, (int)a2);
   CALL_FN_W_WW(r, fn, a1, a2);
   printf("fn_2  wrapper post1 = %d\n", (int)r);
   CALL_FN_v_WW(fn, a1, a2);
   printf("fn_2  wrapper post2 = %d\n", (int)r);
   return r;
}
Example #9
// pthread_spin_init
PTH_FUNC(int, pthreadZuspinZuinit, // pthread_spin_init
         pthread_spinlock_t *spinlock,
         int pshared)
{
    int ret;
    int res;
    OrigFn fn;
    VALGRIND_GET_ORIG_FN(fn);
    VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__SPIN_INIT_OR_UNLOCK,
                               spinlock, mutex_type_spinlock, 0, 0, 0);
    CALL_FN_W_WW(ret, fn, spinlock, pshared);
    return ret;
}
Example #10
static __always_inline
int sem_timedwait_intercept(sem_t *sem, const struct timespec *abs_timeout)
{
   int   ret;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SEM_WAIT,
                                   sem, 0, 0, 0, 0);
   CALL_FN_W_WW(ret, fn, sem, abs_timeout);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SEM_WAIT,
                                   sem, ret == 0, 0, 0, 0);
   return ret;
}
Example #11
// QMutex::QMutex(RecursionMode) -- _ZN6QMutexC1ENS_13RecursionModeE,
QT4CORE_FUNC(void, _ZN6QMutexC1ENS_13RecursionModeE,
             void* mutex,
             qt_mutex_mode mode)
{
   int    ret;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_INIT,
                                   mutex, qt_to_drd_mutex_type(mode), 0, 0, 0);
   CALL_FN_W_WW(ret, fn, mutex, mode);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_INIT,
                                   mutex, 0, 0, 0, 0);
}
Example #12
static __always_inline
int pthread_spin_init_intercept(pthread_spinlock_t *spinlock, int pshared)
{
   int ret;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK,
                                   spinlock, 0, 0, 0, 0);
   CALL_FN_W_WW(ret, fn, spinlock, pshared);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK,
                                   spinlock, 0, 0, 0, 0);
   return ret;
}
Example #13
GOMP_FUNC(void, gompZubarrierZureinit, // gomp_barrier_reinit
          gomp_barrier_t* barrier, unsigned count)
{
    int    ret;
    int    res;
    OrigFn fn;
    VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_BARRIER_INIT,
                               barrier, gomp_barrier, count, 1, 0);
    VALGRIND_GET_ORIG_FN(fn);
    CALL_FN_W_WW(ret, fn, barrier, count);
    VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_BARRIER_INIT,
                               barrier, gomp_barrier, 0, 0, 0);
}
Example #14
// pthread_cond_init
PTH_FUNC(int, pthreadZucondZuinitZa, // pthread_cond_init*
         pthread_cond_t* cond,
         const pthread_condattr_t* attr)
{
    int ret;
    int res;
    OrigFn fn;
    VALGRIND_GET_ORIG_FN(fn);
    VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_INIT,
                               cond, 0, 0, 0, 0);
    CALL_FN_W_WW(ret, fn, cond, attr);
    return ret;
}
Example #15
static __always_inline
int pthread_cond_wait_intercept(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
   int   ret;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_COND_WAIT,
                                   cond, mutex, DRD_(mutex_type)(mutex), 0, 0);
   CALL_FN_W_WW(ret, fn, cond, mutex);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_COND_WAIT,
                                   cond, mutex, 1, 0, 0);
   return ret;
}
Example #16
static __always_inline
int pthread_cond_init_intercept(pthread_cond_t* cond,
                                const pthread_condattr_t* attr)
{
   int ret;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_COND_INIT,
                                   cond, 0, 0, 0, 0);
   CALL_FN_W_WW(ret, fn, cond, attr);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_COND_INIT,
                                   cond, 0, 0, 0, 0);
   return ret;
}
Example #17
void* VG_WRAP_FUNCTION_ZZ(dyld, ZuZZN4dyld18fastBindLazySymbolEPP11ImageLoaderm)
     (void** imageLoaderCache, uintptr_t lazyBindingInfoOffset)
{
   void* res;
   OrigFn fn;

   VALGRIND_GET_ORIG_FN(fn);

   ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
   CALL_FN_W_WW(res, fn, imageLoaderCache, lazyBindingInfoOffset);
   ANNOTATE_IGNORE_READS_AND_WRITES_END();

   return res;
}
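
The ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN/END pair can also be used from client code to silence reports about deliberately unsynchronized accesses; a minimal sketch, assuming DRD's <valgrind/drd.h> provides the annotation macros:

#include <valgrind/drd.h>

/* Hypothetical diagnostic counter, read and written without locking;
   the annotations tell the tool to ignore these accesses. */
static unsigned long approx_hits;

void count_hit(void)
{
   ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
   approx_hits++;                   /* racy on purpose */
   ANNOTATE_IGNORE_READS_AND_WRITES_END();
}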
Example #18
static __always_inline
int pthread_rwlock_timedwrlock_intercept(pthread_rwlock_t* rwlock,
                                         const struct timespec *timeout)
{
   int   ret;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_RWLOCK_WRLOCK,
                                   rwlock, 0, 0, 0, 0);
   CALL_FN_W_WW(ret, fn, rwlock, timeout);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_RWLOCK_WRLOCK,
                                   rwlock, ret == 0, 0, 0, 0);
   return ret;
}
Example #19
// QMutex::tryLock(int) -- _ZN6QMutex7tryLockEi
QT4CORE_FUNC(int, _ZN6QMutex7tryLockEi,
             void* mutex,
             int timeout_ms)
{
   int    ret;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_LOCK,
                                   mutex, mutex_type(mutex), 1, 0, 0);
   CALL_FN_W_WW(ret, fn, mutex, timeout_ms);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_LOCK,
                                   mutex, ret, 0, 0, 0);
   return ret;
}
Example #20
long I_WRAP_SONAME_FNNAME_ZU(NONE,foo) (long x, long y)
{

	OrigFn fn;
	VALGRIND_GET_ORIG_FN(fn);

	printf("WRAPPER: foo(%ld, %ld);\n", x, y);

	long result;
	CALL_FN_W_WW(result, fn, x,y);
	printf("WRAPPER: return %ld;\n", result);

	return result;
}
Example #21
// pthread_rwlock_init
PTH_FUNC(int,
         pthreadZurwlockZuinitZa, // pthread_rwlock_init*
         pthread_rwlock_t* rwlock,
         const pthread_rwlockattr_t* attr)
{
   int   ret;
   int   res;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_RWLOCK_INIT,
                              rwlock, 0, 0, 0, 0);
   CALL_FN_W_WW(ret, fn, rwlock, attr);
   return ret;
}
Example #22
// sem_timedwait
PTH_FUNC(int, semZutimedwait, // sem_timedwait
         sem_t *sem, const struct timespec *abs_timeout)
{
   int   ret;
   int   res;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_SEM_WAIT,
                              sem, 0, 0, 0, 0);
   CALL_FN_W_WW(ret, fn, sem, abs_timeout);
   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_SEM_WAIT,
                              sem, ret == 0, 0, 0, 0);
   return ret;
}
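
Client-side, sem_timedwait takes an absolute CLOCK_REALTIME deadline, which is what arrives here as abs_timeout. A standard usage sketch with an illustrative five-second timeout:

#include <errno.h>
#include <semaphore.h>
#include <time.h>

/* Wait up to five seconds for `sem`; returns 0 on success, else errno. */
int wait_with_timeout(sem_t *sem)
{
   struct timespec deadline;
   clock_gettime(CLOCK_REALTIME, &deadline);
   deadline.tv_sec += 5;
   while (sem_timedwait(sem, &deadline) == -1) {
      if (errno == EINTR)
         continue;          /* retry if interrupted by a signal */
      return errno;         /* ETIMEDOUT or another error */
   }
   return 0;
}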
Example #23
static __always_inline
int pthread_mutex_timedlock_intercept(pthread_mutex_t *mutex,
                                      const struct timespec *abs_timeout)
{
   int   ret;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_LOCK,
                                   mutex, DRD_(mutex_type)(mutex), 0, 0, 0);
   CALL_FN_W_WW(ret, fn, mutex, abs_timeout);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_LOCK,
                                   mutex, ret == 0, 0, 0, 0);
   return ret;
}
Example #24
// pthread_mutex_timedlock
PTH_FUNC(int, pthreadZumutexZutimedlock, // pthread_mutex_timedlock
         pthread_mutex_t *mutex,
         const struct timespec *abs_timeout)
{
   int   ret;
   int   res;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_MUTEX_LOCK,
                              mutex, DRD_(mutex_type)(mutex), 0, 0, 0);
   CALL_FN_W_WW(ret, fn, mutex, abs_timeout);
   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_LOCK,
                              mutex, ret == 0, 0, 0, 0);
   return ret;
}
Example #25
// pthread_cond_wait
PTH_FUNC(int, pthreadZucondZuwaitZa, // pthread_cond_wait*
         pthread_cond_t *cond,
         pthread_mutex_t *mutex)
{
   int   ret;
   int   res;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_COND_WAIT,
                              cond, mutex, DRD_(mutex_type)(mutex), 0, 0);
   CALL_FN_W_WW(ret, fn, cond, mutex);
   VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_COND_WAIT,
                              cond, mutex, 1, 0, 0);
   return ret;
}
Example #26
// pthread_once
PTH_FUNC(int, pthreadZuonceZa, // pthread_once*
         pthread_once_t *once_control, void (*init_routine)(void))
{
   int ret;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   /*
    * Ignore any data races triggered by the implementation of pthread_once().
    * Necessary for Darwin. This is not necessary for Linux but doesn't have
    * any known adverse effects.
    */
   DRD_IGNORE_VAR(*once_control);
   CALL_FN_W_WW(ret, fn, once_control, init_routine);
   DRD_STOP_IGNORING_VAR(*once_control);
   return ret;
}
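
On the client side, pthread_once guarantees the init routine runs exactly once across all threads, which is the internal synchronization the DRD_IGNORE_VAR calls above tell the tool to trust. An illustrative sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_once_t once = PTHREAD_ONCE_INIT;

static void init_once(void) { puts("initialized"); }

/* Safe to call from any number of threads; init_once runs exactly once. */
void module_init(void) { pthread_once(&once, init_once); }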
Example #27
// pthread_join
PTH_FUNC(int, pthreadZujoinZa, // pthread_join*
         pthread_t pt_joinee, void **thread_return)
{
   int      ret;
   int      res;
   OrigFn   fn;

   VALGRIND_GET_ORIG_FN(fn);
   CALL_FN_W_WW(ret, fn, pt_joinee, thread_return);
   if (ret == 0)
   {
      VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_THREAD_JOIN,
                                 pt_joinee, 0, 0, 0, 0);
   }
   return ret;
}
Example #28
// pthread_mutex_init
PTH_FUNC(int, pthreadZumutexZuinit,
         pthread_mutex_t *mutex,
         const pthread_mutexattr_t* attr)
{
  int ret;
  int res;
  OrigFn fn;
  int mt;
  VALGRIND_GET_ORIG_FN(fn);
  mt = PTHREAD_MUTEX_DEFAULT;
  if (attr)
    pthread_mutexattr_gettype(attr, &mt);
  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__PRE_MUTEX_INIT,
                             mutex, pthread_to_drd_mutex_type(mt), 0, 0, 0);
  CALL_FN_W_WW(ret, fn, mutex, attr);
  VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__POST_MUTEX_INIT,
                             mutex, 0, 0, 0, 0);
  return ret;
}
Example #29
static __always_inline
int pthread_once_intercept(pthread_once_t *once_control,
                           void (*init_routine)(void))
{
   int ret;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);
   /*
    * Ignore any data races triggered by the implementation of pthread_once().
    * Necessary for Darwin. This is not necessary for Linux but doesn't have
    * any known adverse effects.
    */
   DRD_IGNORE_VAR(*once_control);
   ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
   CALL_FN_W_WW(ret, fn, once_control, init_routine);
   ANNOTATE_IGNORE_READS_AND_WRITES_END();
   DRD_STOP_IGNORING_VAR(*once_control);
   return ret;
}
Example #30
static __always_inline
int pthread_mutex_init_intercept(pthread_mutex_t *mutex,
                                 const pthread_mutexattr_t* attr)
{
   int ret;
   OrigFn fn;
   int mt;
   VALGRIND_GET_ORIG_FN(fn);
   mt = PTHREAD_MUTEX_DEFAULT;
   if (attr)
      pthread_mutexattr_gettype(attr, &mt);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_INIT,
                                   mutex, DRD_(pthread_to_drd_mutex_type)(mt),
                                   0, 0, 0);
   CALL_FN_W_WW(ret, fn, mutex, attr);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_INIT,
                                   mutex, 0, 0, 0, 0);
   return ret;
}