/*
 * Run the resource-freeing routines selected by 'to_run' inside the client,
 * then tell the Valgrind core that freeing is complete via the FREERES_DONE
 * client request.  The core takes control away at that request, so the
 * trailing null-pointer store is never executed (see /*NOTREACHED*/ below);
 * it exists only to make any fall-through crash loudly.
 */
void VG_NOTIFY_ON_LOAD(freeres)(Vg_FreeresToRun to_run)
{
# if !defined(__UCLIBC__) \
     && !defined(VGPV_arm_linux_android) \
     && !defined(VGPV_x86_linux_android) \
     && !defined(VGPV_mips32_linux_android) \
     && !defined(VGPV_arm64_linux_android)
   /* g++ mangled __gnu_cxx::__freeres yields -> _ZN9__gnu_cxx9__freeresEv */
   extern void _ZN9__gnu_cxx9__freeresEv(void) __attribute__((weak));
   /* Call the C++ runtime cleanup only if it was requested AND the weak
      symbol is actually present in this process image. */
   if (((to_run & VG_RUN__GNU_CXX__FREERES) != 0) &&
       (_ZN9__gnu_cxx9__freeresEv != NULL)) {
      _ZN9__gnu_cxx9__freeresEv();
   }

#  if defined(VGO_linux)
   /* __libc_freeres() not yet available on Solaris. */
   extern void __libc_freeres(void);
   if ((to_run & VG_RUN__LIBC_FREERES) != 0) {
      __libc_freeres();
   }
#  endif
# endif

   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__FREERES_DONE, 0, 0, 0, 0, 0);

   /*NOTREACHED*/
   *(volatile int *)0 = 'x';
}
/*
 * Wrapper for sem_post(3).  DRD is notified immediately before and after
 * the semaphore is incremented; the POST request's third argument tells
 * DRD whether the operation succeeded (nonzero iff result == 0).
 */
static __always_inline int sem_post_intercept(sem_t *sem)
{
   OrigFn orig;
   int    result;

   /* Fetch the original function address before issuing client requests. */
   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SEM_POST, sem, 0, 0, 0, 0);
   CALL_FN_W_W(result, orig, sem);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SEM_POST, sem,
                                   result == 0, 0, 0, 0);
   return result;
}
// QMutex::unlock() -- _ZN6QMutex6unlockEv QT4CORE_FUNC(void, _ZN6QMutex6unlockEv, void* mutex) { int ret; OrigFn fn; VALGRIND_GET_ORIG_FN(fn); VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_UNLOCK, mutex, mutex_type(mutex), 0, 0, 0); CALL_FN_W_W(ret, fn, mutex); VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_UNLOCK, mutex, 0, 0, 0, 0); }
/*
 * Shared wrapper for __cxa_guard_abort() and __cxa_guard_release(): both
 * release the static-initialization guard, so both are reported to DRD as
 * an unlock of the guard pseudo-mutex.
 */
static __always_inline void __cxa_guard_abort_release_intercept(void *guard)
{
   OrigFn orig;
   int    unused_rc;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_UNLOCK, guard,
                                   mutex_type_cxa_guard, 0, 0, 0);
   /* The return value is required by the call macro but not used. */
   CALL_FN_W_W(unused_rc, orig, guard);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_UNLOCK, guard,
                                   0, 0, 0, 0);
}
/*
 * Wrapper for pthread_rwlock_unlock(3).  The POST request's second
 * argument reports success (nonzero iff result == 0) so DRD only records
 * an unlock that actually happened.
 */
static __always_inline
int pthread_rwlock_unlock_intercept(pthread_rwlock_t* rwlock)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_RWLOCK_UNLOCK, rwlock,
                                   0, 0, 0, 0);
   CALL_FN_W_W(result, orig, rwlock);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_RWLOCK_UNLOCK, rwlock,
                                   result == 0, 0, 0, 0);
   return result;
}
/*
 * Wrapper for sem_timedwait(3).  DRD is told about the wait before it
 * starts; afterwards the POST request reports whether the semaphore was
 * actually acquired (nonzero iff result == 0 -- a timeout is a failure).
 */
static __always_inline
int sem_timedwait_intercept(sem_t *sem, const struct timespec *abs_timeout)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SEM_WAIT, sem,
                                   0, 0, 0, 0);
   CALL_FN_W_WW(result, orig, sem, abs_timeout);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SEM_WAIT, sem,
                                   result == 0, 0, 0, 0);
   return result;
}
/*
 * Wrapper for pthread_cancel(3).  DRD is informed of the cancellation
 * request before and after; the POST request reports whether the request
 * was accepted (nonzero iff result == 0).
 */
static __always_inline int pthread_cancel_intercept(pthread_t pt_thread)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_THREAD_CANCEL, pt_thread,
                                   0, 0, 0, 0);
   CALL_FN_W_W(result, orig, pt_thread);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_THREAD_CANCEL, pt_thread,
                                   result == 0, 0, 0, 0);
   return result;
}
// QMutex::tryLock() -- _ZN6QMutex7tryLockEv QT4CORE_FUNC(int, _ZN6QMutex7tryLockEv, void* mutex) { int ret; OrigFn fn; VALGRIND_GET_ORIG_FN(fn); VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_LOCK, mutex, mutex_type(mutex), 1, 0, 0); CALL_FN_W_W(ret, fn, mutex); VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_LOCK, mutex, ret, 0, 0, 0); return ret; }
/*
 * Wrapper for pthread_cond_broadcast(3): bracket the real call with
 * PRE/POST notifications so DRD can track the signalling of 'cond'.
 */
static __always_inline
int pthread_cond_broadcast_intercept(pthread_cond_t* cond)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_COND_BROADCAST, cond,
                                   0, 0, 0, 0);
   CALL_FN_W_W(result, orig, cond);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_COND_BROADCAST, cond,
                                   0, 0, 0, 0);
   return result;
}
/*
 * Wrapper for pthread_spin_destroy(3).  Spinlocks are modelled by DRD as
 * mutexes, hence the MUTEX_DESTROY requests; the POST request carries the
 * spinlock mutex type so DRD records the right kind of object.
 */
static __always_inline
int pthread_spin_destroy_intercept(pthread_spinlock_t *spinlock)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_DESTROY, spinlock,
                                   0, 0, 0, 0);
   CALL_FN_W_W(result, orig, spinlock);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_DESTROY, spinlock,
                                   mutex_type_spinlock, 0, 0, 0);
   return result;
}
/*
 * Wrapper for sem_init(3).  The PRE request forwards the sharing flag and
 * initial value so DRD can initialise its model of the semaphore.
 */
static __always_inline
int sem_init_intercept(sem_t *sem, int pshared, unsigned int value)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SEM_INIT, sem, pshared,
                                   value, 0, 0);
   CALL_FN_W_WWW(result, orig, sem, pshared, value);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SEM_INIT, sem,
                                   0, 0, 0, 0);
   return result;
}
/*
 * Wrapper for pthread_barrier_destroy(3).  Both requests carry the
 * 'pthread_barrier' tag so DRD knows which barrier flavour is involved.
 */
static __always_inline
int pthread_barrier_destroy_intercept(pthread_barrier_t* barrier)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_BARRIER_DESTROY, barrier,
                                   pthread_barrier, 0, 0, 0);
   CALL_FN_W_W(result, orig, barrier);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_BARRIER_DESTROY, barrier,
                                   pthread_barrier, 0, 0, 0);
   return result;
}
/*
 * Wrapper for pthread_spin_unlock(3).  DRD treats unlocking a spinlock
 * like re-initialising it (SPIN_INIT_OR_UNLOCK), with the spinlock mutex
 * type passed in the PRE request.
 */
static __always_inline
int pthread_spin_unlock_intercept(pthread_spinlock_t *spinlock)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK,
                                   spinlock, mutex_type_spinlock, 0, 0, 0);
   CALL_FN_W_W(result, orig, spinlock);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK,
                                   spinlock, 0, 0, 0, 0);
   return result;
}
/*
 * Wrapper for pthread_cond_wait(3).  The PRE request passes the condition
 * variable, its mutex and the mutex type.  The POST request's third
 * argument is the constant 1: per POSIX, cond_wait re-acquires the mutex
 * before returning even on failure, so the lock is always held again here.
 */
static __always_inline
int pthread_cond_wait_intercept(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_COND_WAIT, cond, mutex,
                                   DRD_(mutex_type)(mutex), 0, 0);
   CALL_FN_W_WW(result, orig, cond, mutex);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_COND_WAIT, cond, mutex,
                                   1, 0, 0);
   return result;
}
// QMutex::QMutex(RecursionMode) -- _ZN6QMutexC1ENS_13RecursionModeE, QT4CORE_FUNC(void, _ZN6QMutexC1ENS_13RecursionModeE, void* mutex, qt_mutex_mode mode) { int ret; OrigFn fn; VALGRIND_GET_ORIG_FN(fn); VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_INIT, mutex, qt_to_drd_mutex_type(mode), 0, 0, 0); CALL_FN_W_WW(ret, fn, mutex, mode); VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_INIT, mutex, 0, 0, 0, 0); }
/*
 * Wrapper for pthread_cond_destroy(3).  The POST request reports whether
 * destruction actually succeeded (nonzero iff result == 0), so DRD keeps
 * tracking a condition variable whose destroy failed.
 */
static __always_inline
int pthread_cond_destroy_intercept(pthread_cond_t* cond)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_COND_DESTROY, cond,
                                   0, 0, 0, 0);
   CALL_FN_W_W(result, orig, cond);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_COND_DESTROY, cond,
                                   result == 0, 0, 0, 0);
   return result;
}
/*
 * Wrapper for pthread_mutex_unlock(3): notify DRD of the unlock, passing
 * the mutex type in the PRE request so the right object kind is updated.
 */
static __always_inline
int pthread_mutex_unlock_intercept(pthread_mutex_t *mutex)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_UNLOCK, mutex,
                                   DRD_(mutex_type)(mutex), 0, 0, 0);
   CALL_FN_W_W(result, orig, mutex);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_UNLOCK, mutex,
                                   0, 0, 0, 0);
   return result;
}
/*
 * Wrapper for pthread_cond_init(3): bracket the real call with PRE/POST
 * notifications so DRD starts tracking the new condition variable.
 */
static __always_inline
int pthread_cond_init_intercept(pthread_cond_t* cond,
                                const pthread_condattr_t* attr)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_COND_INIT, cond,
                                   0, 0, 0, 0);
   CALL_FN_W_WW(result, orig, cond, attr);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_COND_INIT, cond,
                                   0, 0, 0, 0);
   return result;
}
/*
 * Wrapper for pthread_rwlock_init(3): bracket the real call with PRE/POST
 * notifications so DRD starts tracking the new reader-writer lock.
 */
static __always_inline
int pthread_rwlock_init_intercept(pthread_rwlock_t* rwlock,
                                  const pthread_rwlockattr_t* attr)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_RWLOCK_INIT, rwlock,
                                   0, 0, 0, 0);
   CALL_FN_W_WW(result, orig, rwlock, attr);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_RWLOCK_INIT, rwlock,
                                   0, 0, 0, 0);
   return result;
}
/*
 * Wrapper for pthread_mutex_timedlock(3).  The PRE request's third
 * argument (0) marks this as a blocking (non-try) lock attempt; the POST
 * request reports whether the lock was obtained (nonzero iff result == 0,
 * so a timeout counts as not acquired).
 */
static __always_inline
int pthread_mutex_timedlock_intercept(pthread_mutex_t *mutex,
                                      const struct timespec *abs_timeout)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_LOCK, mutex,
                                   DRD_(mutex_type)(mutex), 0, 0, 0);
   CALL_FN_W_WW(result, orig, mutex, abs_timeout);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_LOCK, mutex,
                                   result == 0, 0, 0, 0);
   return result;
}
/*
 * Wrapper for pthread_rwlock_timedwrlock(3).  The POST request reports
 * whether the write lock was obtained (nonzero iff result == 0; a timeout
 * means not acquired).
 */
static __always_inline
int pthread_rwlock_timedwrlock_intercept(pthread_rwlock_t* rwlock,
                                         const struct timespec *timeout)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_RWLOCK_WRLOCK, rwlock,
                                   0, 0, 0, 0);
   CALL_FN_W_WW(result, orig, rwlock, timeout);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_RWLOCK_WRLOCK, rwlock,
                                   result == 0, 0, 0, 0);
   return result;
}
/*
 * Wrapper for sem_open(3).  On success the POST request receives the new
 * semaphore handle; on failure (SEM_FAILED) it receives 0 so DRD does not
 * start tracking an invalid handle.
 */
static __always_inline
sem_t* sem_open_intercept(const char *name, int oflag, mode_t mode,
                          unsigned int value)
{
   OrigFn orig;
   sem_t *result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_SEM_OPEN, name, oflag,
                                   mode, value, 0);
   CALL_FN_W_WWWW(result, orig, name, oflag, mode, value);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_SEM_OPEN,
                                   result != SEM_FAILED ? result : 0,
                                   name, oflag, mode, value);
   return result;
}
/*
 * Wrapper for pthread_barrier_wait(3).  The POST request tells DRD
 * (a) whether the wait completed -- both 0 and
 * PTHREAD_BARRIER_SERIAL_THREAD indicate success -- and (b) whether this
 * thread is the designated serial thread for this barrier round.
 */
static __always_inline
int pthread_barrier_wait_intercept(pthread_barrier_t* barrier)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_BARRIER_WAIT, barrier,
                                   pthread_barrier, 0, 0, 0);
   CALL_FN_W_W(result, orig, barrier);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_BARRIER_WAIT, barrier,
                                   pthread_barrier,
                                   result == 0
                                   || result == PTHREAD_BARRIER_SERIAL_THREAD,
                                   result == PTHREAD_BARRIER_SERIAL_THREAD,
                                   0);
   return result;
}
/*
 * Wrapper for pthread_barrier_init(3).  The PRE request forwards the
 * participant count so DRD can model the barrier; both requests carry the
 * 'pthread_barrier' tag identifying the barrier flavour.
 */
static __always_inline
int pthread_barrier_init_intercept(pthread_barrier_t* barrier,
                                   const pthread_barrierattr_t* attr,
                                   unsigned count)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_BARRIER_INIT, barrier,
                                   pthread_barrier, count, 0, 0);
   CALL_FN_W_WWW(result, orig, barrier, attr, count);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_BARRIER_INIT, barrier,
                                   pthread_barrier, 0, 0, 0);
   return result;
}
/*
 * Wrapper for GNU indirect functions (STT_GNU_IFUNC): call the original
 * resolver, register the resolved target with the Valgrind core, and
 * return the resolver's result to the caller.
 */
void * VG_NOTIFY_ON_LOAD(ifunc_wrapper) (void)
{
    OrigFn fn;
    Addr result = 0;
    Addr fnentry;

    /* Call the original indirect function and get it's result */
    VALGRIND_GET_ORIG_FN(fn);
    CALL_FN_W_v(result, fn);

#if defined(VGP_ppc64be_linux)
   /* ppc64be uses function descriptors, so get the actual function entry
      address for the client request, but return the function
      descriptor from this function.
      result points to the function descriptor, which starts with the
      function entry. */
    fnentry = *(Addr*)result;
#else
    fnentry = result;
#endif

    /* Ask the valgrind core running on the real CPU (as opposed to this
       code which runs on the emulated CPU) to update the redirection that
       led to this function. This client request eventually gives control to
       the function VG_(redir_add_ifunc_target) in m_redir.c  */
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__ADD_IFUNC_TARGET,
                                    fn.nraddr, fnentry, 0, 0, 0);
    return (void*)result;
}
static __always_inline int pthread_mutex_init_intercept(pthread_mutex_t *mutex, const pthread_mutexattr_t* attr) { int ret; OrigFn fn; int mt; VALGRIND_GET_ORIG_FN(fn); mt = PTHREAD_MUTEX_DEFAULT; if (attr) pthread_mutexattr_gettype(attr, &mt); VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_INIT, mutex, DRD_(pthread_to_drd_mutex_type)(mt), 0, 0, 0); CALL_FN_W_WW(ret, fn, mutex, attr); VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_INIT, mutex, 0, 0, 0, 0); return ret; }
/*
 * Wrapper for __cxa_guard_acquire().  The guard is reported to DRD as a
 * mutex lock.  A return value of 0 means initialisation was already done
 * and the runtime did NOT hand us the guard -- no matching guard_release
 * will follow -- so report an immediate unlock to keep DRD's lock state
 * balanced.  Nonzero means this thread must perform the initialisation
 * and the unlock will be reported by the abort/release intercept.
 */
static __always_inline int __cxa_guard_acquire_intercept(void *guard)
{
   OrigFn orig;
   int    result;

   VALGRIND_GET_ORIG_FN(orig);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_LOCK, guard,
                                   mutex_type_cxa_guard, 0, 0, 0);
   CALL_FN_W_W(result, orig, guard);
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_LOCK, guard,
                                   1, 0, 0, 0);
   if (result == 0) {
      /* Balance the lock reported above with a synthetic unlock. */
      VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_UNLOCK, guard,
                                      mutex_type_cxa_guard, 0, 0, 0);
      VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_UNLOCK, guard,
                                      0, 0, 0, 0);
   }
   return result;
}
/*
 * Free glibc's internal allocations, then tell the Valgrind core that
 * freeing is done via the LIBC_FREERES_DONE client request.  The core
 * takes control away at that request; the trailing null-pointer store is
 * unreachable and exists only to crash loudly on any fall-through.
 * __libc_freeres() is skipped on uClibc and Android, which do not
 * provide it.
 */
void VG_NOTIFY_ON_LOAD(freeres)( void )
{
# if !defined(__UCLIBC__) \
     && !defined(__ANDROID__)
   extern void __libc_freeres(void);
   __libc_freeres();
# endif
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LIBC_FREERES_DONE,
                                   0, 0, 0, 0, 0);

   /*NOTREACHED*/
   *(volatile int *)0 = 'x';
}
/*
 * Free glibc's internal allocations, then tell the Valgrind core that
 * freeing is done via the LIBC_FREERES_DONE client request.  The core
 * takes control away at that request; the trailing null-pointer store is
 * unreachable and exists only to crash loudly on any fall-through.
 * __libc_freeres() is skipped on uClibc and the Android platforms, which
 * do not provide it.
 */
void VG_NOTIFY_ON_LOAD(freeres)( void )
{
# if !defined(__UCLIBC__) \
     && !defined(VGPV_arm_linux_android) \
     && !defined(VGPV_x86_linux_android) \
     && !defined(VGPV_mips32_linux_android) \
     && !defined(VGPV_arm64_linux_android)
   extern void __libc_freeres(void);
   __libc_freeres();
# endif
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LIBC_FREERES_DONE,
                                   0, 0, 0, 0, 0);

   /*NOTREACHED*/
   *(volatile int *)0 = 'x';
}
/*
 * Wrapper for GNU indirect functions (STT_GNU_IFUNC): call the original
 * resolver, register the resolved target address with the Valgrind core,
 * and return the resolver's result to the caller.  (This variant has no
 * function-descriptor handling, unlike the ppc64be version.)
 */
void * VG_NOTIFY_ON_LOAD(ifunc_wrapper) (void)
{
    OrigFn fn;
    Addr result = 0;

    /* Call the original indirect function and get it's result */
    VALGRIND_GET_ORIG_FN(fn);
    CALL_FN_W_v(result, fn);

    /* Ask the valgrind core running on the real CPU (as opposed to this
       code which runs on the emulated CPU) to update the redirection that
       led to this function. This client request eventually gives control to
       the function VG_(redir_add_ifunc_target) in m_redir.c  */
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__ADD_IFUNC_TARGET,
                                    fn.nraddr, result, 0, 0, 0);
    return (void*)result;
}