/* Consumer task: drains the three message queues, takes the semaphore and
 * exercises the mutex, depending on which test case is compiled in.
 * Runs forever; yields for 100 ticks at the end of each round. */
void task2(void *arg)
{
    os_printf("task2 start....\n");

    while (1) {
#if msg_queue_test
        /* Read one message from each queue; clear the buffer first so the
         * printed string is always NUL-terminated. */
        memset(buffer, 0, 10);
        msg_get(&my_queue, buffer);
        os_printf("queue 0 read = %s\n", buffer);

        memset(buffer, 0, 10);
        msg_get(&my_queue1, buffer);
        os_printf("queue 1 read = %s\n", buffer);

        memset(buffer, 0, 10);
        msg_get(&my_queue2, buffer);
        os_printf("queue 2 read = %s\n", buffer);
#endif

#if sem_test
        sem_get(&sem);
        os_printf("task2 sem.count = %d\n", sem.count );
#endif

#if mutex_test
        os_printf("task2 running\n" );
        mutex_get(&mutex);
        /* NOTE(review): new_task presumably is the currently running TCB —
         * confirm against the scheduler code. */
        os_printf("task2 priority: %d\n", new_task->prio );
        mutex_put(&mutex);
#endif

        os_delay(100);
    }
}
/* Mutex is being destroyed.

   Fails if:
   - mutex was not initialized
   - mutex is locked (?)

   In every case the mutex ends up in state MX_Dead, except when it was
   never registered at all. */
void VG_(tm_mutex_destroy)(ThreadId tid, Addr mutexp)
{
   struct mutex *mx = mutex_get(mutexp);

   if (mx == NULL) {
      mutex_report(tid, mutexp, MXE_NotExist, "destroying");
      return;
   }

   if (mx->state == MX_Dead) {
      mutex_report(tid, mutexp, MXE_NotInit, "destroying");
   } else if (mx->state == MX_Locked || mx->state == MX_Unlocking) {
      /* Destroying a held mutex: report it, then force an unlock so the
         bookkeeping stays consistent. */
      mutex_report(tid, mutexp, MXE_Locked, "destroying");
      VG_(tm_mutex_unlock)(tid, mutexp);
   }
   /* MX_Init and MX_Free are legitimate states to destroy from. */

   mutex_setstate(tid, mx, MX_Dead);
}
/* Producer task: fills the three message queues, posts the semaphore and
 * exercises the mutex, depending on which test case is compiled in.
 * Runs forever; yields for 300 ticks at the end of each round. */
void task1(void *arg)
{
    os_printf("task1 start....\n");

    while (1) {
#if msg_queue_test
        /* Three messages per queue, delivered in FIFO order. */
        msg_put(&my_queue,  &msg1, FIFO);
        msg_put(&my_queue,  &msg2, FIFO);
        msg_put(&my_queue,  &msg3, FIFO);

        msg_put(&my_queue1, &msg4, FIFO);
        msg_put(&my_queue1, &msg5, FIFO);
        msg_put(&my_queue1, &msg6, FIFO);

        msg_put(&my_queue2, &msg7, FIFO);
        msg_put(&my_queue2, &msg8, FIFO);
        msg_put(&my_queue2, &msg9, FIFO);
#endif

#if sem_test
        sem_put(&sem);
        os_printf("task1 sem.count = %d\n", sem.count );
#endif

#if mutex_test
        mutex_get(&mutex);
        /* NOTE(review): new_task presumably is the currently running TCB —
         * confirm against the scheduler code. */
        os_printf("task1 priority: %d\n", new_task->prio );
        schedule();
        os_printf("task1 running\n");
        mutex_put(&mutex);
#endif

        os_delay(300);
    }
}
/** Return True iff the mutex at address 'mutex' is currently held by
 *  thread 'tid' (i.e. it is tracked, locked at least once, and owned by
 *  that thread). */
Bool mutex_is_locked_by(const Addr mutex, const DrdThreadId tid)
{
  struct mutex_info* mi = mutex_get(mutex);

  if (mi == 0)
    return False;

  return (mi->recursion_count > 0 && mi->owner == tid);
}
/** pthread_spin_init() and pthread_spin_unlock() share one wrapper: if the
 *  spinlock is not yet tracked this is an init, otherwise an unlock.
 *  Spinlocks are modelled as mutexes of type mutex_type_spinlock. */
static void drd_spin_init_or_unlock(const Addr spinlock, const SizeT size)
{
  struct mutex_info* p = mutex_get(spinlock);

  if (p == 0)
    mutex_init(spinlock, size, mutex_type_spinlock);
  else
    mutex_unlock(spinlock, mutex_type_spinlock);
}
/** Called before pthread_mutex_lock() is invoked. If a data structure for
 * the client-side object was not yet created, do this now. Also check whether
 * an attempt is made to lock recursively a synchronization object that must
 * not be locked recursively.
 *
 * @param mutex      Client address of the pthread_mutex_t.
 * @param size       Size in bytes of the client mutex object.
 * @param mutex_type Mutex flavour; mutex_type_invalid_mutex is rejected.
 */
void mutex_pre_lock(const Addr mutex, const SizeT size, MutexT mutex_type)
{
  struct mutex_info* p;

  /* May be NULL here: tracing below guards every dereference, and the
     structure is created further down if needed. */
  p = mutex_get(mutex);

  if (s_trace_mutex)
  {
    VG_(message)(Vg_UserMsg,
                 "[%d/%d] pre_mutex_lock %s 0x%lx rc %d owner %d",
                 VG_(get_running_tid)(),
                 thread_get_running_tid(),
                 p ? mutex_get_typename(p) : "(?)",
                 mutex,
                 p ? p->recursion_count : 0,
                 p ? p->owner : VG_INVALID_THREADID);
  }

  /* An object of an unknown type is reported as "Not a mutex" and no state
     is created for it. */
  if (mutex_type == mutex_type_invalid_mutex)
  {
    GenericErrInfo GEI;
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            GenericErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "Not a mutex",
                            &GEI);
    return;
  }

  /* Lazily create tracking state on the first lock attempt. */
  if (p == 0)
  {
    p = mutex_init(mutex, size, mutex_type);
  }

  tl_assert(p);

  /* Recursive locking is only legal for recursive mutexes; report it for
     every other flavour when the caller already holds the lock. */
  if (p->owner == thread_get_running_tid()
      && p->recursion_count >= 1
      && mutex_type != mutex_type_recursive_mutex)
  {
    MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner };
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            MutexErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "Recursive locking not allowed",
                            &MEI);
  }
}
/* Mutex at mutexp is initialized.  This must be done before any
   further mutex operations are OK.

   Fails if:
   - mutexp already exists (and is locked?)
 */
void VG_(tm_mutex_init)(ThreadId tid, Addr mutexp)
{
   struct mutex *mx = mutex_get(mutexp);

   if (mx == NULL) {
      /* First time we see this address: allocate and register a node. */
      mx = VG_(OSet_AllocNode)(mutexSet, sizeof(struct mutex));
      mx->mutex = mutexp;
      VG_(OSet_Insert)(mutexSet, mx);
   } else if (mx->state != MX_Dead)
      /* Re-initializing a live mutex is reported, but note that the
         re-initialization below still goes ahead regardless. */
      mutex_report(tid, mutexp, MXE_ReInit, "initializing");

   mx->owner = VG_INVALID_THREADID;
   /* Drive the state machine through MX_Init so both transitions are
      recorded, ending in MX_Free (initialized, unlocked). */
   mutex_setstate(tid, mx, MX_Init);
   mutex_setstate(tid, mx, MX_Free);
}
/* Look up the mutex at mutexp, reporting and auto-initializing it first if
   it is not in an initialized state, so that the caller always gets back a
   usable record.  Never returns NULL. */
static struct mutex *mutex_check_initialized(ThreadId tid, Addr mutexp,
                                             const Char *action)
{
   struct mutex *found;

   vg_assert(tid != VG_INVALID_THREADID);

   if (!mx_is_initialized(mutexp)) {
      /* Report the misuse, then initialize anyway so subsequent
         operations on this mutex can be tracked. */
      mutex_report(tid, mutexp, MXE_NotInit, action);
      VG_(tm_mutex_init)(tid, mutexp);
   }

   found = mutex_get(mutexp);
   vg_assert(found != NULL);
   return found;
}
/** * Update mutex_info state when locking the pthread_mutex_t mutex. * Note: this function must be called after pthread_mutex_lock() has been * called, or a race condition is triggered ! */ void mutex_post_lock(const Addr mutex, const Bool took_lock) { const DrdThreadId drd_tid = thread_get_running_tid(); struct mutex_info* p; p = mutex_get(mutex); if (s_trace_mutex) { VG_(message)(Vg_UserMsg, "[%d/%d] post_mutex_lock %s 0x%lx rc %d owner %d", VG_(get_running_tid)(), drd_tid, p ? mutex_get_typename(p) : "(?)", mutex, p ? p->recursion_count : 0, p ? p->owner : VG_INVALID_THREADID); } if (! p || ! took_lock) return; if (p->recursion_count == 0) { p->owner = drd_tid; s_mutex_lock_count++; } else if (p->owner != drd_tid) { VG_(message)(Vg_UserMsg, "The impossible happened: mutex 0x%lx is locked" " simultaneously by two threads (recursion count %d," " owners %d and %d) !", p->a1, p->recursion_count, p->owner, drd_tid); p->owner = drd_tid; } p->recursion_count++; if (p->recursion_count == 1) { const DrdThreadId last_owner = p->owner; if (last_owner != drd_tid && last_owner != DRD_INVALID_THREADID) thread_combine_vc2(drd_tid, mutex_get_last_vc(mutex)); thread_new_segment(drd_tid); } }
/** Called after pthread_mutex_destroy(): drop the client-side tracking
 *  state, or report "Not a mutex" when the address was never tracked. */
void mutex_post_destroy(const Addr mutex)
{
  struct mutex_info* info = mutex_get(mutex);

  if (info == 0)
  {
    GenericErrInfo GEI;
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            GenericErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "Not a mutex",
                            &GEI);
  }
  else
  {
    clientobj_remove(mutex, ClientMutex);
  }
}
/** Called before pthread_mutex_init(). */ struct mutex_info* mutex_init(const Addr mutex, const SizeT size, const MutexT mutex_type) { struct mutex_info* p; if (s_trace_mutex) { VG_(message)(Vg_UserMsg, "[%d/%d] mutex_init %s 0x%lx", VG_(get_running_tid)(), thread_get_running_tid(), mutex_type_name(mutex_type), mutex); } if (mutex_type == mutex_type_invalid_mutex) { GenericErrInfo GEI; VG_(maybe_record_error)(VG_(get_running_tid)(), GenericErr, VG_(get_IP)(VG_(get_running_tid)()), "Not a mutex", &GEI); return 0; } p = mutex_get(mutex); if (p) { const ThreadId vg_tid = VG_(get_running_tid)(); MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner }; VG_(maybe_record_error)(vg_tid, MutexErr, VG_(get_IP)(vg_tid), "Mutex reinitialization", &MEI); return p; } p = mutex_get_or_allocate(mutex, size, mutex_type); return p; }
/* Record a mutex misuse error: map the error code to a human-readable
   adjective and hand the details to the error-management machinery. */
static void mutex_report(ThreadId tid, Addr mutexp,
                         enum mutex_error err, const Char *action)
{
   struct mutex_error_data errdata;
   Char *what = "?";

   errdata.err = err;
   errdata.mx = mutex_get(mutexp);
   errdata.action = action;

   switch (err) {
   case MXE_NotExist:  what = "non-existent";        break;
   case MXE_NotInit:   what = "uninitialized";       break;
   case MXE_ReInit:    what = "already initialized"; break;
   case MXE_NotLocked: what = "not locked";          break;
   case MXE_Locked:    what = "locked";              break;
   case MXE_NotOwner:  what = "unowned";             break;
   case MXE_Deadlock:  what = "deadlock on";         break;
   }

   VG_(maybe_record_error)(tid, MutexErr, 0, what, &errdata);
}
/** Return the recursion count of the mutex at address 'mutex'.
 *  Asserts that the mutex is tracked. */
int mutex_get_recursion_count(const Addr mutex)
{
  struct mutex_info* const mi = mutex_get(mutex);

  tl_assert(mi);
  return mi->recursion_count;
}
/** Return the vector clock saved at the last unlock of the mutex at
 *  address 'mutex', or 0 when the mutex is not tracked. */
const VectorClock* mutex_get_last_vc(const Addr mutex)
{
  struct mutex_info* const mi = mutex_get(mutex);

  if (mi == 0)
    return 0;
  return &mi->vc;
}
/* True iff the mutex at mutexp is tracked and currently in the locked
   state. */
static Bool mx_is_locked(Addr mutexp)
{
   const struct mutex *mx = mutex_get(mutexp);

   if (mx == NULL)
      return False;
   return mx->state == MX_Locked;
}
/** * Update mutex_info state when unlocking the pthread_mutex_t mutex. * Note: this function must be called before pthread_mutex_unlock() is called, * or a race condition is triggered ! * @return New value of the mutex recursion count. * @param mutex Pointer to pthread_mutex_t data structure in the client space. * @param tid ThreadId of the thread calling pthread_mutex_unlock(). * @param vc Pointer to the current vector clock of thread tid. */ void mutex_unlock(const Addr mutex, const MutexT mutex_type) { const DrdThreadId drd_tid = thread_get_running_tid(); const ThreadId vg_tid = VG_(get_running_tid)(); const VectorClock* const vc = thread_get_vc(drd_tid); struct mutex_info* const p = mutex_get(mutex); if (s_trace_mutex) { VG_(message)(Vg_UserMsg, "[%d/%d] mutex_unlock %s 0x%lx rc %d", vg_tid, drd_tid, p ? mutex_get_typename(p) : "?", mutex, p ? p->recursion_count : 0, p ? p->owner : 0); } if (p == 0 || mutex_type == mutex_type_invalid_mutex) { GenericErrInfo GEI; VG_(maybe_record_error)(vg_tid, GenericErr, VG_(get_IP)(vg_tid), "Not a mutex", &GEI); return; } if (p->owner == DRD_INVALID_THREADID) { MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner }; VG_(maybe_record_error)(vg_tid, MutexErr, VG_(get_IP)(vg_tid), "Mutex not locked", &MEI); return; } tl_assert(p); if (p->mutex_type != mutex_type) { VG_(message)(Vg_UserMsg, "??? mutex %p: type changed from %d into %d", p->a1, p->mutex_type, mutex_type); } tl_assert(p->mutex_type == mutex_type); tl_assert(p->owner != DRD_INVALID_THREADID); if (p->owner != drd_tid || p->recursion_count <= 0) { MutexErrInfo MEI = { p->a1, p->recursion_count, p->owner }; VG_(maybe_record_error)(vg_tid, MutexErr, VG_(get_IP)(vg_tid), "Mutex not locked by calling thread", &MEI); return; } tl_assert(p->recursion_count > 0); p->recursion_count--; tl_assert(p->recursion_count >= 0); if (p->recursion_count == 0) { /* This pthread_mutex_unlock() call really unlocks the mutex. 
Save the */ /* current vector clock of the thread such that it is available when */ /* this mutex is locked again. */ vc_assign(&p->vc, vc); thread_new_segment(drd_tid); } }
/* True iff the mutex at mutexp is tracked and has been initialized
   (i.e. is in any state other than MX_Dead). */
static Bool mx_is_initialized(Addr mutexp)
{
   const struct mutex *mx = mutex_get(mutexp);

   if (mx == NULL)
      return False;
   return mx->state != MX_Dead;
}