/* Initialise a pipe-based semaphore and deposit the initial token.
   The token is a letter taken from the (file-global) rotating
   sema_char counter, so stale tokens can be spotted in sema_down. */
void ML_(sema_init)(vg_sema_t *sema)
{
   HChar buf[2];
   Int res, r;

   r = VG_(pipe)(sema->pipe);
   vg_assert(r == 0);
   vg_assert(sema->pipe[0] != sema->pipe[1]);

   /* Move both fds out of the range the client might use. */
   sema->pipe[0] = VG_(safe_fd)(sema->pipe[0]);
   sema->pipe[1] = VG_(safe_fd)(sema->pipe[1]);

   if (0)
      VG_(debugLog)(0,"zz","sema_init: %d %d\n", sema->pipe[0],
                    sema->pipe[1]);
   vg_assert(sema->pipe[0] != sema->pipe[1]);

   sema->owner_lwpid = -1;
   /* FIX: held_as_LL was left uninitialised here, although
      ML_(sema_up) asserts (as_LL == sema->held_as_LL); give it a
      defined starting value, as the other variant of sema_init does. */
   sema->held_as_LL = False;

   /* create initial token */
   sema_char = 'A';
   buf[0] = sema_char;
   buf[1] = 0;
   sema_char++;

   INNER_REQUEST(ANNOTATE_RWLOCK_CREATE(sema));
   /* owner_lwpid is read racily on purpose (cheap "do I hold it?"
      check); tell the inner race detector to ignore it. */
   INNER_REQUEST(ANNOTATE_BENIGN_RACE_SIZED(&sema->owner_lwpid,
                                            sizeof(sema->owner_lwpid), ""));

   res = VG_(write)(sema->pipe[1], buf, 1);
   vg_assert(res == 1);
}
/* Mark the per-thread fields that are deliberately accessed without
   synchronisation as benign races, for the benefit of an inner
   Valgrind running a race detector.  Slot 0 is skipped. */
void VG_(init_Threads)(void)
{
   ThreadId tid = 1;

   while (tid < VG_N_THREADS) {
      INNER_REQUEST(
         ANNOTATE_BENIGN_RACE_SIZED(&VG_(threads)[tid].status,
                                    sizeof(VG_(threads)[tid].status), ""));
      INNER_REQUEST(
         ANNOTATE_BENIGN_RACE_SIZED(&VG_(threads)[tid].os_state.exitcode,
                                    sizeof(VG_(threads)[tid].os_state.exitcode),
                                    ""));
      tid++;
   }
}
static struct sched_lock *create_sched_lock(void) { struct sched_lock *p; p = VG_(malloc)("sched_lock", sizeof(*p)); // The futex syscall requires that a futex takes four bytes. vg_assert(sizeof(p->futex[0]) == 4); VG_(memset)(p, 0, sizeof(*p)); INNER_REQUEST(ANNOTATE_RWLOCK_CREATE(p)); INNER_REQUEST(ANNOTATE_BENIGN_RACE_SIZED(&p->futex, sizeof(p->futex), "")); return p; }
/* get a token */
void ML_(sema_down)( vg_sema_t *sema, Bool as_LL )
{
   HChar buf[2];
   Int ret;
   Int lwpid = VG_(gettid)();

   vg_assert(sema->owner_lwpid != lwpid); /* can't have it already */
   vg_assert(sema->pipe[0] != sema->pipe[1]);

  again:
   buf[0] = buf[1] = 0;
   /* Block until a token (one byte) is available in the pipe. */
   ret = VG_(read)(sema->pipe[0], buf, 1);

   if (ret != 1)
      VG_(debugLog)(0, "scheduler",
                       "VG_(sema_down): read returned %d\n", ret);

   if (ret == -VKI_EINTR)
      goto again;

   vg_assert(ret == 1);		/* should get exactly 1 token */
   vg_assert(buf[0] >= 'A' && buf[0] <= 'Z');
   vg_assert(buf[1] == 0);

   /* FIX: annotate the acquisition only after the read has actually
      succeeded.  Previously the annotation was issued before the
      EINTR check, so an interrupted read caused the rwlock to be
      annotated as acquired twice for a single real acquisition. */
   INNER_REQUEST(ANNOTATE_RWLOCK_ACQUIRED(sema, /*is_w*/1));

   /* Rotate the token letter A..Z for the next sema_up. */
   if (sema_char == 'Z') sema_char = 'A'; else sema_char++;

   sema->owner_lwpid = lwpid;
   sema->held_as_LL = as_LL;
}
/*
 * Release a ticket lock by incrementing the head of the queue. Only generate
 * a thread wakeup signal if at least one thread is waiting. If the queue tail
 * matches the wakeup_ticket value, no threads have to be woken up.
 *
 * Note: tail will only be read after head has been incremented since both are
 * declared as volatile and since the __sync...() functions imply a memory
 * barrier.
 */
static void release_sched_lock(struct sched_lock *p, ThreadId tid,
                               SchedLockKind slk)
{
   unsigned wakeup_ticket, futex_value;
   volatile unsigned *futex;
   SysRes sres;

   vg_assert(p->owner != 0);
   /* Drop ownership before publishing the new head. */
   p->owner = 0;
   INNER_REQUEST(ANNOTATE_RWLOCK_RELEASED(p, /*is_w*/1));
   /* The ticket now being served; the fetch-and-add is a full barrier. */
   wakeup_ticket = __sync_fetch_and_add(&p->head, 1) + 1;
   if (p->tail != wakeup_ticket) {
      /* At least one thread is queued behind us: bump the futex word
         associated with the next ticket and wake everyone sleeping on
         it.  Woken threads whose ticket is still not due will re-check
         and go back to sleep (see acquire_sched_lock). */
      futex = &p->futex[wakeup_ticket & TL_FUTEX_MASK];
      futex_value = __sync_fetch_and_add(futex, 1);
      if (s_debug)
         VG_(printf)("[%d/%d] release: waking up ticket %d (futex[%ld] = %d)"
                     "\n", VG_(getpid)(), VG_(gettid)(), wakeup_ticket,
                     (long)(futex - p->futex), futex_value);
      /* FUTEX_WAKE with val = INT_MAX: wake all waiters on this word. */
      sres = VG_(do_syscall3)(__NR_futex, (UWord)futex,
                              VKI_FUTEX_WAKE | VKI_FUTEX_PRIVATE_FLAG,
                              0x7fffffff);
      vg_assert(!sr_isError(sres));
   } else {
      if (s_debug)
         VG_(printf)("[%d/%d] release: no thread is waiting for ticket %d\n",
                     VG_(getpid)(), VG_(gettid)(), wakeup_ticket);
   }
}
/*
 * Acquire ticket lock. Increment the tail of the queue and use the original
 * value as the ticket value. Wait until the head of the queue equals the
 * ticket value. The futex used to wait depends on the ticket value in order
 * to avoid that all threads get woken up every time a ticket lock is
 * released. That last effect is sometimes called the "thundering herd"
 * effect.
 *
 * See also Nick Piggin, x86: FIFO ticket spinlocks, Linux kernel mailing list
 * (http://lkml.org/lkml/2007/11/1/125) for more info.
 */
static void acquire_sched_lock(struct sched_lock *p, ThreadId tid,
                               SchedLockKind slk)
{
   unsigned ticket, futex_value;
   volatile unsigned *futex;
   SysRes sres;

   /* Take the next ticket; the fetch-and-add is a full barrier. */
   ticket = __sync_fetch_and_add(&p->tail, 1);
   futex = &p->futex[ticket & TL_FUTEX_MASK];
   if (s_debug)
      VG_(printf)("[%d/%d] acquire: ticket %d\n", VG_(getpid)(),
                  VG_(gettid)(), ticket);
   for (;;) {
      /* Sample the futex word BEFORE re-reading head: if a release
         happens in between, FUTEX_WAIT below fails with EAGAIN rather
         than missing the wakeup. */
      futex_value = *futex;
      __sync_synchronize();
      if (ticket == p->head)
         break;
      if (s_debug)
         VG_(printf)("[%d/%d] acquire: ticket %d - waiting until"
                     " futex[%ld] != %d\n", VG_(getpid)(),
                     VG_(gettid)(), ticket, (long)(futex - p->futex),
                     futex_value);
      /* Sleep until the sampled futex word changes; EAGAIN means it
         already changed, so just loop and re-check. */
      sres = VG_(do_syscall3)(__NR_futex, (UWord)futex,
                              VKI_FUTEX_WAIT | VKI_FUTEX_PRIVATE_FLAG,
                              futex_value);
      if (sr_isError(sres) && sres._val != VKI_EAGAIN) {
         VG_(printf)("futex_wait() returned error code %ld\n", sres._val);
         vg_assert(False);
      }
   }
   /* Ensure the critical section's reads can't be hoisted above the
      head check. */
   __sync_synchronize();
   INNER_REQUEST(ANNOTATE_RWLOCK_ACQUIRED(p, /*is_w*/1));
   vg_assert(p->owner == 0);
   p->owner = VG_(gettid)();
}
/* Initialise a pipe-based semaphore: create the pipe, move its fds to
   the safe range, reset the ownership bookkeeping and write the
   initial token into the pipe. */
void ML_(sema_init)(vg_sema_t *sema)
{
   Int res, r;
   HChar token[2];

   r = VG_(pipe)(sema->pipe);
   vg_assert(r == 0);
   vg_assert(sema->pipe[0] != sema->pipe[1]);

   sema->pipe[0] = VG_(safe_fd)(sema->pipe[0]);
   sema->pipe[1] = VG_(safe_fd)(sema->pipe[1]);

   if (0)
      VG_(debugLog)(0,"zz","sema_init: %d %d\n", sema->pipe[0],
                    sema->pipe[1]);
   vg_assert(sema->pipe[0] != sema->pipe[1]);

   sema->owner_lwpid = -1;
   sema->held_as_LL = False;

   /* create initial token */
   sema->sema_char = 'A';
   token[0] = sema->sema_char;
   token[1] = 0;
   sema->sema_char++;

   //INNER_REQUEST(ANNOTATE_RWLOCK_CREATE(sema));
   // The rwlock-create annotation above is disabled: it seems to give
   // false problems/alarms when the high level lock logic (the big
   // rwlock) is mixed with the low level locks handled here.
   // All this should be re-done with a proper lock module.
   /* The races below are benign: in any case such data cannot be used
      anymore once we have a lock that is read-acquired by multiple
      threads.  The only real mystery is held_as_LL. */
   INNER_REQUEST(ANNOTATE_BENIGN_RACE_SIZED(&sema->owner_lwpid,
                                            sizeof(sema->owner_lwpid), ""));
   INNER_REQUEST(ANNOTATE_BENIGN_RACE_SIZED(&sema->sema_char,
                                            sizeof(sema->sema_char),
                                            "semaphore sema_char inc"));
   INNER_REQUEST(ANNOTATE_BENIGN_RACE_SIZED(&sema->held_as_LL,
                                            sizeof(sema->held_as_LL),
                                            "semaphore sema_char inc"));

   res = VG_(write)(sema->pipe[1], token, 1);
   vg_assert(res == 1);
}
/* Allocate and initialise a ticket lock.  Returns NULL if the
   allocation fails (per the existing guard below — NOTE(review):
   confirm whether this VG_(malloc) can actually return NULL). */
static struct sched_lock *create_sched_lock(void)
{
   struct sched_lock *p;

   p = VG_(malloc)("sched_lock", sizeof(*p));
   if (p) {
      /* The futex syscall requires that a futex takes four bytes. */
      vg_assert(sizeof(p->futex[0]) == 4);
      p->head = 0;
      p->tail = 0;
      VG_(memset)((void*)p->futex, 0, sizeof(p->futex));
      p->owner = 0;
      /* FIX: issue the annotations only for a successful allocation;
         previously they ran unconditionally, annotating a NULL lock
         when the allocation-failure branch was taken. */
      INNER_REQUEST(ANNOTATE_RWLOCK_CREATE(p));
      INNER_REQUEST(ANNOTATE_BENIGN_RACE_SIZED(&p->futex,
                                               sizeof(p->futex), ""));
   }
   return p;
}
/* Tear down a semaphore: close both pipe ends and reset the fields to
   their "not initialised" values. */
void ML_(sema_deinit)(vg_sema_t *sema)
{
   vg_assert(sema->owner_lwpid != -1); /* must be initialised */
   vg_assert(sema->pipe[0] != sema->pipe[1]);

   INNER_REQUEST(ANNOTATE_RWLOCK_DESTROY(sema));

   VG_(close)(sema->pipe[0]);
   VG_(close)(sema->pipe[1]);
   sema->pipe[0] = -1;
   sema->pipe[1] = -1;
   sema->owner_lwpid = -1;
}
void VG_(init_Threads)(void) { ThreadId tid; UChar *addr, *aligned_addr; addr = VG_(malloc)("init_Threads", VG_N_THREADS * sizeof VG_(threads)[0] + LibVEX_GUEST_STATE_ALIGN - 1); // Align aligned_addr = addr + (Addr)addr % LibVEX_GUEST_STATE_ALIGN; VG_(threads) = (ThreadState *)aligned_addr; for (tid = 1; tid < VG_N_THREADS; tid++) { INNER_REQUEST( ANNOTATE_BENIGN_RACE_SIZED(&VG_(threads)[tid].status, sizeof(VG_(threads)[tid].status), "")); INNER_REQUEST( ANNOTATE_BENIGN_RACE_SIZED(&VG_(threads)[tid].os_state.exitcode, sizeof(VG_(threads)[tid].os_state.exitcode), "")); } }
/* put token back */
void ML_(sema_up)( vg_sema_t *sema, Bool as_LL )
{
   HChar token[2];
   Int ret;

   vg_assert(as_LL == sema->held_as_LL);
   token[0] = sema_char;
   token[1] = 0;
   vg_assert(sema->owner_lwpid != -1); /* must be initialised */
   vg_assert(sema->pipe[0] != sema->pipe[1]);
   vg_assert(sema->owner_lwpid == VG_(gettid)()); /* must have it */

   /* Relinquish ownership before making the token visible to readers. */
   sema->owner_lwpid = 0;
   INNER_REQUEST(ANNOTATE_RWLOCK_RELEASED(sema, /*is_w*/1));

   ret = VG_(write)(sema->pipe[1], token, 1);
   if (ret != 1)
      VG_(debugLog)(0, "scheduler", "VG_(sema_up):write returned %d\n", ret);
   vg_assert(ret == 1);
}
/* Destroy a ticket lock: retract the rwlock annotation registered in
   create_sched_lock, then release the lock's storage. */
static void destroy_sched_lock(struct sched_lock *p)
{
   INNER_REQUEST(ANNOTATE_RWLOCK_DESTROY(p));
   VG_(free)(p);
}