/* Mark the per-thread fields that are deliberately raced on (thread
   status and exit code) as benign, so an inner Helgrind/DRD run does
   not report them.  Thread slot 0 is unused, hence the loop from 1. */
void VG_(init_Threads)(void)
{
   ThreadId t;

   for (t = 1; t < VG_N_THREADS; t++) {
      INNER_REQUEST(
         ANNOTATE_BENIGN_RACE_SIZED(&VG_(threads)[t].status,
                                    sizeof(VG_(threads)[t].status), ""));
      INNER_REQUEST(
         ANNOTATE_BENIGN_RACE_SIZED(&VG_(threads)[t].os_state.exitcode,
                                    sizeof(VG_(threads)[t].os_state.exitcode),
                                    ""));
   }
}
/* Initialise a pipe-based binary semaphore and deposit a single token
   into it.  A "get" reads the token out of the pipe, a "put" writes it
   back; the kernel's pipe buffering provides the mutual exclusion. */
void ML_(sema_init)(vg_sema_t *sema)
{
   HChar token[2];
   Int   nwritten, ret;

   ret = VG_(pipe)(sema->pipe);
   vg_assert(ret == 0);
   vg_assert(sema->pipe[0] != sema->pipe[1]);

   /* Move both fds above the client's fd range. */
   sema->pipe[0] = VG_(safe_fd)(sema->pipe[0]);
   sema->pipe[1] = VG_(safe_fd)(sema->pipe[1]);

   if (0)
      VG_(debugLog)(0, "zz", "sema_init: %d %d\n",
                    sema->pipe[0], sema->pipe[1]);
   vg_assert(sema->pipe[0] != sema->pipe[1]);

   sema->owner_lwpid = -1;

   /* create initial token */
   sema_char = 'A';
   token[0] = sema_char;
   token[1] = 0;
   sema_char++;

   INNER_REQUEST(ANNOTATE_RWLOCK_CREATE(sema));
   INNER_REQUEST(ANNOTATE_BENIGN_RACE_SIZED(&sema->owner_lwpid,
                                            sizeof(sema->owner_lwpid), ""));

   nwritten = VG_(write)(sema->pipe[1], token, 1);
   vg_assert(nwritten == 1);
}
/* Initialise a pipe-based binary semaphore and write the initial token
   into it.  The token byte cycles through 'A'.. so that a mismatched
   get/put pair can be spotted.  Fix: the benign-race annotation for
   held_as_LL previously carried the copy-pasted description
   "semaphore sema_char inc", mislabeling which field it covers. */
void ML_(sema_init)(vg_sema_t *sema)
{
   HChar buf[2];
   Int res, r;

   r = VG_(pipe)(sema->pipe);
   vg_assert(r == 0);
   vg_assert(sema->pipe[0] != sema->pipe[1]);

   /* Move both fds above the client's fd range. */
   sema->pipe[0] = VG_(safe_fd)(sema->pipe[0]);
   sema->pipe[1] = VG_(safe_fd)(sema->pipe[1]);

   if (0)
      VG_(debugLog)(0, "zz", "sema_init: %d %d\n",
                    sema->pipe[0], sema->pipe[1]);
   vg_assert(sema->pipe[0] != sema->pipe[1]);

   sema->owner_lwpid = -1;
   sema->held_as_LL = False;

   /* create initial token */
   sema->sema_char = 'A';
   buf[0] = sema->sema_char;
   buf[1] = 0;
   sema->sema_char++;

   //INNER_REQUEST(ANNOTATE_RWLOCK_CREATE(sema));
   // disabled the above inner request, seems to give false problems/alarms
   // with mixing the high level lock logic (the big rwlock) with the low level
   // locks (here).
   // All this should be re-done with proper lock module.

   /* all the below are benign as in any case, such data cannot be used
      anymore when we have a lock that is read-acquired by multiple threads */
   /* The only real mystery is the held_as_LL */
   INNER_REQUEST(ANNOTATE_BENIGN_RACE_SIZED(&sema->owner_lwpid,
                                            sizeof(sema->owner_lwpid), ""));
   INNER_REQUEST(ANNOTATE_BENIGN_RACE_SIZED(&sema->sema_char,
                                            sizeof(sema->sema_char),
                                            "semaphore sema_char inc"));
   /* description fixed: this annotation is for held_as_LL, not sema_char */
   INNER_REQUEST(ANNOTATE_BENIGN_RACE_SIZED(&sema->held_as_LL,
                                            sizeof(sema->held_as_LL),
                                            "semaphore held_as_LL"));

   res = VG_(write)(sema->pipe[1], buf, 1);
   vg_assert(res == 1);
}
void VG_(init_Threads)(void) { ThreadId tid; UChar *addr, *aligned_addr; addr = VG_(malloc)("init_Threads", VG_N_THREADS * sizeof VG_(threads)[0] + LibVEX_GUEST_STATE_ALIGN - 1); // Align aligned_addr = addr + (Addr)addr % LibVEX_GUEST_STATE_ALIGN; VG_(threads) = (ThreadState *)aligned_addr; for (tid = 1; tid < VG_N_THREADS; tid++) { INNER_REQUEST( ANNOTATE_BENIGN_RACE_SIZED(&VG_(threads)[tid].status, sizeof(VG_(threads)[tid].status), "")); INNER_REQUEST( ANNOTATE_BENIGN_RACE_SIZED(&VG_(threads)[tid].os_state.exitcode, sizeof(VG_(threads)[tid].os_state.exitcode), "")); } }
static struct sched_lock *create_sched_lock(void) { struct sched_lock *p; p = VG_(malloc)("sched_lock", sizeof(*p)); // The futex syscall requires that a futex takes four bytes. vg_assert(sizeof(p->futex[0]) == 4); VG_(memset)(p, 0, sizeof(*p)); INNER_REQUEST(ANNOTATE_RWLOCK_CREATE(p)); INNER_REQUEST(ANNOTATE_BENIGN_RACE_SIZED(&p->futex, sizeof(p->futex), "")); return p; }
/* Allocate and initialise a futex-based scheduler lock.  Returns NULL if
   allocation fails (callers must check).
   Fix: the inner-tool annotations previously ran even when p was NULL,
   computing &p->futex on a null pointer (undefined behavior); they are
   now inside the allocation-success guard. */
static struct sched_lock *create_sched_lock(void)
{
   struct sched_lock *p;

   p = VG_(malloc)("sched_lock", sizeof(*p));
   if (p) {
      /* The futex syscall requires that a futex takes four bytes. */
      vg_assert(sizeof(p->futex[0]) == 4);
      p->head = 0;
      p->tail = 0;
      VG_(memset)((void*)p->futex, 0, sizeof(p->futex));
      p->owner = 0;
      INNER_REQUEST(ANNOTATE_RWLOCK_CREATE(p));
      INNER_REQUEST(ANNOTATE_BENIGN_RACE_SIZED(&p->futex,
                                               sizeof(p->futex), ""));
   }
   return p;
}