/* Prepare for poll()ing on the fdsem's file descriptor.
 *
 * Registers the calling thread as a waiter so that subsequent posts
 * actually write to the fd (posters skip the write when nobody waits).
 *
 * Returns -1 if the semaphore was already signalled (the signal is
 * consumed and there is no need to poll), 0 if the caller should go
 * on and poll the fd; in the latter case the waiter registration
 * stays active until pa_fdsem_after_poll(). */
int pa_fdsem_before_poll(pa_fdsem *f) {
    pa_assert(f);

    flush(f);

    /* Fast path: consume a pending signal without registering as a waiter. */
    if (pa_atomic_cmpxchg(&f->data->signalled, 1, 0))
        return -1;

    pa_atomic_inc(&f->data->waiting);

    /* Re-check after announcing ourselves: a post may have raced in
     * between the first check and the increment, in which case it saw
     * waiting == 0 and did not write to the fd. */
    if (pa_atomic_cmpxchg(&f->data->signalled, 1, 0)) {
        pa_assert_se(pa_atomic_dec(&f->data->waiting) >= 1);
        return -1;
    }

    return 0;
}
/* Block until the fdsem is posted, consuming the signal.
 *
 * Registers as a waiter and then blocks in read() on the eventfd
 * (when available) or the pipe, retrying on EINTR, until the
 * 'signalled' flag can be atomically claimed. Bytes drained from the
 * fd are subtracted from the in_pipe counter so flush() stays in sync
 * with what posters wrote. */
void pa_fdsem_wait(pa_fdsem *f) {
    pa_assert(f);

    flush(f);

    /* Fast path: a signal is already pending; consume it and return. */
    if (pa_atomic_cmpxchg(&f->data->signalled, 1, 0))
        return;

    /* Announce ourselves so that posters write to the fd and wake us. */
    pa_atomic_inc(&f->data->waiting);

    while (!pa_atomic_cmpxchg(&f->data->signalled, 1, 0)) {
        char x[10];
        ssize_t r;

#ifdef HAVE_SYS_EVENTFD_H
        if (f->efd >= 0) {
            uint64_t u;

            /* eventfd reads always transfer exactly a uint64_t counter value. */
            if ((r = pa_read(f->efd, &u, sizeof(u), NULL)) != sizeof(u)) {

                /* Only EINTR is recoverable; anything else is fatal here. */
                if (r >= 0 || errno != EINTR) {
                    pa_log_error("Invalid read from eventfd: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                    pa_assert_not_reached();
                }

                continue;
            }

            r = (ssize_t) u;
        } else
#endif

        if ((r = pa_read(f->fds[0], &x, sizeof(x), NULL)) <= 0) {

            /* Only EINTR is recoverable; EOF or other errors are fatal. */
            if (r >= 0 || errno != EINTR) {
                pa_log_error("Invalid read from pipe: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                pa_assert_not_reached();
            }

            continue;
        }

        /* Account for the bytes/counter value we just drained. */
        pa_atomic_sub(&f->data->in_pipe, (int) r);
    }

    /* Deregister as a waiter. */
    pa_assert_se(pa_atomic_dec(&f->data->waiting) >= 1);
}
/* Non-blocking attempt to take the semaphore.
 *
 * Drains any pending fd traffic, then tries to atomically claim the
 * 'signalled' flag. Returns 1 when the signal was consumed, 0 when no
 * signal was pending. Never blocks. */
int pa_fdsem_try(pa_fdsem *f) {
    pa_assert(f);

    flush(f);

    return pa_atomic_cmpxchg(&f->data->signalled, 1, 0) ? 1 : 0;
}
/* Finish a poll() cycle started with pa_fdsem_before_poll().
 *
 * Drops the waiter registration taken in before_poll(), drains the fd,
 * and reports whether a signal arrived: returns 1 if the semaphore was
 * signalled (signal consumed), 0 otherwise. */
int pa_fdsem_after_poll(pa_fdsem *f) {
    pa_assert(f);

    /* Undo the waiter registration from pa_fdsem_before_poll(). */
    pa_assert_se(pa_atomic_dec(&f->data->waiting) >= 1);

    flush(f);

    return pa_atomic_cmpxchg(&f->data->signalled, 1, 0) ? 1 : 0;
}
/* Lock free push to linked list stack.
 *
 * The list head stores a packed value: the low bits (index_mask) are
 * the element's index into flist->table, the high bits (tag_mask) are
 * a monotonically increasing tag that guards against the ABA problem
 * in the concurrent pop path. */
static void stack_push(pa_flist *flist, pa_atomic_t *list, pa_flist_elem *new_elem) {
    int tag, newindex, next;
    pa_assert(list);

    /* Take a fresh tag for this push so a concurrent popper can tell
     * this head value apart from an older one with the same index. */
    tag = pa_atomic_inc(&flist->current_tag);
    newindex = new_elem - flist->table;
    pa_assert(newindex >= 0 && newindex < (int) flist->size);
    newindex |= (tag << flist->tag_shift) & flist->tag_mask;

    /* Classic CAS push loop: link to the current head, then try to
     * install ourselves as the new head; retry if the head moved. */
    do {
        next = pa_atomic_load(list);
        pa_atomic_store(&new_elem->next, next);
    } while (!pa_atomic_cmpxchg(list, next, newindex));
}
/* Lock free pop from linked list stack.
 *
 * Returns the popped element, or NULL if the stack is empty (head < 0).
 * The tag bits embedded in the head value (see stack_push) make the
 * CAS fail if another thread popped and re-pushed the same element in
 * between our load and the cmpxchg, mitigating the ABA problem. */
static pa_flist_elem *stack_pop(pa_flist *flist, pa_atomic_t *list) {
    pa_flist_elem *popped;
    int idx;
    pa_assert(list);

    do {
        idx = pa_atomic_load(list);

        /* A negative head value marks an empty stack. */
        if (idx < 0)
            return NULL;

        /* Mask off the tag bits to recover the table index. */
        popped = &flist->table[idx & flist->index_mask];

        /* Swing the head to popped->next; the tag in idx detects a
         * concurrent pop/re-push of the same slot and forces a retry. */
    } while (!pa_atomic_cmpxchg(list, idx, pa_atomic_load(&popped->next)));

    return popped;
}
/* Swap the active data set of an atomic-update object from the writer
 * side.
 *
 * read_lock packs a reader counter in its low bits (COUNTER) and the
 * which-copy selector in its MSB (WHICH). The writer spins: while
 * readers are active it sleeps on the semaphore (posted by the last
 * reader to leave); once the counter hits zero it atomically flips the
 * MSB so new readers see the other copy.
 *
 * Returns the index (0 or 1) of the copy the writer may now modify. */
unsigned pa_aupdate_write_swap(pa_aupdate *a) {
    unsigned n;

    pa_assert(a);

    for (;;) {
        n = (unsigned) pa_atomic_load(&a->read_lock);

        /* If the read counter is > 0 wait; if it is 0 try to swap the lists */
        if (COUNTER(n) > 0)
            pa_semaphore_wait(a->semaphore);
        else if (pa_atomic_cmpxchg(&a->read_lock, (int) n, (int) (n ^ MSB)))
            break;
    }

    /* Remember that a swap happened for pa_aupdate_write_end(). */
    a->swapped = true;

    return WHICH(n);
}
/* Post (signal) the fdsem.
 *
 * Sets the 'signalled' flag; if the flag was already set this is a
 * no-op (posts do not accumulate). The fd is only written to when a
 * waiter is registered, avoiding syscalls on the uncontended path.
 * Writes are retried on EINTR; any other write failure is fatal. */
void pa_fdsem_post(pa_fdsem *f) {
    pa_assert(f);

    /* Only the thread that transitions signalled 0 -> 1 does the wakeup. */
    if (pa_atomic_cmpxchg(&f->data->signalled, 0, 1)) {

        /* Skip the fd write entirely when nobody is waiting. */
        if (pa_atomic_load(&f->data->waiting)) {
            ssize_t r;
            char x = 'x';

            /* Record the pending byte/counter so flush() can drain it. */
            pa_atomic_inc(&f->data->in_pipe);

            for (;;) {

#ifdef HAVE_SYS_EVENTFD_H
                if (f->efd >= 0) {
                    uint64_t u = 1;

                    if ((r = pa_write(f->efd, &u, sizeof(u), NULL)) != sizeof(u)) {

                        /* Only EINTR is recoverable; anything else is fatal. */
                        if (r >= 0 || errno != EINTR) {
                            pa_log_error("Invalid write to eventfd: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                            pa_assert_not_reached();
                        }

                        continue;
                    }
                } else
#endif

                if ((r = pa_write(f->fds[1], &x, 1, NULL)) != 1) {

                    /* Only EINTR is recoverable; anything else is fatal. */
                    if (r >= 0 || errno != EINTR) {
                        pa_log_error("Invalid write to pipe: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
                        pa_assert_not_reached();
                    }

                    continue;
                }

                break;
            }
        }
    }
}