/*
 * Atomically decrement the 64-bit value at *addr and return the new
 * (post-decrement) value.  Implemented by delegating to
 * atomic_add_64_nv() with a delta of -1.
 */
uint64_t
atomic_dec_64_nv(volatile uint64_t *addr)
{
	uint64_t nv;

	nv = atomic_add_64_nv(addr, -1);
	return nv;
}
/*
 * Worker thread body for the splat atomic regression test.  Each thread
 * is handed an atomic_priv_t describing which atomic primitive (op) it
 * should hammer on the shared counter ap->ap_atomic.  The thread first
 * performs a startup handshake with its spawner, then loops applying its
 * assigned op, periodically sleeping to shuffle thread interleaving, and
 * finally records its exit and wakes any waiter before terminating.
 */
static void
splat_atomic_work(void *priv)
{
	atomic_priv_t *ap;
	atomic_op_t op;
	int i;

	ap = (atomic_priv_t *)priv;
	ASSERT(ap->ap_magic == SPLAT_ATOMIC_TEST_MAGIC);

	/*
	 * Startup handshake: read our assigned op under the lock, then
	 * wake the spawner, which is presumably blocked on ap_waitq until
	 * this thread has started — the lock/read/wake ordering matters.
	 * (NOTE(review): spawner-side protocol not visible here; confirm.)
	 */
	spin_lock(&ap->ap_lock);
	op = ap->ap_op;
	wake_up(&ap->ap_waitq);
	spin_unlock(&ap->ap_lock);

	splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
	    "Thread %d successfully started: %lu/%lu\n", op,
	    (long unsigned)ap->ap_atomic,
	    (long unsigned)ap->ap_atomic_exited);

	for (i = 0; i < SPLAT_ATOMIC_INIT_VALUE / 10; i++) {

		/* Periodically sleep to mix up the ordering */
		if ((i % (SPLAT_ATOMIC_INIT_VALUE / 100)) == 0) {
			splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
			    "Thread %d sleeping: %lu/%lu\n", op,
			    (long unsigned)ap->ap_atomic,
			    (long unsigned)ap->ap_atomic_exited);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 100);
		}

		/* Apply this thread's assigned primitive to the shared
		 * counter; the *_nv variants' return values are ignored
		 * here — only the side effect on ap_atomic is tested. */
		switch (op) {
		case SPLAT_ATOMIC_INC_64:
			atomic_inc_64(&ap->ap_atomic);
			break;
		case SPLAT_ATOMIC_DEC_64:
			atomic_dec_64(&ap->ap_atomic);
			break;
		case SPLAT_ATOMIC_ADD_64:
			atomic_add_64(&ap->ap_atomic, 3);
			break;
		case SPLAT_ATOMIC_SUB_64:
			atomic_sub_64(&ap->ap_atomic, 3);
			break;
		case SPLAT_ATOMIC_ADD_64_NV:
			atomic_add_64_nv(&ap->ap_atomic, 5);
			break;
		case SPLAT_ATOMIC_SUB_64_NV:
			atomic_sub_64_nv(&ap->ap_atomic, 5);
			break;
		default:
			PANIC("Undefined op %d\n", op);
		}
	}

	/* Mark this worker as done and wake anyone waiting for all
	 * workers to exit; thread_exit() does not return. */
	atomic_inc_64(&ap->ap_atomic_exited);
	splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
	    "Thread %d successfully exited: %lu/%lu\n", op,
	    (long unsigned)ap->ap_atomic,
	    (long unsigned)ap->ap_atomic_exited);
	wake_up(&ap->ap_waitq);
	thread_exit();
}
/*
 * Generic-looking wrapper around atomic_add_64_nv(): adds val to *ptr
 * and returns the new value.
 *
 * NOTE(review): this is a C++ template in what otherwise appears to be
 * a C translation unit — confirm this file is actually compiled as C++.
 * In practice only T = uint64_t can instantiate, since T* must convert
 * to the volatile uint64_t * that atomic_add_64_nv() takes.
 */
template <typename T>
static T
add_nv(T *ptr, T val)
{
	T nv = atomic_add_64_nv(ptr, val);
	return nv;
}