/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}
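/*
 * The retry pattern above generalizes to any "monotonic maximum" update.
 * A minimal sketch (atomic64_set_max is a hypothetical helper name, not a
 * kernel API): instead of re-reading after a failed cmpxchg, it reuses the
 * value the cmpxchg returned, saving one atomic read per retry.
 */
static inline void atomic64_set_max(atomic64_t *v, u64 new_val)
{
	u64 cur = atomic64_read(v);

	while (new_val > cur) {
		u64 prev = atomic64_cmpxchg(v, cur, new_val);

		if (prev == cur)
			break;		/* our value was installed */
		cur = prev;	/* lost the race: re-check against the winner */
	}
}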
/* File-scope state from arch/x86/kernel/pvclock.c, needed below. */
static u8 valid_flags __read_mostly = 0;
static atomic64_t last_value = ATOMIC64_INIT(0);

cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
{
	struct pvclock_shadow_time shadow;
	unsigned version;
	cycle_t ret, offset;
	u64 last;

	do {
		version = pvclock_get_time_values(&shadow, src);
		barrier();
		offset = pvclock_get_nsec_offset(&shadow);
		ret = shadow.system_timestamp + offset;
		barrier();
	} while (version != src->version);

	if ((valid_flags & PVCLOCK_TSC_STABLE_BIT) &&
		(shadow.flags & PVCLOCK_TSC_STABLE_BIT))
		return ret;

	/*
	 * Assumption here is that last_value, a global accumulator, always goes
	 * forward. If we are less than that, we should not be much smaller.
	 * We assume there is an error margin we're inside, and that the
	 * correction does not sacrifice accuracy.
	 *
	 * For reads: the global may have changed between test and return,
	 * but this means someone else poked the clock at a later time.
	 * We just need to make sure we are not seeing a backwards event.
	 *
	 * For updates: last_value = ret is not enough, since two vcpus could be
	 * updating at the same time, and one of them could be slightly behind,
	 * making the assumption that last_value always goes forward fail to hold.
	 */
	last = atomic64_read(&last_value);
	do {
		if (ret < last)
			return last;
		last = atomic64_cmpxchg(&last_value, last, ret);
	} while (unlikely(last != ret));

	return ret;
}
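/*
 * A user-space illustration of the forward-only clamp above, using C11
 * atomics (hypothetical helper, not kernel code): once any caller has been
 * handed a value, no later caller ever sees a smaller one.
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t last_seen;

static uint64_t clamp_forward(uint64_t ret)
{
	uint64_t last = atomic_load(&last_seen);

	do {
		if (ret < last)
			return last;	/* someone already published a later time */
	} while (!atomic_compare_exchange_weak(&last_seen, &last, ret));

	return ret;	/* we published ret as the new maximum */
}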
/*
 * Reset both the value under test and the reference value; macro from
 * lib/atomic64_test.c, used throughout the test below.
 */
#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)

static __init int test_atomic64(void)
{
	long long v0 = 0xaaa31337c001d00dLL;
	long long v1 = 0xdeadbeefdeafcafeLL;
	long long v2 = 0xfaceabadf00df001LL;
	long long onestwos = 0x1111111122222222LL;
	long long one = 1LL;

	atomic64_t v = ATOMIC64_INIT(v0);
	long long r = v0;
	BUG_ON(v.counter != r);

	atomic64_set(&v, v1);
	r = v1;
	BUG_ON(v.counter != r);
	BUG_ON(atomic64_read(&v) != r);

	INIT(v0);
	atomic64_add(onestwos, &v);
	r += onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_add(-one, &v);
	r += -one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r += onestwos;
	BUG_ON(atomic64_add_return(onestwos, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	r += -one;
	BUG_ON(atomic64_add_return(-one, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_sub(onestwos, &v);
	r -= onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_sub(-one, &v);
	r -= -one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= onestwos;
	BUG_ON(atomic64_sub_return(onestwos, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= -one;
	BUG_ON(atomic64_sub_return(-one, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_inc(&v);
	r += one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r += one;
	BUG_ON(atomic64_inc_return(&v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_dec(&v);
	r -= one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= one;
	BUG_ON(atomic64_dec_return(&v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_xchg(&v, v1) != v0);
	r = v1;
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
	r = v1;
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_add_unless(&v, one, v0));
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(!atomic64_add_unless(&v, one, v1));
	r += one;
	BUG_ON(v.counter != r);

#ifdef CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
	INIT(onestwos);
	BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
	r -= one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_dec_if_positive(&v) != -one);
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
	BUG_ON(v.counter != r);
#else
#warning Please implement atomic64_dec_if_positive for your architecture and select the above Kconfig symbol
#endif

	INIT(onestwos);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_inc_not_zero(&v));
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);

#ifdef CONFIG_X86
	pr_info("passed for %s platform %s CX8 and %s SSE\n",
#ifdef CONFIG_X86_64
		"x86-64",
#elif defined(CONFIG_X86_CMPXCHG64)
		"i586+",
#else
		"i386+",
#endif
		boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
		boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
#else
	pr_info("passed\n");
#endif
	return 0;
}
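/*
 * For architectures hitting the #warning above, a minimal generic sketch of
 * atomic64_dec_if_positive() built on atomic64_cmpxchg() (the name matches
 * the API the test expects; the body is an illustration, not the upstream
 * implementation): decrement only if the result stays non-negative, and
 * return the would-be result either way so callers can test for < 0.
 */
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long old = atomic64_read(v);
	long long new, prev;

	for (;;) {
		new = old - 1;
		if (new < 0)
			return new;	/* would go negative: leave *v untouched */
		prev = atomic64_cmpxchg(v, old, new);
		if (prev == old)
			return new;	/* decrement committed */
		old = prev;		/* raced with another update: retry */
	}
}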
/* An earlier revision of the same self-test: the dec_if_positive coverage is
 * gated on an explicit architecture list rather than a Kconfig symbol. */
static __init int test_atomic64(void)
{
	long long v0 = 0xaaa31337c001d00dLL;
	long long v1 = 0xdeadbeefdeafcafeLL;
	long long v2 = 0xfaceabadf00df001LL;
	long long onestwos = 0x1111111122222222LL;
	long long one = 1LL;

	atomic64_t v = ATOMIC64_INIT(v0);
	long long r = v0;
	BUG_ON(v.counter != r);

	atomic64_set(&v, v1);
	r = v1;
	BUG_ON(v.counter != r);
	BUG_ON(atomic64_read(&v) != r);

	INIT(v0);
	atomic64_add(onestwos, &v);
	r += onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_add(-one, &v);
	r += -one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r += onestwos;
	BUG_ON(atomic64_add_return(onestwos, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	r += -one;
	BUG_ON(atomic64_add_return(-one, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_sub(onestwos, &v);
	r -= onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_sub(-one, &v);
	r -= -one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= onestwos;
	BUG_ON(atomic64_sub_return(onestwos, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= -one;
	BUG_ON(atomic64_sub_return(-one, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_inc(&v);
	r += one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r += one;
	BUG_ON(atomic64_inc_return(&v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_dec(&v);
	r -= one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= one;
	BUG_ON(atomic64_dec_return(&v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_xchg(&v, v1) != v0);
	r = v1;
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
	r = v1;
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_add_unless(&v, one, v0));
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(!atomic64_add_unless(&v, one, v1));
	r += one;
	BUG_ON(v.counter != r);

#if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \
    defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H) || defined(CONFIG_ARM)
	INIT(onestwos);
	BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
	r -= one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_dec_if_positive(&v) != -one);
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
	BUG_ON(v.counter != r);
#else
#warning Please implement atomic64_dec_if_positive for your architecture, and add it to the IF above
#endif

	INIT(onestwos);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_inc_not_zero(&v));
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);

#ifdef CONFIG_X86
	printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n",
#ifdef CONFIG_X86_64
	       "x86-64",
#elif defined(CONFIG_X86_CMPXCHG64)
	       "i586+",
#else
	       "i386+",
#endif
	       boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
	       boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
#else
	printk(KERN_INFO "atomic64 test passed\n");
#endif
	return 0;
}
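/*
 * How the self-test gets run: in the upstream tree (lib/atomic64_test.c,
 * built when CONFIG_ATOMIC64_SELFTEST is enabled) the function is registered
 * as a boot-time initcall along these lines; the exact initcall level is an
 * assumption here.
 */
core_initcall(test_atomic64);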