/*
 * QRCU reader thread #3 of a quick-RCU verification model.
 * rcu_read_lock: nondeterministically pick one of the two grace-period
 * counters (ctr1/ctr2) that matches the snapshot of the shared index `idx`,
 * and increment it atomically.  The SATABS build uses CPROVER atomic
 * sections; other builds use __blockattribute__((atomic)) blocks.
 * The reader then advances readerprogress3 through states 1 and 2 and
 * finally decrements the counter it took (rcu_read_unlock).
 */
void* qrcu_reader3() {
  int myidx;
  /* rcu_read_lock */
  while (1) {
    /* snapshot the shared index; the assume() below forces the branch to
       agree with this snapshot, so only consistent interleavings survive */
    myidx = idx;
    if (NONDET) {
#ifdef SATABS
      {
      __CPROVER_atomic_begin();
#else
      { __blockattribute__((atomic))
#endif
      assume(myidx <= 0);
      assume(ctr1>0);
      ctr1++; /* register as a reader on counter 1 */
#ifdef SATABS
      __CPROVER_atomic_end();
      }
#else
      }
#endif
      break;
    } else {
      if (NONDET) {
#ifdef SATABS
        {
        __CPROVER_atomic_begin();
#else
        { __blockattribute__((atomic))
#endif
        assume(myidx > 0);
        assume(ctr2>0);
        ctr2++; /* register as a reader on counter 2 */
#ifdef SATABS
        __CPROVER_atomic_end();
        }
#else
        }
#endif
        break;
      } else {}
      /* neither branch taken: retry the loop with a fresh snapshot */
    }
  }
  /* read-side critical section, visible to the checker as two phases */
  readerprogress3 = 1; /*** readerprogress[me] = 1; ***/
  readerprogress3 = 2; /*** readerprogress[me] = 2 ***/
  /* rcu_read_unlock */
#ifdef SATABS
  {
  __CPROVER_atomic_begin();
#else
  { __blockattribute__((atomic))
#endif
  /* release the same counter that was taken above */
  if (myidx <= 0) { ctr1--; } // use ctr1
  else { ctr2--; } // use ctr2
#ifdef SATABS
  __CPROVER_atomic_end();
  }
#else
  }
#endif
}
/*
 * Reader/writer thread of a driver model: acquires the mutex `mtx` (modelled
 * as a blocking assume on mtx==0), asserts that the device is not in state 3
 * while the lock is held, releases the lock, and signals completion via
 * __rw_thread_finished.
 *
 * Fix: the function is declared `int` but had no return statement, which is
 * undefined behaviour if the caller uses the value; it now returns 0.
 */
int rw_thread() {
  /* lock(mtx): block until free, then take it, all in one atomic step */
  __CPROVER_atomic_begin();
  __CPROVER_assume(mtx == 0);
  __assume_dummy=0;
  mtx = 1;
  __CPROVER_atomic_end();
  /* critical section: the checked safety property */
  __CPROVER_atomic_begin();
  assert(state != 3);
  __CPROVER_atomic_end();
  /* unlock(mtx) */
  __CPROVER_atomic_begin();
  mtx = 0;
  __CPROVER_atomic_end();
  __rw_thread_finished = 1;
  return 0;
}
/*
 * Benchmark thread: reads shared x under lock mx into `a`, then performs two
 * locked bursts of ten `y = y + a;` updates under lock my (the ten explicit
 * statements are deliberate interleaving points for the model checker — do
 * not fold them into a loop), increments `a` between the bursts, finally
 * updates x and asserts x != 47.
 *
 * Fix: declared `void *` but had no return statement (undefined behaviour if
 * the thread's return value is used); it now returns 0.
 */
void *thread1() {
  int a;
  /* lock(mx) */
  __CPROVER_atomic_begin();
  __CPROVER_assume(mx==0);
  mx=1;
  __CPROVER_atomic_end();
  a = x;
  /* lock(my) — first burst of ten updates */
  __CPROVER_atomic_begin();
  __CPROVER_assume(my==0);
  my=1;
  __CPROVER_atomic_end();
  y = y + a; y = y + a; y = y + a; y = y + a; y = y + a;
  y = y + a; y = y + a; y = y + a; y = y + a; y = y + a;
  my = 0; /* unlock(my) */
  a = a + 1;
  /* lock(my) — second burst of ten updates */
  __CPROVER_atomic_begin();
  __CPROVER_assume(my==0);
  my=1;
  __CPROVER_atomic_end();
  y = y + a; y = y + a; y = y + a; y = y + a; y = y + a;
  y = y + a; y = y + a; y = y + a; y = y + a; y = y + a;
  my = 0; /* unlock(my) */
  x = x + x + a;
  mx=0; /* unlock(mx) */
  assert(x!=47);
  return 0;
}
/*
 * CPROVER model of pthread_mutex_trylock: in one atomic step, check the
 * mutex word; if it is already 1 report failure (returns 1), otherwise take
 * the lock (set it to 1) and report success (returns 0).  A full memory
 * fence follows the attempt, matching the other lock operations.
 */
inline int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
  __CPROVER_HIDE:;
  int rc;
  __CPROVER_atomic_begin();
  __CPROVER_assert(*((__CPROVER_mutex_t *)mutex)!=-1,
    "mutex not initialised or destroyed");
  if(*((__CPROVER_mutex_t *)mutex)==1)
  {
    rc=1;   /* already held: fail without blocking */
  }
  else
  {
    *((__CPROVER_mutex_t *)mutex)=1;  /* acquire */
    rc=0;
  }
  __CPROVER_atomic_end();
  __CPROVER_fence("WWfence", "RRfence", "RWfence", "WRfence",
                  "WWcumul", "RRcumul", "RWcumul", "WRcumul");
  return rc;
}
/*
 * CPROVER model of pthread_create: atomically draws a fresh thread id from
 * the global counter, reports it through *thread when requested, touches the
 * attribute object (ignored otherwise), and hands the start routine to the
 * scheduler via __actual_thread_spawn.  Always succeeds.
 */
int pthread_create(
  pthread_t *thread,
  const pthread_attr_t *attr,
  void * (*start_routine)(void *),
  void *arg)
{
  __CPROVER_HIDE:;
  unsigned long fresh_id;
  /* id allocation must be atomic so concurrent creates get distinct ids */
  __CPROVER_atomic_begin();
  fresh_id=++__CPROVER_next_thread_id;
  __CPROVER_atomic_end();
  if(thread!=0)
  {
#ifdef __APPLE__
    /* pthread_t is a pointer type on the Mac */
    *thread=(pthread_t)fresh_id;
#else
    *thread=fresh_id;
#endif
  }
  /* attributes are not modelled, but must at least be readable */
  if(attr)
    (void)*attr;
  __actual_thread_spawn(start_routine, arg, fresh_id);
  return 0;
}
/*
 * CPROVER model of pthread_mutex_lock.
 * Default build: blocking acquisition — an atomic assume(mutex word == 0)
 * prunes all traces in which the lock is taken, then the word is set to 1;
 * a full fence follows, as in the other lock primitives.
 * Bitvector-analysis build: instead of modelling blocking, it checks the
 * init/destroy/recursion protocol via must/may sets.  NOTE(review): the
 * assertion message "non-recurisive" is misspelled in the original and is
 * kept byte-identical here, since tools may match on it.
 */
inline int pthread_mutex_lock(pthread_mutex_t *mutex)
{
  __CPROVER_HIDE:;
#ifdef __CPROVER_CUSTOM_BITVECTOR_ANALYSIS
  __CPROVER_assert(__CPROVER_get_must(mutex, "mutex-init"),
    "mutex must be initialized");
  __CPROVER_assert(!__CPROVER_get_may(mutex, "mutex-destroyed"),
    "mutex must not be destroyed");
  __CPROVER_assert(__CPROVER_get_must(mutex, "mutex-recursive") ||
                   !__CPROVER_get_may(mutex, "mutex-locked"),
    "attempt to lock non-recurisive locked mutex");
  __CPROVER_set_must(mutex, "mutex-locked");
  __CPROVER_set_may(mutex, "mutex-locked");
  __CPROVER_assert(*((__CPROVER_mutex_t *)mutex)!=-1,
    "mutex not initialised or destroyed");
#else
  /* blocking lock: only traces where the mutex is free continue here */
  __CPROVER_atomic_begin();
  __CPROVER_assume(!*((__CPROVER_mutex_t *)mutex));
  *((__CPROVER_mutex_t *)mutex)=1;
  __CPROVER_atomic_end();
  __CPROVER_fence("WWfence", "RRfence", "RWfence", "WRfence",
                  "WWcumul", "RRcumul", "RWcumul", "WRcumul");
#endif
  return 0; // we never fail
}
/*
 * Worker thread body: atomically bumps the shared counter `i` once.
 * The thread argument is unused; always returns 0.
 */
void* foo(void *arg)
{
  __CPROVER_atomic_begin();
  i = i + 1;  /* equivalent to ++i; protected by the atomic section */
  __CPROVER_atomic_end();
  return 0;
}
/*
 * Lock-free counter benchmark: decrement the shared `value` unless it is
 * already 0.  On failure returns UINT_MAX (0u-1); on success stores the new
 * value, sets dec_flag, and returns the new value.  The USE_BRANCHING_ASSUMES
 * assumes restate the branch condition to help the analysis.  The assert
 * checks the benchmark property: after a successful decrement either an
 * increment interfered (inc_flag) or value is still below the old snapshot.
 * NOTE(review): the assert deliberately runs outside the atomic section, so
 * other threads may interleave before it — that is the point of the model.
 */
unsigned NonblockingCounter__decrement__01() {
  unsigned dec_v;
  __CPROVER_atomic_begin();
  if(value == 0) {
#ifdef USE_BRANCHING_ASSUMES
    __CPROVER_assume(value == 0);
#endif
    __CPROVER_atomic_end();
    return 0u-1; /*decrement failed, return max*/
  }else{
#ifdef USE_BRANCHING_ASSUMES
    __CPROVER_assume(!(value == 0));
#endif
    dec_v = value;            /* snapshot old value */
    value = dec_v - 1;        /* publish decremented value */
    dec_flag = 1; /*set flag*/
    __CPROVER_atomic_end();
    assert(inc_flag || value < dec_v);
    return dec_v - 1;
  }
}
/*
 * CPROVER model of pthread_mutex_trylock (bitvector-analysis variant): the
 * whole test-and-set is one atomic section; returns 1 when the mutex word is
 * already 1 (lock held), otherwise sets it to 1 and returns 0.  Under
 * __CPROVER_CUSTOM_BITVECTOR_ANALYSIS it additionally checks the mutex was
 * initialised and not destroyed.  A full fence follows the attempt.
 */
inline int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
  __CPROVER_HIDE:;
  int return_value;
  __CPROVER_atomic_begin();
#ifdef __CPROVER_CUSTOM_BITVECTOR_ANALYSIS
  __CPROVER_assert(__CPROVER_get_must(mutex, "mutex-init"),
    "mutex must be initialized");
  __CPROVER_assert(*((__CPROVER_mutex_t *)mutex)!=-1,
    "mutex not initialised or destroyed");
#endif
  if(*((__CPROVER_mutex_t *)mutex)==1)
  {
    // failed
    return_value=1;
  }
  else
  {
    // ok
    return_value=0;
    *((__CPROVER_mutex_t *)mutex)=1;
  }
  __CPROVER_atomic_end();
  __CPROVER_fence("WWfence", "RRfence", "RWfence", "WRfence",
                  "WWcumul", "RRcumul", "RWcumul", "WRcumul");
  return return_value;
}
/*
 * CPROVER model of pthread_mutex_unlock (bitvector-analysis variant).
 * Default build: fence first (releases must order prior accesses before the
 * unlock becomes visible), then atomically assert the lock is held and clear
 * the mutex word.  Bitvector build: protocol checks on the must/may sets
 * instead of touching the word.  Always returns 0.
 */
inline int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
  __CPROVER_HIDE:;
#ifdef __CPROVER_CUSTOM_BITVECTOR_ANALYSIS
  __CPROVER_assert(__CPROVER_get_must(mutex, "mutex-init"),
    "mutex must be initialized");
  __CPROVER_assert(__CPROVER_get_must(mutex, "mutex-locked"),
    "mutex must be locked");
  __CPROVER_assert(!__CPROVER_get_may(mutex, "mutex-destroyed"),
    "mutex must not be destroyed");
  __CPROVER_clear_may(mutex, "mutex-locked");
#else
  // the fence must be before the unlock
  __CPROVER_fence("WWfence", "RRfence", "RWfence", "WRfence",
                  "WWcumul", "RRcumul", "RWcumul", "WRcumul");
  __CPROVER_atomic_begin();
  __CPROVER_assert(*((__CPROVER_mutex_t *)mutex)==1,
    "must hold lock upon unlock");
  *((__CPROVER_mutex_t *)mutex)=0;
  __CPROVER_atomic_end();
#endif
  return 0; // we never fail
}
/*
 * CPROVER model of write(2) over the modelled pipe array.
 * For stdin/stdout/stderr or descriptors below the pipe range, the result is
 * a nondeterministic value in [-1, nbyte] — `retval` is deliberately left
 * uninitialised so the checker treats it as nondet, constrained by the
 * assume.  For pipe descriptors: map the fd to its pipe slot (write end is
 * the even index), and atomically copy the bytes into the pipe buffer if the
 * pipe is not widowed and has room; otherwise return -1.
 */
ssize_t write(int fildes, const void *buf, size_t nbyte)
{
  __CPROVER_HIDE:;
  if((fildes>=0 && fildes<=2) || fildes < __CPROVER_pipe_offset)
  {
    /* not a modelled pipe: nondet result in [-1, nbyte] */
    ssize_t retval;
    __CPROVER_assume(retval>=-1 && retval<=(ssize_t)nbyte);
    return retval;
  }
  int retval=-1;
  fildes-=__CPROVER_pipe_offset;  /* translate fd to pipe-slot index */
  if(fildes%2==1)
    --fildes;                     /* both fds of a pipe share the even slot */
  __CPROVER_atomic_begin();
  if(!__CPROVER_pipes[fildes].widowed &&
     sizeof(__CPROVER_pipes[fildes].data) >=
       __CPROVER_pipes[fildes].next_avail+nbyte)
  {
    /* append all nbyte bytes at next_avail */
    for(size_t i=0; i<nbyte; ++i)
      __CPROVER_pipes[fildes].data[i+__CPROVER_pipes[fildes].next_avail]=
        ((char*)buf)[i];
    __CPROVER_pipes[fildes].next_avail+=nbyte;
    retval=nbyte;
  }
  __CPROVER_atomic_end();
  return retval;
}
/*
 * CPROVER model of pthread_cond_wait (simplified variant): blocks until the
 * condition variable's wakeup counter is non-zero (assume prunes all other
 * traces) and consumes one wakeup.  NOTE(review): unlike the fuller model in
 * this file, this variant does NOT release and reacquire the mutex — it only
 * reads *mutex; confirm that is intended for the benchmarks using it.
 * The bitvector-analysis build checks the mutex protocol instead.
 */
inline int pthread_cond_wait(
  pthread_cond_t *cond,
  pthread_mutex_t *mutex)
{
  __CPROVER_HIDE:
  (void)*mutex;  /* mutex is not actually released in this model */
#ifdef __CPROVER_CUSTOM_BITVECTOR_ANALYSIS
  __CPROVER_assert(__CPROVER_get_must(mutex, "mutex-init"),
    "mutex must be initialized");
  __CPROVER_assert(__CPROVER_get_must(mutex, "mutex-locked"),
    "mutex must be locked");
  __CPROVER_assert(!__CPROVER_get_may(mutex, "mutex-destroyed"),
    "mutex must not be destroyed");
  __CPROVER_clear_may(mutex, "mutex-locked");
#endif
  /* wait for a signal/broadcast: counter > 0, then take one ticket */
  __CPROVER_atomic_begin();
  __CPROVER_assume(*((unsigned *)cond));
  (*((unsigned *)cond))--;
  __CPROVER_atomic_end();
  return 0; // we never fail
}
/*
 * Lock-free counter benchmark: increment the shared `value` unless it is
 * already at UINT_MAX (0u-1).  On failure returns 0; on success publishes
 * v+1 and returns it.  The assert runs outside the atomic section, so the
 * checked property `value > v` can be violated by interleaving decrements —
 * that interference is what the benchmark explores.
 */
unsigned NonblockingCounter__increment() {
  unsigned v = 0;
  __CPROVER_atomic_begin();
  if(value == 0u-1) {
#ifdef USE_BRANCHING_ASSUMES
    __CPROVER_assume(value == 0u-1);
#endif
    __CPROVER_atomic_end();
    return 0;  /* counter saturated: increment refused */
  }else{
#ifdef USE_BRANCHING_ASSUMES
    __CPROVER_assume(!(value == 0u-1));
#endif
    v = value;       /* snapshot old value */
    value = v + 1;   /* publish incremented value */
    __CPROVER_atomic_end();
    assert(value > v);
    return v + 1;
  }
}
/*
 * CPROVER model of pipe(2): nondeterministically fail with EMFILE/ENFILE
 * (`error` is deliberately left uninitialised = nondet), otherwise atomically
 * claim the next pair of pipe descriptors, initialise the pipe slot, and
 * return the two fds.  The trailing assumes keep the new fds distinct from
 * the standard descriptors 0/1/2.
 */
int pipe(int fildes[2])
{
  __CPROVER_HIDE:;
  char error;  /* nondet: 0 = success, 1 = EMFILE, other = ENFILE */
  if(error)
  {
    errno=error==1 ? EMFILE : ENFILE;
    return -1;
  }
  __CPROVER_atomic_begin();
  /* pipe fds come in even/odd pairs; guard against index overflow */
  __CPROVER_assume(__CPROVER_pipe_offset%2==0);
  __CPROVER_assume(__CPROVER_pipe_offset<=
                   (int)(__CPROVER_pipe_offset+__CPROVER_pipe_count));
  fildes[0]=__CPROVER_pipe_offset+__CPROVER_pipe_count;      /* read end */
  fildes[1]=__CPROVER_pipe_offset+__CPROVER_pipe_count+1;    /* write end */
  __CPROVER_pipes[__CPROVER_pipe_count].widowed=0;
  __CPROVER_pipes[__CPROVER_pipe_count].next_avail=0;
  __CPROVER_pipes[__CPROVER_pipe_count].next_unread=0;
  __CPROVER_pipe_count+=2;
  __CPROVER_atomic_end();
  /* the new descriptors must not collide with stdin/stdout/stderr */
  __CPROVER_assume(fildes[0]!=0 && fildes[0]!=1 && fildes[0]!=2);
  __CPROVER_assume(fildes[1]!=0 && fildes[1]!=1 && fildes[1]!=2);
  return 0;
}
/*
 * Predicate-abstraction benchmark thread: declares the predicates the
 * abstraction should track, copies the shared `input` into a local, bumps
 * the round counter, and — once ctr reaches 2 — atomically either records
 * the first thread's value in `s` or asserts that every later thread saw
 * the same value.
 *
 * Fix: the function is declared `int` but had no return statement, which is
 * undefined behaviour if the caller uses the value; it now returns 0.
 */
int f() {
  unsigned l;
  /* predicate hints for the abstraction engine */
  __CPROVER_predicate(first);
  __CPROVER_predicate(l == input);
  __CPROVER_predicate(s == l);
  __CPROVER_predicate(ctr == 0);
  __CPROVER_predicate(ctr == 1);
  __CPROVER_predicate(ctr == 2);
  __CPROVER_predicate(ctr == 3);
  __CPROVER_predicate(ctr >= 4);
  l = input;
  ++ctr;
  /* only continue on traces where both rounds have happened */
  __CPROVER_assume(ctr == 2);
  __CPROVER_atomic_begin();
  if(first)
  {
    s = l, first = 0;   /* first thread publishes its value */
  }
  else
  {
    assert(s == l);     /* later threads must agree with it */
  }
  __CPROVER_atomic_end();
  return 0;
}
/*
 * Lock-free counter benchmark (flagged variant): increment the shared
 * `value` unless saturated at UINT_MAX (0u-1).  On failure returns 0; on
 * success publishes inc_v+1, sets inc_flag, and returns the new value.
 * The assert runs outside the atomic section: either a decrement interfered
 * (dec_flag) or value is still above the old snapshot.
 */
unsigned NonblockingCounter__increment__01() {
  unsigned inc_v = 0;
  __CPROVER_atomic_begin();
  if(value == 0u-1) {
#ifdef USE_BRANCHING_ASSUMES
    __CPROVER_assume(value == 0u-1);
#endif
    __CPROVER_atomic_end();
    return 0;  /* counter saturated: increment refused */
  }else{
#ifdef USE_BRANCHING_ASSUMES
    __CPROVER_assume(!(value == 0u-1));
#endif
    inc_v = value;        /* snapshot old value */
    value = inc_v + 1;    /* publish incremented value */
    inc_flag = 1; /*set flag*/
    __CPROVER_atomic_end();
    assert(dec_flag || value > inc_v);
    return inc_v + 1;
  }
}
/*
 * CPROVER model of pthread_cond_broadcast: saturate the wakeup counter
 * stored in the condition variable to UINT_MAX, so every pending (and
 * future) waiter can pass its assume and proceed.  Always returns 0.
 */
inline int pthread_cond_broadcast(
  pthread_cond_t *cond)
{
  __CPROVER_HIDE:
  __CPROVER_atomic_begin();
  unsigned *tickets=(unsigned *)cond;
  *tickets=(unsigned)-1;  /* UINT_MAX: release all waiters */
  __CPROVER_atomic_end();
  return 0;
}
/*
 * CPROVER model of pthread_cond_signal: add one wakeup ticket to the counter
 * stored in the condition variable; a single waiter may consume it in
 * pthread_cond_wait.  Always returns 0.
 */
inline int pthread_cond_signal(
  pthread_cond_t *cond)
{
  __CPROVER_HIDE:
  __CPROVER_atomic_begin();
  unsigned *tickets=(unsigned *)cond;
  *tickets += 1;  /* one more waiter may wake */
  __CPROVER_atomic_end();
  return 0;
}
int ioctl_thread() { int old_state; __CPROVER_atomic_begin(); __CPROVER_assume(mtx == 0); __assume_dummy=0; mtx = 1; __CPROVER_atomic_end(); __CPROVER_atomic_begin(); assert(state != 3); __CPROVER_atomic_end(); __CPROVER_atomic_begin(); old_state = state; __CPROVER_atomic_end(); __CPROVER_atomic_begin(); state = 3; __CPROVER_atomic_end(); __CPROVER_atomic_begin(); state = old_state; __CPROVER_atomic_end(); if (nondet_int()) { } else { //noReorderBegin(); __CPROVER_atomic_begin(); assert(((want_mtx == 0) | (sem == 1)) | (mtx == 0)); want_sem = 2; __CPROVER_assume(sem == 1); __assume_dummy=0; sem = 0; want_sem = 0; assert(vm_consistent); sem = 1; __CPROVER_atomic_end(); //noReorderBegin(); } __CPROVER_atomic_begin(); mtx = 0; __CPROVER_atomic_end(); __ioctl_thread_finished = 1; }
/*
 * CPROVER model of pthread_rwlock_trywrlock: in a single atomic step, fail
 * (return 1) if the lock word is non-zero (any reader or writer present),
 * otherwise mark it write-locked (value 2) and return 0.
 */
inline int pthread_rwlock_trywrlock(pthread_rwlock_t *lock)
{
  __CPROVER_HIDE:;
  signed char *word=(signed char *)lock;
  int rc;
  __CPROVER_atomic_begin();
  if(*word)
  {
    rc=1;      /* held by readers or a writer: fail without blocking */
  }
  else
  {
    *word=2;   /* 2 encodes "write-locked" */
    rc=0;
  }
  __CPROVER_atomic_end();
  return rc;
}
void X__VERIFIER_atomic_assert2(unsigned dec__v) { __CPROVER_atomic_begin(); unsigned dec__v_l=dec__v; unsigned inc_flag_l=inc_flag; unsigned value_l=value; __CPROVER_atomic_end(); __CPROVER_assert(inc_flag_l || value_l < dec__v_l, ""); }
/*
 * CPROVER model of pthread_rwlock_tryrdlock: in a single atomic step, fail
 * (return 1) if the write bit (2) is set, otherwise set the reader bit (1)
 * and return 0.  Readers are not counted individually in this model.
 */
inline int pthread_rwlock_tryrdlock(pthread_rwlock_t *lock)
{
  __CPROVER_HIDE:;
  signed char *word=(signed char *)lock;
  int rc;
  __CPROVER_atomic_begin();
  if(*word & 2)
  {
    rc=1;       /* write-locked: a read lock cannot be taken */
  }
  else
  {
    *word|=1;   /* bit 0 marks "read-locked" */
    rc=0;
  }
  __CPROVER_atomic_end();
  return rc;
}
/*
 * Atomic compare-and-swap on the shared stack pointer `top`: if top still
 * equals oldVal, replace it with newVal.  Returns whether the swap happened.
 */
_Bool inline casTop(int oldVal, int newVal)
{
  _Bool swapped=0;
  __CPROVER_atomic_begin();
  swapped=(top==oldVal);   /* compare ... */
  if(swapped)
    top=newVal;            /* ... and swap, all in one atomic step */
  __CPROVER_atomic_end();
  return swapped;
}
/*
 * Harness: __CPROVER_ASYNC_1 spawns `global=2` as an asynchronous thread;
 * the main thread then writes global=1 and asserts it inside one atomic
 * section, so the async write cannot interleave between the write and the
 * assert — the assertion is expected to hold.
 */
int main() {
  __CPROVER_ASYNC_1: global=2;  /* runs concurrently with the code below */
  __CPROVER_atomic_begin();
  global=1; // no interleaving here
  assert(global==1);
  __CPROVER_atomic_end();
}
enum lock_t TestAndSet() { enum lock_t oldValue; __CPROVER_atomic_begin(); oldValue = lock; lock = locked; __CPROVER_atomic_end(); return oldValue; }
/*
 * Benchmark thread: under lock `m`, reads the shared `seed`, draws random
 * numbers until one differs from it, stores the result back, then checks
 * the property p <= 10.
 * NOTE(review): `p = n % n` is always 0 when n != 0, and is undefined
 * behaviour (division by zero) when n == 0 — this looks like the deliberate
 * defect of the benchmark (guarded out by the NOBUG build's parameter
 * predicates); confirm before "fixing".
 */
int f(int n) {
  int r, p;
#ifdef NOBUG
  __CPROVER_parameter_predicates();
#endif
  /* lock(m) */
  __CPROVER_atomic_begin();
  __CPROVER_assume(m==0);
  m = 1;
  __CPROVER_atomic_end();
  r = seed;
  /* redraw until the random value differs from the current seed */
  do n = rand(); while(n == r);
  seed = n;
  /* unlock(m) */
  __CPROVER_atomic_begin();
  __CPROVER_assume(m==1);
  m = 0;
  __CPROVER_atomic_end();
  p = n % n;  /* 0 whenever n != 0; UB if n == 0 — see NOTE above */
  assert(p <= 10);
  return p;
}
/*
 * CPROVER model of pthread_rwlock_wrlock (blocking): assert the lock object
 * is valid, block until the lock word is zero (assume prunes other traces),
 * then mark it write-locked.  Always returns 0.
 */
inline int pthread_rwlock_wrlock(pthread_rwlock_t *lock)
{
  __CPROVER_HIDE:;
  signed char *word=(signed char *)lock;
  __CPROVER_atomic_begin();
  __CPROVER_assert(*word!=-1, "lock not initialised or destroyed");
  __CPROVER_assume(!*word);  /* block until completely free */
  *word=2;                   /* 2 encodes "write-locked" */
  __CPROVER_atomic_end();
  return 0; // we never fail
}
/*
 * CPROVER model of pthread_cond_wait (full variant): release the mutex,
 * block until the condition variable's wakeup counter is non-zero and
 * consume one ticket, then reacquire the mutex.  Always returns 0.
 */
inline int pthread_cond_wait(
  pthread_cond_t *cond,
  pthread_mutex_t *mutex)
{
  __CPROVER_HIDE:
  /* step 1: give up the mutex before blocking */
  pthread_mutex_unlock(mutex);
  /* step 2: wait for a signal/broadcast and consume one wakeup */
  __CPROVER_atomic_begin();
  unsigned *tickets=(unsigned *)cond;
  __CPROVER_assume(*tickets);
  --(*tickets);
  __CPROVER_atomic_end();
  /* step 3: take the mutex back before returning to the caller */
  pthread_mutex_lock(mutex);
  return 0; // we never fail
}
/*
 * CPROVER model of pthread_spin_lock: block until the spinlock word is zero
 * (assume prunes all traces where it is taken), set it to 1 in the same
 * atomic step, then issue a full fence.  Always returns 0.
 */
int pthread_spin_lock(pthread_spinlock_t *lock)
{
  __CPROVER_HIDE:;
  unsigned *word=(unsigned *)lock;
  __CPROVER_atomic_begin();
  __CPROVER_assume(*word==0);  /* spin (modelled as a blocking assume) */
  *word=1;                     /* acquire */
  __CPROVER_atomic_end();
  __CPROVER_fence("WWfence", "RRfence", "RWfence", "WRfence",
                  "WWcumul", "RRcumul", "RWcumul", "WRcumul");
  return 0;
}
/*
 * CPROVER model of pthread_mutex_unlock: issue the full fence first (release
 * ordering — prior accesses must be visible before the unlock is), then
 * atomically assert the lock is held and clear the mutex word.
 * Always returns 0.
 */
inline int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
  __CPROVER_HIDE:;
  __CPROVER_mutex_t *word=(__CPROVER_mutex_t *)mutex;
  // the fence must be before the unlock
  __CPROVER_fence("WWfence", "RRfence", "RWfence", "WRfence",
                  "WWcumul", "RRcumul", "RWcumul", "WRcumul");
  __CPROVER_atomic_begin();
  __CPROVER_assert(*word==1, "must hold lock upon unlock");
  *word=0;
  __CPROVER_atomic_end();
  return 0; // we never fail
}