//include "linux/dmapool.h" void dma_pool_destroy(struct dma_pool *pool) { #ifdef RULE_ID0029 /* We may destroy either created pool or pool that has releasing * memory. */ ldv_assert(pool_state == 2 || pool_state == 4); ldv_assert(pool == the_pool); #endif pool_state = 1; }
//include "linux/dmapool.h" //"linux/pci.h" - pci_pool_free is alias to dma_pool_free void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr) { #ifdef RULE_ID0029 /* Releasing of memory is possible just when some pool memory * was allocated. */ ldv_assert(pool_state == 3); ldv_assert(pool == the_pool); ldv_assert(vaddr == the_qtd); #endif pool_state = 4; }
void module_exit() { void *status; //race pdev = 4; ldv_assert(pdev==4); pthread_join(t1, &status); pthread_mutex_destroy(&mutex); //not a race pdev = 5; ldv_assert(pdev==5); }
//include "linux/dmapool.h" //"linux/pci.h" - pci_pool_alloc is alias to dma_pool_alloc void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) { //void *dma_pool_alloc(struct dma_pool *pool, int mem_flags, dma_addr_t *handle) { #ifdef RULE_ID0029 /* Allocation of memory is possible in either created pool or * already allocated pool. */ ldv_assert(pool_state == 2 || pool_state == 3); ldv_assert(pool == the_pool); #endif the_qtd = ldv_undef_ptr(); if(the_qtd) pool_state = 3; return the_qtd; }
int module_init() { pthread_mutex_init(&mutex, NULL); //not a race pdev = 1; ldv_assert(pdev==1); if(__VERIFIER_nondet_int()) { pthread_create(&t1, NULL, thread_usb, NULL); return 0; } //not a race pdev = 3; ldv_assert(pdev==3); pthread_mutex_destroy(&mutex); return -1; }
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='mutex_lock_killable') Check that the mutex was unlocked and nondeterministically lock it. Return the corresponding error code on fails*/
int __must_check mutex_lock_killable(struct mutex *lock)
{
	int acquired;

	/* LDV_COMMENT_ASSERT Mutex must be unlocked*/
	ldv_assert(ldv_mutex == 1);
	/* LDV_COMMENT_OTHER Nondeterministically decide whether the lock is taken*/
	acquired = ldv_undef_int();
	if (!acquired) {
		/* LDV_COMMENT_RETURN Finish with the fail. The mutex is kept unlocked*/
		return -EINTR;
	}
	/* LDV_COMMENT_CHANGE_STATE Lock the mutex*/
	ldv_mutex = 2;
	/* LDV_COMMENT_RETURN Finish with success*/
	return 0;
}
/*
 * Model of drm_gem_object_unreference(): NULL objects are ignored;
 * otherwise the lock must be held (model state 1) when the
 * reference is dropped.
 */
void drm_gem_object_unreference(struct drm_gem_object *obj)
{
	if (obj == NULL)
		return;
	ldv_assert(ldv_lock == 1);
	//kref_put(&obj->refcount, drm_gem_object_free);
}
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock') Releases the lock and checks that lock was acquired before*/
void ldv_spin_unlock(spinlock_t *lock)
{
	/* LDV_COMMENT_ASSERT Lock should be in a locked state (anything but 1)*/
	ldv_assert(ldv_lock != 1);
	/* LDV_COMMENT_CHANGE_STATE Goto free state*/
	ldv_lock = 1;
}
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_irqrestore') Releases the lock and checks that lock was acquired before*/
void ldv_spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	/* LDV_COMMENT_ASSERT Lock should be in a locked state (anything but 1)*/
	ldv_assert(ldv_lock != 1);
	/* LDV_COMMENT_CHANGE_STATE Goto free state; flags are not modelled*/
	ldv_lock = 1;
}
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='mutex_unlock') Check that the mutex was locked and unlock it*/
void mutex_unlock_TEMPLATE(struct mutex *lock)
{
	/* LDV_COMMENT_ASSERT Mutex must be locked*/
	ldv_assert(ldv_mutex_TEMPLATE == 2);
	/* LDV_COMMENT_CHANGE_STATE Unlock the mutex*/
	ldv_mutex_TEMPLATE = 1;
}
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='mutex_lock') Check that the mutex was not locked and lock it*/ void mutex_lock(struct mutex *lock) { /* LDV_COMMENT_ASSERT This must not be done in interrupting*/ //ldv_assert(LDV_IN_INTERRUPT == 1); /* LDV_COMMENT_ASSERT Mutex must be unlocked*/ ldv_assert(ldv_mutex == 1); /* LDV_COMMENT_CHANGE_STATE Lock the mutex*/ ldv_mutex = 2; }
/*
 * Model of mutex_trylock(): nondeterministically succeed or fail.
 * On the success path the lock must currently be free (state 0) and
 * becomes held (state 1); on the failure path nothing changes.
 * Returns 1 on success, 0 on failure, matching the kernel API.
 */
int mutex_trylock(struct mutex *lock)
{
	if (ldv_nondet_int()) {
		ldv_assert(ldv_lock == 0);
		ldv_lock = 1;
		return 1;
	}
	return 0;
}
//include "linux/dmapool.h" //"linux/pci.h" - pci_pool_create is alias to dma_pool_create struct dma_pool *dma_pool_create(const char *name, struct device *dev, size_t size, size_t align, size_t allocation) { #ifdef RULE_ID0029 /* To create pool we need its state must be absent or destroyed. */ ldv_assert(pool_state == 1); #endif the_pool = ldv_undef_ptr(); if(the_pool) pool_state = 2; return the_pool; }
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_irqsave') Acquires the lock and checks for double spin lock*/
unsigned long ldv_spin_lock_irqsave(spinlock_t *lock)
{
	/* LDV_COMMENT_ASSERT Lock should be in a free state*/
	ldv_assert(ldv_lock == 1);
	/* LDV_COMMENT_CHANGE_STATE Goto locked state*/
	ldv_lock = 2;
	/* LDV_COMMENT_RETURN Return an arbitrary saved-irq-flags value*/
	return ldv_undef_ulong();
}
//[[thread usb]] void *thread_usb(void *arg) { ldv_usb_state = 0; int probe_ret; while(1) { switch(__VERIFIER_nondet_int()) { case 0: if(ldv_usb_state==0) { probe_ret = ath_ahb_probe(); if(probe_ret!=0) goto exit_thread_usb; ldv_usb_state = 1; //race pdev = 7; ldv_assert(pdev==7); } break; case 1: if(ldv_usb_state==1) { ath_ahb_disconnect(); ldv_usb_state=0; //not a race pdev = 8; ldv_assert(pdev==8); } break; case 2: if(ldv_usb_state==0) { goto exit_thread_usb; } break; } } exit_thread_usb: //not a race pdev = 9; ldv_assert(pdev==9); return 0; }
/*
 * Model of mutex_lock_interruptible() for the fw_lock mutex: the
 * mutex must be unlocked; nondeterministically acquire it (return 0)
 * or fail as if interrupted (return -4, leaving it unlocked).
 */
int ldv_mutex_lock_interruptible_fw_lock(struct mutex *lock)
{
	ldv_assert(ldv_mutex_fw_lock == 1);
	if (__VERIFIER_nondet_int()) {
		ldv_mutex_fw_lock = 2;
		/* LDV_COMMENT_RETURN Finish with success */
		return 0;
	}
	/* LDV_COMMENT_RETURN Finish with fail. Mutex 'lock_of_NOT_ARG_SIGN' is keeped unlocked */
	return -4;
}
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='atomic_dec_and_mutex_lock') Lock if atomic decrement result is zero */
int atomic_dec_and_mutex_lock_TEMPLATE(atomic_t *cnt, struct mutex *lock)
{
	int remaining;

	/* LDV_COMMENT_ASSERT Mutex must be unlocked (since we may lock it in this function) */
	ldv_assert(ldv_mutex_TEMPLATE == 1);
	/* LDV_COMMENT_OTHER Nondeterministic result of the atomic decrement */
	remaining = ldv_undef_int();
	if (remaining != 0) {
		/* LDV_COMMENT_RETURN Counter not yet zero: return 0 without locking */
		return 0;
	}
	/* LDV_COMMENT_CHANGE_STATE Counter hit zero: lock the mutex */
	ldv_mutex_TEMPLATE = 2;
	/* LDV_COMMENT_RETURN Return 1 with the mutex held */
	return 1;
}
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='mutex_lock_interruptible') Check that the mutex was unlocked and nondeterministically lock it. Return the corresponding error code on fails*/
int mutex_lock_interruptible_TEMPLATE(struct mutex *lock)
{
	int acquired;

	/* LDV_COMMENT_ASSERT Mutex must be unlocked*/
	ldv_assert(ldv_mutex_TEMPLATE == 1);
	/* LDV_COMMENT_OTHER Nondeterministically decide whether the lock is taken*/
	acquired = ldv_undef_int();
	if (!acquired) {
		/* LDV_COMMENT_RETURN Finish with the fail. The mutex is kept unlocked*/
		return -EINTR;
	}
	/* LDV_COMMENT_CHANGE_STATE Lock the mutex*/
	ldv_mutex_TEMPLATE = 2;
	/* LDV_COMMENT_RETURN Finish with success*/
	return 0;
}
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='mutex_trylock') Check that the mutex was not locked and nondeterministically lock it. Return 0 on fails*/
int mutex_trylock_TEMPLATE(struct mutex *lock)
{
	int contended;

	/* LDV_COMMENT_ASSERT Mutex must be unlocked*/
	ldv_assert(ldv_mutex_TEMPLATE == 1);
	/* LDV_COMMENT_OTHER Nondeterministically model contention by another thread*/
	contended = ldv_undef_int();
	if (contended) {
		/* LDV_COMMENT_RETURN Finish with fail*/
		return 0;
	}
	/* LDV_COMMENT_CHANGE_STATE Lock the mutex*/
	ldv_mutex_TEMPLATE = 2;
	/* LDV_COMMENT_RETURN Finish with success*/
	return 1;
}
/* Model of mutex_unlock() for the fw_lock mutex: it must currently
 * be held (state 2); release it back to the unlocked state (1). */
void ldv_mutex_unlock_fw_lock(struct mutex *lock)
{
	ldv_assert(ldv_mutex_fw_lock == 2);
	ldv_mutex_fw_lock = 1;
}
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='check_final_state') Check that the mutex is unlocked at the end*/
void ldv_check_final_state_TEMPLATE(void)
{
	/* LDV_COMMENT_ASSERT Leaving the mutex locked at exit is an error*/
	ldv_assert(ldv_mutex_TEMPLATE == 1);
}
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='check_final_state') Checks that all locks were released*/
void ldv_check_final_state(void)
{
	/* LDV_COMMENT_ASSERT Leaving the spinlock held at exit is an error*/
	ldv_assert(ldv_lock == 1);
}
/* Model of mutex_unlock(): the lock must be held (non-zero);
 * release it back to the free state (0). */
void mutex_unlock(struct mutex *lock)
{
	ldv_assert(ldv_lock != 0);
	ldv_lock = 0;
}
/* Model of mutex_lock(): the lock must be free (0) — a double
 * lock is an error — then it becomes held (1). */
void mutex_lock(struct mutex *lock)
{
	ldv_assert(ldv_lock == 0);
	ldv_lock = 1;
}
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Checks for usage of the same lock in different contexts.*/
void ldv_check_final_state_TEMPLATE(void)
{
	/* LDV_COMMENT_ASSERT It is an error if the same lock was taken both in
	   interrupt context and in process context; in the latter case
	   interrupts (maybe just one line) should be disabled.*/
	ldv_assert(ldv_lock_in_interrupt_flag_TEMPLATE == 0
		   || ldv_lock_in_process_flag_TEMPLATE == 0);
}
/* Updates pdev under the mutex, so this access is race-free. */
void ath9k_flush(void)
{
	pthread_mutex_lock(&mutex);
	pdev = 6;
	ldv_assert(pdev == 6);
	pthread_mutex_unlock(&mutex);
}