int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	try_init_preload();

	/*
	 * debug_check_no_locks_freed() takes a base pointer and a length;
	 * "rwlock + sizeof(*rwlock)" would pass a pointer where a size is
	 * expected, so pass sizeof(*rwlock) directly.
	 */
	debug_check_no_locks_freed(rwlock, sizeof(*rwlock));
	__del_lock(__get_lock(rwlock));
	return ll_pthread_rwlock_destroy(rwlock);
}

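liblockdep delivers this check to ordinary pthreads programs via LD_PRELOAD; destroying a lock that is still held should produce a lockdep report. A minimal, intentionally buggy demo (run under something like LD_PRELOAD=liblockdep.so; the exact library name and report format depend on the liblockdep build):

#include <pthread.h>

/*
 * Deliberate bug: destroy an rwlock while still holding it. Under the
 * liblockdep preload, pthread_rwlock_destroy() runs
 * debug_check_no_locks_freed() and should report the held lock.
 */
int main(void)
{
	pthread_rwlock_t lock;

	pthread_rwlock_init(&lock, NULL);
	pthread_rwlock_rdlock(&lock);
	pthread_rwlock_destroy(&lock);	/* lockdep should complain here */
	return 0;
}
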
void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lock->name = name;
}

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
	return init_srcu_struct_fields(sp);
}

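Callers normally never pass a lock_class_key by hand. With CONFIG_DEBUG_LOCK_ALLOC enabled, the public init_srcu_struct() entry point is a wrapper macro that mints one static key per call site, roughly as sketched below (reconstructed from include/linux/srcu.h; treat it as a sketch, and note that without lockdep init_srcu_struct() is a plain function):

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * One static lock_class_key per init_srcu_struct() call site, so lockdep
 * gives each srcu_struct initialized there its own lock class.
 */
# define init_srcu_struct(sp)					\
({								\
	static struct lock_class_key __srcu_key;		\
								\
	__init_srcu_struct((sp), #sp, &__srcu_key);		\
})
#endif
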
void debug_mutex_init(struct mutex *lock, const char *name,
		      struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->magic = lock;
}

/*
 * struct mutex functions
 */
void __mutex_init(struct mutex *lock, char *name, struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	__rt_mutex_init(&lock->lock, name);
}

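The name and key arguments again come from a wrapping macro; the stock mutex_init() looks roughly like the sketch below (reconstructed from include/linux/mutex.h, not guaranteed verbatim for any one kernel version). The rwsem, rwlock and spinlock initializers in the snippets that follow use the same static-key pattern.

#define mutex_init(mutex)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__mutex_init((mutex), #mutex, &__key);			\
} while (0)
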
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->activity = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
	lockdep_init_map(&rwlock->dep_map, name, key);
#endif
	__rt_mutex_init(&rwlock->lock, name);
	rwlock->read_depth = 0;
}

int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	try_init_preload();

	/*
	 * Let's see if we're releasing a lock that's held.
	 *
	 * TODO: Hook into free() and add that check there as well.
	 *
	 * As above, the second argument is a length, not an end pointer,
	 * so pass sizeof(*mutex) rather than "mutex + sizeof(*mutex)".
	 */
	debug_check_no_locks_freed(mutex, sizeof(*mutex));
	__del_lock(__get_lock(mutex));
	return ll_pthread_mutex_destroy(mutex);
}

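Conceptually, the check walks the calling task's held-lock list and complains if any held lock's storage overlaps the range being freed or reinitialized. A minimal sketch of that overlap test (names here are illustrative, not lockdep's actual internals, which live in __debug_check_no_locks_freed()):

#include <stddef.h>

/*
 * Return nonzero if a lock occupying [lock_addr, lock_addr + lock_len)
 * overlaps the range [mem_from, mem_from + mem_len) that is about to be
 * freed or reinitialized. Illustrative helper only.
 */
static int lock_in_freed_range(const void *mem_from, size_t mem_len,
			       const void *lock_addr, size_t lock_len)
{
	const char *mem_to = (const char *)mem_from + mem_len;
	const char *lock_to = (const char *)lock_addr + lock_len;

	return !(lock_to <= (const char *)mem_from ||
		 (const char *)lock_addr >= mem_to);
}
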
void fastcall __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
			      struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
	lockdep_init_map(&rwsem->dep_map, name, key, 0);
#endif
	__rt_mutex_init(&rwsem->lock, name);
	rwsem->read_depth = 0;
}

void debug_mutex_init(struct mutex *lock, const char *name,
		      struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key);
#endif
	lock->owner = NULL;
	lock->magic = lock;
}

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->activity = 0;
#ifdef CONFIG_BRCM_DEBUG_RWSEM
	sem->wr_owner = NULL;
#endif
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	spin_lock_init(&sk->sk_lock.slock);
	sk->sk_lock.owner = NULL;
	init_waitqueue_head(&sk->sk_lock.wq);
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
	/*
	 * Mark both the sk_lock and the sk_lock.slock as a
	 * per-address-family lock class:
	 */
	lockdep_set_class_and_name(&sk->sk_lock.slock,
				   af_family_slock_keys + sk->sk_family,
				   af_family_slock_key_strings[sk->sk_family]);
	lockdep_init_map(&sk->sk_lock.dep_map,
			 af_family_key_strings[sk->sk_family],
			 af_family_keys + sk->sk_family, 0);
}

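The per-address-family keys and name strings used above are file-scope tables indexed by sk_family. A condensed sketch of their declarations (abridged from net/core/sock.c; the real string tables name every AF_* entry):

/*
 * One lock class per address family, for both the sk_lock dep_map and
 * the underlying slock. Abridged for illustration.
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

static const char *const af_family_key_strings[AF_MAX + 1] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX", "sk_lock-AF_INET",
	/* ... one entry per address family ... */
};
static const char *const af_family_slock_key_strings[AF_MAX + 1] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX", "slock-AF_INET",
	/* ... one entry per address family ... */
};
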
static inline void slab_free_hook(struct kmem_cache *s, void *x)
{
	kmemleak_free_recursive(x, s->flags);

	/*
	 * Trouble is that we may no longer disable interrupts in the fast
	 * path, so in order to make the debug calls that expect irqs to be
	 * disabled we need to disable interrupts temporarily.
	 */
#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
	{
		unsigned long flags;

		local_irq_save(flags);
		kmemcheck_slab_free(s, x, s->objsize);
		debug_check_no_locks_freed(x, s->objsize);
		local_irq_restore(flags);
	}
#endif
	if (!(s->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(x, s->objsize);
}
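
Because the slab free path runs this hook, simply freeing memory that still contains a held lock is enough to trigger a lockdep report. A hypothetical, intentionally buggy kernel-module sketch to illustrate (struct foo and the foo_demo_* names are made up; assumes CONFIG_DEBUG_LOCK_ALLOC and a SLUB kernel where kfree() reaches slab_free_hook()):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Made-up structure embedding a lock, for illustration only. */
struct foo {
	spinlock_t lock;
	int data;
};

static int __init foo_demo_init(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;

	spin_lock_init(&f->lock);
	spin_lock(&f->lock);
	/*
	 * Deliberate bug: freeing f while f->lock is held. The free path's
	 * debug_check_no_locks_freed() should report the held lock here.
	 */
	kfree(f);
	return 0;
}

static void __exit foo_demo_exit(void)
{
}

module_init(foo_demo_init);
module_exit(foo_demo_exit);
MODULE_LICENSE("GPL");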