/*
 * Acquire @lock with local interrupts disabled.
 *
 * Order matters: interrupts go off first, then preemption, then the
 * lockdep acquire annotation, and only then the actual spin.  The
 * matching release is spin_unlock_irq().
 */
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
/*
 * Non-blocking acquire of an RT ("sleeping") spinlock.
 *
 * Returns 1 and records the acquisition with lockdep on success,
 * 0 if the underlying rt_mutex is already held.
 */
int __lockfunc rt_spin_trylock(spinlock_t *lock)
{
	int acquired = rt_mutex_trylock(&lock->lock);

	if (acquired)
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	return acquired;
}
/*
 * irqsave-flavored trylock for RT spinlocks.
 *
 * RT spinlocks never actually disable interrupts, so @flags is always
 * reported as zero; it exists only to satisfy the irqsave calling
 * convention.  Returns 1 on success, 0 if the lock is contended.
 */
int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
{
	int acquired;

	*flags = 0;
	acquired = rt_mutex_trylock(&lock->lock);
	if (acquired)
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	return acquired;
}
/*
 * Attempt to take @lock without spinning.
 *
 * Preemption is disabled up front; if the raw trylock fails it is
 * re-enabled and 0 is returned.  On success preemption stays off (it
 * is restored by the matching unlock) and 1 is returned.
 */
int __lockfunc _spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (!_raw_spin_trylock(lock)) {
		/* Contended: undo the preempt_disable() and fail. */
		preempt_enable();
		return 0;
	}
	spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	return 1;
}
//在单CPU的系统中,进入临界区代码只需要关闭中断就可以了, //而多CPU的系统中,还需要测试自旋锁的状态 void __lockfunc _spin_lock_irq(spinlock_t *lock) { local_irq_disable();//关闭中断 //关闭进程抢占,由于中观或系统调用之后,可能会调度其他进程运行 //(例如当前进程时间片用完,或者有一个拥有更高优先级的进程已经进入了就绪状态), //preempt_disable()关闭调度器的这个功能,从而保证当前进程在执行临界区代码的过程中不会被其他进程干扰 preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); //其实就是调用_raw_spin_lock()函数。#define LOCK_CONTENDED(_lock, try, lock) lock(_lock) LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); }
/*
 * Save-and-disable local interrupts, disable preemption, then take the
 * raw spinlock, annotating lockdep with @subclass for nested locking.
 *
 * Returns the saved interrupt flags for the matching
 * spin_unlock_irqrestore().
 */
unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
						       int subclass)
{
	unsigned long saved_flags;

	local_irq_save(saved_flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
			     do_raw_spin_lock_flags, &saved_flags);
	return saved_flags;
}
/*
 * Bottom-half-safe trylock: disables softirqs and preemption, then
 * attempts the raw lock.  On failure both are carefully undone —
 * preempt_enable_no_resched() avoids a spurious reschedule, and
 * local_bh_enable_ip() attributes the BH re-enable to our caller.
 *
 * Returns 1 with BHs and preemption left disabled on success, 0 on
 * failure with both restored.
 */
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (!_raw_spin_trylock(lock)) {
		/* Lock busy: roll back in reverse order and fail. */
		preempt_enable_no_resched();
		local_bh_enable_ip((unsigned long)__builtin_return_address(0));
		return 0;
	}
	spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	return 1;
}
/*
 * Take @lock with local interrupts saved and disabled; the saved flags
 * are returned for the matching spin_unlock_irqrestore().
 */
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long irq_flags;

	local_irq_save(irq_flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * Under lockdep we must not use the hand-coded irq-enable of
	 * _raw_spin_lock_flags(), because lockdep assumes interrupts
	 * are not re-enabled in the middle of a lock acquisition.
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &irq_flags);
#endif
	return irq_flags;
}
/* try d_walk() in linux/fs/dcache.c */
/*
 * Depth-first walk of the dentry tree under @root, appending every
 * dentry that belongs to @sb, has aufs private data (au_di) and a
 * non-zero refcount — and passes @test if one is given — to @dpages.
 *
 * Locking mirrors d_walk(): the whole traversal runs under
 * write_seqlock(&rename_lock); exactly one d_lock is held at a time,
 * handed over parent -> child on descent and child -> parent on ascent.
 * Returns 0 on success or the first au_dpages_append() error.
 */
int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
		   au_dpages_test test, void *arg)
{
	int err;
	struct dentry *this_parent;
	struct list_head *next;
	struct super_block *sb = root->d_sb;

	err = 0;
	write_seqlock(&rename_lock);
	this_parent = root;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	/* Visit the current subtree root itself (root is excluded). */
	if (this_parent->d_sb == sb
	    && !IS_ROOT(this_parent)
	    && au_di(this_parent)
	    && d_count(this_parent)
	    && (!test || test(this_parent, arg))) {
		err = au_dpages_append(dpages, this_parent, GFP_ATOMIC);
		if (unlikely(err))
			goto out;
	}
	/* Iterate the children of this_parent. */
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry,
						   d_u.d_child);
		next = tmp->next;
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (d_count(dentry)) {
			if (!list_empty(&dentry->d_subdirs)) {
				/*
				 * Descend: drop the parent's lock and keep
				 * the child's, re-telling lockdep that the
				 * nested-class lock is now the outer one.
				 */
				spin_unlock(&this_parent->d_lock);
				spin_release(&dentry->d_lock.dep_map, 1,
					     _RET_IP_);
				this_parent = dentry;
				spin_acquire(&this_parent->d_lock.dep_map,
					     0, 1, _RET_IP_);
				goto repeat;
			}
			/* Leaf child: test and collect it in place. */
			if (dentry->d_sb == sb
			    && au_di(dentry)
			    && (!test || test(dentry, arg)))
				err = au_dpages_append(dpages, dentry,
						       GFP_ATOMIC);
		}
		spin_unlock(&dentry->d_lock);
		if (unlikely(err))
			goto out;
	}
	/*
	 * Subtree exhausted: ascend to the parent.  RCU keeps the parent
	 * dentry alive across the unlock/lock window; rename_lock being
	 * write-held prevents the tree from changing shape under us.
	 */
	if (this_parent != root) {
		struct dentry *tmp;
		struct dentry *child;

		tmp = this_parent->d_parent;
		rcu_read_lock();
		spin_unlock(&this_parent->d_lock);
		child = this_parent;
		this_parent = tmp;
		spin_lock(&this_parent->d_lock);
		rcu_read_unlock();
		/* Resume the parent's child list just past @child. */
		next = child->d_u.d_child.next;
		goto resume;
	}
out:
	spin_unlock(&this_parent->d_lock);
	write_sequnlock(&rename_lock);
	return err;
}
/*
 * Acquire an RT spinlock, recording the acquisition with lockdep
 * under @subclass so legitimate nested locking is not flagged.
 */
void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
	/* Fast path falls back to the slow path on contention. */
	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
}
/*
 * Acquire an RT spinlock (backed by an rt_mutex, so the caller may
 * sleep on contention) and annotate the acquisition for lockdep.
 */
void __lockfunc rt_spin_lock(spinlock_t *lock)
{
	/* Fast path falls back to the slow path on contention. */
	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
}
/*
 * Take the raw spinlock with preemption disabled, telling lockdep the
 * acquisition belongs to @subclass so nested use of same-class locks
 * does not trigger a false deadlock report.
 */
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}