void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
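For contrast, a sketch of the matching acquire side (modeled on the same era's kernel/spinlock.c, not quoted verbatim): the spin_acquire() annotation runs before the raw lock is taken, so lockdep can report a would-be deadlock instead of hanging, while the spin_release() calls above run just before the raw unlock.

void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	/* tell lockdep about the acquisition before actually spinning */
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}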
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	struct mm_struct *mm, *oldmm;

	prepare_task_switch(rq, prev, next);

	mm = next->mm;
	oldmm = prev->active_mm;
	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_start_context_switch(prev);

	if (!mm) {
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);

	if (!prev->mm) {
		prev->active_mm = NULL;
		rq->prev_mm = oldmm;
	}
	/*
	 * Since the runqueue lock will be released by the next
	 * task (which is an invalid locking op but in the case
	 * of the scheduler it's an obvious special-case), so we
	 * do an early lockdep release here:
	 */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif

	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);

	barrier();
	/*
	 * this_rq must be evaluated again because prev may have moved
	 * CPUs since it called schedule(), thus the 'rq' on its stack
	 * frame will be invalid.
	 */
	finish_task_switch(this_rq(), prev);
}
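The early release above is only half of the hand-off: the runqueue lock is carried over to the incoming task, which re-registers it with lockdep before finally dropping it. A minimal sketch of that counterpart, modeled on finish_lock_switch() in the same scheduler (details such as the raw vs. plain spinlock calls vary by kernel version):

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
	/*
	 * The runqueue lock was 'carried over' from prev into current,
	 * so fix up lockdep's bookkeeping before releasing it for real:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
	raw_spin_unlock_irq(&rq->lock);
}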
/* try d_walk() in linux/fs/dcache.c */
int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
		   au_dpages_test test, void *arg)
{
	int err;
	struct dentry *this_parent;
	struct list_head *next;
	struct super_block *sb = root->d_sb;

	err = 0;
	write_seqlock(&rename_lock);
	this_parent = root;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	if (this_parent->d_sb == sb
	    && !IS_ROOT(this_parent)
	    && au_di(this_parent)
	    && d_count(this_parent)
	    && (!test || test(this_parent, arg))) {
		err = au_dpages_append(dpages, this_parent, GFP_ATOMIC);
		if (unlikely(err))
			goto out;
	}

	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry,
						   d_u.d_child);

		next = tmp->next;
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (d_count(dentry)) {
			if (!list_empty(&dentry->d_subdirs)) {
				spin_unlock(&this_parent->d_lock);
				spin_release(&dentry->d_lock.dep_map, 1,
					     _RET_IP_);
				this_parent = dentry;
				spin_acquire(&this_parent->d_lock.dep_map, 0,
					     1, _RET_IP_);
				goto repeat;
			}
			if (dentry->d_sb == sb
			    && au_di(dentry)
			    && (!test || test(dentry, arg)))
				err = au_dpages_append(dpages, dentry,
						       GFP_ATOMIC);
		}
		spin_unlock(&dentry->d_lock);
		if (unlikely(err))
			goto out;
	}

	if (this_parent != root) {
		struct dentry *tmp;
		struct dentry *child;

		tmp = this_parent->d_parent;
		rcu_read_lock();
		spin_unlock(&this_parent->d_lock);
		child = this_parent;
		this_parent = tmp;
		spin_lock(&this_parent->d_lock);
		rcu_read_unlock();
		next = child->d_u.d_child.next;
		goto resume;
	}

out:
	spin_unlock(&this_parent->d_lock);
	write_sequnlock(&rename_lock);
	return err;
}
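The lockdep-specific detail above is the release/acquire pair in the descend step. The child's d_lock is taken with the DENTRY_D_LOCK_NESTED subclass; once the walk descends and that dentry becomes the new parent, its dep_map is dropped and re-registered at subclass 0, so the next level down can again nest under it without tripping lockdep. A stripped-down sketch of just that hand-off (walk_down() and its parameters are illustrative, not part of the original code):

static void walk_down(struct dentry *parent, struct dentry *child)
{
	/* lock the child as the nested (inner) lock under its parent */
	spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
	spin_unlock(&parent->d_lock);
	/* drop lockdep's record of the nested acquisition... */
	spin_release(&child->d_lock.dep_map, 1, _RET_IP_);
	/* ...and re-register the still-held lock as the outer, subclass-0 one */
	spin_acquire(&child->d_lock.dep_map, 0, 1, _RET_IP_);
}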
void __lockfunc rt_spin_unlock(spinlock_t *lock)
{
	/* NOTE: we always pass in '1' for nested, for simplicity */
	spin_release(&lock->dep_map, 1, _RET_IP_);
	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
}
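For symmetry, a sketch of the matching lock side in the -rt tree (the fastlock/slowlock helpers are named here by analogy with the unlock path above, and the placement of the annotation relative to the underlying rtmutex acquisition has varied across -rt releases):

void __lockfunc rt_spin_lock(spinlock_t *lock)
{
	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
}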