/*
 * Walk from @dentry up to its filesystem root, appending every dentry that
 * passes @test (or all of them when @test is NULL) to @dpages.
 *
 * @dpages:     destination page list, filled via au_dpages_append()
 * @dentry:     starting point of the upward walk
 * @do_include: non-zero to consider @dentry itself as well
 * @test:       optional filter; a dentry is appended only if it returns true
 * @arg:        opaque cookie passed through to @test
 *
 * Returns 0 on success or the first error from au_dpages_append(); the walk
 * stops at the first failure.  dcache_lock is held for the whole traversal,
 * which is why the appends must use GFP_ATOMIC.
 */
int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
		       int do_include, au_dpages_test test, void *arg)
{
	int err = 0;

	spin_lock(&dcache_lock);
	if (do_include && (!test || test(dentry, arg)))
		err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
	/* folding the error check into the loop condition replaces the
	 * original goto/break exits; the first failure ends the walk */
	while (!err && !IS_ROOT(dentry)) {
		/* d_parent is stable here: dcache_lock is locked */
		dentry = dentry->d_parent;
		if (!test || test(dentry, arg))
			err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
	}
	spin_unlock(&dcache_lock);

	return err;
}
/*
 * Walk from @dentry up to its filesystem root, appending every dentry that
 * is still referenced (d_count != 0) and passes @test (or all referenced
 * ones when @test is NULL) to @dpages.
 *
 * @dpages:     destination page list, filled via au_dpages_append()
 * @dentry:     starting point of the upward walk
 * @do_include: non-zero to consider @dentry itself as well
 * @test:       optional filter; a dentry is appended only if it returns true
 * @arg:        opaque cookie passed through to @test
 *
 * Returns 0 on success or the first error from au_dpages_append(); the walk
 * stops at the first failure.  rename_lock (write side of the seqlock) is
 * held across the whole walk so the d_parent chain cannot change underneath
 * us; each dentry is additionally examined under its own d_lock.  Appends
 * therefore use GFP_ATOMIC.
 */
int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
		       int do_include, au_dpages_test test, void *arg)
{
	int err;

	err = 0;
	write_seqlock(&rename_lock);
	spin_lock(&dentry->d_lock);
	/* unreferenced dentries (d_count == 0) are skipped */
	if (do_include && d_count(dentry) && (!test || test(dentry, arg)))
		err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
	spin_unlock(&dentry->d_lock);
	if (unlikely(err))
		goto out;

	/*
	 * RCU for vfsmount is unnecessary since this is a traverse in a single
	 * mount
	 */
	while (!IS_ROOT(dentry)) {
		dentry = dentry->d_parent; /* rename_lock is locked */
		spin_lock(&dentry->d_lock);
		if (d_count(dentry) && (!test || test(dentry, arg)))
			err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
		spin_unlock(&dentry->d_lock);
		if (unlikely(err))
			break;
	}

out:
	write_sequnlock(&rename_lock);
	return err;
}
/*
 * Iterative depth-first traversal of the dentry subtree under @root,
 * appending to @dpages every descendant that belongs to the same superblock
 * as @root, carries aufs dentry info (au_di() non-NULL), and passes @test
 * (or all such dentries when @test is NULL).  @root itself is excluded by
 * the !IS_ROOT(this_parent) guard at the "resume" label.
 *
 * @dpages: destination page list, filled via au_dpages_append()
 * @root:   subtree root whose descendants are collected
 * @test:   optional filter; a dentry is appended only if it returns true
 * @arg:    opaque cookie passed through to @test
 *
 * Returns 0 on success or the first error from au_dpages_append().
 * dcache_lock is held for the entire traversal (hence GFP_ATOMIC).  The
 * repeat/resume goto pair implements the classic lock-free-of-recursion DFS
 * used by the dcache code: "repeat" descends into a new parent, "resume"
 * continues a parent's child list after ascending.
 */
int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
		   au_dpages_test test, void *arg)
{
	int err;
	struct dentry *this_parent = root;
	struct list_head *next;
	struct super_block *sb = root->d_sb;

	err = 0;
	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	/* visit the current parent itself (skipping @root and foreign sbs) */
	if (this_parent->d_sb == sb
	    && !IS_ROOT(this_parent)
	    && au_di(this_parent)
	    && (!test || test(this_parent, arg))) {
		err = au_dpages_append(dpages, this_parent, GFP_ATOMIC);
		if (unlikely(err))
			goto out;
	}

	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry,
						   d_u.d_child);
		next = tmp->next;
		/* descend first; the child is visited at the "resume" check */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
		/* leaf dentry: visit it here */
		if (dentry->d_sb == sb
		    && au_di(dentry)
		    && (!test || test(dentry, arg))) {
			err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
			if (unlikely(err))
				goto out;
		}
	}

	/* ascend: continue the parent's sibling list where we left off */
	if (this_parent != root) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent; /* dcache_lock is locked */
		goto resume;
	}
out:
	spin_unlock(&dcache_lock);
	return err;
}
/*
 * Iterative depth-first traversal of the dentry subtree under @root,
 * modelled on d_walk() in linux/fs/dcache.c.  Appends to @dpages every
 * descendant that belongs to the same superblock as @root, carries aufs
 * dentry info (au_di() non-NULL), is still referenced (d_count != 0), and
 * passes @test (or all such dentries when @test is NULL).  @root itself is
 * excluded by the !IS_ROOT(this_parent) guard at the "resume" label.
 *
 * @dpages: destination page list, filled via au_dpages_append()
 * @root:   subtree root whose descendants are collected
 * @test:   optional filter; a dentry is appended only if it returns true
 * @arg:    opaque cookie passed through to @test
 *
 * Returns 0 on success or the first error from au_dpages_append().
 * Locking: the write side of rename_lock is held for the whole walk so the
 * tree topology cannot change; within the walk, the current parent's d_lock
 * is held and each child's d_lock is taken with DENTRY_D_LOCK_NESTED
 * (parent-before-child ordering).  Appends use GFP_ATOMIC.
 */
/* try d_walk() in linux/fs/dcache.c */
int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
		   au_dpages_test test, void *arg)
{
	int err;
	struct dentry *this_parent;
	struct list_head *next;
	struct super_block *sb = root->d_sb;

	err = 0;
	write_seqlock(&rename_lock);
	this_parent = root;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	/* visit the current parent itself (skipping @root, foreign sbs and
	 * unreferenced dentries) */
	if (this_parent->d_sb == sb
	    && !IS_ROOT(this_parent)
	    && au_di(this_parent)
	    && d_count(this_parent)
	    && (!test || test(this_parent, arg))) {
		err = au_dpages_append(dpages, this_parent, GFP_ATOMIC);
		if (unlikely(err))
			goto out;
	}

	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry,
						   d_u.d_child);

		next = tmp->next;
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (d_count(dentry)) {
			/* descend; the child is visited at "resume" */
			if (!list_empty(&dentry->d_subdirs)) {
				spin_unlock(&this_parent->d_lock);
				/* lockdep hand-off: we keep the child's
				 * d_lock but re-register it as the outer
				 * (non-nested) lock of the next level */
				spin_release(&dentry->d_lock.dep_map, 1,
					     _RET_IP_);
				this_parent = dentry;
				spin_acquire(&this_parent->d_lock.dep_map, 0, 1,
					     _RET_IP_);
				goto repeat;
			}
			/* leaf dentry: visit it here */
			if (dentry->d_sb == sb
			    && au_di(dentry)
			    && (!test || test(dentry, arg)))
				err = au_dpages_append(dpages, dentry,
						       GFP_ATOMIC);
		}
		spin_unlock(&dentry->d_lock);
		if (unlikely(err))
			goto out;
	}

	/* ascend: drop the child's lock, take the parent's, and continue the
	 * parent's sibling list where we left off.  rename_lock is held, so
	 * d_parent cannot change; the rcu_read_lock spans the unlocked gap. */
	if (this_parent != root) {
		struct dentry *tmp;
		struct dentry *child;

		tmp = this_parent->d_parent;
		rcu_read_lock();
		spin_unlock(&this_parent->d_lock);
		child = this_parent;
		this_parent = tmp;
		spin_lock(&this_parent->d_lock);
		rcu_read_unlock();
		next = child->d_u.d_child.next;
		goto resume;
	}

out:
	spin_unlock(&this_parent->d_lock);
	write_sequnlock(&rename_lock);
	return err;
}
/*
 * Iterative depth-first traversal of the dentry subtree under @root,
 * appending to @dpages every descendant that belongs to the same superblock
 * as @root, has a positive inode, is still referenced (d_count != 0), and
 * passes @test (or all such dentries when @test is NULL).  @root itself is
 * excluded by the !IS_ROOT(this_parent) guard at the "resume" label.
 *
 * @dpages: destination page list, filled via au_dpages_append()
 * @root:   subtree root whose descendants are collected
 * @test:   optional filter; a dentry is appended only if it returns true
 * @arg:    opaque cookie passed through to @test
 *
 * Returns 0 on success or the first error from au_dpages_append().
 * dcache_lock is held for the entire traversal (hence GFP_ATOMIC).  The
 * repeat/resume goto pair implements the classic recursion-free DFS used by
 * the dcache code.
 *
 * Fix: dropped the long-dead "#if 0" debug dump that followed the unlock;
 * it was commented-out code with no effect on behavior.
 */
int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
		   au_dpages_test test, void *arg)
{
	int err;
	struct dentry *this_parent = root;
	struct list_head *next;
	struct super_block *sb = root->d_sb;

	TraceEnter();

	err = 0;
	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	/* visit the current parent itself (skipping @root, foreign sbs,
	 * unreferenced and negative dentries) */
	if (this_parent->d_sb == sb
	    && !IS_ROOT(this_parent)
	    && atomic_read(&this_parent->d_count)
	    && this_parent->d_inode
	    && (!test || test(this_parent, arg))) {
		err = au_dpages_append(dpages, this_parent, GFP_ATOMIC);
		if (unlikely(err))
			goto out;
	}

	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry,
						   D_CHILD);

		next = tmp->next;
		/* negative dentries are skipped entirely */
		if (unlikely(/*d_unhashed(dentry) || */!dentry->d_inode))
			continue;
		/* descend; the child is visited at the "resume" check */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
		/* leaf dentry: visit it here */
		if (dentry->d_sb == sb
		    && atomic_read(&dentry->d_count)
		    && (!test || test(dentry, arg))) {
			err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
			if (unlikely(err))
				goto out;
		}
	}

	/* ascend: continue the parent's sibling list where we left off */
	if (this_parent != root) {
		next = this_parent->D_CHILD.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
out:
	spin_unlock(&dcache_lock);

	TraceErr(err);
	return err;
}