static inline int __realloc_dentry_private_data(struct dentry *dentry) { struct unionfs_dentry_info *info = UNIONFS_D(dentry); void *p; int size; BUG_ON(!info); size = sizeof(struct path) * sbmax(dentry->d_sb); p = krealloc(info->lower_paths, size, GFP_ATOMIC); if (unlikely(!p)) return -ENOMEM; info->lower_paths = p; info->bstart = -1; info->bend = -1; info->bopaque = -1; info->bcount = sbmax(dentry->d_sb); atomic_set(&info->generation, atomic_read(&UNIONFS_SB(dentry->d_sb)->generation)); memset(info->lower_paths, 0, size); return 0; }
/*
 * Mount entry point (legacy ->get_sb interface): build a nodev superblock
 * via unionfs_read_super, then record a copy of the device name on the
 * superblock's private info for later reporting.
 */
static int unionfs_get_sb(struct file_system_type *fs_type,
			  int flags, const char *dev_name,
			  void *raw_data, struct vfsmount *mnt)
{
	int err = get_sb_nodev(fs_type, flags, raw_data,
			       unionfs_read_super, mnt);

	if (err)
		return err;

	/* stash the device name string used for this mount */
	UNIONFS_SB(mnt->mnt_sb)->dev_name = kstrdup(dev_name, GFP_KERNEL);
	return err;
}
/*
 * Mount entry point (->mount interface): build a nodev superblock via
 * unionfs_read_super and, on success, record a copy of the device name
 * on the superblock's private info.
 */
static struct dentry *unionfs_mount(struct file_system_type *fs_type,
				    int flags, const char *dev_name,
				    void *raw_data)
{
	struct dentry *root;

	root = mount_nodev(fs_type, flags, raw_data, unionfs_read_super);
	if (IS_ERR(root))
		return root;

	/* stash the device name string used for this mount */
	UNIONFS_SB(root->d_sb)->dev_name = kstrdup(dev_name, GFP_KERNEL);
	return root;
}
/*
 * Revalidate the struct file
 * @file: file to revalidate
 * @parent: parent dentry (locked by caller)
 * @willwrite: true if caller may cause changes to the file; false otherwise.
 * Caller must lock/unlock dentry's branch configuration.
 *
 * Returns 0 when the file is usable, -ESTALE when the underlying dentry
 * could not be revalidated, or whatever __unionfs_file_revalidate returns.
 */
int unionfs_file_revalidate(struct file *file, struct dentry *parent,
			    bool willwrite)
{
	struct dentry *dentry = file->f_path.dentry;
	struct super_block *sb = dentry->d_sb;
	int sbgen, dgen;

	verify_locked(dentry);
	verify_locked(parent);

	/*
	 * First revalidate the dentry inside struct file,
	 * but not unhashed dentries.
	 */
	if (!d_deleted(dentry) &&
	    !__unionfs_d_revalidate(dentry, parent, willwrite))
		return -ESTALE;

	sbgen = atomic_read(&UNIONFS_SB(sb)->generation);
	dgen = atomic_read(&UNIONFS_D(dentry)->generation);

	/* after a successful revalidate the dentry generation should have
	 * caught up with the superblock generation */
	if (unlikely(sbgen > dgen)) {
		/* XXX: should never happen */
		pr_debug("unionfs: failed to revalidate dentry (%s)\n",
			 dentry->d_name.name);
		return -ESTALE;
	}

	return __unionfs_file_revalidate(file, dentry, parent, sb,
					 sbgen, dgen, willwrite);
}
/* allocate new dentry private data, free old one if necessary */
int new_dentry_private_data(struct dentry *dentry)
{
	int newsize;
	int oldsize = 0;
	struct unionfs_dentry_info *info = UNIONFS_D(dentry);

	spin_lock(&dentry->d_lock);
	if (!info) {
		/* first time: allocate a fresh info object from our slab */
		dentry->d_fsdata = kmem_cache_alloc(unionfs_dentry_cachep,
						    GFP_ATOMIC);
		info = UNIONFS_D(dentry);
		if (!info)
			goto out;
		/*
		 * NOTE(review): mutex_init/mutex_lock are executed while
		 * holding dentry->d_lock (a spinlock); mutex_lock may
		 * sleep -- confirm this path cannot schedule here.
		 */
		mutex_init(&info->lock);
		mutex_lock(&info->lock);
		info->lower_paths = NULL;
	} else
		/* reuse: remember how much lower-path space we had before */
		oldsize = sizeof(struct path) * info->bcount;

	/* reset the branch window; caller repopulates the lower paths */
	info->bstart = -1;
	info->bend = -1;
	info->bopaque = -1;
	info->bcount = sbmax(dentry->d_sb);
	atomic_set(&info->generation,
		   atomic_read(&UNIONFS_SB(dentry->d_sb)->generation));

	newsize = sizeof(struct path) * sbmax(dentry->d_sb);

	/* Don't reallocate when we already have enough space. */
	/* It would be ideal if we could actually use the slab macros to
	 * determine what our object sizes is, but those are not exported. */
	if (oldsize) {
		/* smallest object kmalloc will hand out; a buffer of at
		 * least this size can be reused even if oldsize < newsize */
		int minsize = malloc_sizes[0].cs_size;

		if (!newsize || ((oldsize < newsize) && (newsize > minsize))) {
			kfree(info->lower_paths);
			info->lower_paths = NULL;
		}
	}

	if (!info->lower_paths && newsize) {
		info->lower_paths = kmalloc(newsize, GFP_ATOMIC);
		if (!info->lower_paths)
			goto out_free;
	}

	/*
	 * NOTE(review): if newsize is 0 with oldsize > 0, lower_paths was
	 * freed above and this memset would hit NULL -- presumably
	 * sbmax() is never 0 here; verify.
	 */
	memset(info->lower_paths, 0, (oldsize > newsize ? oldsize : newsize));

	spin_unlock(&dentry->d_lock);
	return 0;

out_free:
	kfree(info->lower_paths);
out:
	/* NOTE(review): info may be NULL on the out path (slab alloc
	 * failure); assumes free_dentry_private_data handles NULL. */
	free_dentry_private_data(info);
	dentry->d_fsdata = NULL;
	spin_unlock(&dentry->d_lock);
	return -ENOMEM;
}
/*
 * Connect a unionfs inode dentry/inode with several lower ones.  This is
 * the classic stackable file system "vnode interposition" action.
 *
 * @sb: unionfs's super_block
 *
 * Returns NULL (== ERR_PTR(0)) on success, an ERR_PTR on failure, or --
 * for INTERPOSE_LOOKUP only -- a new dentry when d_splice_alias moved a
 * disconnected alias (the caller must then use the returned dentry).
 */
struct dentry *unionfs_interpose(struct dentry *dentry, struct super_block *sb,
				 int flag)
{
	int err = 0;
	struct inode *inode;
	int need_fill_inode = 1;
	struct dentry *spliced = NULL;

	verify_locked(dentry);

	/*
	 * We allocate our new inode below, by calling iget.
	 * iget will call our read_inode which will initialize some
	 * of the new inode's fields
	 */

	/*
	 * On revalidate we've already got our own inode and just need
	 * to fix it up.
	 */
	if (flag == INTERPOSE_REVAL) {
		inode = dentry->d_inode;
		UNIONFS_I(inode)->bstart = -1;
		UNIONFS_I(inode)->bend = -1;
		atomic_set(&UNIONFS_I(inode)->generation,
			   atomic_read(&UNIONFS_SB(sb)->generation));

		/* one slot per branch for the lower inode pointers */
		UNIONFS_I(inode)->lower_inodes =
			kcalloc(sbmax(sb), sizeof(struct inode *), GFP_KERNEL);
		if (unlikely(!UNIONFS_I(inode)->lower_inodes)) {
			err = -ENOMEM;
			goto out;
		}
	} else {
		/* get unique inode number for unionfs */
		inode = iget(sb, iunique(sb, UNIONFS_ROOT_INO));
		if (!inode) {
			err = -EACCES;
			goto out;
		}

		/* an already-referenced inode was filled previously;
		 * skip re-filling it */
		if (atomic_read(&inode->i_count) > 1)
			goto skip;
	}

	need_fill_inode = 0;
	unionfs_fill_inode(dentry, inode);

skip:
	/* only (our) lookup wants to do a d_add */
	switch (flag) {
	case INTERPOSE_DEFAULT:
		/* for operations which create new inodes */
		d_add(dentry, inode);
		break;
	case INTERPOSE_REVAL_NEG:
		d_instantiate(dentry, inode);
		break;
	case INTERPOSE_LOOKUP:
		spliced = d_splice_alias(inode, dentry);
		if (spliced && spliced != dentry) {
			/*
			 * d_splice can return a dentry if it was
			 * disconnected and had to be moved.  We must ensure
			 * that the private data of the new dentry is
			 * correct and that the inode info was filled
			 * properly.  Finally we must return this new
			 * dentry.
			 */
			spliced->d_op = &unionfs_dops;
			/* move our private data over to the new dentry */
			spliced->d_fsdata = dentry->d_fsdata;
			dentry->d_fsdata = NULL;
			dentry = spliced;
			if (need_fill_inode) {
				need_fill_inode = 0;
				unionfs_fill_inode(dentry, inode);
			}
			goto out_spliced;
		} else if (!spliced) {
			if (need_fill_inode) {
				need_fill_inode = 0;
				unionfs_fill_inode(dentry, inode);
				goto out_spliced;
			}
		}
		break;
	case INTERPOSE_REVAL:
		/* Do nothing. */
		break;
	default:
		printk(KERN_CRIT "unionfs: invalid interpose flag passed!\n");
		BUG();
	}
	goto out;

out_spliced:
	if (!err)
		return spliced;
out:
	return ERR_PTR(err);
}
/*
 * our custom d_alloc_root work-alike
 *
 * we can't use d_alloc_root if we want to use our own interpose function
 * unchanged, so we simply call our own "fake" d_alloc_root
 */
static struct dentry *unionfs_d_alloc_root(struct super_block *sb)
{
	struct dentry *ret = NULL;

	if (sb) {
		static const struct qstr name = {
			.name = "/",
			.len = 1
		};

		/* NULL parent: this dentry is its own root */
		ret = d_alloc(NULL, &name);
		if (likely(ret)) {
			ret->d_op = &unionfs_dops;
			ret->d_sb = sb;
			ret->d_parent = ret;
		}
	}
	return ret;
}

/*
 * There is no need to lock the unionfs_super_info's rwsem as there is no
 * way anyone can have a reference to the superblock at this point in time.
 */
static int unionfs_read_super(struct super_block *sb, void *raw_data,
			      int silent)
{
	int err = 0;
	struct unionfs_dentry_info *lower_root_info = NULL;
	int bindex, bstart, bend;

	if (!raw_data) {
		printk(KERN_ERR
		       "unionfs: read_super: missing data argument\n");
		err = -EINVAL;
		goto out;
	}

	/* Allocate superblock private data */
	sb->s_fs_info = kzalloc(sizeof(struct unionfs_sb_info), GFP_KERNEL);
	if (unlikely(!UNIONFS_SB(sb))) {
		printk(KERN_CRIT "unionfs: read_super: out of memory\n");
		err = -ENOMEM;
		goto out;
	}

	UNIONFS_SB(sb)->bend = -1;
	atomic_set(&UNIONFS_SB(sb)->generation, 1);
	init_rwsem(&UNIONFS_SB(sb)->rwsem);
	UNIONFS_SB(sb)->high_branch_id = -1;	/* -1 == invalid branch ID */

	/* parse "dirs=..." etc.; returns the lower root branch info */
	lower_root_info = unionfs_parse_options(sb, raw_data);
	if (IS_ERR(lower_root_info)) {
		printk(KERN_ERR
		       "unionfs: read_super: error while parsing options "
		       "(err = %ld)\n", PTR_ERR(lower_root_info));
		err = PTR_ERR(lower_root_info);
		lower_root_info = NULL;
		goto out_free;
	}
	if (lower_root_info->bstart == -1) {
		err = -ENOENT;
		goto out_free;
	}

	/* set the lower superblock field of upper superblock */
	bstart = lower_root_info->bstart;
	BUG_ON(bstart != 0);
	sbend(sb) = bend = lower_root_info->bend;
	for (bindex = bstart; bindex <= bend; bindex++) {
		struct dentry *d = lower_root_info->lower_paths[bindex].dentry;
		/* pin each lower superblock; dropped again on the error
		 * path below */
		atomic_inc(&d->d_sb->s_active);
		unionfs_set_lower_super_idx(sb, bindex, d->d_sb);
	}

	/* max Bytes is the maximum bytes from highest priority branch */
	sb->s_maxbytes = unionfs_lower_super_idx(sb, 0)->s_maxbytes;

	/*
	 * Our c/m/atime granularity is 1 ns because we may stack on file
	 * systems whose granularity is as good.  This is important for our
	 * time-based cache coherency.
	 */
	sb->s_time_gran = 1;

	sb->s_op = &unionfs_sops;

	/* See comment next to the definition of unionfs_d_alloc_root */
	sb->s_root = unionfs_d_alloc_root(sb);
	if (unlikely(!sb->s_root)) {
		err = -ENOMEM;
		goto out_dput;
	}

	/* link the upper and lower dentries */
	sb->s_root->d_fsdata = NULL;
	/* NOTE(review): called here with a second (lock-class) argument;
	 * another version in this file declares it with one parameter --
	 * these appear to be different API generations; confirm which
	 * prototype this translation unit actually builds against. */
	err = new_dentry_private_data(sb->s_root, UNIONFS_DMUTEX_ROOT);
	if (unlikely(err))
		goto out_freedpd;

	/* Set the lower dentries for s_root */
	for (bindex = bstart; bindex <= bend; bindex++) {
		struct dentry *d;
		struct vfsmount *m;

		d = lower_root_info->lower_paths[bindex].dentry;
		m = lower_root_info->lower_paths[bindex].mnt;
		unionfs_set_lower_dentry_idx(sb->s_root, bindex, d);
		unionfs_set_lower_mnt_idx(sb->s_root, bindex, m);
	}
	dbstart(sb->s_root) = bstart;
	dbend(sb->s_root) = bend;

	/* Set the generation number to one, since this is for the mount. */
	atomic_set(&UNIONFS_D(sb->s_root)->generation, 1);

	/*
	 * Call interpose to create the upper level inode.  Only
	 * INTERPOSE_LOOKUP can return a value other than 0 on err.
	 */
	/* interpose returns NULL on success here, so PTR_ERR(NULL) == 0 */
	err = PTR_ERR(unionfs_interpose(sb->s_root, sb, 0));
	unionfs_unlock_dentry(sb->s_root);
	if (!err)
		goto out;
	/* else fall through */

out_freedpd:
	if (UNIONFS_D(sb->s_root)) {
		kfree(UNIONFS_D(sb->s_root)->lower_paths);
		free_dentry_private_data(sb->s_root);
	}
	dput(sb->s_root);

out_dput:
	if (lower_root_info && !IS_ERR(lower_root_info)) {
		for (bindex = lower_root_info->bstart;
		     bindex <= lower_root_info->bend; bindex++) {
			struct dentry *d;
			struct vfsmount *m;

			d = lower_root_info->lower_paths[bindex].dentry;
			m = lower_root_info->lower_paths[bindex].mnt;

			dput(d);
			/* initializing: can't use unionfs_mntput here */
			mntput(m);

			/* drop refs we took earlier */
			atomic_dec(&d->d_sb->s_active);
		}
		kfree(lower_root_info->lower_paths);
		/* set to NULL so the out: block below doesn't free twice */
		kfree(lower_root_info);
		lower_root_info = NULL;
	}

out_free:
	kfree(UNIONFS_SB(sb)->data);
	kfree(UNIONFS_SB(sb));
	sb->s_fs_info = NULL;

out:
	/* reached directly only on success or pre-parse failure; frees the
	 * parse result without dropping branch refs (none held here) */
	if (lower_root_info && !IS_ERR(lower_root_info)) {
		kfree(lower_root_info->lower_paths);
		kfree(lower_root_info);
	}
	return err;
}
/* * Parse mount options. See the manual page for usage instructions. * * Returns the dentry object of the lower-level (lower) directory; * We want to mount our stackable file system on top of that lower directory. */ static struct unionfs_dentry_info *unionfs_parse_options( struct super_block *sb, char *options) { struct unionfs_dentry_info *lower_root_info; char *optname; int err = 0; int bindex; int dirsfound = 0; /* allocate private data area */ err = -ENOMEM; lower_root_info = kzalloc(sizeof(struct unionfs_dentry_info), GFP_KERNEL); if (unlikely(!lower_root_info)) goto out_error; lower_root_info->bstart = -1; lower_root_info->bend = -1; lower_root_info->bopaque = -1; while ((optname = strsep(&options, ",")) != NULL) { char *optarg; if (!optname || !*optname) continue; optarg = strchr(optname, '='); if (optarg) *optarg++ = '\0'; /* * All of our options take an argument now. Insert ones that * don't, above this check. */ if (!optarg) { printk(KERN_ERR "unionfs: %s requires an argument\n", optname); err = -EINVAL; goto out_error; } if (!strcmp("dirs", optname)) { if (++dirsfound > 1) { printk(KERN_ERR "unionfs: multiple dirs specified\n"); err = -EINVAL; goto out_error; } err = parse_dirs_option(sb, lower_root_info, optarg); if (err) goto out_error; continue; } err = -EINVAL; printk(KERN_ERR "unionfs: unrecognized option '%s'\n", optname); goto out_error; } if (dirsfound != 1) { printk(KERN_ERR "unionfs: dirs option required\n"); err = -EINVAL; goto out_error; } goto out; out_error: if (lower_root_info && lower_root_info->lower_paths) { for (bindex = lower_root_info->bstart; bindex >= 0 && bindex <= lower_root_info->bend; bindex++) { struct dentry *d; struct vfsmount *m; d = lower_root_info->lower_paths[bindex].dentry; m = lower_root_info->lower_paths[bindex].mnt; dput(d); /* initializing: can't use unionfs_mntput here */ mntput(m); } } kfree(lower_root_info->lower_paths); kfree(lower_root_info); kfree(UNIONFS_SB(sb)->data); UNIONFS_SB(sb)->data = NULL; 
lower_root_info = ERR_PTR(err); out: return lower_root_info; }
/* * parse the dirs= mount argument * * We don't need to lock the superblock private data's rwsem, as we get * called only by unionfs_read_super - it is still a long time before anyone * can even get a reference to us. */ static int parse_dirs_option(struct super_block *sb, struct unionfs_dentry_info *lower_root_info, char *options) { struct nameidata nd; char *name; int err = 0; int branches = 1; int bindex = 0; int i = 0; int j = 0; struct dentry *dent1; struct dentry *dent2; if (options[0] == '\0') { printk(KERN_ERR "unionfs: no branches specified\n"); err = -EINVAL; goto out; } /* * Each colon means we have a separator, this is really just a rough * guess, since strsep will handle empty fields for us. */ for (i = 0; options[i]; i++) if (options[i] == ':') branches++; /* allocate space for underlying pointers to lower dentry */ UNIONFS_SB(sb)->data = kcalloc(branches, sizeof(struct unionfs_data), GFP_KERNEL); if (unlikely(!UNIONFS_SB(sb)->data)) { err = -ENOMEM; goto out; } lower_root_info->lower_paths = kcalloc(branches, sizeof(struct path), GFP_KERNEL); if (unlikely(!lower_root_info->lower_paths)) { err = -ENOMEM; goto out; } /* now parsing a string such as "b1:b2=rw:b3=ro:b4" */ branches = 0; while ((name = strsep(&options, ":")) != NULL) { int perms; char *mode = strchr(name, '='); if (!name) continue; if (!*name) { /* bad use of ':' (extra colons) */ err = -EINVAL; goto out; } branches++; /* strip off '=' if any */ if (mode) *mode++ = '\0'; err = parse_branch_mode(mode, &perms); if (err) { printk(KERN_ERR "unionfs: invalid mode \"%s\" for " "branch %d\n", mode, bindex); goto out; } /* ensure that leftmost branch is writeable */ if (!bindex && !(perms & MAY_WRITE)) { printk(KERN_ERR "unionfs: leftmost branch cannot be " "read-only (use \"-o ro\" to create a " "read-only union)\n"); err = -EINVAL; goto out; } err = path_lookup(name, LOOKUP_FOLLOW, &nd); if (err) { printk(KERN_ERR "unionfs: error accessing " "lower directory '%s' (error %d)\n", name, err); 
goto out; } err = check_branch(&nd); if (err) { printk(KERN_ERR "unionfs: lower directory " "'%s' is not a valid branch\n", name); path_release(&nd); goto out; } lower_root_info->lower_paths[bindex].dentry = nd.dentry; lower_root_info->lower_paths[bindex].mnt = nd.mnt; set_branchperms(sb, bindex, perms); set_branch_count(sb, bindex, 0); new_branch_id(sb, bindex); if (lower_root_info->bstart < 0) lower_root_info->bstart = bindex; lower_root_info->bend = bindex; bindex++; } if (branches == 0) { printk(KERN_ERR "unionfs: no branches specified\n"); err = -EINVAL; goto out; } BUG_ON(branches != (lower_root_info->bend + 1)); /* * Ensure that no overlaps exist in the branches. * * This test is required because the Linux kernel has no support * currently for ensuring coherency between stackable layers and * branches. If we were to allow overlapping branches, it would be * possible, for example, to delete a file via one branch, which * would not be reflected in another branch. Such incoherency could * lead to inconsistencies and even kernel oopses. Rather than * implement hacks to work around some of these cache-coherency * problems, we prevent branch overlapping, for now. A complete * solution will involve proper kernel/VFS support for cache * coherency, at which time we could safely remove this * branch-overlapping test. 
*/ for (i = 0; i < branches; i++) { dent1 = lower_root_info->lower_paths[i].dentry; for (j = i + 1; j < branches; j++) { dent2 = lower_root_info->lower_paths[j].dentry; if (is_branch_overlap(dent1, dent2)) { printk(KERN_ERR "unionfs: branches %d and " "%d overlap\n", i, j); err = -EINVAL; goto out; } } } out: if (err) { for (i = 0; i < branches; i++) if (lower_root_info->lower_paths[i].dentry) { dput(lower_root_info->lower_paths[i].dentry); /* initialize: can't use unionfs_mntput here */ mntput(lower_root_info->lower_paths[i].mnt); } kfree(lower_root_info->lower_paths); kfree(UNIONFS_SB(sb)->data); /* * MUST clear the pointers to prevent potential double free if * the caller dies later on */ lower_root_info->lower_paths = NULL; UNIONFS_SB(sb)->data = NULL; } return err; }
/*
 * There is no need to lock the unionfs_super_info's rwsem as there is no
 * way anyone can have a reference to the superblock at this point in time.
 */
static int unionfs_read_super(struct super_block *sb, void *raw_data,
			      int silent)
{
	int err = 0;
	struct unionfs_dentry_info *lower_root_info = NULL;
	int bindex, bstart, bend;
	struct inode *inode = NULL;

	if (!raw_data) {
		printk(KERN_ERR
		       "unionfs: read_super: missing data argument\n");
		err = -EINVAL;
		goto out;
	}

	/* Allocate superblock private data */
	sb->s_fs_info = kzalloc(sizeof(struct unionfs_sb_info), GFP_KERNEL);
	if (unlikely(!UNIONFS_SB(sb))) {
		printk(KERN_CRIT "unionfs: read_super: out of memory\n");
		err = -ENOMEM;
		goto out;
	}

	UNIONFS_SB(sb)->bend = -1;
	atomic_set(&UNIONFS_SB(sb)->generation, 1);
	init_rwsem(&UNIONFS_SB(sb)->rwsem);
	UNIONFS_SB(sb)->high_branch_id = -1;	/* -1 == invalid branch ID */

	/* parse "dirs=..." etc.; returns the lower root branch info */
	lower_root_info = unionfs_parse_options(sb, raw_data);
	if (IS_ERR(lower_root_info)) {
		printk(KERN_ERR
		       "unionfs: read_super: error while parsing options "
		       "(err = %ld)\n", PTR_ERR(lower_root_info));
		err = PTR_ERR(lower_root_info);
		lower_root_info = NULL;
		goto out_free;
	}
	if (lower_root_info->bstart == -1) {
		err = -ENOENT;
		goto out_free;
	}

	/* set the lower superblock field of upper superblock */
	bstart = lower_root_info->bstart;
	BUG_ON(bstart != 0);
	sbend(sb) = bend = lower_root_info->bend;
	for (bindex = bstart; bindex <= bend; bindex++) {
		struct dentry *d = lower_root_info->lower_paths[bindex].dentry;
		/* pin each lower superblock; undone on the error path */
		atomic_inc(&d->d_sb->s_active);
		unionfs_set_lower_super_idx(sb, bindex, d->d_sb);
	}

	/* max Bytes is the maximum bytes from highest priority branch */
	sb->s_maxbytes = unionfs_lower_super_idx(sb, 0)->s_maxbytes;

	/*
	 * Our c/m/atime granularity is 1 ns because we may stack on file
	 * systems whose granularity is as good.  This is important for our
	 * time-based cache coherency.
	 */
	sb->s_time_gran = 1;

	sb->s_op = &unionfs_sops;

	/* get a new inode and allocate our root dentry */
	inode = unionfs_iget(sb, iunique(sb, UNIONFS_ROOT_INO));
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_dput;
	}
	sb->s_root = d_make_root(inode);
	if (unlikely(!sb->s_root)) {
		/*
		 * NOTE(review): d_make_root drops the inode reference
		 * itself on failure, so jumping to out_iput here looks
		 * like a double iput -- confirm against the VFS docs for
		 * this kernel version.
		 */
		err = -ENOMEM;
		goto out_iput;
	}
	d_set_d_op(sb->s_root, &unionfs_dops);

	/* link the upper and lower dentries */
	sb->s_root->d_fsdata = NULL;
	err = new_dentry_private_data(sb->s_root, UNIONFS_DMUTEX_ROOT);
	if (unlikely(err))
		goto out_freedpd;

	/* if get here: cannot have error */

	/* Set the lower dentries for s_root */
	for (bindex = bstart; bindex <= bend; bindex++) {
		struct dentry *d;
		struct vfsmount *m;

		d = lower_root_info->lower_paths[bindex].dentry;
		m = lower_root_info->lower_paths[bindex].mnt;
		unionfs_set_lower_dentry_idx(sb->s_root, bindex, d);
		unionfs_set_lower_mnt_idx(sb->s_root, bindex, m);
	}
	dbstart(sb->s_root) = bstart;
	dbend(sb->s_root) = bend;

	/* Set the generation number to one, since this is for the mount. */
	atomic_set(&UNIONFS_D(sb->s_root)->generation, 1);

	/* fill the inode only if we are its sole user at this point */
	if (atomic_read(&inode->i_count) <= 1)
		unionfs_fill_inode(sb->s_root, inode);

	/*
	 * No need to call interpose because we already have a positive
	 * dentry, which was instantiated by d_alloc_root.  Just need to
	 * d_rehash it.
	 */
	d_rehash(sb->s_root);

	unionfs_unlock_dentry(sb->s_root);
	goto out; /* all is well */

out_freedpd:
	if (UNIONFS_D(sb->s_root)) {
		kfree(UNIONFS_D(sb->s_root)->lower_paths);
		free_dentry_private_data(sb->s_root);
	}
	/*
	 * NOTE(review): dput of the root releases the dentry's inode ref;
	 * falling through to iput(inode) below may over-put the inode,
	 * since d_make_root consumed our reference -- verify.
	 */
	dput(sb->s_root);

out_iput:
	iput(inode);

out_dput:
	if (lower_root_info && !IS_ERR(lower_root_info)) {
		for (bindex = lower_root_info->bstart;
		     bindex <= lower_root_info->bend; bindex++) {
			struct dentry *d;
			d = lower_root_info->lower_paths[bindex].dentry;
			/* drop refs we took earlier */
			atomic_dec(&d->d_sb->s_active);
			path_put(&lower_root_info->lower_paths[bindex]);
		}
		kfree(lower_root_info->lower_paths);
		/* set to NULL so the out: block below doesn't free twice */
		kfree(lower_root_info);
		lower_root_info = NULL;
	}

out_free:
	kfree(UNIONFS_SB(sb)->data);
	kfree(UNIONFS_SB(sb));
	sb->s_fs_info = NULL;

out:
	/* reached directly only on success or pre-parse failure; frees the
	 * parse result without dropping branch refs (none held here) */
	if (lower_root_info && !IS_ERR(lower_root_info)) {
		kfree(lower_root_info->lower_paths);
		kfree(lower_root_info);
	}
	return err;
}
/*
 * returns 1 if valid, 0 otherwise.
 */
int unionfs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	int valid = 1;		/* default is valid (1); invalid is 0. */
	struct dentry *hidden_dentry;
	int bindex, bstart, bend;
	int sbgen, dgen;
	int positive = 0;
	int locked = 0;
	int restart = 0;
	int interpose_flag;

	struct nameidata lowernd;	/* TODO: be gentler to the stack */

	/* keep a private copy of nd so lower lookups can't clobber it */
	if (nd)
		memcpy(&lowernd, nd, sizeof(struct nameidata));
	else
		memset(&lowernd, 0, sizeof(struct nameidata));

restart:
	verify_locked(dentry);

	/* if the dentry is unhashed, do NOT revalidate */
	if (d_deleted(dentry)) {
		printk(KERN_DEBUG "unhashed dentry being revalidated: %*s\n",
		       dentry->d_name.len, dentry->d_name.name);
		goto out;
	}

	BUG_ON(dbstart(dentry) == -1);
	if (dentry->d_inode)
		positive = 1;
	dgen = atomic_read(&UNIONFS_D(dentry)->generation);
	sbgen = atomic_read(&UNIONFS_SB(dentry->d_sb)->generation);
	/* If we are working on an unconnected dentry, then there is no
	 * revalidation to be done, because this file does not exist within the
	 * namespace, and Unionfs operates on the namespace, not data.
	 */
	if (sbgen != dgen) {
		/* stale generation: drop all lower refs and re-lookup */
		struct dentry *result;
		int pdgen;

		unionfs_read_lock(dentry->d_sb);
		locked = 1;

		/* The root entry should always be valid */
		BUG_ON(IS_ROOT(dentry));

		/* We can't work correctly if our parent isn't valid. */
		pdgen = atomic_read(&UNIONFS_D(dentry->d_parent)->generation);
		if (!restart && (pdgen != sbgen)) {
			unionfs_read_unlock(dentry->d_sb);
			locked = 0;
			/* We must be locked before our parent. */
			if (!
			    (dentry->d_parent->d_op->
			     d_revalidate(dentry->d_parent, nd))) {
				valid = 0;
				goto out;
			}
			/* restart exactly once after parent revalidation */
			restart = 1;
			goto restart;
		}
		BUG_ON(pdgen != sbgen);

		/* Free the pointers for our inodes and this dentry. */
		bstart = dbstart(dentry);
		bend = dbend(dentry);
		if (bstart >= 0) {
			struct dentry *hidden_dentry;
			for (bindex = bstart; bindex <= bend; bindex++) {
				hidden_dentry =
				    unionfs_lower_dentry_idx(dentry, bindex);
				dput(hidden_dentry);
			}
		}
		set_dbstart(dentry, -1);
		set_dbend(dentry, -1);

		interpose_flag = INTERPOSE_REVAL_NEG;
		if (positive) {
			/* also drop the lower inode references */
			interpose_flag = INTERPOSE_REVAL;
			mutex_lock(&dentry->d_inode->i_mutex);
			bstart = ibstart(dentry->d_inode);
			bend = ibend(dentry->d_inode);
			if (bstart >= 0) {
				struct inode *hidden_inode;
				for (bindex = bstart; bindex <= bend;
				     bindex++) {
					hidden_inode =
					    unionfs_lower_inode_idx(
						dentry->d_inode, bindex);
					iput(hidden_inode);
				}
			}
			kfree(UNIONFS_I(dentry->d_inode)->lower_inodes);
			UNIONFS_I(dentry->d_inode)->lower_inodes = NULL;
			ibstart(dentry->d_inode) = -1;
			ibend(dentry->d_inode) = -1;
			mutex_unlock(&dentry->d_inode->i_mutex);
		}

		result = unionfs_lookup_backend(dentry, &lowernd,
						interpose_flag);
		if (result) {
			if (IS_ERR(result)) {
				valid = 0;
				goto out;
			}
			/* current unionfs_lookup_backend() doesn't return
			 * a valid dentry */
			/* NOTE(review): local variable 'dentry' is swapped
			 * for the returned one; subsequent code (and the
			 * out: unlock) operates on the new dentry -- assumes
			 * both share the same superblock; verify. */
			dput(dentry);
			dentry = result;
		}

		if (positive && UNIONFS_I(dentry->d_inode)->stale) {
			make_bad_inode(dentry->d_inode);
			d_drop(dentry);
			valid = 0;
			goto out;
		}
		goto out;
	}

	/* The revalidation must occur across all branches */
	bstart = dbstart(dentry);
	bend = dbend(dentry);
	BUG_ON(bstart == -1);
	for (bindex = bstart; bindex <= bend; bindex++) {
		hidden_dentry = unionfs_lower_dentry_idx(dentry, bindex);
		if (!hidden_dentry || !hidden_dentry->d_op
		    || !hidden_dentry->d_op->d_revalidate)
			continue;

		/* any lower branch failing revalidation invalidates us */
		if (!hidden_dentry->d_op->d_revalidate(hidden_dentry, nd))
			valid = 0;
	}

	if (!dentry->d_inode)
		valid = 0;

	if (valid) {
		/* refresh cached attributes/size from the lower inode */
		fsstack_copy_attr_all(dentry->d_inode,
				      unionfs_lower_inode(dentry->d_inode),
				      unionfs_get_nlinks);
		fsstack_copy_inode_size(dentry->d_inode,
					unionfs_lower_inode(dentry->d_inode));
	}

out:
	if (locked)
		unionfs_read_unlock(dentry->d_sb);
	return valid;
}