/*
 * fuse_iget - look up (or set up) the inode for a FUSE nodeid.
 *
 * @sb:		superblock of the FUSE mount
 * @nodeid:	server-side node id, passed to iget4() as the opaque
 *		comparison argument for fuse_inode_eq()
 * @generation:	generation number stored into a freshly set up inode
 * @attr:	attributes from the server; attr->ino keys the inode hash
 *
 * Returns the referenced inode, or NULL if iget4() failed.
 *
 * A freshly allocated inode is recognised by u.generic_ip == NULL and is
 * initialised here.  If the cached inode's file type (S_IFMT bits) no
 * longer matches attr->mode, the stale inode is unhashed and marked bad
 * so outstanding I/O on it fails, its reference is dropped, and the
 * lookup is retried; BUG_ON(retried) guards against retrying more than
 * once.  Each successful return bumps fi->nlookup (a lookup count —
 * presumably balanced by a later FORGET from the server; confirm against
 * the protocol code).
 */
struct inode *fuse_iget(struct super_block *sb, unsigned long nodeid,
			int generation, struct fuse_attr *attr)
{
	struct inode *inode;
	struct fuse_inode *fi;
	int retried = 0;

 retry:
	inode = iget4(sb, attr->ino, fuse_inode_eq, &nodeid);
	if (!inode)
		return NULL;

	if (!inode->u.generic_ip) {
		/* Brand-new inode: record nodeid and initialise from attr */
		get_fuse_inode(inode)->nodeid = nodeid;
		inode->u.generic_ip = inode;
		inode->i_generation = generation;
		fuse_init_inode(inode, attr);
	} else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
		BUG_ON(retried);
		/* Inode has changed type, any I/O on the old should fail */
		remove_inode_hash(inode);
		make_bad_inode(inode);
		iput(inode);
		retried = 1;
		goto retry;
	}

	fi = get_fuse_inode(inode);
	fi->nlookup++;
	fuse_change_attributes(inode, attr);
	return inode;
}
/*
 * Get a reference on a vnode.
 *
 * Returns @vp on success, or NULL when the backing inode is being freed,
 * is not present in the inode cache, or is still only half constructed.
 *
 * NOTE(review): the first LINVFS_GET_IP()/I_FREEING probe is only an
 * early-out; its result is immediately overwritten by the
 * VFS_GET_INODE() lookup below, which is what actually takes the
 * reference — confirm the I_FREEING check is intentionally racy.
 */
vnode_t *
vn_get(
	struct vnode	*vp,
	vmap_t		*vmap)
{
	struct inode	*inode;

	XFS_STATS_INC(vn_get);

	/* Bail out early if the inode is already on its way out. */
	inode = LINVFS_GET_IP(vp);
	if (inode->i_state & I_FREEING)
		return NULL;

	/* Look the inode up by vfsp/ino without allocating a new one. */
	inode = VFS_GET_INODE(vmap->v_vfsp, vmap->v_ino, IGET_NOALLOC);
	if (!inode)	/* Inode not present */
		return NULL;

	/* We do not want to create new inodes via vn_get,
	 * returning NULL here is OK.
	 */
	if (inode->i_state & I_NEW) {
		/* A half-initialised inode came back from the lookup:
		 * unhash it, mark it bad and drop it so nobody else can
		 * find it in this state.
		 */
		remove_inode_hash(inode);
		make_bad_inode(inode);
		unlock_new_inode(inode);
		iput(inode);
		return NULL;
	}

	vn_trace_exit(vp, "vn_get", (inst_t *)__return_address);
	return vp;
}
/*
 * make_bad_inode - mark an inode as unusable.
 *
 * Removes the inode from the hash so lookups can no longer find it,
 * forces its mode to a plain regular file, refreshes all three
 * timestamps, and installs the "bad" inode/file operation tables so
 * that subsequent operations on it fail.
 */
void make_bad_inode(struct inode * inode)
{
	remove_inode_hash(inode);
	inode->i_mode = S_IFREG;

	/* Read CURRENT_TIME once and mirror it into all three stamps. */
	inode->i_ctime = CURRENT_TIME;
	inode->i_mtime = inode->i_ctime;
	inode->i_atime = inode->i_ctime;

	inode->i_fop = &bad_file_ops;
	inode->i_op = &bad_inode_ops;
}
/*
 * make_bad_inode - mark an inode as unusable.
 *
 * Unhashes the inode so further lookups cannot reach it, downgrades its
 * mode to a plain regular file, stamps atime/mtime/ctime with the
 * filesystem's notion of "now", and wires in the bad-inode operation
 * tables so that every later operation on it fails.
 */
void make_bad_inode(struct inode *inode)
{
	remove_inode_hash(inode);
	inode->i_mode = S_IFREG;

	/* Evaluate current_fs_time() once; copy into the other stamps. */
	inode->i_ctime = current_fs_time(inode->i_sb);
	inode->i_mtime = inode->i_ctime;
	inode->i_atime = inode->i_ctime;

	inode->i_fop = &bad_file_ops;
	inode->i_op = &bad_inode_ops;
}
/* Although we treat Coda file identifiers as immutable, there is one
 * special case for files created during a disconnection where they may
 * not be globally unique. When an identifier collision is detected we
 * first try to flush the cached inode from the kernel and finally
 * resort to renaming/rehashing in-place. Userspace remembers both old
 * and new values of the identifier to handle any in-flight upcalls.
 * The real solution is to use globally unique UUIDs as identifiers, but
 * retrofitting the existing userspace code for this is non-trivial. */
void coda_replace_fid(struct inode *inode, struct CodaFid *oldfid,
		      struct CodaFid *newfid)
{
	struct coda_inode_info *cii = ITOC(inode);
	unsigned long hash = coda_f2i(newfid);

	/* The inode must still carry the fid the caller thinks it has. */
	BUG_ON(!coda_fideq(&cii->c_fid, oldfid));

	/* replace fid and rehash inode */
	/* XXX we probably need to hold some lock here! */
	remove_inode_hash(inode);
	cii->c_fid = *newfid;
	inode->i_ino = hash;
	__insert_inode_hash(inode, hash);
}
/*
 * coda_replace_fid - swap an inode's Coda file identifier in place.
 *
 * Verifies the inode still holds @oldfid, then unhashes it, stores
 * @newfid, updates i_ino to the hash derived from the new fid, and
 * rehashes the inode under that value.
 */
void coda_replace_fid(struct inode *inode, struct CodaFid *oldfid,
		      struct CodaFid *newfid)
{
	struct coda_inode_info *info;
	unsigned long new_ino;

	info = ITOC(inode);

	/* Caller must hand us the identifier the inode currently holds. */
	BUG_ON(!coda_fideq(&info->c_fid, oldfid));

	new_ino = coda_f2i(newfid);

	/* Unhash before mutating the key, then rehash under the new one. */
	remove_inode_hash(inode);
	info->c_fid = *newfid;
	inode->i_ino = new_ino;
	__insert_inode_hash(inode, new_ino);
}
/* * Look up the inode by super block and fattr->fileid. * * Note carefully the special handling of busy inodes (i_count > 1). * With the kernel 2.1.xx dcache all inodes except hard links must * have i_count == 1 after iget(). Otherwise, it indicates that the * server has reused a fileid (i_ino) and we have a stale inode. */ static struct inode * __nfs_fhget(struct super_block *sb, struct nfs_fattr *fattr) { struct inode *inode; int max_count, stale_inode, unhashed = 0; retry: inode = iget(sb, fattr->fileid); if (!inode) goto out_no_inode; /* N.B. This should be impossible ... */ if (inode->i_ino != fattr->fileid) goto out_bad_id; /* * Check for busy inodes, and attempt to get rid of any * unused local references. If successful, we release the * inode and try again. * * Note that the busy test uses the values in the fattr, * as the inode may have become a different object. * (We can probably handle modes changes here, too.) */ stale_inode = inode->i_mode && ((fattr->mode ^ inode->i_mode) & S_IFMT); stale_inode |= inode->i_count && inode->i_count == unhashed; max_count = S_ISDIR(fattr->mode) ? 1 : fattr->nlink; if (stale_inode || inode->i_count > max_count + unhashed) { dprintk("__nfs_fhget: inode %ld busy, i_count=%d, i_nlink=%d\n", inode->i_ino, inode->i_count, inode->i_nlink); unhashed = nfs_free_dentries(inode); if (stale_inode || inode->i_count > max_count + unhashed) { printk("__nfs_fhget: inode %ld still busy, i_count=%d\n", inode->i_ino, inode->i_count); if (!list_empty(&inode->i_dentry)) { struct dentry *dentry; dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias); printk("__nfs_fhget: killing %s/%s filehandle\n", dentry->d_parent->d_name.name, dentry->d_name.name); memset(dentry->d_fsdata, 0, sizeof(struct nfs_fh)); } remove_inode_hash(inode); nfs_invalidate_inode(inode); unhashed = 0; }
/*
 * coda_replace_fid - replace an inode's ViceFid and rehash it.
 *
 * Checks that the inode still carries @oldfid (BUG otherwise), then
 * unhashes the inode, installs @newfid, derives the new i_ino from it
 * via coda_f2i(), and rehashes the inode — insert_inode_hash() here
 * keys on the freshly assigned i_ino.
 */
void coda_replace_fid(struct inode *inode, struct ViceFid *oldfid,
		      struct ViceFid *newfid)
{
	struct coda_inode_info *cii;

	cii = ITOC(inode);

	/* Caller must pass the fid the inode currently holds. */
	if (!coda_fideq(&cii->c_fid, oldfid))
		BUG();

	/* replace fid and rehash inode */
	/* XXX we probably need to hold some lock here! */
	remove_inode_hash(inode);
	cii->c_fid = *newfid;
	inode->i_ino = coda_f2i(newfid);
	insert_inode_hash(inode);
}
/*
 * gfs2_aspace_put - release an address-space inode.
 *
 * Unhashes the inode first so no further lookup can revive it, then
 * drops the reference with iput().
 */
void gfs2_aspace_put(struct inode *aspace)
{
	remove_inode_hash(aspace);
	iput(aspace);
}
/*
 * linvfs_mknod - create a file, directory or special node for XFS.
 *
 * Translates the Linux mode/rdev pair into an XFS vattr_t and dispatches
 * to VOP_CREATE (regular and special files) or VOP_MKDIR (directories).
 * If the parent directory carries a default ACL it is inherited onto the
 * new node; when ACL inheritance fails the half-created node is backed
 * out again via VOP_RMDIR/VOP_REMOVE.  XFS VOPs report positive error
 * codes, hence the final negation to the Linux convention.
 */
STATIC int
linvfs_mknod(
	struct inode	*dir,
	struct dentry	*dentry,
	int		mode,
	int		rdev)
{
	struct inode	*ip;
	vattr_t		va;
	vnode_t		*vp = NULL, *dvp = LINVFS_GET_VP(dir);
	xfs_acl_t	*default_acl = NULL;
	attrexists_t	test_default_acl = _ACL_DEFAULT_EXISTS;
	int		error;

	/* Fetch the parent's default ACL, if any, before creating. */
	if (test_default_acl && test_default_acl(dvp)) {
		if (!_ACL_ALLOC(default_acl))
			return -ENOMEM;
		if (!_ACL_GET_DEFAULT(dvp, default_acl)) {
			_ACL_FREE(default_acl);
			default_acl = NULL;
		}
	}

#ifdef CONFIG_XFS_POSIX_ACL
	/*
	 * Conditionally compiled so that the ACL base kernel changes can be
	 * split out into separate patches - remove this once MS_POSIXACL is
	 * accepted, or some other way to implement this exists.
	 */
	if (IS_POSIXACL(dir) && !default_acl && has_fs_struct(current))
		mode &= ~current->fs->umask;
#endif

	memset(&va, 0, sizeof(va));
	va.va_mask = XFS_AT_TYPE|XFS_AT_MODE;
	va.va_type = IFTOVT(mode);
	va.va_mode = mode;

	switch (mode & S_IFMT) {
	case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK:
		/* Special nodes additionally carry the encoded device. */
		va.va_rdev = XFS_MKDEV(MAJOR(rdev), MINOR(rdev));
		va.va_mask |= XFS_AT_RDEV;
		/*FALLTHROUGH*/
	case S_IFREG:
		VOP_CREATE(dvp, dentry, &va, &vp, NULL, error);
		break;
	case S_IFDIR:
		VOP_MKDIR(dvp, dentry, &va, &vp, NULL, error);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (default_acl) {
		if (!error) {
			error = _ACL_INHERIT(vp, &va, default_acl);
			if (!error) {
				VMODIFY(vp);
			} else {
				struct dentry	teardown = {};
				int		err2;

				/* Oh, the horror.
				 * If we can't add the ACL we must back out.
				 * ENOSPC can hit here, among other things.
				 */
				teardown.d_inode = ip = LINVFS_GET_IP(vp);
				teardown.d_name = dentry->d_name;

				/* Make the orphan unreachable before undoing
				 * the create; any racing I/O on it fails. */
				remove_inode_hash(ip);
				make_bad_inode(ip);
				if (S_ISDIR(mode))
					VOP_RMDIR(dvp, &teardown, NULL, err2);
				else
					VOP_REMOVE(dvp, &teardown, NULL, err2);
				VN_RELE(vp);
			}
		}
		_ACL_FREE(default_acl);
	}

	if (!error) {
		ASSERT(vp);
		ip = LINVFS_GET_IP(vp);

		if (S_ISCHR(mode) || S_ISBLK(mode))
			ip->i_rdev = to_kdev_t(rdev);
		else if (S_ISDIR(mode))
			validate_fields(ip);
		d_instantiate(dentry, ip);
		validate_fields(dir);
	}
	return -error;
}
/*
 * Reopen zfs_sb_t and release VFS ops.
 *
 * Called with both teardown locks write-held (asserted below), i.e. as
 * the second half of a suspend/resume cycle.  Re-validates the objset
 * pointer, the ZPL version, the SA attribute table and every cached
 * znode; znodes that fail zfs_rezget() are unhashed and flagged stale.
 * On error the file system is force-unmounted, since it cannot be used
 * in its torn-down state.
 */
int
zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
{
	int err, err2;
	znode_t *zp;
	uint64_t sa_obj = 0;

	ASSERT(RRM_WRITE_HELD(&zsb->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zsb->z_teardown_inactive_lock));

	/*
	 * We already own this, so just hold and rele it to update the
	 * objset_t, as the one we had before may have been evicted.
	 */
	VERIFY0(dmu_objset_hold(osname, zsb, &zsb->z_os));
	VERIFY3P(zsb->z_os->os_dsl_dataset->ds_owner, ==, zsb);
	VERIFY(dsl_dataset_long_held(zsb->z_os->os_dsl_dataset));
	dmu_objset_rele(zsb->z_os, zsb);

	/*
	 * Make sure version hasn't changed
	 */
	err = zfs_get_zplprop(zsb->z_os, ZFS_PROP_VERSION,
	    &zsb->z_version);
	if (err)
		goto bail;

	/* Locate the SA master object; missing is fatal only on SA pools. */
	err = zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
	    ZFS_SA_ATTRS, 8, 1, &sa_obj);
	if (err && zsb->z_version >= ZPL_VERSION_SA)
		goto bail;

	if ((err = sa_setup(zsb->z_os, sa_obj,
	    zfs_attr_table, ZPL_END, &zsb->z_attr_table)) != 0)
		goto bail;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(zsb->z_os, zfs_sa_upgrade);
	VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);
	zfs_set_fuid_feature(zsb);
	zsb->z_rollback_time = jiffies;

	/*
	 * Attempt to re-establish all the active inodes with their
	 * dbufs. If a zfs_rezget() fails, then we unhash the inode
	 * and mark it stale. This prevents a collision if a new
	 * inode/object is created which must use the same inode
	 * number. The stale inode will be be released when the
	 * VFS prunes the dentry holding the remaining references
	 * on the stale inode.
	 */
	mutex_enter(&zsb->z_znodes_lock);
	for (zp = list_head(&zsb->z_all_znodes); zp;
	    zp = list_next(&zsb->z_all_znodes, zp)) {
		err2 = zfs_rezget(zp);
		if (err2) {
			remove_inode_hash(ZTOI(zp));
			zp->z_is_stale = B_TRUE;
		}
	}
	mutex_exit(&zsb->z_znodes_lock);

bail:
	/* release the VFS ops */
	rw_exit(&zsb->z_teardown_inactive_lock);
	rrm_exit(&zsb->z_teardown_lock, FTAG);

	if (err) {
		/*
		 * Since we couldn't setup the sa framework, try to force
		 * unmount this file system.
		 */
		if (zsb->z_os)
			(void) zfs_umount(zsb->z_sb);
	}
	return (err);
}
/*
 * Reopen zfs_sb_t and release VFS ops.
 *
 * Older variant: re-owns the objset with dmu_objset_own() instead of a
 * hold/rele refresh.  Runs with both teardown locks write-held
 * (asserted below).  On success the SA attribute table is rebuilt and
 * every cached znode is re-validated; znodes failing zfs_rezget() are
 * unhashed and flagged stale.  On error the file system is
 * force-unmounted.
 */
int
zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
{
	int err, err2;

	ASSERT(RRW_WRITE_HELD(&zsb->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zsb->z_teardown_inactive_lock));

	err = dmu_objset_own(osname, DMU_OST_ZFS, B_FALSE, zsb,
	    &zsb->z_os);
	if (err) {
		zsb->z_os = NULL;
	} else {
		znode_t *zp;
		uint64_t sa_obj = 0;

		/* Locate the SA master object; absence only matters on
		 * pools at or above the SA on-disk version. */
		err2 = zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
		    ZFS_SA_ATTRS, 8, 1, &sa_obj);

		if ((err || err2) && zsb->z_version >= ZPL_VERSION_SA)
			goto bail;

		if ((err = sa_setup(zsb->z_os, sa_obj,
		    zfs_attr_table, ZPL_END, &zsb->z_attr_table)) != 0)
			goto bail;

		VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);
		zsb->z_rollback_time = jiffies;

		/*
		 * Attempt to re-establish all the active inodes with their
		 * dbufs. If a zfs_rezget() fails, then we unhash the inode
		 * and mark it stale. This prevents a collision if a new
		 * inode/object is created which must use the same inode
		 * number. The stale inode will be be released when the
		 * VFS prunes the dentry holding the remaining references
		 * on the stale inode.
		 */
		mutex_enter(&zsb->z_znodes_lock);
		for (zp = list_head(&zsb->z_all_znodes); zp;
		    zp = list_next(&zsb->z_all_znodes, zp)) {
			err2 = zfs_rezget(zp);
			if (err2) {
				remove_inode_hash(ZTOI(zp));
				zp->z_is_stale = B_TRUE;
			}
		}
		mutex_exit(&zsb->z_znodes_lock);
	}

bail:
	/* release the VFS ops */
	rw_exit(&zsb->z_teardown_inactive_lock);
	rrw_exit(&zsb->z_teardown_lock, FTAG);

	if (err) {
		/*
		 * Since we couldn't reopen zfs_sb_t or, setup the
		 * sa framework, force unmount this file system.
		 */
		if (zsb->z_os)
			(void) zfs_umount(zsb->z_sb);
	}
	return (err);
}