ssize_t
zpl_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
        znode_t *zp = ITOZ(dentry->d_inode);
        zfs_sb_t *zsb = ZTOZSB(zp);
        xattr_filldir_t xf = { buffer_size, 0, buffer, dentry->d_inode };
        cred_t *cr = CRED();
        fstrans_cookie_t cookie;
        int error = 0;

        crhold(cr);
        cookie = spl_fstrans_mark();
        rrm_enter_read(&(zsb)->z_teardown_lock, FTAG);
        rw_enter(&zp->z_xattr_lock, RW_READER);

        if (zsb->z_use_sa && zp->z_is_sa) {
                error = zpl_xattr_list_sa(&xf);
                if (error)
                        goto out;
        }

        error = zpl_xattr_list_dir(&xf, cr);
        if (error)
                goto out;

        error = xf.offset;
out:
        rw_exit(&zp->z_xattr_lock);
        rrm_exit(&(zsb)->z_teardown_lock, FTAG);
        spl_fstrans_unmark(cookie);
        crfree(cr);

        return (error);
}
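/*
 * For reference, a minimal sketch of the xattr_filldir_t accumulator that
 * zpl_xattr_list() fills in.  The field order is inferred from the
 * initializer `{ buffer_size, 0, buffer, dentry->d_inode }` above; the
 * field names used here are assumptions, not copied from the original
 * header.
 */
typedef struct xattr_filldir {
        size_t size;            /* total size of the user-supplied buffer */
        size_t offset;          /* bytes written (or required) so far */
        char *buf;              /* destination buffer, NULL when only probing size */
        struct inode *inode;    /* inode whose xattr names are being listed */
} xattr_filldir_t;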
static int
zpl_xattr_set(struct inode *ip, const char *name, const void *value,
    size_t size, int flags)
{
        znode_t *zp = ITOZ(ip);
        zfs_sb_t *zsb = ZTOZSB(zp);
        cred_t *cr = CRED();
        fstrans_cookie_t cookie;
        int error;

        crhold(cr);
        cookie = spl_fstrans_mark();
        rrm_enter_read(&(zsb)->z_teardown_lock, FTAG);
        rw_enter(&ITOZ(ip)->z_xattr_lock, RW_WRITER);

        /*
         * Before setting the xattr check to see if it already exists.
         * This is done to ensure the following optional flags are honored.
         *
         *   XATTR_CREATE: fail if xattr already exists
         *   XATTR_REPLACE: fail if xattr does not exist
         */
        error = __zpl_xattr_get(ip, name, NULL, 0, cr);
        if (error < 0) {
                if (error != -ENODATA)
                        goto out;
                if (flags & XATTR_REPLACE)
                        goto out;

                /* The xattr to be removed already doesn't exist */
                error = 0;
                if (value == NULL)
                        goto out;
        } else {
                error = -EEXIST;
                if (flags & XATTR_CREATE)
                        goto out;
        }

        /* Preferentially store the xattr as a SA for better performance */
        if (zsb->z_use_sa && zsb->z_xattr_sa && zp->z_is_sa) {
                error = zpl_xattr_set_sa(ip, name, value, size, flags, cr);
                if (error == 0)
                        goto out;
        }

        error = zpl_xattr_set_dir(ip, name, value, size, flags, cr);
out:
        rw_exit(&ITOZ(ip)->z_xattr_lock);
        rrm_exit(&(zsb)->z_teardown_lock, FTAG);
        spl_fstrans_unmark(cookie);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}
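/*
 * Illustrative user-space usage, not part of the kernel source: the
 * XATTR_CREATE / XATTR_REPLACE semantics enforced by zpl_xattr_set() above
 * correspond to the standard setxattr(2) flags.  The path and attribute
 * name below are placeholders.
 */
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int
demo_setxattr_flags(const char *path)
{
        const char *val = "value1";

        /* Fails with EEXIST if "user.demo" is already present. */
        if (setxattr(path, "user.demo", val, strlen(val), XATTR_CREATE) < 0)
                perror("setxattr(XATTR_CREATE)");

        /* Fails with ENODATA if "user.demo" does not exist yet. */
        if (setxattr(path, "user.demo", val, strlen(val), XATTR_REPLACE) < 0)
                perror("setxattr(XATTR_REPLACE)");

        return (0);
}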
static int
zpl_xattr_get(struct inode *ip, const char *name, void *value, size_t size)
{
        znode_t *zp = ITOZ(ip);
        zfs_sb_t *zsb = ZTOZSB(zp);
        cred_t *cr = CRED();
        fstrans_cookie_t cookie;
        int error;

        crhold(cr);
        cookie = spl_fstrans_mark();
        rrm_enter_read(&(zsb)->z_teardown_lock, FTAG);
        rw_enter(&zp->z_xattr_lock, RW_READER);
        error = __zpl_xattr_get(ip, name, value, size, cr);
        rw_exit(&zp->z_xattr_lock);
        rrm_exit(&(zsb)->z_teardown_lock, FTAG);
        spl_fstrans_unmark(cookie);
        crfree(cr);

        return (error);
}
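/*
 * Illustrative user-space usage, not part of the kernel source: the common
 * two-call pattern for getxattr(2), which zpl_xattr_get() ultimately
 * services.  A zero-size first call only reports the value length so the
 * caller can allocate an adequately sized buffer; the value may still grow
 * between the two calls, which robust code handles by retrying on ERANGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/xattr.h>

int
demo_getxattr(const char *path, const char *name)
{
        ssize_t len;
        char *buf;

        /* First call: size 0 returns the current value length. */
        len = getxattr(path, name, NULL, 0);
        if (len < 0) {
                perror("getxattr");
                return (-1);
        }

        buf = malloc(len);
        if (buf == NULL)
                return (-1);

        /* Second call: actually copy the value out. */
        len = getxattr(path, name, buf, len);
        if (len >= 0)
                printf("%s = %.*s\n", name, (int)len, buf);

        free(buf);
        return (0);
}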
/*
 * Reopen zfs_sb_t and release VFS ops.
 */
int
zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
{
        int err, err2;
        znode_t *zp;
        uint64_t sa_obj = 0;

        ASSERT(RRM_WRITE_HELD(&zsb->z_teardown_lock));
        ASSERT(RW_WRITE_HELD(&zsb->z_teardown_inactive_lock));

        /*
         * We already own this, so just hold and rele it to update the
         * objset_t, as the one we had before may have been evicted.
         */
        VERIFY0(dmu_objset_hold(osname, zsb, &zsb->z_os));
        VERIFY3P(zsb->z_os->os_dsl_dataset->ds_owner, ==, zsb);
        VERIFY(dsl_dataset_long_held(zsb->z_os->os_dsl_dataset));
        dmu_objset_rele(zsb->z_os, zsb);

        /*
         * Make sure version hasn't changed
         */
        err = zfs_get_zplprop(zsb->z_os, ZFS_PROP_VERSION,
            &zsb->z_version);
        if (err)
                goto bail;

        err = zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
            ZFS_SA_ATTRS, 8, 1, &sa_obj);
        if (err && zsb->z_version >= ZPL_VERSION_SA)
                goto bail;

        if ((err = sa_setup(zsb->z_os, sa_obj,
            zfs_attr_table, ZPL_END, &zsb->z_attr_table)) != 0)
                goto bail;

        if (zsb->z_version >= ZPL_VERSION_SA)
                sa_register_update_callback(zsb->z_os, zfs_sa_upgrade);

        VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);

        zfs_set_fuid_feature(zsb);
        zsb->z_rollback_time = jiffies;

        /*
         * Attempt to re-establish all the active inodes with their
         * dbufs. If a zfs_rezget() fails, then we unhash the inode
         * and mark it stale. This prevents a collision if a new
         * inode/object is created which must use the same inode
         * number. The stale inode will be released when the
         * VFS prunes the dentry holding the remaining references
         * on the stale inode.
         */
        mutex_enter(&zsb->z_znodes_lock);
        for (zp = list_head(&zsb->z_all_znodes); zp;
            zp = list_next(&zsb->z_all_znodes, zp)) {
                err2 = zfs_rezget(zp);
                if (err2) {
                        remove_inode_hash(ZTOI(zp));
                        zp->z_is_stale = B_TRUE;
                }
        }
        mutex_exit(&zsb->z_znodes_lock);

bail:
        /* release the VFS ops */
        rw_exit(&zsb->z_teardown_inactive_lock);
        rrm_exit(&zsb->z_teardown_lock, FTAG);

        if (err) {
                /*
                 * Since we couldn't setup the sa framework, try to force
                 * unmount this file system.
                 */
                if (zsb->z_os)
                        (void) zfs_umount(zsb->z_sb);
        }
        return (err);
}
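/*
 * A minimal sketch of the expected call sequence implied by the lock
 * assertions in zfs_resume_fs() above: a suspend step tears the zfs_sb_t
 * down with 'unmounting' = B_FALSE (leaving both teardown locks held for
 * writer), and zfs_resume_fs() later reopens the objset and drops them.
 * The exact body of zfs_suspend_fs() in the original source may differ.
 */
int
zfs_suspend_fs(zfs_sb_t *zsb)
{
        int error;

        if ((error = zfs_sb_teardown(zsb, B_FALSE)) != 0)
                return (error);

        return (0);
}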
/*
 * Teardown the zfs_sb_t.
 *
 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
 * and 'z_teardown_inactive_lock' held.
 */
int
zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
{
        znode_t *zp;

        /*
         * If someone has not already unmounted this file system,
         * drain the iput_taskq to ensure all active references to the
         * zfs_sb_t have been handled; only then can it be safely destroyed.
         */
        if (zsb->z_os) {
                /*
                 * If we're unmounting we have to wait for the list to
                 * drain completely.
                 *
                 * If we're not unmounting there's no guarantee the list
                 * will drain completely, but iputs run from the taskq
                 * may add the parents of dir-based xattrs to the taskq
                 * so we want to wait for these.
                 *
                 * We can safely read z_nr_znodes without locking because the
                 * VFS has already blocked operations which add to the
                 * z_all_znodes list and thus increment z_nr_znodes.
                 */
                int round = 0;
                while (zsb->z_nr_znodes > 0) {
                        taskq_wait_outstanding(dsl_pool_iput_taskq(
                            dmu_objset_pool(zsb->z_os)), 0);
                        if (++round > 1 && !unmounting)
                                break;
                }
        }

        rrm_enter(&zsb->z_teardown_lock, RW_WRITER, FTAG);

        if (!unmounting) {
                /*
                 * We purge the parent filesystem's super block as the
                 * parent filesystem and all of its snapshots have their
                 * inode's super block set to the parent's filesystem's
                 * super block. Note, 'z_parent' is self referential
                 * for non-snapshots.
                 */
                shrink_dcache_sb(zsb->z_parent->z_sb);
        }

        /*
         * Close the zil. NB: Can't close the zil while zfs_inactive
         * threads are blocked as zil_close can call zfs_inactive.
         */
        if (zsb->z_log) {
                zil_close(zsb->z_log);
                zsb->z_log = NULL;
        }

        rw_enter(&zsb->z_teardown_inactive_lock, RW_WRITER);

        /*
         * If we are not unmounting (ie: online recv) and someone already
         * unmounted this file system while we were doing the switcheroo,
         * or a reopen of z_os failed then just bail out now.
         */
        if (!unmounting && (zsb->z_unmounted || zsb->z_os == NULL)) {
                rw_exit(&zsb->z_teardown_inactive_lock);
                rrm_exit(&zsb->z_teardown_lock, FTAG);
                return (SET_ERROR(EIO));
        }

        /*
         * At this point there are no VFS ops active, and any new VFS ops
         * will fail with EIO since we have z_teardown_lock for writer (only
         * relevant for forced unmount).
         *
         * Release all holds on dbufs.
         */
        if (!unmounting) {
                mutex_enter(&zsb->z_znodes_lock);
                for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
                    zp = list_next(&zsb->z_all_znodes, zp)) {
                        if (zp->z_sa_hdl)
                                zfs_znode_dmu_fini(zp);
                }
                mutex_exit(&zsb->z_znodes_lock);
        }

        /*
         * If we are unmounting, set the unmounted flag and let new VFS ops
         * unblock. zfs_inactive will have the unmounted behavior, and all
         * other VFS ops will fail with EIO.
         */
        if (unmounting) {
                zsb->z_unmounted = B_TRUE;
                rrm_exit(&zsb->z_teardown_lock, FTAG);
                rw_exit(&zsb->z_teardown_inactive_lock);
        }

        /*
         * z_os will be NULL if there was an error in attempting to reopen
         * zsb, so just return as the properties had already been
         * unregistered and cached data had been evicted before.
         */
        if (zsb->z_os == NULL)
                return (0);

        /*
         * Unregister properties.
         */
        zfs_unregister_callbacks(zsb);

        /*
         * Evict cached data
         */
        if (dsl_dataset_is_dirty(dmu_objset_ds(zsb->z_os)) &&
            !zfs_is_readonly(zsb))
                txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
        dmu_objset_evict_dbufs(zsb->z_os);

        return (0);
}
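/*
 * For context, an approximate sketch of how VFS entry points observe the
 * teardown state described above: each operation takes z_teardown_lock as
 * reader on entry and bails with EIO once z_unmounted has been set.  This
 * follows the ZFS_ENTER/ZFS_EXIT pattern used throughout the ZPL, but the
 * body below is paraphrased rather than copied from the original headers.
 */
#define	ZFS_ENTER(zsb)							\
	do {								\
		rrm_enter_read(&(zsb)->z_teardown_lock, FTAG);		\
		if ((zsb)->z_unmounted) {				\
			rrm_exit(&(zsb)->z_teardown_lock, FTAG);	\
			return (SET_ERROR(EIO));			\
		}							\
	} while (0)

#define	ZFS_EXIT(zsb)							\
	rrm_exit(&(zsb)->z_teardown_lock, FTAG)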
static int
zpl_xattr_set(struct inode *ip, const char *name, const void *value,
    size_t size, int flags)
{
        znode_t *zp = ITOZ(ip);
        zfs_sb_t *zsb = ZTOZSB(zp);
        cred_t *cr = CRED();
        fstrans_cookie_t cookie;
        int where;
        int error;

        crhold(cr);
        cookie = spl_fstrans_mark();
        rrm_enter_read(&(zsb)->z_teardown_lock, FTAG);
        rw_enter(&ITOZ(ip)->z_xattr_lock, RW_WRITER);

        /*
         * Before setting the xattr check to see if it already exists.
         * This is done to ensure the following optional flags are honored.
         *
         *   XATTR_CREATE: fail if xattr already exists
         *   XATTR_REPLACE: fail if xattr does not exist
         *
         * We also want to know if it resides in sa or dir, so we can make
         * sure we don't end up with duplicates in both places.
         */
        error = __zpl_xattr_where(ip, name, &where, cr);
        if (error < 0) {
                if (error != -ENODATA)
                        goto out;
                if (flags & XATTR_REPLACE)
                        goto out;

                /* The xattr to be removed already doesn't exist */
                error = 0;
                if (value == NULL)
                        goto out;
        } else {
                error = -EEXIST;
                if (flags & XATTR_CREATE)
                        goto out;
        }

        /* Preferentially store the xattr as a SA for better performance */
        if (zsb->z_use_sa && zp->z_is_sa &&
            (zsb->z_xattr_sa || (value == NULL && where & XATTR_IN_SA))) {
                error = zpl_xattr_set_sa(ip, name, value, size, flags, cr);
                if (error == 0) {
                        /*
                         * Successfully put into SA, we need to clear the one
                         * in dir.
                         */
                        if (where & XATTR_IN_DIR)
                                zpl_xattr_set_dir(ip, name, NULL, 0, 0, cr);
                        goto out;
                }
        }

        error = zpl_xattr_set_dir(ip, name, value, size, flags, cr);
        /*
         * Successfully put into dir, we need to clear the one in SA.
         */
        if (error == 0 && (where & XATTR_IN_SA))
                zpl_xattr_set_sa(ip, name, NULL, 0, 0, cr);
out:
        rw_exit(&ITOZ(ip)->z_xattr_lock);
        rrm_exit(&(zsb)->z_teardown_lock, FTAG);
        spl_fstrans_unmark(cookie);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}
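/*
 * A plausible sketch of the __zpl_xattr_where() helper used above.  It is
 * assumed to probe both backends with zero-size zpl_xattr_get_sa() and
 * zpl_xattr_get_dir() lookups and report the result as a bitmask; the
 * XATTR_NOENT/XATTR_IN_SA/XATTR_IN_DIR names and the exact body are
 * assumptions here, not copied from the original source.
 */
static int
__zpl_xattr_where(struct inode *ip, const char *name, int *where, cred_t *cr)
{
        znode_t *zp = ITOZ(ip);
        zfs_sb_t *zsb = ZTOZSB(zp);
        int error;

        ASSERT(where);
        ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));

        *where = XATTR_NOENT;

        /* Probe the SA backend without copying the value out. */
        if (zsb->z_use_sa && zp->z_is_sa) {
                error = zpl_xattr_get_sa(ip, name, NULL, 0);
                if (error >= 0)
                        *where |= XATTR_IN_SA;
                else if (error != -ENODATA)
                        return (error);
        }

        /* Probe the directory-based backend the same way. */
        error = zpl_xattr_get_dir(ip, name, NULL, 0, cr);
        if (error >= 0)
                *where |= XATTR_IN_DIR;
        else if (error != -ENODATA)
                return (error);

        return (*where == XATTR_NOENT ? -ENODATA : 0);
}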