static zfs_handle_t *
libzfs_zfs_snapshot_next(libzfs_handle_t *p_libzfshd, const char *psz_zfs,
    char *psz_buffer, size_t i_buffer, uint64_t *p_cookie,
    const char **ppsz_error)
{
	objset_t *p_os;
	zfs_handle_t *p_zfs_snap = NULL;
	size_t i_zfs_len = strlen(psz_zfs);
	int i_error;

	/* Check that the zfs name leaves room for a "@snapshot" suffix */
	if (i_zfs_len >= i_buffer) {
		*ppsz_error = "ZFS name too long to handle snapshots";
		return (NULL);
	}

	/* On the first call, prefetch the snapshot list */
	if (*p_cookie == 0)
		dmu_objset_find(psz_zfs, dmu_objset_prefetch, NULL,
		    DS_FIND_SNAPSHOTS);

top:
	if ((i_error = dmu_objset_hold(psz_zfs, FTAG, &p_os))) {
		*ppsz_error = "Unable to hold the zfs filesystem";
		return (NULL);
	}

	/*
	 * Write "<dataset>@" into the buffer and let
	 * dmu_snapshot_list_next() append the snapshot name after it;
	 * ENOENT here simply means the iteration is complete.
	 */
	snprintf(psz_buffer, i_buffer, "%s@", psz_zfs);
	if ((i_error = dmu_snapshot_list_next(p_os, i_buffer - i_zfs_len - 1,
	    psz_buffer + i_zfs_len + 1, NULL, p_cookie, NULL)))
		*ppsz_error = "Unable to get the next snapshot";
	dmu_objset_rele(p_os, FTAG);

	if (i_error == 0) {
		i_error = dmu_objset_hold(psz_buffer, FTAG, &p_os);
		/* The snapshot was destroyed under us; skip to the next */
		if (i_error == ENOENT)
			goto top;
		/* On any other failure p_os is not held; don't rele it */
		if (i_error) {
			*ppsz_error = "Unable to hold the snapshot";
			return (NULL);
		}
		if (!(p_zfs_snap = libzfs_make_dataset_handle(p_libzfshd,
		    psz_buffer)))
			*ppsz_error = "Unable to create a zfs handle for the snapshot";
		dmu_objset_rele(p_os, FTAG);
	}
	return (p_zfs_snap);
}
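/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * caller that walks every snapshot of a dataset with
 * libzfs_zfs_snapshot_next().  The cookie starts at zero and is advanced
 * by dmu_snapshot_list_next() on each call; iteration ends when NULL is
 * returned.  zfs_close() is assumed to be the usual libzfs way to drop
 * a handle in this codebase.
 */
static void
example_walk_snapshots(libzfs_handle_t *p_libzfshd, const char *psz_zfs)
{
	char psz_buffer[MAXNAMELEN];
	uint64_t i_cookie = 0;
	const char *psz_error = NULL;
	zfs_handle_t *p_snap;

	while ((p_snap = libzfs_zfs_snapshot_next(p_libzfshd, psz_zfs,
	    psz_buffer, sizeof (psz_buffer), &i_cookie, &psz_error)) != NULL) {
		/* psz_buffer now holds "<dataset>@<snapshot>" */
		zfs_close(p_snap);
	}
}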
/*
 * ZFS_PROP_VOLSIZE set entry point.
 */
int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	zvol_state_t *zv;
	dmu_object_info_t *doi;
	objset_t *os = NULL;
	uint64_t readonly;
	int error;

	mutex_enter(&zvol_state_lock);

	zv = zvol_find_by_name(name);
	if (zv == NULL) {
		error = ENXIO;
		goto out;
	}

	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

	error = dmu_objset_hold(name, FTAG, &os);
	if (error)
		goto out_doi;

	if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi->doi_data_block_size)) != 0)
		goto out_doi;

	VERIFY(dsl_prop_get_integer(name, "readonly", &readonly, NULL) == 0);
	if (readonly) {
		error = EROFS;
		goto out_doi;
	}

	if (get_disk_ro(zv->zv_disk) || (zv->zv_flags & ZVOL_RDONLY)) {
		error = EROFS;
		goto out_doi;
	}

	error = zvol_update_volsize(zv, volsize, os);
out_doi:
	kmem_free(doi, sizeof (dmu_object_info_t));
out:
	if (os)
		dmu_objset_rele(os, FTAG);
	mutex_exit(&zvol_state_lock);

	return (error);
}
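/*
 * Illustrative sketch, an assumption rather than upstream code: how a
 * caller might grow a zvol.  zvol_check_volsize() above rejects sizes
 * that are not a multiple of the volume block size, so the requested
 * size is rounded up first.  P2ROUNDUP() is the usual sysmacros.h
 * power-of-two round-up helper.
 */
static int
example_grow_zvol(const char *name, uint64_t new_size, uint64_t blocksize)
{
	/* Round up to the next multiple of the volume block size. */
	uint64_t aligned = P2ROUNDUP(new_size, blocksize);

	return (zvol_set_volsize(name, aligned));
}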
int
dsl_prop_get(const char *dsname, const char *propname,
    int intsz, int numints, void *buf, char *setpoint)
{
	objset_t *os;
	int error;

	error = dmu_objset_hold(dsname, FTAG, &os);
	if (error != 0)
		return (error);

	error = dsl_prop_get_ds(dmu_objset_ds(os), propname,
	    intsz, numints, buf, setpoint);

	dmu_objset_rele(os, FTAG);
	return (error);
}
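/*
 * Illustrative sketch, a hypothetical caller: fetching one integer
 * property with dsl_prop_get().  intsz is the width of a single value
 * in bytes and numints the number of values, so one uint64_t is (8, 1).
 * If setpoint is non-NULL it receives the name of the dataset the value
 * was inherited from.
 */
static int
example_get_compression(const char *dsname, uint64_t *p_value)
{
	char setpoint[MAXNAMELEN];

	return (dsl_prop_get(dsname, "compression",
	    sizeof (uint64_t), 1, p_value, setpoint));
}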
static int
libzfs_update_stats(zfs_handle_t *p_zfs)
{
	objset_t *p_os;
	nvlist_t *pnv_allprops, *pnv_userprops;
	int i_error;

	if ((i_error = dmu_objset_hold(p_zfs->zfs_name, FTAG, &p_os)))
		return (i_error);

	dmu_objset_fast_stat(p_os, &p_zfs->zfs_dmustats);

	if ((i_error = dsl_prop_get_all(p_os, &pnv_allprops)) == 0) {
		dmu_objset_stats(p_os, pnv_allprops);
		if (!p_zfs->zfs_dmustats.dds_inconsistent &&
		    dmu_objset_type(p_os) == DMU_OST_ZVOL) {
			/*
			 * Keep the call outside assert() so the zvol stats
			 * are still gathered in NDEBUG builds.
			 */
			int i_zvol_err = zvol_get_stats(p_os, pnv_allprops);
			assert(i_zvol_err == 0);
			(void) i_zvol_err;
		}
	}
	dmu_objset_rele(p_os, FTAG);

	/* Bail out before touching pnv_allprops if we failed to fetch it */
	if (i_error)
		return (i_error);

	/* Continue processing the stats */
	if ((pnv_userprops = process_user_props(p_zfs, pnv_allprops)) == NULL) {
		nvlist_free(pnv_allprops);
		return (1);
	}

	nvlist_free(p_zfs->zfs_props);
	nvlist_free(p_zfs->zfs_user_props);
	p_zfs->zfs_props = pnv_allprops;
	p_zfs->zfs_user_props = pnv_userprops;

	return (0);
}
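/*
 * Illustrative sketch, a hypothetical consumer of the caches refreshed
 * by libzfs_update_stats().  dsl_prop_get_all() returns native
 * properties as nested nvlists keyed by property name, each carrying a
 * ZPROP_VALUE ("value") entry; the lookup below assumes that layout and
 * the zfs_props field populated above.
 */
static int
example_read_used(zfs_handle_t *p_zfs, uint64_t *p_used)
{
	nvlist_t *pnv_prop;

	if (libzfs_update_stats(p_zfs) != 0)
		return (1);
	if (nvlist_lookup_nvlist(p_zfs->zfs_props,
	    zfs_prop_to_name(ZFS_PROP_USED), &pnv_prop) != 0)
		return (1);
	return (nvlist_lookup_uint64(pnv_prop, ZPROP_VALUE, p_used));
}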
/*
 * Reopen zfs_sb_t and release VFS ops.
 */
int
zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
{
	int err, err2;
	znode_t *zp;
	uint64_t sa_obj = 0;

	ASSERT(RRM_WRITE_HELD(&zsb->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zsb->z_teardown_inactive_lock));

	/*
	 * We already own this, so just hold and rele it to update the
	 * objset_t, as the one we had before may have been evicted.
	 */
	VERIFY0(dmu_objset_hold(osname, zsb, &zsb->z_os));
	VERIFY3P(zsb->z_os->os_dsl_dataset->ds_owner, ==, zsb);
	VERIFY(dsl_dataset_long_held(zsb->z_os->os_dsl_dataset));
	dmu_objset_rele(zsb->z_os, zsb);

	/*
	 * Make sure version hasn't changed
	 */
	err = zfs_get_zplprop(zsb->z_os, ZFS_PROP_VERSION,
	    &zsb->z_version);
	if (err)
		goto bail;

	err = zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
	    ZFS_SA_ATTRS, 8, 1, &sa_obj);
	if (err && zsb->z_version >= ZPL_VERSION_SA)
		goto bail;

	if ((err = sa_setup(zsb->z_os, sa_obj,
	    zfs_attr_table, ZPL_END, &zsb->z_attr_table)) != 0)
		goto bail;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(zsb->z_os, zfs_sa_upgrade);

	VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);

	zfs_set_fuid_feature(zsb);
	zsb->z_rollback_time = jiffies;

	/*
	 * Attempt to re-establish all the active inodes with their
	 * dbufs.  If a zfs_rezget() fails, then we unhash the inode
	 * and mark it stale.  This prevents a collision if a new
	 * inode/object is created which must use the same inode
	 * number.  The stale inode will be released when the
	 * VFS prunes the dentry holding the remaining references
	 * on the stale inode.
	 */
	mutex_enter(&zsb->z_znodes_lock);
	for (zp = list_head(&zsb->z_all_znodes); zp;
	    zp = list_next(&zsb->z_all_znodes, zp)) {
		err2 = zfs_rezget(zp);
		if (err2) {
			remove_inode_hash(ZTOI(zp));
			zp->z_is_stale = B_TRUE;
		}
	}
	mutex_exit(&zsb->z_znodes_lock);

bail:
	/* release the VFS ops */
	rw_exit(&zsb->z_teardown_inactive_lock);
	rrm_exit(&zsb->z_teardown_lock, FTAG);

	if (err) {
		/*
		 * Since we couldn't setup the sa framework, try to force
		 * unmount this file system.
		 */
		if (zsb->z_os)
			(void) zfs_umount(zsb->z_sb);
	}
	return (err);
}
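/*
 * Illustrative sketch, a hypothetical helper rather than upstream code:
 * the suspend/resume pairing around an operation such as rollback.
 * zfs_suspend_fs() is assumed to tear the zfs_sb_t down to its bare
 * objset and take z_teardown_lock/z_teardown_inactive_lock as writer;
 * zfs_resume_fs() above rebuilds the SA tables and znodes and drops
 * both locks, so every suspend must be matched by exactly one resume.
 */
static int
example_with_fs_suspended(zfs_sb_t *zsb, const char *osname,
    int (*op)(const char *))
{
	int error;

	if ((error = zfs_suspend_fs(zsb)) != 0)
		return (error);
	error = op(osname);
	/* Resume even if op failed; resume may force an unmount on error. */
	(void) zfs_resume_fs(zsb, osname);

	return (error);
}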
/* ARGSUSED */
static int
zfsctl_snapdir_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	zfsctl_snapdir_t *sdp = dvp->v_data;
	objset_t *snap;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];
	char *mountpoint;
	zfs_snapentry_t *sep, search;
	struct mounta margs;
	vfs_t *vfsp;
	size_t mountpoint_len;
	avl_index_t where;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;

	/*
	 * No extended attributes allowed under .zfs
	 */
	if (flags & LOOKUP_XATTR)
		return (EINVAL);

	ASSERT(dvp->v_type == VDIR);

	/*
	 * If we get a recursive call, that means we got called
	 * from the domount() code while it was trying to look up the
	 * spec (which looks like a local path for zfs).  We need to
	 * add some flag to domount() to tell it not to do this lookup.
	 */
	if (MUTEX_HELD(&sdp->sd_lock))
		return (ENOENT);

	ZFS_ENTER(zfsvfs);

	if (gfs_lookup_dot(vpp, dvp, zfsvfs->z_ctldir, nm) == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	if (flags & FIGNORECASE) {
		boolean_t conflict = B_FALSE;

		err = dmu_snapshot_realname(zfsvfs->z_os, nm, real,
		    MAXNAMELEN, &conflict);
		if (err == 0) {
			nm = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
		if (realpnp)
			(void) strlcpy(realpnp->pn_buf, nm,
			    realpnp->pn_bufsize);
		if (conflict && direntflags)
			*direntflags = ED_CASE_CONFLICT;
	}

	mutex_enter(&sdp->sd_lock);
	search.se_name = (char *)nm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) != NULL) {
		*vpp = sep->se_root;
		VN_HOLD(*vpp);
		err = traverse(vpp);
		if (err) {
			VN_RELE(*vpp);
			*vpp = NULL;
		} else if (*vpp == sep->se_root) {
			/*
			 * The snapshot was unmounted behind our backs,
			 * try to remount it.
			 */
			goto domount;
		} else {
			/*
			 * VROOT was set during the traverse call.  We need
			 * to clear it since we're pretending to be part
			 * of our parent's vfs.
			 */
			(*vpp)->v_flag &= ~VROOT;
		}
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (err);
	}

	/*
	 * The requested snapshot is not currently mounted, look it up.
	 */
	err = zfsctl_snapshot_zname(dvp, nm, MAXNAMELEN, snapname);
	if (err) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		/*
		 * Handle "ls *" or "?" gracefully by forcing EILSEQ to
		 * ENOENT, since the shell ultimately passes "*" or "?"
		 * as the name to look up.
		 */
		return (err == EILSEQ ? ENOENT : err);
	}
	if (dmu_objset_hold(snapname, FTAG, &snap) != 0) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (ENOENT);
	}

	sep = kmem_alloc(sizeof (zfs_snapentry_t), KM_SLEEP);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	*vpp = sep->se_root = zfsctl_snapshot_mknode(dvp, dmu_objset_id(snap));
	avl_insert(&sdp->sd_snaps, sep, where);

	dmu_objset_rele(snap, FTAG);
domount:
	mountpoint_len = strlen(refstr_value(dvp->v_vfsp->vfs_mntpt)) +
	    strlen("/.zfs/snapshot/") + strlen(nm) + 1;
	mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
	(void) snprintf(mountpoint, mountpoint_len,
	    "%s/.zfs/snapshot/%s",
	    refstr_value(dvp->v_vfsp->vfs_mntpt), nm);

	margs.spec = snapname;
	margs.dir = mountpoint;
	margs.flags = MS_SYSSPACE | MS_NOMNTTAB;
	margs.fstype = "zfs";
	margs.dataptr = NULL;
	margs.datalen = 0;
	margs.optptr = NULL;
	margs.optlen = 0;

	err = domount("zfs", &margs, *vpp, kcred, &vfsp);
	kmem_free(mountpoint, mountpoint_len);

	if (err == 0) {
		/*
		 * Return the mounted root rather than the covered mount
		 * point.  Takes the GFS vnode at .zfs/snapshot/<snapname>
		 * and returns the ZFS vnode mounted on top of the GFS
		 * node.  This ZFS vnode is the root of the newly created
		 * vfsp.
		 */
		VFS_RELE(vfsp);
		err = traverse(vpp);
	}

	if (err == 0) {
		/*
		 * Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
		 *
		 * This is where we lie about our v_vfsp in order to
		 * make .zfs/snapshot/<snapname> accessible over NFS
		 * without requiring manual mounts of <snapname>.
		 */
		ASSERT(VTOZ(*vpp)->z_zfsvfs != zfsvfs);
		VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;
		(*vpp)->v_vfsp = zfsvfs->z_vfs;
		(*vpp)->v_flag &= ~VROOT;
	}
	mutex_exit(&sdp->sd_lock);
	ZFS_EXIT(zfsvfs);

	/*
	 * If we had an error, drop our hold on the vnode and
	 * zfsctl_snapshot_inactive() will clean up.
	 */
	if (err) {
		VN_RELE(*vpp);
		*vpp = NULL;
	}
	return (err);
}
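/*
 * Illustrative sketch: the sd_snaps AVL tree searched above is keyed by
 * snapshot name, so its comparator orders zfs_snapentry_t nodes by
 * se_name.  This mirrors the comparator zfs_ctldir.c registers with
 * avl_create(); it is shown here as an assumption to make the
 * avl_find()/avl_insert() calls in zfsctl_snapdir_lookup() concrete.
 */
static int
example_snapentry_compare(const void *a, const void *b)
{
	const zfs_snapentry_t *sa = a;
	const zfs_snapentry_t *sb = b;
	int ret = strcmp(sa->se_name, sb->se_name);

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}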