/*
 * Look up an entry in the .zfs/shares directory by delegating to the
 * real shares directory object (z_shares_dir).
 *
 * Returns 0 on success, ENOTSUP if the dataset has no shares directory,
 * or the error from zfs_zget()/VOP_LOOKUP().
 */
/* ARGSUSED */
static int
zfsctl_shares_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	znode_t *dzp;
	int error;

	ZFS_ENTER(zfsvfs);

	if (gfs_lookup_dot(vpp, dvp, zfsvfs->z_ctldir, nm) == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	if (zfsvfs->z_shares_dir == 0) {
		ZFS_EXIT(zfsvfs);
		return (ENOTSUP);
	}

	/*
	 * Fix: release the shares directory vnode only when zfs_zget()
	 * succeeded.  The original called VN_RELE(ZTOV(dzp)) unconditionally,
	 * dereferencing an uninitialized dzp on zfs_zget() failure.
	 */
	if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
		error = VOP_LOOKUP(ZTOV(dzp), nm, vpp, pnp, flags, rdir,
		    cr, ct, direntflags, realpnp);
		VN_RELE(ZTOV(dzp));
	}

	ZFS_EXIT(zfsvfs);

	return (error);
}
/*
 * Read the contents of .zfs/shares by forwarding the readdir request
 * to the underlying shares directory object.
 */
/* ARGSUSED */
static int
zfsctl_shares_readdir(vnode_t *vp, uio_t *uiop, cred_t *cr, int *eofp,
    caller_context_t *ct, int flags)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	znode_t *dzp;
	int error;

	ZFS_ENTER(zfsvfs);

	if (zfsvfs->z_shares_dir == 0) {
		ZFS_EXIT(zfsvfs);
		return (ENOTSUP);
	}

	error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp);
	if (error != 0) {
		/* The shares object is gone; report an empty, ended dir. */
		*eofp = 1;
		error = ENOENT;
	} else {
		error = VOP_READDIR(ZTOV(dzp), uiop, cr, eofp, ct, flags);
		VN_RELE(ZTOV(dzp));
	}

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Look up an entry in the .zfs control directory.  ".." resolves to the
 * filesystem root; everything else is handled by the generic GFS lookup.
 */
/* ARGSUSED */
int
zfsctl_root_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;

	/* No extended attributes allowed under .zfs. */
	if (flags & LOOKUP_XATTR)
		return (EINVAL);

	ZFS_ENTER(zfsvfs);

	if (strcmp(nm, "..") != 0) {
		err = gfs_vop_lookup(dvp, nm, vpp, pnp, flags, rdir,
		    cr, ct, direntflags, realpnp);
	} else {
		err = VFS_ROOT(dvp->v_vfsp, vpp);
	}

	ZFS_EXIT(zfsvfs);

	return (err);
}
/* ARGSUSED */ static int zpl_shares_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *ip = dentry->d_inode; zfs_sb_t *zsb = ITOZSB(ip); znode_t *dzp; int error; ZFS_ENTER(zsb); if (zsb->z_shares_dir == 0) { error = simple_getattr(mnt, dentry, stat); stat->nlink = stat->size = 2; stat->atime = CURRENT_TIME; ZFS_EXIT(zsb); return (error); } error = -zfs_zget(zsb, zsb->z_shares_dir, &dzp); if (error == 0) { error = -zfs_getattr_fast(ZTOI(dzp), stat); iput(ZTOI(dzp)); } //VN_RELE(ZTOI(dzp)); ZFS_EXIT(zsb); ASSERT3S(error, <=, 0); return (error); }
/* * Get root directory contents. */ static int zpl_root_iterate(struct file *filp, struct dir_context *ctx) { zfs_sb_t *zsb = ITOZSB(filp->f_path.dentry->d_inode); int error = 0; ZFS_ENTER(zsb); if (!dir_emit_dots(filp, ctx)) goto out; if (ctx->pos == 2) { if (!dir_emit(ctx, ZFS_SNAPDIR_NAME, strlen(ZFS_SNAPDIR_NAME), ZFSCTL_INO_SNAPDIR, DT_DIR)) goto out; ctx->pos++; } if (ctx->pos == 3) { if (!dir_emit(ctx, ZFS_SHAREDIR_NAME, strlen(ZFS_SHAREDIR_NAME), ZFSCTL_INO_SHARES, DT_DIR)) goto out; ctx->pos++; } out: ZFS_EXIT(zsb); return (error); }
/*
 * Build a short-form file id for a .zfs control node from its
 * zfsctl_node_t object id.
 */
/*ARGSUSED*/
static int
zfsctl_common_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_node_t *zcp = vp->v_data;
	uint64_t object = zcp->zc_id;
	zfid_short_t *zfid;
	int byte;

	ZFS_ENTER(zfsvfs);

	/* The caller must provide room for at least a short fid. */
	if (fidp->fid_len < SHORT_FID_LEN) {
		fidp->fid_len = SHORT_FID_LEN;
		ZFS_EXIT(zfsvfs);
		return (ENOSPC);
	}

	zfid = (zfid_short_t *)fidp;
	zfid->zf_len = SHORT_FID_LEN;

	/* Encode the object number least-significant byte first. */
	for (byte = 0; byte < sizeof (zfid->zf_object); byte++) {
		zfid->zf_object[byte] = (uint8_t)object;
		object >>= 8;
	}

	/* .zfs znodes always have a generation number of 0 */
	for (byte = 0; byte < sizeof (zfid->zf_gen); byte++)
		zfid->zf_gen[byte] = 0;

	ZFS_EXIT(zfsvfs);
	return (0);
}
/*
 * Build a short-form file id for a .zfs control inode from its
 * znode object id.
 */
/*ARGSUSED*/
int
zfsctl_fid(struct inode *ip, fid_t *fidp)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);
	uint64_t object = zp->z_id;
	zfid_short_t *zfid;
	int byte;

	ZFS_ENTER(zsb);

	/* The caller must provide room for at least a short fid. */
	if (fidp->fid_len < SHORT_FID_LEN) {
		fidp->fid_len = SHORT_FID_LEN;
		ZFS_EXIT(zsb);
		return (ENOSPC);
	}

	zfid = (zfid_short_t *)fidp;
	zfid->zf_len = SHORT_FID_LEN;

	/* Encode the object number least-significant byte first. */
	for (byte = 0; byte < sizeof (zfid->zf_object); byte++) {
		zfid->zf_object[byte] = (uint8_t)object;
		object >>= 8;
	}

	/* .zfs znodes always have a generation number of 0 */
	for (byte = 0; byte < sizeof (zfid->zf_gen); byte++)
		zfid->zf_gen[byte] = 0;

	ZFS_EXIT(zsb);
	return (0);
}
/*
 * FUSE unlink: remove the named entry from the parent directory.
 */
static int
zfsfuse_unlink(fuse_req_t req, fuse_ino_t parent, const char *name)
{
	vfs_t *vfs;
	zfsvfs_t *zfsvfs;
	znode_t *znode;
	vnode_t *dvp;
	cred_t cred;
	int error;

	if (strlen(name) >= MAXNAMELEN)
		return ENAMETOOLONG;

	vfs = (vfs_t *) fuse_req_userdata(req);
	zfsvfs = vfs->vfs_data;

	ZFS_ENTER(zfsvfs);

	error = zfs_zget(zfsvfs, parent, &znode, B_FALSE);
	if (error) {
		ZFS_EXIT(zfsvfs);
		/*
		 * dnode_hold_impl() reports a recently deleted inode as
		 * EEXIST rather than ENOENT; map it back.
		 */
		return error == EEXIST ? ENOENT : error;
	}
	ASSERT(znode != NULL);
	dvp = ZTOV(znode);
	ASSERT(dvp != NULL);

	zfsfuse_getcred(req, &cred);

	error = VOP_REMOVE(dvp, (char *) name, &cred, NULL, 0);

	VN_RELE(dvp);
	ZFS_EXIT(zfsvfs);
	return error;
}
/*
 * FUSE lookup: resolve "name" under "parent" and reply with a
 * fuse_entry_param describing the resulting inode and its attributes.
 * Replies via fuse_reply_entry() on success; returns an errno that the
 * caller is expected to report on failure.
 */
static int
zfsfuse_lookup(fuse_req_t req, fuse_ino_t parent, const char *name)
{
	if(strlen(name) >= MAXNAMELEN)
		return ENAMETOOLONG;

	vfs_t *vfs = (vfs_t *) fuse_req_userdata(req);
	zfsvfs_t *zfsvfs = vfs->vfs_data;

	ZFS_ENTER(zfsvfs);

	znode_t *znode;

	int error = zfs_zget(zfsvfs, parent, &znode, B_TRUE);
	if(error) {
		ZFS_EXIT(zfsvfs);
		/* If the inode we are trying to get was recently deleted
		   dnode_hold_impl will return EEXIST instead of ENOENT */
		return error == EEXIST ? ENOENT : error;
	}

	ASSERT(znode != NULL);
	vnode_t *dvp = ZTOV(znode);
	ASSERT(dvp != NULL);

	vnode_t *vp = NULL;

	cred_t cred;
	zfsfuse_getcred(req, &cred);

	error = VOP_LOOKUP(dvp, (char *) name, &vp, NULL, 0, NULL, &cred,
	    NULL, NULL, NULL);
	if(error)
		goto out;

	/* Zero timeouts: the kernel must not cache entry/attr data. */
	struct fuse_entry_param e = { 0 };
	e.attr_timeout = 0.0;
	e.entry_timeout = 0.0;

	if(vp == NULL)
		goto out;

	e.ino = VTOZ(vp)->z_id;
	/*
	 * NOTE(review): object id 3 is remapped to 1 — presumably the
	 * dataset root object is being presented as FUSE_ROOT_ID;
	 * confirm against the mount-time root object number.
	 */
	if(e.ino == 3)
		e.ino = 1;

	e.generation = VTOZ(vp)->z_phys->zp_gen;

	error = zfsfuse_stat(vp, &e.attr, &cred);

out:
	/* Release the looked-up vnode (if any) and the parent. */
	if(vp != NULL)
		VN_RELE(vp);
	VN_RELE(dvp);

	ZFS_EXIT(zfsvfs);

	if(!error)
		fuse_reply_entry(req, &e);

	return error;
}
/*
 * FUSE release: close the vnode cached in fi->fh at open time and free
 * the per-open file_info_t.
 */
static int
zfsfuse_release(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
	vfs_t *vfs = (vfs_t *) fuse_req_userdata(req);
	zfsvfs_t *zfsvfs = vfs->vfs_data;
	file_info_t *info;
	cred_t cred;
	int error;

	ZFS_ENTER(zfsvfs);

	info = (file_info_t *)(uintptr_t) fi->fh;
	ASSERT(info->vp != NULL);
	ASSERT(VTOZ(info->vp) != NULL);
	ASSERT(VTOZ(info->vp)->z_id == ino);

	zfsfuse_getcred(req, &cred);

	error = VOP_CLOSE(info->vp, info->flags, 1, (offset_t) 0, &cred, NULL);
	VERIFY(error == 0);

	VN_RELE(info->vp);
	kmem_cache_free(file_info_cache, info);

	ZFS_EXIT(zfsvfs);
	return error;
}
/*
 * FUSE getattr: stat the inode and reply with its attributes.
 */
static int
zfsfuse_getattr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
	vfs_t *vfs = (vfs_t *) fuse_req_userdata(req);
	zfsvfs_t *zfsvfs = vfs->vfs_data;
	struct stat stbuf;
	znode_t *znode;
	vnode_t *vp;
	cred_t cred;
	int error;

	ZFS_ENTER(zfsvfs);

	error = zfs_zget(zfsvfs, ino, &znode, B_TRUE);
	if (error) {
		ZFS_EXIT(zfsvfs);
		/*
		 * dnode_hold_impl() reports a recently deleted inode as
		 * EEXIST rather than ENOENT; map it back.
		 */
		return error == EEXIST ? ENOENT : error;
	}
	ASSERT(znode != NULL);
	vp = ZTOV(znode);
	ASSERT(vp != NULL);

	zfsfuse_getcred(req, &cred);

	error = zfsfuse_stat(vp, &stbuf, &cred);

	VN_RELE(vp);
	ZFS_EXIT(zfsvfs);

	/* Zero attr timeout: the kernel must not cache the result. */
	if (!error)
		fuse_reply_attr(req, &stbuf, 0.0);

	return error;
}
/*
 * Resolve a name inside the .zfs control directory: "..", the snapshot
 * subdirectory, or the shares subdirectory.  Any other name is ENOENT.
 */
/* ARGSUSED */
int
zfsctl_root_lookup(struct inode *dip, char *name, struct inode **ipp,
    int flags, cred_t *cr, int *direntflags, pathname_t *realpnp)
{
	zfs_sb_t *zsb = ITOZSB(dip);
	struct inode *ip = NULL;
	int error = 0;

	ZFS_ENTER(zsb);

	if (strcmp(name, "..") == 0)
		ip = dip->i_sb->s_root->d_inode;
	else if (strcmp(name, ZFS_SNAPDIR_NAME) == 0)
		ip = zfsctl_inode_lookup(zsb, ZFSCTL_INO_SNAPDIR,
		    &zpl_fops_snapdir, &zpl_ops_snapdir);
	else if (strcmp(name, ZFS_SHAREDIR_NAME) == 0)
		ip = zfsctl_inode_lookup(zsb, ZFSCTL_INO_SHARES,
		    &zpl_fops_shares, &zpl_ops_shares);

	if (ip == NULL)
		error = ENOENT;

	*ipp = ip;

	ZFS_EXIT(zsb);
	return (error);
}
/*
 * Resolve a snapshot name under .zfs/snapshot to a control inode whose
 * number is derived from the snapshot's dataset id.
 */
/* ARGSUSED */
int
zfsctl_snapdir_lookup(struct inode *dip, char *name, struct inode **ipp,
    int flags, cred_t *cr, int *direntflags, pathname_t *realpnp)
{
	zfs_sb_t *zsb = ITOZSB(dip);
	uint64_t id;
	int error;

	ZFS_ENTER(zsb);

	error = dmu_snapshot_lookup(zsb->z_os, name, &id);
	if (error == 0) {
		*ipp = zfsctl_inode_lookup(zsb, ZFSCTL_INO_SNAPDIRS - id,
		    &simple_dir_operations, &simple_dir_inode_operations);
		if (*ipp == NULL) {
			error = ENOENT;
		} else {
#ifdef HAVE_AUTOMOUNT
			/* Let the VFS trigger the snapshot automount. */
			(*ipp)->i_flags |= S_AUTOMOUNT;
#endif /* HAVE_AUTOMOUNT */
		}
	}

	ZFS_EXIT(zsb);
	return (error);
}
static int zpl_shares_iterate(struct file *filp, struct dir_context *ctx) { fstrans_cookie_t cookie; cred_t *cr = CRED(); zfs_sb_t *zsb = ITOZSB(filp->f_path.dentry->d_inode); znode_t *dzp; int error = 0; ZFS_ENTER(zsb); cookie = spl_fstrans_mark(); if (zsb->z_shares_dir == 0) { dir_emit_dots(filp, ctx); goto out; } error = -zfs_zget(zsb, zsb->z_shares_dir, &dzp); if (error) goto out; crhold(cr); error = -zfs_readdir(ZTOI(dzp), ctx, cr); crfree(cr); VN_RELE(ZTOV(dzp)); out: spl_fstrans_unmark(cookie); ZFS_EXIT(zsb); ASSERT3S(error, <=, 0); return (error); }
/*
 * Look up a name inside .zfs/shares by delegating to zfs_lookup() on
 * the underlying shares directory znode.
 */
/* ARGSUSED */
int
zfsctl_shares_lookup(struct inode *dip, char *name, struct inode **ipp,
    int flags, cred_t *cr, int *direntflags, pathname_t *realpnp)
{
	zfs_sb_t *zsb = ITOZSB(dip);
	struct inode *ip;
	znode_t *dzp;
	int error = ENOTSUP;

	ZFS_ENTER(zsb);

	if (zsb->z_shares_dir != 0) {
		error = zfs_zget(zsb, zsb->z_shares_dir, &dzp);
		if (error == 0) {
			error = zfs_lookup(ZTOI(dzp), name, &ip, 0, cr,
			    NULL, NULL);
			iput(ZTOI(dzp));
		}
	}

	ZFS_EXIT(zsb);
	return (error);
}
/*
 * Fill in a struct statfs for statfs(2): space totals from the DMU,
 * block sizes, object counts, and filesystem identity strings.
 * Always returns 0.
 */
static int
zfs_statfs(vfs_t *vfsp, struct statfs *statp)
{
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	uint64_t refdbytes, availbytes, usedobjs, availobjs;

	statp->f_version = STATFS_VERSION;

	ZFS_ENTER(zfsvfs);

	dmu_objset_space(zfsvfs->z_os,
	    &refdbytes, &availbytes, &usedobjs, &availobjs);

	/*
	 * The underlying storage pool actually uses multiple block sizes.
	 * We report the fragsize as the smallest block size we support,
	 * and we report our blocksize as the filesystem's maximum blocksize.
	 */
	statp->f_bsize = zfsvfs->z_vfs->vfs_bsize;
	statp->f_iosize = zfsvfs->z_vfs->vfs_bsize;

	/*
	 * The following report "total" blocks of various kinds in the
	 * file system, but reported in terms of f_frsize - the
	 * "fragment" size.
	 */
	statp->f_blocks = (refdbytes + availbytes) / statp->f_bsize;
	statp->f_bfree = availbytes / statp->f_bsize;
	statp->f_bavail = statp->f_bfree; /* no root reservation */

	/*
	 * statvfs() should really be called statufs(), because it assumes
	 * static metadata. ZFS doesn't preallocate files, so the best
	 * we can do is report the max that could possibly fit in f_files,
	 * and that minus the number actually used in f_ffree.
	 * For f_ffree, report the smaller of the number of object available
	 * and the number of blocks (each object will take at least a block).
	 */
	statp->f_ffree = MIN(availobjs, statp->f_bfree);
	statp->f_files = statp->f_ffree + usedobjs;

	/*
	 * We're a zfs filesystem.
	 */
	(void) strlcpy(statp->f_fstypename, "zfs",
	    sizeof(statp->f_fstypename));

	/* Copy the mount identity recorded at mount time. */
	strlcpy(statp->f_mntfromname, vfsp->mnt_stat.f_mntfromname,
	    sizeof(statp->f_mntfromname));
	strlcpy(statp->f_mntonname, vfsp->mnt_stat.f_mntonname,
	    sizeof(statp->f_mntonname));

	statp->f_namemax = ZFS_MAXNAMELEN;

	ZFS_EXIT(zfsvfs);

	return (0);
}
/*
 * The ARC has requested that the filesystem drop entries from the dentry
 * and inode caches.  This can occur when the ARC needs to free meta data
 * blocks but can't because they are all pinned by entries in these caches.
 *
 * The actual pruning mechanism depends on which shrinker interfaces the
 * running kernel provides; the #if chain below selects exactly one.
 */
int
zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	int error = 0;
#if defined(HAVE_SHRINK) || defined(HAVE_SPLIT_SHRINKER_CALLBACK)
	struct shrinker *shrinker = &sb->s_shrink;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
		.gfp_mask = GFP_KERNEL,
	};
#endif

	ZFS_ENTER(zsb);

#if defined(HAVE_SPLIT_SHRINKER_CALLBACK) && \
	defined(SHRINK_CONTROL_HAS_NID) && \
	defined(SHRINKER_NUMA_AWARE)
	/*
	 * A NUMA-aware shrinker must be invoked once per online node,
	 * accumulating the per-node counts into *objects.
	 */
	if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
		*objects = 0;
		for_each_online_node(sc.nid)
			*objects += (*shrinker->scan_objects)(shrinker, &sc);
	} else {
		*objects = (*shrinker->scan_objects)(shrinker, &sc);
	}
#elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
	*objects = (*shrinker->scan_objects)(shrinker, &sc);
#elif defined(HAVE_SHRINK)
	*objects = (*shrinker->shrink)(shrinker, &sc);
#elif defined(HAVE_D_PRUNE_ALIASES)
#define	D_PRUNE_ALIASES_IS_DEFAULT
	*objects = zfs_sb_prune_aliases(zsb, nr_to_scan);
#else
#error "No available dentry and inode cache pruning mechanism."
#endif

#if defined(HAVE_D_PRUNE_ALIASES) && !defined(D_PRUNE_ALIASES_IS_DEFAULT)
#undef	D_PRUNE_ALIASES_IS_DEFAULT
	/*
	 * Fall back to zfs_sb_prune_aliases if the kernel's per-superblock
	 * shrinker couldn't free anything, possibly due to the inodes being
	 * allocated in a different memcg.
	 */
	if (*objects == 0)
		*objects = zfs_sb_prune_aliases(zsb, nr_to_scan);
#endif

	ZFS_EXIT(zsb);

	dprintf_ds(zsb->z_os->os_dsl_dataset,
	    "pruning, nr_to_scan=%lu objects=%d error=%d\n",
	    nr_to_scan, *objects, error);

	return (error);
}
/*
 * Write back dirty pages of the mapped file.  Datasets with
 * ZFS_SYNC_ALWAYS force synchronous writeback regardless of the
 * caller-requested sync_mode.
 */
static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	znode_t *zp = ITOZ(mapping->host);
	zfs_sb_t *zsb = ITOZSB(mapping->host);
	enum writeback_sync_modes sync_mode;
	int result;

	ZFS_ENTER(zsb);
	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;
	ZFS_EXIT(zsb);
	/* Remember the effective mode before the first, async pass. */
	sync_mode = wbc->sync_mode;

	/*
	 * We don't want to run write_cache_pages() in SYNC mode here, because
	 * that would make putpage() wait for a single page to be committed to
	 * disk every single time, resulting in atrocious performance. Instead
	 * we run it once in non-SYNC mode so that the ZIL gets all the data,
	 * and then we commit it all in one go.
	 */
	wbc->sync_mode = WB_SYNC_NONE;
	result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
	if (sync_mode != wbc->sync_mode) {
		/* A SYNC pass was requested: commit the ZIL once, then
		 * rescan to catch any pages the first pass missed. */
		ZFS_ENTER(zsb);
		ZFS_VERIFY_ZP(zp);
		if (zsb->z_log != NULL)
			zil_commit(zsb->z_log, zp->z_id);
		ZFS_EXIT(zsb);

		/*
		 * We need to call write_cache_pages() again (we can't just
		 * return after the commit) because the previous call in
		 * non-SYNC mode does not guarantee that we got all the dirty
		 * pages (see the implementation of write_cache_pages() for
		 * details). That being said, this is a no-op in most cases.
		 */
		wbc->sync_mode = sync_mode;
		result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
	}
	return (result);
}
/*
 * Remove a snapshot directory entry from .zfs/snapshot: unmount the
 * snapshot and destroy the underlying objset.
 */
/* ARGSUSED */
static int
zfsctl_snapdir_remove(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
    caller_context_t *ct, int flags)
{
	zfsctl_snapdir_t *sdp = dvp->v_data;
	zfs_snapentry_t *sep;
	zfs_snapentry_t search;
	zfsvfs_t *zfsvfs;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];
	int err;

	zfsvfs = dvp->v_vfsp->vfs_data;
	ZFS_ENTER(zfsvfs);

	/*
	 * For case-insensitive lookups, resolve the caller's spelling to
	 * the on-disk snapshot name.  ENOTSUP from
	 * dmu_snapshot_realname() is tolerated and the name is used
	 * as given.
	 */
	if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
		err = dmu_snapshot_realname(zfsvfs->z_os, name, real,
		    MAXNAMELEN, NULL);
		if (err == 0) {
			name = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
	}

	ZFS_EXIT(zfsvfs);

	/* Build the full dataset@snapshot name and check destroy perms. */
	err = zfsctl_snapshot_zname(dvp, name, MAXNAMELEN, snapname);
	if (!err)
		err = zfs_secpolicy_destroy_perms(snapname, cr);
	if (err)
		return (err);

	mutex_enter(&sdp->sd_lock);

	search.se_name = name;
	sep = avl_find(&sdp->sd_snaps, &search, NULL);
	if (sep) {
		avl_remove(&sdp->sd_snaps, sep);
		err = zfsctl_unmount_snap(sep, MS_FORCE, cr);
		/*
		 * On unmount failure, put the entry back so the snapshot
		 * remains visible; otherwise destroy the objset.
		 */
		if (err)
			avl_add(&sdp->sd_snaps, sep);
		else
			err = dmu_objset_destroy(snapname, B_FALSE);
	} else {
		err = ENOENT;
	}

	mutex_exit(&sdp->sd_lock);

	return (err);
}
/*
 * Sync one filesystem (zsb != NULL) or every pool (zsb == NULL).
 */
/*ARGSUSED*/
int
zfs_sync(struct super_block *sb, int wait, cred_t *cr)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	dsl_pool_t *dp;

	/*
	 * Data integrity is job one.  We don't want a compromised kernel
	 * writing to the storage pool, so we never sync during panic.
	 */
	if (unlikely(oops_in_progress))
		return (0);

	/*
	 * Semantically, the only requirement is that the sync be initiated.
	 * The DMU syncs out txgs frequently, so an async request is done.
	 */
	if (!wait)
		return (0);

	if (zsb == NULL) {
		/*
		 * Sync all ZFS filesystems.  This is what happens when you
		 * run sync(1M).  Unlike other filesystems, ZFS honors the
		 * request by waiting for all pools to commit all dirty data.
		 */
		spa_sync_allpools();
		return (0);
	}

	/* Sync this specific filesystem. */
	ZFS_ENTER(zsb);

	dp = dmu_objset_pool(zsb->z_os);

	/*
	 * If the system is shutting down, then skip any
	 * filesystems which may exist on a suspended pool.
	 */
	if (spa_suspended(dp->dp_spa)) {
		ZFS_EXIT(zsb);
		return (0);
	}

	if (zsb->z_log != NULL)
		zil_commit(zsb->z_log, 0);

	ZFS_EXIT(zsb);
	return (0);
}
/*
 * VFS sync entry point: push the cached filesystem modify time to the
 * root directory attribute, then commit the ZIL (or wait out the
 * current txg when there is no log).  Always returns 0.
 */
static int
zfs_vfs_sync(struct mount *mp, __unused int waitfor,
    __unused vfs_context_t context)
{
	zfsvfs_t *zfsvfs = vfs_fsprivate(mp);

	ZFS_ENTER(zfsvfs);

	/*
	 * Mac OS X needs a file system modify time
	 *
	 * We use the mtime of the "com.apple.system.mtime"
	 * extended attribute, which is associated with the
	 * file system root directory.
	 *
	 * Here we sync any mtime changes to this attribute.
	 */
	if (zfsvfs->z_mtime_vp != NULL) {
		timestruc_t mtime;
		znode_t *zp;
top:
		zp = VTOZ(zfsvfs->z_mtime_vp);
		ZFS_TIME_DECODE(&mtime, zp->z_phys->zp_mtime);
		/* Only write when the attribute is behind the znode. */
		if (zfsvfs->z_last_mtime_synced < mtime.tv_sec) {
			dmu_tx_t *tx;
			int error;

			tx = dmu_tx_create(zfsvfs->z_os);
			dmu_tx_hold_bonus(tx, zp->z_id);
			error = dmu_tx_assign(tx, zfsvfs->z_assign);
			if (error) {
				/*
				 * In TXG_NOWAIT mode ERESTART means the
				 * txg is full: wait for the next txg and
				 * retry from the top.
				 */
				if (error == ERESTART &&
				    zfsvfs->z_assign == TXG_NOWAIT) {
					dmu_tx_wait(tx);
					dmu_tx_abort(tx);
					goto top;
				}
				dmu_tx_abort(tx);
			} else {
				/* Dirty the znode so the mtime is synced. */
				dmu_buf_will_dirty(zp->z_dbuf, tx);
				dmu_tx_commit(tx);
				zfsvfs->z_last_mtime_synced = mtime.tv_sec;
			}
		}
	}

	if (zfsvfs->z_log != NULL)
		zil_commit(zfsvfs->z_log, UINT64_MAX, 0);
	else
		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);

	ZFS_EXIT(zfsvfs);

	return (0);
}
/*
 * FUSE readlink: read a symlink target into a local buffer and reply
 * with the NUL-terminated path.
 */
static int
zfsfuse_readlink(fuse_req_t req, fuse_ino_t ino)
{
	vfs_t *vfs = (vfs_t *) fuse_req_userdata(req);
	zfsvfs_t *zfsvfs = vfs->vfs_data;
	char buffer[PATH_MAX + 1];
	iovec_t iovec;
	uio_t uio;
	cred_t cred;
	znode_t *znode;
	vnode_t *vp;
	int error;

	ZFS_ENTER(zfsvfs);

	error = zfs_zget(zfsvfs, ino, &znode, B_FALSE);
	if (error) {
		ZFS_EXIT(zfsvfs);
		/*
		 * dnode_hold_impl() reports a recently deleted inode as
		 * EEXIST rather than ENOENT; map it back.
		 */
		return error == EEXIST ? ENOENT : error;
	}
	ASSERT(znode != NULL);
	vp = ZTOV(znode);
	ASSERT(vp != NULL);

	/* Reserve the last byte for the terminating NUL added below. */
	iovec.iov_base = buffer;
	iovec.iov_len = sizeof(buffer) - 1;

	uio.uio_iov = &iovec;
	uio.uio_iovcnt = 1;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_fmode = 0;
	uio.uio_llimit = RLIM64_INFINITY;
	uio.uio_resid = iovec.iov_len;
	uio.uio_loffset = 0;

	zfsfuse_getcred(req, &cred);

	error = VOP_READLINK(vp, &uio, &cred, NULL);

	VN_RELE(vp);
	ZFS_EXIT(zfsvfs);

	if (!error) {
		VERIFY(uio.uio_loffset < sizeof(buffer));
		buffer[uio.uio_loffset] = '\0';
		fuse_reply_readlink(req, buffer);
	}

	return error;
}
/*
 * File handle to vnode pointer
 *
 * Decode a zfs_zfid_t NFS-style handle into (object, generation),
 * fetch the znode, and validate the generation before handing back
 * the vnode.  Returns EINVAL for short or stale handles.
 */
static int
zfs_vfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp,
    struct vnode **vpp, __unused vfs_context_t context)
{
	zfsvfs_t *zfsvfs = vfs_fsprivate(mp);
	zfs_zfid_t *zfid = (zfs_zfid_t *)fhp;
	znode_t *zp;
	uint64_t obj_num = 0;
	uint64_t fid_gen = 0;
	uint64_t zp_gen;
	int i;
	int error;

	*vpp = NULL;

	ZFS_ENTER(zfsvfs);

	/* Reject handles too short to contain a zfs_zfid_t. */
	if (fhlen < sizeof (zfs_zfid_t)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Grab the object and gen numbers in an endian neutral manner
	 */
	for (i = 0; i < sizeof (zfid->zf_object); i++)
		obj_num |= ((uint64_t)zfid->zf_object[i]) << (8 * i);

	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);

	if ((error = zfs_zget(zfsvfs, obj_num, &zp))) {
		goto out;
	}

	/* A stored generation of 0 is normalized to 1 for comparison. */
	zp_gen = zp->z_phys->zp_gen;
	if (zp_gen == 0)
		zp_gen = 1;

	/* Stale handle: object was unlinked or its generation changed. */
	if (zp->z_unlinked || zp_gen != fid_gen) {
		vnode_put(ZTOV(zp));
		error = EINVAL;
		goto out;
	}

	*vpp = ZTOV(zp);
out:
	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Report attributes for the .zfs/snapshot directory: one link per
 * mounted snapshot entry plus "." and "..".
 */
/* ARGSUSED */
static int
zfsctl_snapdir_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
    caller_context_t *ct)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_snapdir_t *sdp = vp->v_data;
	uint64_t entries;

	ZFS_ENTER(zfsvfs);

	zfsctl_common_getattr(vp, vap);
	vap->va_nodeid = gfs_file_inode(vp);

	entries = avl_numnodes(&sdp->sd_snaps) + 2;
	vap->va_size = entries;
	vap->va_nlink = entries;

	ZFS_EXIT(zfsvfs);
	return (0);
}
/*
 * Report attributes for the .zfs control directory root.
 */
/* ARGSUSED */
static int
zfsctl_root_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
    caller_context_t *ct)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;

	ZFS_ENTER(zfsvfs);

	vap->va_nodeid = ZFSCTL_INO_ROOT;
	vap->va_size = NROOT_ENTRIES;
	vap->va_nlink = vap->va_size;
	zfsctl_common_getattr(vp, vap);

	ZFS_EXIT(zfsvfs);
	return (0);
}
/*
 * Return the root inode of the filesystem; *ipp is left untouched
 * when the lookup fails.
 */
int
zfs_root(zfs_sb_t *zsb, struct inode **ipp)
{
	znode_t *zp;
	int error;

	ZFS_ENTER(zsb);
	error = zfs_zget(zsb, zsb->z_root, &zp);
	if (error == 0)
		*ipp = ZTOI(zp);
	ZFS_EXIT(zsb);

	return (error);
}
/*
 * Ask the superblock's shrinker to drop up to nr_to_scan dentry/inode
 * cache entries; the count freed is returned through *objects.
 */
int
zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	struct shrinker *shrink = &sb->s_shrink;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
		.gfp_mask = GFP_KERNEL,
	};

	ZFS_ENTER(zsb);
	*objects = (*shrink->shrink)(shrink, &sc);
	ZFS_EXIT(zsb);

	return (0);
}
/*
 * Return the root vnode of the filesystem; *vpp is left untouched
 * when the lookup fails.
 */
static int
zfs_root(vfs_t *vfsp, vnode_t **vpp)
{
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	znode_t *zp;
	int error;

	ZFS_ENTER(zfsvfs);
	error = zfs_zget(zfsvfs, zfsvfs->z_root, &zp);
	if (error == 0)
		*vpp = ZTOV(zp);
	ZFS_EXIT(zfsvfs);

	return (error);
}
/* ARGSUSED */ static int zpl_snapdir_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { zfs_sb_t *zsb = ITOZSB(dentry->d_inode); int error; ZFS_ENTER(zsb); error = simple_getattr(mnt, dentry, stat); stat->nlink = stat->size = 2; stat->ctime = stat->mtime = dmu_objset_snap_cmtime(zsb->z_os); stat->atime = CURRENT_TIME; ZFS_EXIT(zsb); return (error); }
/*
 * VFS root entry point: return the root vnode of the mounted
 * filesystem; *vpp is left untouched when the lookup fails.
 */
static int
zfs_vfs_root(struct mount *mp, struct vnode **vpp,
    __unused vfs_context_t context)
{
	zfsvfs_t *zfsvfs = vfs_fsprivate(mp);
	znode_t *zp;
	int error;

	ZFS_ENTER(zfsvfs);
	error = zfs_zget(zfsvfs, zfsvfs->z_root, &zp);
	if (error == 0)
		*vpp = ZTOV(zp);
	ZFS_EXIT(zfsvfs);

	return (error);
}