static int report_statvfs(struct vfsmount *mnt, struct inode *ip,
                          struct sco_statvfs *bufp)
{
    struct sco_statvfs buf;
    struct kstatfs s;
    int error;

#if _KSL < 18
    error = vfs_statfs(mnt->mnt_sb, &s);
#else
    error = vfs_statfs(mnt->mnt_sb->s_root, &s);
#endif
    if (error)
        return error;

    memset(&buf, 0, sizeof(struct sco_statvfs));
    buf.f_bsize  = s.f_bsize;
    buf.f_frsize = s.f_bsize;
    buf.f_blocks = s.f_blocks;
    buf.f_bfree  = s.f_bfree;
    buf.f_bavail = s.f_bavail;
    buf.f_files  = s.f_files;
    buf.f_free   = s.f_ffree;
    buf.f_favail = s.f_ffree;

    /* SCO addition in the middle! */
    buf.f_sid = ip->i_sb->s_dev;

    /*
     * Get the name of the filesystem.
     *
     * Sadly, some code "in the wild" actually checks the name
     * against a hard coded list to see if it is a "real" fs or not.
     *
     * I believe Informix Dynamic Server for SCO is one such.
     */
    if (strncmp(ip->i_sb->s_type->name, "ext2", 4) == 0)
        strcpy(buf.f_basetype, "HTFS");
    else
        strcpy(buf.f_basetype, ip->i_sb->s_type->name);

    /* Check for a few flags statvfs wants but statfs doesn't have. */
    if (IS_RDONLY(ip))
        buf.f_flag |= 1;
    if (mnt->mnt_flags & MNT_NOSUID)
        buf.f_flag |= 2;

    buf.f_namemax = s.f_namelen;

    if (copy_to_user(bufp, &buf, sizeof(struct sco_statvfs)))
        return -EFAULT;
    return 0;
}
static int au_wbr_init(struct au_branch *br, struct super_block *sb,
                       int perm, struct path *path)
{
    int err;
    struct kstatfs kst;
    struct au_wbr *wbr;
    struct dentry *h_dentry;

    wbr = br->br_wbr;
    au_rw_init(&wbr->wbr_wh_rwsem);
    memset(wbr->wbr_wh, 0, sizeof(wbr->wbr_wh));
    atomic_set(&wbr->wbr_wh_running, 0);
    wbr->wbr_bytes = 0;

    /*
     * a limit for rmdir/rename a dir
     * cf. AUFS_MAX_NAMELEN in include/linux/aufs_type.h
     */
    h_dentry = path->dentry;
    err = vfs_statfs(h_dentry, &kst);
    if (unlikely(err))
        goto out;
    err = -EINVAL;
    if (kst.f_namelen >= NAME_MAX)
        err = au_br_init_wh(sb, br, perm, h_dentry);
    else
        pr_err("%.*s(%s), unsupported namelen %ld\n",
               AuDLNPair(h_dentry), au_sbtype(h_dentry->d_sb),
               kst.f_namelen);

out:
    return err;
}
static int vfs_statfs64(struct super_block *sb, struct statfs64 *buf)
{
    int retval;

    if (sb->s_flags & MS_HAS_STATFS64) {
        memset(buf, 0, sizeof(struct statfs64));
        lock_kernel();
        retval = sb->s_op->statfs64(sb, buf);
        unlock_kernel();
        if (retval)
            return retval;
    } else {
        struct statfs st;

        /* Fall back to the 32 bit filesystem call. */
        retval = vfs_statfs(sb, &st);
        if (retval)
            return retval;

        /* Stuff the 32 bit values into the 64 bit struct. */
        buf->f_type = st.f_type;
        buf->f_bsize = st.f_bsize;
        buf->f_blocks = st.f_blocks;
        buf->f_bfree = st.f_bfree;
        buf->f_bavail = st.f_bavail;
        buf->f_files = st.f_files;
        buf->f_ffree = st.f_ffree;
        buf->f_fsid = st.f_fsid;
        buf->f_namelen = st.f_namelen;
        memset(buf->f_spare, 0, sizeof(buf->f_spare));
    }
    return 0;
}
/* This function is called from hpux_utssys(); HP-UX implements
 * ustat() as an option to utssys().
 *
 * Now, struct ustat on HP-UX is exactly the same as on Linux, except
 * that it contains one additional field on the end, int32_t f_blksize.
 * So, we could have written this function to just call the Linux
 * sys_ustat() (defined in linux/fs/super.c) and then just added this
 * additional field to the user's structure.  But I figure if we're
 * gonna be digging through filesystem structures to get this, we
 * might as well just do the whole enchilada all in one go.
 *
 * So, most of this function is almost identical to sys_ustat().
 * I have placed comments at the few lines changed or added, to
 * aid in porting forward if and when sys_ustat() is changed from
 * its form in kernel 2.2.5.
 */
static int hpux_ustat(dev_t dev, struct hpux_ustat __user *ubuf)
{
    struct super_block *s;
    struct hpux_ustat tmp;  /* Changed to hpux_ustat */
    struct kstatfs sbuf;
    int err = -EINVAL;

    s = user_get_super(dev);
    if (s == NULL)
        goto out;
    err = vfs_statfs(s, &sbuf);
    drop_super(s);
    if (err)
        goto out;

    memset(&tmp, 0, sizeof(tmp));

    tmp.f_tfree = (int32_t)sbuf.f_bfree;
    tmp.f_tinode = (u_int32_t)sbuf.f_ffree;
    tmp.f_blksize = (u_int32_t)sbuf.f_bsize;  /* Added this line */

    err = copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
out:
    return err;
}
static int nullfs_getattr(struct vnop_getattr_args * args)
{
    int error;
    struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp));

    NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);

    lck_mtx_lock(&null_mp->nullm_lock);
    if (nullfs_isspecialvp(args->a_vp)) {
        error = nullfs_special_getattr(args);
        lck_mtx_unlock(&null_mp->nullm_lock);
        return error;
    }
    lck_mtx_unlock(&null_mp->nullm_lock);

    /* this will return a different inode for third than read dir will */
    struct vnode * lowervp = NULLVPTOLOWERVP(args->a_vp);

    error = vnode_getwithref(lowervp);
    if (error == 0) {
        error = VNOP_GETATTR(lowervp, args->a_vap, args->a_context);
        vnode_put(lowervp);

        if (error == 0) {
            /* fix up fsid so it doesn't say the underlying fs */
            VATTR_RETURN(args->a_vap, va_fsid,
                         vfs_statfs(vnode_mount(args->a_vp))->f_fsid.val[0]);
        }
    }

    return error;
}
static int sdcardfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
    int err;
    struct path lower_path;
    u32 min_blocks;
    struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);

    sdcardfs_get_lower_path(dentry, &lower_path);
    err = vfs_statfs(&lower_path, buf);
    sdcardfs_put_lower_path(dentry, &lower_path);

    if (sbi->options.reserved_mb) {
        /* Invalid statfs information. */
        if (buf->f_bsize == 0) {
            printk(KERN_ERR "Returned block size is zero.\n");
            return -EINVAL;
        }

        min_blocks = ((sbi->options.reserved_mb * 1024 * 1024) / buf->f_bsize);
        buf->f_blocks -= min_blocks;

        if (buf->f_bavail > min_blocks)
            buf->f_bavail -= min_blocks;
        else
            buf->f_bavail = 0;

        /* Make reserved blocks invisible to media storage. */
        buf->f_bfree = buf->f_bavail;
    }

    /* set return buf to our f/s to avoid confusing user-level utils */
    buf->f_type = SDCARDFS_SUPER_MAGIC;

    return err;
}
static int vfs_statfs64(struct dentry *dentry, struct statfs64 *buf)
{
    struct kstatfs st;
    int retval;

    retval = vfs_statfs(dentry, &st);
    if (retval)
        return retval;

    if (sizeof(*buf) == sizeof(st))
        memcpy(buf, &st, sizeof(st));
    else {
        buf->f_type = st.f_type;
        buf->f_bsize = st.f_bsize;
        buf->f_blocks = st.f_blocks;
        buf->f_bfree = st.f_bfree;
        buf->f_bavail = st.f_bavail;
        buf->f_files = st.f_files;
        buf->f_ffree = st.f_ffree;
        buf->f_fsid = st.f_fsid;
        buf->f_namelen = st.f_namelen;
        buf->f_frsize = st.f_frsize;
        memset(buf->f_spare, 0, sizeof(buf->f_spare));
    }
    return 0;
}
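/*
 * Illustrative sketch (not one of the collected snippets): roughly how a
 * statfs64-style syscall of the same kernel era drives the converter above.
 * Treat the exact wrapper name, the f_path.dentry access, and the "sz"
 * argument check as assumptions; details vary between kernel versions.
 */
asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
                              struct statfs64 __user *buf)
{
    struct file *file;
    struct statfs64 tmp;
    int error;

    /* User space passes the size of the structure it expects. */
    if (sz != sizeof(tmp))
        return -EINVAL;

    error = -EBADF;
    file = fget(fd);
    if (!file)
        goto out;
    /* Fill the kernel-side struct, then copy it out to user space. */
    error = vfs_statfs64(file->f_path.dentry, &tmp);
    if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
        error = -EFAULT;
    fput(file);
out:
    return error;
}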
/*
 * Check the amount of free space and suspend/resume accordingly.
 */
static int check_free_space(struct bsd_acct_struct *acct)
{
    struct kstatfs sbuf;

    /* Skip the check while the next check time is still in the future. */
    if (time_is_after_jiffies(acct->needcheck))
        goto out;

    /* May block */
    if (vfs_statfs(&acct->file->f_path, &sbuf))
        goto out;

    if (acct->active) {
        u64 suspend = sbuf.f_blocks * SUSPEND;

        do_div(suspend, 100);
        if (sbuf.f_bavail <= suspend) {
            acct->active = 0;
            pr_info("Process accounting paused\n");
        }
    } else {
        u64 resume = sbuf.f_blocks * RESUME;

        do_div(resume, 100);
        if (sbuf.f_bavail >= resume) {
            acct->active = 1;
            pr_info("Process accounting resumed\n");
        }
    }

    acct->needcheck = jiffies + ACCT_TIMEOUT*HZ;
out:
    return acct->active;
}
static int vnop_getattr_9p(struct vnop_getattr_args *ap)
{
    struct vnode_attr *vap;
    struct timespec ts;
    node_9p *np;
    enum vtype type;
    dev_t rdev;
    int e, dotu;

    TRACE();
    e = 0;
    np = NTO9P(ap->a_vp);

    /* exclusive, because we modify np->dir */
    nlock_9p(np, NODE_LCK_EXCLUSIVE);
    if ((e=ngetdir_9p(np)))
        goto error;

    dotu = ISSET(np->nmp->flags, F_DOTU);
    ts.tv_nsec = 0;
    vap = ap->a_vap;
    VATTR_RETURN(vap, va_rdev, np->dir.dev);
    VATTR_RETURN(vap, va_nlink, 1);
    VATTR_RETURN(vap, va_data_size, np->dir.length);
    VATTR_RETURN(vap, va_iosize, np->iounit);
    if (dotu) {
        VATTR_RETURN(vap, va_uid, np->dir.uidnum);
        VATTR_RETURN(vap, va_gid, np->dir.gidnum);
    } else {
        VATTR_RETURN(vap, va_uid, np->nmp->uid);
        VATTR_RETURN(vap, va_gid, np->nmp->gid);
    }
    VATTR_RETURN(vap, va_mode, np->dir.mode & 0777);
    VATTR_RETURN(vap, va_flags, 0);
    ts.tv_sec = np->dir.atime;
    VATTR_RETURN(vap, va_access_time, ts);
    ts.tv_sec = np->dir.mtime;
    VATTR_RETURN(vap, va_modify_time, ts);
    VATTR_RETURN(vap, va_fileid, QTOI(np->dir.qid));
    VATTR_RETURN(vap, va_linkid, QTOI(np->dir.qid));
    VATTR_RETURN(vap, va_fsid, vfs_statfs(np->nmp->mp)->f_fsid.val[0]);
    VATTR_RETURN(vap, va_filerev, np->dir.qid.vers);
    VATTR_RETURN(vap, va_gen, 0);
    VATTR_RETURN(vap, va_encoding, 0x7E); /* utf-8 */
    dirvtype_9p(&np->dir, dotu, &type, &rdev);
    VATTR_RETURN(vap, va_type, type);
    VATTR_RETURN(vap, va_rdev, rdev);
/*
    if (VATTR_IS_ACTIVE(vap, va_name) && !vnode_isvroot(ap->a_vp)) {
        strlcpy(vap->va_name, dp->name, MAXPATHLEN);
        VATTR_SET_SUPPORTED(vap, va_name);
    }
*/
error:
    nunlock_9p(np);
    return e;
}
/*
 * Check the amount of free space and suspend/resume accordingly.
 */
static int check_free_space(struct bsd_acct_struct *acct, struct file *file)
{
    struct kstatfs sbuf;
    int res;
    int act;
    u64 resume;
    u64 suspend;

    spin_lock(&acct_lock);
    res = acct->active;
    if (!file || time_is_after_jiffies(acct->needcheck))
        goto out;
    spin_unlock(&acct_lock);

    /* May block */
    if (vfs_statfs(&file->f_path, &sbuf))
        return res;
    suspend = sbuf.f_blocks * SUSPEND;
    resume = sbuf.f_blocks * RESUME;

    do_div(suspend, 100);
    do_div(resume, 100);

    if (sbuf.f_bavail <= suspend)
        act = -1;
    else if (sbuf.f_bavail >= resume)
        act = 1;
    else
        act = 0;

    /*
     * If some joker switched acct->file under us we'd better be
     * silent and _not_ touch anything.
     */
    spin_lock(&acct_lock);
    if (file != acct->file) {
        if (act)
            res = act > 0;
        goto out;
    }

    if (acct->active) {
        if (act < 0) {
            acct->active = 0;
            pr_info("Process accounting paused\n");
        }
    } else {
        if (act > 0) {
            acct->active = 1;
            pr_info("Process accounting resumed\n");
        }
    }

    acct->needcheck = jiffies + ACCT_TIMEOUT*HZ;
    res = acct->active;
out:
    spin_unlock(&acct_lock);
    return res;
}
static int u2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
    int err;
    struct path lower_path, lower_path1;

    u2fs_get_lower_path(dentry, &lower_path, LEFT);
    err = vfs_statfs(&lower_path, buf);
    u2fs_put_lower_path(dentry, &lower_path);

    u2fs_get_lower_path(dentry, &lower_path1, RIGHT);
    err = vfs_statfs(&lower_path1, buf);
    u2fs_put_lower_path(dentry, &lower_path1);

    /* set return buf to our f/s to avoid confusing user-level utils */
    buf->f_type = WRAPFS_SUPER_MAGIC;

    return err;
}
QTOCDataFormat10Ptr
CreateBufferFromIORegistry ( mount_t mountPtr )
{
    QTOCDataFormat10Ptr TOCDataPtr   = NULL;
    OSObject *          objectPtr    = NULL;
    OSData *            dataPtr      = NULL;
    IOCDMedia *         cdMediaPtr   = NULL;
    char *              ioBSDNamePtr = NULL;

    DebugLog ( ( "CreateBufferFromIORegistry: Entering...\n" ) );

    DebugAssert ( ( mountPtr != NULL ) );

    ioBSDNamePtr = vfs_statfs ( mountPtr )->f_mntfromname;

    DebugAssert ( ( ioBSDNamePtr != NULL ) );

    cdMediaPtr = GetCDMediaObjectFromName ( ioBSDNamePtr );
    DebugAssert ( ( cdMediaPtr != NULL ) );

    if ( cdMediaPtr != NULL )
    {

        // Get the TOC property
        objectPtr = cdMediaPtr->getProperty ( kIOCDMediaTOCKey );
        if ( objectPtr == NULL )
        {

            DebugLog ( ( "CreateBufferFromIORegistry: objectPtr is NULL.\n" ) );
            cdMediaPtr->release ( );
            return NULL;

        }

        // Cast it to an OSData *
        dataPtr = OSDynamicCast ( OSData, objectPtr );
        if ( dataPtr == NULL )
        {

            DebugLog ( ( "CreateBufferFromIORegistry: dataPtr is NULL.\n" ) );
            cdMediaPtr->release ( );
            return NULL;

        }

        // Get the data from the registry entry
        TOCDataPtr = CreateBufferFromData ( dataPtr );

        DebugLog ( ( "Releasing refcount on IOCDMedia.\n" ) );
        cdMediaPtr->release ( );

    }

    DebugLog ( ( "CreateBufferFromIORegistry: exiting...\n" ) );

    return TOCDataPtr;
}
/**
 * Mount helper: Allocate and init VBoxVFS global data.
 *
 * @param mp        Mount data provided by VFS layer.
 * @param pUserData Mounting parameters provided by user space mount tool.
 *
 * @return VBoxVFS global data or NULL.
 */
static vboxvfs_mount_t *
vboxvfs_alloc_internal_data(struct mount *mp, user_addr_t pUserData)
{
    vboxvfs_mount_t *pMount;
    struct vboxvfs_mount_info mountInfo;
    struct vfsstatfs *pVfsInfo;
    size_t cbShareName;
    int rc;

    AssertReturn(mp, NULL);
    AssertReturn(pUserData, NULL);

    pVfsInfo = vfs_statfs(mp);
    AssertReturn(pVfsInfo, NULL);

    /* Allocate memory for VBoxVFS internal data */
    pMount = (vboxvfs_mount_t *)RTMemAllocZ(sizeof(vboxvfs_mount_t));
    if (pMount)
    {
        rc = vboxvfs_get_mount_info(pUserData, &mountInfo);
        if (rc == 0)
        {
            PDEBUG("Mounting shared folder '%s'", mountInfo.name);

            /* Prepare for locking. We prepare locking group and attr data here,
             * but allocate and initialize real lock in vboxvfs_create_vnode_internal().
             * We use the same pLockGroup and pLockAttr for all vnodes related to this
             * mount point. */
            rc = vboxvfs_prepare_locking(pMount);
            if (rc == 0)
            {
                rc = vboxvfs_set_share_name(mp, (char *)&mountInfo.name, &cbShareName);
                if (rc == 0)
                {
                    pMount->pShareName = vboxvfs_construct_shflstring((char *)&mountInfo.name,
                                                                      cbShareName);
                    if (pMount->pShareName)
                    {
                        /* Remember user who mounted this share */
                        pMount->owner = pVfsInfo->f_owner;

                        /* Mark root vnode as uninitialized */
                        ASMAtomicWriteU8(&pMount->fRootVnodeState,
                                         VBOXVFS_OBJECT_UNINITIALIZED);

                        return pMount;
                    }
                }
            }

            vboxvfs_destroy_locking(pMount);
        }

        RTMemFree(pMount);
    }

    return NULL;
}
int user_statfs(const char __user *pathname, struct kstatfs *st)
{
    struct path path;
    int error = user_path_at(AT_FDCWD, pathname,
                             LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT, &path);
    if (!error) {
        error = vfs_statfs(&path, st);
        path_put(&path);
    }
    return error;
}
int fd_statfs(int fd, struct kstatfs *st)
{
    struct file *file = fget_raw(fd);
    int error = -EBADF;

    if (file) {
        error = vfs_statfs(&file->f_path, st);
        fput(file);
    }
    return error;
}
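/*
 * Illustrative sketch (not one of the collected snippets): how statfs(2) and
 * fstatfs(2) typically sit on top of user_statfs()/fd_statfs(), assuming a
 * do_statfs_native()-style converter that narrows the kstatfs and copies it
 * to user space. The converter name follows mainline fs/statfs.c, but treat
 * the exact wiring as an assumption.
 */
SYSCALL_DEFINE2(statfs, const char __user *, pathname, struct statfs __user *, buf)
{
    struct kstatfs st;
    int error = user_statfs(pathname, &st);

    /* Convert the in-kernel kstatfs to the user-visible statfs layout. */
    if (!error)
        error = do_statfs_native(&st, buf);
    return error;
}

SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct statfs __user *, buf)
{
    struct kstatfs st;
    int error = fd_statfs(fd, &st);

    if (!error)
        error = do_statfs_native(&st, buf);
    return error;
}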
/*
 * Check the amount of free space and suspend/resume accordingly.
 */
static int check_free_space(struct file *file)
{
    struct statfs sbuf;
    int res;
    int act;

    lock_kernel();
    res = acct_active;
    if (!file || !acct_needcheck)
        goto out;
    unlock_kernel();

    /* May block */
    if (vfs_statfs(file->f_dentry->d_inode->i_sb, &sbuf))
        return res;

    if (sbuf.f_bavail <= SUSPEND * sbuf.f_blocks / 100)
        act = -1;
    else if (sbuf.f_bavail >= RESUME * sbuf.f_blocks / 100)
        act = 1;
    else
        act = 0;

    /*
     * If some joker switched acct_file under us we'd better be
     * silent and _not_ touch anything.
     */
    lock_kernel();
    if (file != acct_file) {
        if (act)
            res = act > 0;
        goto out;
    }

    if (acct_active) {
        if (act < 0) {
            acct_active = 0;
            printk(KERN_INFO "Process accounting paused\n");
        }
    } else {
        if (act > 0) {
            acct_active = 1;
            printk(KERN_INFO "Process accounting resumed\n");
        }
    }

    del_timer(&acct_timer);
    acct_needcheck = 0;
    acct_timer.expires = jiffies + ACCT_TIMEOUT*HZ;
    add_timer(&acct_timer);
    res = acct_active;
out:
    unlock_kernel();
    return res;
}
/* most free space */
static void au_mfs(struct dentry *dentry)
{
    struct super_block *sb;
    struct au_branch *br;
    struct au_wbr_mfs *mfs;
    aufs_bindex_t bindex, bend;
    int err;
    unsigned long long b, bavail;
    struct path h_path;
    /* reduce the stack usage */
    struct kstatfs *st;

    st = kmalloc(sizeof(*st), GFP_NOFS);
    if (unlikely(!st)) {
        AuWarn1("failed updating mfs(%d), ignored\n", -ENOMEM);
        return;
    }

    bavail = 0;
    sb = dentry->d_sb;
    mfs = &au_sbi(sb)->si_wbr_mfs;
    MtxMustLock(&mfs->mfs_lock);
    mfs->mfs_bindex = -EROFS;
    mfs->mfsrr_bytes = 0;
    bend = au_sbend(sb);
    for (bindex = 0; bindex <= bend; bindex++) {
        br = au_sbr(sb, bindex);
        if (au_br_rdonly(br))
            continue;

        /* sb->s_root for NFS is unreliable */
        h_path.mnt = br->br_mnt;
        h_path.dentry = h_path.mnt->mnt_root;
        err = vfs_statfs(&h_path, st);
        if (unlikely(err)) {
            AuWarn1("failed statfs, b%d, %d\n", bindex, err);
            continue;
        }

        /* when the available size is equal, select the lower one */
        BUILD_BUG_ON(sizeof(b) < sizeof(st->f_bavail)
                     || sizeof(b) < sizeof(st->f_bsize));
        b = st->f_bavail * st->f_bsize;
        br->br_wbr->wbr_bytes = b;
        if (b >= bavail) {
            bavail = b;
            mfs->mfs_bindex = bindex;
            mfs->mfs_jiffy = jiffies;
        }
    }

    mfs->mfsrr_bytes = bavail;
    AuDbg("b%d\n", mfs->mfs_bindex);
    kfree(st);
}
static int mini_fo_statfs(super_block_t *sb, struct statfs *buf)
{
    int err = 0;
    super_block_t *hidden_sb;

    hidden_sb = stohs(sb);
    err = vfs_statfs(hidden_sb, buf);

    return err;
}
static int vnop_strategy_9p(struct vnop_strategy_args *ap)
{
    mount_t mp;
    struct buf *bp;
    node_9p *np;
    caddr_t addr;
    uio_t uio;
    int e, flags;

    TRACE();
    bp = ap->a_bp;
    np = NTO9P(buf_vnode(bp));
    flags = buf_flags(bp);
    uio = NULL;
    addr = NULL;

    mp = vnode_mount(buf_vnode(bp));
    if (mp == NULL)
        return ENXIO;

    if ((e=buf_map(bp, &addr)))
        goto error;

    uio = uio_create(1, buf_blkno(bp) * vfs_statfs(mp)->f_bsize, UIO_SYSSPACE,
                     ISSET(flags, B_READ)? UIO_READ: UIO_WRITE);
    if (uio == NULL) {
        e = ENOMEM;
        goto error;
    }

    uio_addiov(uio, CAST_USER_ADDR_T(addr), buf_count(bp));
    if (ISSET(flags, B_READ)) {
        if((e=nread_9p(np, uio)))
            goto error;
        /* zero the rest of the page if we reached EOF */
        if (uio_resid(uio) > 0) {
            bzero(addr+buf_count(bp)-uio_resid(uio), uio_resid(uio));
            uio_update(uio, uio_resid(uio));
        }
    } else {
        if ((e=nwrite_9p(np, uio)))
            goto error;
    }
    buf_setresid(bp, uio_resid(uio));

error:
    if (uio)
        uio_free(uio);
    if (addr)
        buf_unmap(bp);
    buf_seterror(bp, e);
    buf_biodone(bp);
    return e;
}
static int vnop_offtoblk_9p(struct vnop_offtoblk_args *ap)
{
    mount_t mp;

    TRACE();
    mp = vnode_mount(ap->a_vp);
    if (mp == NULL)
        return ENXIO;

    *ap->a_lblkno = ap->a_offset / vfs_statfs(mp)->f_bsize;
    return 0;
}
static int ovl_check_namelen(struct path *path, struct ovl_fs *ofs,
                             const char *name)
{
    struct kstatfs statfs;
    int err = vfs_statfs(path, &statfs);

    if (err)
        pr_err("overlayfs: statfs failed on '%s'\n", name);
    else
        ofs->namelen = max(ofs->namelen, statfs.f_namelen);

    return err;
}
static int esdfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
    int err;
    struct path lower_path;

    esdfs_get_lower_path(dentry, &lower_path);
    err = vfs_statfs(&lower_path, buf);
    esdfs_put_lower_path(dentry, &lower_path);

    /* set return buf to our f/s to avoid confusing user-level utils */
    buf->f_type = ESDFS_SUPER_MAGIC;

    return err;
}
asmlinkage long sys_statfs(const char * path, struct statfs * buf)
{
    struct nameidata nd;
    int error;

    error = user_path_walk(path, &nd);
    if (!error) {
        struct statfs tmp;
        error = vfs_statfs(nd.dentry->d_inode->i_sb, &tmp);
        if (!error && copy_to_user(buf, &tmp, sizeof(struct statfs)))
            error = -EFAULT;
        path_release(&nd);
    }
    return error;
}
/**
 * ovl_statfs
 * @dentry: The dentry to query
 * @buf: The struct kstatfs to fill in with stats
 *
 * Get the filesystem statistics. As writes always target the upper layer
 * filesystem pass the statfs to the same filesystem.
 */
static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
{
    struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
    struct dentry *root_dentry = dentry->d_sb->s_root;
    struct path path;
    int err;

    ovl_path_upper(root_dentry, &path);

    err = vfs_statfs(&path, buf);
    if (!err) {
        buf->f_namelen = max(buf->f_namelen, ofs->lower_namelen);
        buf->f_type = OVERLAYFS_SUPER_MAGIC;
    }

    return err;
}
asmlinkage long sys_fstatfs(unsigned int fd, struct statfs * buf)
{
    struct file * file;
    struct statfs tmp;
    int error;

    error = -EBADF;
    file = fget(fd);
    if (!file)
        goto out;
    error = vfs_statfs(file->f_dentry->d_inode->i_sb, &tmp);
    if (!error && copy_to_user(buf, &tmp, sizeof(struct statfs)))
        error = -EFAULT;
    fput(file);
out:
    return error;
}
int user_statfs(const char __user *pathname, struct kstatfs *st)
{
    struct path path;
    int error;
    unsigned int lookup_flags = LOOKUP_FOLLOW;

retry:
    error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
    if (!error) {
        error = vfs_statfs(&path, st);
        path_put(&path);
        if (retry_estale(error, lookup_flags)) {
            lookup_flags |= LOOKUP_REVAL;
            goto retry;
        }
    }
    return error;
}
/**
 * Mount helper: Provide VFS layer with a VBox share name (stored as mounted device).
 *
 * @param mp          Mount data provided by VFS layer.
 * @param szShareName VBox share name.
 * @param cbShareName Returning parameter which contains VBox share name string length.
 *
 * @return 0 on success or BSD error code otherwise.
 */
static int vboxvfs_set_share_name(struct mount *mp, char *szShareName, size_t *cbShareName)
{
    struct vfsstatfs *pVfsInfo;

    AssertReturn(mp, EINVAL);
    AssertReturn(szShareName, EINVAL);
    AssertReturn(cbShareName, EINVAL);

    pVfsInfo = vfs_statfs(mp);
    if (!pVfsInfo)
    {
        PERROR("Unable to get VFS data for the mount structure");
        return EINVAL;
    }

    return copystr(szShareName, pVfsInfo->f_mntfromname, MAXPATHLEN, cbShareName);
}
static int vnop_blockmap_9p(struct vnop_blockmap_args *ap)
{
    mount_t mp;

    TRACE();
    mp = vnode_mount(ap->a_vp);
    if (mp == NULL)
        return ENXIO;

    if (ap->a_run)
        *ap->a_run = ap->a_size;
    if (ap->a_bpn)
        *ap->a_bpn = ap->a_foffset / vfs_statfs(mp)->f_bsize;
    if (ap->a_poff)
        *(int32_t*)ap->a_poff = 0;

    return 0;
}
static int vfs_statfs_native(struct dentry *dentry, struct statfs *buf)
{
    struct kstatfs st;
    int retval;

    retval = vfs_statfs(dentry, &st);
    if (retval)
        return retval;

    if (sizeof(*buf) == sizeof(st))
        memcpy(buf, &st, sizeof(st));
    else {
        if (sizeof buf->f_blocks == 4) {
            if ((st.f_blocks | st.f_bfree | st.f_bavail |
                 st.f_bsize | st.f_frsize) &
                0xffffffff00000000ULL)
                return -EOVERFLOW;
            /*
             * f_files and f_ffree may be -1; it's okay to stuff
             * that into 32 bits
             */
            if (st.f_files != -1 &&
                (st.f_files & 0xffffffff00000000ULL))
                return -EOVERFLOW;
            if (st.f_ffree != -1 &&
                (st.f_ffree & 0xffffffff00000000ULL))
                return -EOVERFLOW;
        }

        buf->f_type = st.f_type;
        buf->f_bsize = st.f_bsize;
        buf->f_blocks = st.f_blocks;
        buf->f_bfree = st.f_bfree;
        buf->f_bavail = st.f_bavail;
        buf->f_files = st.f_files;
        buf->f_ffree = st.f_ffree;
        buf->f_fsid = st.f_fsid;
        buf->f_namelen = st.f_namelen;
        buf->f_frsize = st.f_frsize;
        memset(buf->f_spare, 0, sizeof(buf->f_spare));
    }
    return 0;
}
int uw7_statvfs64(char *filename, struct uw7_statvfs64 *stvfsp)
{
    struct nameidata nd;
    int error;

    error = user_path_walk(filename, &nd);
    if (!error) {
        struct super_block *sbp = nd.dentry->d_inode->i_sb;
        struct kstatfs tmp;

        error = vfs_statfs(sbp, &tmp);
        if (!error && cp_uw7_statvfs64(sbp, &tmp, stvfsp))
            error = -EFAULT;
        path_release(&nd);
    }
    return (error);
}