int cp_compat_stat(struct kstat *stat, struct compat_stat *statbuf)
{
	struct compat_stat tmp;

	if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	SET_UID(tmp.st_uid, stat->uid);
	SET_GID(tmp.st_gid, stat->gid);
	tmp.st_rdev = new_encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	memset(&tmp, 0, sizeof(struct stat64));
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
		return -EOVERFLOW;
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = stat->uid;
	tmp.st_gid = stat->gid;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
static int cp_hpux_stat(struct kstat *stat, struct hpux_stat64 __user *statbuf)
{
	struct hpux_stat64 tmp;

	/* we probably want a different split here - is hpux 12:20? */
	if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = stat->uid;
	tmp.st_gid = stat->gid;
	tmp.st_rdev = new_encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
{
	compat_ino_t ino;
	long err;

	if (stat->size > MAX_NON_LFS ||
	    !new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
		return -EOVERFLOW;

	ino = stat->ino;
	if (sizeof(ino) < sizeof(stat->ino) && ino != stat->ino)
		return -EOVERFLOW;

	err = access_ok(VERIFY_WRITE, statbuf, sizeof(*statbuf)) ? 0 : -EFAULT;
	err |= __put_user(new_encode_dev(stat->dev), &statbuf->st_dev);
	err |= __put_user(ino, &statbuf->st_ino);
	err |= __put_user(stat->mode, &statbuf->st_mode);
	err |= __put_user(stat->nlink, &statbuf->st_nlink);
	err |= __put_user(stat->uid, &statbuf->st_uid);
	err |= __put_user(stat->gid, &statbuf->st_gid);
	err |= __put_user(new_encode_dev(stat->rdev), &statbuf->st_rdev);
	err |= __put_user(stat->size, &statbuf->st_size);
	err |= __put_user(stat->atime.tv_sec, &statbuf->st_atime);
	err |= __put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
	err |= __put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
	err |= __put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
	err |= __put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
	err |= __put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
	err |= __put_user(stat->blksize, &statbuf->st_blksize);
	err |= __put_user(stat->blocks, &statbuf->st_blocks);
	err |= __put_user(0, &statbuf->__unused4[0]);
	err |= __put_user(0, &statbuf->__unused4[1]);
	return err;
}
/*
 * Check if the given path is a mountpoint.
 *
 * If we are supplied with the file descriptor of an autofs
 * mount we're looking for a specific mount. In this case
 * the path is considered a mountpoint if it is itself a
 * mountpoint or contains a mount, such as a multi-mount
 * without a root mount. In this case we return 1 if the
 * path is a mount point and the super magic of the covering
 * mount if there is one or 0 if it isn't a mountpoint.
 *
 * If we aren't supplied with a file descriptor then we
 * lookup the nameidata of the path and check if it is the
 * root of a mount. If a type is given we are looking for
 * a particular autofs mount and if we don't find a match
 * we return fail. If the located nameidata path is the
 * root of a mount we return 1 along with the super magic
 * of the mount or 0 otherwise.
 *
 * In both cases the device number (as returned by
 * new_encode_dev()) is also returned.
 */
static int autofs_dev_ioctl_ismountpoint(struct file *fp,
					 struct autofs_sb_info *sbi,
					 struct autofs_dev_ioctl *param)
{
	struct path path;
	const char *name;
	unsigned int type;
	unsigned int devid, magic;
	int err = -ENOENT;

	if (param->size <= sizeof(*param)) {
		err = -EINVAL;
		goto out;
	}

	name = param->path;
	type = param->ismountpoint.in.type;

	param->ismountpoint.out.devid = devid = 0;
	param->ismountpoint.out.magic = magic = 0;

	if (!fp || param->ioctlfd == -1) {
		if (autofs_type_any(type))
			err = kern_path(name, LOOKUP_FOLLOW, &path);
		else
			err = find_autofs_mount(name, &path, test_by_type, &type);
		if (err)
			goto out;
		devid = new_encode_dev(path.mnt->mnt_sb->s_dev);
		err = 0;
		if (path.dentry->d_inode &&
		    path.mnt->mnt_root == path.dentry) {
			err = 1;
			magic = path.dentry->d_inode->i_sb->s_magic;
		}
	} else {
		dev_t dev = sbi->sb->s_dev;

		err = find_autofs_mount(name, &path, test_by_dev, &dev);
		if (err)
			goto out;

		devid = new_encode_dev(dev);

		err = have_submounts(path.dentry);

		if (path.mnt->mnt_mountpoint != path.mnt->mnt_root) {
			if (follow_down(&path))
				magic = path.mnt->mnt_sb->s_magic;
		}
	}

	param->ismountpoint.out.devid = devid;
	param->ismountpoint.out.magic = magic;
	path_put(&path);
out:
	return err;
}
static __be32 *
encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
	     struct kstat *stat)
{
	struct dentry *dentry = fhp->fh_dentry;
	int type;
	struct timespec64 time;
	u32 f;

	type = (stat->mode & S_IFMT);

	*p++ = htonl(nfs_ftypes[type >> 12]);
	*p++ = htonl((u32) stat->mode);
	*p++ = htonl((u32) stat->nlink);
	*p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid));
	*p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid));

	if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) {
		*p++ = htonl(NFS_MAXPATHLEN);
	} else {
		*p++ = htonl((u32) stat->size);
	}

	*p++ = htonl((u32) stat->blksize);
	if (S_ISCHR(type) || S_ISBLK(type))
		*p++ = htonl(new_encode_dev(stat->rdev));
	else
		*p++ = htonl(0xffffffff);
	*p++ = htonl((u32) stat->blocks);
	switch (fsid_source(fhp)) {
	default:
	case FSIDSOURCE_DEV:
		*p++ = htonl(new_encode_dev(stat->dev));
		break;
	case FSIDSOURCE_FSID:
		*p++ = htonl((u32) fhp->fh_export->ex_fsid);
		break;
	case FSIDSOURCE_UUID:
		f = ((u32 *)fhp->fh_export->ex_uuid)[0];
		f ^= ((u32 *)fhp->fh_export->ex_uuid)[1];
		f ^= ((u32 *)fhp->fh_export->ex_uuid)[2];
		f ^= ((u32 *)fhp->fh_export->ex_uuid)[3];
		*p++ = htonl(f);
		break;
	}
	*p++ = htonl((u32) stat->ino);
	*p++ = htonl((u32) stat->atime.tv_sec);
	*p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0);
	time = stat->mtime;
	lease_get_mtime(d_inode(dentry), &time);
	*p++ = htonl((u32) time.tv_sec);
	*p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0);
	*p++ = htonl((u32) stat->ctime.tv_sec);
	*p++ = htonl(stat->ctime.tv_nsec ? stat->ctime.tv_nsec / 1000 : 0);

	return p;
}
/*
 * Check if the given path is a mountpoint.
 *
 * If we are supplied with the file descriptor of an autofs
 * mount we're looking for a specific mount. In this case
 * the path is considered a mountpoint if it is itself a
 * mountpoint or contains a mount, such as a multi-mount
 * without a root mount. In this case we return 1 if the
 * path is a mount point and the super magic of the covering
 * mount if there is one or 0 if it isn't a mountpoint.
 *
 * If we aren't supplied with a file descriptor then we
 * lookup the path and check if it is the root of a mount.
 * If a type is given we are looking for a particular autofs
 * mount and if we don't find a match we return fail. If the
 * located path is the root of a mount we return 1 along with
 * the super magic of the mount or 0 otherwise.
 *
 * In both cases the device number (as returned by
 * new_encode_dev()) is also returned.
 */
static int autofs_dev_ioctl_ismountpoint(struct file *fp,
					 struct autofs_sb_info *sbi,
					 struct autofs_dev_ioctl *param)
{
	struct path path;
	const char *name;
	unsigned int type;
	unsigned int devid, magic;
	int err = -ENOENT;

	/* param->path has been checked in validate_dev_ioctl() */

	name = param->path;
	type = param->ismountpoint.in.type;

	param->ismountpoint.out.devid = devid = 0;
	param->ismountpoint.out.magic = magic = 0;

	if (!fp || param->ioctlfd == -1) {
		if (autofs_type_any(type))
			err = kern_path_mountpoint(AT_FDCWD,
						   name, &path, LOOKUP_FOLLOW);
		else
			err = find_autofs_mount(name, &path,
						test_by_type, &type);
		if (err)
			goto out;
		devid = new_encode_dev(path.dentry->d_sb->s_dev);
		err = 0;
		if (path.mnt->mnt_root == path.dentry) {
			err = 1;
			magic = path.dentry->d_sb->s_magic;
		}
	} else {
		dev_t dev = sbi->sb->s_dev;

		err = find_autofs_mount(name, &path, test_by_dev, &dev);
		if (err)
			goto out;

		devid = new_encode_dev(dev);

		err = path_has_submounts(&path);

		if (follow_down_one(&path))
			magic = path.dentry->d_sb->s_magic;
	}

	param->ismountpoint.out.devid = devid;
	param->ismountpoint.out.magic = magic;
	path_put(&path);
out:
	return err;
}
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

#if BITS_PER_LONG == 32
	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
		return -EOVERFLOW;
#else
	if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
		return -EOVERFLOW;
#endif

	memset(&tmp, 0, sizeof(tmp));
#if BITS_PER_LONG == 32
	tmp.st_dev = old_encode_dev(stat->dev);
#else
	tmp.st_dev = new_encode_dev(stat->dev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, stat->uid);
	SET_GID(tmp.st_gid, stat->gid);
#if BITS_PER_LONG == 32
	tmp.st_rdev = old_encode_dev(stat->rdev);
#else
	tmp.st_rdev = new_encode_dev(stat->rdev);
#endif
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	scribe_data_non_det();
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(new_encode_dev(inode->i_rdev));
	/*
	 * When extending inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}
static void hpfs_write_inode_ea(struct inode *i, struct fnode *fnode)
{
	struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
	/*if (le32_to_cpu(fnode->acl_size_l) || le16_to_cpu(fnode->acl_size_s)) {
		   Some unknown structures like ACL may be in fnode,
		   we'd better not overwrite them
		hpfs_error(i->i_sb, "fnode %08x has some unknown HPFS386 structures", i->i_ino);
	} else*/ if (hpfs_sb(i->i_sb)->sb_eas >= 2) {
		__le32 ea;
		if (!uid_eq(i->i_uid, hpfs_sb(i->i_sb)->sb_uid) ||
		    hpfs_inode->i_ea_uid) {
			ea = cpu_to_le32(i_uid_read(i));
			hpfs_set_ea(i, fnode, "UID", (char *)&ea, 2);
			hpfs_inode->i_ea_uid = 1;
		}
		if (!gid_eq(i->i_gid, hpfs_sb(i->i_sb)->sb_gid) ||
		    hpfs_inode->i_ea_gid) {
			ea = cpu_to_le32(i_gid_read(i));
			hpfs_set_ea(i, fnode, "GID", (char *)&ea, 2);
			hpfs_inode->i_ea_gid = 1;
		}
		if (!S_ISLNK(i->i_mode))
			if ((i->i_mode != ((hpfs_sb(i->i_sb)->sb_mode & ~(S_ISDIR(i->i_mode) ? 0 : 0111))
			  | (S_ISDIR(i->i_mode) ? S_IFDIR : S_IFREG))
			  && i->i_mode != ((hpfs_sb(i->i_sb)->sb_mode & ~(S_ISDIR(i->i_mode) ? 0222 : 0333))
			  | (S_ISDIR(i->i_mode) ? S_IFDIR : S_IFREG))) || hpfs_inode->i_ea_mode) {
				ea = cpu_to_le32(i->i_mode);
				/* sick, but legal */
				hpfs_set_ea(i, fnode, "MODE", (char *)&ea, 2);
				hpfs_inode->i_ea_mode = 1;
			}
		if (S_ISBLK(i->i_mode) || S_ISCHR(i->i_mode)) {
			ea = cpu_to_le32(new_encode_dev(i->i_rdev));
			hpfs_set_ea(i, fnode, "DEV", (char *)&ea, 4);
		}
	}
}
static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
		      dev_t rdev)
{
	struct fuse_mknod_in inarg;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_req *req = fuse_get_req_nopages(fc);

	if (IS_ERR(req))
		return PTR_ERR(req);

	if (!fc->dont_mask)
		mode &= ~current_umask();

	memset(&inarg, 0, sizeof(inarg));
	inarg.mode = mode;
	inarg.rdev = new_encode_dev(rdev);
	inarg.umask = current_umask();
	req->in.h.opcode = FUSE_MKNOD;
	req->in.numargs = 2;
	req->in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
						sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = entry->d_name.len + 1;
	req->in.args[1].value = entry->d_name.name;
	return create_new_entry(fc, req, dir, entry, mode);
}
static void hpfs_write_inode_ea(struct inode *i, struct fnode *fnode)
{
	struct hpfs_inode_info *hpfs_inode = hpfs_i(i);

	if (hpfs_sb(i->i_sb)->sb_eas >= 2) {
		__le32 ea;
		if ((i->i_uid != hpfs_sb(i->i_sb)->sb_uid) || hpfs_inode->i_ea_uid) {
			ea = cpu_to_le32(i->i_uid);
			hpfs_set_ea(i, fnode, "UID", (char *)&ea, 2);
			hpfs_inode->i_ea_uid = 1;
		}
		if ((i->i_gid != hpfs_sb(i->i_sb)->sb_gid) || hpfs_inode->i_ea_gid) {
			ea = cpu_to_le32(i->i_gid);
			hpfs_set_ea(i, fnode, "GID", (char *)&ea, 2);
			hpfs_inode->i_ea_gid = 1;
		}
		if (!S_ISLNK(i->i_mode))
			if ((i->i_mode != ((hpfs_sb(i->i_sb)->sb_mode & ~(S_ISDIR(i->i_mode) ? 0 : 0111))
			  | (S_ISDIR(i->i_mode) ? S_IFDIR : S_IFREG))
			  && i->i_mode != ((hpfs_sb(i->i_sb)->sb_mode & ~(S_ISDIR(i->i_mode) ? 0222 : 0333))
			  | (S_ISDIR(i->i_mode) ? S_IFDIR : S_IFREG))) || hpfs_inode->i_ea_mode) {
				ea = cpu_to_le32(i->i_mode);
				hpfs_set_ea(i, fnode, "MODE", (char *)&ea, 2);
				hpfs_inode->i_ea_mode = 1;
			}
		if (S_ISBLK(i->i_mode) || S_ISCHR(i->i_mode)) {
			ea = cpu_to_le32(new_encode_dev(i->i_rdev));
			hpfs_set_ea(i, fnode, "DEV", (char *)&ea, 4);
		}
	}
}
int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev)
{
	int err;
	struct dentry *d;

	IMustLock(dir);

	d = path->dentry;
	path->dentry = d->d_parent;
	err = security_path_mknod(path, d, mode, new_encode_dev(dev));
	path->dentry = d;
	if (unlikely(err))
		goto out;

	err = vfs_mknod(dir, path->dentry, mode, dev);
	if (!err) {
		struct path tmp = *path;
		int did;

		vfsub_update_h_iattr(&tmp, &did);
		if (did) {
			tmp.dentry = path->dentry->d_parent;
			vfsub_update_h_iattr(&tmp, /*did*/NULL);
		}
		/*ignore*/
	}

out:
	return err;
}
/* Do we recognize the device? */
STATIC bool
xfs_getfsmap_is_valid_device(
	struct xfs_mount	*mp,
	struct xfs_fsmap	*fm)
{
	if (fm->fmr_device == 0 || fm->fmr_device == UINT_MAX ||
	    fm->fmr_device == new_encode_dev(mp->m_ddev_targp->bt_dev))
		return true;
	if (mp->m_logdev_targp &&
	    fm->fmr_device == new_encode_dev(mp->m_logdev_targp->bt_dev))
		return true;
	if (mp->m_rtdev_targp &&
	    fm->fmr_device == new_encode_dev(mp->m_rtdev_targp->bt_dev))
		return true;
	return false;
}
static struct buffer_head *sfs_update_inode(struct inode *inode)
{
	struct buffer_head *bh;
	struct sfs_inode_info *si;
	struct sfs_inode *di;
	int i;

	si = SFS_INODE(inode);
	di = sfs_get_inode(inode->i_sb, inode->i_ino, &bh);
	if (!di)
		return NULL;

	di->i_size = cpu_to_le32(inode->i_size);
	di->i_mode = cpu_to_le16(inode->i_mode);
	di->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	di->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	di->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	di->i_uid = cpu_to_le32(i_uid_read(inode));
	di->i_gid = cpu_to_le32(i_gid_read(inode));
	di->i_nlink = cpu_to_le16(inode->i_nlink);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		di->i_blkaddr[0] = cpu_to_le32(new_encode_dev(inode->i_rdev));
		for (i = 1; i < 9; i++)
			di->i_blkaddr[i] = cpu_to_le32(0);
	} else {
		for (i = 0; i < 9; i++)
			di->i_blkaddr[i] = si->blkaddr[i];
	}

	mark_buffer_dirty(bh);
	return bh;
}
int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
{
	compat_ino_t ino;
	int err;

	if (stat->size > MAX_NON_LFS ||
	    !new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
		return -EOVERFLOW;

	ino = stat->ino;
	if (sizeof(ino) < sizeof(stat->ino) && ino != stat->ino)
		return -EOVERFLOW;

	err  = put_user(new_encode_dev(stat->dev), &statbuf->st_dev);
	err |= put_user(ino, &statbuf->st_ino);
	err |= put_user(stat->mode, &statbuf->st_mode);
	err |= put_user(stat->nlink, &statbuf->st_nlink);
	err |= put_user(0, &statbuf->st_reserved1);
	err |= put_user(0, &statbuf->st_reserved2);
	err |= put_user(new_encode_dev(stat->rdev), &statbuf->st_rdev);
	err |= put_user(stat->size, &statbuf->st_size);
	err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
	err |= put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
	err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
	err |= put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
	err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
	err |= put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
	err |= put_user(stat->blksize, &statbuf->st_blksize);
	err |= put_user(stat->blocks, &statbuf->st_blocks);
	err |= put_user(0, &statbuf->__unused1);
	err |= put_user(0, &statbuf->__unused2);
	err |= put_user(0, &statbuf->__unused3);
	err |= put_user(0, &statbuf->__unused4);
	err |= put_user(0, &statbuf->__unused5);
	err |= put_user(0, &statbuf->st_fstype);	/* not avail */
	err |= put_user(0, &statbuf->st_realdev);	/* not avail */
	err |= put_user(0, &statbuf->st_basemode);	/* not avail */
	err |= put_user(0, &statbuf->st_spareshort);
	err |= put_user(stat->uid, &statbuf->st_uid);
	err |= put_user(stat->gid, &statbuf->st_gid);
	err |= put_user(0, &statbuf->st_spare4[0]);
	err |= put_user(0, &statbuf->st_spare4[1]);
	err |= put_user(0, &statbuf->st_spare4[2]);
	return err;
}
static int simplefs_parse_options(struct super_block *sb, char *options)
{
	substring_t args[MAX_OPT_ARGS];
	int token, ret, arg;
	char *p;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;

		args[0].to = args[0].from = NULL;
		token = match_token(p, tokens, args);

		switch (token) {
		case SIMPLEFS_OPT_JOURNAL_DEV:
			if (args->from && match_int(args, &arg))
				return 1;
			printk(KERN_INFO "Loading journal devnum: %i\n", arg);
			if ((ret = simplefs_load_journal(sb, arg)))
				return ret;
			break;

		case SIMPLEFS_OPT_JOURNAL_PATH:
		{
			char *journal_path;
			struct inode *journal_inode;
			struct path path;

			BUG_ON(!(journal_path = match_strdup(&args[0])));
			ret = kern_path(journal_path, LOOKUP_FOLLOW, &path);
			if (ret) {
				printk(KERN_ERR
				       "could not find journal device path: error %d\n",
				       ret);
				kfree(journal_path);
				/* bail out: path is not valid on lookup failure */
				return ret;
			}

			journal_inode = path.dentry->d_inode;

			path_put(&path);
			kfree(journal_path);

			if (S_ISBLK(journal_inode->i_mode)) {
				unsigned long journal_devnum =
					new_encode_dev(journal_inode->i_rdev);
				if ((ret = simplefs_load_journal(sb, journal_devnum)))
					return ret;
			} else {
				/* this path doesn't seem to work properly */
				if ((ret = simplefs_sb_load_journal(sb, journal_inode)))
					return ret;
			}
			break;
		}
		}
	}
	return 0;
}
/* static */ SysStatus
LinuxPTYServer::Create(ObjectHandle &oh, TypeID &type, dev_t num,
		       ProcessID pid, uval oflag)
{
	SysStatus rc;
	int ret;
	LinuxPTYServer *pty = new LinuxPTYServer();
	LinuxPTYServerRef ref;

	pty->devNum = num;
	pty->availMask = FileLinux::INVALID;
	CObjRootSingleRep::Create(pty);
	ref = (LinuxPTYServerRef)pty->getRef();
	pty->readCB.obj = ref;
	pty->writeCB.obj = ref;

	rc = DREF(ref)->giveAccessByServer(oh, pid);
	if (_FAILURE(rc)) {
		DREF(ref)->destroy();
		return rc;
	}

	ProcessLinux::LinuxInfo info;
	rc = DREFGOBJ(TheProcessLinuxRef)->getInfoNativePid(pid, info);
	tassertMsg(_SUCCESS(rc),
		   "Couldn't get info about k42 process 0x%lx\n", pid);

	PTYTaskInfo ptyTI(info.pid, info.pgrp, info.session, info.tty,
			  info.pid == info.session);
	LinuxEnv sc(SysCall, ptyTI.t);

	ret = ttydev_open(&pty->oli, num, &pty->readQ, &pty->writeQ,
			  oflag | O_NONBLOCK);
	if (ret < 0) {
		rc = _SERROR(2365, 0, -ret);
		// releaseAccess will trigger destroy
		DREF(ref)->releaseAccess(oh.xhandle());
	} else {
		pty->devNum = ret;
		// Put ourselves on wait queue with TTYEvents object that will
		// generate callbacks instead of waking threads
		if (pty->readQ && list_empty(&pty->readCB.task_list)) {
			add_wait_queue(pty->readQ, &pty->readCB);
		}
		if (pty->writeQ && list_empty(&pty->writeCB.task_list)) {
			add_wait_queue(pty->writeQ, &pty->writeCB);
		}
		type = (TypeID)new_encode_dev(pty->devNum);
	}
	return rc;
}
static __be32 *
encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
	     struct kstat *stat)
{
	struct dentry *dentry = fhp->fh_dentry;
	int type;
	struct timespec time;

	type = (stat->mode & S_IFMT);

	*p++ = htonl(nfs_ftypes[type >> 12]);
	*p++ = htonl((u32) stat->mode);
	*p++ = htonl((u32) stat->nlink);
	*p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid));
	*p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid));

	if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) {
		*p++ = htonl(NFS_MAXPATHLEN);
	} else {
		*p++ = htonl((u32) stat->size);
	}

	*p++ = htonl((u32) stat->blksize);
	if (S_ISCHR(type) || S_ISBLK(type))
		*p++ = htonl(new_encode_dev(stat->rdev));
	else
		*p++ = htonl(0xffffffff);
	*p++ = htonl((u32) stat->blocks);
	if (is_fsid(fhp, rqstp->rq_reffh))
		*p++ = htonl((u32) fhp->fh_export->ex_fsid);
	else
		*p++ = htonl(new_encode_dev(stat->dev));
	*p++ = htonl((u32) stat->ino);
	*p++ = htonl((u32) stat->atime.tv_sec);
	*p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0);
	lease_get_mtime(dentry->d_inode, &time);
	*p++ = htonl((u32) time.tv_sec);
	*p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0);
	*p++ = htonl((u32) stat->ctime.tv_sec);
	*p++ = htonl(stat->ctime.tv_nsec ? stat->ctime.tv_nsec / 1000 : 0);

	return p;
}
/* called under sb->s_umount semaphore */
static int vz_restore_symlink(struct super_block *sb, char *path, int type)
{
	mm_segment_t oldfs;
	char *newpath;
	char dest[64];
	const char *names[] = {
		[USRQUOTA] "aquota.user",
		[GRPQUOTA] "aquota.group"
	};
	int err;

	newpath = kmalloc(strlen(path) + sizeof(".new"), GFP_KERNEL);
	if (newpath == NULL)
		return -ENOMEM;

	strcpy(newpath, path);
	strcat(newpath, ".new");

	sprintf(dest, "/proc/vz/vzaquota/%08x/%s",
		new_encode_dev(sb->s_dev), names[type]);

	/*
	 * Lockdep will learn unneeded dependency while unlink(2):
	 *	->s_umount => ->i_mutex/1 => ->i_mutex
	 * Reverse dependency is,
	 *	open_namei() => ->i_mutex => lookup_hash() => __lookup_hash()
	 *	 => ->lookup() \eq vzdq_aquotq_lookup() => find_qmblk_by_dev()
	 *	 => user_get_super() => ->s_umount
	 *
	 * However, first set of ->i_mutex'es belong to /, second to /proc .
	 * Right fix is to get rid of vz_restore_symlink(), of course.
	 */
	up_read(&sb->s_umount);

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_unlink(newpath);
	if (err < 0 && err != -ENOENT)
		goto out_restore;
	err = sys_symlink(dest, newpath);
	if (err < 0)
		goto out_restore;
	err = sys_rename(newpath, path);
out_restore:
	set_fs(oldfs);

	down_read(&sb->s_umount);
	/* umounted meanwhile? */
	if (err == 0 && !sb->s_root)
		err = -ENODEV;

	kfree(newpath);
	return err;
}
void update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_node *rn;
	struct f2fs_inode *ri;

	f2fs_wait_on_page_writeback(node_page, NODE, false);

	rn = F2FS_NODE(node_page);
	ri = &(rn->i);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(inode->i_uid);
	ri->i_gid = cpu_to_le32(inode->i_gid);
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(inode->i_blocks);
	set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext);
	set_raw_inline(F2FS_I(inode), ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[1] = 0;
		} else {
			ri->i_addr[0] = 0;
			ri->i_addr[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[2] = 0;
		}
	}

	set_cold_node(inode, node_page);
	set_page_dirty(node_page);
	clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
}
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[1] = 0;
		} else {
			ri->i_addr[0] = 0;
			ri->i_addr[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[2] = 0;
		}
	}
}
static int tiocgdev(unsigned fd, unsigned cmd, unsigned int __user *ptr)
{
	struct file *file = fget(fd);
	struct tty_struct *real_tty;
	int ret;

	if (!file)
		return -EBADF;
	/* drop the fget() reference on every exit path */
	ret = -EINVAL;
	if (file->f_op->ioctl != tty_ioctl)
		goto out;
	real_tty = (struct tty_struct *)file->private_data;
	if (!real_tty)
		goto out;
	ret = put_user(new_encode_dev(tty_devnum(real_tty)), ptr);
out:
	fput(file);
	return ret;
}
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	void *res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	down_read(&bpf_devs_lock);
	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}
/*
 * updateSuper()
 *
 * synchronously update the superblock if it is mounted read-write.
 */
int updateSuper(struct super_block *sb, uint state)
{
	struct jfs_superblock *j_sb;
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct buffer_head *bh;
	int rc;

	if (sbi->flag & JFS_NOINTEGRITY) {
		if (state == FM_DIRTY) {
			sbi->p_state = state;
			return 0;
		} else if (state == FM_MOUNT) {
			sbi->p_state = sbi->state;
			state = FM_DIRTY;
		} else if (state == FM_CLEAN) {
			state = sbi->p_state;
		} else
			jfs_err("updateSuper: bad state");
	} else if (sbi->state == FM_DIRTY)
		return 0;

	if ((rc = readSuper(sb, &bh)))
		return rc;

	j_sb = (struct jfs_superblock *)bh->b_data;

	j_sb->s_state = cpu_to_le32(state);
	sbi->state = state;

	if (state == FM_MOUNT) {
		/* record log's dev_t and mount serial number */
		j_sb->s_logdev = cpu_to_le32(new_encode_dev(sbi->log->bdev->bd_dev));
		j_sb->s_logserial = cpu_to_le32(sbi->log->serial);
	} else if (state == FM_CLEAN) {
		/*
		 * If this volume is shared with OS/2, OS/2 will need to
		 * recalculate DASD usage, since we don't deal with it.
		 */
		if (j_sb->s_flag & cpu_to_le32(JFS_DASD_ENABLED))
			j_sb->s_flag |= cpu_to_le32(JFS_DASD_PRIME);
	}

	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	brelse(bh);

	return 0;
}
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}
int updateSuper(struct super_block *sb, uint state)
{
	struct jfs_superblock *j_sb;
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct buffer_head *bh;
	int rc;

	if (sbi->flag & JFS_NOINTEGRITY) {
		if (state == FM_DIRTY) {
			sbi->p_state = state;
			return 0;
		} else if (state == FM_MOUNT) {
			sbi->p_state = sbi->state;
			state = FM_DIRTY;
		} else if (state == FM_CLEAN) {
			state = sbi->p_state;
		} else
			jfs_err("updateSuper: bad state");
	} else if (sbi->state == FM_DIRTY)
		return 0;

	if ((rc = readSuper(sb, &bh)))
		return rc;

	j_sb = (struct jfs_superblock *)bh->b_data;

	j_sb->s_state = cpu_to_le32(state);
	sbi->state = state;

	if (state == FM_MOUNT) {
		j_sb->s_logdev = cpu_to_le32(new_encode_dev(sbi->log->bdev->bd_dev));
		j_sb->s_logserial = cpu_to_le32(sbi->log->serial);
	} else if (state == FM_CLEAN) {
		if (j_sb->s_flag & cpu_to_le32(JFS_DASD_ENABLED))
			j_sb->s_flag |= cpu_to_le32(JFS_DASD_PRIME);
	}

	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	brelse(bh);

	return 0;
}
__kernel_dev_t _lkl_disk_add_disk(void *data, int sectors)
{
	struct lkl_disk_dev *dev;

	if (failed_init)
		return 0;

	if (!(dev = kzalloc(sizeof(*dev), GFP_KERNEL)))
		return 0;

	dev->data = data;

	spin_lock_init(&dev->lock);
	if (!(dev->queue = blk_init_queue(lkl_disk_request, &dev->lock))) {
		kfree(dev);
		return 0;
	}
	blk_queue_hardsect_size(dev->queue, 512);
	dev->queue->queuedata = dev;

	if (!(dev->gd = alloc_disk(1))) {
		kfree(dev); /* FIXME: del queue? */
		return 0;
	}
	dev->gd->major = major;
	dev->gd->first_minor = disks;
	dev->gd->fops = &lkl_disk_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	snprintf(dev->gd->disk_name, 32, "lkldisk%d", disks);
	set_capacity(dev->gd, sectors);
	add_disk(dev->gd);

	disks++;

	printk("lkldisk: attached %s @ dev=%d:%d\n", dev->gd->disk_name,
	       dev->gd->major, dev->gd->first_minor);

	return new_encode_dev(MKDEV(dev->gd->major, dev->gd->first_minor));
}
/*
 * Return the uid and gid of the last request for the mount
 *
 * When reconstructing an autofs mount tree with active mounts
 * we need to re-connect to mounts that may have used the original
 * process uid and gid (or string variations of them) for mount
 * lookups within the map entry.
 */
static int autofs_dev_ioctl_requester(struct file *fp,
				      struct autofs_sb_info *sbi,
				      struct autofs_dev_ioctl *param)
{
	struct autofs_info *ino;
	struct nameidata nd;
	const char *path;
	dev_t devid;
	int err = -ENOENT;

	if (param->size <= sizeof(*param)) {
		err = -EINVAL;
		goto out;
	}

	path = param->path;
	devid = new_encode_dev(sbi->sb->s_dev);

	param->requester.uid = param->requester.gid = -1;

	/* Get nameidata of the parent directory */
	err = path_lookup(path, LOOKUP_PARENT, &nd);
	if (err)
		goto out;

	err = autofs_dev_ioctl_find_super(&nd, devid);
	if (err)
		goto out_release;

	ino = autofs4_dentry_ino(nd.path.dentry);
	if (ino) {
		err = 0;
		autofs4_expire_wait(nd.path.dentry);
		spin_lock(&sbi->fs_lock);
		param->requester.uid = ino->uid;
		param->requester.gid = ino->gid;
		spin_unlock(&sbi->fs_lock);
	}

out_release:
	path_put(&nd.path);
out:
	return err;
}