/*
 * VFS statfs for a union mount: merge statistics from the lower and the
 * upper layer into *sbp.  Totals (f_blocks, f_files) are summed across
 * both layers; free/available counts come from the upper (writable)
 * layer only, since that is where new data is stored.
 */
static int
unionfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct unionfs_mount *ump;
	int error;
	struct statfs *mstat;
	uint64_t lbsize;	/* lower layer's block size */

	ump = MOUNTTOUNIONFSMOUNT(mp);

	UNIONFSDEBUG("unionfs_statfs(mp = %p, lvp = %p, uvp = %p)\n",
	    (void *)mp, (void *)ump->um_lowervp, (void *)ump->um_uppervp);

	/* Scratch buffer so *sbp is only touched once a query succeeds. */
	mstat = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK | M_ZERO);

	/* Query the lower (read-only) layer first. */
	error = VFS_STATFS(ump->um_lowervp->v_mount, mstat);
	if (error) {
		free(mstat, M_STATFS);
		return (error);
	}

	/* now copy across the "interesting" information and fake the rest */
	sbp->f_blocks = mstat->f_blocks;
	sbp->f_files = mstat->f_files;

	/* Remember the lower layer's block size for the rescale below. */
	lbsize = mstat->f_bsize;

	/* Query the upper (writable) layer, reusing the scratch buffer. */
	error = VFS_STATFS(ump->um_uppervp->v_mount, mstat);
	if (error) {
		free(mstat, M_STATFS);
		return (error);
	}

	/*
	 * The FS type etc is copy from upper vfs.
	 * (write able vfs have priority)
	 */
	sbp->f_type = mstat->f_type;
	sbp->f_flags = mstat->f_flags;
	sbp->f_bsize = mstat->f_bsize;
	sbp->f_iosize = mstat->f_iosize;

	/*
	 * If the two layers use different block sizes, rescale the lower
	 * layer's block count into upper-layer units before summing.
	 */
	if (mstat->f_bsize != lbsize)
		sbp->f_blocks = ((off_t)sbp->f_blocks * lbsize) /
		    mstat->f_bsize;
	sbp->f_blocks += mstat->f_blocks;
	/* Free/available space is reported from the upper layer only. */
	sbp->f_bfree = mstat->f_bfree;
	sbp->f_bavail = mstat->f_bavail;
	sbp->f_files += mstat->f_files;
	sbp->f_ffree = mstat->f_ffree;

	free(mstat, M_STATFS);
	return (0);
}
/* * If the VFS does not implement statvfs, then call statfs and convert * the values. This code was taken from libc's __cvtstatvfs() function, * contributed by Joerg Sonnenberger. */ int vfs_stdstatvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred) { struct statfs *in; int error; in = &mp->mnt_stat; error = VFS_STATFS(mp, in, cred); if (error == 0) { bzero(sbp, sizeof(*sbp)); sbp->f_bsize = in->f_bsize; sbp->f_frsize = in->f_bsize; sbp->f_blocks = in->f_blocks; sbp->f_bfree = in->f_bfree; sbp->f_bavail = in->f_bavail; sbp->f_files = in->f_files; sbp->f_ffree = in->f_ffree; /* * XXX * This field counts the number of available inodes to non-root * users, but this information is not available via statfs. * Just ignore this issue by returning the total number * instead. */ sbp->f_favail = in->f_ffree; /* * XXX * This field has a different meaning for statfs and statvfs. * For the former it is the cookie exported for NFS and not * intended for normal userland use. */ sbp->f_fsid = 0; sbp->f_flag = 0; if (in->f_flags & MNT_RDONLY) sbp->f_flag |= ST_RDONLY; if (in->f_flags & MNT_NOSUID) sbp->f_flag |= ST_NOSUID; sbp->f_namemax = 0; sbp->f_owner = in->f_owner; /* * XXX * statfs contains the type as string, statvfs expects it as * enumeration. */ sbp->f_type = 0; sbp->f_syncreads = in->f_syncreads; sbp->f_syncwrites = in->f_syncwrites; sbp->f_asyncreads = in->f_asyncreads; sbp->f_asyncwrites = in->f_asyncwrites; } return (error); }
/*
 * oskit_filesystem_t statfs operation: fetch filesystem statistics via
 * VFS_STATFS() and translate the BSD statfs result into the oskit
 * oskit_statfs_t representation in *out_stats.
 */
static OSKIT_COMDECL
filesystem_statfs(oskit_filesystem_t *f, oskit_statfs_t *out_stats)
{
	struct gfilesystem *fs = (struct gfilesystem *) f;
	struct statfs *sp;
	struct mount *mp;
	struct proc *p;
	oskit_error_t ferror;
	int error;

	/* Reject a null or dead handle, or one with no mount attached. */
	if (!fs || !fs->count || !fs->mp)
		return OSKIT_E_INVALIDARG;

	/* Borrow a proc to issue the VFS call under; released below. */
	ferror = getproc(&p);
	if (ferror)
		return ferror;

	mp = fs->mp;
	sp = &mp->mnt_stat;
	error = VFS_STATFS(fs->mp, sp, p);
	prfree(p);
	if (error)
		return errno_to_oskit_error(error);

	/* Refresh the cached statfs flags from the live mount flags. */
	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;

	/* Map BSD mount flags onto the oskit flag bits. */
	out_stats->flag = 0;
	if (sp->f_flags & MNT_RDONLY)
		out_stats->flag |= OSKIT_FS_RDONLY;
	if (sp->f_flags & MNT_NOEXEC)
		out_stats->flag |= OSKIT_FS_NOEXEC;
	if (sp->f_flags & MNT_NOSUID)
		out_stats->flag |= OSKIT_FS_NOSUID;
	if (sp->f_flags & MNT_NODEV)
		out_stats->flag |= OSKIT_FS_NODEV;

	/* Straight copies; statfs has no separate fragment size. */
	out_stats->bsize = sp->f_bsize;
	out_stats->frsize = sp->f_bsize;
	out_stats->blocks = sp->f_blocks;
	out_stats->bfree = sp->f_bfree;
	out_stats->bavail = sp->f_bavail;
	out_stats->files = sp->f_files;
	out_stats->ffree = sp->f_ffree;
	/* No per-user inode count available; reuse the free count. */
	out_stats->favail = sp->f_ffree;
	out_stats->fsid = sp->f_fsid.val[0]; /* device number */
	out_stats->namemax = NAME_MAX;

	return 0;
}
/*
 * Mount (or update a mount of) a HAMMER filesystem.
 *
 * mntpt is NULL for root mounts performed at boot, in which case the
 * volume list is parsed out of mp->mnt_stat.f_mntfromname (':'
 * separated) instead of being copied in from userspace via 'data'.
 */
static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	struct vnode *devvp = NULL;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;
	int master_id;
	char *next_volume_ptr = NULL;

	/*
	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
	 */
	if (mntpt == NULL) {
		bzero(&info, sizeof(info));
		info.asof = 0;
		info.hflags = 0;
		info.nvolumes = 1;

		next_volume_ptr = mp->mnt_stat.f_mntfromname;

		/* Count number of volumes separated by ':' */
		for (char *p = next_volume_ptr; *p != '\0'; ++p) {
			if (*p == ':') {
				++info.nvolumes;
			}
		}

		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
	} else {
		if ((error = copyin(data, &info, sizeof(info))) != 0)
			return (error);
	}

	/*
	 * updating or new mount
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		if (info.nvolumes <= 0 || info.nvolumes > HAMMER_MAX_VOLUMES)
			return (EINVAL);
		hmp = NULL;
	}

	/*
	 * master-id validation.  The master id may not be changed by a
	 * mount update.
	 */
	if (info.hflags & HMNT_MASTERID) {
		if (hmp && hmp->master_id != info.master_id) {
			kprintf("hammer: cannot change master id "
				"with mount update\n");
			return(EINVAL);
		}
		master_id = info.master_id;
		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
			return (EINVAL);
	} else {
		if (hmp)
			master_id = hmp->master_id;
		else
			master_id = 0;
	}

	/*
	 * Internal mount data structure.  Allocated and fully initialized
	 * only for a fresh mount; an update reuses the existing hmp.
	 */
	if (hmp == NULL) {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		/*TAILQ_INIT(&hmp->recycle_list);*/

		/*
		 * Make sure kmalloc type limits are set appropriately.
		 *
		 * Our inode kmalloc group is sized based on maxvnodes
		 * (controlled by the system, not us).
		 */
		kmalloc_create(&hmp->m_misc, "HAMMER-others");
		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");
		kmalloc_raise_limit(hmp->m_inodes, 0);	/* unlimited */

		/* B-Tree scan bounds covering the entire key space. */
		hmp->root_btree_beg.localization = 0x00000000U;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.localization = 0xFFFFFFFFU;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;	/* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;

		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
		hmp->krate.count = -16;	/* initial burst */

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;
		hmp->snapshot_lock.refs = 1;
		hmp->volume_lock.refs = 1;

		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->flush_group_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);

		RB_INIT(&hmp->rb_dedup_crc_root);
		RB_INIT(&hmp->rb_dedup_off_root);
		TAILQ_INIT(&hmp->dedup_lru_list);
	}
	/* Refresh the user-controllable flags on every (re)mount. */
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;

	hmp->master_id = master_id;

	/* An as-of (history) mount is forced read-only. */
	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	hmp->volume_to_remove = -1;

	/*
	 * Re-open read-write if originally read-only, or vise-versa.
	 *
	 * When going from read-only to read-write execute the stage2
	 * recovery if it has not already been run.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		lwkt_gettoken(&hmp->fs_token);
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol, 1);
				error = hammer_recover_stage2(hmp, rootvol);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			/* Reload inodes, flush everything, then go R/O. */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hmp->ronly = 0;
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hmp->ronly = 1;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		lwkt_reltoken(&hmp->fs_token);
		return(error);
	}

	/* Fresh mount: initialize the in-memory trees and lists. */
	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_redo_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);
	RB_INIT(&hmp->rb_pfsm_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	RB_INIT(&hmp->volu_root);
	RB_INIT(&hmp->undo_root);
	RB_INIT(&hmp->data_root);
	RB_INIT(&hmp->meta_root);
	RB_INIT(&hmp->lose_root);
	TAILQ_INIT(&hmp->iorun_list);

	lwkt_token_init(&hmp->fs_token, "hammerfs");
	lwkt_token_init(&hmp->io_token, "hammerio");

	lwkt_gettoken(&hmp->fs_token);

	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = -1;
	for (i = 0; i < info.nvolumes; ++i) {
		if (mntpt == NULL) {
			/*
			 * Root mount: pull the next ':'-separated volume
			 * name out of f_mntfromname, prefixing "/dev/"
			 * for relative paths.
			 */
			KKASSERT(next_volume_ptr != NULL);
			strcpy(path, "");
			if (*next_volume_ptr != '/') {
				/* relative path */
				strcpy(path, "/dev/");
			}
			int k;
			for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
				if (*next_volume_ptr == '\0') {
					break;
				} else if (*next_volume_ptr == ':') {
					++next_volume_ptr;
					break;
				} else {
					path[k] = *next_volume_ptr;
					++next_volume_ptr;
				}
			}
			path[k] = '\0';

			error = 0;
			cdev_t dev = kgetdiskbyname(path);
			error = bdevvp(dev, &devvp);
			if (error) {
				kprintf("hammer_mountroot: can't find devvp\n");
			}
		} else {
			error = copyin(&info.volumes[i], &upath,
				       sizeof(char *));
			if (error == 0)
				error = copyinstr(upath, path,
						  MAXPATHLEN, NULL);
		}
		if (error == 0)
			error = hammer_install_volume(hmp, path, devvp);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}

	/*
	 * Check that all required volumes are available
	 */
	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
		error = EINVAL;
	}

	if (error) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
	 * will acquire a per-mount token prior to entry and release it
	 * on return, so even if we do not specify it we no longer get
	 * the BGL regardless of how we are flagged.
	 */
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;
	/*MNTK_RD_MPSAFE | MNTK_GA_MPSAFE | MNTK_IN_MPSAFE;*/

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * Check filesystem version
	 */
	hmp->version = rootvol->ondisk->vol_version;
	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
	    hmp->version > HAMMER_VOL_VERSION_MAX) {
		kprintf("HAMMER: mount unsupported fs version %d\n",
			hmp->version);
		error = ERANGE;
		goto done;
	}

	/*
	 * The undo_rec_limit limits the size of flush groups to avoid
	 * blowing out the UNDO FIFO.  This calculation is typically in
	 * the tens of thousands and is designed primarily when small
	 * HAMMER filesystems are created.
	 */
	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
	if (hammer_debug_general & 0x0001)
		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);

	/*
	 * NOTE: Recover stage1 not only handles meta-data recovery, it
	 *	 also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
	 */
	error = hammer_recover_stage1(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume.
	 *
	 * The top 16 bits of fsid.val[1] is a pfs id.
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;

	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	hmp->flush_tid1 = hmp->next_tid;
	hmp->flush_tid2 = hmp->next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, NULL, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/
	if (hmp->ronly == 0)
		error = hammer_recover_stage2(hmp, rootvol);

	/*
	 * If the stage2 recovery fails be sure to clean out all cached
	 * vnodes before throwing away the mount structure or bad things
	 * will happen.
	 */
	if (error)
		vflush(mp, 0, 0);

done:
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		/* New mount */

		/* Populate info for mount point (NULL pad)*/
		bzero(mp->mnt_stat.f_mntonname, MNAMELEN);
		size_t size;
		if (mntpt) {
			copyinstr(mntpt, mp->mnt_stat.f_mntonname,
				  MNAMELEN -1, &size);
		} else { /* Root mount */
			mp->mnt_stat.f_mntonname[0] = '/';
		}
	}
	(void)VFS_STATFS(mp, &mp->mnt_stat, cred);
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
	} else {
		lwkt_reltoken(&hmp->fs_token);
	}
	return (error);
}
/*
 * VFS Operations.
 *
 * mount system call for FFS: handles new mounts, update mounts
 * (read-only <-> read/write transitions, soft-dependency toggling,
 * reload, export-list changes) and fills in the mount statistics.
 */
int
ffs_mount(struct mount *mp, const char *path, void *data,
    struct nameidata *ndp, struct proc *p)
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	int error = 0, flags;
	int ronly;
	mode_t accessmode;
	size_t size;

	error = copyin(data, &args, sizeof (struct ufs_args));
	if (error)
		return (error);

#ifndef FFS_SOFTUPDATES
	if (mp->mnt_flag & MNT_SOFTDEP) {
		printf("WARNING: soft updates isn't compiled in\n");
		mp->mnt_flag &= ~MNT_SOFTDEP;
	}
#endif

	/*
	 * Soft updates is incompatible with "async",
	 * so if we are doing softupdates stop the user
	 * from setting the async flag.
	 */
	if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
	    (MNT_SOFTDEP | MNT_ASYNC)) {
		return (EINVAL);
	}
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		devvp = ump->um_devvp;
		error = 0;
		ronly = fs->fs_ronly;

		/* Downgrading from read/write to read-only. */
		if (ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/* Flush any dirty data */
			mp->mnt_flag &= ~MNT_RDONLY;
			VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p);
			mp->mnt_flag |= MNT_RDONLY;

			/*
			 * Get rid of files open for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (fs->fs_flags & FS_DOSOFTDEP) {
				error = softdep_flushfiles(mp, flags, p);
				mp->mnt_flag &= ~MNT_SOFTDEP;
			} else
				error = ffs_flushfiles(mp, flags, p);
			ronly = 1;
		}

		/*
		 * Flush soft dependencies if disabling it via an update
		 * mount. This may leave some items to be processed,
		 * so don't do this yet XXX.
		 */
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    !(mp->mnt_flag & MNT_SOFTDEP) &&
		    !(mp->mnt_flag & MNT_RDONLY) && fs->fs_ronly == 0) {
#if 0
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = softdep_flushfiles(mp, flags, p);
#elif FFS_SOFTUPDATES
			mp->mnt_flag |= MNT_SOFTDEP;
#endif
		}
		/*
		 * When upgrading to a softdep mount, we must first flush
		 * all vnodes. (not done yet -- see above)
		 */
		if (!(fs->fs_flags & FS_DOSOFTDEP) &&
		    (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
#if 0
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ffs_flushfiles(mp, flags, p);
#else
			mp->mnt_flag &= ~MNT_SOFTDEP;
#endif
		}

		if (!error && (mp->mnt_flag & MNT_RELOAD))
			error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
		if (error)
			goto error_1;

		/* Upgrading from read-only to read/write. */
		if (ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			if (suser(p, 0)) {
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
				error = VOP_ACCESS(devvp, VREAD | VWRITE,
						   p->p_ucred, p);
				VOP_UNLOCK(devvp, 0, p);
				if (error)
					goto error_1;
			}

			/* Refuse R/W on an unclean fs unless forced. */
			if (fs->fs_clean == 0) {
#if 0
				/*
				 * It is safe mount unclean file system
				 * if it was previously mounted with softdep
				 * but we may loss space and must
				 * sometimes run fsck manually.
				 */
				if (fs->fs_flags & FS_DOSOFTDEP)
					printf(
"WARNING: %s was not properly unmounted\n",
					    fs->fs_fsmnt);
				else
#endif
				if (mp->mnt_flag & MNT_FORCE) {
					printf(
"WARNING: %s was not properly unmounted\n",
					    fs->fs_fsmnt);
				} else {
					printf(
"WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck\n",
					    fs->fs_fsmnt);
					error = EROFS;
					goto error_1;
				}
			}

			if ((fs->fs_flags & FS_DOSOFTDEP)) {
				error = softdep_mount(devvp, mp, fs,
						      p->p_ucred);
				if (error)
					goto error_1;
			}
			/* Per-cg contiguous-dir table, freed on downgrade. */
			fs->fs_contigdirs=(u_int8_t*)malloc((u_long)fs->fs_ncg,
			    M_UFSMNT, M_WAITOK);
			bzero(fs->fs_contigdirs, fs->fs_ncg);

			ronly = 0;
		}
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 */
			error = vfs_export(mp, &ump->um_export,
			    &args.export_info);
			if (error)
				goto error_1;
			else
				goto success;
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if ((error = namei(ndp)) != 0)
		goto error_1;

	devvp = ndp->ni_vp;

	if (devvp->v_type != VBLK) {
		error = ENOTBLK;
		goto error_2;
	}

	if (major(devvp->v_rdev) >= nblkdev) {
		error = ENXIO;
		goto error_2;
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (suser(p, 0)) {
		accessmode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
		error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
		VOP_UNLOCK(devvp, 0, p);
		if (error)
			goto error_2;
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * UPDATE
		 * If it's not the same vnode, or at least the same device
		 * then it's not correct.
		 */
		if (devvp != ump->um_devvp) {
			if (devvp->v_rdev == ump->um_devvp->v_rdev) {
				vrele(devvp);
			} else {
				error = EINVAL;	/* needs translation */
			}
		} else
			vrele(devvp);
		/*
		 * Update device name only on success
		 */
		if (!error) {
			/*
			 * Save "mounted from" info for mount point (NULL pad)
			 */
			copyinstr(args.fspec,
			    mp->mnt_stat.f_mntfromname,
			    MNAMELEN - 1,
			    &size);
			bzero(mp->mnt_stat.f_mntfromname + size,
			    MNAMELEN - size);
		}
	} else {
		/*
		 * Since this is a new mount, we want the names for
		 * the device and the mount point copied in.  If an
		 * error occurs, the mountpoint is discarded by the
		 * upper level code.
		 */
		/* Save "last mounted on" info for mount point (NULL pad)*/
		copyinstr(path,				/* mount point*/
		    mp->mnt_stat.f_mntonname,		/* save area*/
		    MNAMELEN - 1,			/* max size*/
		    &size);				/* real size*/
		bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);

		/* Save "mounted from" info for mount point (NULL pad)*/
		copyinstr(args.fspec,			/* device name*/
		    mp->mnt_stat.f_mntfromname,		/* save area*/
		    MNAMELEN - 1,			/* max size*/
		    &size);				/* real size*/
		bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);

		error = ffs_mountfs(devvp, mp, p);
	}

	if (error)
		goto error_2;

	/*
	 * Initialize FS stat information in mount struct; uses both
	 * mp->mnt_stat.f_mntonname and mp->mnt_stat.f_mntfromname
	 *
	 * This code is common to root and non-root mounts
	 */
	bcopy(&args, &mp->mnt_stat.mount_info.ufs_args, sizeof(args));
	(void)VFS_STATFS(mp, &mp->mnt_stat, p);

success:
	if (path && (mp->mnt_flag & MNT_UPDATE)) {
		/* Update clean flag after changing read-onlyness. */
		fs = ump->um_fs;
		if (ronly != fs->fs_ronly) {
			fs->fs_ronly = ronly;
			fs->fs_clean = ronly &&
			    (fs->fs_flags & FS_UNCLEAN) == 0 ? 1 : 0;
			if (ronly)
				free(fs->fs_contigdirs, M_UFSMNT);
		}
		if (!ronly) {
			if (mp->mnt_flag & MNT_SOFTDEP)
				fs->fs_flags |= FS_DOSOFTDEP;
			else
				fs->fs_flags &= ~FS_DOSOFTDEP;
		}
		/* Push the updated superblock flags to disk. */
		ffs_sbupdate(ump, MNT_WAIT);
	}
	return (0);

error_2:	/* error with devvp held */
	vrele (devvp);
error_1:	/* no state to back out */
	return (error);
}
/*
 * VFS mount entry point for NTFS.  A NULL 'path' flags a root mount;
 * otherwise 'data' supplies a struct ntfs_args from userspace.  Update
 * mounts are only supported for export-list changes.
 */
static int
ntfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	size_t		size;
	int		error;
	struct vnode	*devvp;
	struct ntfs_args args;
	struct nlookupdata nd;
	struct vnode *rootvp;

	error = 0;
	/*
	 * Use NULL path to flag a root mount
	 */
	if( path == NULL) {
		/*
		 ***
		 * Mounting root file system
		 ***
		 */

		/* Get vnode for root device*/
		if( bdevvp( rootdev, &rootvp))
			panic("ffs_mountroot: can't setup bdevvp for root");

		/*
		 * FS specific handling
		 */
		mp->mnt_flag |= MNT_RDONLY;	/* XXX globally applicable?*/

		/*
		 * Attempt mount
		 */
		if( ( error = ntfs_mountfs(rootvp, mp, &args, cred)) != 0) {
			/* fs specific cleanup (if any)*/
			goto error_1;
		}

		goto dostatfs;		/* success*/
	}

	/*
	 ***
	 * Mounting non-root file system or updating a file system
	 ***
	 */

	/* copy in user arguments*/
	error = copyin(data, (caddr_t)&args, sizeof (struct ntfs_args));
	if (error)
		goto error_1;		/* can't get arguments*/

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		/* if not updating name...*/
		if (args.fspec == NULL) {
			/*
			 * Process export requests.  Jumping to "success"
			 * will return the vfs_export() error code.
			 */
			struct ntfsmount *ntm = VFSTONTFS(mp);
			error = vfs_export(mp, &ntm->ntm_export, &args.export);
			goto success;
		}

		kprintf("ntfs_mount(): MNT_UPDATE not supported\n");
		error = EINVAL;
		goto error_1;
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	devvp = NULL;
	error = nlookup_init(&nd, args.fspec, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
	nlookup_done(&nd);
	if (error)
		goto error_1;

	/* Must refer to a disk device. */
	if (!vn_isdisk(devvp, &error))
		goto error_2;

	if (mp->mnt_flag & MNT_UPDATE) {
#if 0
		/*
		 ********************
		 * UPDATE
		 ********************
		 */

		if (devvp != ntmp->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
		/*
		 * Update device name only on success
		 */
		if( !error) {
			/* Save "mounted from" info for mount point (NULL pad)*/
			copyinstr(	args.fspec,
					mp->mnt_stat.f_mntfromname,
					MNAMELEN - 1,
					&size);
			bzero( mp->mnt_stat.f_mntfromname + size,
				MNAMELEN - size);
		}
#endif
	} else {
		/*
		 ********************
		 * NEW MOUNT
		 ********************
		 */

		/* Save "mounted from" info for mount point (NULL pad)*/
		copyinstr(	args.fspec,	/* device name*/
				mp->mnt_stat.f_mntfromname,	/* save area*/
				MNAMELEN - 1,	/* max size*/
				&size);	/* real size*/
		bzero( mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);

		error = ntfs_mountfs(devvp, mp, &args, cred);
	}
	if (error) {
		goto error_2;
	}

dostatfs:
	/*
	 * Initialize FS stat information in mount struct; uses
	 * mp->mnt_stat.f_mntfromname.
	 *
	 * This code is common to root and non-root mounts
	 */
	(void)VFS_STATFS(mp, &mp->mnt_stat, cred);

	goto success;

error_2:	/* error with devvp held*/

	/* release devvp before failing*/
	vrele(devvp);

error_1:	/* no state to back out*/

success:
	return(error);
}
/*
 * Mount an XFS filesystem.  Bridges the FreeBSD VFS mount protocol onto
 * the ported XVFS layer.  Only read-only mounts are accepted (see the
 * EPERM check below); update mounts are a no-op.
 */
static int
_xfs_mount(struct mount *mp)
{
	struct xfsmount *xmp;
	struct xfs_vnode *rootvp;
	struct ucred *curcred;
	struct vnode *rvp, *devvp;
	struct cdev *ddev;
	struct g_consumer *cp;
	struct thread *td;
	int error;

	td = curthread;
	ddev = NULL;
	cp = NULL;

	/* Reject unrecognized mount options up front. */
	if (vfs_filteropt(mp->mnt_optnew, xfs_opts))
		return (EINVAL);

	/* Nothing to do on an update mount. */
	if (mp->mnt_flag & MNT_UPDATE)
		return (0);

	/* Only read-only mounts are supported. */
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		return (EPERM);

	xmp = xfsmount_allocate(mp);
	if (xmp == NULL)
		return (ENOMEM);

	if((error = _xfs_param_copyin(mp, td)) != 0)
		goto fail;

	curcred = td->td_ucred;
	XVFS_MOUNT(XFSTOVFS(xmp), &xmp->m_args, curcred, error);
	if (error)
		goto fail;

	/*
	 * Grab the root xfs_vnode and the underlying data device; both
	 * are needed for the error path below as well.
	 */
	XVFS_ROOT(XFSTOVFS(xmp), &rootvp, error);
	ddev = XFS_VFSTOM(XFSTOVFS(xmp))->m_ddev_targp->dev;
	devvp = XFS_VFSTOM(XFSTOVFS(xmp))->m_ddev_targp->specvp;
	if (error)
		goto fail_unmount;

	/* Clamp the mount's max I/O size to the device and MAXPHYS. */
	if (ddev->si_iosize_max != 0)
		mp->mnt_iosize_max = ddev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;

	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_stat.f_fsid.val[0] = dev2udev(ddev);
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;

	if ((error = VFS_STATFS(mp, &mp->mnt_stat)) != 0)
		goto fail_unmount;

	rvp = rootvp->v_vnode;
	rvp->v_vflag |= VV_ROOT;
	VN_RELE(rootvp);

	return (0);

fail_unmount:
	XVFS_UNMOUNT(XFSTOVFS(xmp), 0, curcred, error);

	/* Close the GEOM consumer opened for the data device, if any. */
	if (devvp != NULL) {
		cp = devvp->v_bufobj.bo_private;
		if (cp != NULL) {
			DROP_GIANT();
			g_topology_lock();
			g_vfs_close(cp);
			g_topology_unlock();
			PICKUP_GIANT();
		}
	}

fail:
	if (xmp != NULL)
		xfsmount_deallocate(xmp);

	return (error);
}
/*
 * Mount a filesystem of type 'fstype' from 'fspec' onto directory vnode
 * 'vp' at path 'fspath'.  VI_MOUNT is set on vp for the duration to
 * exclude concurrent mounts on the same covered vnode; the mount runs
 * under privileged credentials so unprivileged users who triggered it
 * cannot later unmount it.
 */
int
domount(kthread_t *td, vnode_t *vp, const char *fstype, char *fspath,
    char *fspec, int fsflags)
{
	struct mount *mp;
	struct vfsconf *vfsp;
	struct ucred *newcr, *oldcr;
	int error;

	/*
	 * Be ultra-paranoid about making sure the type and fspath
	 * variables will fit in our mp buffers, including the
	 * terminating NUL.
	 */
	if (strlen(fstype) >= MFSNAMELEN || strlen(fspath) >= MNAMELEN)
		return (ENAMETOOLONG);

	vfsp = vfs_byname_kld(fstype, td, &error);
	if (vfsp == NULL)
		return (ENODEV);

	if (vp->v_type != VDIR)
		return (ENOTDIR);
	/* Claim the covered vnode; fail if someone else already did. */
	simple_lock(&vp->v_interlock);
	if ((vp->v_iflag & VI_MOUNT) != 0 || vp->v_mountedhere != NULL) {
		simple_unlock(&vp->v_interlock);
		return (EBUSY);
	}
	vp->v_iflag |= VI_MOUNT;
	simple_unlock(&vp->v_interlock);

	/*
	 * Allocate and initialize the filesystem.
	 */
	vn_lock(vp, LK_SHARED | LK_RETRY);
	mp = vfs_mount_alloc(vp, vfsp, fspath, td);
	VOP_UNLOCK(vp);

	/* Stage the "from" option in mnt_optnew for VFS_MOUNT(). */
	mp->mnt_optnew = NULL;
	vfs_setmntopt(mp, "from", fspec, 0);
	mp->mnt_optnew = mp->mnt_opt;
	mp->mnt_opt = NULL;

	/*
	 * Set the mount level flags.
	 * crdup() can sleep, so do it before acquiring a mutex.
	 */
	newcr = crdup(kcred);
	MNT_ILOCK(mp);
	if (fsflags & MNT_RDONLY)
		mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag &=~ MNT_UPDATEMASK;
	mp->mnt_flag |= fsflags & (MNT_UPDATEMASK | MNT_FORCE | MNT_ROOTFS);
	/*
	 * Unprivileged user can trigger mounting a snapshot, but we don't want
	 * him to unmount it, so we switch to privileged credentials.
	 */
	oldcr = mp->mnt_cred;
	mp->mnt_cred = newcr;
	mp->mnt_stat.f_owner = mp->mnt_cred->cr_uid;
	MNT_IUNLOCK(mp);
	crfree(oldcr);

	/*
	 * Mount the filesystem.
	 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
	 * get.  No freeing of cn_pnbuf.
	 */
	error = VFS_MOUNT(mp, td);
	if (!error) {
		/* Commit the staged options and refresh the statistics. */
		if (mp->mnt_opt != NULL)
			vfs_freeopts(mp->mnt_opt);
		mp->mnt_opt = mp->mnt_optnew;
		(void)VFS_STATFS(mp, &mp->mnt_stat, td);
	}
	/*
	 * Prevent external consumers of mount options from reading
	 * mnt_optnew.
	 */
	mp->mnt_optnew = NULL;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	/*
	 * Put the new filesystem on the mount list after root.
	 */
#ifdef FREEBSD_NAMECACHE
	cache_purge(vp);
#endif
	if (!error) {
		vnode_t *mvp;

		simple_lock(&vp->v_interlock);
		vp->v_iflag &= ~VI_MOUNT;
		simple_unlock(&vp->v_interlock);
		vp->v_mountedhere = mp;
		mountlist_append(mp);
		vfs_event_signal(NULL, VQ_MOUNT, 0);
		if (VFS_ROOT(mp, LK_EXCLUSIVE, &mvp, td))
			panic("mount: lost mount");
		/* Repoint any cwd/root references from vp to the new root. */
		mountcheckdirs(vp, mvp);
		vput(mvp);
		VOP_UNLOCK(vp);
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			vfs_syncer_add_to_worklist(mp);
		vfs_unbusy(mp, td);
		vfs_mountedfrom(mp, fspec);
	} else {
		/* Mount failed: release the claim and tear everything down. */
		simple_lock(&vp->v_interlock);
		vp->v_iflag &= ~VI_MOUNT;
		simple_unlock(&vp->v_interlock);
		VOP_UNLOCK(vp);
		vfs_unbusy(mp, td);
		vfs_mount_destroy(mp);
	}
	return (error);
}
/*
 * Mount a snapshot of type 'fstype' from 'fspec' on the directory
 * vnode *vpp.  Snapshots are forced read-only, nosuid, and hidden from
 * mount(8)/df(1) (MNT_IGNORE).  On success *vpp is replaced with the
 * locked root vnode of the new mount.
 */
int
mount_snapshot(kthread_t *td, vnode_t **vpp, const char *fstype, char *fspath,
    char *fspec, int fsflags)
{
	struct vfsconf *vfsp;
	struct mount *mp;
	vnode_t *vp, *mvp;
	struct ucred *cr;
	int error;

	/*
	 * Be ultra-paranoid about making sure the type and fspath
	 * variables will fit in our mp buffers, including the
	 * terminating NUL.
	 */
	if (strlen(fstype) >= MFSNAMELEN || strlen(fspath) >= MNAMELEN)
		return (ENAMETOOLONG);

	vfsp = vfs_byname_kld(fstype, td, &error);
	if (vfsp == NULL)
		return (ENODEV);

	vp = *vpp;
	if (vp->v_type != VDIR)
		return (ENOTDIR);
	/*
	 * We need vnode lock to protect v_mountedhere and vnode interlock
	 * to protect v_iflag.
	 */
	vn_lock(vp, LK_SHARED | LK_RETRY);
	VI_LOCK(vp);
	if ((vp->v_iflag & VI_MOUNT) != 0 || vp->v_mountedhere != NULL) {
		VI_UNLOCK(vp);
		VOP_UNLOCK(vp, 0);
		return (EBUSY);
	}
	vp->v_iflag |= VI_MOUNT;
	VI_UNLOCK(vp);
	VOP_UNLOCK(vp, 0);

	/*
	 * Allocate and initialize the filesystem.
	 * We don't want regular user that triggered snapshot mount to be able
	 * to unmount it, so pass credentials of the parent mount.
	 */
	mp = vfs_mount_alloc(vp, vfsp, fspath, vp->v_mount->mnt_cred);

	/* Stage the "from" option in mnt_optnew for VFS_MOUNT(). */
	mp->mnt_optnew = NULL;
	vfs_setmntopt(mp, "from", fspec, 0);
	mp->mnt_optnew = mp->mnt_opt;
	mp->mnt_opt = NULL;

	/*
	 * Set the mount level flags.
	 */
	mp->mnt_flag = fsflags & MNT_UPDATEMASK;
	/*
	 * Snapshots are always read-only.
	 */
	mp->mnt_flag |= MNT_RDONLY;
	/*
	 * We don't want snapshots to allow access to vulnerable setuid
	 * programs, so we turn off setuid when mounting snapshots.
	 */
	mp->mnt_flag |= MNT_NOSUID;
	/*
	 * We don't want snapshots to be visible in regular
	 * mount(8) and df(1) output.
	 */
	mp->mnt_flag |= MNT_IGNORE;
	/*
	 * XXX: This is evil, but we can't mount a snapshot as a regular user.
	 * XXX: Is is safe when snapshot is mounted from within a jail?
	 */
	/* Temporarily run VFS_MOUNT() with kernel credentials. */
	cr = td->td_ucred;
	td->td_ucred = kcred;
	error = VFS_MOUNT(mp);
	td->td_ucred = cr;

	if (error != 0) {
		/* Mount failed: drop the VI_MOUNT claim and clean up. */
		VI_LOCK(vp);
		vp->v_iflag &= ~VI_MOUNT;
		VI_UNLOCK(vp);
		vrele(vp);
		vfs_unbusy(mp);
		vfs_mount_destroy(mp);
		*vpp = NULL;
		return (error);
	}

	/* Commit the staged options and refresh the statistics. */
	if (mp->mnt_opt != NULL)
		vfs_freeopts(mp->mnt_opt);
	mp->mnt_opt = mp->mnt_optnew;
	(void)VFS_STATFS(mp, &mp->mnt_stat);

	/*
	 * Prevent external consumers of mount options from reading
	 * mnt_optnew.
	 */
	mp->mnt_optnew = NULL;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef FREEBSD_NAMECACHE
	cache_purge(vp);
#endif
	VI_LOCK(vp);
	vp->v_iflag &= ~VI_MOUNT;
	VI_UNLOCK(vp);

	vp->v_mountedhere = mp;
	/* Put the new filesystem on the mount list. */
	mtx_lock(&mountlist_mtx);
	TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	vfs_event_signal(NULL, VQ_MOUNT, 0);
	if (VFS_ROOT(mp, LK_EXCLUSIVE, &mvp))
		panic("mount: lost mount");
	vput(vp);
	vfs_unbusy(mp);
	*vpp = mvp;
	return (0);
}