/*
 * VFS sync interposer for the quota manager behavior: flush the
 * dquots first, then pass the sync request down the behavior chain.
 */
STATIC int
xfs_qm_syncall(
	struct bhv_desc	*bhv,
	int		flags,
	cred_t		*credp)
{
	struct xfs_mount	*mp = XFS_VFSTOM(bhvtovfs(bhv));
	int			error;

	/*
	 * Get the Quota Manager to flush the dquots.
	 */
	if (XFS_IS_QUOTA_ON(mp)) {
		error = xfs_qm_sync(mp, flags);
		if (error) {
			/*
			 * An IO error here means a shutdown is under way,
			 * so there is nothing more for us to do.
			 */
			ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp));
			if (XFS_FORCED_SHUTDOWN(mp))
				return XFS_ERROR(error);
		}
	}

	/* Hand the sync off to the next behavior; it sets 'error'. */
	PVFS_SYNC(BHV_NEXT(bhv), flags, credp, error);
	return error;
}
STATIC int xfs_qm_showargs( struct bhv_desc *bhv, struct seq_file *m) { struct vfs *vfsp = bhvtovfs(bhv); struct xfs_mount *mp = XFS_VFSTOM(vfsp); int error; if (mp->m_qflags & XFS_UQUOTA_ACCT) { (mp->m_qflags & XFS_UQUOTA_ENFD) ? seq_puts(m, "," MNTOPT_USRQUOTA) : seq_puts(m, "," MNTOPT_UQUOTANOENF); } if (mp->m_qflags & XFS_PQUOTA_ACCT) { (mp->m_qflags & XFS_OQUOTA_ENFD) ? seq_puts(m, "," MNTOPT_PRJQUOTA) : seq_puts(m, "," MNTOPT_PQUOTANOENF); } if (mp->m_qflags & XFS_GQUOTA_ACCT) { (mp->m_qflags & XFS_OQUOTA_ENFD) ? seq_puts(m, "," MNTOPT_GRPQUOTA) : seq_puts(m, "," MNTOPT_GQUOTANOENF); } if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT)) seq_puts(m, "," MNTOPT_NOQUOTA); PVFS_SHOWARGS(BHV_NEXT(bhv), m, error); return error; }
/*
 * Bind an XFS inode behavior to its vnode and fill in the associated
 * Linux inode.  For inodes that are brand new (I_NEW), also install
 * the inode operation vectors and unlock the inode when 'unlock' is set.
 */
void
xfs_initialize_vnode(
	bhv_desc_t	*bdp,		/* mount-level behavior descriptor */
	vnode_t		*vp,		/* vnode to initialize */
	bhv_desc_t	*inode_bhv,	/* inode's behavior descriptor */
	int		unlock)		/* nonzero: finish + unlock new inodes */
{
	xfs_inode_t	*ip = XFS_BHVTOI(inode_bhv);
	struct inode	*inode = LINVFS_GET_IP(vp);

	/*
	 * Link the inode behavior into the vnode's behavior chain if it
	 * is not attached to a vnode object yet.
	 */
	if (!inode_bhv->bd_vobj) {
		vp->v_vfsp = bhvtovfs(bdp);
		bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops);
		bhv_insert(VN_BHV_HEAD(vp), inode_bhv);
	}

	/* Derive the vnode type from the on-disk inode mode. */
	vp->v_type = IFTOVT(ip->i_d.di_mode);

	/* Have we been called during the new inode create process,
	 * in which case we are too early to fill in the Linux inode.
	 */
	if (vp->v_type == VNON)
		return;

	xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);

	/* For new inodes we need to set the ops vectors,
	 * and unlock the inode.
	 */
	if (unlock && (inode->i_state & I_NEW)) {
		xfs_set_inodeops(inode);
		unlock_new_inode(inode);
	}
}
/*
 * Parse the quota-related mount options out of the option string,
 * setting the corresponding XFSMNT_* flags in 'args'.  Options this
 * layer consumes are blanked out (overwritten with commas) so the rest
 * of the behavior chain does not see them; unrecognized options are
 * passed through untouched.  If no quota option was referenced and
 * this is not a remount update, the quota-manager behavior is removed
 * from the chain entirely.
 */
STATIC int
xfs_qm_parseargs(
	struct bhv_desc		*bhv,
	char			*options,	/* comma-separated option string, modified in place */
	struct xfs_mount_args	*args,
	int			update)		/* nonzero on remount */
{
	size_t			length;
	char			*local_options = options;
	char			*this_char;
	int			error;
	int			referenced = update;

	while ((this_char = strsep(&local_options, ",")) != NULL) {
		length = strlen(this_char);
		/*
		 * strsep replaced the trailing ',' with a NUL; count it
		 * too so the blank-out below covers the separator.
		 */
		if (local_options)
			length++;

		if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
			args->flags &= ~(XFSMNT_UQUOTAENF|XFSMNT_UQUOTA);
			args->flags &= ~(XFSMNT_GQUOTAENF|XFSMNT_GQUOTA);
			/* noquota alone does not keep the QM behavior around */
			referenced = update;
		} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
			   !strcmp(this_char, MNTOPT_UQUOTA) ||
			   !strcmp(this_char, MNTOPT_USRQUOTA)) {
			args->flags |= XFSMNT_UQUOTA | XFSMNT_UQUOTAENF;
			referenced = 1;
		} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
			   !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
			/* user quota accounting without enforcement */
			args->flags |= XFSMNT_UQUOTA;
			args->flags &= ~XFSMNT_UQUOTAENF;
			referenced = 1;
		} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
			   !strcmp(this_char, MNTOPT_GRPQUOTA)) {
			args->flags |= XFSMNT_GQUOTA | XFSMNT_GQUOTAENF;
			referenced = 1;
		} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
			/* group quota accounting without enforcement */
			args->flags |= XFSMNT_GQUOTA;
			args->flags &= ~XFSMNT_GQUOTAENF;
			referenced = 1;
		} else {
			/*
			 * Not ours: restore the ',' that strsep turned
			 * into a NUL and leave the option for the next
			 * parser in the chain.
			 */
			if (local_options)
				*(local_options-1) = ',';
			continue;
		}

		/* Consumed: blank the option out of the string. */
		while (length--)
			*this_char++ = ',';
	}

	PVFS_PARSEARGS(BHV_NEXT(bhv), options, args, update, error);
	if (!error && !referenced)
		bhv_remove_vfsops(bhvtovfs(bhv), VFS_POSITION_QM);
	return error;
}
/*
 * Handle remount option changes: toggle noatime, and transition the
 * filesystem between read-write and read-only.  Going read-only flushes
 * all dirty state and writes an unmount record so the log is clean.
 */
STATIC int
xfs_mntupdate(
	bhv_desc_t			*bdp,
	int				*flags,		/* MS_* remount flags */
	struct xfs_mount_args		*args)
{
	struct vfs	*vfsp = bhvtovfs(bdp);
	xfs_mount_t	*mp = XFS_BHVTOM(bdp);
	int		pincount, error;
	int		count = 0;

	if (args->flags & XFSMNT_NOATIME)
		mp->m_flags |= XFS_MOUNT_NOATIME;
	else
		mp->m_flags &= ~XFS_MOUNT_NOATIME;

	/* Flush everything while we are still writable. */
	if (!(vfsp->vfs_flag & VFS_RDONLY)) {
		VFS_SYNC(vfsp, SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR, NULL, error);
	}

	if (*flags & MS_RDONLY) {
		xfs_refcache_purge_mp(mp);
		xfs_flush_buftarg(mp->m_ddev_targp, 0);
		xfs_finish_reclaim_all(mp, 0);

		/* This loop must run at least twice.
		 * The first instance of the loop will flush
		 * most meta data but that will generate more
		 * meta data (typically directory updates).
		 * Which then must be flushed and logged before
		 * we can write the unmount record.
		 */
		do {
			VFS_SYNC(vfsp, REMOUNT_READONLY_FLAGS, NULL, error);
			pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
			if (!pincount) {
				delay(50);
				count++;
			}
		} while (count < 2);

		/* Ok now write out an unmount record */
		xfs_log_unmount_write(mp);
		xfs_unmountfs_writesb(mp);
		vfsp->vfs_flag |= VFS_RDONLY;
	} else {
		/* read-only -> read-write: just clear the flag */
		vfsp->vfs_flag &= ~VFS_RDONLY;
	}

	return 0;
}
STATIC int xfs_qm_mount( struct bhv_desc *bhv, struct xfs_mount_args *args, struct cred *cr) { struct vfs *vfsp = bhvtovfs(bhv); struct xfs_mount *mp = XFS_VFSTOM(vfsp); int error; if (args->flags & (XFSMNT_UQUOTA | XFSMNT_GQUOTA | XFSMNT_PQUOTA)) xfs_qm_mount_quotainit(mp, args->flags); PVFS_MOUNT(BHV_NEXT(bhv), args, cr, error); return error; }
/*
 * The main distribution switch of all XFS quotactl system calls.
 *
 * The first switch handles commands that are legal even with quotas
 * off (truncating quota files, querying status, turning quotas on,
 * syncing).  Everything after it requires quotas to be on, otherwise
 * ESRCH is returned.
 */
int
xfs_qm_quotactl(
	struct bhv_desc	*bdp,
	int		cmd,		/* Q_X* quotactl command */
	int		id,		/* user/group/project id */
	xfs_caddr_t	addr)		/* user buffer, cmd-dependent */
{
	xfs_mount_t	*mp;
	bhv_vfs_t	*vfsp;
	int		error;

	vfsp = bhvtovfs(bdp);
	mp = XFS_VFSTOM(vfsp);

	/* Every command except Q_XQUOTASYNC needs a user buffer. */
	ASSERT(addr != NULL || cmd == Q_XQUOTASYNC);

	/*
	 * The following commands are valid even when quotaoff.
	 */
	switch (cmd) {
	case Q_XQUOTARM:
		/*
		 * Truncate quota files. quota must be off.
		 */
		if (XFS_IS_QUOTA_ON(mp))
			return XFS_ERROR(EINVAL);
		if (vfsp->vfs_flag & VFS_RDONLY)
			return XFS_ERROR(EROFS);
		return (xfs_qm_scall_trunc_qfiles(mp,
			       xfs_qm_import_qtype_flags(*(uint *)addr)));

	case Q_XGETQSTAT:
		/*
		 * Get quota status information.
		 */
		return (xfs_qm_scall_getqstat(mp, (fs_quota_stat_t *)addr));

	case Q_XQUOTAON:
		/*
		 * QUOTAON - enabling quota enforcement.
		 * Quota accounting must be turned on at mount time.
		 */
		if (vfsp->vfs_flag & VFS_RDONLY)
			return XFS_ERROR(EROFS);
		return (xfs_qm_scall_quotaon(mp,
					  xfs_qm_import_flags(*(uint *)addr)));

	case Q_XQUOTAOFF:
		if (vfsp->vfs_flag & VFS_RDONLY)
			return XFS_ERROR(EROFS);
		/* fall through to the quota-on-only switch below */
		break;

	case Q_XQUOTASYNC:
		return (xfs_sync_inodes(mp, SYNC_DELWRI, NULL));

	default:
		break;
	}

	if (! XFS_IS_QUOTA_ON(mp))
		return XFS_ERROR(ESRCH);

	/* Commands below require quotas to be on. */
	switch (cmd) {
	case Q_XQUOTAOFF:
		if (vfsp->vfs_flag & VFS_RDONLY)
			return XFS_ERROR(EROFS);
		error = xfs_qm_scall_quotaoff(mp,
					    xfs_qm_import_flags(*(uint *)addr),
					    B_FALSE);
		break;

	case Q_XGETQUOTA:
		/* Get user quota limits/usage for 'id'. */
		error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_USER,
					(fs_disk_quota_t *)addr);
		break;
	case Q_XGETGQUOTA:
		/* Get group quota limits/usage for 'id'. */
		error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_GROUP,
					(fs_disk_quota_t *)addr);
		break;
	case Q_XGETPQUOTA:
		/* Get project quota limits/usage for 'id'. */
		error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_PROJ,
					(fs_disk_quota_t *)addr);
		break;

	case Q_XSETQLIM:
		/* Set user quota limits for 'id'. */
		if (vfsp->vfs_flag & VFS_RDONLY)
			return XFS_ERROR(EROFS);
		error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_USER,
					     (fs_disk_quota_t *)addr);
		break;
	case Q_XSETGQLIM:
		/* Set group quota limits for 'id'. */
		if (vfsp->vfs_flag & VFS_RDONLY)
			return XFS_ERROR(EROFS);
		error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_GROUP,
					     (fs_disk_quota_t *)addr);
		break;
	case Q_XSETPQLIM:
		/* Set project quota limits for 'id'. */
		if (vfsp->vfs_flag & VFS_RDONLY)
			return XFS_ERROR(EROFS);
		error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_PROJ,
					     (fs_disk_quota_t *)addr);
		break;

	default:
		error = XFS_ERROR(EINVAL);
		break;
	}

	return (error);
}
/*
 * The main distribution switch of all XFS quotactl system calls.
 *
 * NOTE(review): this is an older variant of xfs_qm_quotactl (no project
 * quota, no Q_XQUOTASYNC) that appears elsewhere in this file in a newer
 * form — presumably the file concatenates two revisions; confirm which
 * one should survive.
 *
 * The first switch handles commands that are legal even with quotas
 * off; commands after the XFS_IS_QUOTA_ON check require quotas on,
 * otherwise ESRCH is returned.
 */
int
xfs_qm_quotactl(
	struct bhv_desc	*bdp,
	int		cmd,		/* Q_X* quotactl command */
	int		id,		/* user/group id */
	xfs_caddr_t	addr)		/* user buffer, cmd-dependent */
{
	xfs_mount_t	*mp;
	int		error;
	struct vfs	*vfsp;

	vfsp = bhvtovfs(bdp);
	mp = XFS_VFSTOM(vfsp);

	/* Only Q_SYNC may come without a buffer or with a negative id. */
	if (addr == NULL && cmd != Q_SYNC)
		return XFS_ERROR(EINVAL);
	if (id < 0 && cmd != Q_SYNC)
		return XFS_ERROR(EINVAL);

	/*
	 * The following commands are valid even when quotaoff.
	 */
	switch (cmd) {
	/*
	 * truncate quota files. quota must be off.
	 */
	case Q_XQUOTARM:
		if (XFS_IS_QUOTA_ON(mp) || addr == NULL)
			return XFS_ERROR(EINVAL);
		if (vfsp->vfs_flag & VFS_RDONLY)
			return XFS_ERROR(EROFS);
		return (xfs_qm_scall_trunc_qfiles(mp,
			       xfs_qm_import_qtype_flags(*(uint *)addr)));
	/*
	 * Get quota status information.
	 */
	case Q_XGETQSTAT:
		return (xfs_qm_scall_getqstat(mp, (fs_quota_stat_t *)addr));

	/*
	 * QUOTAON for root f/s and quota enforcement on others..
	 * Quota accounting for non-root f/s's must be turned on
	 * at mount time.
	 */
	case Q_XQUOTAON:
		if (addr == NULL)
			return XFS_ERROR(EINVAL);
		if (vfsp->vfs_flag & VFS_RDONLY)
			return XFS_ERROR(EROFS);
		return (xfs_qm_scall_quotaon(mp,
					  xfs_qm_import_flags(*(uint *)addr)));
	case Q_XQUOTAOFF:
		if (vfsp->vfs_flag & VFS_RDONLY)
			return XFS_ERROR(EROFS);
		/* fall through to the quota-on-only switch below */
		break;

	default:
		break;
	}

	if (! XFS_IS_QUOTA_ON(mp))
		return XFS_ERROR(ESRCH);

	/* Commands below require quotas to be on. */
	switch (cmd) {
	case Q_XQUOTAOFF:
		if (vfsp->vfs_flag & VFS_RDONLY)
			return XFS_ERROR(EROFS);
		error = xfs_qm_scall_quotaoff(mp,
					    xfs_qm_import_flags(*(uint *)addr),
					    B_FALSE);
		break;

	/*
	 * Defaults to XFS_GETUQUOTA.
	 */
	case Q_XGETQUOTA:
		error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_USER,
					(fs_disk_quota_t *)addr);
		break;
	/*
	 * Set limits, both hard and soft. Defaults to Q_SETUQLIM.
	 */
	case Q_XSETQLIM:
		if (vfsp->vfs_flag & VFS_RDONLY)
			return XFS_ERROR(EROFS);
		error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_USER,
					     (fs_disk_quota_t *)addr);
		break;

	case Q_XSETGQLIM:
		if (vfsp->vfs_flag & VFS_RDONLY)
			return XFS_ERROR(EROFS);
		error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_GROUP,
					     (fs_disk_quota_t *)addr);
		break;

	case Q_XGETGQUOTA:
		error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_GROUP,
					(fs_disk_quota_t *)addr);
		break;

	/*
	 * Quotas are entirely undefined after quotaoff in XFS quotas.
	 * For instance, there's no way to set limits when quotaoff.
	 */
	default:
		error = XFS_ERROR(EINVAL);
		break;
	}

	return (error);
}
/*
 * Unmount the filesystem: send the DMAPI pre-unmount event (if the
 * filesystem is DMI-enabled), flush and release everything, drop the
 * root vnode reference, and finally tear down the mount structures.
 * The DMAPI unmount event, when wanted, is sent even on error.
 */
STATIC int
xfs_unmount(
	bhv_desc_t	*bdp,
	int		flags,
	cred_t		*credp)
{
	struct vfs	*vfsp = bhvtovfs(bdp);
	xfs_mount_t	*mp = XFS_BHVTOM(bdp);
	xfs_inode_t	*rip;
	vnode_t		*rvp;
	int		unmount_event_wanted = 0;
	int		unmount_event_flags = 0;
	int		xfs_unmountfs_needed = 0;
	int		error;

	rip = mp->m_rootip;
	rvp = XFS_ITOV(rip);

	if (vfsp->vfs_flag & VFS_DMI) {
		/* Give DMAPI a chance to veto the unmount. */
		error = XFS_SEND_PREUNMOUNT(mp, vfsp,
				rvp, DM_RIGHT_NULL, rvp, DM_RIGHT_NULL,
				NULL, NULL, 0, 0,
				(mp->m_dmevmask & (1<<DM_EVENT_PREUNMOUNT))?
					0:DM_FLAGS_UNWANTED);
		if (error)
			return XFS_ERROR(error);
		unmount_event_wanted = 1;
		unmount_event_flags = (mp->m_dmevmask & (1<<DM_EVENT_UNMOUNT))?
					0 : DM_FLAGS_UNWANTED;
	}

	/*
	 * First blow any referenced inode from this file system
	 * out of the reference cache, and delete the timer.
	 */
	xfs_refcache_purge_mp(mp);

	XFS_bflush(mp->m_ddev_targp);
	error = xfs_unmount_flush(mp, 0);
	if (error)
		goto out;

	/* Only our own reference to the root vnode should remain. */
	ASSERT(vn_count(rvp) == 1);

	/*
	 * Drop the reference count
	 */
	VN_RELE(rvp);

	/*
	 * If we're forcing a shutdown, typically because of a media error,
	 * we want to make sure we invalidate dirty pages that belong to
	 * referenced vnodes as well.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = xfs_sync(&mp->m_bhv, (SYNC_WAIT | SYNC_CLOSE), credp);
		ASSERT(error != EFSCORRUPTED);
	}
	xfs_unmountfs_needed = 1;

out:
	/*	Send DMAPI event, if required.
	 *	Then do xfs_unmountfs() if needed.
	 *	Then return error (or zero).
	 */
	if (unmount_event_wanted) {
		/* Note: mp structure must still exist for
		 * XFS_SEND_UNMOUNT() call.
		 */
		XFS_SEND_UNMOUNT(mp, vfsp, error == 0 ? rvp : NULL,
			DM_RIGHT_NULL, 0, error, unmount_event_flags);
	}
	if (xfs_unmountfs_needed) {
		/*
		 * Call common unmount function to flush to disk
		 * and free the super block buffer & mount structures.
		 */
		xfs_unmountfs(mp, credp);
	}

	return XFS_ERROR(error);
}
/*
 * xfs_mount
 *
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in vfsp->vfs_super->s_bdev.
 */
STATIC int
xfs_mount(
	struct bhv_desc		*bhvp,
	struct xfs_mount_args	*args,
	cred_t			*credp)
{
	struct vfs		*vfsp = bhvtovfs(bhvp);
	struct bhv_desc		*p;
	struct xfs_mount	*mp = XFS_BHVTOM(bhvp);
	struct block_device	*ddev, *logdev, *rtdev;
	int			flags = 0, error;

	ddev = vfsp->vfs_super->s_bdev;
	logdev = rtdev = NULL;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (args->logname[0]) {
		error = xfs_blkdev_get(mp, args->logname, &logdev);
		if (error)
			return error;
	}
	if (args->rtname[0]) {
		error = xfs_blkdev_get(mp, args->rtname, &rtdev);
		if (error) {
			xfs_blkdev_put(logdev);
			return error;
		}

		/* The realtime device must be distinct from data and log. */
		if (rtdev == ddev || rtdev == logdev) {
			cmn_err(CE_WARN,
	"XFS: Cannot mount filesystem with identical rtdev and ddev/logdev.");
			xfs_blkdev_put(logdev);
			xfs_blkdev_put(rtdev);
			return EINVAL;
		}
	}

	/*
	 * Setup xfs_mount function vectors from available behaviors
	 * (falling back to the stub/default implementations when the
	 * corresponding behavior module is not loaded).
	 */
	p = vfs_bhv_lookup(vfsp, VFS_POSITION_DM);
	mp->m_dm_ops = p ? *(xfs_dmops_t *) vfs_bhv_custom(p) : xfs_dmcore_stub;
	p = vfs_bhv_lookup(vfsp, VFS_POSITION_QM);
	mp->m_qm_ops = p ? *(xfs_qmops_t *) vfs_bhv_custom(p) : xfs_qmcore_stub;
	p = vfs_bhv_lookup(vfsp, VFS_POSITION_IO);
	mp->m_io_ops = p ? *(xfs_ioops_t *) vfs_bhv_custom(p) : xfs_iocore_xfs;

	/*
	 * Setup xfs_mount buffer target pointers.  An internal log
	 * (logdev == NULL or == ddev) shares the data device's target.
	 */
	mp->m_ddev_targp = xfs_alloc_buftarg(ddev);
	if (rtdev)
		mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev);
	mp->m_logdev_targp = (logdev && logdev != ddev) ?
				xfs_alloc_buftarg(logdev) : mp->m_ddev_targp;

	/*
	 * Setup flags based on mount(2) options and then the superblock
	 */
	error = xfs_start_flags(vfsp, args, mp);
	if (error)
		goto error;
	error = xfs_readsb(mp);
	if (error)
		goto error;
	error = xfs_finish_flags(vfsp, args, mp);
	if (error) {
		xfs_freesb(mp);
		goto error;
	}

	/*
	 * Setup xfs_mount buffer target pointers based on superblock
	 * (block and sector sizes are only known after xfs_readsb).
	 */
	xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
			    mp->m_sb.sb_sectsize);
	if (logdev && logdev != ddev) {
		unsigned int	log_sector_size = BBSIZE;

		if (XFS_SB_VERSION_HASSECTOR(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		xfs_setsize_buftarg(mp->m_logdev_targp, mp->m_sb.sb_blocksize,
				    log_sector_size);
	}
	if (rtdev)
		xfs_setsize_buftarg(mp->m_rtdev_targp, mp->m_sb.sb_blocksize,
				    mp->m_sb.sb_blocksize);

	if (!(error = XFS_IOINIT(vfsp, args, flags)))
		return 0;

 error:
	/* Invalidate buffers on every target we attached, then close. */
	xfs_binval(mp->m_ddev_targp);
	if (logdev != NULL && logdev != ddev) {
		xfs_binval(mp->m_logdev_targp);
	}
	if (rtdev != NULL) {
		xfs_binval(mp->m_rtdev_targp);
	}
	xfs_unmountfs_close(mp, NULL);
	return error;
}