/*
 * Initialize realtime fields in the mount structure.
 */
static int
rtmount_init(
	xfs_mount_t	*mp,	/* file system mount structure */
	int		flags)
{
	xfs_buf_t	*bp;	/* buffer for last block of subvolume */
	xfs_daddr_t	d;	/* address of last block of subvolume */
	xfs_sb_t	*sbp;	/* filesystem superblock copy in mount */

	sbp = &mp->m_sb;
	if (sbp->sb_rblocks == 0)
		return 0;
	if (mp->m_rtdev_targp->dev == 0 && !(flags & LIBXFS_MOUNT_DEBUGGER)) {
		fprintf(stderr,
			_("%s: filesystem has a realtime subvolume\n"),
			progname);
		return -1;
	}
	mp->m_rsumlevels = sbp->sb_rextslog + 1;
	mp->m_rsumsize =
		(uint)sizeof(xfs_suminfo_t) * mp->m_rsumlevels *
		sbp->sb_rbmblocks;
	mp->m_rsumsize = roundup(mp->m_rsumsize, sbp->sb_blocksize);
	mp->m_rbmip = mp->m_rsumip = NULL;

	/*
	 * Allow debugger to be run without the realtime device present.
	 */
	if (flags & LIBXFS_MOUNT_DEBUGGER)
		return 0;

	/*
	 * Check that the realtime section is an ok size.
	 */
	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_rblocks) {
		fprintf(stderr, _("%s: realtime init - %llu != %llu\n"),
			progname, (unsigned long long) XFS_BB_TO_FSB(mp, d),
			(unsigned long long) mp->m_sb.sb_rblocks);
		return -1;
	}
	bp = libxfs_readbuf(mp->m_rtdev,
			d - XFS_FSB_TO_BB(mp, 1), XFS_FSB_TO_BB(mp, 1), 0, NULL);
	if (bp == NULL) {
		fprintf(stderr, _("%s: realtime size check failed\n"),
			progname);
		return -1;
	}
	libxfs_putbuf(bp);
	return 0;
}
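/*
 * A minimal sketch of the size check used above: converting the block
 * count to 512-byte basic blocks and back must round-trip exactly, or
 * the subvolume is not addressable. The shift constant and helper
 * names below are local stand-ins for the XFS_FSB_TO_BB /
 * XFS_BB_TO_FSB macros, assuming a 4096-byte filesystem block
 * (blkbb_log = sb_blocklog - 9 = 3); this is illustrative, not the
 * kernel code.
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_BLKBB_LOG	3	/* assumed: log2(blocksize / 512) */

static int64_t demo_fsb_to_bb(int64_t fsb)
{
	return fsb << DEMO_BLKBB_LOG;
}

static int64_t demo_bb_to_fsb(int64_t bb)
{
	/* round up to a whole filesystem block */
	return (bb + (1LL << DEMO_BLKBB_LOG) - 1) >> DEMO_BLKBB_LOG;
}

static void demo_rt_size_check(int64_t rblocks)
{
	int64_t d = demo_fsb_to_bb(rblocks);

	/* a clean round trip means the daddr range is addressable */
	assert(demo_bb_to_fsb(d) == rblocks);
}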
STATIC bool
xfs_dquot_buf_verify_crc(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	int			ndquots;
	int			i;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return true;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(mp,
					XFS_BB_TO_FSB(mp, bp->b_length));

	for (i = 0; i < ndquots; i++, d++) {
		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
				      XFS_DQUOT_CRC_OFF))
			return false;
		if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
			return false;
	}
	return true;
}
/*
 * Calculate the minimum valid log size for the given superblock configuration.
 * Used to calculate the minimum log size at mkfs time, and to determine if
 * the log is large enough or not at mount time. Returns the minimum size in
 * filesystem block size units.
 */
int
xfs_log_calc_minimum_size(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	tres = {0};
	int			max_logres;
	int			min_logblks = 0;
	int			lsunit = 0;

	xfs_log_get_max_trans_res(mp, &tres);

	max_logres = xfs_log_calc_unit_res(mp, tres.tr_logres);
	if (tres.tr_logcount > 1)
		max_logres *= tres.tr_logcount;

	if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1)
		lsunit = BTOBB(mp->m_sb.sb_logsunit);

	/*
	 * Two factors should be taken into account for calculating the minimum
	 * log space.
	 * 1) The fundamental limitation is that no single transaction can be
	 *    larger than half the size of the log.
	 *
	 *    From mkfs.xfs, this is considered by the XFS_MIN_LOG_FACTOR
	 *    define, which is set to 3. That means we can fit two maximally
	 *    sized transactions in the log. We'll use this same value here.
	 *
	 * 2) If the lsunit option is specified, a transaction requires 2 LSU
	 *    for the reservation because there are two log writes that can
	 *    require padding - the transaction data and the commit record,
	 *    which are written separately and both can require padding to the
	 *    LSU. Consider that we can have an active CIL reservation holding
	 *    2*LSU, but the CIL is not over a push threshold. In this case,
	 *    if we don't have enough log space for at least one new
	 *    transaction, which includes another 2*LSU in the reservation,
	 *    the log space grant procedure will loop forever in
	 *    xlog_grant_head_wait().
	 *
	 * Hence the log size needs to be able to contain two maximally sized
	 * and padded transactions, which is (2 * (2 * LSU + maxlres)).
	 *
	 * Also, the log size should be a multiple of the log stripe unit, so
	 * round it up to the lsunit boundary if lsunit is specified.
	 */
	if (lsunit) {
		min_logblks = roundup_64(BTOBB(max_logres), lsunit) +
			      2 * lsunit;
	} else
		min_logblks = BTOBB(max_logres) + 2 * BBSIZE;
	min_logblks *= XFS_MIN_LOG_FACTOR;

	return XFS_BB_TO_FSB(mp, min_logblks);
}
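/*
 * A worked sketch of the sizing rule above, with made-up inputs (a
 * 512 KiB maximum reservation and a 32 KiB log stripe unit). The
 * helpers below mirror BTOBB and roundup_64 in shape only; they are
 * not the kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BBSIZE		512
#define DEMO_MIN_LOG_FACTOR	3	/* two max transactions must fit */

static uint64_t demo_btobb(uint64_t bytes)
{
	return (bytes + DEMO_BBSIZE - 1) / DEMO_BBSIZE;
}

static uint64_t demo_roundup(uint64_t x, uint64_t y)
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	uint64_t max_logres = 512 * 1024;		/* bytes, assumed */
	uint64_t lsunit = demo_btobb(32 * 1024);	/* basic blocks */
	uint64_t min_logblks;

	if (lsunit)
		min_logblks = demo_roundup(demo_btobb(max_logres), lsunit) +
			      2 * lsunit;
	else
		min_logblks = demo_btobb(max_logres) + 2 * DEMO_BBSIZE;
	min_logblks *= DEMO_MIN_LOG_FACTOR;

	/* 1024 rounded up to 64-block units, plus 2*64, times 3 = 3456 */
	printf("min log size: %llu basic blocks\n",
	       (unsigned long long)min_logblks);
	return 0;
}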
/*
 * trim a range of the filesystem.
 *
 * Note: the parameters passed from userspace are byte ranges into the
 * filesystem which do not match the format we use for filesystem block
 * addressing. FSB addressing is sparse (AGNO|AGBNO), while the incoming format
 * is a linear address range. Hence we need to use DADDR based conversions and
 * comparisons for determining the correct offset and regions to trim.
 */
int
xfs_ioc_trim(
	struct xfs_mount		*mp,
	struct fstrim_range __user	*urange)
{
	struct request_queue	*q = mp->m_ddev_targp->bt_bdev->bd_disk->queue;
	unsigned int		granularity = q->limits.discard_granularity;
	struct fstrim_range	range;
	xfs_daddr_t		start, end, minlen;
	xfs_agnumber_t		start_agno, end_agno, agno;
	__uint64_t		blocks_trimmed = 0;
	int			error, last_error = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (!blk_queue_discard(q))
		return -XFS_ERROR(EOPNOTSUPP);
	if (copy_from_user(&range, urange, sizeof(range)))
		return -XFS_ERROR(EFAULT);

	/*
	 * Truncating down the len isn't actually quite correct, but using
	 * BBTOB would mean we trivially get overflows for values
	 * of ULLONG_MAX or slightly lower.  And ULLONG_MAX is the default
	 * used by the fstrim application.  In the end it really doesn't
	 * matter as trimming blocks is an advisory interface.
	 */
	start = BTOBB(range.start);
	end = start + BTOBBT(range.len) - 1;
	minlen = BTOBB(max_t(u64, granularity, range.minlen));

	if (XFS_BB_TO_FSB(mp, start) >= mp->m_sb.sb_dblocks)
		return -XFS_ERROR(EINVAL);
	if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1)
		end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1;

	start_agno = xfs_daddr_to_agno(mp, start);
	end_agno = xfs_daddr_to_agno(mp, end);

	for (agno = start_agno; agno <= end_agno; agno++) {
		error = -xfs_trim_extents(mp, agno, start, end, minlen,
					  &blocks_trimmed);
		if (error)
			last_error = error;
	}

	if (last_error)
		return last_error;

	range.len = XFS_FSB_TO_B(mp, blocks_trimmed);
	if (copy_to_user(urange, &range, sizeof(range)))
		return -XFS_ERROR(EFAULT);
	return 0;
}
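/*
 * The two byte-to-basic-block conversions above differ at partial
 * sectors: BTOBB rounds up, BTOBBT truncates. Truncating the length
 * also avoids overflow when userspace passes ULLONG_MAX, as the
 * comment in xfs_ioc_trim() notes. A self-contained sketch with
 * stand-in names, not the kernel macros:
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_BBSHIFT	9			/* 512-byte basic blocks */
#define DEMO_BBMASK	((1ULL << DEMO_BBSHIFT) - 1)

static uint64_t demo_btobb(uint64_t bytes)	/* round up */
{
	return (bytes + DEMO_BBMASK) >> DEMO_BBSHIFT;
}

static uint64_t demo_btobbt(uint64_t bytes)	/* truncate */
{
	return bytes >> DEMO_BBSHIFT;
}

static void demo_trim_conversions(void)
{
	/* a start offset mid-sector rounds up to the next whole sector */
	assert(demo_btobb(513) == 2);
	/* a length covering a partial trailing sector drops it */
	assert(demo_btobbt(1023) == 1);
	/* truncation cannot overflow even for UINT64_MAX */
	assert(demo_btobbt(UINT64_MAX) == (UINT64_MAX >> DEMO_BBSHIFT));
}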
/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be potentially invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't report the error otherwise to avoid warnings during log
 * recovery and we don't get unnecessary panics on debug kernels. We use EIO
 * here because all we want to do is say readahead failed; there is no-one to
 * report the error to, so this will distinguish it from a non-ra verifier
 * failure. Changes to this readahead error behaviour also need to be
 * reflected in xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	xfs_agnumber_t	agno;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;
		xfs_agino_t	unlinked_ino;

		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
		unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			xfs_dinode_good_version(mp, dip->di_version) &&
			(unlinked_ino == NULLAGINO ||
			 xfs_verify_agino(mp, agno, unlinked_ino));
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				xfs_buf_ioerror(bp, -EIO);
				return;
			}

#ifdef DEBUG
			xfs_alert(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
#endif
			xfs_buf_verifier_error(bp, -EFSCORRUPTED,
					__func__, dip, sizeof(*dip),
					NULL);
			return;
		}
	}
}
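/*
 * A sketch of the buffer walk above: the inode count is the buffer's
 * filesystem-block count times inodes per block, and inode i starts
 * at (i << sb_inodelog) bytes into the buffer. Plain numbers stand in
 * for the superblock geometry fields (assumed 512-byte inodes in
 * 4096-byte blocks); this is illustrative only.
 */
#include <assert.h>

static void demo_inode_buf_walk(void)
{
	int fsb_count = 2;	/* buffer spans two filesystem blocks */
	int inopblock = 8;	/* 4096 / 512 inodes per block, assumed */
	int inodelog = 9;	/* log2 of the inode size, assumed */
	int ni = fsb_count * inopblock;
	int i;

	for (i = 0; i < ni; i++) {
		int offset = i << inodelog;

		/* every inode core lies inside the buffer */
		assert(offset + (1 << inodelog) <= fsb_count * 4096);
	}
}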
/* Execute a getfsmap query against the realtime device. */
STATIC int
__xfs_getfsmap_rtdev(
	struct xfs_trans		*tp,
	struct xfs_fsmap		*keys,
	int				(*query_fn)(struct xfs_trans *,
						    struct xfs_getfsmap_info *),
	struct xfs_getfsmap_info	*info)
{
	struct xfs_mount		*mp = tp->t_mountp;
	xfs_fsblock_t			start_fsb;
	xfs_fsblock_t			end_fsb;
	xfs_daddr_t			eofs;
	int				error = 0;

	eofs = XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks);
	if (keys[0].fmr_physical >= eofs)
		return 0;
	if (keys[1].fmr_physical >= eofs)
		keys[1].fmr_physical = eofs - 1;
	start_fsb = XFS_BB_TO_FSBT(mp, keys[0].fmr_physical);
	end_fsb = XFS_BB_TO_FSB(mp, keys[1].fmr_physical);

	/* Set up search keys */
	info->low.rm_startblock = start_fsb;
	error = xfs_fsmap_owner_to_rmap(&info->low, &keys[0]);
	if (error)
		return error;
	info->low.rm_offset = XFS_BB_TO_FSBT(mp, keys[0].fmr_offset);
	info->low.rm_blockcount = 0;
	xfs_getfsmap_set_irec_flags(&info->low, &keys[0]);

	info->high.rm_startblock = end_fsb;
	error = xfs_fsmap_owner_to_rmap(&info->high, &keys[1]);
	if (error)
		return error;
	info->high.rm_offset = XFS_BB_TO_FSBT(mp, keys[1].fmr_offset);
	info->high.rm_blockcount = 0;
	xfs_getfsmap_set_irec_flags(&info->high, &keys[1]);

	trace_xfs_fsmap_low_key(mp, info->dev, info->agno, &info->low);
	trace_xfs_fsmap_high_key(mp, info->dev, info->agno, &info->high);

	return query_fn(tp, info);
}
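/*
 * The key clamping above in miniature: a fsmap query window in daddr
 * units is rejected when it starts at or past end-of-device, and has
 * its high key pulled back to the last addressable sector otherwise.
 * This helper is illustrative only, not the kernel code.
 */
#include <stdbool.h>
#include <stdint.h>

static bool demo_clamp_query(uint64_t *low, uint64_t *high, uint64_t eofs)
{
	if (*low >= eofs)
		return false;	/* nothing to report */
	if (*high >= eofs)
		*high = eofs - 1;
	return true;
}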
/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be potentially invalid.
 *
 * If the readahead buffer is invalid, we don't want to mark it with an error,
 * but we do want to clear the DONE status of the buffer so that a followup
 * read will re-read it from disk. This will ensure that we don't get
 * unnecessary warnings during log recovery and we don't get unnecessary
 * panics on debug kernels.
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (struct xfs_dinode *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			XFS_DINODE_GOOD_VERSION(dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				return;
			}

			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			xfs_verifier_error(bp);
#ifdef DEBUG
			xfs_alert(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
#endif
		}
	}
	xfs_inobp_check(mp, bp);
}
/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
	xfs_inode_t		*ip,		/* xfs incore inode pointer */
	struct getbmapx		*out,		/* output structure */
	int			prealloced,	/* this is a file with
						 * preallocated data space */
	__int64_t		end,		/* last block requested */
	xfs_fsblock_t		startblock)
{
	__int64_t		fixlen;
	xfs_mount_t		*mp;		/* file system mount point */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_extnum_t		lastx;		/* last extent pointer */
	xfs_fileoff_t		fileblock;

	if (startblock == HOLESTARTBLOCK) {
		mp = ip->i_mount;
		out->bmv_block = -1;
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		fixlen -= out->bmv_offset;
		if (prealloced && out->bmv_offset + out->bmv_length == end) {
			/* Came to hole at EOF. Trim it. */
			if (fixlen <= 0)
				return 0;
			out->bmv_length = fixlen;
		}
	} else {
		if (startblock == DELAYSTARTBLOCK)
			out->bmv_block = -2;
		else
			out->bmv_block = xfs_fsb_to_db(ip, startblock);
		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
		if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
		    (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
			out->bmv_oflags |= BMV_OF_LAST;
	}

	return 1;
}
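/*
 * The EOF-hole trim above, reduced to its arithmetic: the reported
 * hole length is the file size rounded up to a filesystem block,
 * expressed in basic blocks, minus the hole's starting offset.
 * Assumed demo geometry: 4096-byte blocks, 512-byte basic blocks.
 * Illustrative only.
 */
#include <stdint.h>

static int64_t demo_eof_hole_len(int64_t isize_bytes, int64_t offset_bb)
{
	int64_t size_fsb = (isize_bytes + 4095) / 4096;	/* like XFS_B_TO_FSB */
	int64_t size_bb = size_fsb * 8;			/* like XFS_FSB_TO_BB */

	return size_bb - offset_bb;	/* <= 0 means nothing to report */
}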
/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
STATIC int
xfs_qm_scall_setqlim(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	uint			type,
	fs_disk_quota_t		*newlim)
{
	xfs_disk_dquot_t	*ddq;
	xfs_dquot_t		*dqp;
	xfs_trans_t		*tp;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);

	if ((newlim->d_fieldmask &
	    (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK|FS_DQ_WARNS_MASK)) == 0)
		return (0);

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * (We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening). (XXXThis doesn't currently happen
	 * because we take the vfslock before calling xfs_qm_sysent).
	 */
	mutex_lock(&(XFS_QI_QOFFLOCK(mp)));

	/*
	 * Get the dquot (locked), and join it to the transaction.
	 * Allocate the dquot if this doesn't exist.
	 */
	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
		mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
		ASSERT(error != ENOENT);
		return (error);
	}
	xfs_dqtrace_entry(dqp, "Q_SETQLIM: AFT DQGET");
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 */
	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			mp->m_quotainfo->qi_bhardlimit = hard;
			mp->m_quotainfo->qi_bsoftlimit = soft;
		}
	} else {
		qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft);
	}
	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			mp->m_quotainfo->qi_rtbhardlimit = hard;
			mp->m_quotainfo->qi_rtbsoftlimit = soft;
		}
	} else {
		qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
	}

	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			mp->m_quotainfo->qi_ihardlimit = hard;
			mp->m_quotainfo->qi_isoftlimit = soft;
		}
	} else {
		qdprintk("ihard %Ld < isoft %Ld\n", hard, soft);
	}

	/*
	 * Update warnings counter(s) if requested
	 */
	if (newlim->d_fieldmask & FS_DQ_BWARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
	if (newlim->d_fieldmask & FS_DQ_IWARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used. Ditto for the default
		 * soft and hard limit values (already done, above), and
		 * for warnings.
		 */
		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
			mp->m_quotainfo->qi_btimelimit = newlim->d_btimer;
			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
		}
		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
			mp->m_quotainfo->qi_itimelimit = newlim->d_itimer;
			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
		}
		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
			mp->m_quotainfo->qi_rtbtimelimit = newlim->d_rtbtimer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
		}
		if (newlim->d_fieldmask & FS_DQ_BWARNS)
			mp->m_quotainfo->qi_bwarnlimit = newlim->d_bwarns;
		if (newlim->d_fieldmask & FS_DQ_IWARNS)
			mp->m_quotainfo->qi_iwarnlimit = newlim->d_iwarns;
		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
			mp->m_quotainfo->qi_rtbwarnlimit = newlim->d_rtbwarns;
	} else {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);
	xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT");
	xfs_trans_commit(tp, 0, NULL);
	xfs_qm_dqprint(dqp);
	xfs_qm_dqrele(dqp);
	mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));

	return (0);
}
/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	xfs_inode_t		*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	xfs_bmap_format_t	formatter,	/* format to user */
	void			*arg)		/* formatter arg */
{
	__int64_t		bmvend;		/* last block requested */
	int			error = 0;	/* return value */
	__int64_t		fixlen;		/* length for -1 case */
	int			i;		/* extent number */
	int			lock;		/* lock state */
	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
	xfs_mount_t		*mp;		/* file system mount point */
	int			nex;		/* # of user extents can do */
	int			nexleft;	/* # of user extents left */
	int			subnex;		/* # of bmapi's can do */
	int			nmap;		/* number of map entries */
	struct getbmapx		*out;		/* output structure */
	int			whichfork;	/* data or attr fork */
	int			prealloced;	/* this is a file with
						 * preallocated data space */
	int			iflags;		/* interface flags */
	int			bmapi_flags;	/* flags for xfs_bmapi */
	int			cur_ext = 0;

	mp = ip->i_mount;
	iflags = bmv->bmv_iflags;
	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;

	if (whichfork == XFS_ATTR_FORK) {
		if (XFS_IFORK_Q(ip)) {
			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
				return XFS_ERROR(EINVAL);
		} else if (unlikely(
			   ip->i_d.di_aformat != 0 &&
			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}

		prealloced = 0;
		fixlen = 1LL << 32;
	} else {
		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
			return XFS_ERROR(EINVAL);

		if (xfs_get_extsz_hint(ip) ||
		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
	}

	if (bmv->bmv_length == -1) {
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
		bmv->bmv_length =
			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
	} else if (bmv->bmv_length == 0) {
		bmv->bmv_entries = 0;
		return 0;
	} else if (bmv->bmv_length < 0) {
		return XFS_ERROR(EINVAL);
	}

	nex = bmv->bmv_count - 1;
	if (nex <= 0)
		return XFS_ERROR(EINVAL);
	bmvend = bmv->bmv_offset + bmv->bmv_length;

	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
		return XFS_ERROR(ENOMEM);
	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
	if (!out)
		return XFS_ERROR(ENOMEM);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	if (whichfork == XFS_DATA_FORK) {
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation. These are not removed
			 * until the release function is called or the inode
			 * is inactivated. Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		lock = xfs_ilock_data_map_shared(ip);
	} else {
		lock = xfs_ilock_attr_map_shared(ip);
	}

	/*
	 * Don't let nex be bigger than the number of extents
	 * we can have assuming alternating holes and real extents.
	 */
	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

	bmapi_flags = xfs_bmapi_aflag(whichfork);
	if (!(iflags & BMV_IF_PREALLOC))
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	/*
	 * Allocate enough space to handle "subnex" maps at a time.
	 */
	error = ENOMEM;
	subnex = 16;
	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
	if (!map)
		goto out_unlock_ilock;

	bmv->bmv_entries = 0;

	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
		error = 0;
		goto out_free_map;
	}

	nexleft = nex;

	do {
		nmap = (nexleft > subnex) ? subnex : nexleft;
		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
				       map, &nmap, bmapi_flags);
		if (error)
			goto out_free_map;
		ASSERT(nmap <= subnex);

		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
			out[cur_ext].bmv_oflags = 0;
			if (map[i].br_state == XFS_EXT_UNWRITTEN)
				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
			else if (map[i].br_startblock == DELAYSTARTBLOCK)
				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
			out[cur_ext].bmv_offset =
				XFS_FSB_TO_BB(mp, map[i].br_startoff);
			out[cur_ext].bmv_length =
				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			out[cur_ext].bmv_unused1 = 0;
			out[cur_ext].bmv_unused2 = 0;

			/*
			 * delayed allocation extents that start beyond EOF can
			 * occur due to speculative EOF allocation when the
			 * delalloc extent is larger than the largest freespace
			 * extent at conversion time. These extents cannot be
			 * converted by data writeback, so can exist here even
			 * if we are not supposed to be finding delalloc
			 * extents.
			 */
			if (map[i].br_startblock == DELAYSTARTBLOCK &&
			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
				ASSERT((iflags & BMV_IF_DELALLOC) != 0);

			if (map[i].br_startblock == HOLESTARTBLOCK &&
			    whichfork == XFS_ATTR_FORK) {
				/* came to the end of attribute fork */
				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
				goto out_free_map;
			}

			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
					prealloced, bmvend,
					map[i].br_startblock))
				goto out_free_map;

			bmv->bmv_offset =
				out[cur_ext].bmv_offset +
				out[cur_ext].bmv_length;
			bmv->bmv_length =
				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);

			/*
			 * In case we don't want to return the hole,
			 * don't increase cur_ext so that we can reuse
			 * it in the next loop.
			 */
			if ((iflags & BMV_IF_NO_HOLES) &&
			    map[i].br_startblock == HOLESTARTBLOCK) {
				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
				continue;
			}

			nexleft--;
			bmv->bmv_entries++;
			cur_ext++;
		}
	} while (nmap && nexleft && bmv->bmv_length);

 out_free_map:
	kmem_free(map);
 out_unlock_ilock:
	xfs_iunlock(ip, lock);
 out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	for (i = 0; i < cur_ext; i++) {
		int full = 0;	/* user array is full */

		/* format results & advance arg */
		error = formatter(&arg, &out[i], &full);
		if (error || full)
			break;
	}

	kmem_free(out);
	return error;
}
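/*
 * The clamp on nex above follows from interleaving: n on-disk extents
 * can be separated by at most n + 1 holes, so a worst-case getbmap
 * pass emits 2n + 1 records. Spelled out as an illustrative helper:
 */
static int demo_max_bmap_records(int nextents)
{
	return 2 * nextents + 1;
}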
/*
 * Process a bmap update intent item that was recovered from the log.
 * We need to update some inode's bmbt.
 */
int
xfs_bui_recover(
	struct xfs_mount		*mp,
	struct xfs_bui_log_item		*buip,
	struct xfs_defer_ops		*dfops)
{
	int				error = 0;
	unsigned int			bui_type;
	struct xfs_map_extent		*bmap;
	xfs_fsblock_t			startblock_fsb;
	xfs_fsblock_t			inode_fsb;
	xfs_filblks_t			count;
	bool				op_ok;
	struct xfs_bud_log_item		*budp;
	enum xfs_bmap_intent_type	type;
	int				whichfork;
	xfs_exntst_t			state;
	struct xfs_trans		*tp;
	struct xfs_inode		*ip = NULL;
	struct xfs_bmbt_irec		irec;

	ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));

	/* Only one mapping operation per BUI... */
	if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
		set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
		xfs_bui_release(buip);
		return -EIO;
	}

	/*
	 * First check the validity of the extent described by the
	 * BUI.  If anything is bad, then toss the BUI.
	 */
	bmap = &buip->bui_format.bui_extents[0];
	startblock_fsb = XFS_BB_TO_FSB(mp,
			   XFS_FSB_TO_DADDR(mp, bmap->me_startblock));
	inode_fsb = XFS_BB_TO_FSB(mp, XFS_FSB_TO_DADDR(mp,
			XFS_INO_TO_FSB(mp, bmap->me_owner)));
	switch (bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		op_ok = true;
		break;
	default:
		op_ok = false;
		break;
	}
	if (!op_ok || startblock_fsb == 0 ||
	    bmap->me_len == 0 ||
	    inode_fsb == 0 ||
	    startblock_fsb >= mp->m_sb.sb_dblocks ||
	    bmap->me_len >= mp->m_sb.sb_agblocks ||
	    inode_fsb >= mp->m_sb.sb_dblocks ||
	    (bmap->me_flags & ~XFS_BMAP_EXTENT_FLAGS)) {
		/*
		 * This will pull the BUI from the AIL and
		 * free the memory associated with it.
		 */
		set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
		xfs_bui_release(buip);
		return -EIO;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
	if (error)
		return error;
	budp = xfs_trans_get_bud(tp, buip);

	/* Grab the inode. */
	error = xfs_iget(mp, tp, bmap->me_owner, 0, XFS_ILOCK_EXCL, &ip);
	if (error)
		goto err_inode;

	if (VFS_I(ip)->i_nlink == 0)
		xfs_iflags_set(ip, XFS_IRECOVERY);

	/* Process deferred bmap item. */
	state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	whichfork = (bmap->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	bui_type = bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK;
	switch (bui_type) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		type = bui_type;
		break;
	default:
		error = -EFSCORRUPTED;
		goto err_inode;
	}
	xfs_trans_ijoin(tp, ip, 0);

	count = bmap->me_len;
	error = xfs_trans_log_finish_bmap_update(tp, budp, dfops, type,
			ip, whichfork, bmap->me_startoff,
			bmap->me_startblock, &count, state);
	if (error)
		goto err_inode;

	if (count > 0) {
		ASSERT(type == XFS_BMAP_UNMAP);
		irec.br_startblock = bmap->me_startblock;
		irec.br_blockcount = count;
		irec.br_startoff = bmap->me_startoff;
		irec.br_state = state;
		error = xfs_bmap_unmap_extent(tp->t_mountp, dfops, ip, &irec);
		if (error)
			goto err_inode;
	}

	set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);

	return error;

err_inode:
	xfs_trans_cancel(tp);
	if (ip) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		IRELE(ip);
	}
	return error;
}
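/*
 * The validity test above leans on the same round-trip idea as the
 * mount-time size checks: a startblock recovered from the log is
 * converted FSB -> daddr -> FSB and then bounds-checked, so a garbage
 * value falls out as zero or past sb_dblocks. The sketch below only
 * shows the shape of the bounds test; it assumes 8 basic blocks per
 * filesystem block and linear addressing, whereas real FSB numbers
 * are sparse AGNO|AGBNO pairs that the daddr conversion decodes.
 */
#include <stdbool.h>
#include <stdint.h>

static bool demo_intent_startblock_ok(uint64_t startblock, uint64_t dblocks)
{
	uint64_t daddr = startblock * 8;	/* like XFS_FSB_TO_DADDR */
	uint64_t fsb = (daddr + 7) / 8;		/* like XFS_BB_TO_FSB */

	return fsb != 0 && fsb < dblocks;
}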
/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
int
xfs_cui_recover(
	struct xfs_mount		*mp,
	struct xfs_cui_log_item		*cuip)
{
	int				i;
	int				error = 0;
	unsigned int			refc_type;
	struct xfs_phys_extent		*refc;
	xfs_fsblock_t			startblock_fsb;
	bool				op_ok;
	struct xfs_cud_log_item		*cudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	enum xfs_refcount_intent_type	type;
	xfs_fsblock_t			firstfsb;
	xfs_fsblock_t			new_fsb;
	xfs_extlen_t			new_len;
	struct xfs_bmbt_irec		irec;
	struct xfs_defer_ops		dfops;
	bool				requeue_only = false;

	ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags));

	/*
	 * First check the validity of the extents described by the
	 * CUI.  If any are bad, then assume that all are bad and
	 * just toss the CUI.
	 */
	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		refc = &cuip->cui_format.cui_extents[i];
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, refc->pe_startblock));
		switch (refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
		case XFS_REFCOUNT_INCREASE:
		case XFS_REFCOUNT_DECREASE:
		case XFS_REFCOUNT_ALLOC_COW:
		case XFS_REFCOUNT_FREE_COW:
			op_ok = true;
			break;
		default:
			op_ok = false;
			break;
		}
		if (!op_ok || startblock_fsb == 0 ||
		    refc->pe_len == 0 ||
		    startblock_fsb >= mp->m_sb.sb_dblocks ||
		    refc->pe_len >= mp->m_sb.sb_agblocks ||
		    (refc->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)) {
			/*
			 * This will pull the CUI from the AIL and
			 * free the memory associated with it.
			 */
			set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
			xfs_cui_release(cuip);
			return -EIO;
		}
	}

	/*
	 * Under normal operation, refcount updates are deferred, so we
	 * wouldn't be adding them directly to a transaction.  All
	 * refcount updates manage reservation usage internally and
	 * dynamically by deferring work that won't fit in the
	 * transaction.  Normally, any work that needs to be deferred
	 * gets attached to the same defer_ops that scheduled the
	 * refcount update.  However, we're in log recovery here, so we
	 * create our own defer_ops and use that to finish up any work
	 * that doesn't fit.
	 */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error)
		return error;
	cudp = xfs_trans_get_cud(tp, cuip);

	xfs_defer_init(&dfops, &firstfsb);
	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		refc = &cuip->cui_format.cui_extents[i];
		refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
		switch (refc_type) {
		case XFS_REFCOUNT_INCREASE:
		case XFS_REFCOUNT_DECREASE:
		case XFS_REFCOUNT_ALLOC_COW:
		case XFS_REFCOUNT_FREE_COW:
			type = refc_type;
			break;
		default:
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		if (requeue_only) {
			new_fsb = refc->pe_startblock;
			new_len = refc->pe_len;
		} else
			error = xfs_trans_log_finish_refcount_update(tp, cudp,
				&dfops, type, refc->pe_startblock, refc->pe_len,
				&new_fsb, &new_len, &rcur);
		if (error)
			goto abort_error;

		/* Requeue what we didn't finish. */
		if (new_len > 0) {
			irec.br_startblock = new_fsb;
			irec.br_blockcount = new_len;
			switch (type) {
			case XFS_REFCOUNT_INCREASE:
				error = xfs_refcount_increase_extent(
						tp->t_mountp, &dfops, &irec);
				break;
			case XFS_REFCOUNT_DECREASE:
				error = xfs_refcount_decrease_extent(
						tp->t_mountp, &dfops, &irec);
				break;
			case XFS_REFCOUNT_ALLOC_COW:
				error = xfs_refcount_alloc_cow_extent(
						tp->t_mountp, &dfops,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			case XFS_REFCOUNT_FREE_COW:
				error = xfs_refcount_free_cow_extent(
						tp->t_mountp, &dfops,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			default:
				ASSERT(0);
			}
			if (error)
				goto abort_error;
			requeue_only = true;
		}
	}

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto abort_defer;
	set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
	error = xfs_trans_commit(tp);
	return error;

abort_error:
	xfs_refcount_finish_one_cleanup(tp, rcur, error);
abort_defer:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	return error;
}
/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
int
xfs_qm_scall_setqlim(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	fs_disk_quota_t		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dquot	*dqp;
	struct xfs_trans	*tp;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (newlim->d_fieldmask & ~XFS_DQ_MASK)
		return -EINVAL;
	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
		return 0;

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening.
	 */
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * Get the dquot (locked) before we start, as we need to do a
	 * transaction to allocate it if it doesn't exist. Once we have the
	 * dquot, unlock it so we can start the next transaction safely. We
	 * hold a reference to the dquot, so it's safe to do this unlock/lock
	 * without it being reclaimed in the mean time.
	 */
	error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp);
	if (error) {
		ASSERT(error != -ENOENT);
		goto out_unlock;
	}
	xfs_dqunlock(dqp);

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_setqlim, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		goto out_rele;
	}

	xfs_dqlock(dqp);
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 */
	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		xfs_dquot_set_prealloc_limits(dqp);
		if (id == 0) {
			q->qi_bhardlimit = hard;
			q->qi_bsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
	}
	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_rtbhardlimit = hard;
			q->qi_rtbsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
	}

	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_ihardlimit = hard;
			q->qi_isoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
	}

	/*
	 * Update warnings counter(s) if requested
	 */
	if (newlim->d_fieldmask & FS_DQ_BWARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
	if (newlim->d_fieldmask & FS_DQ_IWARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used.  Ditto for the default
		 * soft and hard limit values (already done, above), and
		 * for warnings.
		 */
		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
			q->qi_btimelimit = newlim->d_btimer;
			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
		}
		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
			q->qi_itimelimit = newlim->d_itimer;
			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
		}
		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
			q->qi_rtbtimelimit = newlim->d_rtbtimer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
		}
		if (newlim->d_fieldmask & FS_DQ_BWARNS)
			q->qi_bwarnlimit = newlim->d_bwarns;
		if (newlim->d_fieldmask & FS_DQ_IWARNS)
			q->qi_iwarnlimit = newlim->d_iwarns;
		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
			q->qi_rtbwarnlimit = newlim->d_rtbwarns;
	} else {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp, 0);

out_rele:
	xfs_qm_dqrele(dqp);
out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}
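/*
 * The limit rule applied three times above (blocks, rt blocks,
 * inodes), factored out as a predicate: a new pair is accepted only
 * when the hard limit is unset (zero) or at least the soft limit.
 * Illustrative helper, not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>

static bool demo_limits_ok(uint64_t hard, uint64_t soft)
{
	return hard == 0 || hard >= soft;
}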
/*
 * Mount structure initialization, provides a filled-in xfs_mount_t
 * such that the numerous XFS_* macros can be used.  If dev is zero,
 * no IO will be performed (no size checks, read root inodes).
 */
xfs_mount_t *
libxfs_mount(
	xfs_mount_t	*mp,
	xfs_sb_t	*sb,
	dev_t		dev,
	dev_t		logdev,
	dev_t		rtdev,
	int		flags)
{
	xfs_daddr_t	d;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;
	int		error;

	libxfs_buftarg_init(mp, dev, logdev, rtdev);

	mp->m_flags = (LIBXFS_MOUNT_32BITINODES|LIBXFS_MOUNT_32BITINOOPT);
	mp->m_sb = *sb;
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_KERNEL);
	sbp = &(mp->m_sb);

	xfs_sb_mount_common(mp, sb);

	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_ialloc_compute_maxlevels(mp);

	if (sbp->sb_imax_pct) {
		/* Make sure the maximum inode count is a multiple of the
		 * units we allocate inodes in.
		 */
		mp->m_maxicount = (sbp->sb_dblocks * sbp->sb_imax_pct) / 100;
		mp->m_maxicount = ((mp->m_maxicount / mp->m_ialloc_blks) *
				  mp->m_ialloc_blks) << sbp->sb_inopblog;
	} else
		mp->m_maxicount = 0;

	mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;

	/*
	 * Set whether we're using stripe alignment.
	 */
	if (xfs_sb_version_hasdalign(&mp->m_sb)) {
		mp->m_dalign = sbp->sb_unit;
		mp->m_swidth = sbp->sb_width;
	}

	/*
	 * Set whether we're using inode alignment.
	 */
	if (xfs_sb_version_hasalign(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >=
	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
	else
		mp->m_inoalign_mask = 0;
	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment
	 */
	if (mp->m_dalign && mp->m_inoalign_mask &&
	    !(mp->m_dalign & mp->m_inoalign_mask))
		mp->m_sinoalign = mp->m_dalign;
	else
		mp->m_sinoalign = 0;

	/*
	 * Check that the data (and log if separate) are an ok size.
	 */
	d = (xfs_daddr_t) XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		fprintf(stderr, _("%s: size check failed\n"), progname);
		if (!(flags & LIBXFS_MOUNT_DEBUGGER))
			return NULL;
	}

	/*
	 * We automatically convert v1 inodes to v2 inodes now, so if
	 * the NLINK bit is not set we can't operate on the filesystem.
	 */
	if (!(sbp->sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
		fprintf(stderr, _(
	"%s: V1 inodes unsupported. Please try an older xfsprogs.\n"),
				 progname);
		exit(1);
	}

	/* Check for supported directory formats */
	if (!(sbp->sb_versionnum & XFS_SB_VERSION_DIRV2BIT)) {
		fprintf(stderr, _(
	"%s: V1 directories unsupported. Please try an older xfsprogs.\n"),
				 progname);
		exit(1);
	}

	/* check for unsupported other features */
	if (!xfs_sb_good_version(sbp)) {
		fprintf(stderr, _(
	"%s: Unsupported features detected. Please try a newer xfsprogs.\n"),
				 progname);
		exit(1);
	}

	xfs_da_mount(mp);

	if (xfs_sb_version_hasattr2(&mp->m_sb))
		mp->m_flags |= LIBXFS_MOUNT_ATTR2;

	/* Initialize the precomputed transaction reservations values */
	xfs_trans_init(mp);

	if (dev == 0)	/* maxtrres, we have no device so leave now */
		return mp;

	bp = libxfs_readbuf(mp->m_dev,
			d - XFS_FSS_TO_BB(mp, 1), XFS_FSS_TO_BB(mp, 1),
			!(flags & LIBXFS_MOUNT_DEBUGGER), NULL);
	if (!bp) {
		fprintf(stderr, _("%s: data size check failed\n"), progname);
		if (!(flags & LIBXFS_MOUNT_DEBUGGER))
			return NULL;
	} else
		libxfs_putbuf(bp);

	if (mp->m_logdev_targp->dev &&
	    mp->m_logdev_targp->dev != mp->m_ddev_targp->dev) {
		d = (xfs_daddr_t) XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
		if ((XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) ||
		    (!(bp = libxfs_readbuf(mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_BB(mp, 1),
					!(flags & LIBXFS_MOUNT_DEBUGGER),
					NULL)))) {
			fprintf(stderr, _("%s: log size checks failed\n"),
					progname);
			if (!(flags & LIBXFS_MOUNT_DEBUGGER))
				return NULL;
		}
		if (bp)
			libxfs_putbuf(bp);
	}

	/* Initialize realtime fields in the mount structure */
	if (rtmount_init(mp, flags)) {
		fprintf(stderr, _("%s: realtime device init failed\n"),
			progname);
		return NULL;
	}

	error = libxfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		fprintf(stderr, _("%s: perag init failed\n"),
			progname);
		exit(1);
	}

	return mp;
}
int
xfs_qm_scall_setqlim(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	uint			type,
	fs_disk_quota_t		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_disk_dquot_t	*ddq;
	xfs_dquot_t		*dqp;
	xfs_trans_t		*tp;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (newlim->d_fieldmask & ~XFS_DQ_MASK)
		return EINVAL;
	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
		return 0;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}

	mutex_lock(&q->qi_quotaofflock);

	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
		ASSERT(error != ENOENT);
		goto out_unlock;
	}
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_bhardlimit = hard;
			q->qi_bsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
	}
	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_rtbhardlimit = hard;
			q->qi_rtbsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
	}

	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_ihardlimit = hard;
			q->qi_isoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
	}

	if (newlim->d_fieldmask & FS_DQ_BWARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
	if (newlim->d_fieldmask & FS_DQ_IWARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);

	if (id == 0) {
		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
			q->qi_btimelimit = newlim->d_btimer;
			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
		}
		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
			q->qi_itimelimit = newlim->d_itimer;
			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
		}
		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
			q->qi_rtbtimelimit = newlim->d_rtbtimer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
		}
		if (newlim->d_fieldmask & FS_DQ_BWARNS)
			q->qi_bwarnlimit = newlim->d_bwarns;
		if (newlim->d_fieldmask & FS_DQ_IWARNS)
			q->qi_iwarnlimit = newlim->d_iwarns;
		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
			q->qi_rtbwarnlimit = newlim->d_rtbwarns;
	} else {
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp, 0);
	xfs_qm_dqrele(dqp);

out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}