/*
 * Read in the in-core dquot's on-disk metadata and return the buffer.
 * Returns ENOENT to signal a hole.
 */
STATIC int
xfs_dquot_disk_read(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_buf		*bp;
	struct xfs_inode	*quotip = xfs_quota_inode(mp, dqp->dq_flags);
	uint			lock_mode;
	int			nmaps = 1;
	int			error;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(mp, dqp->dq_flags)) {
		/*
		 * Return if this type of quotas is turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount >= 1);
	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK)
		return -ENOENT;

	trace_xfs_dqtobp_read(dqp);

	/*
	 * store the blkno etc so that we don't have to do the
	 * mapping all the time
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			&xfs_dquot_buf_ops);
	if (error) {
		ASSERT(bp == NULL);
		return error;
	}

	ASSERT(xfs_buf_islocked(bp));
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	*bpp = bp;
	return 0;
}
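/*
 * Illustrative sketch (not part of the original source): one way a caller
 * might drive xfs_dquot_disk_read(), treating the -ENOENT "hole" return as
 * the cue to switch to an allocating path such as xfs_dquot_disk_alloc()
 * below.  The helper name is an assumption made for this example only.
 */
static int
example_dquot_disk_read_caller(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	int			error;

	/* Try the cheap read first; -ENOENT means no backing block yet. */
	error = xfs_dquot_disk_read(mp, dqp, bpp);
	if (error != -ENOENT)
		return error;

	/*
	 * A real caller would fall back to an allocating path here (see
	 * xfs_dquot_disk_alloc() below); this sketch just propagates the
	 * hole as "not found".
	 */
	return -ENOENT;
}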
/*
 * Advance to the next id in the current chunk, or if at the
 * end of the chunk, skip ahead to first id in next allocated chunk
 * using the SEEK_DATA interface.
 */
static int
xfs_dq_get_next_id(
	struct xfs_mount	*mp,
	uint			type,
	xfs_dqid_t		*id)
{
	struct xfs_inode	*quotip = xfs_quota_inode(mp, type);
	xfs_dqid_t		next_id = *id + 1; /* simple advance */
	uint			lock_flags;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	cur;
	xfs_fsblock_t		start;
	int			error = 0;

	/* If we'd wrap past the max ID, stop */
	if (next_id < *id)
		return -ENOENT;

	/* If new ID is within the current chunk, advancing it sufficed */
	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
		*id = next_id;
		return 0;
	}

	/* Nope, next_id is now past the current chunk, so find the next one */
	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;

	lock_flags = xfs_ilock_data_map_shared(quotip);
	if (!(quotip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
		if (error) {
			xfs_iunlock(quotip, lock_flags);
			return error;
		}
	}

	if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
		/* contiguous chunk, bump startoff for the id calculation */
		if (got.br_startoff < start)
			got.br_startoff = start;
		*id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
	} else {
		error = -ENOENT;
	}

	xfs_iunlock(quotip, lock_flags);

	return error;
}
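/*
 * Worked example (illustrative only, not part of the original source): the
 * id/chunk arithmetic that xfs_dq_get_next_id() relies on.  Each chunk of
 * the quota file holds qi_dqperchunk records, so an id maps to the file
 * offset id / qi_dqperchunk, and an id that is a multiple of qi_dqperchunk
 * is the first record of a new chunk.  The helper below is a standalone
 * restatement of that arithmetic, not a function from the source.
 */
static xfs_fileoff_t
example_dqid_to_chunk_offset(
	struct xfs_mount	*mp,
	xfs_dqid_t		id)
{
	unsigned int		dqperchunk = mp->m_quotainfo->qi_dqperchunk;

	/*
	 * id % dqperchunk is the record's slot within its chunk; when the
	 * next id's slot is 0, xfs_dq_get_next_id() must consult the extent
	 * map to find the next allocated chunk.
	 */
	return (xfs_fileoff_t)id / dqperchunk;
}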
/*
 * Ensure that the given in-core dquot has a buffer on disk backing it, and
 * return the buffer locked and held. This is called when the bmapi finds a
 * hole.
 */
STATIC int
xfs_dquot_disk_alloc(
	struct xfs_trans	**tpp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_trans	*tp = *tpp;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_buf		*bp;
	struct xfs_inode	*quotip = xfs_quota_inode(mp, dqp->dq_flags);
	int			nmaps = 1;
	int			error;

	trace_xfs_dqalloc(dqp);

	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		/*
		 * Return if this type of quotas is turned off while we didn't
		 * have an inode lock
		 */
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return -ESRCH;
	}

	/* Create the block mapping. */
	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
			XFS_QM_DQALLOC_SPACE_RES(mp), &map, &nmaps);
	if (error)
		return error;
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0);
	if (!bp)
		return -ENOMEM;
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * Hold the buffer and join it to the dfops so that we'll still own
	 * the buffer when we return to the caller.  The buffer disposal on
	 * error must be paid attention to very carefully, as it has been
	 * broken since commit efa092f3d4c6 "[XFS] Fixes a bug in the quota
	 * code when allocating a new dquot record" in 2005, and the later
	 * conversion to xfs_defer_ops in commit 310a75a3c6c747 failed to keep
	 * the buffer locked across the _defer_finish call.  We can now do
	 * this correctly with xfs_defer_bjoin.
	 *
	 * Above, we allocated a disk block for the dquot information and used
	 * get_buf to initialize the dquot.  If the _defer_finish fails, the
	 * old transaction is gone but the new buffer is not joined or held to
	 * any transaction, so we must _buf_relse it.
	 *
	 * If everything succeeds, the caller of this function is returned a
	 * buffer that is locked and held to the transaction.  The caller
	 * is responsible for unlocking any buffer passed back, either
	 * manually or by committing the transaction.  On error, the buffer is
	 * released and not passed back.
	 */
	xfs_trans_bhold(tp, bp);
	error = xfs_defer_finish(tpp);
	if (error) {
		xfs_trans_bhold_release(*tpp, bp);
		xfs_trans_brelse(*tpp, bp);
		return error;
	}
	*bpp = bp;
	return 0;
}
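/*
 * Minimal sketch of a possible caller of xfs_dquot_disk_alloc() (not part of
 * the original source).  It mirrors the disposal rules spelled out in the
 * comment above: on success the buffer comes back locked and held to the
 * transaction, so if the commit fails the caller must release it by hand.
 * The transaction reservation used here (tr_qm_dqalloc with
 * XFS_QM_DQALLOC_SPACE_RES) is an assumption made for this example.
 */
static int
example_dquot_disk_alloc_caller(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_trans	*tp;
	struct xfs_buf		*bp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
			XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
	if (error)
		return error;

	error = xfs_dquot_disk_alloc(&tp, dqp, &bp);
	if (error) {
		/* On error the buffer was already released for us. */
		xfs_trans_cancel(tp);
		return error;
	}

	error = xfs_trans_commit(tp);
	if (error) {
		/*
		 * The buffer was held to the transaction, so we must unlock
		 * it manually now that it isn't being passed back.
		 */
		xfs_buf_relse(bp);
		return error;
	}

	*bpp = bp;
	return 0;
}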
/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer
 */
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t		**tpp,
	xfs_dquot_t		*dqp,
	xfs_disk_dquot_t	**O_ddpp,
	xfs_buf_t		**O_bpp,
	uint			flags)
{
	struct xfs_bmbt_irec	map;
	int			nmaps = 1, error;
	struct xfs_buf		*bp;
	struct xfs_inode	*quotip;
	struct xfs_mount	*mp = dqp->q_mount;
	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
	uint			lock_mode;

	quotip = xfs_quota_inode(dqp->q_mount, dqp->dq_flags);
	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		/*
		 * Return if this type of quotas is turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);

	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount == 1);

	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
		sizeof(xfs_dqblk_t);

	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK) {
		/*
		 * We don't allocate unless we're asked to
		 */
		if (!(flags & XFS_QMOPT_DQALLOC))
			return -ENOENT;

		ASSERT(tp);
		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
					dqp->q_fileoffset, &bp);
		if (error)
			return error;
		tp = *tpp;
	} else {
		trace_xfs_dqtobp_read(dqp);

		/*
		 * store the blkno etc so that we don't have to do the
		 * mapping all the time
		 */
		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   dqp->q_blkno,
					   mp->m_quotainfo->qi_dqchunklen,
					   0, &bp, &xfs_dquot_buf_ops);

		if (error == -EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
						mp->m_quotainfo->qi_dqperchunk;
			ASSERT(bp == NULL);
			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
		}

		if (error) {
			ASSERT(bp == NULL);
			return error;
		}
	}

	ASSERT(xfs_buf_islocked(bp));
	*O_bpp = bp;
	*O_ddpp = bp->b_addr + dqp->q_bufoffset;

	return 0;
}
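/*
 * Illustrative sketch (not from the original source): how the two pointers
 * returned by xfs_qm_dqtobp() might be consumed.  O_bpp is the locked buffer
 * covering the whole dquot chunk, and O_ddpp points at this dquot's on-disk
 * record inside that buffer, so a caller can copy the record into the
 * in-core dquot and then drop the buffer.  The helper name, the zero flags
 * value, and the possibly-NULL transaction are assumptions for this example.
 */
static int
example_dqtobp_consumer(
	struct xfs_trans	**tpp,
	struct xfs_dquot	*dqp)
{
	struct xfs_disk_dquot	*ddqp;
	struct xfs_buf		*bp;
	int			error;

	/* Non-allocating lookup: no XFS_QMOPT_DQALLOC in flags. */
	error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, 0);
	if (error)
		return error;

	/* Copy the on-disk dquot into the in-core copy. */
	memcpy(&dqp->q_core, ddqp, sizeof(dqp->q_core));

	/* Drop the buffer; q_blkno and q_bufoffset stay cached in the dquot. */
	xfs_trans_brelse(tpp ? *tpp : NULL, bp);
	return 0;
}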