/*
 * Round the requested last block (*last_fsb) up to a stripe width, stripe
 * unit, or extent size boundary, and only keep the rounded-up value when
 * it still lies past the allocation eof.
 */
STATIC int
xfs_iomap_eof_align_last_fsb(
	xfs_mount_t	*mp,
	xfs_iocore_t	*io,
	xfs_fsize_t	isize,
	xfs_extlen_t	extsize,
	xfs_fileoff_t	*last_fsb)
{
	xfs_fileoff_t	new_last_fsb = 0;
	xfs_extlen_t	align;
	int		eof, error;

	if (io->io_flags & XFS_IOCORE_RT)
		;
	/*
	 * If mounted with the "-o swalloc" option, roundup the allocation
	 * request to a stripe width boundary if the file size is >=
	 * stripe width and we are allocating past the allocation eof.
	 */
	else if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC) &&
		 (isize >= XFS_FSB_TO_B(mp, mp->m_swidth)))
		new_last_fsb = roundup_64(*last_fsb, mp->m_swidth);
	/*
	 * Roundup the allocation request to a stripe unit (m_dalign) boundary
	 * if the file size is >= stripe unit size, and we are allocating past
	 * the allocation eof.
	 */
	else if (mp->m_dalign && (isize >= XFS_FSB_TO_B(mp, mp->m_dalign)))
		new_last_fsb = roundup_64(*last_fsb, mp->m_dalign);

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when file on a real-time subvolume or has di_extsize hint).
	 */
	if (extsize) {
		if (new_last_fsb)
			align = roundup_64(new_last_fsb, extsize);
		else
			align = extsize;
		new_last_fsb = roundup_64(*last_fsb, align);
	}

	if (new_last_fsb) {
		error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}
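/*
 * Set up a delayed-allocation mapping for a buffered write.  When the
 * write extends the file, the request may be widened out to the mount's
 * write iosize (speculative preallocation) and aligned to a stripe unit,
 * stripe width, or real-time extent boundary.  If no space is available,
 * delayed-allocate space is flushed and the mapping is retried before
 * giving up with ENOSPC.
 */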
int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	int		ioflag,
	xfs_bmbt_irec_t	*ret_imap,
	int		*nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_iocore_t	*io = &ip->i_iocore;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fsize_t	isize;
	xfs_fsblock_t	firstblock;
	int		nimaps;
	int		error;
	xfs_bmbt_irec_t	imap[XFS_WRITE_IMAPS];
	int		aeof;
	int		fsynced = 0;

	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED);
	if (error)
		return XFS_ERROR(error);

retry:
	isize = ip->i_d.di_size;
	if (io->io_new_size > isize) {
		isize = io->io_new_size;
	}

	aeof = 0;
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	/*
	 * If the caller is doing a write at the end of the file,
	 * then extend the allocation (and the buffer used for the write)
	 * out to the file system's write iosize.  We clean up any extra
	 * space left over when the file is closed in xfs_inactive().
	 *
	 * For sync writes, we are flushing delayed allocate space to
	 * try to make additional space available for allocation near
	 * the filesystem full boundary - preallocation hurts in that
	 * situation, of course.
	 */
	if (!(ioflag & BMAPI_SYNC) && ((offset + count) > ip->i_d.di_size)) {
		xfs_off_t	aligned_offset;
		xfs_filblks_t	count_fsb;
		unsigned int	iosize;
		xfs_fileoff_t	ioalign;
		int		n;
		xfs_fileoff_t	start_fsb;

		/*
		 * If there are any real blocks past eof, then don't
		 * do any speculative allocation.
		 */
		start_fsb = XFS_B_TO_FSBT(mp,
					((xfs_ufsize_t)(offset + count - 1)));
		count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
		while (count_fsb > 0) {
			nimaps = XFS_WRITE_IMAPS;
			error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
					0, &firstblock, 0, imap, &nimaps, NULL);
			if (error) {
				return error;
			}
			for (n = 0; n < nimaps; n++) {
				if (!(io->io_flags & XFS_IOCORE_RT) &&
				    !imap[n].br_startblock) {
					cmn_err(CE_PANIC, "Access to block "
						"zero: fs <%s> inode: %lld "
						"start_block : %llx start_off "
						": %llx blkcnt : %llx "
						"extent-state : %x \n",
						(ip->i_mount)->m_fsname,
						(long long)ip->i_ino,
						imap[n].br_startblock,
						imap[n].br_startoff,
						imap[n].br_blockcount,
						imap[n].br_state);
				}
				if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
				    (imap[n].br_startblock != DELAYSTARTBLOCK)) {
					goto write_map;
				}
				start_fsb += imap[n].br_blockcount;
				count_fsb -= imap[n].br_blockcount;
			}
		}
		iosize = mp->m_writeio_blocks;
		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + iosize;
		aeof = 1;
	}
write_map:
	nimaps = XFS_WRITE_IMAPS;
	firstblock = NULLFSBLOCK;

	/*
	 * If mounted with the "-o swalloc" option, roundup the allocation
	 * request to a stripe width boundary if the file size is >=
	 * stripe width and we are allocating past the allocation eof.
	 */
	if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_swidth &&
	    (mp->m_flags & XFS_MOUNT_SWALLOC) &&
	    (isize >= XFS_FSB_TO_B(mp, mp->m_swidth)) && aeof) {
		int		eof;
		xfs_fileoff_t	new_last_fsb;

		new_last_fsb = roundup_64(last_fsb, mp->m_swidth);
		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error) {
			return error;
		}
		if (eof) {
			last_fsb = new_last_fsb;
		}
	/*
	 * Roundup the allocation request to a stripe unit (m_dalign) boundary
	 * if the file size is >= stripe unit size, and we are allocating past
	 * the allocation eof.
	 */
	} else if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_dalign &&
		   (isize >= XFS_FSB_TO_B(mp, mp->m_dalign)) && aeof) {
		int		eof;
		xfs_fileoff_t	new_last_fsb;

		new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error) {
			return error;
		}
		if (eof) {
			last_fsb = new_last_fsb;
		}
	/*
	 * Round up the allocation request to a real-time extent boundary
	 * if the file is on the real-time subvolume.
	 */
	} else if (io->io_flags & XFS_IOCORE_RT && aeof) {
		int		eof;
		xfs_fileoff_t	new_last_fsb;

		new_last_fsb = roundup_64(last_fsb, mp->m_sb.sb_rextsize);
		error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error) {
			return error;
		}
		if (eof)
			last_fsb = new_last_fsb;
	}

	error = xfs_bmapi(NULL, ip, offset_fsb,
			  (xfs_filblks_t)(last_fsb - offset_fsb),
			  XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
			  XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
			  &nimaps, NULL);
	/*
	 * This can be EDQUOT, if nimaps == 0
	 */
	if (error && (error != ENOSPC)) {
		return XFS_ERROR(error);
	}
	/*
	 * If bmapi returned us nothing, and if we didn't get back EDQUOT,
	 * then we must have run out of space.
	 */
	if (nimaps == 0) {
		xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE,
					io, offset, count);
		if (xfs_flush_space(ip, &fsynced, &ioflag))
			return XFS_ERROR(ENOSPC);

		error = 0;
		goto retry;
	}

	*ret_imap = imap[0];
	*nmaps = 1;
	if (!(io->io_flags & XFS_IOCORE_RT) && !ret_imap->br_startblock) {
		cmn_err(CE_PANIC, "Access to block zero: fs <%s> inode: %lld "
			"start_block : %llx start_off : %llx blkcnt : %llx "
			"extent-state : %x \n",
			(ip->i_mount)->m_fsname, (long long)ip->i_ino,
			ret_imap->br_startblock, ret_imap->br_startoff,
			ret_imap->br_blockcount, ret_imap->br_state);
	}
	return 0;
}
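/*
 * Worked example of the EOF round-up above, with illustrative values: for
 * a stripe unit (m_dalign) of 16 blocks and last_fsb = 100, roundup_64()
 * yields 112; the larger request is kept only if block 112 is still past
 * the allocation eof, otherwise last_fsb is left unchanged.
 */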