/*
 * xfs_inval_cached_pages
 *
 * This routine is responsible for keeping direct I/O and buffered I/O
 * somewhat coherent.  From here we make sure that we're at least
 * temporarily holding the inode I/O lock exclusively and then call
 * the page cache to flush and invalidate any cached pages.  If there
 * are no cached pages this routine will be very quick.
 */
void
xfs_inval_cached_pages(
	vnode_t		*vp,
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	int		write,
	int		relock)
{
	xfs_mount_t	*mp;

	if (!VN_CACHED(vp)) {
		return;
	}

	mp = io->io_mount;

	/*
	 * We need to get the I/O lock exclusively in order
	 * to safely invalidate pages and mappings.
	 */
	if (relock) {
		XFS_IUNLOCK(mp, io, XFS_IOLOCK_SHARED);
		XFS_ILOCK(mp, io, XFS_IOLOCK_EXCL);
	}

	/* Writing beyond EOF creates a hole that must be zeroed */
	if (write && (offset > XFS_SIZE(mp, io))) {
		xfs_fsize_t	isize;

		XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
		isize = XFS_SIZE(mp, io);
		if (offset > isize) {
			xfs_zero_eof(vp, io, offset, isize, offset);
		}
		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	}

	xfs_inval_cached_trace(io, offset, -1, ctooff(offtoct(offset)), -1);
	VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(offset)), -1,
			FI_REMAPF_LOCKED);
	if (relock) {
		XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
	}
}
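/*
 * A minimal sketch (not part of the original source) of what the
 * ctooff(offtoct(offset)) idiom above computes: the offset is converted
 * to its page-cache chunk number and back, which rounds it down to a
 * chunk boundary so the flush starts on a whole chunk.  DOC_CHUNK_SHIFT
 * is an assumed stand-in; the real macros derive the shift from the
 * platform's page-chunk size.
 */
#ifdef XFS_DOC_SKETCH
#define DOC_CHUNK_SHIFT	12	/* assumption: 4 KiB chunks */

static xfs_off_t
doc_chunk_align(xfs_off_t offset)
{
	/* equivalent of ctooff(offtoct(offset)): truncate, then scale back */
	return (offset >> DOC_CHUNK_SHIFT) << DOC_CHUNK_SHIFT;
}
#endif	/* XFS_DOC_SKETCH */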
ssize_t				/* bytes written, or (-) error */
xfs_write(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		nsegs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	unsigned long		segs = nsegs;
	xfs_inode_t		*xip;
	xfs_mount_t		*mp;
	ssize_t			ret = 0, error = 0;
	xfs_fsize_t		isize, new_size;
	xfs_iocore_t		*io;
	vnode_t			*vp;
	unsigned long		seg;
	int			iolock;
	int			eventsent = 0;
	vrwlock_t		locktype;
	size_t			ocount = 0, count;
	loff_t			pos;
	int			need_isem = 1, need_flush = 0;

	XFS_STATS_INC(xs_write_calls);

	vp = BHV_TO_VNODE(bdp);
	xip = XFS_BHVTOI(bdp);

	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		ocount += iv->iov_len;
		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		segs = seg;
		ocount -= iv->iov_len;	/* This segment is no good */
		break;
	}

	count = ocount;
	pos = *offset;

	if (count == 0)
		return 0;

	io = &xip->i_iocore;
	mp = io->io_mount;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((pos & target->pbr_smask) || (count & target->pbr_smask))
			return XFS_ERROR(-EINVAL);

		if (!VN_CACHED(vp) && pos < i_size_read(inode))
			need_isem = 0;

		if (VN_CACHED(vp))
			need_flush = 1;
	}

relock:
	if (need_isem) {
		iolock = XFS_IOLOCK_EXCL;
		locktype = VRWLOCK_WRITE;

		down(&inode->i_sem);
	} else {
		iolock = XFS_IOLOCK_SHARED;
		locktype = VRWLOCK_WRITE_DIRECT;
	}

	xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

	isize = i_size_read(inode);

	if (file->f_flags & O_APPEND)
		*offset = isize;

start:
	error = -generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (error) {
		xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_isem;
	}

	new_size = pos + count;
	if (new_size > isize)
		io->io_new_size = new_size;

	if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
	    !(ioflags & IO_INVIS) && !eventsent)) {
		loff_t		savedsize = pos;
		int		dmflags = FILP_DELAY_FLAG(file);

		if (need_isem)
			dmflags |= DM_FLAGS_ISEM;

		xfs_iunlock(xip, XFS_ILOCK_EXCL);
		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
				      pos, count,
				      dmflags, &locktype);
		if (error) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_isem;
		}
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		eventsent = 1;

		/*
		 * The iolock was dropped and reacquired in XFS_SEND_DATA
		 * so we have to recheck the size when appending.
		 * We will only "goto start;" once, since having sent the
		 * event prevents another call to XFS_SEND_DATA, which is
		 * what allows the size to change in the first place.
		 */
		if ((file->f_flags & O_APPEND) && savedsize != isize) {
			pos = isize = xip->i_d.di_size;
			goto start;
		}
	}

	/*
	 * On Linux, generic_file_write updates the times even if
	 * no data is copied in so long as the write had a size.
	 *
	 * We must update xfs' times since revalidate will overcopy xfs.
	 */
	if (!(ioflags & IO_INVIS)) {
		xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
		inode_update_time(inode, 1);
	}

	/*
	 * If the offset is beyond the size of the file, we have a couple
	 * of things to do. First, if there is already space allocated
	 * we need to either create holes or zero the disk or ...
	 *
	 * If there is a page where the previous size lands, we need
	 * to zero it out up to the new size.
	 */

	if (pos > isize) {
		error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos,
					isize, pos + count);
		if (error) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			goto out_unlock_isem;
		}
	}
	xfs_iunlock(xip, XFS_ILOCK_EXCL);

	/*
	 * If we're writing the file then make sure to clear the
	 * setuid and setgid bits if the process is not being run
	 * by root.  This keeps people from modifying setuid and
	 * setgid binaries.
	 */

	if (((xip->i_d.di_mode & S_ISUID) ||
	    ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
		(S_ISGID | S_IXGRP))) &&
	     !capable(CAP_FSETID)) {
		error = xfs_write_clear_setuid(xip);
		if (likely(!error))
			error = -remove_suid(file->f_dentry);
		if (unlikely(error)) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_isem;
		}
	}

retry:
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	if ((ioflags & IO_ISDIRECT)) {
		if (need_flush) {
			xfs_inval_cached_trace(io, pos, -1,
					ctooff(offtoct(pos)), -1);
			VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(pos)),
					-1, FI_REMAPF_LOCKED);
		}

		if (need_isem) {
			/* demote the lock now the cached pages are gone */
			XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
			up(&inode->i_sem);

			iolock = XFS_IOLOCK_SHARED;
			locktype = VRWLOCK_WRITE_DIRECT;
			need_isem = 0;
		}

		xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_direct_write(iocb, iovp,
				&segs, pos, offset, count, ocount);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		if (ret >= 0 && ret != count) {
			XFS_STATS_ADD(xs_write_bytes, ret);

			pos += ret;
			count -= ret;

			need_isem = 1;
			ioflags &= ~IO_ISDIRECT;
			xfs_iunlock(xip, iolock);
			goto relock;
		}
	} else {
		xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_buffered_write(iocb, iovp, segs,
				pos, offset, count, ret);
	}

	current->backing_dev_info = NULL;

	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(iocb);

	if ((ret == -ENOSPC) &&
	    DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
	    !(ioflags & IO_INVIS)) {

		xfs_rwunlock(bdp, locktype);
		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
				DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
				0, 0, 0); /* Delay flag intentionally unused */
		if (error)
			goto out_unlock_isem;
		xfs_rwlock(bdp, locktype);
		pos = xip->i_d.di_size;
		goto retry;
	}

	if (*offset > xip->i_d.di_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		if (*offset > xip->i_d.di_size) {
			xip->i_d.di_size = *offset;
			i_size_write(inode, *offset);
			xip->i_update_core = 1;
			xip->i_update_size = 1;
		}
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}

	error = -ret;
	if (ret <= 0)
		goto out_unlock_internal;

	XFS_STATS_ADD(xs_write_bytes, ret);

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
		/*
		 * If we're treating this as O_DSYNC and we have not updated
		 * the size, force the log.
		 */
		if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) &&
		    !(xip->i_update_size)) {
			xfs_inode_log_item_t	*iip = xip->i_itemp;

			/*
			 * If an allocation transaction occurred
			 * without extending the size, then we have to force
			 * the log up the proper point to ensure that the
			 * allocation is permanent.  We can't count on
			 * the fact that buffered writes lock out direct I/O
			 * writes - the direct I/O write could have extended
			 * the size nontransactionally, then finished before
			 * we started.  xfs_write_file will think that the file
			 * didn't grow but the update isn't safe unless the
			 * size change is logged.
			 *
			 * Force the log if we've committed a transaction
			 * against the inode or if someone else has and
			 * the commit record hasn't gone to disk (e.g.
			 * the inode is pinned).  This guarantees that
			 * all changes affecting the inode are permanent
			 * when we return.
			 */
			if (iip && iip->ili_last_lsn) {
				xfs_log_force(mp, iip->ili_last_lsn,
						XFS_LOG_FORCE | XFS_LOG_SYNC);
			} else if (xfs_ipincount(xip) > 0) {
				xfs_log_force(mp, (xfs_lsn_t)0,
						XFS_LOG_FORCE | XFS_LOG_SYNC);
			}

		} else {
			xfs_trans_t	*tp;

			/*
			 * O_SYNC or O_DSYNC _with_ a size update are handled
			 * the same way.
			 *
			 * If the write was synchronous then we need to make
			 * sure that the inode modification time is permanent.
			 * We'll have updated the timestamp above, so here
			 * we use a synchronous transaction to log the inode.
			 * It's not fast, but it's necessary.
			 *
			 * If this a dsync write and the size got changed
			 * non-transactionally, then we need to ensure that
			 * the size change gets logged in a synchronous
			 * transaction.
			 */
			tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
			if ((error = xfs_trans_reserve(tp, 0,
						      XFS_SWRITE_LOG_RES(mp),
						      0, 0, 0))) {
				/* Transaction reserve failed */
				xfs_trans_cancel(tp, 0);
			} else {
				/* Transaction reserve successful */
				xfs_ilock(xip, XFS_ILOCK_EXCL);
				xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL);
				xfs_trans_ihold(tp, xip);
				xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE);
				xfs_trans_set_sync(tp);
				error = xfs_trans_commit(tp, 0, NULL);
				xfs_iunlock(xip, XFS_ILOCK_EXCL);
				if (error)
					goto out_unlock_internal;
			}
		}

		xfs_rwunlock(bdp, locktype);
		if (need_isem)
			up(&inode->i_sem);

		error = sync_page_range(inode, mapping, pos, ret);
		if (!error)
			error = ret;
		return error;
	}

 out_unlock_internal:
	xfs_rwunlock(bdp, locktype);
 out_unlock_isem:
	if (need_isem)
		up(&inode->i_sem);
	return -error;
}
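/*
 * A standalone sketch (an illustrative assumption, not part of the
 * original file) of the segment-validation idiom used at the top of
 * xfs_write() above: OR-ing each length into the running total and
 * testing the sign bit catches both an individual length with the high
 * bit set and a cumulative sum that wrapped negative, with a single
 * comparison per segment.
 */
#ifdef XFS_DOC_SKETCH
static ssize_t
doc_total_len(const size_t *lens, unsigned long nsegs)
{
	size_t		total = 0;
	unsigned long	seg;

	for (seg = 0; seg < nsegs; seg++) {
		total += lens[seg];
		/* sign bit set in either value => bogus length or wrap */
		if ((ssize_t)(total | lens[seg]) < 0)
			return -1;
	}
	return (ssize_t)total;
}
#endif	/* XFS_DOC_SKETCH */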
ssize_t			/* bytes read, or (-) error */
xfs_read(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		segs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	size_t			size = 0;
	ssize_t			ret;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	vnode_t			*vp;
	unsigned long		seg;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t *target =
			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((*offset & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (*offset == ip->i_d.di_size) {
				return (0);
			}
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    !(ioflags & IO_INVIS)) {
		vrwlock_t locktype = VRWLOCK_READ;
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
					BHV_TO_VNODE(bdp), *offset, size,
					dmflags, &locktype);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			goto unlock_mutex;
		}
	}

	if (unlikely((ioflags & IO_ISDIRECT) && VN_CACHED(vp)))
		VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(*offset)),
						-1, FI_REMAPF_LOCKED);

	xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
				(void *)iovp, segs, *offset, ioflags);
	ret = __generic_file_aio_read(iocb, iovp, segs, offset);
	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

unlock_mutex:
	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_unlock(&inode->i_mutex);
	return ret;
}
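/*
 * A minimal sketch (illustrative assumption, not in the original file)
 * of the direct I/O alignment test used in xfs_read() above.  With a
 * power-of-two sector size, smask = sector_size - 1, and an offset or
 * length is aligned exactly when AND-ing it with the mask leaves no
 * remainder bits.
 */
#ifdef XFS_DOC_SKETCH
static int
doc_dio_aligned(unsigned long long offset, unsigned long long len,
		unsigned int sector_size)
{
	unsigned long long smask = (unsigned long long)sector_size - 1;

	/* non-zero low bits in either value mean a misaligned request */
	return ((offset | len) & smask) == 0;
}
#endif	/* XFS_DOC_SKETCH */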
/*
 * Syssgi interface for swapext
 */
int
xfs_swapext(
	xfs_swapext_t	__user *sxu)
{
	xfs_swapext_t	*sxp;
	xfs_inode_t	*ip = NULL, *tip = NULL, *ips[2];
	xfs_trans_t	*tp;
	xfs_mount_t	*mp;
	xfs_bstat_t	*sbp;
	struct file	*fp = NULL, *tfp = NULL;
	vnode_t		*vp, *tvp;
	static uint	lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL;
	int		ilf_fields, tilf_fields;
	int		error = 0;
	xfs_ifork_t	*tempifp, *ifp, *tifp;
	__uint64_t	tmp;
	int		aforkblks = 0;
	int		taforkblks = 0;
	char		locked = 0;

	sxp = kmem_alloc(sizeof(xfs_swapext_t), KM_MAYFAIL);
	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
	if (!sxp || !tempifp) {
		error = XFS_ERROR(ENOMEM);
		goto error0;
	}

	if (copy_from_user(sxp, sxu, sizeof(xfs_swapext_t))) {
		error = XFS_ERROR(EFAULT);
		goto error0;
	}

	/* Pull information for the target fd */
	if (((fp = fget((int)sxp->sx_fdtarget)) == NULL) ||
	    ((vp = vn_from_inode(fp->f_dentry->d_inode)) == NULL)) {
		error = XFS_ERROR(EINVAL);
		goto error0;
	}

	ip = xfs_vtoi(vp);
	if (ip == NULL) {
		error = XFS_ERROR(EBADF);
		goto error0;
	}

	if (((tfp = fget((int)sxp->sx_fdtmp)) == NULL) ||
	    ((tvp = vn_from_inode(tfp->f_dentry->d_inode)) == NULL)) {
		error = XFS_ERROR(EINVAL);
		goto error0;
	}

	tip = xfs_vtoi(tvp);
	if (tip == NULL) {
		error = XFS_ERROR(EBADF);
		goto error0;
	}

	if (ip->i_mount != tip->i_mount) {
		error = XFS_ERROR(EINVAL);
		goto error0;
	}

	if (ip->i_ino == tip->i_ino) {
		error = XFS_ERROR(EINVAL);
		goto error0;
	}

	mp = ip->i_mount;
	sbp = &sxp->sx_stat;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = XFS_ERROR(EIO);
		goto error0;
	}

	locked = 1;

	/* Lock in i_ino order */
	if (ip->i_ino < tip->i_ino) {
		ips[0] = ip;
		ips[1] = tip;
	} else {
		ips[0] = tip;
		ips[1] = ip;
	}

	xfs_lock_inodes(ips, 2, 0, lock_flags);

	/* Check permissions */
	error = xfs_iaccess(ip, S_IWUSR, NULL);
	if (error)
		goto error0;

	error = xfs_iaccess(tip, S_IWUSR, NULL);
	if (error)
		goto error0;

	/* Verify that both files have the same format */
	if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
		error = XFS_ERROR(EINVAL);
		goto error0;
	}

	/* Verify both files are either real-time or non-realtime */
	if ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) !=
	    (tip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
		error = XFS_ERROR(EINVAL);
		goto error0;
	}

	/* Should never get a local format */
	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
		error = XFS_ERROR(EINVAL);
		goto error0;
	}

	if (VN_CACHED(tvp) != 0) {
		xfs_inval_cached_trace(&tip->i_iocore, 0, -1, 0, -1);
		VOP_FLUSHINVAL_PAGES(tvp, 0, -1, FI_REMAPF_LOCKED);
	}

	/* Verify O_DIRECT for ftmp */
	if (VN_CACHED(tvp) != 0) {
		error = XFS_ERROR(EINVAL);
		goto error0;
	}

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = XFS_ERROR(EFAULT);
		goto error0;
	}

	/*
	 * If the target has extended attributes, the tmp file
	 * must also in order to ensure the correct data fork
	 * format.
	 */
	if (XFS_IFORK_Q(ip) != XFS_IFORK_Q(tip)) {
		error = XFS_ERROR(EINVAL);
		goto error0;
	}

	/*
	 * Compare the current change & modify times with that
	 * passed in.  If they differ, we abort this swap.
	 * This is the mechanism used to ensure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != ip->i_d.di_ctime.t_sec) ||
	    (sbp->bs_ctime.tv_nsec != ip->i_d.di_ctime.t_nsec) ||
	    (sbp->bs_mtime.tv_sec != ip->i_d.di_mtime.t_sec) ||
	    (sbp->bs_mtime.tv_nsec != ip->i_d.di_mtime.t_nsec)) {
		error = XFS_ERROR(EBUSY);
		goto error0;
	}

	/* We need to fail if the file is memory mapped.  Once we have
	 * tossed all existing pages, the page fault will have no option
	 * but to go to the filesystem for pages.  By making the page fault
	 * call VOP_READ (or write in the case of autogrow) they block on
	 * the iolock until we have switched the extents.
	 */
	if (VN_MAPPED(vp)) {
		error = XFS_ERROR(EBUSY);
		goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_iunlock(tip, XFS_ILOCK_EXCL);

	/*
	 * There is a race condition here since we gave up the
	 * ilock.  However, the data fork will not change since
	 * we have the iolock (locked for truncation too) so we
	 * are safe.  We don't really care if non-io related
	 * fields change.
	 */
	VOP_TOSS_PAGES(vp, 0, -1, FI_REMAPF);

	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
	if ((error = xfs_trans_reserve(tp, 0,
				     XFS_ICHANGE_LOG_RES(mp), 0,
				     0, 0))) {
		xfs_iunlock(ip,  XFS_IOLOCK_EXCL);
		xfs_iunlock(tip, XFS_IOLOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		locked = 0;
		goto error0;
	}
	xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL);

	/*
	 * Count the number of extended attribute blocks
	 */
	if (((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
	    (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK,
			&aforkblks);
		if (error) {
			xfs_trans_cancel(tp, 0);
			goto error0;
		}
	}
	if (((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
	    (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
			&taforkblks);
		if (error) {
			xfs_trans_cancel(tp, 0);
			goto error0;
		}
	}

	/*
	 * Swap the data forks of the inodes
	 */
	ifp = &ip->i_df;
	tifp = &tip->i_df;
	*tempifp = *ifp;	/* struct copy */
	*ifp = *tifp;		/* struct copy */
	*tifp = *tempifp;	/* struct copy */

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (__uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	tmp = (__uint64_t) ip->i_d.di_nextents;
	ip->i_d.di_nextents = tip->i_d.di_nextents;
	tip->i_d.di_nextents = tmp;

	tmp = (__uint64_t) ip->i_d.di_format;
	ip->i_d.di_format = tip->i_d.di_format;
	tip->i_d.di_format = tmp;

	ilf_fields = XFS_ILOG_CORE;

	switch(ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer.  Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			ifp->if_u1.if_extents =
				ifp->if_u2.if_inline_ext;
		}
		ilf_fields |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ilf_fields |= XFS_ILOG_DBROOT;
		break;
	}

	tilf_fields = XFS_ILOG_CORE;

	switch(tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer.  Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			tifp->if_u1.if_extents =
				tifp->if_u2.if_inline_ext;
		}
		tilf_fields |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		tilf_fields |= XFS_ILOG_DBROOT;
		break;
	}

	/*
	 * Increment vnode ref counts since xfs_trans_commit &
	 * xfs_trans_cancel will both unlock the inodes and
	 * decrement the associated ref counts.
	 */
	VN_HOLD(vp);
	VN_HOLD(tvp);

	xfs_trans_ijoin(tp, ip, lock_flags);
	xfs_trans_ijoin(tp, tip, lock_flags);

	xfs_trans_log_inode(tp, ip,  ilf_fields);
	xfs_trans_log_inode(tp, tip, tilf_fields);

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC) {
		xfs_trans_set_sync(tp);
	}

	error = xfs_trans_commit(tp, XFS_TRANS_SWAPEXT, NULL);
	locked = 0;

 error0:
	if (locked) {
		xfs_iunlock(ip,  lock_flags);
		xfs_iunlock(tip, lock_flags);
	}

	if (fp != NULL)
		fput(fp);
	if (tfp != NULL)
		fput(tfp);

	if (sxp != NULL)
		kmem_free(sxp, sizeof(xfs_swapext_t));
	if (tempifp != NULL)
		kmem_free(tempifp, sizeof(xfs_ifork_t));

	return error;
}
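/*
 * A small sketch (assumed helper, not part of the original file) of the
 * deadlock-avoidance rule xfs_swapext() follows above: when two inodes
 * must be locked together, always take them in ascending i_ino order,
 * so two concurrent swaps of the same pair can never acquire the locks
 * in opposite orders.
 */
#ifdef XFS_DOC_SKETCH
static void
doc_order_two(xfs_inode_t *a, xfs_inode_t *b, xfs_inode_t *out[2])
{
	if (a->i_ino < b->i_ino) {
		out[0] = a;
		out[1] = b;
	} else {
		out[0] = b;
		out[1] = a;
	}
	/* callers then lock out[0] before out[1], e.g. via xfs_lock_inodes() */
}
#endif	/* XFS_DOC_SKETCH */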
/*
 * xfs sync routine for internal use
 *
 * This routine supports all of the flags defined for the generic VFS_SYNC
 * interface as explained above under xfs_sync.  In the interests of not
 * changing interfaces within the 6.5 family, additional internally-
 * required functions are specified within a separate xflags parameter,
 * only available by calling this routine.
 *
 */
STATIC int
xfs_sync_inodes(
	xfs_mount_t	*mp,
	int		flags,
	int		xflags,
	int		*bypassed)
{
	xfs_inode_t	*ip = NULL;
	xfs_inode_t	*ip_next;
	xfs_buf_t	*bp;
	vnode_t		*vp = NULL;
	vmap_t		vmap;
	int		error;
	int		last_error;
	uint64_t	fflag;
	uint		lock_flags;
	uint		base_lock_flags;
	boolean_t	mount_locked;
	boolean_t	vnode_refed;
	int		preempt;
	xfs_dinode_t	*dip;
	xfs_iptr_t	*ipointer;
#ifdef DEBUG
	boolean_t	ipointer_in = B_FALSE;

#define IPOINTER_SET	ipointer_in = B_TRUE
#define IPOINTER_CLR	ipointer_in = B_FALSE
#else
#define IPOINTER_SET
#define IPOINTER_CLR
#endif

/* Insert a marker record into the inode list after inode ip. The list
 * must be locked when this is called. After the call the list will no
 * longer be locked.
 */
#define IPOINTER_INSERT(ip, mp)	{ \
		ASSERT(ipointer_in == B_FALSE); \
		ipointer->ip_mnext = ip->i_mnext; \
		ipointer->ip_mprev = ip; \
		ip->i_mnext = (xfs_inode_t *)ipointer; \
		ipointer->ip_mnext->i_mprev = (xfs_inode_t *)ipointer; \
		preempt = 0; \
		XFS_MOUNT_IUNLOCK(mp); \
		mount_locked = B_FALSE; \
		IPOINTER_SET; \
	}

/* Remove the marker from the inode list. If the marker was the only item
 * in the list then there are no remaining inodes and we should zero out
 * the whole list. If we are the current head of the list then move the head
 * past us.
 */
#define IPOINTER_REMOVE(ip, mp)	{ \
		ASSERT(ipointer_in == B_TRUE); \
		if (ipointer->ip_mnext != (xfs_inode_t *)ipointer) { \
			ip = ipointer->ip_mnext; \
			ip->i_mprev = ipointer->ip_mprev; \
			ipointer->ip_mprev->i_mnext = ip; \
			if (mp->m_inodes == (xfs_inode_t *)ipointer) { \
				mp->m_inodes = ip; \
			} \
		} else { \
			ASSERT(mp->m_inodes == (xfs_inode_t *)ipointer); \
			mp->m_inodes = NULL; \
			ip = NULL; \
		} \
		IPOINTER_CLR; \
	}

#define XFS_PREEMPT_MASK	0x7f

	if (bypassed)
		*bypassed = 0;
	if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY)
		return 0;
	error = 0;
	last_error = 0;
	preempt = 0;

	/* Allocate a reference marker */
	ipointer = (xfs_iptr_t *)kmem_zalloc(sizeof(xfs_iptr_t), KM_SLEEP);

	fflag = XFS_B_ASYNC;		/* default is don't wait */
	if (flags & SYNC_BDFLUSH)
		fflag = XFS_B_DELWRI;
	if (flags & SYNC_WAIT)
		fflag = 0;		/* synchronous overrides all */

	base_lock_flags = XFS_ILOCK_SHARED;
	if (flags & (SYNC_DELWRI | SYNC_CLOSE)) {
		/*
		 * We need the I/O lock if we're going to call any of
		 * the flush/inval routines.
		 */
		base_lock_flags |= XFS_IOLOCK_SHARED;
	}

	XFS_MOUNT_ILOCK(mp);

	ip = mp->m_inodes;

	mount_locked = B_TRUE;
	vnode_refed  = B_FALSE;

	IPOINTER_CLR;

	do {
		ASSERT(ipointer_in == B_FALSE);
		ASSERT(vnode_refed == B_FALSE);

		lock_flags = base_lock_flags;

		/*
		 * There were no inodes in the list, just break out
		 * of the loop.
		 */
		if (ip == NULL) {
			break;
		}

		/*
		 * We found another sync thread marker - skip it
		 */
		if (ip->i_mount == NULL) {
			ip = ip->i_mnext;
			continue;
		}

		vp = XFS_ITOV_NULL(ip);

		/*
		 * If the vnode is gone then this is being torn down,
		 * call reclaim if it is flushed, else let regular flush
		 * code deal with it later in the loop.
		 */

		if (vp == NULL) {
			/* Skip ones already in reclaim */
			if (ip->i_flags & XFS_IRECLAIM) {
				ip = ip->i_mnext;
				continue;
			}
			if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) {
				ip = ip->i_mnext;
			} else if ((xfs_ipincount(ip) == 0) &&
				    xfs_iflock_nowait(ip)) {
				IPOINTER_INSERT(ip, mp);

				xfs_finish_reclaim(ip, 1,
						XFS_IFLUSH_DELWRI_ELSE_ASYNC);

				XFS_MOUNT_ILOCK(mp);
				mount_locked = B_TRUE;
				IPOINTER_REMOVE(ip, mp);
			} else {
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
				ip = ip->i_mnext;
			}
			continue;
		}

		if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) {
			XFS_MOUNT_IUNLOCK(mp);
			kmem_free(ipointer, sizeof(xfs_iptr_t));
			return 0;
		}

		/*
		 * If this is just vfs_sync() or pflushd() calling
		 * then we can skip inodes for which it looks like
		 * there is nothing to do.  Since we don't have the
		 * inode locked this is racy, but these are periodic
		 * calls so it doesn't matter.  For the others we want
		 * to know for sure, so we at least try to lock them.
		 */
		if (flags & SYNC_BDFLUSH) {
			if (((ip->i_itemp == NULL) ||
			     !(ip->i_itemp->ili_format.ilf_fields &
			       XFS_ILOG_ALL)) &&
			    (ip->i_update_core == 0)) {
				ip = ip->i_mnext;
				continue;
			}
		}

		/*
		 * Try to lock without sleeping.  We're out of order with
		 * the inode list lock here, so if we fail we need to drop
		 * the mount lock and try again.  If we're called from
		 * bdflush() here, then don't bother.
		 *
		 * The inode lock here actually coordinates with the
		 * almost spurious inode lock in xfs_ireclaim() to prevent
		 * the vnode we handle here without a reference from
		 * being freed while we reference it.  If we lock the inode
		 * while it's on the mount list here, then the spurious inode
		 * lock in xfs_ireclaim() after the inode is pulled from
		 * the mount list will sleep until we release it here.
		 * This keeps the vnode from being freed while we reference
		 * it.  It is also cheaper and simpler than actually doing
		 * a vn_get() for every inode we touch here.
		 */
		if (xfs_ilock_nowait(ip, lock_flags) == 0) {

			if ((flags & SYNC_BDFLUSH) || (vp == NULL)) {
				ip = ip->i_mnext;
				continue;
			}

			/*
			 * We need to unlock the inode list lock in order
			 * to lock the inode. Insert a marker record into
			 * the inode list to remember our position, dropping
			 * the lock is now done inside the IPOINTER_INSERT
			 * macro.
			 *
			 * We also use the inode list lock to protect us
			 * in taking a snapshot of the vnode version number
			 * for use in calling vn_get().
			 */
			VMAP(vp, vmap);
			IPOINTER_INSERT(ip, mp);

			vp = vn_get(vp, &vmap);
			if (vp == NULL) {
				/*
				 * The vnode was reclaimed once we let go
				 * of the inode list lock.  Skip to the
				 * next list entry. Remove the marker.
				 */

				XFS_MOUNT_ILOCK(mp);

				mount_locked = B_TRUE;
				vnode_refed  = B_FALSE;

				IPOINTER_REMOVE(ip, mp);

				continue;
			}

			xfs_ilock(ip, lock_flags);

			ASSERT(vp == XFS_ITOV(ip));
			ASSERT(ip->i_mount == mp);

			vnode_refed = B_TRUE;
		}

		/* From here on in the loop we may have a marker record
		 * in the inode list.
		 */

		if ((flags & SYNC_CLOSE) && (vp != NULL)) {
			/*
			 * This is the shutdown case.  We just need to
			 * flush and invalidate all the pages associated
			 * with the inode.  Drop the inode lock since
			 * we can't hold it across calls to the buffer
			 * cache.
			 *
			 * We don't set the VREMAPPING bit in the vnode
			 * here, because we don't hold the vnode lock
			 * exclusively.  It doesn't really matter, though,
			 * because we only come here when we're shutting
			 * down anyway.
			 */
			xfs_iunlock(ip, XFS_ILOCK_SHARED);

			if (XFS_FORCED_SHUTDOWN(mp)) {
				VOP_TOSS_PAGES(vp, 0, -1, FI_REMAPF);
			} else {
				VOP_FLUSHINVAL_PAGES(vp, 0, -1, FI_REMAPF);
			}

			xfs_ilock(ip, XFS_ILOCK_SHARED);

		} else if ((flags & SYNC_DELWRI) && (vp != NULL)) {
			if (VN_DIRTY(vp)) {
				/* We need to have dropped the lock here,
				 * so insert a marker if we have not already
				 * done so.
				 */
				if (mount_locked) {
					IPOINTER_INSERT(ip, mp);
				}

				/*
				 * Drop the inode lock since we can't hold it
				 * across calls to the buffer cache.
				 */
				xfs_iunlock(ip, XFS_ILOCK_SHARED);
				VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1,
							fflag, FI_NONE, error);
				xfs_ilock(ip, XFS_ILOCK_SHARED);
			}

		}

		if (flags & SYNC_BDFLUSH) {
			if ((flags & SYNC_ATTR) &&
			    ((ip->i_update_core) ||
			     ((ip->i_itemp != NULL) &&
			      (ip->i_itemp->ili_format.ilf_fields != 0)))) {

				/* Insert marker and drop lock if not already
				 * done.
				 */
				if (mount_locked) {
					IPOINTER_INSERT(ip, mp);
				}

				/*
				 * We don't want the periodic flushing of the
				 * inodes by vfs_sync() to interfere with
				 * I/O to the file, especially read I/O
				 * where it is only the access time stamp
				 * that is being flushed out.  To prevent
				 * long periods where we have both inode
				 * locks held shared here while reading the
				 * inode's buffer in from disk, we drop the
				 * inode lock while reading in the inode
				 * buffer.  We have to release the buffer
				 * and reacquire the inode lock so that they
				 * are acquired in the proper order (inode
				 * locks first).  The buffer will go at the
				 * end of the lru chain, though, so we can
				 * expect it to still be there when we go
				 * for it again in xfs_iflush().
				 */
				if ((xfs_ipincount(ip) == 0) &&
				    xfs_iflock_nowait(ip)) {

					xfs_ifunlock(ip);
					xfs_iunlock(ip, XFS_ILOCK_SHARED);

					error = xfs_itobp(mp, NULL, ip,
							  &dip, &bp, 0);
					if (!error) {
						xfs_buf_relse(bp);
					} else {
						/* Bailing out, remove the
						 * marker and free it.
						 */
						XFS_MOUNT_ILOCK(mp);

						IPOINTER_REMOVE(ip, mp);

						XFS_MOUNT_IUNLOCK(mp);

						ASSERT(!(lock_flags &
							XFS_IOLOCK_SHARED));

						kmem_free(ipointer,
							sizeof(xfs_iptr_t));
						return (0);
					}

					/*
					 * Since we dropped the inode lock,
					 * the inode may have been reclaimed.
					 * Therefore, we reacquire the mount
					 * lock and check to see if we were
					 * the inode that was reclaimed.  If
					 * this happened then the ipointer
					 * marker will no longer point back
					 * at us.  In this case, move ip
					 * along to the inode after the
					 * marker, remove the marker and
					 * continue.
					 */
					XFS_MOUNT_ILOCK(mp);
					mount_locked = B_TRUE;

					if (ip != ipointer->ip_mprev) {
						IPOINTER_REMOVE(ip, mp);

						ASSERT(!vnode_refed);
						ASSERT(!(lock_flags &
							XFS_IOLOCK_SHARED));
						continue;
					}

					ASSERT(ip->i_mount == mp);

					if (xfs_ilock_nowait(ip,
						    XFS_ILOCK_SHARED) == 0) {
						ASSERT(ip->i_mount == mp);
						/*
						 * We failed to reacquire
						 * the inode lock without
						 * sleeping, so just skip
						 * the inode for now.  We
						 * clear the ILOCK bit from
						 * the lock_flags so that we
						 * won't try to drop a lock
						 * we don't hold below.
						 */
						lock_flags &= ~XFS_ILOCK_SHARED;
						IPOINTER_REMOVE(ip_next, mp);
					} else if ((xfs_ipincount(ip) == 0) &&
						   xfs_iflock_nowait(ip)) {
						ASSERT(ip->i_mount == mp);
						/*
						 * Since this is vfs_sync()
						 * calling we only flush the
						 * inode out if we can lock
						 * it without sleeping and
						 * it is not pinned.  Drop
						 * the mount lock here so
						 * that we don't hold it for
						 * too long.  We already have
						 * a marker in the list here.
						 */
						XFS_MOUNT_IUNLOCK(mp);
						mount_locked = B_FALSE;
						error = xfs_iflush(ip,
							   XFS_IFLUSH_DELWRI);
					} else {
						ASSERT(ip->i_mount == mp);
						IPOINTER_REMOVE(ip_next, mp);
					}
				}

			}

		} else {
			if ((flags & SYNC_ATTR) &&
			    ((ip->i_update_core) ||
			     ((ip->i_itemp != NULL) &&
			      (ip->i_itemp->ili_format.ilf_fields != 0)))) {
				if (mount_locked) {
					IPOINTER_INSERT(ip, mp);
				}

				if (flags & SYNC_WAIT) {
					xfs_iflock(ip);
					error = xfs_iflush(ip,
							   XFS_IFLUSH_SYNC);
				} else {
					/*
					 * If we can't acquire the flush
					 * lock, then the inode is already
					 * being flushed so don't bother
					 * waiting.  If we can lock it then
					 * do a delwri flush so we can
					 * combine multiple inode flushes
					 * in each disk write.
					 */
					if (xfs_iflock_nowait(ip)) {
						error = xfs_iflush(ip,
							   XFS_IFLUSH_DELWRI);
					}
					else if (bypassed)
						(*bypassed)++;
				}
			}
		}

		if (lock_flags != 0) {
			xfs_iunlock(ip, lock_flags);
		}

		if (vnode_refed) {
			/*
			 * If we had to take a reference on the vnode
			 * above, then wait until after we've unlocked
			 * the inode to release the reference.  This is
			 * because we can be already holding the inode
			 * lock when VN_RELE() calls xfs_inactive().
			 *
			 * Make sure to drop the mount lock before calling
			 * VN_RELE() so that we don't trip over ourselves if
			 * we have to go for the mount lock again in the
			 * inactive code.
			 */
			if (mount_locked) {
				IPOINTER_INSERT(ip, mp);
			}

			VN_RELE(vp);

			vnode_refed = B_FALSE;
		}

		if (error) {
			last_error = error;
		}

		/*
		 * bail out if the filesystem is corrupted.
		 */
		if (error == EFSCORRUPTED) {
			if (!mount_locked) {
				XFS_MOUNT_ILOCK(mp);
				IPOINTER_REMOVE(ip, mp);
			}
			XFS_MOUNT_IUNLOCK(mp);
			ASSERT(ipointer_in == B_FALSE);
			kmem_free(ipointer, sizeof(xfs_iptr_t));
			return XFS_ERROR(error);
		}

		/* Let other threads have a chance at the mount lock
		 * if we have looped many times without dropping the
		 * lock.
		 */
		if ((++preempt & XFS_PREEMPT_MASK) == 0) {
			if (mount_locked) {
				IPOINTER_INSERT(ip, mp);
			}
		}

		if (mount_locked == B_FALSE) {
			XFS_MOUNT_ILOCK(mp);
			mount_locked = B_TRUE;
			IPOINTER_REMOVE(ip, mp);
			continue;
		}

		ASSERT(ipointer_in == B_FALSE);
		ip = ip->i_mnext;

	} while (ip != mp->m_inodes);

	XFS_MOUNT_IUNLOCK(mp);

	ASSERT(ipointer_in == B_FALSE);

	kmem_free(ipointer, sizeof(xfs_iptr_t));

	return XFS_ERROR(last_error);
}
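/*
 * A self-contained sketch (assumed types and names, not part of the
 * original file) of the marker-record technique that IPOINTER_INSERT and
 * IPOINTER_REMOVE implement above: a dummy node is spliced in after the
 * current position before the list lock is dropped, so the walk can
 * resume from the marker even if neighbouring nodes are freed in the
 * meantime.  A NULL payload identifies a marker, mirroring the
 * i_mount == NULL test in the loop above.
 */
#ifdef XFS_DOC_SKETCH
struct doc_node {
	struct doc_node	*next;
	struct doc_node	*prev;
	void		*payload;	/* NULL => this is a marker */
};

/* splice marker in immediately after pos; caller holds the list lock */
static void
doc_marker_insert(struct doc_node *pos, struct doc_node *marker)
{
	marker->payload = NULL;
	marker->next = pos->next;
	marker->prev = pos;
	pos->next = marker;
	marker->next->prev = marker;
}

/* unlink marker and return the node to resume from; list lock held */
static struct doc_node *
doc_marker_remove(struct doc_node *marker)
{
	struct doc_node	*resume = marker->next;

	marker->prev->next = marker->next;
	marker->next->prev = marker->prev;
	return resume;
}
#endif	/* XFS_DOC_SKETCH */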