Example 1
void
vsetobjdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VOBJDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VOBJDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}
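The pattern here is a cheap unlocked test of VOBJDIRTY on the hot path, followed by taking the per-mount sc_token and re-checking VONWORKLST before queueing the vnode. Below is a minimal userland sketch of the same idea, not DragonFly code: the node type, flag names, worklist, and the pthread mutex standing in for the LWKT token are all made up for illustration.

#include <pthread.h>
#include <stdio.h>

#define F_DIRTY      0x01	/* stand-in for VOBJDIRTY */
#define F_ONWORKLIST 0x02	/* stand-in for VONWORKLST */

struct node {
	int flags;
	struct node *next;		/* worklist linkage */
};

static pthread_mutex_t worklist_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *worklist;

/*
 * Cheap unlocked test first: once the dirty flag is set, later callers
 * return without touching the lock.  Only on the 0->1 transition do we
 * take the lock and re-check worklist membership before queueing.
 * (The kernel version sets the flag with vsetflags() before taking the
 * per-mount token; the sketch simply does it under the lock.)
 */
static void
set_dirty(struct node *np)
{
	if (np->flags & F_DIRTY)
		return;
	pthread_mutex_lock(&worklist_lock);
	np->flags |= F_DIRTY;
	if ((np->flags & F_ONWORKLIST) == 0) {
		np->flags |= F_ONWORKLIST;
		np->next = worklist;
		worklist = np;
	}
	pthread_mutex_unlock(&worklist_lock);
}

int
main(void)
{
	struct node a = { 0, NULL };

	set_dirty(&a);
	set_dirty(&a);	/* second call returns before the lock */
	printf("flags=0x%x on_worklist=%d\n", a.flags, worklist == &a);
	return (0);
}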
Example 2
/*
 * Create a new filesystem syncer vnode for the specified mount point.
 * This vnode is placed on the worklist and is responsible for sync'ing
 * the filesystem.
 *
 * NOTE: read-only mounts are also placed on the worklist.  The filesystem
 * sync code is also responsible for cleaning up vnodes.
 */
int
vfs_allocate_syncvnode(struct mount *mp)
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	error = getspecialvnode(VT_VFS, mp, &sync_vnode_vops_p, &vp, 0, 0);
	if (error) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_type = VNON;
	/*
	 * Place the vnode onto the syncer worklist. We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > SYNCER_MAXDELAY) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = SYNCER_MAXDELAY / 2;
			incr = SYNCER_MAXDELAY;
		}
		next = start;
	}

	/*
	 * Only put the syncer vnode onto the syncer list if we have a
	 * syncer thread.  Some VFS's (aka NULLFS) don't need a syncer
	 * thread.
	 */
	if (mp->mnt_syncer_ctx)
		vn_syncer_add(vp, syncdelay > 0 ? next % syncdelay : 0);

	/*
	 * The mnt_syncer field inherits the vnode reference, which is
	 * held until later decommissioning.
	 */
	mp->mnt_syncer = vp;
	vx_unlock(vp);
	return (0);
}
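The next/incr/start arithmetic above scatters successive syncer vnodes across the wheel so that per-filesystem syncs do not all fire in the same second. The standalone sketch below replays that arithmetic; the SYNCER_MAXDELAY and syncdelay values are assumptions chosen only for illustration.

#include <stdio.h>

#define SYNCER_MAXDELAY	32	/* assumed value for illustration */

/*
 * Reproduce the slot-scattering arithmetic from vfs_allocate_syncvnode()
 * to show where the first few syncer vnodes land.  start/incr/next begin
 * at zero, exactly as the function's static variables do.
 */
int
main(void)
{
	static long start, incr, next;
	int syncdelay = 30;	/* assumed default sync delay */
	int i;

	for (i = 0; i < 8; ++i) {
		next += incr;
		if (next == 0 || next > SYNCER_MAXDELAY) {
			start /= 2;
			incr /= 2;
			if (start == 0) {
				start = SYNCER_MAXDELAY / 2;
				incr = SYNCER_MAXDELAY;
			}
			next = start;
		}
		printf("mount %d -> slot %ld\n", i,
		       syncdelay > 0 ? next % syncdelay : 0);
	}
	return (0);
}

With those assumed values the first mounts land on slots 16, 8, 24, 4, 12, 20, 28, 2: a roughly even spread of firing times even when many filesystems are mounted at once.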
Example 3
/*
 * Do a lazy sync of the filesystem.
 *
 * sync_fsync { struct vnode *a_vp, int a_waitfor }
 */
static int
sync_fsync(struct vop_fsync_args *ap)
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if ((ap->a_waitfor & MNT_LAZY) == 0)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list, and freeing vnodes which have
	 * no refs and whose VM objects are empty.  vfs_msync() handles
	 * the VM issues and must be called whether the mount is readonly
	 * or not.
	 */
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return (0);
	if (mp->mnt_flag & MNT_RDONLY) {
		vfs_msync(mp, MNT_NOWAIT);
	} else {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;	/* ZZZ hack */
		vfs_msync(mp, MNT_NOWAIT);
		VFS_SYNC(mp, MNT_NOWAIT | MNT_LAZY);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
	}
	vfs_unbusy(mp);
	return (0);
}
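The read/write branch above uses a small save/clear/restore dance on MNT_ASYNC so the lazy pass really pushes data out, then puts the mount back the way it found it. A trivial standalone sketch of just that idiom, with made-up flag names:

#include <stdio.h>

#define M_RDONLY 0x01	/* stand-in for MNT_RDONLY */
#define M_ASYNC  0x02	/* stand-in for MNT_ASYNC */

/*
 * Remember whether the async bit was set, clear it for the duration of
 * the sync pass, then restore it afterwards.
 */
static void
lazy_sync_pass(int *mnt_flags)
{
	int asyncflag;

	if (*mnt_flags & M_RDONLY) {
		printf("read-only: msync only\n");
		return;
	}
	asyncflag = *mnt_flags & M_ASYNC;
	*mnt_flags &= ~M_ASYNC;
	printf("sync pass with flags=0x%x\n", *mnt_flags);
	if (asyncflag)
		*mnt_flags |= M_ASYNC;
}

int
main(void)
{
	int flags = M_ASYNC;

	lazy_sync_pass(&flags);
	printf("flags restored to 0x%x\n", flags);
	return (0);
}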
Example 4
/*
 * System filesystem synchronizer daemon.
 */
static void
syncer_thread(void *_ctx)
{
	struct thread *td = curthread;
	struct syncer_ctx *ctx = _ctx;
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int *sc_flagsp;
	int sc_flags;
	int vnodes_synced = 0;

	/*
	 * syncer0 runs till system shutdown; per-filesystem syncers are
	 * terminated on filesystem unmount
	 */
	if (ctx == &syncer_ctx0) 
		EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
				      SHUTDOWN_PRI_LAST);
	for (;;) {
		kproc_suspend_loop();

		starttime = time_second;
		lwkt_gettoken(&ctx->sc_token);

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		slp = &ctx->syncer_workitem_pending[ctx->syncer_delayno];
		ctx->syncer_delayno += 1;
		if (ctx->syncer_delayno == syncer_maxdelay)
			ctx->syncer_delayno = 0;

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
				VOP_FSYNC(vp, MNT_LAZY, 0);
				vput(vp);
				vnodes_synced++;
			}

			/*
			 * vp is stale but can still be used if we can
			 * verify that it remains at the head of the list.
			 * Be careful not to try to get vp->v_token as
			 * vp can become stale if this blocks.
			 *
			 * If the vp is still at the head of the list we
			 * were unable to completely flush it, so move it
			 * to a later slot to give other vnodes a fair shot.
			 *
			 * Note that v_tag VT_VFS vnodes can remain on the
			 * worklist with no dirty blocks, but sync_fsync()
			 * moves them to a later slot so we will never see
			 * them
			 * here.
			 *
			 * It is possible to race a vnode with no dirty
			 * buffers being removed from the list.  If this
			 * occurs we will move the vnode in the synclist
			 * and then the other thread will remove it.  Do
			 * not try to remove it here.
			 */
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, syncdelay);
		}

		sc_flags = ctx->sc_flags;

		/* Exit on unmount */
		if (sc_flags & SC_FLAG_EXIT)
			break;

		lwkt_reltoken(&ctx->sc_token);

		/*
		 * Do sync processing for each mount.
		 */
		if (ctx->sc_mp || sc_flags & SC_FLAG_BIOOPS_ALL)
			bio_ops_sync(ctx->sc_mp);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process. A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP. Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		if (ctx == &syncer_ctx0 && rushjob > 0) {
			atomic_subtract_int(&rushjob, 1);
			continue;
		}
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait. Otherwise start right over
		 * again. We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(ctx, 0, "syncer", hz);
	}

	/*
	 * Unmount/exit path for per-filesystem syncers; sc_token held
	 */
	ctx->sc_flags |= SC_FLAG_DONE;
	sc_flagsp = &ctx->sc_flags;
	lwkt_reltoken(&ctx->sc_token);
	wakeup(sc_flagsp);

	kthread_exit();
}
Example 5
/*
 * This is very similar to vmntvnodescan() but it only scans the
 * vnodes on the syncer list.  VFS's which support faster VFS_SYNC
 * operations use the VISDIRTY flag on the vnode to ensure that vnodes
 * with dirty inodes are added to the syncer in addition to vnodes
 * with dirty buffers, and can use this function instead of vmntvnodescan().
 * 
 * This is important when a system has millions of vnodes.
 */
int
vsyncscan(struct mount *mp, int vmsc_flags,
	  int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
	  void *data)
{
	struct syncer_ctx *ctx;
	struct synclist *slp;
	struct vnode *vp;
	int b;
	int i;
	int lkflags;

	if (vmsc_flags & VMSC_NOWAIT)
		lkflags = LK_NOWAIT;
	else
		lkflags = 0;

	/*
	 * Syncer list context.  This API requires a dedicated syncer thread.
	 * (MNTK_THR_SYNC).
	 */
	KKASSERT(mp->mnt_kern_flag & MNTK_THR_SYNC);
	ctx = mp->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	/*
	 * Setup for loop.  Allow races against the syncer thread but
	 * require that the syncer thread not be lazy if we were told
	 * not to be lazy.
	 */
	b = ctx->syncer_delayno & ctx->syncer_mask;
	i = b;
	if ((vmsc_flags & VMSC_NOWAIT) == 0)
		++ctx->syncer_forced;

	do {
		slp = &ctx->syncer_workitem_pending[i];

		while ((vp = LIST_FIRST(slp)) != NULL) {
			KKASSERT(vp->v_mount == mp);
			if (vmsc_flags & VMSC_GETVP) {
				if (vget(vp, LK_EXCLUSIVE | lkflags) == 0) {
					slowfunc(mp, vp, data);
					vput(vp);
				}
			} else if (vmsc_flags & VMSC_GETVX) {
				vx_get(vp);
				slowfunc(mp, vp, data);
				vx_put(vp);
			} else {
				vhold(vp);
				slowfunc(mp, vp, data);
				vdrop(vp);
			}
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, -(i + syncdelay));
		}
		i = (i + 1) & ctx->syncer_mask;
	} while (i != b);

	if ((vmsc_flags & VMSC_NOWAIT) == 0)
		--ctx->syncer_forced;
	lwkt_reltoken(&ctx->sc_token);
	return(0);
}
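The scan starts at the bucket the syncer thread would service next and walks the whole wheel exactly once, using the power-of-two syncer_mask to wrap. A minimal sketch of just that traversal; the bucket count and names are illustrative, not the kernel's values.

#include <stdio.h>

#define WHEEL_SLOTS 8			/* must be a power of two */
#define WHEEL_MASK  (WHEEL_SLOTS - 1)	/* stand-in for syncer_mask */

/*
 * Visit every bucket exactly once, starting at the bucket the syncer
 * would process next and wrapping with the mask, the same way
 * vsyncscan() walks syncer_workitem_pending[].
 */
static void
scan_all_buckets(int delayno)
{
	int b = delayno & WHEEL_MASK;
	int i = b;

	do {
		printf("visiting bucket %d\n", i);
		/* process every entry queued in bucket i here */
		i = (i + 1) & WHEEL_MASK;
	} while (i != b);
}

int
main(void)
{
	scan_all_buckets(5);	/* start mid-wheel to show the wrap */
	return (0);
}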