Example 1
/*
 * Go through the rigmarole of shutting down.  This used to be in
 * machdep.c, but I'll be damned if I could see anything machine
 * dependent in it.
 */
static void
boot(int howto)
{
	/*
	 * Get rid of any user scheduler baggage and then give
	 * us a high priority.
	 */
	if (curthread->td_release)
		curthread->td_release(curthread);
	lwkt_setpri_self(TDPRI_MAX);

	/* collect extra flags that shutdown_nice might have set */
	howto |= shutdown_howto;

#ifdef SMP
	/*
	 * We really want to shutdown on the BSP.  Subsystems such as ACPI
	 * can't power-down the box otherwise.
	 */
	if (smp_active_mask > 1) {
		kprintf("boot() called on cpu#%d\n", mycpu->gd_cpuid);
	}
	if (panicstr == NULL && mycpu->gd_cpuid != 0) {
		kprintf("Switching to cpu #0 for shutdown\n");
		lwkt_setcpu_self(globaldata_find(0));
	}
#endif
	/*
	 * Do any callouts that should be done BEFORE syncing the filesystems.
	 */
	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

	/*
	 * Try to get rid of any remaining FS references.  The calling
	 * process, proc0, and init may still hold references.  The
	 * VFS cache subsystem may still hold a reference to the root vnode.
	 *
	 * XXX this needs work.  We really need to SIGSTOP all remaining
	 * processes in order to avoid blowups due to proc0's filesystem
	 * references going away.  For now just make sure that the init
	 * process is stopped.
	 */
	if (panicstr == NULL) {
		shutdown_cleanup_proc(curproc);
		shutdown_cleanup_proc(&proc0);
		if (initproc) {
			if (initproc != curproc) {
				ksignal(initproc, SIGSTOP);
				tsleep(boot, 0, "shutdn", hz / 20);
			}
			shutdown_cleanup_proc(initproc);
		}
		vfs_cache_setroot(NULL, NULL);
	}

	/* 
	 * Now sync filesystems
	 */
	if (!cold && (howto & RB_NOSYNC) == 0 && waittime < 0) {
		int iter, nbusy, pbusy;

		waittime = 0;
		kprintf("\nsyncing disks... ");

		sys_sync(NULL);	/* YYY was sync(&proc0, NULL). why proc0 ? */

		/*
		 * With soft updates, some buffers that are
		 * written will be remarked as dirty until other
		 * buffers are written.
		 */
		for (iter = pbusy = 0; iter < 20; iter++) {
			nbusy = scan_all_buffers(shutdown_busycount1, NULL);
			if (nbusy == 0)
				break;
			kprintf("%d ", nbusy);
			if (nbusy < pbusy)
				iter = 0;
			pbusy = nbusy;
			/*
			 * XXX:
			 * Process soft update work queue if buffers don't sync
			 * after 6 iterations by permitting the syncer to run.
			 */
			if (iter > 5)
				bio_ops_sync(NULL);
 
			sys_sync(NULL); /* YYY was sync(&proc0, NULL). why proc0 ? */
			tsleep(boot, 0, "shutdn", hz * iter / 20 + 1);
		}
		kprintf("\n");
		/*
		 * Count only busy local buffers to prevent forcing
		 * an fsck if we're just a client of a wedged NFS server.
		 */
		nbusy = scan_all_buffers(shutdown_busycount2, NULL);
		if (nbusy) {
			/*
			 * Failed to sync all blocks. Indicate this and don't
			 * unmount filesystems (thus forcing an fsck on reboot).
			 */
			kprintf("giving up on %d buffers\n", nbusy);
#ifdef DDB
			if (debugger_on_panic)
				Debugger("busy buffer problem");
#endif /* DDB */
			tsleep(boot, 0, "shutdn", hz * 5 + 1);
		} else {
			kprintf("done\n");
			/*
			 * Unmount filesystems
			 */
			if (panicstr == NULL)
				vfs_unmountall();
		}
		tsleep(boot, 0, "shutdn", hz / 10 + 1);
	}

	print_uptime();

	/*
	 * Dump before doing post_sync shutdown ops
	 */
	crit_enter();
	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold) {
		dumpsys();
	}

	/*
	 * Ok, now do things that assume all filesystem activity has
	 * been completed.  This will also call the device shutdown
	 * methods.
	 */
	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

	/* Now that we're going to really halt the system... */
	EVENTHANDLER_INVOKE(shutdown_final, howto);

	for(;;) ;	/* safety against shutdown_reset not working */
	/* NOTREACHED */
}
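
The EVENTHANDLER_INVOKE() calls above (shutdown_pre_sync, shutdown_post_sync, shutdown_final) are the hooks other subsystems use to participate in the shutdown sequence; the syncer in Example 2 registers itself on shutdown_pre_sync the same way. As a rough sketch only, with the mydev_* names invented for illustration, a driver that wants to quiesce its hardware once filesystem activity is finished might register on shutdown_final:

/*
 * Illustrative sketch, not actual kernel code: mydev_shutdown_final()
 * and struct mydev_softc are made-up names.  A shutdown event handler
 * receives the argument given at registration plus the howto flags.
 */
static void
mydev_shutdown_final(void *arg, int howto)
{
	struct mydev_softc *sc = arg;

	if ((howto & RB_HALT) == 0)
		mydev_stop_dma(sc);	/* assumed driver helper */
}

	/* at device attach time, hypothetically: */
	EVENTHANDLER_REGISTER(shutdown_final, mydev_shutdown_final, sc,
			      SHUTDOWN_PRI_DEFAULT);

Handlers on an event list are invoked in priority order, which is why syncer_thread() below registers with SHUTDOWN_PRI_LAST: the syncer is stopped only after the other pre-sync handlers have run.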
Example 2
/*
 * System filesystem synchronizer daemon.
 */
static void
syncer_thread(void *_ctx)
{
	struct thread *td = curthread;
	struct syncer_ctx *ctx = _ctx;
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int *sc_flagsp;
	int sc_flags;
	int vnodes_synced = 0;

	/*
	 * syncer0 runs until system shutdown; per-filesystem syncers are
	 * terminated on filesystem unmount.
	 */
	if (ctx == &syncer_ctx0) 
		EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
				      SHUTDOWN_PRI_LAST);
	for (;;) {
		kproc_suspend_loop();

		starttime = time_second;
		lwkt_gettoken(&ctx->sc_token);

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		slp = &ctx->syncer_workitem_pending[ctx->syncer_delayno];
		ctx->syncer_delayno += 1;
		if (ctx->syncer_delayno == syncer_maxdelay)
			ctx->syncer_delayno = 0;

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
				VOP_FSYNC(vp, MNT_LAZY, 0);
				vput(vp);
				vnodes_synced++;
			}

			/*
			 * vp is stale but can still be used if we can
			 * verify that it remains at the head of the list.
			 * Be careful not to try to get vp->v_token as
			 * vp can become stale if this blocks.
			 *
			 * If the vp is still at the head of the list, we
			 * were unable to completely flush it, so move it to
			 * a later slot to give other vnodes a fair shot.
			 *
			 * Note that v_tag VT_VFS vnodes can remain on the
			 * worklist with no dirty blocks, but sync_fsync()
			 * moves them to a later slot so we will never see
			 * them here.
			 *
			 * It is possible to race a vnode with no dirty
			 * buffers being removed from the list.  If this
			 * occurs we will move the vnode in the synclist
			 * and then the other thread will remove it.  Do
			 * not try to remove it here.
			 */
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, syncdelay);
		}

		sc_flags = ctx->sc_flags;

		/* Exit on unmount */
		if (sc_flags & SC_FLAG_EXIT)
			break;

		lwkt_reltoken(&ctx->sc_token);

		/*
		 * Do sync processing for each mount.
		 */
		if (ctx->sc_mp || (sc_flags & SC_FLAG_BIOOPS_ALL))
			bio_ops_sync(ctx->sc_mp);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process. A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP. Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		if (ctx == &syncer_ctx0 && rushjob > 0) {
			atomic_subtract_int(&rushjob, 1);
			continue;
		}
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait. Otherwise start right over
		 * again. We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(ctx, 0, "syncer", hz);
	}

	/*
	 * Unmount/exit path for per-filesystem syncers; sc_token held
	 */
	ctx->sc_flags |= SC_FLAG_DONE;
	sc_flagsp = &ctx->sc_flags;
	lwkt_reltoken(&ctx->sc_token);
	wakeup(sc_flagsp);

	kthread_exit();
}
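
For context, vn_syncer_add(), used above to reschedule a vnode that could not be flushed completely, works against the same wheel this loop drains: syncer_workitem_pending[] is an array of syncer_maxdelay one-second buckets, and syncer_delayno names the bucket currently being processed. A simplified sketch of the bucket arithmetic, with the helper name invented for illustration (the real routine also takes the token and moves the vnode between bucket lists):

/*
 * Illustrative helper, not the real vn_syncer_add(): pick the bucket
 * for a vnode that should be revisited roughly "delay" seconds from
 * now.  Each pass of the loop above advances syncer_delayno by one,
 * so an entry placed "delay" buckets ahead reaches the front of the
 * wheel after about that many one-second passes.
 */
static int
syncer_slot(struct syncer_ctx *ctx, int delay)
{
	if (delay > syncer_maxdelay - 2)	/* stay inside the wheel */
		delay = syncer_maxdelay - 2;
	return ((ctx->syncer_delayno + delay) % syncer_maxdelay);
}

This is why the loop re-adds a partially flushed vnode with vn_syncer_add(vp, syncdelay) instead of leaving it in the current bucket: the vnode gets another pass a full syncdelay seconds later, after other vnodes have had their turn.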