Example #1
/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * runs doreti.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Check for exit-race.  If one lwp exits the process concurrent with
	 * another lwp creating a new thread, the two operations may cross
	 * each other resulting in the newly-created lwp not receiving a
	 * KILL signal.
	 */
	if (p->p_flags & P_WEXIT) {
		lwpsignal(p, lp, SIGKILL);
	}

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
#endif
	lp->lwp_flags |= LWP_PASSIVE_ACQ;
	userexit(lp);
	lp->lwp_flags &= ~LWP_PASSIVE_ACQ;
}
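The passive-release hook mentioned in the comment above deserves a concrete shape. Below is a minimal sketch of the release function that userenter() installs on the thread, modeled on DragonFly's passive_release(); the function name and field accesses are illustrative assumptions, not code taken from this example.

/*
 * Hypothetical sketch of the passive-release hook installed by
 * userenter().  If the thread blocks while holding the user
 * scheduler's current-process designation, LWKT calls td_release,
 * which hands the designation back rather than holding it across
 * the sleep.  Details may differ from the real implementation.
 */
static void
example_passive_release(struct thread *td)
{
	struct lwp *lp = td->td_lwp;

	td->td_release = NULL;		/* one-shot hook */
	lp->lwp_proc->p_usched->release_curproc(lp);
}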
Example #2
/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * runs doreti.
 *
 * NOTE: The mplock is not held at any point.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
#endif
	lp->lwp_flags |= LWP_PASSIVE_ACQ;
	userexit(lp);
	lp->lwp_flags &= ~LWP_PASSIVE_ACQ;
}
Example #3
/*
 * Lower the emulated Windows IRQL back to a previously saved level
 * by restoring the corresponding LWKT thread priority.
 */
__stdcall __regcall void
hal_lower_irql(REGARGS1(uint8_t oldirql))
{
	if (oldirql == DISPATCH_LEVEL)
		return;

	if (hal_irql() != DISPATCH_LEVEL)
		panic("IRQL_NOT_GREATER_THAN");

	lwkt_setpri_self(oldirql);
}
Example #4
/*
 * Raise the emulated Windows IRQL to DISPATCH_LEVEL, returning the
 * previous level so the caller can restore it later with
 * hal_lower_irql().
 */
__stdcall __regcall uint8_t
hal_raise_irql(REGARGS1(uint8_t irql))
{
	uint8_t			oldirql;

	if (irql < hal_irql())
		panic("IRQL_NOT_LESS_THAN");

	if (hal_irql() == DISPATCH_LEVEL)
		return(DISPATCH_LEVEL);

	oldirql = lwkt_getpri_self();
	lwkt_setpri_self(TDPRI_INT_HIGH);

	return(oldirql);
}
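Examples #3 and #4 form a raise/restore pair built on LWKT priorities. A typical caller brackets work that must run at DISPATCH_LEVEL as in the sketch below; the surrounding function is illustrative, and only the two hal_*_irql() calls come from the examples above.

static void
example_dispatch_section(void)
{
	uint8_t oldirql;

	/* Boost the emulated IRQL, saving the prior level. */
	oldirql = hal_raise_irql(DISPATCH_LEVEL);

	/* ... work that requires DISPATCH_LEVEL goes here ... */

	/*
	 * Restore the saved level.  If we were already at
	 * DISPATCH_LEVEL this returns immediately.
	 */
	hal_lower_irql(oldirql);
}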
Example #5
/*
 * Go through the rigmarole of shutting down...
 * This used to be in machdep.c but I'll be damned if I could see
 * anything machine-dependent in it.
 */
static void
boot(int howto)
{
	/*
	 * Get rid of any user scheduler baggage and then give
	 * us a high priority.
	 */
	if (curthread->td_release)
		curthread->td_release(curthread);
	lwkt_setpri_self(TDPRI_MAX);

	/* collect extra flags that shutdown_nice might have set */
	howto |= shutdown_howto;

#ifdef SMP
	/*
	 * We really want to shutdown on the BSP.  Subsystems such as ACPI
	 * can't power-down the box otherwise.
	 */
	if (smp_active_mask > 1) {
		kprintf("boot() called on cpu#%d\n", mycpu->gd_cpuid);
	}
	if (panicstr == NULL && mycpu->gd_cpuid != 0) {
		kprintf("Switching to cpu #0 for shutdown\n");
		lwkt_setcpu_self(globaldata_find(0));
	}
#endif
	/*
	 * Do any callouts that should be done BEFORE syncing the filesystems.
	 */
	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

	/*
	 * Try to get rid of any remaining FS references.  The calling
	 * process, proc0, and init may still hold references.  The
	 * VFS cache subsystem may still hold a root reference to root.
	 *
	 * XXX this needs work.  We really need to SIGSTOP all remaining
	 * processes in order to avoid blowups due to proc0's filesystem
	 * references going away.  For now just make sure that the init
	 * process is stopped.
	 */
	if (panicstr == NULL) {
		shutdown_cleanup_proc(curproc);
		shutdown_cleanup_proc(&proc0);
		if (initproc) {
			if (initproc != curproc) {
				ksignal(initproc, SIGSTOP);
				tsleep(boot, 0, "shutdn", hz / 20);
			}
			shutdown_cleanup_proc(initproc);
		}
		vfs_cache_setroot(NULL, NULL);
	}

	/* 
	 * Now sync filesystems
	 */
	if (!cold && (howto & RB_NOSYNC) == 0 && waittime < 0) {
		int iter, nbusy, pbusy;

		waittime = 0;
		kprintf("\nsyncing disks... ");

		sys_sync(NULL);	/* YYY was sync(&proc0, NULL). why proc0 ? */

		/*
		 * With soft updates, some buffers that are
		 * written will be remarked as dirty until other
		 * buffers are written.
		 */
		for (iter = pbusy = 0; iter < 20; iter++) {
			nbusy = scan_all_buffers(shutdown_busycount1, NULL);
			if (nbusy == 0)
				break;
			kprintf("%d ", nbusy);
			if (nbusy < pbusy)
				iter = 0;
			pbusy = nbusy;
			/*
			 * XXX:
			 * Process soft update work queue if buffers don't sync
			 * after 6 iterations by permitting the syncer to run.
			 */
			if (iter > 5)
				bio_ops_sync(NULL);
 
			sys_sync(NULL); /* YYY was sync(&proc0, NULL). why proc0 ? */
			tsleep(boot, 0, "shutdn", hz * iter / 20 + 1);
		}
		kprintf("\n");
		/*
		 * Count only busy local buffers to prevent forcing 
		 * a fsck if we're just a client of a wedged NFS server
		 */
		nbusy = scan_all_buffers(shutdown_busycount2, NULL);
		if (nbusy) {
			/*
			 * Failed to sync all blocks. Indicate this and don't
			 * unmount filesystems (thus forcing an fsck on reboot).
			 */
			kprintf("giving up on %d buffers\n", nbusy);
#ifdef DDB
			if (debugger_on_panic)
				Debugger("busy buffer problem");
#endif /* DDB */
			tsleep(boot, 0, "shutdn", hz * 5 + 1);
		} else {
			kprintf("done\n");
			/*
			 * Unmount filesystems
			 */
			if (panicstr == NULL)
				vfs_unmountall();
		}
		tsleep(boot, 0, "shutdn", hz / 10 + 1);
	}

	print_uptime();

	/*
	 * Dump before doing post_sync shutdown ops
	 */
	crit_enter();
	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold) {
		dumpsys();
	}

	/*
	 * Ok, now do things that assume all filesystem activity has
	 * been completed.  This will also call the device shutdown
	 * methods.
	 */
	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

	/* Now that we're going to really halt the system... */
	EVENTHANDLER_INVOKE(shutdown_final, howto);

	for(;;) ;	/* safety against shutdown_reset not working */
	/* NOTREACHED */
}
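scan_all_buffers() applies a per-buffer predicate and totals its return values, so shutdown_busycount1() and shutdown_busycount2() are small callbacks that report whether a buffer still counts as busy. A hedged sketch of that callback shape follows; the specific flag tests are assumptions modeled on the BSD buffer cache, not taken from this example.

/*
 * Illustrative busy-buffer predicate as scan_all_buffers() would
 * invoke it: return nonzero for each buffer that should keep the
 * sync loop spinning.  The exact tests in the real callbacks may
 * differ.
 */
static int
example_busycount(struct buf *bp, void *info __unused)
{
	if ((bp->b_flags & B_INVAL) == 0 && BUF_REFCNT(bp) > 0)
		return (1);
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI)
		return (1);
	return (0);
}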
Example #6
/*
 * MPSAFE thread
 */
static void
vm_pagezero(void *arg)
{
	vm_page_t m = NULL;
	struct lwbuf *lwb = NULL;
	struct lwbuf lwb_cache;
	enum zeroidle_state state = STATE_IDLE;
	char *pg = NULL;
	int npages = 0;
	int sleep_time;	
	int i = 0;
	int cpu = (int)(intptr_t)arg;
	int zero_state = 0;

	/*
	 * Adjust thread parameters before entering our loop.  The thread
	 * is started with the MP lock held and with normal kernel thread
	 * priority.
	 *
	 * Also put us on the last cpu for now.
	 *
	 * For now leave the MP lock held, the VM routines cannot be called
	 * with it released until tokenization is finished.
	 */
	lwkt_setpri_self(TDPRI_IDLE_WORK);
	lwkt_setcpu_self(globaldata_find(cpu));
	sleep_time = DEFAULT_SLEEP_TIME;

	/*
	 * Loop forever
	 */
	for (;;) {
		int zero_count;

		switch(state) {
		case STATE_IDLE:
			/*
			 * Wait for work.
			 */
			tsleep(&zero_state, 0, "pgzero", sleep_time);
			if (vm_page_zero_check(&zero_count, &zero_state))
				npages = idlezero_rate / 10;
			sleep_time = vm_page_zero_time(zero_count);
			if (npages)
				state = STATE_GET_PAGE;	/* Fallthrough */
			break;
		case STATE_GET_PAGE:
			/*
			 * Acquire page to zero
			 */
			if (--npages == 0) {
				state = STATE_IDLE;
			} else {
				m = vm_page_free_fromq_fast();
				if (m == NULL) {
					state = STATE_IDLE;
				} else {
					state = STATE_ZERO_PAGE;
					lwb = lwbuf_alloc(m, &lwb_cache);
					pg = (char *)lwbuf_kva(lwb);
					i = 0;
				}
			}
			break;
		case STATE_ZERO_PAGE:
			/*
			 * Zero-out the page
			 */
			while (i < PAGE_SIZE) {
				if (idlezero_nocache == 1)
					bzeront(&pg[i], IDLEZERO_RUN);
				else
					bzero(&pg[i], IDLEZERO_RUN);
				i += IDLEZERO_RUN;
				lwkt_yield();
			}
			state = STATE_RELEASE_PAGE;
			break;
		case STATE_RELEASE_PAGE:
			lwbuf_free(lwb);
			vm_page_flag_set(m, PG_ZERO);
			vm_page_free_toq(m);
			state = STATE_GET_PAGE;
			++idlezero_count;	/* non-locked, SMP race ok */
			break;
		}
		lwkt_yield();
	}
}
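For context, a worker like vm_pagezero() is normally brought up once per CPU at boot, with the CPU id passed through the opaque argument that the function casts back out at the top. A minimal spawn sketch, assuming DragonFly's kthread_create() signature; the function name and thread-name format are illustrative.

/*
 * Hypothetical per-cpu spawn loop.  Each thread receives its cpu id
 * as the opaque argument, matching the (int)(intptr_t)arg recovery
 * at the top of vm_pagezero().
 */
static void
example_pagezero_start(void)
{
	int i;

	for (i = 0; i < ncpus; ++i) {
		kthread_create(vm_pagezero, (void *)(intptr_t)i,
			       NULL, "pagezero/%d", i);
	}
}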