Example No. 1
void
cpu_lwp_exit(void)
{
	struct thread *td = curthread;
	struct pcb *pcb;

	npxexit();
	pcb = td->td_pcb;

	/* Some i386 functionality was dropped */
	KKASSERT(pcb->pcb_ext == NULL);

	/*
	 * disable all hardware breakpoints
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		reset_dbregs();
		pcb->pcb_flags &= ~PCB_DBREGS;
	}
	td->td_gd->gd_cnt.v_swtch++;

	crit_enter_quick(td);
	if (td->td_flags & TDF_TSLEEPQ)
		tsleep_remove(td);
	lwkt_deschedule_self(td);
	lwkt_remove_tdallq(td);
	cpu_thread_exit();
}
Example No. 2
/*
 * Userland override of lwkt_exit. The only difference is
 * the manipulation of gd->gd_num_threads.
 */
void
lwkt_exit(void)
{
    thread_t td = curthread;
    globaldata_t gd = mycpu;

    if (td->td_flags & TDF_VERBOSE)
	printf("kthread %p %s has exited\n", td, td->td_comm);
    crit_enter();
    lwkt_deschedule_self(td);
    ++gd->gd_tdfreecount;
    if (td->td_flags & TDF_SYSTHREAD)
	--gd->gd_sys_threads;
    --gd->gd_num_threads;
    TAILQ_INSERT_TAIL(&gd->gd_tdfreeq, td, td_threadq);
    cpu_thread_exit();
}
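
The override above only changes the gd_num_threads / gd_sys_threads accounting; the termination path itself is the standard LWKT one. Below is a minimal, hypothetical sketch of a thread body ending through lwkt_exit() — the example_thread name and the chosen headers are illustrative assumptions, not taken from the source.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/thread.h>

static void
example_thread(void *arg __unused)
{
	/* ... per-thread work ... */

	/*
	 * lwkt_exit() never returns: it deschedules the current thread
	 * and switches away, just as the override above does after its
	 * per-cpu thread-count bookkeeping.
	 */
	lwkt_exit();
}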
Example No. 3
void
cpu_lwp_exit(void)
{
	struct thread *td = curthread;
	struct pcb *pcb;
	struct pcb_ext *ext;

	/*
	 * If we were using a private TSS do a forced-switch to ourselves
	 * to switch back to the common TSS before freeing it.
	 */
	pcb = td->td_pcb;
	if ((ext = pcb->pcb_ext) != NULL) {
		crit_enter();
		pcb->pcb_ext = NULL;
		lwkt_switch_return(td->td_switch(td));
		crit_exit();
		kmem_free(&kernel_map, (vm_offset_t)ext, ctob(IOPAGES + 1));
	}
	user_ldt_free(pcb);
	if (pcb->pcb_flags & PCB_DBREGS) {
		/*
		 * disable all hardware breakpoints
		 */
		reset_dbregs();
		pcb->pcb_flags &= ~PCB_DBREGS;
	}
	td->td_gd->gd_cnt.v_swtch++;

	crit_enter_quick(td);
	if (td->td_flags & TDF_TSLEEPQ)
		tsleep_remove(td);
	lwkt_deschedule_self(td);
	lwkt_remove_tdallq(td);
	cpu_thread_exit();
}
Example No. 4
/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
	int bootopt, newpanic;
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;
	__va_list ap;
	static char buf[256];

#ifdef SMP
	/*
	 * If a panic occurs on multiple cpus before the first is able to
	 * halt the other cpus, only one cpu is allowed to take the panic.
	 * Attempt to be verbose about this situation, but if the kprintf()
	 * itself panics, don't let us overrun the kernel stack.
	 *
	 * Be very nasty about descheduling our thread at the lowest
	 * level possible in an attempt to freeze the thread without
	 * inducing further panics.
	 *
	 * Bumping gd_trap_nesting_level will also bypass assertions in
	 * lwkt_switch() and allow us to switch away even if we are a
	 * FAST interrupt or IPI.
	 *
	 * The setting of panic_cpu_gd also determines how kprintf()
	 * spin-locks itself.  DDB can set panic_cpu_gd as well.
	 */
	for (;;) {
		globaldata_t xgd = panic_cpu_gd;

		/*
		 * Someone else got the panic cpu
		 */
		if (xgd && xgd != gd) {
			crit_enter();
			++mycpu->gd_trap_nesting_level;
			if (mycpu->gd_trap_nesting_level < 25) {
				kprintf("SECONDARY PANIC ON CPU %d THREAD %p\n",
					mycpu->gd_cpuid, td);
			}
			td->td_release = NULL;	/* be a grinch */
			for (;;) {
				lwkt_deschedule_self(td);
				lwkt_switch();
			}
			/* NOT REACHED */
			/* --mycpu->gd_trap_nesting_level */
			/* crit_exit() */
		}

		/*
		 * Reentrant panic
		 */
		if (xgd && xgd == gd)
			break;

		/*
		 * We got it
		 */
		if (atomic_cmpset_ptr(&panic_cpu_gd, NULL, gd))
			break;
	}
#else
	panic_cpu_gd = gd;
#endif
	/*
	 * Try to get the system into a working state.  Save information
	 * we are about to destroy.
	 */
	kvcreinitspin();
	if (panicstr == NULL) {
		bcopy(td->td_toks_array, panic_tokens, sizeof(panic_tokens));
		panic_tokens_count = td->td_toks_stop - &td->td_toks_base;
	}
	lwkt_relalltokens(td);
	td->td_toks_stop = &td->td_toks_base;

	/*
	 * Setup
	 */
	bootopt = RB_AUTOBOOT | RB_DUMP;
	if (sync_on_panic == 0)
		bootopt |= RB_NOSYNC;
	newpanic = 0;
	if (panicstr) {
		bootopt |= RB_NOSYNC;
	} else {
		panicstr = fmt;
		newpanic = 1;
	}

	/*
	 * Format the panic string.
	 */
	__va_start(ap, fmt);
	kvsnprintf(buf, sizeof(buf), fmt, ap);
	if (panicstr == fmt)
		panicstr = buf;
	__va_end(ap);
	kprintf("panic: %s\n", buf);
#ifdef SMP
	/* two separate prints in case of an unmapped page and trap */
	kprintf("cpuid = %d\n", mycpu->gd_cpuid);
#endif

#if (NGPIO > 0) && defined(ERROR_LED_ON_PANIC)
	led_switch("error", 1);
#endif

#if defined(WDOG_DISABLE_ON_PANIC) && defined(WATCHDOG_ENABLE)
	wdog_disable();
#endif

	/*
	 * Enter the debugger or fall through & dump.  Entering the
	 * debugger will stop cpus.  If not entering the debugger stop
	 * cpus here.
	 */
#if defined(DDB)
	if (newpanic && trace_on_panic)
		print_backtrace(-1);
	if (debugger_on_panic)
		Debugger("panic");
	else
#endif
#ifdef SMP
	if (newpanic)
		stop_cpus(mycpu->gd_other_cpus);
#else
	;
#endif
	boot(bootopt);
}
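
Since panic() takes a printf-style format (kvsnprintf() above renders it into buf), call sites normally embed the failing state directly in the message. A hypothetical call site, for illustration only:

#include <sys/param.h>
#include <sys/systm.h>

static void
example_check(void *p, int count)
{
	if (p == NULL)
		panic("example_check: NULL pointer, count %d", count);
	if (count < 0)
		panic("example_check: negative count %d for %p", count, p);
}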
Example No. 5
/*
 * This procedure is the main loop of our per-cpu helper thread.  The
 * sc->isrunning flag prevents us from racing hardclock_softtick() and
 * a critical section is sufficient to interlock sc->curticks and protect
 * us from remote IPI's / list removal.
 *
 * The thread starts with the MP lock released and not in a critical
 * section.  The loop itself is MP safe while individual callbacks
 * may or may not be, so we obtain or release the MP lock as appropriate.
 */
static void
softclock_handler(void *arg)
{
	softclock_pcpu_t sc;
	struct callout *c;
	struct callout_tailq *bucket;
	void (*c_func)(void *);
	void *c_arg;
	int mpsafe = 1;

	/*
	 * Run the callout thread at the same priority as other kernel
	 * threads so it can be round-robined.
	 */
	/*lwkt_setpri_self(TDPRI_SOFT_NORM);*/

	sc = arg;
	crit_enter();
loop:
	while (sc->softticks != (int)(sc->curticks + 1)) {
		bucket = &sc->callwheel[sc->softticks & callwheelmask];

		for (c = TAILQ_FIRST(bucket); c; c = sc->next) {
			if (c->c_time != sc->softticks) {
				sc->next = TAILQ_NEXT(c, c_links.tqe);
				continue;
			}
			if (c->c_flags & CALLOUT_MPSAFE) {
				if (mpsafe == 0) {
					mpsafe = 1;
					rel_mplock();
				}
			} else {
				/*
				 * The request might be removed while we
				 * are waiting to get the MP lock.  If it
				 * was removed, sc->next will point to the
				 * next valid request or NULL; loop up.
				 */
				if (mpsafe) {
					mpsafe = 0;
					sc->next = c;
					get_mplock();
					if (c != sc->next)
						continue;
				}
			}
			sc->next = TAILQ_NEXT(c, c_links.tqe);
			TAILQ_REMOVE(bucket, c, c_links.tqe);

			sc->running = c;
			c_func = c->c_func;
			c_arg = c->c_arg;
			c->c_func = NULL;
			KKASSERT(c->c_flags & CALLOUT_DID_INIT);
			c->c_flags &= ~CALLOUT_PENDING;
			crit_exit();
			c_func(c_arg);
			crit_enter();
			sc->running = NULL;
			/* NOTE: list may have changed */
		}
		++sc->softticks;
	}
	sc->isrunning = 0;
	lwkt_deschedule_self(&sc->thread);	/* == curthread */
	lwkt_switch();
	goto loop;
	/* NOT REACHED */
}
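
The handler above only dispatches whatever the callout wheel contains; the producer side is the callout_*() API. A hedged consumer sketch follows — the callout_init(), callout_reset() and callout_stop() prototypes are assumed from the classic BSD callout API (see sys/callout.h), and every name prefixed example_ is hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

struct example_softc {
	struct callout	watchdog;
};

static void
example_watchdog(void *arg)
{
	struct example_softc *sc = arg;

	/* ... periodic work; runs from the softclock thread above ... */

	/* Re-arm for one second from now. */
	callout_reset(&sc->watchdog, hz, example_watchdog, sc);
}

static void
example_start(struct example_softc *sc)
{
	callout_init(&sc->watchdog);
	callout_reset(&sc->watchdog, hz, example_watchdog, sc);
}

static void
example_stop(struct example_softc *sc)
{
	/* Cancel the callout if it is still pending. */
	callout_stop(&sc->watchdog);
}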
Example No. 6
/*
 * This procedure is the main loop of our per-cpu helper thread.  The
 * sc->isrunning flag prevents us from racing hardclock_softtick() and
 * a critical section is sufficient to interlock sc->curticks and protect
 * us from remote IPI's / list removal.
 *
 * The thread starts with the MP lock released and not in a critical
 * section.  The loop itself is MP safe while individual callbacks
 * may or may not be, so we obtain or release the MP lock as appropriate.
 */
static void
softclock_handler(void *arg)
{
	softclock_pcpu_t sc;
	struct callout *c;
	struct callout_tailq *bucket;
	struct callout slotimer;
	int mpsafe = 1;
	int flags;

	/*
	 * Setup pcpu slow clocks which we want to run from the callout
	 * thread.
	 */
	callout_init_mp(&slotimer);
	callout_reset(&slotimer, hz * 10, slotimer_callback, &slotimer);

	/*
	 * Run the callout thread at the same priority as other kernel
	 * threads so it can be round-robined.
	 */
	/*lwkt_setpri_self(TDPRI_SOFT_NORM);*/

	/*
	 * Loop critical section against ipi operations to this cpu.
	 */
	sc = arg;
	crit_enter();
loop:
	while (sc->softticks != (int)(sc->curticks + 1)) {
		bucket = &sc->callwheel[sc->softticks & cwheelmask];

		for (c = TAILQ_FIRST(bucket); c; c = sc->next) {
			if (c->c_time != sc->softticks) {
				sc->next = TAILQ_NEXT(c, c_links.tqe);
				continue;
			}

			flags = c->c_flags;
			if (flags & CALLOUT_MPSAFE) {
				if (mpsafe == 0) {
					mpsafe = 1;
					rel_mplock();
				}
			} else {
				/*
				 * The request might be removed while we
				 * are waiting to get the MP lock.  If it
				 * was removed, sc->next will point to the
				 * next valid request or NULL; loop up.
				 */
				if (mpsafe) {
					mpsafe = 0;
					sc->next = c;
					get_mplock();
					if (c != sc->next)
						continue;
				}
			}

			/*
			 * Queue protection only exists while we hold the
			 * critical section uninterrupted.
			 *
			 * Adjust sc->next when removing (c) from the queue,
			 * note that an IPI on this cpu may make further
			 * adjustments to sc->next.
			 */
			sc->next = TAILQ_NEXT(c, c_links.tqe);
			TAILQ_REMOVE(bucket, c, c_links.tqe);

			KASSERT((c->c_flags & CALLOUT_ARMED) &&
				(c->c_flags & CALLOUT_PENDING) &&
				CALLOUT_FLAGS_TO_CPU(c->c_flags) ==
				mycpu->gd_cpuid,
				("callout %p: bad flags %08x", c, c->c_flags));

			/*
			 * Once CALLOUT_PENDING is cleared, sc->running
			 * protects the callout structure's existence but
			 * only until we call c_func().  A callout_stop()
			 * or callout_reset() issued from within c_func()
			 * will not block.  The callout can also be kfree()d
			 * by c_func().
			 *
			 * We set EXECUTED before calling c_func() so a
			 * callout_stop() issued from within c_func() returns
			 * the correct status.
			 */
			if ((flags & (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) ==
			    (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) {
				void (*c_func)(void *);
				void *c_arg;
				struct lock *c_lk;
				int error;

				/*
				 * NOTE: sc->running must be set prior to
				 *	 CALLOUT_PENDING being cleared to
				 *	 avoid missed CANCELs and *_stop()
				 *	 races.
				 */
				sc->running = (intptr_t)c;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_lk = c->c_lk;
				c->c_func = NULL;
				KKASSERT(c->c_flags & CALLOUT_DID_INIT);
				flags = callout_unpend_disarm(c);
				error = lockmgr(c_lk, LK_EXCLUSIVE |
						      LK_CANCELABLE);
				if (error == 0) {
					atomic_set_int(&c->c_flags,
						       CALLOUT_EXECUTED);
					crit_exit();
					c_func(c_arg);
					crit_enter();
					lockmgr(c_lk, LK_RELEASE);
				}
			} else if (flags & CALLOUT_ACTIVE) {
				void (*c_func)(void *);
				void *c_arg;

				sc->running = (intptr_t)c;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c->c_func = NULL;
				KKASSERT(c->c_flags & CALLOUT_DID_INIT);
				flags = callout_unpend_disarm(c);
				atomic_set_int(&c->c_flags, CALLOUT_EXECUTED);
				crit_exit();
				c_func(c_arg);
				crit_enter();
			} else {
				flags = callout_unpend_disarm(c);
			}

			/*
			 * Read and clear sc->running.  If bit 0 was set,
			 * a callout_stop() is likely blocked waiting for
			 * the callback to complete.
			 *
			 * The callout_unpend_disarm() call above also
			 * cleared CALLOUT_WAITING and returned the contents
			 * of flags prior to clearing any bits.
			 *
			 * Interlock wakeup any _stop's waiting on us.  Note
			 * that once c_func() was called, the callout
			 * structure (c) pointer may no longer be valid.  It
			 * can only be used for the wakeup.
			 */
			if ((atomic_readandclear_ptr(&sc->running) & 1) ||
			    (flags & CALLOUT_WAITING)) {
				wakeup(c);
			}
			/* NOTE: list may have changed */
		}
		++sc->softticks;
	}

	/*
	 * Don't leave us holding the MP lock when we deschedule ourselves.
	 */
	if (mpsafe == 0) {
		mpsafe = 1;
		rel_mplock();
	}
	sc->isrunning = 0;
	lwkt_deschedule_self(&sc->thread);	/* == curthread */
	lwkt_switch();
	goto loop;
	/* NOT REACHED */
}
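
The CALLOUT_AUTOLOCK branch above acquires c->c_lk with LK_EXCLUSIVE | LK_CANCELABLE around the callback, so a concurrent stop can cancel the lock acquisition instead of waiting out the callback. The following sketch of a consumer of that path is hedged: callout_init_lk() and the four-argument lockinit() prototype used here are assumptions about the DragonFly API (verify against sys/callout.h and sys/lock.h), and the example_ names are hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/callout.h>

struct example_softc {
	struct lock	lk;
	struct callout	expire;
};

static void
example_expire(void *arg)
{
	struct example_softc *sc = arg;

	/* sc->lk is held exclusively here by the softclock thread. */
	/* ... expiry work on sc ... */
}

static void
example_init(struct example_softc *sc)
{
	lockinit(&sc->lk, "exlk", 0, 0);	/* assumed prototype */
	callout_init_lk(&sc->expire, &sc->lk);	/* assumed initializer */
	callout_reset(&sc->expire, 5 * hz, example_expire, sc);
}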