Example #1
void
cpu_thread_exit(void)
{
	curthread->td_switch = cpu_exit_switch;
	lwkt_switch();
	panic("cpu_exit");
}
Example #2
/*
 * Terminate the current thread.  The caller must have already acquired
 * the thread's rwlock and placed it on a reap list or otherwise notified
 * a reaper of its existence.  We set a special assembly switch function which
 * releases td_rwlock after it has cleaned up the MMU state and switched
 * out the stack.
 *
 * Must be called from a critical section and with the thread descheduled.
 */
void
cpu_thread_exit(void)
{
	curthread->td_switch = cpu_exit_switch;
	curthread->td_flags |= TDF_EXITING;
	lwkt_switch();
	panic("cpu_thread_exit: lwkt_switch() unexpectedly returned");
}
Example #3
/*
 * Use this to lwkt_switch() when the scheduler clock is not
 * yet running, otherwise lwkt_switch() won't do anything.
 * XXX needs cleaning up in lwkt_thread.c
 */
static void
lwkt_force_switch(void)
{
	crit_enter();
	lwkt_schedulerclock(curthread);
	crit_exit();
	lwkt_switch();
}
Example #4
/*
 * Similar to gettoken but we acquire a shared token instead of an exclusive
 * token.
 */
void
lwkt_gettoken_shared(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, TOK_EXCLREQ);

#ifdef DEBUG_LOCKS
        /*
         * Taking a pool token in shared mode is a bad idea; other
         * addresses deeper in the call stack may hash to the same pool
         * token and you may end up with an exclusive-shared livelock.
         * Warn in this condition.
         */
        if ((tok >= &pool_tokens[0].token) &&
            (tok < &pool_tokens[LWKT_NUM_POOL_TOKENS].token))
                kprintf("Warning! Taking pool token %p in shared mode\n", tok);
#endif


	if (_lwkt_trytokref_spin(ref, td, TOK_EXCLREQ))
		return;

	/*
	 * Give up running if we can't acquire the token right now.
	 *
	 * Since the tokref is already active the scheduler now
	 * takes care of acquisition, so we need only call
	 * lwkt_switch().
	 *
	 * Since we failed this was not a recursive token so upon
	 * return tr_tok->t_ref should be assigned to this specific
	 * ref.
	 */
	td->td_wmesg = tok->t_desc;
	++tok->t_collisions;
	logtoken(fail, ref);
	td->td_toks_have = td->td_toks_stop - 1;

	if (tokens_debug_output > 0) {
		--tokens_debug_output;
		spin_lock(&tok_debug_spin);
		kprintf("Shar Token thread %p %s %s\n",
			td, tok->t_desc, td->td_comm);
		print_backtrace(6);
		kprintf("\n");
		spin_unlock(&tok_debug_spin);
	}

	lwkt_switch();
	logtoken(succ, ref);
}
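Because the shared-token path above can block in lwkt_switch() until the token is granted, a caller normally just brackets its read-mostly section with a get/rel pair. The fragment below is a minimal usage sketch rather than code from the file above; it assumes DragonFly's lwkt_token_init(tok, desc) and lwkt_reltoken() interfaces, and the token name and lookup body are hypothetical.

static struct lwkt_token my_tree_token;

static void
my_subsys_init(void)
{
	/* hypothetical init path: the description shows up as td_wmesg on contention */
	lwkt_token_init(&my_tree_token, "mytree");
}

static int
my_tree_lookup(int key)
{
	int found;

	lwkt_gettoken_shared(&my_tree_token);	/* may block in lwkt_switch() */
	found = (key == 42);			/* stand-in for a real lookup */
	lwkt_reltoken(&my_tree_token);		/* drop the shared hold */
	return (found);
}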
Example #5
void
cpu_idle(void)
{
	struct thread *td = curthread;
	struct mdglobaldata *gd = mdcpu;
	int reqflags;

	crit_exit();
	KKASSERT(td->td_critcount == 0);
	cpu_enable_intr();
	for (;;) {
		/*
		 * See if there are any LWKTs ready to go.
		 */
		lwkt_switch();

		/*
		 * The idle loop halts only if no threads are schedulable
		 * and no signals have occurred.
		 */
		if (cpu_idle_hlt &&
		    (td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
			splz();
#ifdef SMP
			KKASSERT(MP_LOCK_HELD() == 0);
#endif
			if ((td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
#ifdef DEBUGIDLE
				struct timeval tv1, tv2;
				gettimeofday(&tv1, NULL);
#endif
				reqflags = gd->mi.gd_reqflags &
					   ~RQF_IDLECHECK_WK_MASK;
				umtx_sleep(&gd->mi.gd_reqflags, reqflags,
					   1000000);
#ifdef DEBUGIDLE
				gettimeofday(&tv2, NULL);
				if (tv2.tv_usec - tv1.tv_usec +
				    (tv2.tv_sec - tv1.tv_sec) * 1000000 
				    > 500000) {
					kprintf("cpu %d idlelock %08x %08x\n",
						gd->mi.gd_cpuid,
						gd->mi.gd_reqflags,
						gd->gd_fpending);
				}
#endif
			}
			++cpu_idle_hltcnt;
		} else {
			splz();
#ifdef SMP
			__asm __volatile("pause");
#endif
			++cpu_idle_spincnt;
		}
	}
}
Example #6
/*
 * Terminate the current thread.  The caller must have already acquired
 * the thread's rwlock and placed it on a reap list or otherwise notified
 * a reaper of its existence.  We set a special assembly switch function which
 * releases td_rwlock after it has cleaned up the MMU state and switched
 * out the stack.
 *
 * Must be called from a critical section and with the thread descheduled.
 */
void
cpu_thread_exit(void)
{
#if NNPX > 0
	npxexit();
#endif
	curthread->td_switch = cpu_exit_switch;
	curthread->td_flags |= TDF_EXITING;
	lwkt_switch();
	panic("cpu_exit");
}
Example #7
/*
 * Get a serializing token.  This routine can block.
 */
void
lwkt_gettoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, TOK_EXCLUSIVE|TOK_EXCLREQ);

#ifdef DEBUG_LOCKS
	/*
	 * Taking an exclusive token after holding it shared will
	 * livelock. Scan for that case and assert.
	 */
	lwkt_tokref_t tk;
	int found = 0;
	for (tk = &td->td_toks_base; tk < ref; tk++) {
		if (tk->tr_tok != tok)
			continue;
		
		found++;
		if (tk->tr_count & TOK_EXCLUSIVE) 
			goto good;
	}
	/* If found > 0 here we only found shared instances of this token */
	KASSERT((found == 0), ("Token %p s/x livelock", tok));
good:
#endif

	if (_lwkt_trytokref_spin(ref, td, TOK_EXCLUSIVE|TOK_EXCLREQ))
		return;

	/*
	 * Give up running if we can't acquire the token right now.
	 *
	 * Since the tokref is already active the scheduler now
	 * takes care of acquisition, so we need only call
	 * lwkt_switch().
	 *
	 * Since we failed this was not a recursive token so upon
	 * return tr_tok->t_ref should be assigned to this specific
	 * ref.
	 */
	td->td_wmesg = tok->t_desc;
	++tok->t_collisions;
	logtoken(fail, ref);
	td->td_toks_have = td->td_toks_stop - 1;
	lwkt_switch();
	logtoken(succ, ref);
	KKASSERT(tok->t_ref == ref);
}
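The DEBUG_LOCKS block above asserts against the shared-then-exclusive livelock its comment describes. The sketch below is hypothetical (token name and caller invented) and only illustrates the ordering rule: drop a shared hold before requesting the same token exclusively.

static struct lwkt_token cfg_token;

static void
cfg_update(void)
{
	/*
	 * Read phase under a shared hold.
	 */
	lwkt_gettoken_shared(&cfg_token);
	/* ... inspect the data protected by cfg_token ... */
	lwkt_reltoken(&cfg_token);

	/*
	 * Write phase.  Calling lwkt_gettoken() while the shared hold
	 * above was still in place could livelock (and trips the
	 * DEBUG_LOCKS KASSERT), so the shared hold is dropped first.
	 */
	lwkt_gettoken(&cfg_token);
	/* ... modify the data protected by cfg_token ... */
	lwkt_reltoken(&cfg_token);
}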
Example #8
static void
lwkt_idleloop(void *dummy)
{
    globaldata_t gd = mycpu;

    DBPRINTF(("idlestart cpu %d pri %d (should be < 32) mpcount %d (should be 0)\n",
	gd->gd_cpuid, curthread->td_pri, curthread->td_mpcount));

    gd->gd_pid = getpid();

    for (;;) {
	/*
	 * If only our 'main' thread is left, schedule it.
	 */
        if (gd->gd_num_threads == gd->gd_sys_threads) {
	    int i;
	    globaldata_t tgd;

	    for (i = 0; i < ncpus; ++i) {
		tgd = globaldata_find(i);
		if (tgd->gd_num_threads != tgd->gd_sys_threads)
		    break;
	    }
	    if (i == ncpus && (main_td.td_flags & TDF_RUNQ) == 0)
		lwkt_schedule(&main_td);
        }

	/*
	 * Wait for an interrupt, aka wait for a signal or an upcall to
	 * occur, then switch away.
	 */
	crit_enter();
	if (gd->gd_runqmask || (curthread->td_flags & TDF_IDLE_NOHLT)) {
	    curthread->td_flags &= ~TDF_IDLE_NOHLT;
	} else {
	    printf("cpu %d halting\n", gd->gd_cpuid);
	    cpu_halt();
	    printf("cpu %d resuming\n", gd->gd_cpuid);
	}
	crit_exit();
	lwkt_switch();
    }
}
Example #9
int
main(int ac, char **av)
{
    thread_t td;
    thread_t td1;
    thread_t td2;

    uthread_init();
    td = curthread;
    printf("mainthread %p crit_count %d nest %d mp_lock %d\n", 
	td, td->td_pri / TDPRI_CRIT, td->td_mpcount, mp_lock
    );
    lwkt_create(thread1, NULL, &td1, NULL, 0, -1, "thread1");
    lwkt_create(thread2, NULL, &td2, NULL, 0, -1, "thread2");
    printf("thread #1 %p #2 %p\n", td1, td2);
    printf("switching away from main (should come back before exit)\n");
    lwkt_switch();
    printf("Switched back to main, main Exiting\n");
    exit(1);
}
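For context, a worker such as thread1/thread2 created above would, in this style of test harness, look roughly like the hypothetical sketch below: it yields with lwkt_switch() and terminates with lwkt_exit(). The body and iteration count are invented for illustration.

static void
thread1(void *dummy)
{
    int i;

    for (i = 0; i < 3; ++i) {
	printf("thread1 pass %d, yielding\n", i);
	lwkt_switch();		/* cooperative yield back to other LWKTs */
    }
    printf("thread1 exiting\n");
    lwkt_exit();		/* never returns */
}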
Example #10
/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
	int bootopt, newpanic;
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;
	__va_list ap;
	static char buf[256];

#ifdef SMP
	/*
	 * If a panic occurs on multiple cpus before the first is able to
	 * halt the other cpus, only one cpu is allowed to take the panic.
	 * Attempt to be verbose about this situation, but if the kprintf()
	 * itself panics, don't let us overrun the kernel stack.
	 *
	 * Be very nasty about descheduling our thread at the lowest
	 * level possible in an attempt to freeze the thread without
	 * inducing further panics.
	 *
	 * Bumping gd_trap_nesting_level will also bypass assertions in
	 * lwkt_switch() and allow us to switch away even if we are a
	 * FAST interrupt or IPI.
	 *
	 * The setting of panic_cpu_gd also determines how kprintf()
	 * spin-locks itself.  DDB can set panic_cpu_gd as well.
	 */
	for (;;) {
		globaldata_t xgd = panic_cpu_gd;

		/*
		 * Someone else got the panic cpu
		 */
		if (xgd && xgd != gd) {
			crit_enter();
			++mycpu->gd_trap_nesting_level;
			if (mycpu->gd_trap_nesting_level < 25) {
				kprintf("SECONDARY PANIC ON CPU %d THREAD %p\n",
					mycpu->gd_cpuid, td);
			}
			td->td_release = NULL;	/* be a grinch */
			for (;;) {
				lwkt_deschedule_self(td);
				lwkt_switch();
			}
			/* NOT REACHED */
			/* --mycpu->gd_trap_nesting_level */
			/* crit_exit() */
		}

		/*
		 * Reentrant panic
		 */
		if (xgd && xgd == gd)
			break;

		/*
		 * We got it
		 */
		if (atomic_cmpset_ptr(&panic_cpu_gd, NULL, gd))
			break;
	}
#else
	panic_cpu_gd = gd;
#endif
	/*
	 * Try to get the system into a working state.  Save information
	 * we are about to destroy.
	 */
	kvcreinitspin();
	if (panicstr == NULL) {
		bcopy(td->td_toks_array, panic_tokens, sizeof(panic_tokens));
		panic_tokens_count = td->td_toks_stop - &td->td_toks_base;
	}
	lwkt_relalltokens(td);
	td->td_toks_stop = &td->td_toks_base;

	/*
	 * Setup
	 */
	bootopt = RB_AUTOBOOT | RB_DUMP;
	if (sync_on_panic == 0)
		bootopt |= RB_NOSYNC;
	newpanic = 0;
	if (panicstr) {
		bootopt |= RB_NOSYNC;
	} else {
		panicstr = fmt;
		newpanic = 1;
	}

	/*
	 * Format the panic string.
	 */
	__va_start(ap, fmt);
	kvsnprintf(buf, sizeof(buf), fmt, ap);
	if (panicstr == fmt)
		panicstr = buf;
	__va_end(ap);
	kprintf("panic: %s\n", buf);
#ifdef SMP
	/* two separate prints in case of an unmapped page and trap */
	kprintf("cpuid = %d\n", mycpu->gd_cpuid);
#endif

#if (NGPIO > 0) && defined(ERROR_LED_ON_PANIC)
	led_switch("error", 1);
#endif

#if defined(WDOG_DISABLE_ON_PANIC) && defined(WATCHDOG_ENABLE)
	wdog_disable();
#endif

	/*
	 * Enter the debugger or fall through & dump.  Entering the
	 * debugger will stop cpus.  If not entering the debugger stop
	 * cpus here.
	 */
#if defined(DDB)
	if (newpanic && trace_on_panic)
		print_backtrace(-1);
	if (debugger_on_panic)
		Debugger("panic");
	else
#endif
#ifdef SMP
	if (newpanic)
		stop_cpus(mycpu->gd_other_cpus);
#else
	;
#endif
	boot(bootopt);
}
Example #11
/*
 * This procedure is the main loop of our per-cpu helper thread.  The
 * sc->isrunning flag prevents us from racing hardclock_softtick() and
 * a critical section is sufficient to interlock sc->curticks and protect
 * us from remote IPI's / list removal.
 *
 * The thread starts with the MP lock released and not in a critical
 * section.  The loop itself is MP safe while individual callbacks
 * may or may not be, so we obtain or release the MP lock as appropriate.
 */
static void
softclock_handler(void *arg)
{
	softclock_pcpu_t sc;
	struct callout *c;
	struct callout_tailq *bucket;
	void (*c_func)(void *);
	void *c_arg;
	int mpsafe = 1;

	/*
	 * Run the callout thread at the same priority as other kernel
	 * threads so it can be round-robined.
	 */
	/*lwkt_setpri_self(TDPRI_SOFT_NORM);*/

	sc = arg;
	crit_enter();
loop:
	while (sc->softticks != (int)(sc->curticks + 1)) {
		bucket = &sc->callwheel[sc->softticks & callwheelmask];

		for (c = TAILQ_FIRST(bucket); c; c = sc->next) {
			if (c->c_time != sc->softticks) {
				sc->next = TAILQ_NEXT(c, c_links.tqe);
				continue;
			}
			if (c->c_flags & CALLOUT_MPSAFE) {
				if (mpsafe == 0) {
					mpsafe = 1;
					rel_mplock();
				}
			} else {
				/*
				 * The request might be removed while we 
				 * are waiting to get the MP lock.  If it
				 * was removed sc->next will point to the
				 * next valid request or NULL, loop up.
				 */
				if (mpsafe) {
					mpsafe = 0;
					sc->next = c;
					get_mplock();
					if (c != sc->next)
						continue;
				}
			}
			sc->next = TAILQ_NEXT(c, c_links.tqe);
			TAILQ_REMOVE(bucket, c, c_links.tqe);

			sc->running = c;
			c_func = c->c_func;
			c_arg = c->c_arg;
			c->c_func = NULL;
			KKASSERT(c->c_flags & CALLOUT_DID_INIT);
			c->c_flags &= ~CALLOUT_PENDING;
			crit_exit();
			c_func(c_arg);
			crit_enter();
			sc->running = NULL;
			/* NOTE: list may have changed */
		}
		++sc->softticks;
	}
	sc->isrunning = 0;
	lwkt_deschedule_self(&sc->thread);	/* == curthread */
	lwkt_switch();
	goto loop;
	/* NOT REACHED */
}
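The handler above only drains callouts that other code has queued on the per-cpu callwheel. The fragment below is a hedged client-side sketch, not part of the file above, using the classic callout_init()/callout_reset()/callout_stop() interfaces seen elsewhere in these examples; the timer name, period, and handler are hypothetical.

static struct callout my_timer;

static void
my_timeout(void *arg)
{
	/* invoked from softclock_handler() outside its critical section */
	/* ... do periodic work ... */
	callout_reset(&my_timer, hz, my_timeout, arg);	/* rearm roughly 1s later */
}

static void
my_timer_start(void)
{
	callout_init(&my_timer);
	callout_reset(&my_timer, hz, my_timeout, NULL);
}

static void
my_timer_stop(void)
{
	callout_stop(&my_timer);	/* cancel if still pending */
}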
Example #12
/*
 * This procedure is the main loop of our per-cpu helper thread.  The
 * sc->isrunning flag prevents us from racing hardclock_softtick() and
 * a critical section is sufficient to interlock sc->curticks and protect
 * us from remote IPI's / list removal.
 *
 * The thread starts with the MP lock released and not in a critical
 * section.  The loop itself is MP safe while individual callbacks
 * may or may not be, so we obtain or release the MP lock as appropriate.
 */
static void
softclock_handler(void *arg)
{
	softclock_pcpu_t sc;
	struct callout *c;
	struct callout_tailq *bucket;
	struct callout slotimer;
	int mpsafe = 1;
	int flags;

	/*
	 * Setup pcpu slow clocks which we want to run from the callout
	 * thread.
	 */
	callout_init_mp(&slotimer);
	callout_reset(&slotimer, hz * 10, slotimer_callback, &slotimer);

	/*
	 * Run the callout thread at the same priority as other kernel
	 * threads so it can be round-robined.
	 */
	/*lwkt_setpri_self(TDPRI_SOFT_NORM);*/

	/*
	 * Loop critical section against ipi operations to this cpu.
	 */
	sc = arg;
	crit_enter();
loop:
	while (sc->softticks != (int)(sc->curticks + 1)) {
		bucket = &sc->callwheel[sc->softticks & cwheelmask];

		for (c = TAILQ_FIRST(bucket); c; c = sc->next) {
			if (c->c_time != sc->softticks) {
				sc->next = TAILQ_NEXT(c, c_links.tqe);
				continue;
			}

			flags = c->c_flags;
			if (flags & CALLOUT_MPSAFE) {
				if (mpsafe == 0) {
					mpsafe = 1;
					rel_mplock();
				}
			} else {
				/*
				 * The request might be removed while we 
				 * are waiting to get the MP lock.  If it
				 * was removed sc->next will point to the
				 * next valid request or NULL, loop up.
				 */
				if (mpsafe) {
					mpsafe = 0;
					sc->next = c;
					get_mplock();
					if (c != sc->next)
						continue;
				}
			}

			/*
			 * Queue protection only exists while we hold the
			 * critical section uninterrupted.
			 *
			 * Adjust sc->next when removing (c) from the queue,
			 * note that an IPI on this cpu may make further
			 * adjustments to sc->next.
			 */
			sc->next = TAILQ_NEXT(c, c_links.tqe);
			TAILQ_REMOVE(bucket, c, c_links.tqe);

			KASSERT((c->c_flags & CALLOUT_ARMED) &&
				(c->c_flags & CALLOUT_PENDING) &&
				CALLOUT_FLAGS_TO_CPU(c->c_flags) ==
				mycpu->gd_cpuid,
				("callout %p: bad flags %08x", c, c->c_flags));

			/*
			 * Once CALLOUT_PENDING is cleared, sc->running
			 * protects the callout structure's existence but
			 * only until we call c_func().  A callout_stop()
			 * or callout_reset() issued from within c_func()
			 * will not block.  The callout can also be kfree()d
			 * by c_func().
			 *
			 * We set EXECUTED before calling c_func() so a
			 * callout_stop() issued from within c_func() returns
			 * the correct status.
			 */
			if ((flags & (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) ==
			    (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) {
				void (*c_func)(void *);
				void *c_arg;
				struct lock *c_lk;
				int error;

				/*
				 * NOTE: sc->running must be set prior to
				 *	 CALLOUT_PENDING being cleared to
				 *	 avoid missed CANCELs and *_stop()
				 *	 races.
				 */
				sc->running = (intptr_t)c;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_lk = c->c_lk;
				c->c_func = NULL;
				KKASSERT(c->c_flags & CALLOUT_DID_INIT);
				flags = callout_unpend_disarm(c);
				error = lockmgr(c_lk, LK_EXCLUSIVE |
						      LK_CANCELABLE);
				if (error == 0) {
					atomic_set_int(&c->c_flags,
						       CALLOUT_EXECUTED);
					crit_exit();
					c_func(c_arg);
					crit_enter();
					lockmgr(c_lk, LK_RELEASE);
				}
			} else if (flags & CALLOUT_ACTIVE) {
				void (*c_func)(void *);
				void *c_arg;

				sc->running = (intptr_t)c;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c->c_func = NULL;
				KKASSERT(c->c_flags & CALLOUT_DID_INIT);
				flags = callout_unpend_disarm(c);
				atomic_set_int(&c->c_flags, CALLOUT_EXECUTED);
				crit_exit();
				c_func(c_arg);
				crit_enter();
			} else {
				flags = callout_unpend_disarm(c);
			}

			/*
			 * Read and clear sc->running.  If bit 0 was set,
			 * a callout_stop() is likely blocked waiting for
			 * the callback to complete.
			 *
			 * The callout_unpend_disarm() above also cleared
			 * CALLOUT_WAITING and returned the contents of the
			 * flags prior to clearing any bits.
			 *
			 * Interlock wakeup any _stop's waiting on us.  Note
			 * that once c_func() was called, the callout
			 * structure (c) pointer may no longer be valid.  It
			 * can only be used for the wakeup.
			 */
			if ((atomic_readandclear_ptr(&sc->running) & 1) ||
			    (flags & CALLOUT_WAITING)) {
				wakeup(c);
			}
			/* NOTE: list may have changed */
		}
		++sc->softticks;
	}

	/*
	 * Don't leave us holding the MP lock when we deschedule ourselves.
	 */
	if (mpsafe == 0) {
		mpsafe = 1;
		rel_mplock();
	}
	sc->isrunning = 0;
	lwkt_deschedule_self(&sc->thread);	/* == curthread */
	lwkt_switch();
	goto loop;
	/* NOT REACHED */
}
Example #13
/*
 * Start threading.
 */
void
lwkt_start_threading(thread_t td)
{
    lwkt_switch();
}