Example #1
static void
am335x_pmic_intr(void *arg)
{
	struct am335x_pmic_softc *sc = (struct am335x_pmic_softc *)arg;
	struct tps65217_status_reg status_reg;
	struct tps65217_int_reg int_reg;
	int rv;
	char notify_buf[16];

	THREAD_SLEEPING_OK();
	rv = am335x_pmic_read(sc->sc_dev, TPS65217_INT_REG, (uint8_t *)&int_reg, 1);
	if (rv != 0) {
		device_printf(sc->sc_dev, "Cannot read interrupt register\n");
		THREAD_NO_SLEEPING();
		return;
	}
	rv = am335x_pmic_read(sc->sc_dev, TPS65217_STATUS_REG, (uint8_t *)&status_reg, 1);
	if (rv != 0) {
		device_printf(sc->sc_dev, "Cannot read status register\n");
		THREAD_NO_SLEEPING();
		return;
	}
	THREAD_NO_SLEEPING();

	if (int_reg.pbi && status_reg.pb)
		shutdown_nice(RB_POWEROFF);
	if (int_reg.aci) {
		snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x",
		    status_reg.acpwr);
		devctl_notify_f("ACPI", "ACAD", "power", notify_buf, M_NOWAIT);
	}
}
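
The handler above may sleep while it reads the PMIC registers over I2C, so it brackets those reads with THREAD_SLEEPING_OK()/THREAD_NO_SLEEPING(). A minimal sketch of the same pattern follows; foo_intr(), foo_read_status() and struct foo_softc are hypothetical stand-ins for the driver specifics, not code from the example above.

struct foo_softc {
	device_t	sc_dev;		/* illustrative softc */
};

static void
foo_intr(void *arg)
{
	struct foo_softc *sc = arg;
	uint8_t status;
	int error;

	/* The bus transaction below may sleep, so allow it explicitly. */
	THREAD_SLEEPING_OK();
	error = foo_read_status(sc->sc_dev, &status);	/* hypothetical helper */
	THREAD_NO_SLEEPING();

	if (error != 0) {
		device_printf(sc->sc_dev, "cannot read status register\n");
		return;
	}
	/* React to the status bits here. */
}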
Example #2
void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_SLEEPING_OK();

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}
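
_rm_runlock() re-enables sleeping only when the lock object carries LO_SLEEPABLE, which is how a read-mostly lock created with RM_SLEEPABLE is set up. A caller-side sketch under that assumption, with illustrative foo_* names:

static struct rmlock foo_rm;

static void
foo_setup(void)
{
	/* RM_SLEEPABLE sets LO_SLEEPABLE on the underlying lock object. */
	rm_init_flags(&foo_rm, "foo_rm", RM_SLEEPABLE);
}

static void
foo_read_path(void)
{
	struct rm_priotracker tracker;

	rm_rlock(&foo_rm, &tracker);
	/* Sleeping is disallowed while the read lock is held. */
	rm_runlock(&foo_rm, &tracker);
	/* _rm_runlock() has now called THREAD_SLEEPING_OK(). */
}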
Example #3
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
			critical_exit();
			return (1);
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			THREAD_SLEEPING_OK();
			sx_xlock(&rm->rm_lock_sx);
			THREAD_NO_SLEEPING();
		} else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}
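
When trylock is set, the slow path above gives up instead of blocking on the backing sx lock or mutex. A caller-side sketch of that non-blocking attempt, again with hypothetical foo_* names:

struct foo_softc {
	struct rmlock	sc_rm;		/* illustrative read-mostly lock */
	/* ... shared state protected by sc_rm ... */
};

static int
foo_try_read(struct foo_softc *sc)
{
	struct rm_priotracker tracker;

	/* rm_try_rlock() fails when the fast path loses to a writer and
	 * the backing lock cannot be taken without blocking. */
	if (!rm_try_rlock(&sc->sc_rm, &tracker))
		return (EBUSY);
	/* Read the shared state under the read lock. */
	rm_runlock(&sc->sc_rm, &tracker);
	return (0);
}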
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int mtxcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	mtxcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct mtx *c_mtx;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_mtx = c->c_mtx;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_func = NULL;
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
					curr_callout = NULL;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
					curr_callout = c;
				}
				curr_cancelled = 0;
				mtx_unlock_spin(&callout_lock);
				if (c_mtx != NULL) {
					mtx_lock(c_mtx);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (curr_cancelled) {
						mtx_unlock(c_mtx);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					curr_cancelled = 1;

					if (c_mtx == &Giant) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						mtxcalls++;
						CTR3(KTR_CALLOUT, "callout mtx"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
#ifdef MAXHE_TODO
				THREAD_NO_SLEEPING();
				c_func(c_arg);
				THREAD_SLEEPING_OK();
#else
				c_func(c_arg);
#endif // MAXHE_TODO
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					mtx_unlock(c_mtx);
			skip:
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (callout_wait) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					callout_wait = 0;
					mtx_unlock_spin(&callout_lock);
					wakeup(&callout_wait);
					mtx_lock_spin(&callout_lock);
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
#ifdef MAXHE_TODO
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
#endif // MAXHE_TODO
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}
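
Both versions of softclock() run each callout handler inside a no-sleep window (under MAXHE_TODO in the version above, unconditionally in the per-CPU version below), so a handler armed through the callout(9) API must not block. A minimal caller sketch with hypothetical foo_* names, using the older callout_init(c, CALLOUT_MPSAFE) form:

static struct callout foo_ch;

static void
foo_timer(void *arg)
{
	struct foo_softc *sc = arg;

	/* Runs from softclock() with THREAD_NO_SLEEPING() in effect:
	 * no sleepable locks, no blocking allocations. */
	foo_poll_hardware(sc);				/* hypothetical, must not sleep */
	callout_reset(&foo_ch, hz, foo_timer, sc);	/* rearm one second out */
}

static void
foo_start(struct foo_softc *sc)
{
	callout_init(&foo_ch, CALLOUT_MPSAFE);
	callout_reset(&foo_ch, hz, foo_timer, sc);
}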
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks != ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct lock_class *class;
				struct lock_object *c_lock;
				int c_flags, sharedlock;

				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				class = (c->c_lock != NULL) ?
				    LOCK_CLASS(c->c_lock) : NULL;
				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
				    0 : 1;
				c_lock = c->c_lock;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				cc->cc_curr = c;
				cc->cc_cancel = 0;
				CC_UNLOCK(cc);
				if (c_lock != NULL) {
					class->lc_lock(c_lock, sharedlock);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (cc->cc_cancel) {
						class->lc_unlock(c_lock);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					cc->cc_cancel = 1;

					if (c_lock == &Giant.lock_object) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						lockcalls++;
						CTR3(KTR_CALLOUT, "callout lock"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				SDT_PROBE(callout_execute, kernel, ,
				    callout_start, c, 0, 0, 0, 0);
				c_func(c_arg);
				SDT_PROBE(callout_execute, kernel, ,
				    callout_end, c, 0, 0, 0, 0);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				CTR1(KTR_CALLOUT, "callout %p finished", c);
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					class->lc_unlock(c_lock);
			skip:
				CC_LOCK(cc);
				/*
				 * If the current callout is locally
				 * allocated (from timeout(9))
				 * then put it on the freelist.
				 *
				 * Note: we need to check the cached
				 * copy of c_flags because if it was not
				 * local, then it's not safe to deref the
				 * callout pointer.
				 */
				if (c_flags & CALLOUT_LOCAL_ALLOC) {
					KASSERT(c->c_flags ==
					    CALLOUT_LOCAL_ALLOC,
					    ("corrupted callout"));
					c->c_func = NULL;
					SLIST_INSERT_HEAD(&cc->cc_callfree, c,
					    c_links.sle);
				}
				cc->cc_curr = NULL;
				if (cc->cc_waiting) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					cc->cc_waiting = 0;
					CC_UNLOCK(cc);
					wakeup(&cc->cc_waiting);
					CC_LOCK(cc);
				}
				steps = 0;
				c = cc->cc_next;
			}
		}
	}