Example #1
static void xnsched_watchdog_handler(struct xntimer *timer)
{
	struct xnsched *sched = xnpod_current_sched();
	struct xnthread *thread = sched->curr;

	if (likely(xnthread_test_state(thread, XNROOT))) {
		xnsched_reset_watchdog(sched);
		return;
	}

	if (likely(++sched->wdcount < wd_timeout_arg))
		return;

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(thread, XNSHADOW) &&
	    !xnthread_amok_p(thread)) {
		trace_mark(xn_nucleus, watchdog_signal,
			   "thread %p thread_name %s",
			   thread, xnthread_name(thread));
		xnprintf("watchdog triggered -- signaling runaway thread "
			 "'%s'\n", xnthread_name(thread));
		xnthread_set_info(thread, XNAMOK | XNKICKED);
		xnshadow_send_sig(thread, SIGDEBUG, SIGDEBUG_WATCHDOG, 1);
	} else
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	{
		trace_mark(xn_nucleus, watchdog, "thread %p thread_name %s",
			   thread, xnthread_name(thread));
		xnprintf("watchdog triggered -- killing runaway thread '%s'\n",
			 xnthread_name(thread));
		xnpod_delete_thread(thread);
	}
	xnsched_reset_watchdog(sched);
}
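
The handler above is a counting watchdog: each tick either clears the per-CPU count (when the root Linux thread is found running) or bumps it, and the runaway action fires only once the count reaches the threshold. Below is a minimal user-space sketch of that pattern; `wd_tick`, `wd_reset` and `WD_TIMEOUT_TICKS` are hypothetical stand-ins for the nucleus counterparts, not Xenomai API.

#include <stdio.h>

#define WD_TIMEOUT_TICKS 4	/* hypothetical stand-in for wd_timeout_arg */

static unsigned int wdcount;	/* models sched->wdcount */

static void wd_reset(void)	/* models xnsched_reset_watchdog() */
{
	wdcount = 0;
}

/* Called on each watchdog tick; 'root_running' models XNROOT. */
static void wd_tick(int root_running)
{
	if (root_running) {	/* Linux got to run: no lockup */
		wd_reset();
		return;
	}

	if (++wdcount < WD_TIMEOUT_TICKS)
		return;		/* not starved for long enough yet */

	printf("watchdog triggered -- act on runaway thread\n");
	wd_reset();
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		wd_tick(0);	/* a real-time thread hogs the CPU */
	return 0;
}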
Example #2
/* Must be called with nklock locked, interrupts off. */
void xnsched_track_policy(struct xnthread *thread,
			  struct xnthread *target)
{
	union xnsched_policy_param param;

	if (xnthread_test_state(thread, XNREADY))
		xnsched_dequeue(thread);
	/*
	 * Self-targeting means resetting the scheduling policy and
	 * parameters to the base ones. Otherwise, the thread inherits
	 * the scheduling data from target.
	 */
	if (target == thread) {
		thread->sched_class = thread->base_class;
		xnsched_trackprio(thread, NULL);
	} else {
		xnsched_getparam(target, &param);
		thread->sched_class = target->sched_class;
		xnsched_trackprio(thread, &param);
	}

	if (xnthread_test_state(thread, XNREADY))
		xnsched_enqueue(thread);

	xnsched_set_resched(thread->sched);
}
Example #3
/*
 * Detect when a thread is about to sleep on a synchronization
 * object currently owned by someone running in secondary mode.
 */
void xnsynch_detect_relaxed_owner(struct xnsynch *synch, struct xnthread *sleeper)
{
	if (xnthread_test_state(sleeper, XNTRAPSW|XNSWREP) == XNTRAPSW &&
	    xnthread_test_state(synch->owner, XNRELAX)) {
		xnthread_set_state(sleeper, XNSWREP);
		xnshadow_send_sig(sleeper, SIGDEBUG,
				  SIGDEBUG_MIGRATE_PRIOINV, 1);
	} else
		xnthread_clear_state(sleeper, XNSWREP);
}
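
The `xnthread_test_state(sleeper, XNTRAPSW|XNSWREP) == XNTRAPSW` test checks two bits with one mask: it matches only when XNTRAPSW is set and XNSWREP is clear, so setting XNSWREP afterwards latches the warning off until the condition goes away. A standalone sketch of that mask-and-compare latch follows; the flag values are illustrative, not the real XN* bits.

#include <stdio.h>

#define TRAPSW 0x1	/* "warn me about secondary-mode switches" */
#define SWREP  0x2	/* "warning already reported" */

/* True only when, within 'mask', exactly the 'want' bits are set. */
static int test_latch(unsigned int state, unsigned int mask,
		      unsigned int want)
{
	return (state & mask) == want;
}

int main(void)
{
	unsigned int state = TRAPSW;

	/* First hit: TRAPSW set, SWREP clear -> report and latch. */
	printf("report? %d\n", test_latch(state, TRAPSW | SWREP, TRAPSW));
	state |= SWREP;

	/* Second hit: SWREP latched -> no duplicate report. */
	printf("report? %d\n", test_latch(state, TRAPSW | SWREP, TRAPSW));
	return 0;
}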
Example #4
/* Must be called with nklock locked, interrupts off. */
struct xnthread *xnsched_pick_next(struct xnsched *sched)
{
	struct xnthread *curr = sched->curr;
	struct xnsched_class *p;
	struct xnthread *thread;

	if (!xnthread_test_state(curr, XNTHREAD_BLOCK_BITS | XNZOMBIE)) {
		/*
		 * Do not preempt the current thread if it holds the
		 * scheduler lock.
		 */
		if (xnthread_test_state(curr, XNLOCK)) {
			xnsched_set_self_resched(sched);
			return curr;
		}
		/*
		 * Push the current thread back to the runnable queue
		 * of the scheduling class it belongs to, if not yet
		 * linked to it (XNREADY tells us if it is).
		 */
		if (!xnthread_test_state(curr, XNREADY)) {
			xnsched_requeue(curr);
			xnthread_set_state(curr, XNREADY);
		}
#ifdef __XENO_SIM__
		if (nkpod->schedhook)
			nkpod->schedhook(curr, XNREADY);
#endif /* __XENO_SIM__ */
	}

	/*
	 * Find the runnable thread having the highest priority among
	 * all scheduling classes, scanned by decreasing priority.
	 */
#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
	for_each_xnsched_class(p) {
		thread = p->sched_pick(sched);
		if (thread) {
			xnthread_clear_state(thread, XNREADY);
			return thread;
		}
	}

	return NULL; /* Never executed because of the idle class. */
#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */
	thread = __xnsched_rt_pick(sched);
	(void)p;
	if (unlikely(thread == NULL))
		thread = &sched->rootcb;

	xnthread_clear_state(thread, XNREADY);

	return thread;
#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
}
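
With CONFIG_XENO_OPT_SCHED_CLASSES enabled, the picker walks the scheduling classes in decreasing priority order and returns the first runnable thread it finds; the idle class at the tail guarantees the scan always yields a thread. A toy model of that first-hit-wins scan follows, with made-up class and thread types for illustration.

#include <stdio.h>
#include <stddef.h>

struct thread { const char *name; };

/* Each class yields its best runnable thread, or NULL if it has none. */
struct sched_class {
	const char *name;
	struct thread *(*pick)(void);
};

static struct thread idle_thr = { "idle" };

static struct thread *rt_pick(void)   { return NULL; }      /* nothing ready */
static struct thread *idle_pick(void) { return &idle_thr; } /* always ready */

/* Classes ordered by decreasing priority; the idle class comes last. */
static struct sched_class classes[] = {
	{ "rt",   rt_pick },
	{ "idle", idle_pick },
};

static struct thread *pick_next(void)
{
	for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		struct thread *t = classes[i].pick();
		if (t)
			return t;	/* first hit wins */
	}
	return NULL;	/* unreachable while the idle class is registered */
}

int main(void)
{
	printf("next: %s\n", pick_next()->name);
	return 0;
}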
Example #5
/* Must be called with nklock locked, interrupts off. */
int xnsched_set_policy(struct xnthread *thread,
		       struct xnsched_class *sched_class,
		       const union xnsched_policy_param *p)
{
	int ret;

	/*
	 * Declaring a thread to a new scheduling class may fail, so
	 * we do that early, while the thread is still a member of the
	 * previous class. However, this also means that the
	 * declaration callback shall not do anything that might
	 * affect the previous class (such as touching
	 * thread->rlink).
	 */
	if (sched_class != thread->base_class) {
		if (sched_class->sched_declare) {
			ret = sched_class->sched_declare(thread, p);
			if (ret)
				return ret;
		}
		sched_class->nthreads++;
	}

	/*
	 * As a special case, we may be called from xnthread_init()
	 * with no previous scheduling class at all.
	 */
	if (likely(thread->base_class != NULL)) {
		if (xnthread_test_state(thread, XNREADY))
			xnsched_dequeue(thread);

		if (sched_class != thread->base_class)
			xnsched_forget(thread);
	}

	thread->sched_class = sched_class;
	thread->base_class = sched_class;
	xnsched_setparam(thread, p);
	thread->bprio = thread->cprio;

	if (xnthread_test_state(thread, XNREADY))
		xnsched_enqueue(thread);

	if (xnthread_test_state(thread, XNSTARTED))
		xnsched_set_resched(thread->sched);

	return 0;
}
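
Note the ordering above: the fallible `sched_declare()` call runs first, while the thread is still a member of its previous class, so a refusal leaves the old state intact; only afterwards does the function tear down the old membership and commit. A hedged sketch of that declare-then-commit pattern follows; every name in it is invented for the example.

#include <errno.h>
#include <stdio.h>

struct policy { const char *name; int full; };

struct thread_model {
	struct policy *base;	/* models thread->base_class */
};

/* Fallible admission check (models sched_class->sched_declare()). */
static int policy_declare(const struct policy *p)
{
	return p->full ? -EBUSY : 0;
}

/*
 * Switch policies: run the part that can fail first, so a refusal
 * leaves the thread's current membership fully intact.
 */
static int set_policy(struct thread_model *t, struct policy *next)
{
	int ret;

	if (next != t->base) {
		ret = policy_declare(next);
		if (ret)
			return ret;	/* t->base untouched */
	}

	t->base = next;			/* commit point */
	return 0;
}

int main(void)
{
	struct policy fifo = { "fifo", 0 }, quota = { "quota", 1 };
	struct thread_model t = { &fifo };

	printf("%d\n", set_policy(&t, &quota));	/* fails with -EBUSY */
	printf("%s\n", t.base->name);		/* still "fifo" */
	return 0;
}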
Example #6
xnticks_t xnthread_get_timeout(xnthread_t *thread, xnticks_t tsc_ns)
{
	xnticks_t timeout;
	xntimer_t *timer;

	if (!xnthread_test_state(thread, XNDELAY))
		return 0LL;

	if (xntimer_running_p(&thread->rtimer))
		timer = &thread->rtimer;
	else if (xntimer_running_p(&thread->ptimer))
		timer = &thread->ptimer;
	else
		return 0LL;
	/*
	 * The caller should have masked IRQs while collecting the
	 * timeout(s), so no tick could be announced in the meantime,
	 * and all timeouts would always use the same epoch
	 * value. Obviously, this can't be a valid assumption for
	 * aperiodic timers, whose values are based on the hardware
	 * TSC, so the current time will change regardless of the
	 * interrupt state; for this reason, we use the "tsc_ns"
	 * input parameter (TSC converted to nanoseconds) the caller
	 * has passed us as the epoch value instead.
	 */
	if (xntbase_periodic_p(xnthread_time_base(thread)))
		return xntimer_get_timeout(timer);

	timeout = xntimer_get_date(timer);

	if (timeout <= tsc_ns)
		return 1;

	return timeout - tsc_ns;
}
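
A zero return from this function is reserved to mean "no timeout pending", so an armed timer whose deadline has already passed is reported as 1 ns rather than 0. A small worked example of that convention:

#include <stdio.h>
#include <stdint.h>

/* 0 means "no timeout"; an elapsed deadline is clamped to 1 ns. */
static uint64_t timeout_left(uint64_t deadline_ns, uint64_t now_ns,
			     int timer_armed)
{
	if (!timer_armed)
		return 0;
	if (deadline_ns <= now_ns)
		return 1;	/* already due, but not "none" */
	return deadline_ns - now_ns;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)timeout_left(100, 250, 1)); /* 1 */
	printf("%llu\n", (unsigned long long)timeout_left(400, 250, 1)); /* 150 */
	printf("%llu\n", (unsigned long long)timeout_left(400, 250, 0)); /* 0 */
	return 0;
}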
Example #7
void xnsched_weak_setparam(struct xnthread *thread,
			   const union xnsched_policy_param *p)
{
	thread->cprio = p->weak.prio;
	if (!xnthread_test_state(thread, XNBOOST))
		xnthread_set_state(thread, XNWEAK);
}
Example #8
static void xnsynch_renice_thread(struct xnthread *thread,
				  struct xnthread *target)
{
	/* Apply the scheduling policy of "target" to "thread" */
	xnsched_track_policy(thread, target);

	if (thread->wchan)
		xnsynch_requeue_sleeper(thread);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(thread, XNRELAX))
		xnshadow_renice(thread);
	else if (xnthread_test_state(thread, XNSHADOW))
		xnthread_set_info(thread, XNPRIOSET);
#endif /* CONFIG_XENO_OPT_PERVASIVE */
}
Example #9
/* Must be called with nklock locked, interrupts off. thread must be
 * runnable. */
void xnsched_migrate(struct xnthread *thread, struct xnsched *sched)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (xnthread_test_state(thread, XNREADY)) {
		xnsched_dequeue(thread);
		xnthread_clear_state(thread, XNREADY);
	}

	if (sched_class->sched_migrate)
		sched_class->sched_migrate(thread, sched);
	/*
	 * WARNING: the scheduling class may have just changed as a
	 * result of calling the per-class migration hook.
	 */
	xnsched_set_resched(thread->sched);
	thread->sched = sched;

#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
	/*
	 * Mark the thread in flight; xnsched_finish_unlocked_switch()
	 * will put it on the remote runqueue.
	 */
	xnthread_set_state(thread, XNMIGRATE);
#else /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */
	/* Move thread to the remote runnable queue. */
	xnsched_putback(thread);
#endif /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */
}
Example #10
static void xnsynch_clear_boost(struct xnsynch *synch,
				struct xnthread *owner)
{
	struct xnthread *target;
	struct xnsynch *hsynch;
	struct xnpholder *h;
	int wprio;

	removepq(&owner->claimq, &synch->link);
	__clrbits(synch->status, XNSYNCH_CLAIMED);
	wprio = w_bprio(owner);

	if (emptypq_p(&owner->claimq)) {
		xnthread_clear_state(owner, XNBOOST);
		target = owner;
	} else {
		/* Find the highest priority needed to enforce the PIP. */
		hsynch = link2synch(getheadpq(&owner->claimq));
		h = getheadpq(&hsynch->pendq);
		XENO_BUGON(NUCLEUS, h == NULL);
		target = link2thread(h, plink);
		if (w_cprio(target) > wprio)
			wprio = w_cprio(target);
		else
			target = owner;
	}

	if (w_cprio(owner) != wprio &&
	    !xnthread_test_state(owner, XNZOMBIE))
		xnsynch_renice_thread(owner, target);
}
Example #11
static struct xnthread *
xnsynch_release_thread(struct xnsynch *synch, struct xnthread *lastowner)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	xnhandle_t lastownerh, newownerh;
	struct xnthread *newowner;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(lastowner, XNOTHER)) {
		if (xnthread_get_rescnt(lastowner) == 0)
			xnshadow_send_sig(lastowner, SIGDEBUG,
					  SIGDEBUG_MIGRATE_PRIOINV, 1);
		else
			xnthread_dec_rescnt(lastowner);
	}
#endif
	lastownerh = xnthread_handle(lastowner);

	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}
	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
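
The early return relies on the fastlock fast path: when the lock word still holds the last owner's plain handle (no "claimed" bit set by a waiter), releasing amounts to a single atomic compare-and-swap and the nklock-protected slow path is skipped entirely. Below is a user-space sketch with C11 atomics; the handle encoding is illustrative, not the exact xnsynch layout.

#include <stdatomic.h>
#include <stdio.h>

#define NO_HANDLE    0u
#define CLAIMED_BIT  0x80000000u	/* set once waiters are queued */

static atomic_uint fastlock;

/* Fast release: succeeds only if we still own the lock uncontended. */
static int fast_release(unsigned int ownerh)
{
	unsigned int expected = ownerh;

	return atomic_compare_exchange_strong(&fastlock, &expected,
					      NO_HANDLE);
}

int main(void)
{
	atomic_store(&fastlock, 42u);		/* owner 42, no waiters */
	printf("fast: %d\n", fast_release(42u));	/* 1: done lock-free */

	atomic_store(&fastlock, 42u | CLAIMED_BIT);	/* waiters queued */
	printf("fast: %d\n", fast_release(42u));	/* 0: take slow path */
	return 0;
}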
Example #12
int *xnthread_get_errno_location(xnthread_t *thread)
{
	static int fallback_errno;

	if (unlikely(!xnpod_active_p()))
		return &fallback_errno;

#ifndef __XENO_SIM__
	if (xnthread_test_state(thread, XNSHADOW))
		return &thread->errcode;

	if (xnthread_test_state(thread, XNROOT))
		return &xnshadow_errno(current);
#endif /* !__XENO_SIM__ */

	return &thread->errcode;
}
Example #13
int *xnthread_get_errno_location(xnthread_t *thread)
{
	static int fallback_errno;

	if (unlikely(!xnpod_active_p()))
		return &fallback_errno;

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(thread, XNSHADOW))
		return &thread->errcode;

	if (xnthread_test_state(thread, XNROOT))
		return &xnshadow_errno(current);
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	return &thread->errcode;
}
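
Both variants above implement the usual per-thread errno indirection: errno is ultimately a dereference of a location pointer, and the runtime decides which storage backs it, falling back to a static slot when no pod is active. A self-contained model of the idea, with hypothetical names:

#include <stdio.h>

struct thr { int errcode; };

static int fallback_errno;		/* used when no runtime is active */
static struct thr *current_thr;		/* models the current thread */

static int *errno_location(void)
{
	if (!current_thr)		/* models !xnpod_active_p() */
		return &fallback_errno;
	return &current_thr->errcode;	/* per-thread storage */
}

#define my_errno (*errno_location())

int main(void)
{
	struct thr t = { 0 };

	my_errno = 11;			/* lands in the fallback slot */
	current_thr = &t;
	my_errno = 22;			/* lands in t.errcode */
	printf("%d %d\n", fallback_errno, t.errcode);	/* 11 22 */
	return 0;
}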
Example #14
/* NOTE: caller must provide locking */
void xnthread_prepare_wait(struct xnthread_wait_context *wc)
{
	struct xnthread *curr = xnpod_current_thread();

	curr->wcontext = wc;
	wc->oldstate = xnthread_test_state(curr, XNDEFCAN);
	xnthread_set_state(curr, XNDEFCAN);
}
Example #15
static void xnsynch_renice_thread(xnthread_t *thread, int prio)
{
	thread->cprio = prio;

	if (thread->wchan)
		/* Ignoring the XNSYNCH_DREORD flag on purpose here. */
		xnsynch_renice_sleeper(thread);
	else if (thread != xnpod_current_thread() &&
		 xnthread_test_state(thread, XNREADY))
		/* xnpod_resume_thread() must be called for all
		   runnable threads but the running one. */
		xnpod_resume_thread(thread, 0);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(thread, XNRELAX))
		xnshadow_renice(thread);
#endif /* CONFIG_XENO_OPT_PERVASIVE */
}
Example #16
/*
 * Must be called with nklock locked, interrupts off. Thread may be
 * blocked.
 */
void xnsched_migrate_passive(struct xnthread *thread, struct xnsched *sched)
{
	migrate_thread(thread, sched);

	if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS)) {
		xnsched_requeue(thread);
		xnthread_set_state(thread, XNREADY);
	}
}
Example #17
u_long t_start(u_long tid,
	       u_long mode,
	       void (*startaddr) (u_long, u_long, u_long, u_long),
	       u_long targs[])
{
	u_long err = SUCCESS;
	xnflags_t xnmode;
	psostask_t *task;
	spl_t s;
	int n;

	/* We have no error case here: just clear out any unwanted bit. */
	mode &= ~T_START_MASK;

	xnlock_get_irqsave(&nklock, s);

	task = psos_h2obj_active(tid, PSOS_TASK_MAGIC, psostask_t);

	if (!task) {
		err = psos_handle_error(tid, PSOS_TASK_MAGIC, psostask_t);
		goto unlock_and_exit;
	}

	if (!xnthread_test_state(&task->threadbase, XNDORMANT)) {
		err = ERR_ACTIVE;	/* Task already started */
		goto unlock_and_exit;
	}

	xnmode = psos_mode_to_xeno(mode);

	task->entry = startaddr;

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(&task->threadbase, XNSHADOW)) {
		memset(task->args, 0, sizeof(task->args));
		/* The shadow will be handed back the exact values
		 * passed to t_start(), since the trampoline runs in
		 * user space. We just relay the information from
		 * t_create() to t_start() here. */
		xnpod_start_thread(&task->threadbase,
				   xnmode,
				   (int)((mode >> 8) & 0x7),
				   XNPOD_ALL_CPUS, (void (*)(void *))startaddr, targs);
	} else
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	{
		/*
		 * Tail below is a reconstruction of the non-shadow
		 * path: kernel-based tasks get their arguments relayed
		 * through the TCB, then start over the skin trampoline.
		 */
		for (n = 0; n < 4; n++)
			task->args[n] = targs ? targs[n] : 0;

		xnpod_start_thread(&task->threadbase,
				   xnmode,
				   (int)((mode >> 8) & 0x7),
				   XNPOD_ALL_CPUS, &psostask_trampoline, task);
	}

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #18
static int __sc_tecreate(struct task_struct *curr, struct pt_regs *regs)
{
	xncompletion_t __user *u_completion;
	struct vrtx_arg_bulk bulk;
	int prio, mode, tid, err;
	vrtxtask_t *task;

	if (!__xn_access_ok(curr, VERIFY_READ, __xn_reg_arg1(regs),
			    sizeof(bulk)))
		return -EFAULT;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs),
			    sizeof(tid)))
		return -EFAULT;

	__xn_copy_from_user(curr, &bulk, (void __user *)__xn_reg_arg1(regs),
			    sizeof(bulk));

	/* Suggested task id. */
	tid = bulk.a1;
	/* Task priority. */
	prio = bulk.a2;
	/* Task mode. */
	mode = bulk.a3 | 0x100;

	/* Completion descriptor our parent thread is pending on. */
	u_completion = (xncompletion_t __user *)__xn_reg_arg3(regs);

	task = xnmalloc(sizeof(*task));

	if (!task) {
		err = ER_TCB;
		goto done;
	}

	xnthread_clear_state(&task->threadbase, XNZOMBIE);

	tid = sc_tecreate_inner(task, NULL, tid, prio, mode, 0, 0, NULL, 0,
				&err);

	if (tid < 0) {
		if (u_completion)
			xnshadow_signal_completion(u_completion, err);
	} else {
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
				  &tid, sizeof(tid));
		err = xnshadow_map(&task->threadbase, u_completion);
	}

	if (err && !xnthread_test_state(&task->threadbase, XNZOMBIE))
		xnfree(task);

done:

	return err;
}
Example #19
static inline void set_thread_running(struct xnsched *sched,
				      struct xnthread *thread)
{
	xnthread_clear_state(thread, XNREADY);
	if (xnthread_test_state(thread, XNRRB))
		xntimer_start(&sched->rrbtimer,
			      thread->rrperiod, XN_INFINITE, XN_RELATIVE);
	else
		xntimer_stop(&sched->rrbtimer);
}
Example #20
/* Must be called with nklock locked, interrupts off. */
void xnsched_putback(struct xnthread *thread)
{
	if (xnthread_test_state(thread, XNREADY))
		xnsched_dequeue(thread);
	else
		xnthread_set_state(thread, XNREADY);

	xnsched_enqueue(thread);
	xnsched_set_resched(thread->sched);
}
Example #21
static void xnthread_periodic_handler(xntimer_t *timer)
{
	xnthread_t *thread = container_of(timer, xnthread_t, ptimer);
	/*
	 * Prevent unwanted round-robin, and do not wake up threads
	 * blocked on a resource.
	 */
	if (xnthread_test_state(thread, XNDELAY|XNPEND) == XNDELAY)
		xnpod_resume_thread(thread, XNDELAY);
}
Example #22
void xnclock_core_local_shot(struct xnsched *sched)
{
	struct xntimerdata *tmd;
	struct xntimer *timer;
	xnsticks_t delay;
	xntimerq_it_t it;
	xntimerh_t *h;

	/*
	 * Do not reprogram locally when inside the tick handler -
	 * will be done on exit anyway. Also exit if there is no
	 * pending timer.
	 */
	if (sched->status & XNINTCK)
		return;

	tmd = xnclock_this_timerdata(&nkclock);
	h = xntimerq_it_begin(&tmd->q, &it);
	if (h == NULL)
		return;

	/*
	 * Here we try to defer the host tick heading the timer queue,
	 * so that it does not preempt a real-time activity uselessly,
	 * in two cases:
	 *
	 * 1) a rescheduling is pending for the current CPU. We may
	 * assume that a real-time thread is about to resume, so we
	 * want to move the host tick out of the way until the host
	 * kernel resumes, unless there are no other outstanding
	 * timers.
	 *
	 * 2) the current thread is running in primary mode, in which
	 * case we may also defer the host tick until the host kernel
	 * resumes.
	 *
	 * The host tick deferral is cleared whenever Xenomai is about
	 * to yield control to the host kernel (see __xnsched_run()),
	 * or a timer with an earlier timeout date is scheduled,
	 * whichever comes first.
	 */
	sched->lflags &= ~XNHDEFER;
	timer = container_of(h, struct xntimer, aplink);
	if (unlikely(timer == &sched->htimer)) {
		if (xnsched_resched_p(sched) ||
		    !xnthread_test_state(sched->curr, XNROOT)) {
			h = xntimerq_it_next(&tmd->q, &it, h);
			if (h) {
				sched->lflags |= XNHDEFER;
				timer = container_of(h, struct xntimer, aplink);
			}
		}
	}

	/*
	 * Tail reconstructed: compute the delay until the selected
	 * timer's date and program the hardware timer through the
	 * I-pipe layer.
	 */
	delay = xntimerh_date(&timer->aplink) - xnclock_core_read_raw();
	if (delay < 0)
		delay = 0;

	ipipe_timer_set(delay);
}
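
The deferral logic boils down to: if the earliest timer is the host tick and real-time work is pending (or a real-time thread is current), program the next timer instead and flag the deferral. A toy model of that decision follows; the data layout is invented for the example.

#include <stddef.h>
#include <stdio.h>

struct shot {
	const char *name;
	int is_host_tick;
};

/*
 * Pick the timer to program into the hardware: skip the host tick
 * when real-time activity is pending and another timer is queued
 * behind it, mirroring the XNHDEFER logic above.
 */
static const struct shot *pick_shot(const struct shot *q, size_t n,
				    int rt_pending)
{
	if (n == 0)
		return NULL;
	if (q[0].is_host_tick && rt_pending && n > 1)
		return &q[1];	/* host tick deferred */
	return &q[0];
}

int main(void)
{
	struct shot q[] = {
		{ "host-tick", 1 },
		{ "rt-timer", 0 },
	};

	printf("%s\n", pick_shot(q, 2, 1)->name);	/* rt-timer */
	printf("%s\n", pick_shot(q, 2, 0)->name);	/* host-tick */
	return 0;
}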
Example #23
void __xnsched_finalize_zombie(struct xnsched *sched)
{
	struct xnthread *thread = sched->zombie;

	xnthread_cleanup_tcb(thread);

	xnarch_finalize_no_switch(xnthread_archtcb(thread));

	if (xnthread_test_state(sched->curr, XNROOT))
		xnfreesync();

	sched->zombie = NULL;
}
Example #24
/* Must be called with nklock locked, interrupts off. thread may be
 * blocked. */
void xnsched_migrate_passive(struct xnthread *thread, struct xnsched *sched)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (xnthread_test_state(thread, XNREADY)) {
		xnsched_dequeue(thread);
		xnthread_clear_state(thread, XNREADY);
	}

	if (sched_class->sched_migrate)
		sched_class->sched_migrate(thread, sched);
	/*
	 * WARNING: the scheduling class may have just changed as a
	 * result of calling the per-class migration hook.
	 */
	xnsched_set_resched(thread->sched);
	thread->sched = sched;

	if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS)) {
		xnsched_requeue(thread);
		xnthread_set_state(thread, XNREADY);
	}
}
Example #25
void xnintr_clock_handler(void)
{
	xnstat_exectime_t *prev;
	struct xnsched *sched;
	unsigned cpu;

	cpu = xnarch_current_cpu();

	if (!cpumask_test_cpu(cpu, &xnarch_supported_cpus)) {
		xnarch_relay_tick();
		return;
	}

	sched = xnpod_sched_slot(cpu);

	prev = xnstat_exectime_switch(sched,
		&nkclock.stat[xnsched_cpu(sched)].account);
	xnstat_counter_inc(&nkclock.stat[xnsched_cpu(sched)].hits);

	trace_mark(xn_nucleus, irq_enter, "irq %u", XNARCH_TIMER_IRQ);
	trace_mark(xn_nucleus, tbase_tick, "base %s", nktbase.name);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&nklock);
	xntimer_tick_aperiodic();
	xnlock_put(&nklock);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
		sched = xnpod_current_sched();
	}
	/*
	 * If the clock interrupt preempted a real-time thread, any
	 * transition to the root thread has already triggered a host
	 * tick propagation from xnpod_schedule(), so at this point,
	 * we only need to propagate the host tick in case the
	 * interrupt preempted the root thread.
	 */
	if (testbits(sched->lflags, XNHTICK) &&
	    xnthread_test_state(sched->curr, XNROOT))
		xnintr_host_tick(sched);

	trace_mark(xn_nucleus, irq_exit, "irq %u", XNARCH_TIMER_IRQ);
}
Example #26
/**
 * @internal
 * @fn void watchdog_handler(struct xntimer *timer)
 * @brief Process watchdog ticks.
 *
 * This internal routine handles incoming watchdog ticks to detect
 * software lockups. It signals or cancels any offending thread found
 * to be monopolizing the CPU long enough to starve the Linux kernel.
 *
 * @coretags{coreirq-only, atomic-entry}
 */
static void watchdog_handler(struct xntimer *timer)
{
	struct xnsched *sched = xnsched_current();
	struct xnthread *curr = sched->curr;

	if (likely(xnthread_test_state(curr, XNROOT))) {
		xnsched_reset_watchdog(sched);
		return;
	}

	if (likely(++sched->wdcount < wd_timeout_arg))
		return;

	trace_cobalt_watchdog_signal(curr);

	if (xnthread_test_state(curr, XNUSER)) {
		printk(XENO_WARNING "watchdog triggered on CPU #%d -- runaway thread "
		       "'%s' signaled\n", xnsched_cpu(sched), curr->name);
		xnthread_call_mayday(curr, SIGDEBUG_WATCHDOG);
	} else {
		printk(XENO_WARNING "watchdog triggered on CPU #%d -- runaway thread "
		       "'%s' canceled\n", xnsched_cpu(sched), curr->name);
		/*
		 * On behalf of an IRQ handler, xnthread_cancel()
		 * would only go halfway through cancelling the preempted
		 * thread. Therefore we manually raise XNKICKED to
		 * cause the next call to xnthread_suspend() to return
		 * early in XNBREAK condition, and XNCANCELD so that
		 * @thread exits next time it invokes
		 * xnthread_test_cancel().
		 */
		xnthread_set_info(curr, XNKICKED|XNCANCELD);
	}

	xnsched_reset_watchdog(sched);
}
Example #27
/* NOTE: caller must provide locking */
void xnthread_finish_wait(struct xnthread_wait_context *wc,
			  void (*cleanup)(struct xnthread_wait_context *wc))
{
	struct xnthread *curr = xnpod_current_thread();

	curr->wcontext = NULL;
	if ((wc->oldstate & XNDEFCAN) == 0)
		xnthread_clear_state(curr, XNDEFCAN);

	if (xnthread_test_state(curr, XNCANPND)) {
		if (cleanup)
			cleanup(wc);
		xnpod_delete_self();
	}
}
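
Together with xnthread_prepare_wait() (Example #14), this forms a save-and-conditionally-restore pair: the wait context snapshots whether XNDEFCAN was already set, so a nested waiter never clears a bit an outer waiter still relies on. A standalone sketch of that pairing; the flag value and names are illustrative.

#include <assert.h>

#define DEFCAN 0x1	/* "defer cancellation" bit */

struct waitctx { unsigned int oldstate; };

static unsigned int state;

static void prepare_wait(struct waitctx *wc)
{
	wc->oldstate = state & DEFCAN;	/* snapshot the bit ... */
	state |= DEFCAN;		/* ... then force it on */
}

static void finish_wait(const struct waitctx *wc)
{
	if (!(wc->oldstate & DEFCAN))	/* restore only if we set it */
		state &= ~DEFCAN;
}

int main(void)
{
	struct waitctx outer, inner;

	prepare_wait(&outer);
	prepare_wait(&inner);	/* nested: bit already set */
	finish_wait(&inner);	/* must NOT clear the outer's bit */
	assert(state & DEFCAN);
	finish_wait(&outer);
	assert(!(state & DEFCAN));
	return 0;
}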
Example #28
static void __task_delete_hook(xnthread_t *thread)
{
	RT_TASK *task;

	if (xnthread_get_magic(thread) != RTAI_SKIN_MAGIC)
		return;

	task = thread2rtask(thread);

	removeq(&__rtai_task_q, &task->link);

	rtai_mark_deleted(task);

	if (xnthread_test_state(&task->thread_base, XNSHADOW))
		xnheap_schedule_free(&kheap, task, &task->link);
}
Example #29
xnticks_t xnthread_get_period(xnthread_t *thread)
{
	xnticks_t period = 0;
	/*
	 * The current thread period might be:
	 * - the value of the timer interval for periodic threads (ns/ticks)
	 * - or, the value of the allotted round-robin quantum (ticks)
	 * - or zero, meaning "no periodic activity".
	 */
	if (xntimer_running_p(&thread->ptimer))
		period = xntimer_get_interval(&thread->ptimer);
	else if (xnthread_test_state(thread, XNRRB))
		period = xnthread_time_slice(thread);

	return period;
}
Example #30
void xnsynch_requeue_sleeper(struct xnthread *thread)
{
	struct xnsynch *synch = thread->wchan;
	struct xnthread *owner;

	if (!testbits(synch->status, XNSYNCH_PRIO))
		return;

	removepq(&synch->pendq, &thread->plink);
	insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));
	owner = synch->owner;

	if (owner != NULL && w_cprio(thread) > w_cprio(owner)) {
		/*
		 * The new (weighted) priority of the sleeping thread
		 * is higher than the priority of the current owner of
		 * the resource: we need to update the PI state.
		 */
		if (testbits(synch->status, XNSYNCH_CLAIMED)) {
			/*
			 * The resource is already claimed, just
			 * reorder the claim queue.
			 */
			removepq(&owner->claimq, &synch->link);
			insertpqf(&owner->claimq, &synch->link,
				  w_cprio(thread));
		} else {
			/*
			 * The resource was NOT claimed, claim it now
			 * and boost the owner.
			 */
			__setbits(synch->status, XNSYNCH_CLAIMED);
			insertpqf(&owner->claimq, &synch->link,
				  w_cprio(thread));
			if (!xnthread_test_state(owner, XNBOOST)) {
				owner->bprio = owner->cprio;
				xnthread_set_state(owner, XNBOOST);
			}
		}
		/*
		 * Renice the owner thread, progressing in the PI
		 * chain as needed.
		 */
		xnsynch_renice_thread(owner, thread);
	}
}
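
The boost step above records the owner's base priority in bprio the first time XNBOOST is set, then raises the effective priority to the sleeper's; clearing the boost later (see xnsynch_clear_boost, Example #10) can thus restore the original value. A minimal sketch of that inheritance step, with plain ints standing in for the weighted priorities:

#include <stdio.h>

struct thr { int prio; int boosted; int saved_prio; };

/*
 * Boost 'owner' to the sleeper's priority if the sleeper outranks it,
 * saving the base priority on the first boost (models XNBOOST/bprio).
 */
static void maybe_boost(struct thr *owner, const struct thr *sleeper)
{
	if (sleeper->prio <= owner->prio)
		return;

	if (!owner->boosted) {
		owner->saved_prio = owner->prio;	/* bprio = cprio */
		owner->boosted = 1;			/* set XNBOOST */
	}
	owner->prio = sleeper->prio;			/* inherit */
}

int main(void)
{
	struct thr owner = { .prio = 10 };
	struct thr hi = { .prio = 50 };

	maybe_boost(&owner, &hi);
	printf("prio=%d boosted=%d saved=%d\n",
	       owner.prio, owner.boosted, owner.saved_prio); /* 50 1 10 */
	return 0;
}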