Example #1: enter_root()
static inline void enter_root(struct xnthread *root)
{
	struct xnarchtcb *rootcb __maybe_unused = xnthread_archtcb(root);

#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
	if (rootcb->core.mm == NULL)
		set_ti_thread_flag(rootcb->core.tip, TIF_MMSWITCH_INT);
#endif
	ipipe_unmute_pic();
}
Example #2: xnthread_cleanup_tcb()
void xnthread_cleanup_tcb(xnthread_t *thread)
{
	/* Does not wreck the TCB, only releases the held resources. */

#if CONFIG_XENO_OPT_SYS_STACKPOOLSZ > 0
	xnarch_free_stack(xnthread_archtcb(thread));
#endif
	if (thread->registry.handle != XN_NO_HANDLE)
		xnregistry_remove(thread->registry.handle);

	thread->registry.handle = XN_NO_HANDLE;
}
Example #3: __xnsched_finalize_zombie()
void __xnsched_finalize_zombie(struct xnsched *sched)
{
	struct xnthread *thread = sched->zombie;

	xnthread_cleanup_tcb(thread);

	xnarch_finalize_no_switch(xnthread_archtcb(thread));

	if (xnthread_test_state(sched->curr, XNROOT))
		xnfreesync();

	sched->zombie = NULL;
}
Example #4: leave_root()
static inline void leave_root(struct xnthread *root)
{
	struct xnarchtcb *rootcb = xnthread_archtcb(root);
	struct task_struct *p = current;

	ipipe_notify_root_preemption();
	ipipe_mute_pic();
	/* Remember the preempted Linux task pointer. */
	rootcb->core.host_task = p;
	rootcb->core.tsp = &p->thread;
	rootcb->core.mm = rootcb->core.active_mm = ipipe_get_active_mm();
#ifdef CONFIG_XENO_ARCH_WANT_TIP
	rootcb->core.tip = task_thread_info(p);
#endif
	xnarch_leave_root(root);
}
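
Examples #1 and #4 form a pair: leave_root() runs when the nucleus preempts the root (Linux) thread, and enter_root() runs when it switches back in. As a hedged sketch of where those hooks sit, here is an illustrative switch path; do_switch() and its surrounding logic are assumptions for illustration, not the actual nucleus scheduling code.

static void do_switch(struct xnsched *sched, struct xnthread *next)
{
	struct xnthread *prev = sched->curr;

	/* Illustrative only: the real switch path lives in the nucleus. */
	if (xnthread_test_state(prev, XNROOT))
		leave_root(prev);	/* Save the preempted Linux context. */

	sched->curr = next;
	xnarch_switch_to(xnthread_archtcb(prev), xnthread_archtcb(next));

	if (xnthread_test_state(next, XNROOT))
		enter_root(next);	/* Restore Linux-side state (PIC, mm flags). */
}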
Example #5: xnsched_renice_root()
/*!
 * @internal
 * \fn void xnsched_renice_root(struct xnsched *sched, struct xnthread *target)
 * \brief Change the root thread priority.
 *
 * xnsched_renice_root() updates the current priority of the root
 * thread for the given scheduler slot. This may lead to changing the
 * scheduling class of the root thread. If @a target is NULL, the
 * root thread is made to track its own base scheduling parameters.
 */
void xnsched_renice_root(struct xnsched *sched, struct xnthread *target)
{
	struct xnthread *root = &sched->rootcb;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (target == NULL)
		target = root;

	xnsched_track_policy(root, target);

	trace_mark(xn_nucleus, sched_reniceroot, MARK_NOARGS);
	xnarch_trace_pid(xnarch_user_pid(xnthread_archtcb(root)), root->cprio);

	xnlock_put_irqrestore(&nklock, s);
}
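
As a usage note: under the priority-coupling scheme (CONFIG_XENO_OPT_PRIOCPL), the nucleus boosts the root thread to match a shadow thread's priority and later drops the boost. The two helpers below are illustrative assumptions of such call sites, not code from the tree.

static void boost_root(struct xnsched *sched, struct xnthread *top)
{
	/* Root thread now tracks @top's class and priority. */
	xnsched_renice_root(sched, top);
}

static void unboost_root(struct xnsched *sched)
{
	/* NULL target: root thread reverts to its base parameters. */
	xnsched_renice_root(sched, NULL);
}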
Example #6: xnthread_init()
int xnthread_init(struct xnthread *thread,
		  const struct xnthread_init_attr *attr,
		  struct xnsched *sched,
		  struct xnsched_class *sched_class,
		  const union xnsched_policy_param *sched_param)
{
	unsigned int stacksize = attr->stacksize;
	xnflags_t flags = attr->flags;
	struct xnarchtcb *tcb;
	int ret;

	/* Setup the TCB. */
	tcb = xnthread_archtcb(thread);
	xnarch_init_tcb(tcb);

	flags &= ~XNSUSP;
#ifndef CONFIG_XENO_HW_FPU
	flags &= ~XNFPU;
#endif
#ifdef __XENO_SIM__
	flags &= ~XNSHADOW;
#endif
	if (flags & (XNSHADOW|XNROOT))
		stacksize = 0;
	else {
		if (stacksize == 0) /* Pick a reasonable default. */
			stacksize = XNARCH_THREAD_STACKSZ;
		/* Align stack size on a natural word boundary */
		stacksize &= ~(sizeof(long) - 1);
	}

	if (flags & XNROOT)
		thread->idtag = 0;
	else
		thread->idtag = ++idtags ?: 1;

#if CONFIG_XENO_OPT_SYS_STACKPOOLSZ == 0
#ifndef __XENO_SIM__
	if (stacksize > 0) {
		xnlogerr("%s: cannot create kernel thread '%s' (CONFIG_XENO_OPT_SYS_STACKPOOLSZ == 0)\n",
			 __FUNCTION__, attr->name);
		return -ENOMEM;
	}
#endif
#else
	ret = xnarch_alloc_stack(tcb, stacksize);
	if (ret) {
		xnlogerr("%s: no stack for kernel thread '%s' (raise CONFIG_XENO_OPT_SYS_STACKPOOLSZ)\n",
			 __FUNCTION__, attr->name);
		return ret;
	}
#endif
	if (stacksize)
		memset(xnarch_stack_base(tcb), 0, stacksize);

	if (attr->name)
		xnobject_copy_name(thread->name, attr->name);
	else
		snprintf(thread->name, sizeof(thread->name), "%p", thread);

	xntimer_init(&thread->rtimer, attr->tbase, xnthread_timeout_handler);
	xntimer_set_name(&thread->rtimer, thread->name);
	xntimer_set_priority(&thread->rtimer, XNTIMER_HIPRIO);
	xntimer_init(&thread->ptimer, attr->tbase, xnthread_periodic_handler);
	xntimer_set_name(&thread->ptimer, thread->name);
	xntimer_set_priority(&thread->ptimer, XNTIMER_HIPRIO);

	thread->state = flags;
	thread->info = 0;
	thread->schedlck = 0;
	thread->signals = 0;
	thread->asrmode = 0;
	thread->asrimask = 0;
	thread->asr = XNTHREAD_INVALID_ASR;
	thread->asrlevel = 0;

	thread->ops = attr->ops;
	thread->rrperiod = XN_INFINITE;
	thread->rrcredit = XN_INFINITE;
	thread->wchan = NULL;
	thread->wwake = NULL;
	thread->wcontext = NULL;
	thread->hrescnt = 0;
	thread->errcode = 0;
	thread->registry.handle = XN_NO_HANDLE;
	thread->registry.waitkey = NULL;
	memset(&thread->stat, 0, sizeof(thread->stat));

	/* These will be filled by xnpod_start_thread() */
	thread->imask = 0;
	thread->imode = 0;
	thread->entry = NULL;
	thread->cookie = 0;

	inith(&thread->glink);
	initph(&thread->rlink);
	initph(&thread->plink);
#ifdef CONFIG_XENO_OPT_PRIOCPL
	initph(&thread->xlink);
	thread->rpi = NULL;
#endif /* CONFIG_XENO_OPT_PRIOCPL */
#ifdef CONFIG_XENO_OPT_SELECT
	thread->selector = NULL;
#endif /* CONFIG_XENO_OPT_SELECT */
	initpq(&thread->claimq);

	thread->sched = sched;
	thread->init_class = sched_class;
	thread->base_class = NULL; /* xnsched_set_policy() will set it. */
	thread->init_schedparam = *sched_param;
	ret = xnsched_init_tcb(thread);
	if (ret)
		goto fail;

	/*
	 * We must set the scheduling policy last; the scheduling
	 * class implementation code may need the TCB to be fully
	 * initialized to proceed.
	 */
	ret = xnsched_set_policy(thread, sched_class, sched_param);
	if (ret)
		goto fail;

	xnarch_init_display_context(thread);

	return 0;

fail:
#if CONFIG_XENO_OPT_SYS_STACKPOOLSZ > 0
	xnarch_free_stack(tcb);
#endif
	return ret;
}
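
Example #7 below shows xnthread_init() setting up the root thread. For the common case, here is a hedged sketch of initializing an ordinary kernel thread in the RT class; the attribute values, priority and name are illustrative assumptions, not code from the tree.

static int init_demo_thread(struct xnthread *thread, struct xnsched *sched)
{
	union xnsched_policy_param param;
	struct xnthread_init_attr attr;

	attr.flags = 0;		/* Neither XNROOT nor XNSHADOW: a stack is allocated. */
	attr.name = "demo";	/* Illustrative name. */
	attr.stacksize = 0;	/* 0 picks XNARCH_THREAD_STACKSZ. */
	attr.tbase = &nktbase;	/* Master time base. */
	attr.ops = NULL;
	param.rt.prio = 10;	/* Assumed priority within the RT class range. */

	return xnthread_init(thread, &attr, sched, &xnsched_class_rt, &param);
}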
Example #7: xnsched_init()
void xnsched_init(struct xnsched *sched, int cpu)
{
	char htimer_name[XNOBJECT_NAME_LEN];
	char root_name[XNOBJECT_NAME_LEN];
	union xnsched_policy_param param;
	struct xnthread_init_attr attr;
	struct xnsched_class *p;

	sched->cpu = cpu;

	for_each_xnsched_class(p) {
		if (p->sched_init)
			p->sched_init(sched);
	}

#ifdef CONFIG_SMP
	sprintf(htimer_name, "[host-timer/%u]", cpu);
	sprintf(root_name, "ROOT/%u", cpu);
#else
	strcpy(htimer_name, "[host-timer]");
	strcpy(root_name, "ROOT");
#endif
	sched->status = 0;
	sched->inesting = 0;
	sched->curr = &sched->rootcb;
#ifdef CONFIG_XENO_OPT_PRIOCPL
	xnlock_init(&sched->rpilock);
#endif
	/*
	 * No direct handler here since the host timer processing is
	 * postponed to xnintr_irq_handler(), as part of the interrupt
	 * exit code.
	 */
	xntimer_init(&sched->htimer, &nktbase, NULL);
	xntimer_set_priority(&sched->htimer, XNTIMER_LOPRIO);
	xntimer_set_name(&sched->htimer, htimer_name);
	xntimer_set_sched(&sched->htimer, sched);
	sched->zombie = NULL;
	xnarch_cpus_clear(sched->resched);

	attr.flags = XNROOT | XNSTARTED | XNFPU;
	attr.name = root_name;
	attr.stacksize = 0;
	attr.tbase = &nktbase;
	attr.ops = NULL;
	param.idle.prio = XNSCHED_IDLE_PRIO;

	xnthread_init(&sched->rootcb, &attr,
		      sched, &xnsched_class_idle, &param);

	sched->rootcb.affinity = xnarch_cpumask_of_cpu(cpu);
	xnstat_exectime_set_current(sched, &sched->rootcb.stat.account);
#ifdef CONFIG_XENO_HW_FPU
	sched->fpuholder = &sched->rootcb;
#endif /* CONFIG_XENO_HW_FPU */

	xnarch_init_root_tcb(xnthread_archtcb(&sched->rootcb),
			     &sched->rootcb,
			     xnthread_name(&sched->rootcb));

#ifdef CONFIG_XENO_OPT_WATCHDOG
	xntimer_init(&sched->wdtimer, &nktbase, xnsched_watchdog_handler);
	xntimer_set_name(&sched->wdtimer, "[watchdog]");
	xntimer_set_priority(&sched->wdtimer, XNTIMER_LOPRIO);
	xntimer_set_sched(&sched->wdtimer, sched);
#endif /* CONFIG_XENO_OPT_WATCHDOG */
	xntimerq_init(&sched->timerqueue);
}
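
xnsched_init() is meant to run once per CPU when the pod comes up. A minimal sketch of such a setup loop, assuming a pod structure with one struct xnsched slot per CPU; the loop shape is illustrative, not the exact xnpod_init() code.

static void init_sched_slots(struct xnpod *pod, int nr_cpus)
{
	int cpu;

	/* One scheduler slot per online CPU. */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		xnsched_init(&pod->sched[cpu], cpu);
}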