Example #1
void xnsynch_init(xnsynch_t *synch, xnflags_t flags)
{
	initph(&synch->link);

	if (flags & XNSYNCH_PIP)
		flags |= XNSYNCH_PRIO;	/* Obviously... */

	synch->status = flags & ~XNSYNCH_CLAIMED;
	synch->owner = NULL;
	synch->cleanup = NULL;	/* Only works for PIP-enabled objects. */
	initpq(&synch->pendq);
	xnarch_init_display_context(synch);
}
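
A minimal usage sketch for this two-argument variant, assuming a caller that wants priority-ordered waiters with priority inheritance; the object and init-hook names are hypothetical:

/* Hypothetical caller: mutex-like synchronization object with
 * priority-ordered waiters and priority inheritance. XNSYNCH_PIP
 * implies XNSYNCH_PRIO, as the code above shows. */
static xnsynch_t my_mutex;

void my_module_init(void)
{
	xnsynch_init(&my_mutex, XNSYNCH_PRIO | XNSYNCH_PIP);
}
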
Example #2
void __xntimer_init(xntimer_t *timer, xntbase_t *base,
		    void (*handler) (xntimer_t *timer))
{
	/* CAUTION: Setup from xntimer_init() must not depend on the
	   periodic/aperiodic timing mode. */

	xntimerh_init(&timer->aplink);
	xntimerh_date(&timer->aplink) = XN_INFINITE;
#ifdef CONFIG_XENO_OPT_TIMING_PERIODIC
	timer->base = base;
	xntlholder_init(&timer->plink);
	xntlholder_date(&timer->plink) = XN_INFINITE;
#endif /* CONFIG_XENO_OPT_TIMING_PERIODIC */
	xntimer_set_priority(timer, XNTIMER_STDPRIO);
	timer->status = XNTIMER_DEQUEUED;
	timer->handler = handler;
	timer->interval = 0;
	timer->sched = xnpod_current_sched();

#ifdef CONFIG_XENO_OPT_STATS
	{
		spl_t s;

		/* Name the timer after the current Linux task when
		 * there is no Xenomai thread context or the caller is
		 * a shadow; otherwise use the Xenomai thread's name. */
		if (!xnpod_current_thread() || xnpod_shadow_p())
			snprintf(timer->name, XNOBJECT_NAME_LEN, "%d/%s",
				 current->pid, current->comm);
		else
			xnobject_copy_name(timer->name,
					   xnpod_current_thread()->name);

		inith(&timer->tblink);
		xnstat_counter_set(&timer->scheduled, 0);
		xnstat_counter_set(&timer->fired, 0);

		xnlock_get_irqsave(&nklock, s);
		appendq(&base->timerq, &timer->tblink);
		base->timerq_rev++;
		xnlock_put_irqrestore(&nklock, s);
	}
#endif /* CONFIG_XENO_OPT_STATS */

	xnarch_init_display_context(timer);
}
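
A hedged usage sketch, assuming callers go through the xntimer_init() wrapper seen in Example #4 rather than __xntimer_init() directly; the handler and setup function below are hypothetical:

/* Hypothetical caller: a timer bound to a given time base. */
static xntimer_t my_timer;

static void my_timeout_handler(xntimer_t *timer)
{
	/* Runs when the timer elapses. */
}

void my_setup(xntbase_t *tbase)
{
	xntimer_init(&my_timer, tbase, my_timeout_handler);
	/* Optional: raise dispatch priority, as done for thread
	 * timers in Example #4. */
	xntimer_set_priority(&my_timer, XNTIMER_HIPRIO);
}
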
Example #3
void xnsynch_init(struct xnsynch *synch, xnflags_t flags, xnarch_atomic_t *fastlock)
{
	initph(&synch->link);

	if (flags & XNSYNCH_PIP)
		flags |= XNSYNCH_PRIO | XNSYNCH_OWNER;	/* Obviously... */

	synch->status = flags & ~XNSYNCH_CLAIMED;
	synch->owner = NULL;
	synch->cleanup = NULL;	/* Only works for PIP-enabled objects. */
#ifdef CONFIG_XENO_FASTSYNCH
	if ((flags & XNSYNCH_OWNER) && fastlock) {
		synch->fastlock = fastlock;
		xnarch_atomic_set(fastlock, XN_NO_HANDLE);
	} else
		synch->fastlock = NULL;
#endif /* CONFIG_XENO_FASTSYNCH */
	initpq(&synch->pendq);
	xnarch_init_display_context(synch);
}
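
A sketch of the newer three-argument form under a CONFIG_XENO_FASTSYNCH build; the object and fast-lock word names are hypothetical:

/* Hypothetical caller: owner-tracking synchronization object backed
 * by a fast lock word. XNSYNCH_PIP implies XNSYNCH_OWNER here, so the
 * fastlock pointer is actually recorded. */
static struct xnsynch my_sem;
static xnarch_atomic_t my_fastlock;

void my_module_init(void)
{
	xnsynch_init(&my_sem, XNSYNCH_PRIO | XNSYNCH_PIP, &my_fastlock);
}
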
Example #4
int xnthread_init(struct xnthread *thread,
		  const struct xnthread_init_attr *attr,
		  struct xnsched *sched,
		  struct xnsched_class *sched_class,
		  const union xnsched_policy_param *sched_param)
{
	unsigned int stacksize = attr->stacksize;
	xnflags_t flags = attr->flags;
	struct xnarchtcb *tcb;
	int ret;

	/* Setup the TCB. */
	tcb = xnthread_archtcb(thread);
	xnarch_init_tcb(tcb);

	flags &= ~XNSUSP;
#ifndef CONFIG_XENO_HW_FPU
	flags &= ~XNFPU;
#endif
#ifdef __XENO_SIM__
	flags &= ~XNSHADOW;
#endif
	if (flags & (XNSHADOW|XNROOT))
		stacksize = 0;
	else {
		if (stacksize == 0) /* Pick a reasonable default. */
			stacksize = XNARCH_THREAD_STACKSZ;
		/* Align stack size on a natural word boundary */
		stacksize &= ~(sizeof(long) - 1);
	}

	if (flags & XNROOT)
		thread->idtag = 0;
	else
		/* Skip 0 when the counter wraps; 0 denotes the root thread. */
		thread->idtag = ++idtags ?: 1;

#if CONFIG_XENO_OPT_SYS_STACKPOOLSZ == 0
#ifndef __XENO_SIM__
	if (stacksize > 0) {
		xnlogerr("%s: cannot create kernel thread '%s' (CONFIG_XENO_OPT_SYS_STACKPOOLSZ == 0)\n",
			 __FUNCTION__, attr->name);
		return -ENOMEM;
	}
#endif
#else
	ret = xnarch_alloc_stack(tcb, stacksize);
	if (ret) {
		xnlogerr("%s: no stack for kernel thread '%s' (raise CONFIG_XENO_OPT_SYS_STACKPOOLSZ)\n",
			 __FUNCTION__, attr->name);
		return ret;
	}
#endif
	if (stacksize)
		memset(xnarch_stack_base(tcb), 0, stacksize);

	if (attr->name)
		xnobject_copy_name(thread->name, attr->name);
	else
		snprintf(thread->name, sizeof(thread->name), "%p", thread);

	xntimer_init(&thread->rtimer, attr->tbase, xnthread_timeout_handler);
	xntimer_set_name(&thread->rtimer, thread->name);
	xntimer_set_priority(&thread->rtimer, XNTIMER_HIPRIO);
	xntimer_init(&thread->ptimer, attr->tbase, xnthread_periodic_handler);
	xntimer_set_name(&thread->ptimer, thread->name);
	xntimer_set_priority(&thread->ptimer, XNTIMER_HIPRIO);

	thread->state = flags;
	thread->info = 0;
	thread->schedlck = 0;
	thread->signals = 0;
	thread->asrmode = 0;
	thread->asrimask = 0;
	thread->asr = XNTHREAD_INVALID_ASR;
	thread->asrlevel = 0;

	thread->ops = attr->ops;
	thread->rrperiod = XN_INFINITE;
	thread->rrcredit = XN_INFINITE;
	thread->wchan = NULL;
	thread->wwake = NULL;
	thread->wcontext = NULL;
	thread->hrescnt = 0;
	thread->errcode = 0;
	thread->registry.handle = XN_NO_HANDLE;
	thread->registry.waitkey = NULL;
	memset(&thread->stat, 0, sizeof(thread->stat));

	/* These will be filled by xnpod_start_thread() */
	thread->imask = 0;
	thread->imode = 0;
	thread->entry = NULL;
	thread->cookie = 0;

	inith(&thread->glink);
	initph(&thread->rlink);
	initph(&thread->plink);
#ifdef CONFIG_XENO_OPT_PRIOCPL
	initph(&thread->xlink);
	thread->rpi = NULL;
#endif /* CONFIG_XENO_OPT_PRIOCPL */
#ifdef CONFIG_XENO_OPT_SELECT
	thread->selector = NULL;
#endif /* CONFIG_XENO_OPT_SELECT */
	initpq(&thread->claimq);

	thread->sched = sched;
	thread->init_class = sched_class;
	thread->base_class = NULL; /* xnsched_set_policy() will set it. */
	thread->init_schedparam = *sched_param;
	ret = xnsched_init_tcb(thread);
	if (ret)
		goto fail;

	/*
	 * We must set the scheduling policy last; the scheduling
	 * class implementation code may need the TCB to be fully
	 * initialized to proceed.
	 */
	ret = xnsched_set_policy(thread, sched_class, sched_param);
	if (ret)
		goto fail;

	xnarch_init_display_context(thread);

	return 0;

fail:
#if CONFIG_XENO_OPT_SYS_STACKPOOLSZ > 0
	xnarch_free_stack(tcb);
#endif
	return ret;
}
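
A hedged sketch of a direct xnthread_init() call, setting only the attr fields the listing above reads; xnsched_class_rt and the param.rt.prio layout are assumptions about the RT scheduling class, and all names are hypothetical:

/* Hypothetical caller: build a TCB for a kernel-space thread.
 * xnsched_class_rt and param.rt.prio are assumed; they are not shown
 * in the listing above. Requires a non-zero stack pool
 * (CONFIG_XENO_OPT_SYS_STACKPOOLSZ > 0) for the default stack. */
static struct xnthread my_thread;

int my_create_thread(xntbase_t *tbase)
{
	struct xnthread_init_attr attr;
	union xnsched_policy_param param;

	attr.tbase = tbase;
	attr.flags = 0;
	attr.name = "my_thread";
	attr.stacksize = 0;	/* 0 selects XNARCH_THREAD_STACKSZ. */
	attr.ops = NULL;
	param.rt.prio = 10;

	return xnthread_init(&my_thread, &attr, xnpod_current_sched(),
			     &xnsched_class_rt, &param);
}
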