/*
 * Set up the per-CPU cascade machinery of a slave time base.
 *
 * For every online CPU, reset each bucket of that CPU's timer wheel,
 * then arm the cascading timer which derives the slave's periodic
 * ticks from the master aperiodic time base (nktbase).
 */
void xntslave_init(xntslave_t *slave)
{
	int ncpus = xnarch_num_online_cpus();
	int cpu, slot;

	for (cpu = 0; cpu < ncpus; cpu++) {
		struct percpu_cascade *cascade = &slave->cascade[cpu];

		/* Start from an empty timer wheel on this CPU. */
		for (slot = 0; slot < XNTIMER_WHEELSIZE; slot++)
			xntlist_init(&cascade->wheel[slot]);

		/*
		 * Slave periodic time bases are cascaded from the
		 * master aperiodic time base.
		 */
		xntimer_init(&cascade->timer, &nktbase, xntimer_tick_periodic);
		xntimer_set_name(&cascade->timer, slave->base.name);
		xntimer_set_priority(&cascade->timer, XNTIMER_HIPRIO);
		xntimer_set_sched(&cascade->timer, xnpod_sched_slot(cpu));
	}
}
/*
 * Initialize a per-CPU scheduler slot.
 *
 * Builds the CPU-local naming for the host/round-robin timers and the
 * root thread, runs each scheduling class's per-CPU init hook, then
 * bootstraps the root (idle) thread control block before wiring up the
 * host, round-robin and (optionally) watchdog timers on nkclock.
 *
 * @sched: scheduler slot to initialize (becomes current = rootcb).
 * @cpu:   logical CPU this slot serves; only stored under CONFIG_SMP.
 *
 * NOTE(review): ordering matters here — __xnthread_init() must run
 * before xnstat_exectime_set_current() and the root TCB bookkeeping
 * below; do not reorder.
 */
void xnsched_init(struct xnsched *sched, int cpu)
{
	char rrbtimer_name[XNOBJECT_NAME_LEN];
	char htimer_name[XNOBJECT_NAME_LEN];
	char root_name[XNOBJECT_NAME_LEN];
	union xnsched_policy_param param;
	struct xnthread_init_attr attr;
	struct xnsched_class *p;

#ifdef CONFIG_SMP
	sched->cpu = cpu;
	/* Per-CPU object names; ksformat() bounds each expansion. */
	ksformat(htimer_name, sizeof(htimer_name), "[host-timer/%u]", cpu);
	ksformat(rrbtimer_name, sizeof(rrbtimer_name), "[rrb-timer/%u]", cpu);
	ksformat(root_name, sizeof(root_name), "ROOT/%u", cpu);
	cpus_clear(sched->resched);
#else
	strcpy(htimer_name, "[host-timer]");
	strcpy(rrbtimer_name, "[rrb-timer]");
	strcpy(root_name, "ROOT");
#endif
	/* Let every registered scheduling class set up its per-CPU state. */
	for_each_xnsched_class(p) {
		if (p->sched_init)
			p->sched_init(sched);
	}

	sched->status = 0;
	sched->lflags = 0;
	sched->inesting = 0;	/* Interrupt nesting depth starts at zero. */
	sched->curr = &sched->rootcb;	/* Root thread is current initially. */

	/* Bootstrap the root (idle-class) thread for this CPU. */
	attr.flags = XNROOT | XNFPU;
	attr.name = root_name;
	attr.personality = &xenomai_personality;
	attr.affinity = cpumask_of_cpu(cpu);
	param.idle.prio = XNSCHED_IDLE_PRIO;
	__xnthread_init(&sched->rootcb, &attr, sched, &xnsched_class_idle, &param);

	/*
	 * No direct handler here since the host timer processing is
	 * postponed to xnintr_irq_handler(), as part of the interrupt
	 * exit code.
	 */
	xntimer_init(&sched->htimer, &nkclock, NULL, sched, XNTIMER_IGRAVITY);
	xntimer_set_priority(&sched->htimer, XNTIMER_LOPRIO);
	xntimer_set_name(&sched->htimer, htimer_name);
	/* Round-robin budget timer, fired by roundrobin_handler(). */
	xntimer_init(&sched->rrbtimer, &nkclock, roundrobin_handler, sched, XNTIMER_IGRAVITY);
	xntimer_set_name(&sched->rrbtimer, rrbtimer_name);
	xntimer_set_priority(&sched->rrbtimer, XNTIMER_LOPRIO);

	/* Execution-time accounting starts on the root thread. */
	xnstat_exectime_set_current(sched, &sched->rootcb.stat.account);
#ifdef CONFIG_XENO_ARCH_FPU
	/* Root thread initially owns the FPU on this CPU. */
	sched->fpuholder = &sched->rootcb;
#endif /* CONFIG_XENO_ARCH_FPU */

	xnthread_init_root_tcb(&sched->rootcb);
	/* Register the root thread in the global thread queue. */
	list_add_tail(&sched->rootcb.glink, &nkthreadq);
	cobalt_nrthreads++;

#ifdef CONFIG_XENO_OPT_WATCHDOG
	/* Runaway-thread watchdog; XNTIMER_NOBLCK keeps it unblockable. */
	xntimer_init(&sched->wdtimer, &nkclock, watchdog_handler, sched, XNTIMER_NOBLCK|XNTIMER_IGRAVITY);
	xntimer_set_name(&sched->wdtimer, "[watchdog]");
	xntimer_set_priority(&sched->wdtimer, XNTIMER_LOPRIO);
#endif /* CONFIG_XENO_OPT_WATCHDOG */
}
/*
 * Initialize a thread control block from an attribute set.
 *
 * Sets up the architecture TCB, sizes and (optionally) allocates the
 * kernel stack, assigns a non-zero idtag to non-root threads, arms the
 * resource and periodic timers, zeroes the bookkeeping fields, then
 * hands the TCB to the scheduling class via xnsched_set_policy().
 *
 * @thread:      control block to initialize.
 * @attr:        creation attributes (flags, name, stacksize, tbase, ops).
 * @sched:       scheduler slot the thread is created on.
 * @sched_class: initial scheduling class.
 * @sched_param: initial scheduling parameters for that class.
 *
 * Returns 0 on success, -ENOMEM or the xnarch_alloc_stack() error code
 * when no stack can be provided, or the failure code from
 * xnsched_init_tcb()/xnsched_set_policy() (stack freed on that path).
 */
int xnthread_init(struct xnthread *thread,
		  const struct xnthread_init_attr *attr,
		  struct xnsched *sched,
		  struct xnsched_class *sched_class,
		  const union xnsched_policy_param *sched_param)
{
	unsigned int stacksize = attr->stacksize;
	xnflags_t flags = attr->flags;
	struct xnarchtcb *tcb;
	int ret;

	/* Setup the TCB. */
	tcb = xnthread_archtcb(thread);
	xnarch_init_tcb(tcb);

	/* Threads never start suspended via this flag. */
	flags &= ~XNSUSP;
#ifndef CONFIG_XENO_HW_FPU
	flags &= ~XNFPU;
#endif
#ifdef __XENO_SIM__
	flags &= ~XNSHADOW;
#endif
	/* Shadow and root threads run on borrowed stacks: none needed. */
	if (flags & (XNSHADOW|XNROOT))
		stacksize = 0;
	else {
		if (stacksize == 0) /* Pick a reasonable default. */
			stacksize = XNARCH_THREAD_STACKSZ;
		/* Align stack size on a natural word boundary */
		stacksize &= ~(sizeof(long) - 1);
	}

	/*
	 * Root threads get idtag 0; others get a non-zero tag
	 * (the "?: 1" skips 0 when the counter wraps).
	 */
	if (flags & XNROOT)
		thread->idtag = 0;
	else
		thread->idtag = ++idtags ?: 1;

#if CONFIG_XENO_OPT_SYS_STACKPOOLSZ == 0
#ifndef __XENO_SIM__
	/* No stack pool configured: kernel threads needing a stack fail. */
	if (stacksize > 0) {
		xnlogerr("%s: cannot create kernel thread '%s' (CONFIG_XENO_OPT_SYS_STACKPOOLSZ == 0)\n",
			 __FUNCTION__, attr->name);
		return -ENOMEM;
	}
#endif
#else
	ret = xnarch_alloc_stack(tcb, stacksize);
	if (ret) {
		xnlogerr("%s: no stack for kernel thread '%s' (raise CONFIG_XENO_OPT_SYS_STACKPOOLSZ)\n",
			 __FUNCTION__, attr->name);
		return ret;
	}
#endif
	if (stacksize)
		memset(xnarch_stack_base(tcb), 0, stacksize);

	/* Fall back to the TCB address as the name when none is given. */
	if (attr->name)
		xnobject_copy_name(thread->name, attr->name);
	else
		snprintf(thread->name, sizeof(thread->name), "%p", thread);

	/* Resource (timeout) timer. */
	xntimer_init(&thread->rtimer, attr->tbase, xnthread_timeout_handler);
	xntimer_set_name(&thread->rtimer, thread->name);
	xntimer_set_priority(&thread->rtimer, XNTIMER_HIPRIO);
	/* Periodic-wait timer. */
	xntimer_init(&thread->ptimer, attr->tbase, xnthread_periodic_handler);
	xntimer_set_name(&thread->ptimer, thread->name);
	xntimer_set_priority(&thread->ptimer, XNTIMER_HIPRIO);

	thread->state = flags;
	thread->info = 0;
	thread->schedlck = 0;
	thread->signals = 0;
	thread->asrmode = 0;
	thread->asrimask = 0;
	thread->asr = XNTHREAD_INVALID_ASR;
	thread->asrlevel = 0;

	thread->ops = attr->ops;
	thread->rrperiod = XN_INFINITE;
	thread->rrcredit = XN_INFINITE;
	thread->wchan = NULL;
	thread->wwake = NULL;
	thread->wcontext = NULL;
	thread->hrescnt = 0;
	thread->errcode = 0;
	thread->registry.handle = XN_NO_HANDLE;
	thread->registry.waitkey = NULL;
	memset(&thread->stat, 0, sizeof(thread->stat));

	/* These will be filled by xnpod_start_thread() */
	thread->imask = 0;
	thread->imode = 0;
	thread->entry = NULL;
	thread->cookie = 0;

	/* Linkage holders for the global/ready/pend queues. */
	inith(&thread->glink);
	initph(&thread->rlink);
	initph(&thread->plink);
#ifdef CONFIG_XENO_OPT_PRIOCPL
	initph(&thread->xlink);
	thread->rpi = NULL;
#endif /* CONFIG_XENO_OPT_PRIOCPL */
#ifdef CONFIG_XENO_OPT_SELECT
	thread->selector = NULL;
#endif /* CONFIG_XENO_OPT_SELECT */
	initpq(&thread->claimq);

	thread->sched = sched;
	thread->init_class = sched_class;
	thread->base_class = NULL; /* xnsched_set_policy() will set it. */
	thread->init_schedparam = *sched_param;
	ret = xnsched_init_tcb(thread);
	if (ret)
		goto fail;

	/*
	 * We must set the scheduling policy last; the scheduling
	 * class implementation code may need the TCB to be fully
	 * initialized to proceed.
	 */
	ret = xnsched_set_policy(thread, sched_class, sched_param);
	if (ret)
		goto fail;

	xnarch_init_display_context(thread);

	return 0;

fail:
	/* Release the stack allocated above, if any. */
#if CONFIG_XENO_OPT_SYS_STACKPOOLSZ > 0
	xnarch_free_stack(tcb);
#endif
	return ret;
}
void xnsched_init(struct xnsched *sched, int cpu) { char htimer_name[XNOBJECT_NAME_LEN]; char root_name[XNOBJECT_NAME_LEN]; union xnsched_policy_param param; struct xnthread_init_attr attr; struct xnsched_class *p; sched->cpu = cpu; for_each_xnsched_class(p) { if (p->sched_init) p->sched_init(sched); } #ifdef CONFIG_SMP sprintf(htimer_name, "[host-timer/%u]", cpu); sprintf(root_name, "ROOT/%u", cpu); #else strcpy(htimer_name, "[host-timer]"); strcpy(root_name, "ROOT"); #endif sched->status = 0; sched->inesting = 0; sched->curr = &sched->rootcb; #ifdef CONFIG_XENO_OPT_PRIOCPL xnlock_init(&sched->rpilock); #endif /* * No direct handler here since the host timer processing is * postponed to xnintr_irq_handler(), as part of the interrupt * exit code. */ xntimer_init(&sched->htimer, &nktbase, NULL); xntimer_set_priority(&sched->htimer, XNTIMER_LOPRIO); xntimer_set_name(&sched->htimer, htimer_name); xntimer_set_sched(&sched->htimer, sched); sched->zombie = NULL; xnarch_cpus_clear(sched->resched); attr.flags = XNROOT | XNSTARTED | XNFPU; attr.name = root_name; attr.stacksize = 0; attr.tbase = &nktbase; attr.ops = NULL; param.idle.prio = XNSCHED_IDLE_PRIO; xnthread_init(&sched->rootcb, &attr, sched, &xnsched_class_idle, ¶m); sched->rootcb.affinity = xnarch_cpumask_of_cpu(cpu); xnstat_exectime_set_current(sched, &sched->rootcb.stat.account); #ifdef CONFIG_XENO_HW_FPU sched->fpuholder = &sched->rootcb; #endif /* CONFIG_XENO_HW_FPU */ xnarch_init_root_tcb(xnthread_archtcb(&sched->rootcb), &sched->rootcb, xnthread_name(&sched->rootcb)); #ifdef CONFIG_XENO_OPT_WATCHDOG xntimer_init(&sched->wdtimer, &nktbase, xnsched_watchdog_handler); xntimer_set_name(&sched->wdtimer, "[watchdog]"); xntimer_set_priority(&sched->wdtimer, XNTIMER_LOPRIO); xntimer_set_sched(&sched->wdtimer, sched); #endif /* CONFIG_XENO_OPT_WATCHDOG */ xntimerq_init(&sched->timerqueue); }