/*
 * /proc write handler for the nucleus CPU affinity mask.
 *
 * Parses a user-supplied numeric CPU bitmask (any base accepted by
 * simple_strtol) and installs it as the new nucleus affinity,
 * restricted to the set of CPUs Xenomai actually supports.
 *
 * Returns the number of bytes consumed, or a negative error code.
 */
static int affinity_write_proc(struct file *file,
			       const char __user * buffer,
			       unsigned long count, void *data)
{
	char *end, buf[16];
	unsigned long val;
	xnarch_cpumask_t new_affinity;
	int n, cpu;

	/* Clamp the copy so the buffer always has room for a NUL. */
	n = count > sizeof(buf) - 1 ? sizeof(buf) - 1 : count;

	if (copy_from_user(buf, buffer, n))
		return -EFAULT;

	buf[n] = '\0';
	val = simple_strtol(buf, &end, 0);

	/* Tolerate trailing whitespace (e.g. the newline from "echo"). */
	if (*end != '\0' && !isspace(*end))
		return -EINVAL;

	xnarch_cpus_clear(new_affinity);
	/*
	 * Scan every bit of the parsed mask; BITS_PER_LONG is used for
	 * consistency with affinity_vfile_store() instead of the
	 * equivalent sizeof(val) * 8.
	 */
	for (cpu = 0; cpu < BITS_PER_LONG; cpu++, val >>= 1)
		if (val & 1)
			xnarch_cpu_set(cpu, new_affinity);

	/* Never let unsupported CPUs into the nucleus affinity mask. */
	xnarch_cpus_and(nkaffinity, new_affinity, xnarch_supported_cpus);

	return count;
}
static ssize_t affinity_vfile_store(struct xnvfile_input *input) { xnarch_cpumask_t new_affinity; ssize_t ret; long val; int cpu; ret = xnvfile_get_integer(input, &val); if (ret < 0) return ret; xnarch_cpus_clear(new_affinity); for (cpu = 0; cpu < BITS_PER_LONG; cpu++, val >>= 1) if (val & 1) xnarch_cpu_set(cpu, new_affinity); xnarch_cpus_and(nkaffinity, new_affinity, xnarch_supported_cpus); return ret; }
/**
 * rt_task_init - create an RTAI task, left suspended.
 * @task: task descriptor to initialize; must not already be a live task.
 * @body: task entry point, receiving @cookie.
 * @cookie: opaque integer handed to @body via rt_task_trampoline.
 * @stack_size: stack size for the underlying nucleus thread.
 * @priority: RTAI priority within [XNSCHED_LOW_PRIO, XNSCHED_HIGH_PRIO].
 * @uses_fpu: non-zero if the task uses the FPU (requires
 *            CONFIG_XENO_HW_FPU, otherwise -EINVAL).
 * @sigfn: optional per-task signal function; installing the first one
 *         registers the global task-switch hook.
 *
 * The task starts with suspend_depth == 1 (XNSUSP) and must be resumed
 * explicitly. Returns 0 on success, -EINVAL on bad parameters or start
 * failure, -ENOMEM if the nucleus thread cannot be initialized.
 *
 * Fix: the call to xnpod_init_thread() carried a mangled token
 * ("&para;m" HTML-entity corruption) where "&param" belongs; repaired.
 */
int rt_task_init(RT_TASK *task, void (*body) (int), int cookie,
		 int stack_size, int priority, int uses_fpu,
		 void (*sigfn) (void))
{
	union xnsched_policy_param param;
	struct xnthread_start_attr sattr;
	struct xnthread_init_attr iattr;
	xnflags_t bflags = 0;
	int ret;
	spl_t s;

	if (priority < XNSCHED_LOW_PRIO ||
	    priority > XNSCHED_HIGH_PRIO || task->magic == RTAI_TASK_MAGIC)
		return -EINVAL;

	/* RTAI and the nucleus order priorities oppositely. */
	priority = XNSCHED_HIGH_PRIO - priority + 1;	/* Normalize. */

	if (uses_fpu)
#ifdef CONFIG_XENO_HW_FPU
		bflags |= XNFPU;
#else /* !CONFIG_XENO_HW_FPU */
		return -EINVAL;
#endif /* CONFIG_XENO_HW_FPU */

	iattr.tbase = rtai_tbase;
	iattr.name = NULL;
	iattr.flags = bflags;
	iattr.ops = &__rtai_task_ops;
	iattr.stacksize = stack_size;
	param.rt.prio = priority;

	if (xnpod_init_thread(&task->thread_base,
			      &iattr, &xnsched_class_rt, &param) != 0)
		/* Assume this is the only possible failure. */
		return -ENOMEM;

	xnarch_cpus_clear(task->affinity);
	inith(&task->link);
	task->suspend_depth = 1;
	task->cookie = cookie;
	task->body = body;
	task->sigfn = sigfn;

	/*
	 * NOTE(review): the affinity mask was just cleared above, so
	 * this test always succeeds and the task always gets
	 * XNPOD_ALL_CPUS — confirm whether a caller-provided affinity
	 * was ever meant to survive to this point.
	 */
	if (xnarch_cpus_empty(task->affinity))
		task->affinity = XNPOD_ALL_CPUS;

	xnlock_get_irqsave(&nklock, s);

	sattr.mode = XNSUSP;	/* Suspend on startup. */
	sattr.imask = 0;
	sattr.affinity = task->affinity;
	sattr.entry = rt_task_trampoline;
	sattr.cookie = task;
	ret = xnpod_start_thread(&task->thread_base, &sattr);
	if (ret)
		goto unlock_and_exit;

	task->magic = RTAI_TASK_MAGIC;
	appendq(&__rtai_task_q, &task->link);

#ifdef CONFIG_XENO_FASTSYNCH
	/* We need an anonymous registry entry to obtain a handle for fast
	   mutex locking. */
	ret = xnthread_register(&task->thread_base, "");
	if (ret) {
		/* Roll back the already-started thread on failure. */
		xnpod_abort_thread(&task->thread_base);
		goto unlock_and_exit;
	}
#endif /* CONFIG_XENO_FASTSYNCH */

	/* Add a switch hook only if a signal function has been declared
	   at least once for some created task. */
	if (sigfn != NULL && __rtai_task_sig++ == 0)
		xnpod_add_hook(XNHOOK_THREAD_SWITCH, &__task_switch_hook);

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret ? -EINVAL : 0;
}
void xnsched_init(struct xnsched *sched, int cpu) { char htimer_name[XNOBJECT_NAME_LEN]; char root_name[XNOBJECT_NAME_LEN]; union xnsched_policy_param param; struct xnthread_init_attr attr; struct xnsched_class *p; sched->cpu = cpu; for_each_xnsched_class(p) { if (p->sched_init) p->sched_init(sched); } #ifdef CONFIG_SMP sprintf(htimer_name, "[host-timer/%u]", cpu); sprintf(root_name, "ROOT/%u", cpu); #else strcpy(htimer_name, "[host-timer]"); strcpy(root_name, "ROOT"); #endif sched->status = 0; sched->inesting = 0; sched->curr = &sched->rootcb; #ifdef CONFIG_XENO_OPT_PRIOCPL xnlock_init(&sched->rpilock); #endif /* * No direct handler here since the host timer processing is * postponed to xnintr_irq_handler(), as part of the interrupt * exit code. */ xntimer_init(&sched->htimer, &nktbase, NULL); xntimer_set_priority(&sched->htimer, XNTIMER_LOPRIO); xntimer_set_name(&sched->htimer, htimer_name); xntimer_set_sched(&sched->htimer, sched); sched->zombie = NULL; xnarch_cpus_clear(sched->resched); attr.flags = XNROOT | XNSTARTED | XNFPU; attr.name = root_name; attr.stacksize = 0; attr.tbase = &nktbase; attr.ops = NULL; param.idle.prio = XNSCHED_IDLE_PRIO; xnthread_init(&sched->rootcb, &attr, sched, &xnsched_class_idle, ¶m); sched->rootcb.affinity = xnarch_cpumask_of_cpu(cpu); xnstat_exectime_set_current(sched, &sched->rootcb.stat.account); #ifdef CONFIG_XENO_HW_FPU sched->fpuholder = &sched->rootcb; #endif /* CONFIG_XENO_HW_FPU */ xnarch_init_root_tcb(xnthread_archtcb(&sched->rootcb), &sched->rootcb, xnthread_name(&sched->rootcb)); #ifdef CONFIG_XENO_OPT_WATCHDOG xntimer_init(&sched->wdtimer, &nktbase, xnsched_watchdog_handler); xntimer_set_name(&sched->wdtimer, "[watchdog]"); xntimer_set_priority(&sched->wdtimer, XNTIMER_LOPRIO); xntimer_set_sched(&sched->wdtimer, sched); #endif /* CONFIG_XENO_OPT_WATCHDOG */ xntimerq_init(&sched->timerqueue); }