/*
 * asgn_min_prio - propagate the minimum priority of the tasklets queued on
 * CPU 'cpuid' to that CPU's timers_manager task.
 *
 * Scans timers_list[LIST_CPUID] for the smallest 'priority' field while
 * holding timers_lock[LIST_CPUID], then, under the global interrupt lock,
 * lowers timers_manager's priority to match; if the manager is already
 * READY it is removed and re-enqueued so the ready list reflects the new
 * priority immediately.
 *
 * NOTE(review): the first element's priority is read before timers_lock is
 * taken, and the lock is dropped and re-taken on every loop iteration
 * (lock breaking, presumably to bound interrupt-off latency) -- so the
 * scan is not atomic with respect to concurrent list updates; confirm
 * this tolerance is intended.
 */
static inline void asgn_min_prio(int cpuid)
{
	/* find minimum priority in timers_struct */
	RT_TASK *timer_manager;
	struct rt_tasklet_struct *timer, *timerl;
	spinlock_t *lock;
	unsigned long flags;
	int priority;

	/* Seed the minimum with the first queued tasklet's priority
	   (read before the lock is acquired -- see NOTE above). */
	priority = (timer = (timerl = &timers_list[LIST_CPUID])->next)->priority;
	flags = rt_spin_lock_irqsave(lock = &timers_lock[LIST_CPUID]);
	while ((timer = timer->next) != timerl) {
		if (timer->priority < priority) {
			priority = timer->priority;
		}
		/* Lock break: briefly re-enable interrupts between elements. */
		rt_spin_unlock_irqrestore(flags, lock);
		flags = rt_spin_lock_irqsave(lock);
	}
	rt_spin_unlock_irqrestore(flags, lock);

	flags = rt_global_save_flags_and_cli();
	/* Lower (never raise) the manager's priority to the found minimum. */
	if ((timer_manager = &timers_manager[LIST_CPUID])->priority > priority) {
		timer_manager->priority = priority;
		/* Requeue a READY manager so its ready-list position matches
		   the boosted priority. */
		if (timer_manager->state == RT_SCHED_READY) {
			rem_ready_task(timer_manager);
			enq_ready_task(timer_manager);
		}
	}
	rt_global_restore_flags(flags);
}
/*
 * rt_bits_signal - update a bits event group and wake satisfied waiters.
 *
 * Applies the operation selected by 'setfun' (via the exec_fun dispatch
 * table) with 'masks' to bits->mask, then walks the wait queue and, for
 * every task whose stored test condition (test_fun[TEST_FUN(task)] against
 * TEST_MASK(task)) now holds, removes it from the blocked and timed lists
 * and makes it ready again.
 *
 * Runs entirely under the global interrupt lock.  On SMP, CPUs owning
 * newly-readied tasks are collected in 'schedmap' and rescheduled once at
 * the end via RT_SCHEDULE_MAP.
 *
 * Returns the value of bits->mask as it was right after the update.
 */
RTAI_SYSCALL_MODE unsigned long rt_bits_signal(BITS *bits, int setfun, unsigned long masks)
{
	unsigned long flags, schedmap;
	RT_TASK *task;
	QUEUE *q;

	CHECK_BITS_MAGIC(bits);

	schedmap = 0;
	q = &bits->queue;
	flags = rt_global_save_flags_and_cli();
	/* Apply the requested set/clear operation to the group's mask. */
	exec_fun[setfun](bits, masks);
	masks = bits->mask; /* snapshot the resulting mask for the return value */
	while ((q = q->next) != &bits->queue) {
		task = q->task;
		/* Re-evaluate this waiter's condition against the new mask. */
		if (test_fun[TEST_FUN(task)](bits, TEST_MASK(task))) {
			dequeue_blocked(task);
			rem_timed_task(task);
			/* Clear the blocking bits; enqueue only if the task is
			   now fully READY (no other blocking state remains). */
			if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
				enq_ready_task(task);
#ifdef CONFIG_SMP
				/* Remember this task's CPU for the final reschedule. */
				set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
#endif
			}
		}
	}
	RT_SCHEDULE_MAP(schedmap);
	rt_global_restore_flags(flags);
	return masks;
}
/*
 * rt_bits_reset - force a bits event group to a new mask, waking everyone.
 *
 * Overwrites bits->mask with 'mask', then unconditionally releases every
 * task waiting on the group (removing each from the blocked and timed
 * lists and re-enqueueing it if it becomes fully READY), and finally
 * re-initializes the wait queue to empty.
 *
 * Runs under the global interrupt lock; on SMP the CPUs of newly-readied
 * tasks are gathered in a map and rescheduled once via RT_SCHEDULE_MAP.
 *
 * Returns the mask value the group held before the reset.
 */
RTAI_SYSCALL_MODE unsigned long rt_bits_reset(BITS *bits, unsigned long mask)
{
	unsigned long hard_flags, wakeup_cpus, prev_mask;
	RT_TASK *waiter;
	QUEUE *entry;

	CHECK_BITS_MAGIC(bits);

	wakeup_cpus = 0;
	hard_flags = rt_global_save_flags_and_cli();
	prev_mask = bits->mask;
	bits->mask = mask;
	/* Release every waiter; a dequeued node's next pointer still leads
	   to its old successor, so advancing after dequeue is safe. */
	for (entry = bits->queue.next; entry != &bits->queue; entry = entry->next) {
		waiter = entry->task;
		dequeue_blocked(waiter);
		rem_timed_task(waiter);
		if (waiter->state != RT_SCHED_READY) {
			waiter->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED);
			if (waiter->state == RT_SCHED_READY) {
				enq_ready_task(waiter);
#ifdef CONFIG_SMP
				set_bit(waiter->runnable_on_cpus & 0x1F, &wakeup_cpus);
#endif
			}
		}
	}
	/* Leave the wait queue empty (self-linked). */
	bits->queue.prev = bits->queue.next = &bits->queue;
	RT_SCHEDULE_MAP(wakeup_cpus);
	rt_global_restore_flags(hard_flags);
	return prev_mask;
}
/*
 * rt_bits_delete - destroy a bits event group, releasing all waiters.
 *
 * Invalidates the group (bits->magic = 0) under the global interrupt
 * lock, then walks the wait queue waking each blocked task.  Tasks that
 * become fully READY get blocked_on set to RTP_OBJREM -- presumably so
 * the wakee can detect that it was released by object removal rather
 * than by a signal; confirm against the wait-side code.
 *
 * On SMP the CPUs of readied tasks are collected in 'schedmap' and
 * rescheduled once via RT_SCHEDULE_MAP.  Always returns 0.
 */
int rt_bits_delete(BITS *bits)
{
	unsigned long flags, schedmap;
	RT_TASK *task;
	QUEUE *q;

	CHECK_BITS_MAGIC(bits);

	schedmap = 0;
	q = &bits->queue;
	flags = rt_global_save_flags_and_cli();
	/* Invalidate first so concurrent users fail the magic check. */
	bits->magic = 0;
	/* Walk until the queue wraps or an entry has no task attached. */
	while ((q = q->next) != &bits->queue && (task = q->task)) {
		rem_timed_task(task);
		/* Clear the blocking bits; ready the task only if nothing
		   else keeps it blocked. */
		if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
			task->blocked_on = RTP_OBJREM;
			enq_ready_task(task);
#ifdef CONFIG_SMP
			set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
#endif
		}
	}
	RT_SCHEDULE_MAP(schedmap);
	rt_global_restore_flags(flags);
	return 0;
}
/*
 * rt_wait_signal - suspend a signal-server task until it is re-executed.
 *
 * No-op (returns 0) unless 'sigtask' has rt_signals attached.  Otherwise,
 * under the global interrupt lock: on the first suspension (suspdepth
 * 0 -> 1) the server is marked SIGSUSP and pulled from the ready list;
 * if 'task' was previously suspended on its behalf (pstate > 0) and this
 * drops its pstate to zero, 'task' is released back to the ready list
 * once no other blocking state remains; then the scheduler is invoked.
 *
 * Returns sigtask->retval (the value stored by the signal dispatcher).
 */
RTAI_SYSCALL_MODE int rt_wait_signal(RT_TASK *sigtask, RT_TASK *task)
{
	unsigned long hard_flags;

	if (sigtask->rt_signals == NULL) {
		return 0;
	}

	hard_flags = rt_global_save_flags_and_cli();
	if (sigtask->suspdepth++ == 0) {
		/* First suspension: block the server task. */
		sigtask->state |= RT_SCHED_SIGSUSP;
		rem_ready_current(sigtask);
		/* Undo the paired suspension of the served task, if any. */
		if (task->pstate > 0) {
			if (--task->pstate == 0) {
				task->state &= ~RT_SCHED_SIGSUSP;
				if (task->state == RT_SCHED_READY) {
					enq_ready_task(task);
				}
			}
		}
		rt_schedule();
	}
	rt_global_restore_flags(hard_flags);
	return sigtask->retval;
}
/*
 * rt_exec_signal - release a suspended signal-server task to run.
 *
 * Under the global interrupt lock, decrements sigtask->suspdepth; when it
 * reaches zero the server is readied: if 'task' is non-NULL the server
 * inherits its priority and, on the first such pairing (pstate 0 -> 1),
 * 'task' is taken off the ready list and marked SIGSUSP until the server
 * finishes (see rt_wait_signal).  The triggering task (or NULL) is stored
 * in sigtask->retval for the server to pick up, and a reschedule is
 * requested on the current CPU.
 */
static inline void rt_exec_signal(RT_TASK *sigtask, RT_TASK *task)
{
	unsigned long hard_flags;

	hard_flags = rt_global_save_flags_and_cli();
	if (sigtask->suspdepth > 0) {
		if (--sigtask->suspdepth == 0) {
			if (task != NULL) {
				/* Serve at the triggering task's priority. */
				sigtask->priority = task->priority;
				/* First pairing: park the served task. */
				if (task->pstate++ == 0) {
					rem_ready_task(task);
					task->state |= RT_SCHED_SIGSUSP;
				}
			}
			sigtask->state &= ~RT_SCHED_SIGSUSP;
			sigtask->retval = (long)task;
			enq_ready_task(sigtask);
			RT_SCHEDULE(sigtask, rtai_cpuid());
		}
	}
	rt_global_restore_flags(hard_flags);
}