/*
 * Tear down a flexsc syscall page on the CPU that owns it.
 *
 * Ordering here matters and must not be changed:
 *   1. Publish STAT_EXIT on the page so new arrivals see it is dying.
 *   2. Splice every queued worker onto a private local list.
 *   3. Clear the recorded waiter BEFORE any wakeup runs.
 *   4. Only then wake the workers, and finally the waiter (if any).
 *
 * NOTE(review): syspage->worker appears to be a running count of workers
 * drained from the page — confirm against the struct definition.
 */
static void drop_flexsc_syspage(int cpu, struct flexsc_syspage *syspage)
{
    struct list_head *list, *next, worker_list;
    struct task_struct *waiter;

    /* This routine must run on the CPU the page belongs to. */
    flexsc_assert(smp_processor_id() == cpu);

    syspage->__stat = STAT_EXIT;

    /* Move all queued workers from the syspage onto a local list so the
     * wakeups below happen without touching the (shared) syspage list. */
    INIT_LIST_HEAD(&worker_list);
    list = &(syspage->worker_list);
    while ((next = list->next) != list) {
        list_del(next);
        list_add(next, &worker_list), syspage->worker ++;
    }

    /* Detach the waiter before any task is woken, so a woken worker
     * cannot observe a stale waiter pointer on the page. */
    waiter = syspage->waiter, syspage->waiter = NULL;

    /* Wake every drained worker. */
    list = &worker_list;
    while ((next = list->next) != list) {
        list_del_init(next);
        wakeup_task(link_to_task(next));
    }

    /* Wake the waiter last, after the page is fully drained. */
    if (waiter != NULL) {
        wakeup_task(waiter);
    }
}
/*
 * V (signal) operation on a counting semaphore.
 *
 * Atomically bumps the counter; a non-positive value AFTER the increment
 * means at least one task was blocked on the semaphore, so one is woken
 * from the semaphore's wait queue.
 */
void signal_semaphore(semaphore_t *sem)
{
    volatile uint32_t *counter = (volatile uint32_t *)&sem->value;
    const int32_t after = (int32_t)increment_atomic(counter);

    if (after > 0) {
        /* Nobody was waiting — nothing more to do. */
        return;
    }

    wakeup_task(&sem->taskwait);
}
void check_timers(void) { readyqueues_t* readyqueue = &readyqueues[CORE_ID]; spinlock_irqsave_lock(&readyqueue->lock); // since IRQs are disabled, get_clock_tick() won't increase here const uint64_t current_tick = get_clock_tick(); // wakeup tasks whose deadline has expired task_t* task; while ((task = readyqueue->timers.first) && (task->timeout <= current_tick)) { // pops task from timer queue, so next iteration has new first element wakeup_task(task->id); } #ifdef DYNAMIC_TICKS task = readyqueue->timers.first; if (task) { update_timer(task); } #endif spinlock_irqsave_unlock(&readyqueue->lock); }
/*
 * Deliver event bit(s) to the task identified by tid and wake it.
 *
 * Returns 0 on success, -1 when no task with that id exists.
 */
int post_event (int tid, int event)
{
    struct tsk *target = (struct tsk *) find_task (tid);

    if (target == NULL) {
        return -1;
    }

    /* OR the bits in so previously posted, unconsumed events survive. */
    target->event |= event;
    wakeup_task (target);

    return 0;
}
/*
 * Self-termination path: a task cannot fully tear itself down, so it
 * parks itself and delegates the cleanup to the system killer task
 * (looked up as pid 0 here).
 *
 * Returns 0 on success, -EINVAL when the killer task cannot be found.
 */
int task_kill_self(struct task_struct *task)
{
    struct task_struct *killer = pid_get_task(0);

    if (killer == NULL) {
        return -EINVAL;
    }

    kernel_debug("Kill self\n");

    /* Park the caller so it is no longer runnable, wake the killer to
     * perform the actual teardown, then give up the CPU. */
    set_task_state(task, PROCESS_STATE_IDLE);
    wakeup_task(killer);
    sched();

    return 0;
}