void mutex_wait(struct mutex_t *mutex)
{
    int irqstate = disableIRQ();
    DEBUG("%s: Mutex in use. %u\n", active_thread->name, mutex->val);

    if (mutex->val == 0) {
        /* somebody released the mutex. return. */
        mutex->val = thread_pid;
        DEBUG("%s: mutex_wait early out. %u\n", active_thread->name, mutex->val);
        restoreIRQ(irqstate);
        return;
    }

    sched_set_status((tcb_t*) active_thread, STATUS_MUTEX_BLOCKED);

    queue_node_t n;
    n.priority = (unsigned int) active_thread->priority;
    n.data = (unsigned int) active_thread;
    n.next = NULL;

    DEBUG("%s: Adding node to mutex queue: prio: %" PRIu32 "\n", active_thread->name, n.priority);

    queue_priority_add(&(mutex->queue), &n);
    restoreIRQ(irqstate);

    thread_yield();

    /* we were woken up by scheduler. waker removed us from queue. we have the mutex now. */
}
void mutex_unlock(struct mutex_t *mutex)
{
    unsigned irqstate = disableIRQ();
    DEBUG("mutex_unlock(): val: %u pid: %" PRIkernel_pid "\n", ATOMIC_VALUE(mutex->val), sched_active_pid);

    if (ATOMIC_VALUE(mutex->val) == 0) {
        /* the mutex was not locked */
        restoreIRQ(irqstate);
        return;
    }

    priority_queue_node_t *next = priority_queue_remove_head(&(mutex->queue));
    if (!next) {
        /* the mutex was locked and no thread was waiting for it */
        ATOMIC_VALUE(mutex->val) = 0;
        restoreIRQ(irqstate);
        return;
    }

    tcb_t *process = (tcb_t *) next->data;
    DEBUG("mutex_unlock: waking up waiting thread %" PRIkernel_pid "\n", process->pid);
    sched_set_status(process, STATUS_PENDING);

    uint16_t process_priority = process->priority;
    restoreIRQ(irqstate);
    sched_switch(process_priority);
}
static void mutex_wait(struct mutex_t *mutex)
{
    unsigned irqstate = disableIRQ();
    DEBUG("%s: Mutex in use. %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));

    if (atomic_set_to_one(&mutex->val)) {
        /* somebody released the mutex. return. */
        DEBUG("%s: mutex_wait early out. %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
        restoreIRQ(irqstate);
        return;
    }

    sched_set_status((tcb_t*) sched_active_thread, STATUS_MUTEX_BLOCKED);

    priority_queue_node_t n;
    n.priority = (unsigned int) sched_active_thread->priority;
    n.data = (unsigned int) sched_active_thread;
    n.next = NULL;

    DEBUG("%s: Adding node to mutex queue: prio: %" PRIu32 "\n", sched_active_thread->name, n.priority);

    priority_queue_add(&(mutex->queue), &n);
    restoreIRQ(irqstate);

    thread_yield_higher();

    /* we were woken up by scheduler. waker removed us from queue. we have the mutex now. */
}
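/*
 * Illustrative sketch (not taken from the snippets above): a plausible
 * mutex_lock() fast path that would call the static mutex_wait() in the
 * same file only when the uncontended atomic_set_to_one() attempt fails.
 * The exact shape of the real caller may differ.
 */
void mutex_lock(struct mutex_t *mutex)
{
    if (atomic_set_to_one(&mutex->val) == 0) {
        /* mutex was already locked: enqueue ourselves and block */
        mutex_wait(mutex);
    }
}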
int msg_reply(msg_t *m, msg_t *reply)
{
    unsigned state = disableIRQ();

    tcb_t *target = (tcb_t*) sched_threads[m->sender_pid];
    if (!target) {
        DEBUG("msg_reply(): %" PRIkernel_pid ": Target \"%" PRIkernel_pid "\" not existing...dropping msg!\n",
              sched_active_thread->pid, m->sender_pid);
        /* restore the interrupt state before bailing out */
        restoreIRQ(state);
        return -1;
    }

    if (target->status != STATUS_REPLY_BLOCKED) {
        DEBUG("msg_reply(): %" PRIkernel_pid ": Target \"%" PRIkernel_pid "\" not waiting for reply.\n",
              sched_active_thread->pid, target->pid);
        restoreIRQ(state);
        return -1;
    }

    DEBUG("msg_reply(): %" PRIkernel_pid ": Direct msg copy.\n", sched_active_thread->pid);

    /* copy msg to target */
    msg_t *target_message = (msg_t*) target->wait_data;
    *target_message = *reply;
    sched_set_status(target, STATUS_PENDING);

    uint16_t target_prio = target->priority;
    restoreIRQ(state);
    sched_switch(target_prio);

    return 1;
}
int msg_send_int(msg_t *m, kernel_pid_t target_pid)
{
#ifdef DEVELHELP
    if (!pid_is_valid(target_pid)) {
        DEBUG("msg_send(): target_pid is invalid, continuing anyways\n");
    }
#endif /* DEVELHELP */

    tcb_t *target = (tcb_t *) sched_threads[target_pid];

    if (target == NULL) {
        DEBUG("msg_send_int(): target thread does not exist\n");
        return -1;
    }

    m->sender_pid = KERNEL_PID_ISR;

    if (target->status == STATUS_RECEIVE_BLOCKED) {
        DEBUG("msg_send_int: Direct msg copy from %" PRIkernel_pid " to %" PRIkernel_pid ".\n",
              thread_getpid(), target_pid);

        /* copy msg to target */
        msg_t *target_message = (msg_t*) target->wait_data;
        *target_message = *m;
        sched_set_status(target, STATUS_PENDING);

        sched_context_switch_request = 1;
        return 1;
    }
    else {
        DEBUG("msg_send_int: Receiver not waiting.\n");
        return (queue_msg(target, m));
    }
}
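/*
 * Illustrative usage sketch (the device ISR and worker_pid are invented
 * for this example): an interrupt handler hands an event to a worker
 * thread with msg_send_int(). The call never blocks; if the worker is not
 * receive-blocked, the message is queued via queue_msg() or dropped, and
 * the context switch requested above is performed on interrupt exit.
 */
static kernel_pid_t worker_pid;

void some_device_isr(void)
{
    msg_t msg;
    msg.type = 0;               /* hypothetical event type */
    msg.content.value = 42;     /* hypothetical payload */
    msg_send_int(&msg, worker_pid);
}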
int pthread_cond_broadcast(struct pthread_cond_t *cond)
{
    unsigned old_state = disableIRQ();

    int other_prio = -1;

    while (1) {
        priority_queue_node_t *head = priority_queue_remove_head(&(cond->queue));
        if (head == NULL) {
            break;
        }

        tcb_t *other_thread = (tcb_t *) sched_threads[head->data];
        if (other_thread) {
            other_prio = max_prio(other_prio, other_thread->priority);
            sched_set_status(other_thread, STATUS_PENDING);
        }
        head->data = -1u;
    }

    restoreIRQ(old_state);

    if (other_prio >= 0) {
        sched_switch(other_prio);
    }

    return 0;
}
static ssize_t pipe_rw(ringbuffer_t *rb,
                       void *buf,
                       size_t n,
                       tcb_t **other_op_blocked,
                       tcb_t **this_op_blocked,
                       ringbuffer_op_t ringbuffer_op)
{
    if (n == 0) {
        return 0;
    }

    while (1) {
        unsigned old_state = disableIRQ();

        unsigned count = ringbuffer_op(rb, buf, n);

        if (count > 0) {
            tcb_t *other_thread = *other_op_blocked;
            int other_prio = -1;
            if (other_thread) {
                *other_op_blocked = NULL;
                other_prio = other_thread->priority;
                sched_set_status(other_thread, STATUS_PENDING);
            }

            restoreIRQ(old_state);

            if (other_prio >= 0) {
                sched_switch(other_prio);
            }

            return count;
        }
        else if (*this_op_blocked || inISR()) {
            restoreIRQ(old_state);
            return 0;
        }
        else {
            *this_op_blocked = (tcb_t *) sched_active_thread;

            sched_set_status((tcb_t *) sched_active_thread, STATUS_SLEEPING);
            restoreIRQ(old_state);
            thread_yield();
        }
    }
}
/* Mostly copied from thread_wakeup() */
static void _thread_wake_wo_yield(kernel_pid_t pid)
{
    unsigned old_state = irq_disable();

    thread_t *other_thread = (thread_t *) thread_get(pid);
    sched_set_status(other_thread, STATUS_RUNNING);

    irq_restore(old_state);
}
static void _thread_flags_wait(thread_flags_t mask, thread_t *thread,
                               unsigned threadstate, unsigned irqstate)
{
    DEBUG("_thread_flags_wait: me->flags=0x%08x me->mask=0x%08x. going blocked.\n",
          (unsigned)thread->flags, (unsigned)mask);

    thread->wait_data = (void *)(unsigned)mask;
    sched_set_status(thread, threadstate);
    irq_restore(irqstate);
    thread_yield_higher();
}
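/*
 * Illustrative usage sketch (not part of the snippets above): one thread
 * blocks on a flag through the public thread-flags API, which reaches
 * _thread_flags_wait() when the flag is not yet set, and another context
 * wakes it by setting that flag. FLAG_DATA_READY and waiter_pid are
 * invented names for this example.
 */
#define FLAG_DATA_READY (0x1)

void waiter(void)
{
    /* blocks until FLAG_DATA_READY is set for this thread */
    thread_flags_wait_any(FLAG_DATA_READY);
    /* ... handle the event ... */
}

void notifier(kernel_pid_t waiter_pid)
{
    thread_flags_set(thread_get(waiter_pid), FLAG_DATA_READY);
}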
int msg_send_receive(msg_t *m, msg_t *reply, unsigned int target_pid)
{
    dINT();
    tcb_t *me = (tcb_t*) sched_threads[sched_active_pid];
    sched_set_status(me, STATUS_REPLY_BLOCKED);
    me->wait_data = (void*) reply;

    /* msg_send blocks until reply received */
    return msg_send(m, target_pid, true);
}
int msg_send_receive(msg_t *m, msg_t *reply, kernel_pid_t target_pid)
{
    assert(sched_active_pid != target_pid);
    unsigned state = disableIRQ();
    tcb_t *me = (tcb_t*) sched_threads[sched_active_pid];
    sched_set_status(me, STATUS_REPLY_BLOCKED);
    me->wait_data = (void*) reply;

    /* msg_send blocks until reply received */
    return _msg_send(m, target_pid, true, state);
}
void thread_sleep(void)
{
    if (inISR()) {
        return;
    }

    dINT();
    sched_set_status((tcb_t *)sched_active_thread, STATUS_SLEEPING);
    eINT();
    thread_yield();
}
void sched_task_exit(void)
{
    DEBUG("sched_task_exit(): ending task %s...\n", active_thread->name);

    dINT();
    sched_threads[active_thread->pid] = NULL;
    num_tasks--;

    sched_set_status((tcb_t*)active_thread, STATUS_STOPPED);

    active_thread = NULL;
    cpu_switch_context_exit();
}
void mutex_unlock_and_sleep(struct mutex_t *mutex)
{
    DEBUG("%s: unlocking mutex. val: %u pid: %" PRIkernel_pid ", and taking a nap\n",
          sched_active_thread->name, ATOMIC_VALUE(mutex->val), sched_active_pid);
    unsigned irqstate = disableIRQ();

    if (ATOMIC_VALUE(mutex->val) != 0) {
        priority_queue_node_t *next = priority_queue_remove_head(&(mutex->queue));
        if (next) {
            thread_t *process = (thread_t *) next->data;
            DEBUG("%s: waking up waiter.\n", process->name);
            sched_set_status(process, STATUS_PENDING);
        }
        else {
            ATOMIC_VALUE(mutex->val) = 0; /* This is safe, interrupts are disabled */
        }
    }

    DEBUG("%s: going to sleep.\n", sched_active_thread->name);
    sched_set_status((thread_t*) sched_active_thread, STATUS_SLEEPING);
    restoreIRQ(irqstate);
    thread_yield_higher();
}
NORETURN void sched_task_exit(void)
{
    DEBUG("sched_task_exit: ending thread %" PRIkernel_pid "...\n", sched_active_thread->pid);

    (void) disableIRQ();
    sched_threads[sched_active_pid] = NULL;
    sched_num_threads--;

    sched_set_status((thread_t *)sched_active_thread, STATUS_STOPPED);

    sched_active_thread = NULL;
    cpu_switch_context_exit();
}
int msg_send_receive(msg_t *m, msg_t *reply, kernel_pid_t target_pid)
{
    assert(sched_active_pid != target_pid);
    unsigned state = irq_disable();
    thread_t *me = (thread_t*) sched_threads[sched_active_pid];
    sched_set_status(me, STATUS_REPLY_BLOCKED);
    me->wait_data = (void*) reply;

    /* we re-use (abuse) reply for sending, because wait_data might be
     * overwritten if the target is not in RECEIVE_BLOCKED */
    *reply = *m;

    /* msg_send blocks until reply received */
    return _msg_send(reply, target_pid, true, state);
}
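/*
 * Illustrative usage sketch (REQ_PING and server_pid are invented for
 * this example): a client performs a blocking request/reply round trip
 * with msg_send_receive(), while the server answers with msg_reply(),
 * which is the call that flips the client from STATUS_REPLY_BLOCKED back
 * to STATUS_PENDING in the snippets above.
 */
#define REQ_PING (0x4242)

void client(kernel_pid_t server_pid)
{
    msg_t req, resp;
    req.type = REQ_PING;
    /* blocks until the server calls msg_reply() */
    msg_send_receive(&req, &resp, server_pid);
}

void server(void)
{
    msg_t req, resp;
    while (1) {
        msg_receive(&req);
        resp.type = req.type;
        msg_reply(&req, &resp);
    }
}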
int msg_reply_int(msg_t *m, msg_t *reply)
{
    tcb_t *target = (tcb_t*) sched_threads[m->sender_pid];

    if (target->status != STATUS_REPLY_BLOCKED) {
        DEBUG("msg_reply_int(): %s: Target \"%s\" not waiting for reply.",
              sched_active_thread->name, target->name);
        return -1;
    }

    msg_t *target_message = (msg_t*) target->wait_data;
    *target_message = *reply;
    sched_set_status(target, STATUS_PENDING);
    sched_context_switch_request = 1;
    return 1;
}
int sem_post(sem_t *sem)
{
    int old_state = disableIRQ();
    ++sem->value;

    queue_node_t *next = queue_remove_head(&sem->queue);
    if (next) {
        tcb_t *next_process = (tcb_t*) next->data;
        DEBUG("%s: waking up %s\n", active_thread->name, next_process->name);
        sched_set_status(next_process, STATUS_PENDING);
        sched_switch(active_thread->priority, next_process->priority);
    }

    restoreIRQ(old_state);
    return 1;
}
void condition_variable::notify_one() noexcept
{
    unsigned old_state = disableIRQ();
    priority_queue_node_t* head = priority_queue_remove_head(&m_queue);
    int other_prio = -1;

    if (head != NULL) {
        tcb_t* other_thread = (tcb_t*) sched_threads[head->data];
        if (other_thread) {
            other_prio = other_thread->priority;
            sched_set_status(other_thread, STATUS_PENDING);
        }
        head->data = -1u;
    }

    restoreIRQ(old_state);

    if (other_prio >= 0) {
        sched_switch(other_prio);
    }
}
inline int __attribute__((always_inline)) thread_flags_wake(thread_t *thread)
{
    unsigned wakeup = 0;
    thread_flags_t mask = (uint16_t)(unsigned)thread->wait_data;

    switch (thread->status) {
        case STATUS_FLAG_BLOCKED_ANY:
            wakeup = (thread->flags & mask);
            break;
        case STATUS_FLAG_BLOCKED_ALL:
            wakeup = ((thread->flags & mask) == mask);
            break;
    }

    if (wakeup) {
        DEBUG("_thread_flags_wake(): waking up pid %" PRIkernel_pid "\n", thread->pid);
        sched_set_status(thread, STATUS_RUNNING);
    }

    return wakeup;
}
static void sem_thread_blocked(sem_t *sem)
{
    /* I'm going blocked */
    sched_set_status((tcb_t*) active_thread, STATUS_MUTEX_BLOCKED);

    queue_node_t n;
    n.priority = (uint32_t) active_thread->priority;
    n.data = (size_t) active_thread;
    n.next = NULL;

    DEBUG("%s: Adding node to mutex queue: prio: %" PRIu32 "\n", active_thread->name, n.priority);

    /* add myself to the waiters queue */
    queue_priority_add(&sem->queue, &n);

    /* the scheduler should now run another thread that eventually unlocks
     * the mutex; when that happens, this thread gets scheduled again */
    thread_yield();
}
void mutex_unlock(struct mutex_t *mutex)
{
    DEBUG("%s: unlocking mutex. val: %u pid: %u\n", active_thread->name, mutex->val, thread_pid);
    int irqstate = disableIRQ();

    if (mutex->val != 0) {
        if (mutex->queue.next) {
            queue_node_t *next = queue_remove_head(&(mutex->queue));
            tcb_t *process = (tcb_t*) next->data;
            DEBUG("%s: waking up waiter.\n", process->name);
            sched_set_status(process, STATUS_PENDING);
            sched_switch(active_thread->priority, process->priority);
        }
        else {
            mutex->val = 0;
        }
    }

    restoreIRQ(irqstate);
}
void mutex_unlock(struct mutex_t *mutex)
{
    DEBUG("%s: unlocking mutex. val: %u pid: %" PRIkernel_pid "\n",
          sched_active_thread->name, ATOMIC_VALUE(mutex->val), sched_active_pid);
    unsigned irqstate = disableIRQ();

    if (ATOMIC_VALUE(mutex->val) != 0) {
        priority_queue_node_t *next = priority_queue_remove_head(&(mutex->queue));
        if (next) {
            tcb_t *process = (tcb_t *) next->data;
            DEBUG("%s: waking up waiter.\n", process->name);
            sched_set_status(process, STATUS_PENDING);
            sched_switch(process->priority);
        }
        else {
            ATOMIC_VALUE(mutex->val) = 0; /* This is safe, interrupts are disabled */
        }
    }

    restoreIRQ(irqstate);
}
int msg_send_int(msg_t *m, unsigned int target_pid)
{
    tcb_t *target = (tcb_t *) sched_threads[target_pid];

    if (target->status == STATUS_RECEIVE_BLOCKED) {
        DEBUG("msg_send_int: direct msg copy from %i to %i.\n", thread_getpid(), target_pid);

        m->sender_pid = target_pid;

        /* copy msg to target */
        msg_t *target_message = (msg_t*) target->wait_data;
        *target_message = *m;
        sched_set_status(target, STATUS_PENDING);

        sched_context_switch_request = 1;
        return 1;
    }
    else {
        DEBUG("msg_send_int: receiver not waiting.\n");
        return (queue_msg(target, m));
    }
}
int msg_reply(msg_t *m, msg_t *reply)
{
    int state = disableIRQ();

    tcb_t *target = (tcb_t*) sched_threads[m->sender_pid];
    if (target->status != STATUS_REPLY_BLOCKED) {
        DEBUG("%s: msg_reply(): target \"%s\" not waiting for reply.",
              active_thread->name, target->name);
        restoreIRQ(state);
        return -1;
    }

    DEBUG("%s: msg_reply(): direct msg copy.\n", active_thread->name);

    /* copy msg to target */
    msg_t *target_message = (msg_t*) target->wait_data;
    *target_message = *reply;
    sched_set_status(target, STATUS_PENDING);
    restoreIRQ(state);
    thread_yield();

    return 1;
}
void condition_variable::notify_all() noexcept
{
    unsigned old_state = disableIRQ();
    int other_prio = -1;

    while (true) {
        priority_queue_node_t* head = priority_queue_remove_head(&m_queue);
        if (head == NULL) {
            break;
        }

        tcb_t* other_thread = (tcb_t*) sched_threads[head->data];
        if (other_thread) {
            auto max_prio = [](int a, int b) {
                return (a < 0) ? b : ((a < b) ? a : b);
            };
            other_prio = max_prio(other_prio, other_thread->priority);
            sched_set_status(other_thread, STATUS_PENDING);
        }
        head->data = -1u;
    }

    restoreIRQ(old_state);

    if (other_prio >= 0) {
        sched_switch(other_prio);
    }
}
int pthread_cond_signal(struct pthread_cond_t *cond)
{
    unsigned old_state = disableIRQ();

    queue_node_t *head = queue_remove_head(&(cond->queue));

    int other_prio = -1;
    if (head != NULL) {
        tcb_t *other_thread = (tcb_t *) sched_threads[head->data];
        if (other_thread) {
            other_prio = other_thread->priority;
            sched_set_status(other_thread, STATUS_PENDING);
        }
        head->data = -1u;
    }

    restoreIRQ(old_state);

    if (other_prio >= 0) {
        sched_switch(sched_active_thread->priority, other_prio);
    }

    return 0;
}
/**
 * This function is used on task exit to switch the context to the next
 * running task. It realizes only the second half of a complete context
 * switch by simulating the exit from an interrupt handler in which a
 * context switch is forced. The old context is not saved here since it
 * is no longer needed.
 */
NORETURN void task_exit(void)
{
    DEBUG("sched_task_exit: ending thread %" PRIkernel_pid "...\n",
          sched_active_thread ? sched_active_thread->pid : KERNEL_PID_UNDEF);

    (void) irq_disable();

    /* remove the old task from scheduling if that has not already happened */
    if (sched_active_thread) {
        sched_threads[sched_active_pid] = NULL;
        sched_num_threads--;
        sched_set_status((thread_t *)sched_active_thread, STATUS_STOPPED);
        sched_active_thread = NULL;
    }

    /* determine the new running task */
    sched_run();

    /* set the context switch flag (indicates that the context has to be
       switched on exit from the interrupt in _frxt_int_exit) */
    _frxt_setup_switch();

    /* set interrupt nesting level to the right value */
    irq_interrupt_nesting++;

    /* reset windowed registers */
    __asm__ volatile ("movi a2, 0\n"
                      "wsr a2, windowstart\n"
                      "wsr a2, windowbase\n"
                      "rsync\n");

    /* exit from simulated interrupt to switch to the new context */
    __asm__ volatile ("call0 _frxt_int_exit");

    /* should not be executed */
    UNREACHABLE();
}
int thread_wakeup(kernel_pid_t pid)
{
    DEBUG("thread_wakeup: Trying to wakeup PID %" PRIkernel_pid "...\n", pid);

    int old_state = disableIRQ();

    tcb_t *other_thread = (tcb_t *) sched_threads[pid];
    if (other_thread && other_thread->status == STATUS_SLEEPING) {
        DEBUG("thread_wakeup: Thread is sleeping.\n");

        sched_set_status(other_thread, STATUS_RUNNING);

        restoreIRQ(old_state);
        sched_switch(other_thread->priority);

        return 1;
    }
    else {
        DEBUG("thread_wakeup: Thread is not sleeping!\n");

        restoreIRQ(old_state);
        return STATUS_NOT_FOUND;
    }
}
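/*
 * Illustrative sketch (the worker/controller split, worker_pid and
 * work_pending are invented for this example): thread_sleep() shown
 * earlier parks the calling thread in STATUS_SLEEPING, and thread_wakeup()
 * above is the matching call another thread uses to make it runnable
 * again. There is still a window between the flag check and
 * thread_sleep(); production code closes it by disabling interrupts
 * around the check or by using thread flags or messages instead.
 */
static kernel_pid_t worker_pid;
static volatile int work_pending;

void worker(void)
{
    while (1) {
        while (!work_pending) {
            thread_sleep();
        }
        work_pending = 0;
        /* ... process the work item ... */
    }
}

void controller(void)
{
    work_pending = 1;
    thread_wakeup(worker_pid);
}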
void sched_run()
{
    sched_context_switch_request = 0;

    tcb_t *my_active_thread = (tcb_t*) active_thread;

    if (my_active_thread) {
        if (my_active_thread->status == STATUS_RUNNING) {
            my_active_thread->status = STATUS_PENDING;
        }

#ifdef SCHED_TEST_STACK
        if (*((unsigned int*) my_active_thread->stack_start) != (unsigned int) my_active_thread->stack_start) {
            printf("scheduler(): stack overflow detected, task=%s pid=%u\n",
                   my_active_thread->name, my_active_thread->pid);
        }
#endif
    }

#if SCHEDSTATISTICS
    extern unsigned long hwtimer_now(void);
    unsigned int time = hwtimer_now();
    if (my_active_thread && (pidlist[my_active_thread->pid].laststart)) {
        pidlist[my_active_thread->pid].runtime += time - pidlist[my_active_thread->pid].laststart;
    }
#endif

    DEBUG("\nscheduler: previous task: %s\n",
          (my_active_thread == NULL) ? "none" : my_active_thread->name);

    if (num_tasks == 0) {
        DEBUG("scheduler: no tasks left.\n");
        while (!num_tasks);
        DEBUG("scheduler: new task created.\n");
    }

    my_active_thread = NULL;
    while (!my_active_thread) {
        // for (int i = 0; i < SCHED_PRIO_LEVELS; i++) { /* TODO: introduce bitfield cache */
        //     if (runqueues[i]) {
        int nextrq = number_of_lowest_bit(runqueue_bitcache);
        clist_node_t next = *(runqueues[nextrq]);
        DEBUG("scheduler: first in queue: %s\n", ((tcb_t*) next.data)->name);
        clist_advance(&(runqueues[nextrq]));
        my_active_thread = (tcb_t*) next.data;
        thread_pid = (volatile int) my_active_thread->pid;
#if SCHEDSTATISTICS
        pidlist[my_active_thread->pid].laststart = time;
        pidlist[my_active_thread->pid].schedules++;
#endif
        //         break;
        //     }
        // }

        if (active_thread->pid != last_pid) {
            last_pid = active_thread->pid;
        }
    }

    DEBUG("scheduler: next task: %s\n", my_active_thread->name);

    if (my_active_thread != active_thread) {
        if (active_thread != NULL) { //TODO: necessary?
            if (active_thread->status == STATUS_RUNNING) {
                active_thread->status = STATUS_PENDING;
            }
        }
        sched_set_status((tcb_t*) my_active_thread, STATUS_RUNNING);
    }

    active_thread = (volatile tcb_t*) my_active_thread;

    DEBUG("scheduler: done.\n");
}