int up_cpu_paused(int cpu)
{
  FAR struct tcb_s *otcb = this_task();
  FAR struct tcb_s *ntcb;

  /* Update scheduler parameters */

  sched_suspend_scheduler(otcb);

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we are paused */

  sched_note_cpu_paused(otcb);
#endif

  /* Copy the CURRENT_REGS into the OLD TCB (otcb).  The co-processor state
   * will be saved as part of the return from xtensa_irq_dispatch().
   */

  xtensa_savestate(otcb->xcp.regs);

  /* Wait for the spinlock to be released */

  spin_unlock(&g_cpu_paused[cpu]);
  spin_lock(&g_cpu_wait[cpu]);

  /* Upon return, we will restore the exception context of the new TCB
   * (ntcb) at the head of the ready-to-run task list.
   */

  ntcb = this_task();

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we have resumed */

  sched_note_cpu_resumed(ntcb);
#endif

  /* Reset scheduler parameters */

  sched_resume_scheduler(ntcb);

  /* Did the task at the head of the list change? */

  if (otcb != ntcb)
    {
      /* Set CURRENT_REGS to the context save area of the new TCB to start.
       * This will inform the return-from-interrupt logic that a context
       * switch must be performed.
       */

      xtensa_restorestate(ntcb->xcp.regs);
    }

  spin_unlock(&g_cpu_wait[cpu]);
  return OK;
}
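/* Illustrative sketch (not taken from the code above): one plausible shape
 * of the requesting-CPU side of the pause handshake that up_cpu_paused()
 * participates in.  The function name up_cpu_pause_sketch() and the IPI
 * helper xtensa_intercpu_interrupt()/CPU_INTCODE_PAUSE are assumptions for
 * illustration only; the point is the spinlock pairing that mirrors the
 * unlock/lock sequence inside up_cpu_paused().
 */

int up_cpu_pause_sketch(int cpu)
{
  /* Hold g_cpu_wait so that the paused CPU cannot leave up_cpu_paused()
   * until a later resume releases it.  Hold g_cpu_paused so that we can
   * detect when the target CPU has actually parked (it releases this lock
   * inside up_cpu_paused()).
   */

  spin_lock(&g_cpu_wait[cpu]);
  spin_lock(&g_cpu_paused[cpu]);

  /* Ask the target CPU to enter its pause handler (assumed IPI helper) */

  xtensa_intercpu_interrupt(cpu, CPU_INTCODE_PAUSE);

  /* Wait until the target CPU releases g_cpu_paused, confirming that it
   * is spinning on g_cpu_wait inside up_cpu_paused().
   */

  spin_lock(&g_cpu_paused[cpu]);
  spin_unlock(&g_cpu_paused[cpu]);

  return OK;
}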
void up_release_pending(void)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

  sdbg("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the g_readytorun task list */

  /* sched_lock(); */

  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to switch
       * contexts.
       *
       * Update scheduler parameters.
       */

      sched_suspend_scheduler(rtcb);

      /* Copy the exception context into the TCB of the task that was
       * currently active. if up_setjmp returns a non-zero value, then
       * this is really the previously running task restarting!
       */

      if (!up_setjmp(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;
          sdbg("New Active Task TCB=%p\n", rtcb);

          /* The way that we handle signals in the simulation is kind of
           * a kludge.  This would be unsafe in a truly multi-threaded,
           * interrupt driven environment.
           */

          if (rtcb->xcp.sigdeliver)
            {
              sdbg("Delivering signals TCB=%p\n", rtcb);
              ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb);
              rtcb->xcp.sigdeliver = NULL;
            }

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_longjmp(rtcb->xcp.regs, 1);
        }
    }
}
void up_unblock_task(struct tcb_s *tcb)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_BLOCKED_STATE) ||
      (tcb->task_state > LAST_BLOCKED_STATE))
    {
      warn("%s: task sched error\n", __func__);
      return;
    }
  else
    {
      struct tcb_s *rtcb = current_task;

      /* Remove the task from the blocked task list */

      sched_removeblocked(tcb);

      /* Add the task in the correct location in the prioritized
       * ready-to-run task list.
       */

      if (sched_addreadytorun(tcb) && !up_interrupt_context())
        {
          /* The currently active task has changed! */

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Get the TCB of the task at the head of the ready-to-run list */

          struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* context switch */

          up_switchcontext(rtcb, nexttcb);
        }
    }
}
void up_release_pending(void)
{
  struct tcb_s *rtcb = current_task;

  /* Merge the g_pendingtasks list into the ready-to-run task list */

  if (sched_mergepending())
    {
      struct tcb_s *nexttcb = this_task();

      /* The currently active task has changed!  We will need to switch
       * contexts.
       *
       * Update scheduler parameters.
       */

      sched_suspend_scheduler(rtcb);

#ifdef CONFIG_ARCH_ADDRENV
      /* Make sure that the address environment for the previously
       * running task is closed down gracefully (data caches dump,
       * MMU flushed) and set up the address environment for the new
       * thread at the head of the ready-to-run list.
       */

      (void)group_addrenv(nexttcb);
#endif
      /* Update scheduler parameters */

      sched_resume_scheduler(nexttcb);

      /* context switch */

      up_switchcontext(rtcb, nexttcb);
    }
}
void up_unblock_task(struct tcb_s *tcb)
{
  struct tcb_s *rtcb = this_task();

  /* Verify that the context switch can be performed */

  DEBUGASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
              (tcb->task_state <= LAST_BLOCKED_STATE));

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Add the task in the correct location in the prioritized
   * ready-to-run task list
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed!  We need to do
       * a context switch to the new task.
       */

      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (CURRENT_REGS)
        {
          /* Yes, then we have to do things differently.
           * Just copy the CURRENT_REGS into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          struct tcb_s *nexttcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
void up_release_pending(void)
{
  struct tcb_s *rtcb = this_task();

  sinfo("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the ready-to-run task list */

  /* sched_lock(); */

  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to switch
       * contexts.
       *
       * Update scheduler parameters.
       */

      sched_suspend_scheduler(rtcb);

      /* Are we operating in interrupt context? */

      if (g_current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the g_current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
void up_unblock_task(struct tcb_s *tcb)
{
  struct tcb_s *rtcb = this_task();

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
         (tcb->task_state <= LAST_BLOCKED_STATE));

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Add the task in the correct location in the prioritized
   * ready-to-run task list
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed!  We need to do
       * a context switch to the new task.
       */

      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_copystate(rtcb->xcp.regs, current_regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          current_regs = rtcb->xcp.regs;
        }

      /* We are not in an interrupt handler.  Copy the user C context
       * into the TCB of the task that was previously active. if
       * up_saveusercontext returns a non-zero value, then this is really
       * the previously running task restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the new task that is ready to
           * run (probably tcb).  This is the new rtcb at the head of the
           * ready-to-run task list.
           */

          rtcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(rtcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
      PANIC();
    }
  else
    {
      struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
      bool switch_needed;

      slldbg("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * removed the head of the ready to run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.
       * sched_addreadytorun will return true if the task was
       * added to the head of the new list.  We will need to perform
       * a context switch only if the exclusive OR of the two calls
       * is non-zero (i.e., one and only one of the calls changes the
       * head of the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run
           * task list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

              up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new)
               * head of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;

              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then switch contexts.  Any necessary address environment
               * changes will be made when the interrupt returns.
               */

              up_restorestate(rtcb->xcp.regs);
            }

          /* Copy the exception context into the TCB at the (old) head of
           * the g_readytorun Task list. if up_saveusercontext returns a
           * non-zero value, then this is really the previously running
           * task restarting!
           */

          else if (!up_saveusercontext(rtcb->xcp.regs))
            {
              /* Restore the exception context of the rtcb at the (new)
               * head of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
              /* Make sure that the address environment for the previously
               * running task is closed down gracefully (data caches dump,
               * MMU flushed) and set up the address environment for the
               * new thread at the head of the ready-to-run list.
               */

              (void)group_addrenv(rtcb);
#endif
              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then switch contexts */

              up_fullcontextrestore(rtcb->xcp.regs);
            }
        }
    }
}
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb = this_task();
  bool switch_needed;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
         (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  /* Remove the tcb task from the ready-to-run list.  If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed.  In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the ready-to-run
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any new address environment needed by
           * the new thread will be instantiated before the return from
           * interrupt.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Get the context of the task at the head of the ready to
           * run list.
           */

          struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Reset scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
void up_block_task(FAR struct tcb_s *tcb, tstate_t task_state)
{
  FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;
  bool switch_needed;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
         (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  /* dbg("Blocking TCB=%p\n", tcb); */

  /* Remove the tcb task from the ready-to-run list.  If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed.  In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the g_readytorun
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (IN_INTERRUPT)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current registers into the OLD rtcb.
           */

          SAVE_IRQCONTEXT(rtcb);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (FAR struct tcb_s*)g_readytorun.head;

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then set up so that the context switch will be performed on
           * exit from the interrupt.
           */

          SET_IRQCONTEXT(rtcb);
        }

      /* Copy the user C context into the TCB at the (old) head of the
       * g_readytorun Task list. if SAVE_USERCONTEXT returns a non-zero
       * value, then this is really the previously running task restarting!
       */

      else if (!SAVE_USERCONTEXT(rtcb))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (FAR struct tcb_s*)g_readytorun.head;

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          RESTORE_USERCONTEXT(rtcb);
        }
    }
}
/****************************************************************************
 * Name: up_block_task
 *
 * Description:
 *   The currently executing task at the head of the ready-to-run list must
 *   be stopped.  Save its context and move it to the inactive list
 *   specified by task_state.
 *
 *   This function is called only from the NuttX scheduling logic.
 *   Interrupts will always be disabled when this function is called.
 *
 * Inputs:
 *   tcb: Refers to a task in the ready-to-run list (normally the task at
 *     the head of the list).  It must be stopped, its context saved and
 *     moved into one of the waiting task lists.  If it was the task at the
 *     head of the ready-to-run list, then a context switch to the new
 *     ready-to-run task must be performed.
 *   task_state: Specifies which waiting task list should hold the blocked
 *     task TCB.
 *
 ****************************************************************************/

void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_READY_TO_RUN_STATE) ||
      (tcb->task_state > LAST_READY_TO_RUN_STATE))
    {
      warn("%s: task sched error\n", __func__);
      return;
    }
  else
    {
      struct tcb_s *rtcb = current_task;
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list.  If we
       * are blocking the task at the head of the task list (the
       * most likely case), then a context switch to the next
       * ready-to-run task is needed.  In this case, it should
       * also be true that rtcb == tcb.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Add the task to the specified blocked task list */

      sched_addblocked(tcb, (tstate_t)task_state);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          struct tcb_s *nexttcb;

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* This part should not be executed in interrupt context */

          if (up_interrupt_context())
            {
              panic("%s: %d\n", __func__, __LINE__);
            }

          /* If there are any pending tasks, then add them to the
           * ready-to-run task list now.  Normally this is done by
           * up_release_pending(), called from sched_unlock() when
           * preemption is re-enabled, but the task is blocking itself
           * here, so it is OK to do the merge now.
           */

          if (g_pendingtasks.head)
            {
              warn("Disable preemption failed for task block itself\n");
              sched_mergepending();
            }

          nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Reset scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* context switch */

          up_switchcontext(rtcb, nexttcb);
        }
    }
}
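/* Illustrative sketch (not part of the sources above): a minimal,
 * hypothetical blocking primitive showing the calling pattern that
 * up_block_task() is written for.  The function name my_wait_event_sketch()
 * is an assumption; the critical-section and task-state handling follow
 * the requirement above that interrupts be disabled when up_block_task()
 * is called.
 */

#include <nuttx/irq.h>
#include <nuttx/sched.h>

void my_wait_event_sketch(void)
{
  /* up_block_task() must be called with interrupts disabled */

  irqstate_t flags = enter_critical_section();

  /* Block the currently running task on one of the waiting task lists.
   * A real primitive would first queue this TCB on the object being
   * waited for so that another context can later call up_unblock_task()
   * on it (for example, from an interrupt handler).
   */

  up_block_task(this_task(), TSTATE_WAIT_SEM);

  /* Execution resumes here only after up_unblock_task() has made this
   * task ready-to-run again and it has been switched back in.
   */

  leave_critical_section(flags);
}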
void up_unblock_task(FAR struct tcb_s *tcb)
{
  FAR struct tcb_s *rtcb = this_task();

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
         (tcb->task_state <= LAST_BLOCKED_STATE));

  sdbg("Unblocking TCB=%p\n", tcb);

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Reset scheduler parameters */

  sched_resume_scheduler(tcb);

  /* Add the task in the correct location in the prioritized
   * ready-to-run task list
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed! */

      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Copy the exception context into the TCB of the task that was
       * previously active. if up_setjmp returns a non-zero value, then
       * this is really the previously running task restarting!
       */

      if (!up_setjmp(rtcb->xcp.regs))
        {
          /* Restore the exception context of the new task that is ready to
           * run (probably tcb).  This is the new rtcb at the head of the
           * ready-to-run task list.
           */

          rtcb = this_task();
          sdbg("New Active Task TCB=%p\n", rtcb);

          /* The way that we handle signals in the simulation is kind of
           * a kludge.  This would be unsafe in a truly multi-threaded,
           * interrupt driven environment.
           */

          if (rtcb->xcp.sigdeliver)
            {
              sdbg("Delivering signals TCB=%p\n", rtcb);
              ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb);
              rtcb->xcp.sigdeliver = NULL;
            }

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_longjmp(rtcb->xcp.regs, 1);
        }
    }
}
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
  bool switch_needed;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
         (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  sdbg("Blocking TCB=%p\n", tcb);

  /* Remove the tcb task from the ready-to-run list.  If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed.  In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the g_readytorun
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Copy the exception context into the TCB at the (old) head of the
       * g_readytorun Task list. if up_setjmp returns a non-zero
       * value, then this is really the previously running task restarting!
       */

      if (!up_setjmp(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;
          sdbg("New Active Task TCB=%p\n", rtcb);

          /* The way that we handle signals in the simulation is kind of
           * a kludge.  This would be unsafe in a truly multi-threaded,
           * interrupt driven environment.
           */

          if (rtcb->xcp.sigdeliver)
            {
              sdbg("Delivering signals TCB=%p\n", rtcb);
              ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb);
              rtcb->xcp.sigdeliver = NULL;
            }

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_longjmp(rtcb->xcp.regs, 1);
        }
    }
}
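/* Illustrative sketch (standard C, not NuttX code): the setjmp()/longjmp()
 * pattern behind the simulation's up_setjmp()/up_longjmp() calls above.
 * setjmp() first returns 0 when the context is saved; a later longjmp()
 * into the same jmp_buf makes setjmp() "return" again with a non-zero
 * value, which is why the code above treats a non-zero return as the
 * previously running task restarting.
 */

#include <setjmp.h>
#include <stdio.h>

static jmp_buf g_ctx;

static void restart(void)
{
  /* Resume execution at the point where g_ctx was captured */

  longjmp(g_ctx, 1);
}

int main(void)
{
  if (setjmp(g_ctx) == 0)
    {
      /* First return: the context was just saved */

      printf("context saved; simulating a later restart\n");
      restart();
    }
  else
    {
      /* Second return: we arrived here via longjmp(), i.e. the "task"
       * is restarting.
       */

      printf("restarted via longjmp\n");
    }

  return 0;
}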
void up_release_pending(void)
{
  struct tcb_s *rtcb = this_task();

  sinfo("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the ready-to-run task list */

  /* sched_lock(); */

  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to
       * switch contexts.
       */

      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we operating in interrupt context? */

      if (CURRENT_REGS)
        {
          /* Yes, then we have to do things differently.
           * Just copy the CURRENT_REGS into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* Copy the exception context into the TCB of the task that
       * was currently active. if up_saveusercontext returns a non-zero
       * value, then this is really the previously running task
       * restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
void up_release_pending(void)
{
  struct tcb_s *rtcb = this_task();

  sinfo("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the ready-to-run task list */

  /* sched_lock(); */

  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to switch
       * contexts.
       */

      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we operating in interrupt context? */

      if (CURRENT_REGS)
        {
          /* Yes, then we have to do things differently.  Just copy the
           * CURRENT_REGS into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          struct tcb_s *nexttcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
void up_unblock_task(struct tcb_s *tcb)
{
  struct tcb_s *rtcb = this_task();

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
         (tcb->task_state <= LAST_BLOCKED_STATE));

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Add the task in the correct location in the prioritized
   * ready-to-run task list
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed!  We need to do
       * a context switch to the new task.
       */

      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (g_current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the g_current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Restore the exception context of the new task that is ready to
           * run (probably tcb).  This is the new rtcb at the head of the
           * ready-to-run task list.
           */

          struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
void up_reprioritize_rtr(FAR struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
      PANIC();
    }
  else
    {
      FAR struct tcb_s *rtcb = this_task();
      bool switch_needed;

      slldbg("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * removed the head of the ready to run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.
       * sched_addreadytorun will return true if the task was
       * added to the head of the new list.  We will need to perform
       * a context switch only if the exclusive OR of the two calls
       * is non-zero (i.e., one and only one of the calls changes the
       * head of the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run
           * task list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Are we in an interrupt handler? */

          if (IN_INTERRUPT)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current context into the OLD rtcb.
               */

              SAVE_IRQCONTEXT(rtcb);

              /* Restore the exception context of the rtcb at the (new)
               * head of the ready-to-run task list.
               */

              rtcb = this_task();

              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then set up so that the context switch will be performed
               * on exit from the interrupt.
               */

              SET_IRQCONTEXT(rtcb);
            }

          /* Copy the exception context into the TCB at the (old) head of
           * the ready-to-run Task list. if SAVE_USERCONTEXT returns a
           * non-zero value, then this is really the previously running
           * task restarting!
           */

          else if (!SAVE_USERCONTEXT(rtcb))
            {
              /* Restore the exception context of the rtcb at the (new)
               * head of the ready-to-run task list.
               */

              rtcb = this_task();

              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then switch contexts */

              RESTORE_USERCONTEXT(rtcb);
            }
        }
    }
}
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
      warn("%s: task sched error\n", __func__);
      return;
    }
  else
    {
      struct tcb_s *rtcb = current_task;
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * removed the head of the ready to run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.
       * sched_addreadytorun will return true if the task was
       * added to the head of the new list.  We will need to perform
       * a context switch only if the exclusive OR of the two calls
       * is non-zero (i.e., one and only one of the calls changes the
       * head of the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed && !up_interrupt_context())
        {
          struct tcb_s *nexttcb;

          /* If there are any pending tasks, then add them to the
           * ready-to-run task list now.  Normally this is done by
           * up_release_pending(), called from sched_unlock() when
           * preemption is re-enabled, but the reprioritized task is
           * switching away here, so it is OK to do the merge now.
           */

          if (g_pendingtasks.head)
            {
              warn("Disable preemption failed for reprioritize task\n");
              sched_mergepending();
            }

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Get the TCB of the new task to run */

          nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* context switch */

          up_switchcontext(rtcb, nexttcb);
        }
    }
}
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
      PANIC();
    }
  else
    {
      struct tcb_s *rtcb = this_task();
      bool switch_needed;

      slldbg("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just removed the
       * head of the ready to run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.  sched_addreadytorun
       * will return true if the task was added to the head of ready-to-run
       * list.  We will need to perform a context switch only if the
       * exclusive OR of the two calls is non-zero (i.e., one and only one
       * of the calls changes the head of the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed (i.e. if the head
       * of the ready-to-run list is no longer the same).
       */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run
           * task list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Are we in an interrupt handler? */

          if (CURRENT_REGS)
            {
              /* Yes, then we have to do things differently.
               * Just copy the CURRENT_REGS into the OLD rtcb.
               */

              up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new)
               * head of the ready-to-run task list.
               */

              rtcb = this_task();

              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* No, then we will need to perform the user context switch */

          else
            {
              struct tcb_s *nexttcb = this_task();

              /* Update scheduler parameters */

              sched_resume_scheduler(nexttcb);

              /* Switch context to the context of the task at the head of
               * the ready to run list.
               */

              up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

              /* up_switchcontext forces a context switch to the task at
               * the head of the ready-to-run list.  It does not 'return'
               * in the normal sense.  When it does return, it is because
               * the blocked task is again ready to run and has execution
               * priority.
               */
            }
        }
    }
}
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb = this_task();
  bool switch_needed;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
         (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  /* Remove the tcb task from the ready-to-run list.  If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed.  In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the ready-to-run
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (CURRENT_REGS)
        {
          /* Yes, then we have to do things differently.
           * Just copy the CURRENT_REGS into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts. */

          up_restorestate(rtcb->xcp.regs);
        }

      /* Copy the user C context into the TCB at the (old) head of the
       * ready-to-run Task list. if up_saveusercontext returns a non-zero
       * value, then this is really the previously running task restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
void up_release_pending(void)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

  slldbg("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the g_readytorun task list */

  /* sched_lock(); */

  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to switch
       * contexts.
       *
       * Update scheduler parameters.
       */

      sched_suspend_scheduler(rtcb);

      /* Are we operating in interrupt context? */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* Copy the exception context into the TCB of the task that
       * was currently active. if up_saveusercontext returns a non-zero
       * value, then this is really the previously running task
       * restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(rtcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
void up_release_pending(void)
{
  FAR struct tcb_s *rtcb = this_task();

  slldbg("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the ready-to-run task list */

  /* sched_lock(); */

  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to switch
       * contexts.
       *
       * Update scheduler parameters.
       */

      sched_suspend_scheduler(rtcb);

      /* Are we operating in interrupt context? */

      if (IN_INTERRUPT())
        {
          /* Yes, then we have to do things differently.
           * Just copy the current context into the OLD rtcb.
           */

          SAVE_IRQCONTEXT(rtcb);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then set up so that the context switch will be performed on
           * exit from the interrupt.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          SET_IRQCONTEXT(rtcb);
        }

      /* Copy the exception context into the TCB of the task that
       * was currently active. if SAVE_USERCONTEXT returns a non-zero
       * value, then this is really the previously running task
       * restarting!
       */

      else if (!SAVE_USERCONTEXT(rtcb))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(rtcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          RESTORE_USERCONTEXT(rtcb);
        }
    }
}
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
      PANIC();
    }
  else
    {
      FAR struct tcb_s *rtcb = this_task();
      bool switch_needed;

      sinfo("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * removed the head of the ready to run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.
       * sched_addreadytorun will return true if the task was
       * added to the head of the new list.  We will need to perform
       * a context switch only if the exclusive OR of the two calls
       * is non-zero (i.e., one and only one of the calls changes the
       * head of the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run
           * task list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Copy the exception context into the TCB at the (old) head of
           * the ready-to-run Task list. if up_setjmp returns a non-zero
           * value, then this is really the previously running task
           * restarting!
           */

          if (!up_setjmp(rtcb->xcp.regs))
            {
              /* Restore the exception context of the rtcb at the (new)
               * head of the ready-to-run task list.
               */

              rtcb = this_task();
              sinfo("New Active Task TCB=%p\n", rtcb);

              /* The way that we handle signals in the simulation is kind
               * of a kludge.  This would be unsafe in a truly
               * multi-threaded, interrupt driven environment.
               */

              if (rtcb->xcp.sigdeliver)
                {
                  sinfo("Delivering signals TCB=%p\n", rtcb);
                  ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb);
                  rtcb->xcp.sigdeliver = NULL;
                }

              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then switch contexts */

              up_longjmp(rtcb->xcp.regs, 1);
            }
        }
    }
}
void up_release_pending(void)
{
  FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;

  slldbg("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the g_readytorun task list */

  /* sched_lock(); */

  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to switch
       * contexts.
       *
       * Update scheduler parameters.
       */

      sched_suspend_scheduler(rtcb);

      /* Are we operating in interrupt context? */

      if (IN_INTERRUPT)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current context into the OLD rtcb.
           */

          SAVE_IRQCONTEXT(rtcb);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (FAR struct tcb_s*)g_readytorun.head;

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then set up so that the context switch will be performed on
           * exit from the interrupt.
           */

          SET_IRQCONTEXT(rtcb);
        }

      /* Copy the exception context into the TCB of the task that
       * was currently active. if SAVE_USERCONTEXT returns a non-zero
       * value, then this is really the previously running task
       * restarting!
       */

      else if (!SAVE_USERCONTEXT(rtcb))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (FAR struct tcb_s*)g_readytorun.head;

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          RESTORE_USERCONTEXT(rtcb);
        }
    }
}
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb = this_task();
  bool switch_needed;

  /* Verify that the context switch can be performed */

  DEBUGASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
              (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  /* Remove the tcb task from the ready-to-run list.  If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed.  In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the ready-to-run
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (g_current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the g_current_regs into the OLD rtcb.
           */

          up_copystate(rtcb->xcp.regs, g_current_regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          g_current_regs = rtcb->xcp.regs;
        }

      /* Copy the user C context into the TCB at the (old) head of the
       * ready-to-run Task list. if up_saveusercontext returns a non-zero
       * value, then this is really the previously running task restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(rtcb);
#endif
          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
  bool switch_needed;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
         (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  /* Remove the tcb task from the ready-to-run list.  If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed.  In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the g_readytorun
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;

          /* Reset scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}