void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_READY_TO_RUN_STATE) ||
      (tcb->task_state > LAST_READY_TO_RUN_STATE))
    {
      PANIC(OSERR_BADBLOCKSTATE);
    }
  else
    {
      struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list. If we
       * are blocking the task at the head of the task list (the
       * most likely case), then a context switch to the next
       * ready-to-run task is needed. In this case, it should
       * also be true that rtcb == tcb.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Add the task to the specified blocked task list */

      sched_addblocked(tcb, (tstate_t)task_state);

      /* If there are any pending tasks, then add them to the g_readytorun
       * task list now
       */

      if (g_pendingtasks.head)
        {
          switch_needed |= sched_mergepending();
        }

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

              up_copystate(rtcb->xcp.regs, current_regs);

              /* Restore the exception context of the rtcb at the (new) head
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;

              /* Then switch contexts */

              current_regs = rtcb->xcp.regs;
            }

          /* Copy the user C context into the TCB at the (old) head of the
           * g_readytorun task list. If up_saveusercontext returns a non-zero
           * value, then this is really the previously running task restarting!
           */

          else if (!up_saveusercontext(rtcb->xcp.regs))
            {
              /* Restore the exception context of the rtcb at the (new) head
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;

              /* Then switch contexts */

              up_fullcontextrestore(rtcb->xcp.regs);
            }
        }
    }
}
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
     )
    {
      PANIC();
    }
  else
    {
      struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
      bool switch_needed;

      slldbg("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * removed the head of the ready-to-run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run list.
       * sched_addreadytorun will return true if the task was
       * added at the head of the list. We will need to perform a context
       * switch only if the exclusive OR of the two calls is non-zero
       * (i.e., one and only one of the calls changes the head of the
       * ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run
           * task list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

          /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

              up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;
              slldbg("New Active Task TCB=%p\n", rtcb);

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* Copy the exception context into the TCB at the (old) head of the
           * g_readytorun task list. If up_saveusercontext returns a non-zero
           * value, then this is really the previously running task restarting!
           */

          else if (!up_saveusercontext(rtcb->xcp.regs))
            {
              /* Restore the exception context of the rtcb at the (new) head
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;
              slldbg("New Active Task TCB=%p\n", rtcb);

              /* Then switch contexts */

              up_fullcontextrestore(rtcb->xcp.regs);
            }
        }
    }
}
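A minimal sketch of the reprioritization decision above, not taken from the original sources: a context switch is needed exactly when the head of the ready-to-run list changes, and since sched_removereadytorun() reports "I removed the old head" while sched_addreadytorun() reports "I inserted the new head", the exclusive OR of the two results is non-zero only when the head actually changed. The helper name need_switch below is hypothetical and exists only to demonstrate that truth table.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper: mirrors switch_needed = remove ^ add in the code above */

static bool need_switch(bool removed_head, bool added_at_head)
{
  /* Exactly one of the two operations changed the head of the list */

  return removed_head ^ added_at_head;
}

int main(void)
{
  /* Task was not at the head and did not become the head: no switch */

  printf("removed=0 added=0 -> %d\n", need_switch(false, false));

  /* Task left the head, or became the new head: switch */

  printf("removed=1 added=0 -> %d\n", need_switch(true,  false));
  printf("removed=0 added=1 -> %d\n", need_switch(false, true));

  /* Task was the head and was re-inserted at the head: still running, no switch */

  printf("removed=1 added=1 -> %d\n", need_switch(true,  true));
  return 0;
}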
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_READY_TO_RUN_STATE) ||
      (tcb->task_state > LAST_READY_TO_RUN_STATE))
    {
      PANIC(OSERR_BADBLOCKSTATE);
    }
  else
    {
      struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list. If we
       * are blocking the task at the head of the task list (the
       * most likely case), then a context switch to the next
       * ready-to-run task is needed. In this case, it should
       * also be true that rtcb == tcb.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Add the task to the specified blocked task list */

      sched_addblocked(tcb, (tstate_t)task_state);

      /* If there are any pending tasks, then add them to the g_readytorun
       * task list now
       */

      if (g_pendingtasks.head)
        {
          switch_needed |= sched_mergepending();
        }

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

              up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* No, then we will need to perform the user context switch */

          else
            {
              /* Switch context to the context of the task at the head of the
               * ready to run list.
               */

              struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
              up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

              /* up_switchcontext forces a context switch to the task at the
               * head of the ready-to-run list. It does not 'return' in the
               * normal sense. When it does return, it is because the blocked
               * task is again ready to run and has execution priority.
               */
            }
        }
    }
}
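The "does not 'return' in the normal sense" behavior of up_switchcontext above can be hard to picture. The following is an illustrative sketch only, under the assumption that POSIX ucontext routines (getcontext/makecontext/swapcontext) are available on the host; it is not NuttX code and does not use the up_switchcontext API. swapcontext plays the same role as up_switchcontext(old, new): it saves the caller's registers into one context, resumes the other, and only "returns" when something later switches back to the saved context.

#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx;          /* Context of the "blocked" caller */
static ucontext_t task_ctx;          /* Context of the task we switch to */
static char task_stack[64 * 1024];   /* Stack for the new context */

static void task_entry(void)
{
  printf("running in the other context\n");

  /* Returning from this function resumes uc_link, i.e. main_ctx */
}

int main(void)
{
  /* Prepare a second context with its own stack and entry point */

  getcontext(&task_ctx);
  task_ctx.uc_stack.ss_sp   = task_stack;
  task_ctx.uc_stack.ss_size = sizeof(task_stack);
  task_ctx.uc_link          = &main_ctx;
  makecontext(&task_ctx, task_entry, 0);

  printf("switching away (like up_switchcontext)\n");

  /* Save the current registers in main_ctx and resume task_ctx.
   * This call does not return until control switches back here.
   */

  swapcontext(&main_ctx, &task_ctx);

  printf("back: the 'blocked' context is running again\n");
  return 0;
}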
void up_release_pending(void)
{
  struct tcb_s *rtcb = this_task();

  sinfo("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the ready-to-run task list */

  /* sched_lock(); */

  if (sched_mergepending())
    {
      /* The currently active task has changed! We will need to switch
       * contexts.
       */

      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we operating in interrupt context? */

      if (CURRENT_REGS)
        {
          /* Yes, then we have to do things differently. Just copy the
           * CURRENT_REGS into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          struct tcb_s *nexttcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list. It does not 'return' in the
           * normal sense. When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
     )
    {
      PANIC();
    }
  else
    {
      struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
      bool switch_needed;

      sdbg("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * removed the head of the ready-to-run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run list.
       * sched_addreadytorun will return true if the task was
       * added at the head of the list. We will need to perform a context
       * switch only if the exclusive OR of the two calls is non-zero
       * (i.e., one and only one of the calls changes the head of the
       * ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run
           * task list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

          /* Copy the exception context into the TCB at the (old) head of the
           * g_readytorun task list. If up_setjmp returns a non-zero
           * value, then this is really the previously running task restarting!
           */

          if (!up_setjmp(rtcb->xcp.regs))
            {
              /* Restore the exception context of the rtcb at the (new) head
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;
              sdbg("New Active Task TCB=%p\n", rtcb);

              /* The way that we handle signals in the simulation is kind of
               * a kludge. This would be unsafe in a truly multi-threaded,
               * interrupt driven environment.
               */

              if (rtcb->xcp.sigdeliver)
                {
                  sdbg("Delivering signals TCB=%p\n", rtcb);
                  ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb);
                  rtcb->xcp.sigdeliver = NULL;
                }

              /* Then switch contexts */

              up_longjmp(rtcb->xcp.regs, 1);
            }
        }
    }
}
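The simulator variants rely on the setjmp/longjmp convention: the save call returns zero when the context is first captured, and the same call later appears to return non-zero when another path jumps back into it, which is how the suspended task detects that it is restarting. The sketch below illustrates only that convention using standard C setjmp/longjmp; it is an assumption-labeled stand-in, not the NuttX up_setjmp/up_longjmp implementation.

#include <setjmp.h>
#include <stdio.h>

static jmp_buf task_context;

int main(void)
{
  if (setjmp(task_context) == 0)
    {
      /* First return (zero): the context was just saved, analogous to
       * up_setjmp() returning 0 for the task being suspended.
       */

      printf("context saved, switching away\n");

      /* Later, some other path restores the saved context... */

      longjmp(task_context, 1);
    }
  else
    {
      /* Second return (non-zero): we arrived here via longjmp, analogous
       * to the previously running task restarting after up_longjmp().
       */

      printf("task restarting\n");
    }

  return 0;
}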
void up_release_pending(void)
{
  struct tcb_s *rtcb = this_task();

  sinfo("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the ready-to-run task list */

  /* sched_lock(); */

  if (sched_mergepending())
    {
      /* The currently active task has changed! We will need to switch
       * contexts.
       *
       * Update scheduler parameters.
       */

      sched_suspend_scheduler(rtcb);

      /* Are we operating in interrupt context? */

      if (g_current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the g_current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts. Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dumped,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list. It does not 'return' in the
           * normal sense. When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb = this_task();
  bool switch_needed;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
         (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  /* Remove the tcb task from the ready-to-run list. If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed. In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the ready-to-run
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (g_current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the g_current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts. Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Get the context of the task at the head of the ready to
           * run list.
           */

          struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dumped,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Reset scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list. It does not 'return' in the
           * normal sense. When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
  bool switch_needed;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
         (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  /* Remove the tcb task from the ready-to-run list. If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed. In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the g_readytorun
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Are we in an interrupt handler? */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

          /* Then switch contexts. Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* Copy the user C context into the TCB at the (old) head of the
       * g_readytorun task list. If up_saveusercontext returns a non-zero
       * value, then this is really the previously running task restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dumped,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(rtcb);
#endif
          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
void up_release_pending(void)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

  slldbg("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the g_readytorun task list */

  /* sched_lock(); */

  if (sched_mergepending())
    {
      /* The currently active task has changed! We will need to
       * switch contexts. First check if we are operating in
       * interrupt context:
       */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;
          slldbg("New Active Task TCB=%p\n", rtcb);

          /* Then switch contexts. Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* Copy the exception context into the TCB of the task that
       * was currently active. If up_saveusercontext returns a non-zero
       * value, then this is really the previously running task
       * restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;
          slldbg("New Active Task TCB=%p\n", rtcb);

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dumped,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(rtcb);
#endif
          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
  bool switch_needed;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
         (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  sdbg("Blocking TCB=%p\n", tcb);

  /* Remove the tcb task from the ready-to-run list. If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed. In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the g_readytorun
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Copy the exception context into the TCB at the (old) head of the
       * g_readytorun task list. If up_setjmp returns a non-zero
       * value, then this is really the previously running task restarting!
       */

      if (!up_setjmp(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;
          sdbg("New Active Task TCB=%p\n", rtcb);

          /* The way that we handle signals in the simulation is kind of
           * a kludge. This would be unsafe in a truly multi-threaded,
           * interrupt driven environment.
           */

          if (rtcb->xcp.sigdeliver)
            {
              sdbg("Delivering signals TCB=%p\n", rtcb);
              ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb);
              rtcb->xcp.sigdeliver = NULL;
            }

          /* Then switch contexts */

          up_longjmp(rtcb->xcp.regs, 1);
        }
    }
}