Example #1
void _exit(int status)
{
    struct tcb_s* tcb;

    /* Destroy the task at the head of the ready to run list. */

    (void)task_exit();

    /* Now, perform the context switch to the new ready-to-run task at the
     * head of the list.
     */

    tcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
    /* Make sure that the address environment for the previously running
     * task is closed down gracefully (data caches dump, MMU flushed) and
     * set up the address environment for the new thread at the head of
     * the ready-to-run list.
     */

    (void)group_addrenv(tcb);
#endif

    /* Then switch contexts */

    up_switchcontext(NULL, tcb);
}
Example #2
/****************************************************************************
 * Name: up_unblock_task
 *
 * Description:
 *   A task is currently in an inactive task list
 *   but has been prepped to execute.  Move the TCB to the
 *   ready-to-run list, restore its context, and start execution.
 *
 * Inputs:
 *   tcb: Refers to the tcb to be unblocked.  This tcb is
 *     in one of the waiting task lists.  It must be moved to
 *     the ready-to-run list and, if it is the highest priority
 *     ready-to-run task, executed.
 *
 ****************************************************************************/
void up_unblock_task(struct tcb_s *tcb)
{
    /* Verify that the context switch can be performed */
    if ((tcb->task_state < FIRST_BLOCKED_STATE) ||
        (tcb->task_state > LAST_BLOCKED_STATE)) {
        warn("%s: task sched error\n", __func__);
        return;
    }
    else {
        struct tcb_s *rtcb = current_task;

        /* Remove the task from the blocked task list */
        sched_removeblocked(tcb);

        /* Reset its timeslice.  This is only meaningful for round
         * robin tasks but it doesn't hurt to do it for everything.
         */
#if CONFIG_RR_INTERVAL > 0
        tcb->timeslice = CONFIG_RR_INTERVAL / MSEC_PER_TICK;
#endif
    
        // Add the task in the correct location in the prioritized
        // g_readytorun task list.
        if (sched_addreadytorun(tcb) && !up_interrupt_context()) {
            /* The currently active task has changed! */
            struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
            // context switch
            up_switchcontext(rtcb, nexttcb);
        }
    }
}
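A note on the timeslice reset in the example above: CONFIG_RR_INTERVAL is expressed in milliseconds while the timeslice field counts system ticks, so the code divides by MSEC_PER_TICK (a later example uses the MSEC2TICK() helper for the same purpose). A minimal, self-contained sketch of that conversion follows; the two constant values below are assumptions chosen only for illustration.

#include <stdio.h>

#define MSEC_PER_TICK      10    /* assumed 100 Hz system tick */
#define CONFIG_RR_INTERVAL 200   /* assumed 200 ms round-robin interval */

int main(void)
{
  /* Same arithmetic as tcb->timeslice = CONFIG_RR_INTERVAL / MSEC_PER_TICK */
  int timeslice = CONFIG_RR_INTERVAL / MSEC_PER_TICK;

  printf("timeslice = %d ticks\n", timeslice);  /* prints: timeslice = 20 ticks */
  return 0;
}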
Example #3
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
    /* Verify that the caller is sane */

    if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
        tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > UINT8_MIN
        || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
        || priority > SCHED_PRIORITY_MAX
#endif
        ) {
        warn("%s: task sched error\n", __func__);
        return;
    }
    else {
        struct tcb_s *rtcb = current_task;
        bool switch_needed;

        /* Remove the tcb task from the ready-to-run list.
         * sched_removereadytorun will return true if we just
         * removed the head of the ready-to-run list.
         */
        switch_needed = sched_removereadytorun(tcb);

        /* Setup up the new task priority */
        tcb->sched_priority = (uint8_t)priority;

        /* Return the task to the ready-to-run task list.
         * sched_addreadytorun will return true if the task was
         * added at the head of the list.  We will need to perform a
         * context switch only if the exclusive OR of the two return
         * values is non-zero (i.e., one and only one of the calls
         * changed the head of the ready-to-run list).
         */
        switch_needed ^= sched_addreadytorun(tcb);

        /* Now, perform the context switch if one is needed */
        if (switch_needed && !up_interrupt_context()) {
            struct tcb_s *nexttcb;
            // If there are any pending tasks, then add them to the g_readytorun
            // task list now. Normally up_release_pending(), called from
            // sched_unlock(), would do this when preemption is re-enabled; but
            // since this task is switching out anyway, it is safe to do it here.
            if (g_pendingtasks.head) {
                warn("Disable preemption failed for reprioritize task\n");
                sched_mergepending();
            }

            nexttcb = (struct tcb_s*)g_readytorun.head;
            // context switch
            up_switchcontext(rtcb, nexttcb);
        }
    }
}
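The "exclusive OR" reasoning in the comment above is easy to miss: a context switch is needed only when exactly one of sched_removereadytorun() and sched_addreadytorun() reports that the head of the ready-to-run list changed; if both report it, the same task ended up back at the head. Here is a minimal stand-alone sketch of that decision, using a hypothetical helper name (switch_needed) rather than any NuttX API:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper: true when the two head-change reports differ,
 * i.e. exactly one of the remove/add calls changed the list head.
 */
static bool switch_needed(bool removed_head, bool added_at_head)
{
  return removed_head ^ added_at_head;
}

int main(void)
{
  printf("%d\n", switch_needed(false, false)); /* 0: head untouched             */
  printf("%d\n", switch_needed(true,  false)); /* 1: old head was removed       */
  printf("%d\n", switch_needed(false, true));  /* 1: task became the new head   */
  printf("%d\n", switch_needed(true,  true));  /* 0: same task back at the head */
  return 0;
}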
Example #4
void up_release_pending(void)
{
  _TCB *rtcb = (_TCB*)g_readytorun.head;

  slldbg("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the g_readytorun task list */

  /* sched_lock(); */
  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to
       * switch contexts.  First check if we are operating in
       * interrupt context:
       */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

           up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head 
           * of the g_readytorun task list.
           */

          rtcb = (_TCB*)g_readytorun.head;
          slldbg("New Active Task TCB=%p\n", rtcb);

          /* Then switch contexts */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          _TCB *nexttcb = (_TCB*)g_readytorun.head;
          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
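The example above spells out the pattern that most of the remaining examples follow: if the scheduler is entered from an interrupt handler, the old task's registers are saved into its TCB and the new task's registers are restored so the switch takes effect on interrupt return; otherwise up_switchcontext() performs the switch immediately. A toy, self-contained model of that decision is sketched below; the names (in_interrupt, deferred_switch, immediate_switch) are illustrative stand-ins, not the NuttX API.

#include <stdbool.h>
#include <stdio.h>

static bool in_interrupt;  /* stands in for the current_regs / CURRENT_REGS test */

static void deferred_switch(void)
{
  /* Interrupt path: save old regs into the old TCB, restore the new TCB's
   * regs; the actual switch happens when the interrupt returns.
   */
  puts("save/restore register areas; switch on interrupt exit");
}

static void immediate_switch(void)
{
  /* Task path: switch right away; this call does not return until the
   * old task is scheduled again.
   */
  puts("up_switchcontext(old, new)");
}

static void do_context_switch(void)
{
  if (in_interrupt)
    {
      deferred_switch();
    }
  else
    {
      immediate_switch();
    }
}

int main(void)
{
  in_interrupt = true;
  do_context_switch();   /* interrupt context: deferred switch */

  in_interrupt = false;
  do_context_switch();   /* task context: immediate switch */
  return 0;
}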
Example #5
void up_unblock_task(struct tcb_s *tcb)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_BLOCKED_STATE) ||
      (tcb->task_state > LAST_BLOCKED_STATE))
    {
      warn("%s: task sched error\n", __func__);
      return;
    }
  else
    {
      struct tcb_s *rtcb = current_task;

      /* Remove the task from the blocked task list */

      sched_removeblocked(tcb);

      /* Add the task in the correct location in the prioritized
       * ready-to-run task list.
       */

      if (sched_addreadytorun(tcb) && !up_interrupt_context())
        {
          /* The currently active task has changed! */
          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Get the TCB of the new task at the head of the ready-to-run list */

          struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* context switch */

          up_switchcontext(rtcb, nexttcb);
        }
    }
}
Example #6
/**
 * This function is called from sched_unlock(), which verifies that we are
 * not in interrupt context and disables interrupts before calling it.
 */
void up_release_pending(void)
{
    struct tcb_s *rtcb = current_task;

    /* Merge the g_pendingtasks list into the g_readytorun task list */

    if (sched_mergepending()) {
        /* The currently active task has changed! */
        struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;

        // context switch
        up_switchcontext(rtcb, nexttcb);
    }
}
Example #7
void _exit(int status)
{
    struct tcb_s* tcb;

    /* Destroy the task at the head of the ready to run list. */

    (void)task_exit();

    /* Now, perform the context switch to the new ready-to-run task at the
     * head of the list.
     */

    tcb = (struct tcb_s*)g_readytorun.head;

    /* Then switch contexts */

    up_switchcontext(NULL, tcb);
}
Example #8
/****************************************************************************
 * Name: up_block_task
 *
 * Description:
 *   The currently executing task at the head of
 *   the ready to run list must be stopped.  Save its context
 *   and move it to the inactive list specified by task_state.
 *
 *   This function is called only from the NuttX scheduling
 *   logic.  Interrupts will always be disabled when this
 *   function is called.
 *
 * Inputs:
 *   tcb: Refers to a task in the ready-to-run list (normally
 *     the task at the head of the list).  It must be
 *     stopped, its context saved, and the TCB moved into one
 *     of the waiting task lists.  If it was the task at the
 *     head of the ready-to-run list, then a context switch to
 *     the new ready-to-run task must be performed.
 *   task_state: Specifies which waiting task list should
 *     hold the blocked task's TCB.
 *
 ****************************************************************************/
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
    /* Verify that the context switch can be performed */
    if ((tcb->task_state < FIRST_READY_TO_RUN_STATE) ||
        (tcb->task_state > LAST_READY_TO_RUN_STATE)) {
        warn("%s: task sched error\n", __func__);
        return;
    }
    else {
        struct tcb_s *rtcb = current_task;
        bool switch_needed;

        /* Remove the tcb task from the ready-to-run list.  If we
         * are blocking the task at the head of the task list (the
         * most likely case), then a context switch to the next
         * ready-to-run task is needed. In this case, it should
         * also be true that rtcb == tcb.
         */
        switch_needed = sched_removereadytorun(tcb);

        /* Add the task to the specified blocked task list */
        sched_addblocked(tcb, (tstate_t)task_state);

        /* Now, perform the context switch if one is needed */
        if (switch_needed) {
            struct tcb_s *nexttcb;
            // this part should not be executed in interrupt context
            if (up_interrupt_context()) {
                panic("%s: %d\n", __func__, __LINE__);
            }
            // If there are any pending tasks, then add them to the g_readytorun
            // task list now. Normally up_release_pending(), called from
            // sched_unlock(), would do this when preemption is re-enabled; but
            // since this task is blocking itself anyway, it is safe to do it here.
            if (g_pendingtasks.head) {
                warn("Disable preemption failed for task block itself\n");
                sched_mergepending();
            }
            nexttcb = (struct tcb_s*)g_readytorun.head;
            // context switch
            up_switchcontext(rtcb, nexttcb);
        }
    }
}
Example #9
void up_release_pending(void)
{
  struct tcb_s *rtcb = current_task;

  /* Merge the g_pendingtasks list into the ready-to-run task list */

  if (sched_mergepending())
    {
      struct tcb_s *nexttcb = this_task();

      /* The currently active task has changed!  We will need to switch
       * contexts.
       *
       * Update scheduler parameters.
       */

      sched_suspend_scheduler(rtcb);

#ifdef CONFIG_ARCH_ADDRENV
      /* Make sure that the address environment for the previously
       * running task is closed down gracefully (data caches dump,
       * MMU flushed) and set up the address environment for the new
       * thread at the head of the ready-to-run list.
       */

      (void)group_addrenv(nexttcb);
#endif
      /* Update scheduler parameters */

      sched_resume_scheduler(nexttcb);

      /* context switch */

      up_switchcontext(rtcb, nexttcb);
    }
}
Example #10
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
       DEBUGPANIC();
    }
  else
    {
      struct tcb_s *rtcb = this_task();
      bool switch_needed;

      sinfo("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * removed the head of the ready-to-run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Setup up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.
       * sched_addreadytorun will return true if the task was
       * added at the head of the list.  We will need to perform a
       * context switch only if the exclusive OR of the two return
       * values is non-zero (i.e., one and only one of the calls
       * changed the head of the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

         /* Are we in an interrupt handler? */

          if (g_current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the g_current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head
               * of the ready-to-run task list.
               */

              rtcb = this_task();

              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then switch contexts.  Any necessary address environment
               * changes will be made when the interrupt returns.
               */

              up_restorestate(rtcb->xcp.regs);
            }

          /* No, then we will need to perform the user context switch */

          else
            {
              /* Switch context to the context of the task at the head of the
               * ready to run list.
               */

              struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
              /* Make sure that the address environment for the previously
               * running task is closed down gracefully (data caches dump,
               * MMU flushed) and set up the address environment for the new
               * thread at the head of the ready-to-run list.
               */

              (void)group_addrenv(nexttcb);
#endif
              /* Update scheduler parameters */

              sched_resume_scheduler(nexttcb);

              /* Then switch contexts */

              up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

              /* up_switchcontext forces a context switch to the task at the
               * head of the ready-to-run list.  It does not 'return' in the
               * normal sense.  When it does return, it is because the blocked
               * task is again ready to run and has execution priority.
               */
            }
        }
    }
}
Example #11
void up_unblock_task(struct tcb_s *tcb)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
         (tcb->task_state <= LAST_BLOCKED_STATE));

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Reset its timeslice.  This is only meaningful for round
   * robin tasks but it doesn't hurt to do it for everything.
   */

#if CONFIG_RR_INTERVAL > 0
  tcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
#endif

  /* Add the task in the correct location in the prioritized
   * g_readytorun task list
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed! We need to do
       * a context switch to the new task.
       *
       * Are we in an interrupt handler?
       */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Restore the exception context of the new task that is ready to
           * run (probably tcb).  This is the new rtcb at the head of the
           * g_readytorun task list.
           */

          struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
         /* Make sure that the address environment for the previously
          * running task is closed down gracefully (data caches dump,
          * MMU flushed) and set up the address environment for the new
          * thread at the head of the ready-to-run list.
          */

         (void)group_addrenv(nexttcb);
#endif
          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
Example #12
void up_unblock_task(struct tcb_s *tcb)
{
  struct tcb_s *rtcb = this_task();

  /* Verify that the context switch can be performed */

  DEBUGASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
              (tcb->task_state <= LAST_BLOCKED_STATE));

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Add the task in the correct location in the prioritized
   * ready-to-run task list
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed! We need to do
       * a context switch to the new task.
       */

      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (CURRENT_REGS)
        {
          /* Yes, then we have to do things differently.
           * Just copy the CURRENT_REGS into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          struct tcb_s *nexttcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
Example #13
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
       PANIC();
    }
  else
    {
      struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
      bool switch_needed;

      slldbg("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just removed the head
       * of the ready to run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Setup up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list. sched_addreadytorun
       * will return true if the task was added to the head of the
       * ready-to-run list.  We will need to perform a context switch only
       * if the exclusive OR of the two return values is non-zero (i.e.,
       * one and only one of the calls changed the head of the
       * ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed (i.e. if the head
       * of the ready-to-run list is no longer the same).
       */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

         /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;
              slldbg("New Active Task TCB=%p\n", rtcb);

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* No, then we will need to perform the user context switch */

          else
            {
              /* Switch context to the context of the task at the head of the
               * ready to run list.
               */

              struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
              up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

              /* up_switchcontext forces a context switch to the task at the
               * head of the ready-to-run list.  It does not 'return' in the
               * normal sense.  When it does return, it is because the blocked
               * task is again ready to run and has execution priority.
               */
            }
        }
    }
}
Example #14
/****************************************************************************
 * Name: up_block_task
 *
 * Description:
 *   The currently executing task at the head of
 *   the ready to run list must be stopped.  Save its context
 *   and move it to the inactive list specified by task_state.
 *
 *   This function is called only from the NuttX scheduling
 *   logic.  Interrupts will always be disabled when this
 *   function is called.
 *
 * Inputs:
 *   tcb: Refers to a task in the ready-to-run list (normally
 *     the task at the head of the list).  It most be
 *     stopped, its context saved and moved into one of the
 *     waiting task lists.  It it was the task at the head
 *     of the ready-to-run list, then a context to the new
 *     ready to run task must be performed.
 *   task_state: Specifies which waiting task list should be
 *     hold the blocked task TCB.
 *
 ****************************************************************************/
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_READY_TO_RUN_STATE) ||
      (tcb->task_state > LAST_READY_TO_RUN_STATE))
    {
      warn("%s: task sched error\n", __func__);
      return;
    }
  else
    {
      struct tcb_s *rtcb = current_task;
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list.  If we
       * are blocking the task at the head of the task list (the
       * most likely case), then a context switch to the next
       * ready-to-run task is needed. In this case, it should
       * also be true that rtcb == tcb.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Add the task to the specified blocked task list */

      sched_addblocked(tcb, (tstate_t)task_state);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          struct tcb_s *nexttcb;

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* this part should not be executed in interrupt context */

          if (up_interrupt_context())
            {
              panic("%s: %d\n", __func__, __LINE__);
            }

          /* If there are any pending tasks, then add them to the ready-to-run
           * task list now.  Normally up_release_pending(), called from
           * sched_unlock(), would do this when preemption is re-enabled; but
           * since this task is blocking itself anyway, it is safe to do it
           * here.
           */

          if (g_pendingtasks.head)
            {
              warn("Disable preemption failed for task block itself\n");
              sched_mergepending();
            }

          nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Reset scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* context switch */

          up_switchcontext(rtcb, nexttcb);
        }
    }
}
Example #15
void up_release_pending(void)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

  slldbg("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the g_readytorun task list */

  /* sched_lock(); */
  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to
       * switch contexts.  First check if we are operating in
       * interrupt context:
       */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

           up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;
          slldbg("New Active Task TCB=%p\n", rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
Example #16
void up_block_task(_TCB *tcb, tstate_t task_state)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_READY_TO_RUN_STATE) ||
      (tcb->task_state > LAST_READY_TO_RUN_STATE))
    {
      PANIC(OSERR_BADBLOCKSTATE);
    }
  else
    {
      _TCB *rtcb = (_TCB*)g_readytorun.head;
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list.  If we
       * are blocking the task at the head of the task list (the
       * most likely case), then a context switch to the next
       * ready-to-run task is needed. In this case, it should
       * also be true that rtcb == tcb.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Add the task to the specified blocked task list */

      sched_addblocked(tcb, (tstate_t)task_state);

      /* If there are any pending tasks, then add them to the g_readytorun
       * task list now
       */

      if (g_pendingtasks.head)
        {
          switch_needed |= sched_mergepending();
        }

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (_TCB*)g_readytorun.head;

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* No, then we will need to perform the user context switch */

          else
            {
              /* Switch context to the context of the task at the head of the
               * ready to run list.
               */

               _TCB *nexttcb = (_TCB*)g_readytorun.head;
               up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

              /* up_switchcontext forces a context switch to the task at the
               * head of the ready-to-run list.  It does not 'return' in the
               * normal sense.  When it does return, it is because the blocked
               * task is again ready to run and has execution priority.
               */
           }
        }
    }
}
Example #17
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb = this_task();
  bool switch_needed;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
         (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  /* Remove the tcb task from the ready-to-run list.  If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed. In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the ready-to-run
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any new address environment needed by
           * the new thread will be instantiated before the return from
           * interrupt.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Get the context of the task at the head of the ready to
           * run list.
           */

          struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Reset scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
Example #18
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > UINT8_MIN
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
      )
    {
      warn("%s: task sched error\n", __func__);
      return;
    }
  else
    {
      struct tcb_s *rtcb = current_task;
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * removed the head of the ready-to-run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Setup up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.
       * sched_addreadytorun will return true if the task was
       * added at the head of the list.  We will need to perform a
       * context switch only if the exclusive OR of the two return
       * values is non-zero (i.e., one and only one of the calls
       * changed the head of the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed && !up_interrupt_context())
        {
          struct tcb_s *nexttcb;

          /* If there are any pending tasks, then add them to the ready-to-run
           * task list now.  Normally up_release_pending(), called from
           * sched_unlock(), would do this when preemption is re-enabled; but
           * since this task is switching out anyway, it is safe to do it
           * here.
           */

          if (g_pendingtasks.head)
            {
              warn("Disable preemption failed for reprioritize task\n");
              sched_mergepending();
            }

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Get the TCB of the new task to run */

          nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* context switch */

          up_switchcontext(rtcb, nexttcb);
        }
    }
}
Example #19
void up_unblock_task(struct tcb_s *tcb)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_BLOCKED_STATE) ||
      (tcb->task_state > LAST_BLOCKED_STATE))
    {
      PANIC(OSERR_BADUNBLOCKSTATE);
    }
  else
    {
      struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

      /* Remove the task from the blocked task list */

      sched_removeblocked(tcb);

      /* Reset its timeslice.  This is only meaningful for round
       * robin tasks but it doesn't hurt to do it for everything.
       */

#if CONFIG_RR_INTERVAL > 0
      tcb->timeslice = CONFIG_RR_INTERVAL / MSEC_PER_TICK;
#endif

      /* Add the task in the correct location in the prioritized
       * g_readytorun task list
       */

      if (sched_addreadytorun(tcb))
        {
          /* The currently active task has changed! We need to do
           * a context switch to the new task.
           *
           * Are we in an interrupt handler? 
           */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* No, then we will need to perform the user context switch */

          else
            {
              /* Switch context to the context of the task at the head of the
               * ready to run list.
               */

               struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
               up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

              /* up_switchcontext forces a context switch to the task at the
               * head of the ready-to-run list.  It does not 'return' in the
               * normal sense.  When it does return, it is because the blocked
               * task is again ready to run and has execution priority.
               */
           }
        }
    }
}
Example #20
void up_release_pending(void)
{
  struct tcb_s *rtcb = this_task();

  sinfo("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the ready-to-run task list */

  /* sched_lock(); */
  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to switch
       * contexts.
       */

      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we operating in interrupt context? */

      if (CURRENT_REGS)
        {
          /* Yes, then we have to do things differently. Just copy the
           * CURRENT_REGS into the OLD rtcb.
           */

           up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          struct tcb_s *nexttcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}