Example 1
int task_exit(void)
{
  FAR struct tcb_s *dtcb = (FAR struct tcb_s*)g_readytorun.head;
  FAR struct tcb_s *rtcb;
  int ret;

  /* Remove the TCB of the current task from the ready-to-run list.  A context
   * switch will definitely be necessary -- that must be done by the
   * architecture-specific logic.
   *
   * sched_removereadytorun will mark the task at the head of the ready-to-run
   * list with state == TSTATE_TASK_RUNNING.
   */

  (void)sched_removereadytorun(dtcb);
  rtcb = (FAR struct tcb_s*)g_readytorun.head;

  /* We are now in a bad state -- the head of the ready to run task list
   * does not correspond to the thread that is running.  Disabling pre-
   * emption on this TCB and marking the new ready-to-run task as not
   * running (see, for example, get_errno_ptr()) should keep us fairly safe.
   *
   * We disable pre-emption here by directly incrementing the lockcount
   * (vs. calling sched_lock()).
   */

  rtcb->lockcount++;
  rtcb->task_state = TSTATE_TASK_READYTORUN;

  /* Move the TCB to the specified blocked task list and delete it.  Calling
   * task_terminate with non-blocking true will suppress atexit() and on-exit()
   * calls and will cause buffered I/O to fail to be flushed.  The former
   * is required _exit() behavior; the latter is optional _exit() behavior.
   */

  sched_addblocked(dtcb, TSTATE_TASK_INACTIVE);
  ret = task_terminate(dtcb->pid, true);
  rtcb->task_state = TSTATE_TASK_RUNNING;

  /* If there are any pending tasks, then add them to the ready-to-run
   * task list now
   */

  if (g_pendingtasks.head)
    {
      (void)sched_mergepending();
    }

  /* We can't use sched_unlock() to decrement the lock count because the
   * sched_mergepending() call above might have changed the task at the
   * head of the ready-to-run list.  Furthermore, we should not need to
   * perform the unlock action anyway because we know that the pending
   * task list is empty.  So all we really need to do is to decrement
   * the lockcount on rtcb.
   */

  rtcb->lockcount--;
  return ret;
}
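Note: Example 1 disables preemption by incrementing rtcb->lockcount directly instead of calling sched_lock(). The following standalone sketch is not NuttX code; names such as fake_tcb_s and preemption_allowed() are hypothetical and only illustrate the idea of a per-task lock count gating preemption.

#include <stdbool.h>
#include <stdio.h>

struct fake_tcb_s
{
  int lockcount;        /* > 0 means preemption is disabled for this task */
};

static bool preemption_allowed(const struct fake_tcb_s *running)
{
  /* A pending higher-priority task may take the CPU only when the running
   * task holds no preemption lock.
   */

  return running->lockcount == 0;
}

int main(void)
{
  struct fake_tcb_s tcb = { .lockcount = 0 };

  tcb.lockcount++;                                            /* like rtcb->lockcount++ above */
  printf("preempt allowed: %d\n", preemption_allowed(&tcb));  /* prints 0 */

  tcb.lockcount--;                                            /* like rtcb->lockcount-- above */
  printf("preempt allowed: %d\n", preemption_allowed(&tcb));  /* prints 1 */
  return 0;
}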
Example 2
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
    /* Verify that the caller is sane */

    if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
        tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > UINT8_MIN
        || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
        || priority > SCHED_PRIORITY_MAX
#endif
        ) {
        warn("%s: task sched error\n", __func__);
        return;
    }
    else {
        struct tcb_s *rtcb = current_task;
        bool switch_needed;

        /* Remove the tcb task from the ready-to-run list.
         * sched_removereadytorun will return true if we just
         * removed the head of the ready-to-run list.
         */
        switch_needed = sched_removereadytorun(tcb);

        /* Set up the new task priority */
        tcb->sched_priority = (uint8_t)priority;

        /* Return the task to the ready-to-run task list.
         * sched_addreadytorun will return true if the task was
         * added at the head of the list.  We will need to perform a
         * context switch only if the exclusive OR of the two calls is
         * non-zero (i.e., exactly one of the calls changed the head of
         * the ready-to-run list).
         */
        switch_needed ^= sched_addreadytorun(tcb);

        /* Now, perform the context switch if one is needed */
        if (switch_needed && !up_interrupt_context()) {
            struct tcb_s *nexttcb;
            // If there are any pending tasks, add them to the g_readytorun
            // task list now.  Normally up_release_pending(), called from
            // sched_unlock(), would do this once preemption is re-enabled,
            // but it is safe to merge them here before the context switch.
            if (g_pendingtasks.head) {
                warn("Disable preemption failed for reprioritize task\n");
                sched_mergepending();
            }

            nexttcb = (struct tcb_s*)g_readytorun.head;
            // context switch
            up_switchcontext(rtcb, nexttcb);
        }
    }
}
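Note: the comments above describe the exclusive-OR test on the return values of sched_removereadytorun() and sched_addreadytorun(). Below is a standalone sketch of just that test (the helper check() is hypothetical, not scheduler code), showing why XOR captures "exactly one call changed the head of the list".

#include <stdbool.h>
#include <stdio.h>

static void check(bool removed_head, bool added_at_head)
{
  /* A context switch is needed only when exactly one of the two list
   * operations changed the list head.
   */

  bool switch_needed = removed_head ^ added_at_head;
  printf("removed_head=%d added_at_head=%d -> switch_needed=%d\n",
         removed_head, added_at_head, switch_needed);
}

int main(void)
{
  check(false, false);  /* head untouched twice: no switch */
  check(true,  true);   /* task left the head and came right back: no switch */
  check(true,  false);  /* old head removed, new head differs: switch */
  check(false, true);   /* task jumped to the head: switch */
  return 0;
}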
Example 3
/****************************************************************************
 * Name: up_block_task
 *
 * Description:
 *   The currently executing task at the head of
 *   the ready to run list must be stopped.  Save its context
 *   and move it to the inactive list specified by task_state.
 *
 *   This function is called only from the NuttX scheduling
 *   logic.  Interrupts will always be disabled when this
 *   function is called.
 *
 * Inputs:
 *   tcb: Refers to a task in the ready-to-run list (normally
 *     the task at the head of the list).  It must be
 *     stopped, its context saved and moved into one of the
 *     waiting task lists.  If it was the task at the head
 *     of the ready-to-run list, then a context switch to the new
 *     ready-to-run task must be performed.
 *   task_state: Specifies which waiting task list should
 *     hold the blocked task's TCB.
 *
 ****************************************************************************/
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
    /* Verify that the context switch can be performed */
    if ((tcb->task_state < FIRST_READY_TO_RUN_STATE) ||
        (tcb->task_state > LAST_READY_TO_RUN_STATE)) {
        warn("%s: task sched error\n", __func__);
        return;
    }
    else {
        struct tcb_s *rtcb = current_task;
        bool switch_needed;

        /* Remove the tcb task from the ready-to-run list.  If we
         * are blocking the task at the head of the task list (the
         * most likely case), then a context switch to the next
         * ready-to-run task is needed. In this case, it should
         * also be true that rtcb == tcb.
         */
        switch_needed = sched_removereadytorun(tcb);

        /* Add the task to the specified blocked task list */
        sched_addblocked(tcb, (tstate_t)task_state);

        /* Now, perform the context switch if one is needed */
        if (switch_needed) {
            struct tcb_s *nexttcb;
            // this part should not be executed in interrupt context
            if (up_interrupt_context()) {
                panic("%s: %d\n", __func__, __LINE__);
            }
            // If there are any pending tasks, add them to the g_readytorun
            // task list now.  Normally up_release_pending(), called from
            // sched_unlock(), would do this once preemption is re-enabled,
            // but it is safe here because the task is blocking itself.
            if (g_pendingtasks.head) {
                warn("Disable preemption failed for task block itself\n");
                sched_mergepending();
            }
            nexttcb = (struct tcb_s*)g_readytorun.head;
            // context switch
            up_switchcontext(rtcb, nexttcb);
        }
    }
}
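Note: up_block_task() begins with a sanity check that tcb->task_state lies between FIRST_READY_TO_RUN_STATE and LAST_READY_TO_RUN_STATE. The standalone sketch below shows only that range-check pattern; the enum values are hypothetical placeholders, not the real tstate_t definition.

#include <stdbool.h>
#include <stdio.h>

enum fake_tstate_e
{
  FAKE_TSTATE_INVALID = 0,
  FAKE_TSTATE_PENDING,        /* first "ready to run" state */
  FAKE_TSTATE_READYTORUN,
  FAKE_TSTATE_RUNNING,        /* last "ready to run" state */
  FAKE_TSTATE_INACTIVE        /* first blocked state */
};

#define FAKE_FIRST_READY_TO_RUN_STATE FAKE_TSTATE_PENDING
#define FAKE_LAST_READY_TO_RUN_STATE  FAKE_TSTATE_RUNNING

static bool is_ready_to_run(enum fake_tstate_e state)
{
  /* Reject any state outside the ready-to-run range, as the examples do
   * before attempting a context switch.
   */

  return state >= FAKE_FIRST_READY_TO_RUN_STATE &&
         state <= FAKE_LAST_READY_TO_RUN_STATE;
}

int main(void)
{
  printf("%d %d\n",
         is_ready_to_run(FAKE_TSTATE_RUNNING),    /* 1 */
         is_ready_to_run(FAKE_TSTATE_INACTIVE));  /* 0 */
  return 0;
}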
Example 4
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
       PANIC();
    }
  else
    {
      struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
      bool switch_needed;

      slldbg("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just removed the head
       * of the ready to run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.  sched_addreadytorun
       * will return true if the task was added at the head of the
       * ready-to-run list.  We will need to perform a context switch only
       * if the exclusive OR of the two calls is non-zero (i.e., exactly
       * one of the calls changed the head of the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed (i.e. if the head
       * of the ready-to-run list is no longer the same).
       */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run
           * task list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

         /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;
              slldbg("New Active Task TCB=%p\n", rtcb);

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* No, then we will need to perform the user context switch */

          else
            {
              /* Switch context to the context of the task at the head of the
               * ready to run list.
               */

              struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
              up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

              /* up_switchcontext forces a context switch to the task at the
               * head of the ready-to-run list.  It does not 'return' in the
               * normal sense.  When it does return, it is because the blocked
               * task is again ready to run and has execution priority.
               */
            }
        }
    }
}
Example 5
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb = this_task();
  bool switch_needed;

  /* Verify that the context switch can be performed */

  DEBUGASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
              (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  /* Remove the tcb task from the ready-to-run list.  If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed. In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the ready-to-run
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (g_current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the g_current_regs into the OLD rtcb.
           */

          up_copystate(rtcb->xcp.regs, g_current_regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          g_current_regs = rtcb->xcp.regs;
        }

      /* Copy the user C context into the TCB at the (old) head of the
       * ready-to-run task list.  If up_saveusercontext returns a non-zero
       * value, then this is really the previously running task restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
         /* Make sure that the address environment for the previously
          * running task is closed down gracefully (data caches dump,
          * MMU flushed) and set up the address environment for the new
          * thread at the head of the ready-to-run list.
          */

         (void)group_addrenv(rtcb);
#endif
          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
Example 6
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
       PANIC();
    }
  else
    {
      FAR struct tcb_s *rtcb = this_task();
      bool switch_needed;

      sinfo("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * removed the head of the ready-to-run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.
       * sched_addreadytorun will return true if the task was
       * added at the head of the list.  We will need to perform a
       * context switch only if the exclusive OR of the two calls is
       * non-zero (i.e., exactly one of the calls changed the head of
       * the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run
           * task list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Copy the exception context into the TCB at the (old) head of the
           * ready-to-run task list.  If up_setjmp returns a non-zero
           * value, then this is really the previously running task restarting!
           */

          if (!up_setjmp(rtcb->xcp.regs))
            {
              /* Restore the exception context of the rtcb at the (new) head
               * of the ready-to-run task list.
               */

              rtcb = this_task();
              sinfo("New Active Task TCB=%p\n", rtcb);

              /* The way that we handle signals in the simulation is kind of
               * a kludge.  This would be unsafe in a truly multi-threaded, interrupt
               * driven environment.
               */

              if (rtcb->xcp.sigdeliver)
                {
                  sinfo("Delivering signals TCB=%p\n", rtcb);
                  ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb);
                  rtcb->xcp.sigdeliver = NULL;
                }

              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then switch contexts */

              up_longjmp(rtcb->xcp.regs, 1);
            }
        }
    }
}
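Note: Example 6 (the simulator build) saves the context with up_setjmp() and restores it with up_longjmp(), and its comment "this is really the previously running task restarting" describes setjmp/longjmp-style control flow. Below is a minimal standalone sketch of only that control flow using the standard C setjmp()/longjmp(); it does not model the architecture-specific register save.

#include <setjmp.h>
#include <stdio.h>

static jmp_buf saved_context;

static void resume_later(void)
{
  /* Jump back into the saved context; setjmp() then returns 1. */

  longjmp(saved_context, 1);
}

int main(void)
{
  if (setjmp(saved_context) == 0)
    {
      /* Zero return: the context was just saved, so "switch away". */

      printf("context saved, switching away\n");
      resume_later();            /* does not return normally */
    }
  else
    {
      /* Non-zero return: the previously running task is restarting. */

      printf("previously running task restarting\n");
    }

  return 0;
}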
Example 7
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
       PANIC(OSERR_BADREPRIORITIZESTATE);
    }
  else
    {
      struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
      bool switch_needed;

      slldbg("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * removed the head of the ready-to-run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.
       * sched_addreadytorun will return true if the task was
       * added at the head of the list.  We will need to perform a
       * context switch only if the exclusive OR of the two calls is
       * non-zero (i.e., exactly one of the calls changed the head of
       * the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run
           * task list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

         /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;
              slldbg("New Active Task TCB=%p\n", rtcb);

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* Copy the exception context into the TCB at the (old) head of the
           * g_readytorun task list.  If up_saveusercontext returns a non-zero
           * value, then this is really the previously running task restarting!
           */

          else if (!up_saveusercontext(rtcb->xcp.regs))
            {
              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;
              slldbg("New Active Task TCB=%p\n", rtcb);

              /* Then switch contexts */

              up_fullcontextrestore(rtcb->xcp.regs);
            }
        }
    }
}
Example 8
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
       PANIC();
    }
  else
    {
      struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
      bool switch_needed;

      slldbg("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * removed the head of the ready-to-run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.
       * sched_addreadytorun will return true if the task was
       * added at the head of the list.  We will need to perform a
       * context switch only if the exclusive OR of the two calls is
       * non-zero (i.e., exactly one of the calls changed the head of
       * the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run
           * task list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;

              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then switch contexts.  Any necessary address environment
               * changes will be made when the interrupt returns.
               */

              up_restorestate(rtcb->xcp.regs);
            }

          /* Copy the exception context into the TCB at the (old) head of the
           * g_readytorun task list.  If up_saveusercontext returns a non-zero
           * value, then this is really the previously running task restarting!
           */

          else if (!up_saveusercontext(rtcb->xcp.regs))
            {
              /* Restore the exception context of the rtcb at the (new) head
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
              /* Make sure that the address environment for the previously
               * running task is closed down gracefully (data caches dump,
               * MMU flushed) and set up the address environment for the new
               * thread at the head of the ready-to-run list.
               */

              (void)group_addrenv(rtcb);
#endif
              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then switch contexts */

              up_fullcontextrestore(rtcb->xcp.regs);
            }
        }
    }
}
Example 9
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb = this_task();
  bool switch_needed;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
         (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  /* Remove the tcb task from the ready-to-run list.  If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed. In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the ready-to-run
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any new address environment needed by
           * the new thread will be instantiated before the return from
           * interrupt.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Get the context of the task at the head of the ready to
           * run list.
           */

          struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Reset scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
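Example 10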
void up_reprioritize_rtr(FAR struct tcb_s *tcb, uint8_t priority)
{
    /* Verify that the caller is sane */

    if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
            tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
            || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
            || priority > SCHED_PRIORITY_MAX
#endif
       )
    {
        PANIC();
    }
    else
    {
        FAR struct tcb_s *rtcb = this_task();
        bool switch_needed;

        slldbg("TCB=%p PRI=%d\n", tcb, priority);

        /* Remove the tcb task from the ready-to-run list.
         * sched_removereadytorun will return true if we just
         * removed the head of the ready-to-run list.
         */

        switch_needed = sched_removereadytorun(tcb);

        /* Set up the new task priority */

        tcb->sched_priority = (uint8_t)priority;

        /* Return the task to the ready-to-run task list.
         * sched_addreadytorun will return true if the task was
         * added at the head of the list.  We will need to perform a
         * context switch only if the exclusive OR of the two calls is
         * non-zero (i.e., exactly one of the calls changed the head of
         * the ready-to-run list).
         */

        switch_needed ^= sched_addreadytorun(tcb);

        /* Now, perform the context switch if one is needed */

        if (switch_needed)
        {
            /* If we are going to do a context switch, then now is the right
             * time to add any pending tasks back into the ready-to-run
             * task list.
             */

            if (g_pendingtasks.head)
            {
                sched_mergepending();
            }

            /* Update scheduler parameters */

            sched_suspend_scheduler(rtcb);

            /* Are we in an interrupt handler? */

            if (IN_INTERRUPT)
            {
                /* Yes, then we have to do things differently.
                 * Just copy the current context into the OLD rtcb.
                 */

                SAVE_IRQCONTEXT(rtcb);

                /* Restore the exception context of the rtcb at the (new) head
                 * of the ready-to-run task list.
                 */

                rtcb = this_task();

                /* Update scheduler parameters */

                sched_resume_scheduler(rtcb);

                /* Then set up so that the context switch will be performed on exit
                 * from the interrupt.
                 */

                SET_IRQCONTEXT(rtcb);
            }

            /* Copy the exception context into the TCB at the (old) head of the
             * ready-to-run task list.  If SAVE_USERCONTEXT returns a non-zero
             * value, then this is really the previously running task restarting!
             */

            else if (!SAVE_USERCONTEXT(rtcb))
            {
                /* Restore the exception context of the rtcb at the (new) head
                 * of the ready-to-run task list.
                 */

                rtcb = this_task();

                /* Update scheduler parameters */

                sched_resume_scheduler(rtcb);

                /* Then switch contexts */

                RESTORE_USERCONTEXT(rtcb);
            }
        }
    }
}
Example 11
void up_block_task(_TCB *tcb, tstate_t task_state)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_READY_TO_RUN_STATE) ||
      (tcb->task_state > LAST_READY_TO_RUN_STATE))
    {
      PANIC(OSERR_BADBLOCKSTATE);
    }
  else
    {
      _TCB *rtcb = (_TCB*)g_readytorun.head;
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list.  If we
       * are blocking the task at the head of the task list (the
       * most likely case), then a context switch to the next
       * ready-to-run task is needed. In this case, it should
       * also be true that rtcb == tcb.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Add the task to the specified blocked task list */

      sched_addblocked(tcb, (tstate_t)task_state);

      /* If there are any pending tasks, then add them to the g_readytorun
       * task list now
       */

      if (g_pendingtasks.head)
        {
          switch_needed |= sched_mergepending();
        }

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (_TCB*)g_readytorun.head;

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* Copy the user C context into the TCB at the (old) head of the
           * g_readytorun task list.  If up_saveusercontext returns a non-zero
           * value, then this is really the previously running task restarting!
           */

          else if (!up_saveusercontext(rtcb->xcp.regs))
            {
              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (_TCB*)g_readytorun.head;

              /* Then switch contexts */

              up_fullcontextrestore(rtcb->xcp.regs);
            }
        }
    }
}
Example 12
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > UINT8_MIN
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
      )
    {
      warn("%s: task sched error\n", __func__);
      return;
    }
  else
    {
      struct tcb_s *rtcb = current_task;
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * removed the head of the ready-to-run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.
       * sched_addreadytorun will return true if the task was
       * added at the head of the list.  We will need to perform a
       * context switch only if the exclusive OR of the two calls is
       * non-zero (i.e., exactly one of the calls changed the head of
       * the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed && !up_interrupt_context())
        {
          struct tcb_s *nexttcb;

          /* If there are any pending tasks, add them to the ready-to-run
           * task list now.  Normally up_release_pending(), called from
           * sched_unlock(), would do this once preemption is re-enabled,
           * but it is safe to merge them here before the context switch.
           */

          if (g_pendingtasks.head)
            {
              warn("Disable preemption failed for reprioritize task\n");
              sched_mergepending();
            }

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Get the TCB of the new task to run */

          nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* context switch */

          up_switchcontext(rtcb, nexttcb);
        }
    }
}
Example 13
/****************************************************************************
 * Name: up_block_task
 *
 * Description:
 *   The currently executing task at the head of
 *   the ready to run list must be stopped.  Save its context
 *   and move it to the inactive list specified by task_state.
 *
 *   This function is called only from the NuttX scheduling
 *   logic.  Interrupts will always be disabled when this
 *   function is called.
 *
 * Inputs:
 *   tcb: Refers to a task in the ready-to-run list (normally
 *     the task at the head of the list).  It must be
 *     stopped, its context saved and moved into one of the
 *     waiting task lists.  If it was the task at the head
 *     of the ready-to-run list, then a context switch to the new
 *     ready-to-run task must be performed.
 *   task_state: Specifies which waiting task list should
 *     hold the blocked task's TCB.
 *
 ****************************************************************************/
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_READY_TO_RUN_STATE) ||
      (tcb->task_state > LAST_READY_TO_RUN_STATE))
    {
      warn("%s: task sched error\n", __func__);
      return;
    }
  else
    {
      struct tcb_s *rtcb = current_task;
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list.  If we
       * are blocking the task at the head of the task list (the
       * most likely case), then a context switch to the next
       * ready-to-run task is needed. In this case, it should
       * also be true that rtcb == tcb.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Add the task to the specified blocked task list */

      sched_addblocked(tcb, (tstate_t)task_state);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          struct tcb_s *nexttcb;

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* this part should not be executed in interrupt context */

          if (up_interrupt_context())
            {
              panic("%s: %d\n", __func__, __LINE__);
            }

          /* If there are any pending tasks, add them to the ready-to-run
           * task list now.  Normally up_release_pending(), called from
           * sched_unlock(), would do this once preemption is re-enabled,
           * but it is safe here because the task is blocking itself.
           */

          if (g_pendingtasks.head)
            {
              warn("Disable preemption failed for task block itself\n");
              sched_mergepending();
            }

          nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Reset scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* context switch */

          up_switchcontext(rtcb, nexttcb);
        }
    }
}
Example 14
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
  bool switch_needed;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
         (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  sdbg("Blocking TCB=%p\n", tcb);

  /* Remove the tcb task from the ready-to-run list.  If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed. In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the g_readytorun
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Copy the exception context into the TCB at the (old) head of the
       * g_readytorun task list.  If up_setjmp returns a non-zero
       * value, then this is really the previously running task restarting!
       */

      if (!up_setjmp(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;
          sdbg("New Active Task TCB=%p\n", rtcb);

          /* The way that we handle signals in the simulation is kind of
           * a kludge.  This would be unsafe in a truly multi-threaded, interrupt
           * driven environment.
           */

          if (rtcb->xcp.sigdeliver)
            {
              sdbg("Delivering signals TCB=%p\n", rtcb);
              ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb);
              rtcb->xcp.sigdeliver = NULL;
            }

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_longjmp(rtcb->xcp.regs, 1);
        }
    }
}
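Note: the simulator examples deliver a pending signal through the stored xcp.sigdeliver function pointer and then clear it. The standalone sketch below shows that one-shot deferred-callback pattern; the fake_task_s type and deliver() helper are hypothetical, not the NuttX signal machinery.

#include <stddef.h>
#include <stdio.h>

struct fake_task_s;                                        /* forward declaration */
typedef void (*fake_sig_deliver_t)(struct fake_task_s *task);

struct fake_task_s
{
  fake_sig_deliver_t sigdeliver;   /* NULL when no signal delivery is pending */
};

static void deliver(struct fake_task_s *task)
{
  printf("delivering signal to task %p\n", (void *)task);
}

int main(void)
{
  struct fake_task_s task = { .sigdeliver = deliver };

  if (task.sigdeliver)
    {
      /* Invoke the stored callback once, then clear it so it cannot fire
       * twice, as with rtcb->xcp.sigdeliver above.
       */

      task.sigdeliver(&task);
      task.sigdeliver = NULL;
    }

  return 0;
}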
Example 15
void up_block_task(FAR struct tcb_s *tcb, tstate_t task_state)
{
  FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;
  bool switch_needed;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
         (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  /* dbg("Blocking TCB=%p\n", tcb); */

  /* Remove the tcb task from the ready-to-run list.  If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed. In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the g_readytorun
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Are we in an interrupt handler? */

      if (IN_INTERRUPT())
        {
          /* Yes, then we have to do things differently.
           * Just copy the current registers into the OLD rtcb.
           */

          SAVE_IRQCONTEXT(rtcb);

          /* Restore the exception context of the rtcb at the (new) head 
           * of the g_readytorun task list.
           */

          rtcb = (FAR struct tcb_s*)g_readytorun.head;
          /* dbg("New Active Task TCB=%p\n", rtcb); */

          /* Then set up so that the context switch will be performed on exit
           * from the interrupt.
           */

          SET_IRQCONTEXT(rtcb);
        }

      /* Copy the user C context into the TCB at the (old) head of the
       * g_readytorun task list.  If SAVE_USERCONTEXT returns a non-zero
       * value, then this is really the previously running task restarting!
       */

      else if (!SAVE_USERCONTEXT(rtcb))
        {
          /* Restore the exception context of the rtcb at the (new) head 
           * of the g_readytorun task list.
           */

          rtcb = (FAR struct tcb_s*)g_readytorun.head;
          /* dbg("New Active Task TCB=%p\n", rtcb); */

          /* Then switch contexts */

          RESTORE_USERCONTEXT(rtcb);
        }
    }
}
Example 16
int sched_setpriority(FAR struct tcb_s *tcb, int sched_priority)
{
  FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;
  tstate_t task_state;
  irqstate_t saved_state;

  /* Verify that the requested priority is in the valid range */

  if (sched_priority < SCHED_PRIORITY_MIN ||
      sched_priority > SCHED_PRIORITY_MAX)
    {
      set_errno(EINVAL);
      return ERROR;
    }

  /* We need to ensure that there is no interrupt activity while
   * performing the following.
   */

  saved_state = irqsave();

  /* There are four cases that must be considered: */

  task_state = tcb->task_state;
  switch (task_state)
    {
      /* CASE 1. The task is running or ready-to-run and a context switch
       * may be caused by the re-prioritization
       */

      case TSTATE_TASK_RUNNING:

        /* A context switch will occur if the new priority of the running
         * task becomes less than OR EQUAL TO the next highest priority
         * ready to run task.
         */

        if (sched_priority <= tcb->flink->sched_priority)
          {
            /* A context switch will occur. */

            up_reprioritize_rtr(tcb, (uint8_t)sched_priority);
          }

        /* Otherwise, we can just change priority since it has no effect */

        else
          {
            /* Change the task priority */

            tcb->sched_priority = (uint8_t)sched_priority;
          }
        break;

      /* CASE 2. The task is ready-to-run (but not running) and a context
       * switch may be caused by the re-prioritization
       */

      case TSTATE_TASK_READYTORUN:

        /* A context switch will occur if the new priority of the ready-to
         * run task is (strictly) greater than the current running task
         */

        if (sched_priority > rtcb->sched_priority)
          {
            /* A context switch will occur. */

            up_reprioritize_rtr(tcb, (uint8_t)sched_priority);
          }

        /* Otherwise, we can just change priority and re-schedule (since it
         * has no other effect).
         */

        else
          {
            /* Remove the TCB from the ready-to-run task list */

            ASSERT(!sched_removereadytorun(tcb));

            /* Change the task priority */

            tcb->sched_priority = (uint8_t)sched_priority;

            /* Put it back into the ready-to-run task list */

            ASSERT(!sched_addreadytorun(tcb));
          }
        break;

      /* CASE 3. The task is not in the ready-to-run list.  Changing its
       * priority cannot affect the currently executing task.
       */

      default:

        /* CASE 3a. The task resides in a prioritized list. */

        if (g_tasklisttable[task_state].prioritized)
          {
            /* Remove the TCB from the prioritized task list */

            dq_rem((FAR dq_entry_t*)tcb, (FAR dq_queue_t*)g_tasklisttable[task_state].list);

            /* Change the task priority */

            tcb->sched_priority = (uint8_t)sched_priority;

            /* Put it back into the prioritized list at the correct
             * position
             */

            sched_addprioritized(tcb, (FAR dq_queue_t*)g_tasklisttable[task_state].list);
          }

        /* CASE 3b. The task resides in a non-prioritized list. */

        else
          {
            /* Just change the task's priority */

            tcb->sched_priority = (uint8_t)sched_priority;
          }
        break;
    }

  irqrestore(saved_state);
  return OK;
}
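Note: sched_setpriority() uses two different comparisons: the running task forces a switch when its new priority becomes less than or equal to that of the next ready-to-run task, while a ready-to-run task forces one only when its new priority becomes strictly greater than the running task's. Below is a standalone sketch of just those two tests; the helper names are hypothetical, not the NuttX implementation.

#include <stdbool.h>
#include <stdio.h>

static bool running_needs_switch(int new_prio, int next_ready_prio)
{
  /* CASE 1 test above: lowering the running task to or below the next
   * ready-to-run task causes a context switch.
   */

  return new_prio <= next_ready_prio;
}

static bool ready_needs_switch(int new_prio, int running_prio)
{
  /* CASE 2 test above: a ready-to-run task must become strictly higher
   * priority than the running task to cause a context switch.
   */

  return new_prio > running_prio;
}

int main(void)
{
  printf("%d\n", running_needs_switch(100, 100));  /* 1: equal -> switch */
  printf("%d\n", ready_needs_switch(100, 100));    /* 0: equal -> no switch */
  return 0;
}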
Example 17
void up_block_task(_TCB *tcb, tstate_t task_state)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_READY_TO_RUN_STATE) ||
      (tcb->task_state > LAST_READY_TO_RUN_STATE))
    {
      PANIC(OSERR_BADBLOCKSTATE);
    }
  else
    {
      _TCB *rtcb = (_TCB*)g_readytorun.head;
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list.  If we
       * are blocking the task at the head of the task list (the
       * most likely case), then a context switch to the next
       * ready-to-run task is needed. In this case, it should
       * also be true that rtcb == tcb.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Add the task to the specified blocked task list */

      sched_addblocked(tcb, (tstate_t)task_state);

      /* If there are any pending tasks, then add them to the g_readytorun
       * task list now
       */

      if (g_pendingtasks.head)
        {
          switch_needed |= sched_mergepending();
        }

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (_TCB*)g_readytorun.head;

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* No, then we will need to perform the user context switch */

          else
            {
              /* Switch context to the context of the task at the head of the
               * ready to run list.
               */

               _TCB *nexttcb = (_TCB*)g_readytorun.head;
               up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

              /* up_switchcontext forces a context switch to the task at the
               * head of the ready-to-run list.  It does not 'return' in the
               * normal sense.  When it does return, it is because the blocked
               * task is again ready to run and has execution priority.
               */
           }
        }
    }
}