Example #1
void up_release_pending(void)
{
  _TCB *rtcb = (_TCB*)g_readytorun.head;

  slldbg("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the g_readytorun task list */

  /* sched_lock(); */
  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to
       * switch contexts.  First check if we are operating in
       * interrupt context:
       */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

           up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head 
           * of the g_readytorun task list.
           */

          rtcb = (_TCB*)g_readytorun.head;
          slldbg("New Active Task TCB=%p\n", rtcb);

          /* Then switch contexts */

          up_restorestate(rtcb->xcp.regs);
        }

      /* Copy the exception context into the TCB of the task that
       * was previously active.  If up_saveusercontext returns a non-zero
       * value, then this is really the previously running task
       * restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head 
           * of the g_readytorun task list.
           */

          rtcb = (_TCB*)g_readytorun.head;
          slldbg("New Active Task TCB=%p\n", rtcb);

           /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
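A note on the pattern above: up_saveusercontext() returns zero at the point where the context is captured and a non-zero value when the task is later resumed through up_fullcontextrestore(), which is why the restore path is taken only on a zero return. The standard setjmp()/longjmp() pair has the same return-twice shape. Below is a minimal, self-contained sketch of that behavior only; it uses setjmp/longjmp as stand-ins and is not how the architecture-specific NuttX routines are implemented.

#include <setjmp.h>
#include <stdio.h>

static jmp_buf g_saved_ctx;        /* Stand-in for rtcb->xcp.regs */

static void fake_context_restore(void)
{
  /* Stand-in for up_fullcontextrestore():  resume at the save point so
   * that the "save" call appears to return a second time, non-zero.
   */

  longjmp(g_saved_ctx, 1);
}

int main(void)
{
  if (setjmp(g_saved_ctx) == 0)
    {
      /* Zero return:  the context was just captured (the analogue of
       * up_saveusercontext() returning zero), so switch away.
       */

      printf("Context saved, switching away\n");
      fake_context_restore();
    }
  else
    {
      /* Non-zero return:  the previously saved context is restarting. */

      printf("Previously running task restarting\n");
    }

  return 0;
}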
Example #2
void up_release_pending(void)
{
  _TCB *rtcb = (_TCB*)g_readytorun.head;

  slldbg("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the g_readytorun task list */

  /* sched_lock(); */
  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to
       * switch contexts.  First check if we are operating in
       * interrupt context:
       */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

           up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head 
           * of the g_readytorun task list.
           */

          rtcb = (_TCB*)g_readytorun.head;
          slldbg("New Active Task TCB=%p\n", rtcb);

          /* Then switch contexts */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          _TCB *nexttcb = (_TCB*)g_readytorun.head;
          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
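Example #2 replaces the save/restore pair with a single up_switchcontext() call that stores the outgoing register set and loads the incoming one in one step, which is why it only "returns" once the blocked task is later resumed. The POSIX swapcontext() call has a similar one-call save-and-switch shape; the sketch below is only an analogy built on that assumption, not how NuttX implements the switch.

#include <stdio.h>
#include <ucontext.h>

static ucontext_t g_old_ctx;       /* Stand-in for rtcb->xcp.regs */
static ucontext_t g_new_ctx;       /* Stand-in for nexttcb->xcp.regs */
static char       g_stack[16384];  /* Stack for the "next task" */

static void next_task(void)
{
  printf("Next task is running\n");

  /* Switch back:  only now does the first swapcontext() call return */

  swapcontext(&g_new_ctx, &g_old_ctx);
}

int main(void)
{
  getcontext(&g_new_ctx);
  g_new_ctx.uc_stack.ss_sp   = g_stack;
  g_new_ctx.uc_stack.ss_size = sizeof(g_stack);
  g_new_ctx.uc_link          = NULL;
  makecontext(&g_new_ctx, next_task, 0);

  printf("Blocking task: switching away\n");

  /* One call that saves the old context and restores the new one,
   * analogous in shape to up_switchcontext(rtcb->xcp.regs,
   * nexttcb->xcp.regs).  It does not return until this context is
   * resumed by the swapcontext() call in next_task().
   */

  swapcontext(&g_old_ctx, &g_new_ctx);

  printf("Blocked task is again ready to run\n");
  return 0;
}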
Example #3
void up_unblock_task(_TCB *tcb)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_BLOCKED_STATE) ||
      (tcb->task_state > LAST_BLOCKED_STATE))
    {
      PANIC(OSERR_BADUNBLOCKSTATE);
    }
  else
    {
      _TCB *rtcb = (_TCB*)g_readytorun.head;

      /* Remove the task from the blocked task list */

      sched_removeblocked(tcb);

      /* Reset its timeslice.  This is only meaningful for round
       * robin tasks, but it doesn't hurt to do it for everything.
       */

#if CONFIG_RR_INTERVAL > 0
      tcb->timeslice = CONFIG_RR_INTERVAL / MSEC_PER_TICK;
#endif

      /* Add the task in the correct location in the prioritized
       * g_readytorun task list
       */

      if (sched_addreadytorun(tcb))
        {
          /* The currently active task has changed! We need to do
           * a context switch to the new task.
           *
           * Are we in an interrupt handler? 
           */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (_TCB*)g_readytorun.head;

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* We are not in an interrupt handler.  Copy the user C context
           * into the TCB of the task that was previously active.  if 
           * up_saveusercontext returns a non-zero value, then this is really the
           * previously running task restarting!
           */

          else if (!up_saveusercontext(rtcb->xcp.regs))
            {
              /* Restore the exception context of the new task that is ready to
               * run (probably tcb).  This is the new rtcb at the head of the
               * g_readytorun task list.
               */

              rtcb = (_TCB*)g_readytorun.head;

              /* Then switch contexts */

              up_fullcontextrestore(rtcb->xcp.regs);
            }
        }
    }
}
Example #4
File: arm_blocktask.c  Project: a1ien/nuttx
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
    struct tcb_s *rtcb = this_task();
    bool switch_needed;

    /* Verify that the context switch can be performed */

    ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
           (tcb->task_state <= LAST_READY_TO_RUN_STATE));

    /* Remove the tcb task from the ready-to-run list.  If we
     * are blocking the task at the head of the task list (the
     * most likely case), then a context switch to the next
     * ready-to-run task is needed. In this case, it should
     * also be true that rtcb == tcb.
     */

    switch_needed = sched_removereadytorun(tcb);

    /* Add the task to the specified blocked task list */

    sched_addblocked(tcb, (tstate_t)task_state);

    /* If there are any pending tasks, then add them to the ready-to-run
     * task list now
     */

    if (g_pendingtasks.head)
    {
        switch_needed |= sched_mergepending();
    }

    /* Now, perform the context switch if one is needed */

    if (switch_needed)
    {
        /* Update scheduler parameters */

        sched_suspend_scheduler(rtcb);

        /* Are we in an interrupt handler? */

        if (CURRENT_REGS)
        {
            /* Yes, then we have to do things differently.
             * Just copy the CURRENT_REGS into the OLD rtcb.
             */

            up_savestate(rtcb->xcp.regs);

            /* Restore the exception context of the rtcb at the (new) head
             * of the ready-to-run task list.
             */

            rtcb = this_task();

            /* Reset scheduler parameters */

            sched_resume_scheduler(rtcb);

            /* Then switch contexts.  Any necessary address environment
             * changes will be made when the interrupt returns.
             */

            up_restorestate(rtcb->xcp.regs);
        }

        /* Copy the user C context into the TCB at the (old) head of the
         * ready-to-run Task list. if up_saveusercontext returns a non-zero
         * value, then this is really the previously running task restarting!
         */

        else if (!up_saveusercontext(rtcb->xcp.regs))
        {
            /* Restore the exception context of the rtcb at the (new) head
             * of the ready-to-run task list.
             */

            rtcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
            /* Make sure that the address environment for the previously
             * running task is closed down gracefully (data caches dump,
             * MMU flushed) and set up the address environment for the new
             * thread at the head of the ready-to-run list.
             */

            (void)group_addrenv(rtcb);
#endif
            /* Reset scheduler parameters */

            sched_resume_scheduler(rtcb);

            /* Then switch contexts */

            up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
Example #5
void up_release_pending(void)
{
  struct tcb_s *rtcb = this_task();

  sinfo("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the ready-to-run task list */

  /* sched_lock(); */
  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to switch
       * contexts.
       */

      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we operating in interrupt context? */

      if (CURRENT_REGS)
        {
          /* Yes, then we have to do things differently. Just copy the
           * CURRENT_REGS into the OLD rtcb.
           */

           up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          struct tcb_s *nexttcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
Example #6
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
  /* Refuse to handle nested signal actions */

  sdbg("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);

  if (!tcb->xcp.sigdeliver)
    {
      irqstate_t flags;

      /* Make sure that interrupts are disabled */

      flags = irqsave();

      /* First, handle some special cases when the signal is
       * being delivered to the currently executing task.
       */

      sdbg("rtcb=0x%p current_regs=0x%p\n", g_readytorun.head, current_regs);

      if (tcb == (struct tcb_s*)g_readytorun.head)
        {
          /* CASE 1:  We are not in an interrupt handler and
           * a task is signalling itself for some reason.
           */

          if (!current_regs)
            {
              /* In this case just deliver the signal now. */

              sigdeliver(tcb);
            }

          /* CASE 2:  We are in an interrupt handler AND the
           * interrupted task is the same as the one that
           * must receive the signal, then we will have to modify
           * the return state as well as the state in the TCB.
           *
           * Hmmm... there appears to be a latent bug here: The following
           * logic would fail in the strange case where we are in an
           * interrupt handler, the thread is signalling itself, but
           * a context switch to another task has occurred so that
           * current_regs does not refer to the thread at g_readytorun.head!
           */

          else
            {
              /* Save the return PC, BASEPRI, and xPSR registers.  These
               * will be restored by the signal trampoline after the
               * signals have been delivered.
               */

              tcb->xcp.sigdeliver       = sigdeliver;
              tcb->xcp.saved_pc         = current_regs[REG_PC];
              tcb->xcp.saved_basepri    = current_regs[REG_BASEPRI];
              tcb->xcp.saved_xpsr       = current_regs[REG_XPSR];

              /* Then set up to vector to the trampoline with interrupts
               * disabled.  The kernel-space trampoline must run in
               * privileged thread mode.
               */

              current_regs[REG_PC]      = (uint32_t)up_sigdeliver;
              current_regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY;
              current_regs[REG_XPSR]    = ARMV6M_XPSR_T;
#ifdef CONFIG_NUTTX_KERNEL
              current_regs[REG_LR]      = EXC_RETURN_PRIVTHR;
#endif

              /* And make sure that the saved context in the TCB
               * is the same as the interrupt return context.
               */

              up_savestate(tcb->xcp.regs);
            }
        }

      /* Otherwise, we are (1) signaling a task is not running
       * from an interrupt handler or (2) we are not in an
       * interrupt handler and the running task is signalling
       * some non-running task.
       */

      else
        {
          /* Save the return PC, BASEPRI, and xPSR registers.  These
           * will be restored by the signal trampoline after the
           * signals have been delivered.
           */

          tcb->xcp.sigdeliver       = sigdeliver;
          tcb->xcp.saved_pc         = tcb->xcp.regs[REG_PC];
          tcb->xcp.saved_basepri    = tcb->xcp.regs[REG_BASEPRI];
          tcb->xcp.saved_xpsr       = tcb->xcp.regs[REG_XPSR];

          /* Then set up to vector to the trampoline with interrupts
           * disabled.  We must already be in privileged thread mode
           * to be here.
           */

          tcb->xcp.regs[REG_PC]      = (uint32_t)up_sigdeliver;
          tcb->xcp.regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY;
          tcb->xcp.regs[REG_XPSR]    = ARMV6M_XPSR_T;
        }

      irqrestore(flags);
    }
}
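The signal-delivery examples all rely on the same redirection trick: the original program counter is stashed in xcp.saved_pc and the saved register area is pointed at the up_sigdeliver trampoline, so that the signal work runs first when the task next resumes and the task then continues where it left off. The sketch below shows only that redirection idea in plain C; g_task_pc, g_saved_pc, and the trampoline are illustrative stand-ins, not NuttX code.

#include <stdio.h>

typedef void (*pc_t)(void);

static pc_t g_task_pc;             /* Stand-in for tcb->xcp.regs[REG_PC] */
static pc_t g_saved_pc;            /* Stand-in for tcb->xcp.saved_pc */

static void original_code(void)
{
  printf("Task resumes at its original PC\n");
}

static void sigdeliver_trampoline(void)
{
  /* Stand-in for up_sigdeliver():  perform the signal work, then put
   * the original PC back so that the task continues where it left off.
   */

  printf("Signal delivered\n");
  g_task_pc = g_saved_pc;
  g_task_pc();
}

int main(void)
{
  g_task_pc = original_code;

  /* The redirection step:  stash the current PC and point the task at
   * the trampoline so that it runs first when the task next resumes.
   */

  g_saved_pc = g_task_pc;
  g_task_pc  = sigdeliver_trampoline;

  g_task_pc();                     /* "Resume" the task */
  return 0;
}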
Example #7
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
       PANIC();
    }
  else
    {
      struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
      bool switch_needed;

      slldbg("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just removed the head
       * of the ready to run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.  sched_addreadytorun
       * will return true if the task was added to the head of the
       * ready-to-run list.  We will need to perform a context switch only
       * if the exclusive OR of the two calls is non-zero (i.e., one and
       * only one of the calls changes the head of the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed (i.e. if the head
       * of the ready-to-run list is no longer the same).
       */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run
           * task list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

         /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;
              slldbg("New Active Task TCB=%p\n", rtcb);

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* No, then we will need to perform the user context switch */

          else
            {
              /* Switch context to the context of the task at the head of the
               * ready to run list.
               */

              struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
              up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

              /* up_switchcontext forces a context switch to the task at the
               * head of the ready-to-run list.  It does not 'return' in the
               * normal sense.  When it does return, it is because the blocked
               * task is again ready to run and has execution priority.
               */
            }
        }
    }
}
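The switch_needed ^= sched_addreadytorun(tcb) step in Example #7 implements the exclusive-OR rule spelled out in the comment: a context switch is needed only when exactly one of the remove/add operations changed the head of the ready-to-run list (if both did, the same task ended up back at the head). A small standalone illustration of that truth table, assuming nothing beyond standard C:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
  int removed_head;
  int added_head;

  /* removed_head: sched_removereadytorun() removed the old list head.
   * added_head:   sched_addreadytorun() inserted at the new list head.
   */

  for (removed_head = 0; removed_head <= 1; removed_head++)
    {
      for (added_head = 0; added_head <= 1; added_head++)
        {
          bool switch_needed;

          switch_needed  = (bool)removed_head;
          switch_needed ^= (bool)added_head;

          printf("removed_head=%d added_head=%d -> switch_needed=%d\n",
                 removed_head, added_head, (int)switch_needed);
        }
    }

  return 0;
}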
Example #8
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
       PANIC();
    }
  else
    {
      struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
      bool switch_needed;

      slldbg("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * remove the head of the ready to run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.
       * sched_addreadytorun will return true if the task was added
       * to the head of the ready-to-run list.  We will need to perform
       * a context switch only if the exclusive OR of the two calls is
       * non-zero (i.e., one and only one of the calls changes the head
       * of the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run
           * task list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;

              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then switch contexts.  Any necessary address environment
               * changes will be made when the interrupt returns.
               */

              up_restorestate(rtcb->xcp.regs);
            }

          /* Copy the exception context into the TCB at the (old) head of the
           * g_readytorun Task list. if up_saveusercontext returns a non-zero
           * value, then this is really the previously running task restarting!
           */

          else if (!up_saveusercontext(rtcb->xcp.regs))
            {
              /* Restore the exception context of the rtcb at the (new) head
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
              /* Make sure that the address environment for the previously
               * running task is closed down gracefully (data caches dump,
               * MMU flushed) and set up the address environment for the new
               * thread at the head of the ready-to-run list.
               */

              (void)group_addrenv(rtcb);
#endif
              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then switch contexts */

              up_fullcontextrestore(rtcb->xcp.regs);
            }
        }
    }
}
Example #9
void up_unblock_task(struct tcb_s *tcb)
{
  struct tcb_s *rtcb = this_task();

  /* Verify that the context switch can be performed */

  DEBUGASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
              (tcb->task_state <= LAST_BLOCKED_STATE));

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Add the task in the correct location in the prioritized
   * ready-to-run task list
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed! We need to do
       * a context switch to the new task.
       */

      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (g_current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the g_current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* We are not in an interrupt handler.  Copy the user C context
       * into the TCB of the task that was previously active.  if
       * up_saveusercontext returns a non-zero value, then this is really the
       * previously running task restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the new task that is ready to
           * run (probably tcb).  This is the new rtcb at the head of the
           * ready-to-run task list.
           */

          rtcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
         /* Make sure that the address environment for the previously
          * running task is closed down gracefully (data caches dump,
          * MMU flushed) and set up the address environment for the new
          * thread at the head of the ready-to-run list.
          */

         (void)group_addrenv(rtcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
Example #10
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
  irqstate_t flags;

  sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);
  DEBUGASSERT(tcb != NULL && sigdeliver != NULL);

  /* Make sure that interrupts are disabled */

  flags = enter_critical_section();

  /* Refuse to handle nested signal actions */

  if (tcb->xcp.sigdeliver == NULL)
    {
      /* First, handle some special cases when the signal is being delivered
       * to the currently executing task.
       */

      sinfo("rtcb=0x%p CURRENT_REGS=0x%p\n", this_task(), CURRENT_REGS);

      if (tcb == this_task())
        {
          /* CASE 1:  We are not in an interrupt handler and a task is
           * signaling itself for some reason.
           */

          if (!CURRENT_REGS)
            {
              /* In this case just deliver the signal now.
               * REVISIT:  Signal handle will run in a critical section!
               */

              sigdeliver(tcb);
            }

          /* CASE 2:  We are in an interrupt handler AND the interrupted
           * task is the same as the one that must receive the signal, then
           * we will have to modify the return state as well as the state in
           * the TCB.
           */

          else
            {
              /* Save the return PC, CPSR and either the BASEPRI or PRIMASK
               * registers (and perhaps also the LR).  These will be
               * restored by the signal trampoline after the signal has been
               * delivered.
               */

              tcb->xcp.sigdeliver       = (FAR void *)sigdeliver;
              tcb->xcp.saved_pc         = CURRENT_REGS[REG_PC];
#ifdef CONFIG_ARMV7M_USEBASEPRI
              tcb->xcp.saved_basepri    = CURRENT_REGS[REG_BASEPRI];
#else
              tcb->xcp.saved_primask    = CURRENT_REGS[REG_PRIMASK];
#endif
              tcb->xcp.saved_xpsr       = CURRENT_REGS[REG_XPSR];
#ifdef CONFIG_BUILD_PROTECTED
              tcb->xcp.saved_lr         = CURRENT_REGS[REG_LR];
#endif
              /* Then set up to vector to the trampoline with interrupts
               * disabled.  The kernel-space trampoline must run in
               * privileged thread mode.
               */

              CURRENT_REGS[REG_PC]      = (uint32_t)up_sigdeliver;
#ifdef CONFIG_ARMV7M_USEBASEPRI
              CURRENT_REGS[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY;
#else
              CURRENT_REGS[REG_PRIMASK] = 1;
#endif
              CURRENT_REGS[REG_XPSR]    = ARMV7M_XPSR_T;
#ifdef CONFIG_BUILD_PROTECTED
              CURRENT_REGS[REG_LR]      = EXC_RETURN_PRIVTHR;
#endif
              /* And make sure that the saved context in the TCB is the same
               * as the interrupt return context.
               */

              up_savestate(tcb->xcp.regs);
            }
        }

      /* Otherwise, we are (1) signaling a task that is not running from an
       * interrupt handler or (2) we are not in an interrupt handler and the
       * running task is signaling some non-running task.
       */

      else
        {
          /* Save the return PC, CPSR and either the BASEPRI or PRIMASK
           * registers (and perhaps also the LR).  These will be restored
           * by the signal trampoline after the signal has been delivered.
           */

          tcb->xcp.sigdeliver       = (FAR void *)sigdeliver;
          tcb->xcp.saved_pc         = tcb->xcp.regs[REG_PC];
#ifdef CONFIG_ARMV7M_USEBASEPRI
          tcb->xcp.saved_basepri    = tcb->xcp.regs[REG_BASEPRI];
#else
          tcb->xcp.saved_primask    = tcb->xcp.regs[REG_PRIMASK];
#endif
          tcb->xcp.saved_xpsr       = tcb->xcp.regs[REG_XPSR];
#ifdef CONFIG_BUILD_PROTECTED
          tcb->xcp.saved_lr         = tcb->xcp.regs[REG_LR];
#endif
          /* Then set up to vector to the trampoline with interrupts
           * disabled.  We must already be in privileged thread mode to be
           * here.
           */

          tcb->xcp.regs[REG_PC]      = (uint32_t)up_sigdeliver;
#ifdef CONFIG_ARMV7M_USEBASEPRI
          tcb->xcp.regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY;
#else
          tcb->xcp.regs[REG_PRIMASK] = 1;
#endif
          tcb->xcp.regs[REG_XPSR]    = ARMV7M_XPSR_T;
#ifdef CONFIG_BUILD_PROTECTED
          tcb->xcp.regs[REG_LR]      = EXC_RETURN_PRIVTHR;
#endif
        }
    }

  leave_critical_section(flags);
}
Example #11
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
  irqstate_t flags;
  int cpu;
  int me;

  sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);

  /* Make sure that interrupts are disabled */

  flags = enter_critical_section();

  /* Refuse to handle nested signal actions */

  if (!tcb->xcp.sigdeliver)
    {
      /* First, handle some special cases when the signal is being delivered
       * to a task that is currently executing on any CPU.
       */

      sinfo("rtcb=0x%p CURRENT_REGS=0x%p\n", this_task(), CURRENT_REGS);

      if (tcb->task_state == TSTATE_TASK_RUNNING)
        {
          me  = this_cpu();
          cpu = tcb->cpu;

          /* CASE 1:  We are not in an interrupt handler and a task is
           * signaling itself for some reason.
           */

          if (cpu == me && !CURRENT_REGS)
            {
              /* In this case just deliver the signal now.
               * REVISIT:  Signal handler will run in a critical section!
               */

              sigdeliver(tcb);
            }

          /* CASE 2:  The task that needs to receive the signal is running.
           * This could happen if the task is running on another CPU OR if
           * we are in an interrupt handler and the task is running on this
           * CPU.  In the former case, we will have to PAUSE the other CPU
           * first.  But in either case, we will have to modify the return
           * state as well as the state in the TCB.
           */

          else
            {
              /* If we are signaling a task running on another CPU, we
               * have to PAUSE that CPU first.
               */

              if (cpu != me)
                {
                  /* Pause the CPU */

                  up_cpu_pause(cpu);

                  /* Wait while the pause request is pending */

                  while (up_cpu_pausereq(cpu))
                    {
                    }

                  /* Now tcb on the other CPU can be accessed safely */

                  /* Copy tcb->xcp.regs to the tcb->xcp.saved_* fields.  These
                   * will be restored by the signal trampoline after the signal
                   * has been delivered.
                   */

                  tcb->xcp.sigdeliver       = (FAR void *)sigdeliver;
                  tcb->xcp.saved_pc         = tcb->xcp.regs[REG_PC];
#ifdef CONFIG_ARMV7M_USEBASEPRI
                  tcb->xcp.saved_basepri    = tcb->xcp.regs[REG_BASEPRI];
#else
                  tcb->xcp.saved_primask    = tcb->xcp.regs[REG_PRIMASK];
#endif
                  tcb->xcp.saved_xpsr       = tcb->xcp.regs[REG_XPSR];
#ifdef CONFIG_BUILD_PROTECTED
                  tcb->xcp.saved_lr         = tcb->xcp.regs[REG_LR];
#endif

                  /* Then set up to vector to the trampoline with interrupts
                   * disabled.  We must already be in privileged thread mode
                   * to be here.
                   */

                  tcb->xcp.regs[REG_PC]      = (uint32_t)up_sigdeliver;
#ifdef CONFIG_ARMV7M_USEBASEPRI
                  tcb->xcp.regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY;
#else
                  tcb->xcp.regs[REG_PRIMASK] = 1;
#endif
                  tcb->xcp.regs[REG_XPSR]    = ARMV7M_XPSR_T;
#ifdef CONFIG_BUILD_PROTECTED
                  tcb->xcp.regs[REG_LR]      = EXC_RETURN_PRIVTHR;
#endif
                }
              else
                {
                  /* tcb is running on the same CPU */

                  /* Save the return PC, CPSR and either the BASEPRI or PRIMASK
                   * registers (and perhaps also the LR).  These will be
                   * restored by the signal trampoline after the signal has been
                   * delivered.
                   */

                  tcb->xcp.sigdeliver       = (FAR void *)sigdeliver;
                  tcb->xcp.saved_pc         = CURRENT_REGS[REG_PC];
#ifdef CONFIG_ARMV7M_USEBASEPRI
                  tcb->xcp.saved_basepri    = CURRENT_REGS[REG_BASEPRI];
#else
                  tcb->xcp.saved_primask    = CURRENT_REGS[REG_PRIMASK];
#endif
                  tcb->xcp.saved_xpsr       = CURRENT_REGS[REG_XPSR];
#ifdef CONFIG_BUILD_PROTECTED
                  tcb->xcp.saved_lr         = CURRENT_REGS[REG_LR];
#endif

                  /* Then set up to vector to the trampoline with interrupts
                   * disabled.  The kernel-space trampoline must run in
                   * privileged thread mode.
                   */

                  CURRENT_REGS[REG_PC]      = (uint32_t)up_sigdeliver;
#ifdef CONFIG_ARMV7M_USEBASEPRI
                  CURRENT_REGS[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY;
#else
                  CURRENT_REGS[REG_PRIMASK] = 1;
#endif
                  CURRENT_REGS[REG_XPSR]    = ARMV7M_XPSR_T;
#ifdef CONFIG_BUILD_PROTECTED
                  CURRENT_REGS[REG_LR]      = EXC_RETURN_PRIVTHR;
#endif

                  /* And make sure that the saved context in the TCB is the same
                   * as the interrupt return context.
                   */

                  up_savestate(tcb->xcp.regs);
                }

              /* Increment the IRQ lock count so that when the task is restarted,
               * it will hold the IRQ spinlock.
               */

              DEBUGASSERT(tcb->irqcount < INT16_MAX);
              tcb->irqcount++;

              /* In an SMP configuration, the interrupt disable logic also
               * involves spinlocks that are configured per the TCB irqcount
               * field.  This is logically equivalent to enter_critical_section().
               * The matching call to leave_critical_section() will be
               * performed in up_sigdeliver().
               */

              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                          &g_cpu_irqlock);


              /* RESUME the other CPU if it was PAUSED */

              if (cpu != me)
                {
                  up_cpu_resume(cpu);
                }
            }
        }

      /* Otherwise, we are (1) signaling a task is not running from an
       * interrupt handler or (2) we are not in an interrupt handler and the
       * running task is signaling some other non-running task.
       */

      else
        {
          /* Save the return PC, CPSR and either the BASEPRI or PRIMASK
           * registers (and perhaps also the LR).  These will be restored
           * by the signal trampoline after the signal has been delivered.
           */

          tcb->xcp.sigdeliver       = (FAR void *)sigdeliver;
          tcb->xcp.saved_pc         = tcb->xcp.regs[REG_PC];
#ifdef CONFIG_ARMV7M_USEBASEPRI
          tcb->xcp.saved_basepri    = tcb->xcp.regs[REG_BASEPRI];
#else
          tcb->xcp.saved_primask    = tcb->xcp.regs[REG_PRIMASK];
#endif
          tcb->xcp.saved_xpsr       = tcb->xcp.regs[REG_XPSR];
#ifdef CONFIG_BUILD_PROTECTED
          tcb->xcp.saved_lr         = tcb->xcp.regs[REG_LR];
#endif
          /* Increment the IRQ lock count so that when the task is restarted,
           * it will hold the IRQ spinlock.
           */

          DEBUGASSERT(tcb->irqcount < INT16_MAX);
          tcb->irqcount++;

          /* Then set up to vector to the trampoline with interrupts
           * disabled.  We must already be in privileged thread mode to be
           * here.
           */

          tcb->xcp.regs[REG_PC]      = (uint32_t)up_sigdeliver;
#ifdef CONFIG_ARMV7M_USEBASEPRI
          tcb->xcp.regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY;
#else
          tcb->xcp.regs[REG_PRIMASK] = 1;
#endif
          tcb->xcp.regs[REG_XPSR]    = ARMV7M_XPSR_T;
#ifdef CONFIG_BUILD_PROTECTED
          tcb->xcp.regs[REG_LR]      = EXC_RETURN_PRIVTHR;
#endif
        }
    }

  leave_critical_section(flags);
}
Example #12
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
  irqstate_t flags;
  uint32_t int_ctx;

  sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);

  /* Make sure that interrupts are disabled */

  flags = enter_critical_section();

  /* Refuse to handle nested signal actions */

  if (!tcb->xcp.sigdeliver)
    {
      /* First, handle some special cases when the signal is
       * being delivered to the currently executing task.
       */

      sinfo("rtcb=0x%p g_current_regs=0x%p\n",
            this_task(), g_current_regs);

      if (tcb == this_task())
        {
          /* CASE 1:  We are not in an interrupt handler and
           * a task is signalling itself for some reason.
           */

          if (!g_current_regs)
            {
              /* In this case just deliver the signal now. */

              sigdeliver(tcb);
            }

          /* CASE 2:  We are in an interrupt handler AND the
           * interrupted task is the same as the one that
           * must receive the signal, then we will have to modify
           * the return state as well as the state in the TCB.
           *
           * Hmmm... there appears to be a latent bug here: The following
           * logic would fail in the strange case where we are in an
           * interrupt handler, the thread is signalling itself, but
           * a context switch to another task has occurred so that
           * g_current_regs does not refer to the thread of this_task()!
           */

          else
            {
              /* Save the return EPC and STATUS registers.  These will be
               * restored by the signal trampoline after the signals have
               * been delivered.
               */

              tcb->xcp.sigdeliver       = sigdeliver;
              tcb->xcp.saved_epc        = g_current_regs[REG_EPC];

              /* Then set up to vector to the trampoline with interrupts
               * disabled
               */

              g_current_regs[REG_EPC]     = (uint32_t)up_sigdeliver;
              int_ctx                     = g_current_regs[REG_INT_CTX];
              int_ctx                     &= ~EPIC_STATUS_INT_PRI_MASK;
              int_ctx                     |= EPIC_STATUS_INT_PRI1;
              g_current_regs[REG_INT_CTX] = int_ctx;

              /* And make sure that the saved context in the TCB
               * is the same as the interrupt return context.
               */

              up_savestate(tcb->xcp.regs);

              sinfo("PC/STATUS Saved: %08x/%08x New: %08x/%08x\n",
                    tcb->xcp.saved_epc, tcb->xcp.saved_status,
                    g_current_regs[REG_EPC], g_current_regs[REG_STATUS]);
            }
        }

      /* Otherwise, we are (1) signaling a task is not running
       * from an interrupt handler or (2) we are not in an
       * interrupt handler and the running task is signalling
       * some non-running task.
       */

      else
        {
          /* Save the return EPC and STATUS registers.  These will be
           * restored by the signal trampoline after the signals have
           * been delivered.
           */

          tcb->xcp.sigdeliver       = sigdeliver;
          tcb->xcp.saved_epc        = tcb->xcp.regs[REG_EPC];
          tcb->xcp.saved_int_ctx    = tcb->xcp.regs[REG_INT_CTX];

          /* Then set up to vector to the trampoline with interrupts
           * disabled
           */

          tcb->xcp.regs[REG_EPC]      = (uint32_t)up_sigdeliver;
          int_ctx                     = tcb->xcp.regs[REG_INT_CTX];
          int_ctx                     &= ~EPIC_STATUS_INT_PRI_MASK;
          int_ctx                     |= EPIC_STATUS_INT_PRI1;
          tcb->xcp.regs[REG_INT_CTX]  = int_ctx;

          sinfo("PC/STATUS Saved: %08x/%08x New: %08x/%08x\n",
                tcb->xcp.saved_epc, tcb->xcp.saved_status,
                tcb->xcp.regs[REG_EPC], tcb->xcp.regs[REG_STATUS]);
        }
    }

  leave_critical_section(flags);
}
Example #13
void up_release_pending(void)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

  slldbg("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the g_readytorun task list */

  /* sched_lock(); */
  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to
       * switch contexts.  First check if we are operating in
       * interrupt context:
       */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

           up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;
          slldbg("New Active Task TCB=%p\n", rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
Example #14
void up_release_pending(void)
{
  struct tcb_s *rtcb = this_task();

  sinfo("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the ready-to-run task list */

  /* sched_lock(); */
  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to
       * switch contexts.
       */

      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we operating in interrupt context? */

      if (CURRENT_REGS)
        {
          /* Yes, then we have to do things differently.
           * Just copy the CURRENT_REGS into the OLD rtcb.
           */

           up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* Copy the exception context into the TCB of the task that
       * was previously active.  If up_saveusercontext returns a non-zero
       * value, then this is really the previously running task
       * restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
Example #15
void up_block_task(_TCB *tcb, tstate_t task_state)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_READY_TO_RUN_STATE) ||
      (tcb->task_state > LAST_READY_TO_RUN_STATE))
    {
      PANIC(OSERR_BADBLOCKSTATE);
    }
  else
    {
      _TCB *rtcb = (_TCB*)g_readytorun.head;
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list.  If we
       * are blocking the task at the head of the task list (the
       * most likely case), then a context switch to the next
       * ready-to-run task is needed. In this case, it should
       * also be true that rtcb == tcb.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Add the task to the specified blocked task list */

      sched_addblocked(tcb, (tstate_t)task_state);

      /* If there are any pending tasks, then add them to the g_readytorun
       * task list now
       */

      if (g_pendingtasks.head)
        {
          switch_needed |= sched_mergepending();
        }

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (_TCB*)g_readytorun.head;

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* No, then we will need to perform the user context switch */

          else
            {
              /* Switch context to the context of the task at the head of the
               * ready to run list.
               */

               _TCB *nexttcb = (_TCB*)g_readytorun.head;
               up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

              /* up_switchcontext forces a context switch to the task at the
               * head of the ready-to-run list.  It does not 'return' in the
               * normal sense.  When it does return, it is because the blocked
               * task is again ready to run and has execution priority.
               */
           }
        }
    }
}
Example #16
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb = this_task();
  bool switch_needed;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
         (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  /* Remove the tcb task from the ready-to-run list.  If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed. In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the ready-to-run
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any new address environment needed by
           * the new thread will be instantiated before the return from
           * interrupt.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Get the context of the task at the head of the ready to
           * run list.
           */

          struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Reset scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
Example #17
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
  /* Refuse to handle nested signal actions */

  sdbg("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);

  if (!tcb->xcp.sigdeliver)
    {
      irqstate_t flags;

      /* Make sure that interrupts are disabled */

      flags = irqsave();

      /* First, handle some special cases when the signal is being delivered
       * to the currently executing task.
       */

      sdbg("rtcb=0x%p current_regs=0x%p\n", g_readytorun.head, current_regs);

      if (tcb == (struct tcb_s*)g_readytorun.head)
        {
          /* CASE 1:  We are not in an interrupt handler and a task is
           * signalling itself for some reason.
           */

          if (!current_regs)
            {
              /* In this case just deliver the signal now. */

              sigdeliver(tcb);
            }

          /* CASE 2:  We are in an interrupt handler AND the interrupted
           * task is the same as the one that must receive the signal, then
           * we will have to modify the return state as well as the state
           * in the TCB.
           *
           * Hmmm... there appears to be a latent bug here: The following logic
           * would fail in the strange case where we are in an interrupt
           * handler, the thread is signalling itself, but a context switch
           * to another task has occurred so that current_regs does not
           * refer to the thread at g_readytorun.head!
           */

          else
            {
              /* Save the return lr and cpsr and one scratch register
               * These will be restored by the signal trampoline after
               * the signals have been delivered.
               */

              tcb->xcp.sigdeliver       = sigdeliver;
              tcb->xcp.saved_pc         = current_regs[REG_PC];
              tcb->xcp.saved_cpsr       = current_regs[REG_CPSR];

              /* Then set up to vector to the trampoline with interrupts
               * disabled
               */

              current_regs[REG_PC]      = (uint32_t)up_sigdeliver;
              current_regs[REG_CPSR]    = (PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT);

              /* And make sure that the saved context in the TCB is the same
               * as the interrupt return context.
               */

              up_savestate(tcb->xcp.regs);
            }
        }

      /* Otherwise, we are (1) signaling a task is not running from an
       * interrupt handler or (2) we are not in an interrupt handler and the
       * running task is signalling some non-running task.
       */

      else
        {
          /* Save the return lr and cpsr and one scratch register.  These
           * will be restored by the signal trampoline after the signals
           * have been delivered.
           */

          tcb->xcp.sigdeliver       = sigdeliver;
          tcb->xcp.saved_pc         = tcb->xcp.regs[REG_PC];
          tcb->xcp.saved_cpsr       = tcb->xcp.regs[REG_CPSR];

          /* Then set up to vector to the trampoline with interrupts
           * disabled
           */

          tcb->xcp.regs[REG_PC]      = (uint32_t)up_sigdeliver;
          tcb->xcp.regs[REG_CPSR]    = (PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT);
        }

      irqrestore(flags);
    }
}
Example #18
void up_release_pending(void)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

  slldbg("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the g_readytorun task list */

  /* sched_lock(); */
  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to switch
       * contexts.
       *
       * Update scheduler parameters.
       */

      sched_suspend_scheduler(rtcb);

      /* Are we operating in interrupt context? */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

           up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* Copy the exception context into the TCB of the task that
       * was previously active.  If up_saveusercontext returns a non-zero
       * value, then this is really the previously running task
       * restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(rtcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
Example #19
void up_block_task(_TCB *tcb, tstate_t task_state)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_READY_TO_RUN_STATE) ||
      (tcb->task_state > LAST_READY_TO_RUN_STATE))
    {
      PANIC(OSERR_BADBLOCKSTATE);
    }
  else
    {
      _TCB *rtcb = (_TCB*)g_readytorun.head;
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list.  If we
       * are blocking the task at the head of the task list (the
       * most likely case), then a context switch to the next
       * ready-to-run task is needed. In this case, it should
       * also be true that rtcb == tcb.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Add the task to the specified blocked task list */

      sched_addblocked(tcb, (tstate_t)task_state);

      /* If there are any pending tasks, then add them to the g_readytorun
       * task list now
       */

      if (g_pendingtasks.head)
        {
          switch_needed |= sched_mergepending();
        }

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (_TCB*)g_readytorun.head;

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* Copy the user C context into the TCB at the (old) head of the
           * g_readytorun Task list. if up_saveusercontext returns a non-zero
           * value, then this is really the previously running task restarting!
           */

          else if (!up_saveusercontext(rtcb->xcp.regs))
            {
              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (_TCB*)g_readytorun.head;

              /* Then switch contexts */

              up_fullcontextrestore(rtcb->xcp.regs);
            }
        }
    }
}
Example #20
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
       PANIC(OSERR_BADREPRIORITIZESTATE);
    }
  else
    {
      struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
      bool switch_needed;

      slldbg("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * remove the head of the ready to run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Setup up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.
       * sched_addreadytorun will return true if the task was
       * added at the head of the list.  We will need to perform a
       * context switch only if the exclusive OR of the two calls is
       * non-zero (i.e., one and only one of the calls changes the
       * head of the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

          /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

              up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;
              slldbg("New Active Task TCB=%p\n", rtcb);

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* Copy the exception context into the TCB at the (old) head of the
           * g_readytorun task list.  If up_saveusercontext returns a non-zero
           * value, then this is really the previously running task restarting!
           */

          else if (!up_saveusercontext(rtcb->xcp.regs))
            {
              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;
              slldbg("New Active Task TCB=%p\n", rtcb);

              /* Then switch contexts */

              up_fullcontextrestore(rtcb->xcp.regs);
            }
        }
    }
}
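The "exclusive OR of the two calls" argument in Example #20 can be checked in isolation: a switch is needed exactly when one, and only one, of sched_removereadytorun() and sched_addreadytorun() reports touching the head of the ready-to-run list; if both do, the same task is still at the head. A small standalone check of that truth table (illustration only):

#include <stdbool.h>
#include <stdio.h>

/* was_head: sched_removereadytorun() removed the running (head) task.
 * now_head: sched_addreadytorun() put the task back at the head.
 * The head of the list changed only if exactly one of them is true.
 */

static bool switch_needed(bool was_head, bool now_head)
{
  return was_head ^ now_head;
}

int main(void)
{
  printf("removed head, re-added at head -> %d (same task keeps running)\n",
         switch_needed(true, true));
  printf("removed head, added lower      -> %d (another task is now head)\n",
         switch_needed(true, false));
  printf("was not head, added at head    -> %d (the task preempts)\n",
         switch_needed(false, true));
  printf("was not head, still not head   -> %d (nothing visible changed)\n",
         switch_needed(false, false));
  return 0;
}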
Example #21
void up_unblock_task(struct tcb_s *tcb)
{
  struct tcb_s *rtcb = this_task();

  /* Verify that the context switch can be performed */

  DEBUGASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
              (tcb->task_state <= LAST_BLOCKED_STATE));

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Add the task in the correct location in the prioritized
   * ready-to-run task list
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed! We need to do
       * a context switch to the new task.
       */

      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (CURRENT_REGS)
        {
          /* Yes, then we have to do things differently.
           * Just copy the CURRENT_REGS into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          struct tcb_s *nexttcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
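up_switchcontext() "does not return in the normal sense": it saves the caller's registers and resumes another task, and the call only completes when some later switch resumes the saved context. POSIX swapcontext() has the same shape, so the behaviour can be demonstrated outside the kernel with the following analogy (ucontext, not NuttX code):

#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx;
static ucontext_t task_ctx;
static char task_stack[16384];

static void other_task(void)
{
  puts("other task running");

  /* Resume the saved caller, as if the blocked task became ready again */

  swapcontext(&task_ctx, &main_ctx);
}

int main(void)
{
  getcontext(&task_ctx);
  task_ctx.uc_stack.ss_sp   = task_stack;
  task_ctx.uc_stack.ss_size = sizeof(task_stack);
  task_ctx.uc_link          = &main_ctx;
  makecontext(&task_ctx, other_task, 0);

  puts("switching away, like up_switchcontext()");

  /* Saves this context and runs other_task(); the call only completes
   * when other_task() switches back.
   */

  swapcontext(&main_ctx, &task_ctx);

  puts("original context resumed");
  return 0;
}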
Example #22
void up_unblock_task(struct tcb_s *tcb)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
         (tcb->task_state <= LAST_BLOCKED_STATE));

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Reset its timeslice.  This is only meaningful for round
   * robin tasks but it doesn't hurt to do it for everything
   */

#if CONFIG_RR_INTERVAL > 0
  tcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
#endif

  /* Add the task in the correct location in the prioritized
   * g_readytorun task list
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed! We need to do
       * a context switch to the new task.
       *
       * Are we in an interrupt handler?
       */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

          /* Then switch contexts */

          up_restorestate(rtcb->xcp.regs);

#ifdef CONFIG_ARCH_ADDRENV
         /* Make sure that the address environment for the previously
          * running task is closed down gracefully (data caches dump,
          * MMU flushed) and set up the address environment for the new
          * thread at the head of the ready-to-run list.
          */

         (void)group_addrenv(rtcb);
#endif
        }

      /* We are not in an interrupt handler.  Copy the user C context
       * into the TCB of the task that was previously active.  If
       * up_saveusercontext returns a non-zero value, then this is really
       * the previously running task restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the new task that is ready to
           * run (probably tcb).  This is the new rtcb at the head of the
           * g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
         /* Make sure that the address environment for the previously
          * running task is closed down gracefully (data caches dump,
          * MMU flushed) and set up the address environment for the new
          * thread at the head of the ready-to-run list.
          */

         (void)group_addrenv(rtcb);
#endif
          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
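The CONFIG_ARCH_ADDRENV blocks in Example #22 encode one ordering rule: install the incoming task's address environment (group_addrenv()) before restoring its registers, since the restored stack pointer and return address are only meaningful under the new mappings. Below is a self-contained sketch of just that ordering; fake_tcb, addrenv_install() and context_restore() are placeholders, not NuttX functions.

#include <stdint.h>
#include <stdio.h>

struct fake_tcb                  /* placeholder, not struct tcb_s */
{
  uint32_t regs[16];
};

static void addrenv_install(struct fake_tcb *tcb)
{
  /* Role of group_addrenv() above: dump the previous task's data from
   * the caches and switch the MMU to the incoming task's mappings.
   */

  (void)tcb;
  puts("1. install the incoming task's address environment");
}

static void context_restore(uint32_t *regs)
{
  /* Restore SP/PC only after the mappings they refer to exist */

  (void)regs;
  puts("2. restore the saved registers");
}

static void resume_task(struct fake_tcb *next)
{
  addrenv_install(next);
  context_restore(next->regs);
}

int main(void)
{
  struct fake_tcb task = { {0} };
  resume_task(&task);
  return 0;
}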
Example #23
void up_unblock_task(struct tcb_s *tcb)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
         (tcb->task_state <= LAST_BLOCKED_STATE));

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Reset its timeslice.  This is only meaningful for round
   * robin tasks but it doesn't hurt to do it for everything
   */

#if CONFIG_RR_INTERVAL > 0
  tcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
#endif

  /* Add the task in the correct location in the prioritized
   * g_readytorun task list
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed! We need to do
       * a context switch to the new task.
       *
       * Are we in an interrupt handler?
       */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Restore the exception context of the new task that is ready to
           * run (probably tcb).  This is the new rtcb at the head of the
           * g_readytorun task list.
           */

          struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
         /* Make sure that the address environment for the previously
          * running task is closed down gracefully (data caches dump,
          * MMU flushed) and set up the address environment for the new
          * thread at the head of the ready-to-run list.
          */

         (void)group_addrenv(nexttcb);
#endif
          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
Example #24
void up_unblock_task(struct tcb_s *tcb)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_BLOCKED_STATE) ||
      (tcb->task_state > LAST_BLOCKED_STATE))
    {
      PANIC(OSERR_BADUNBLOCKSTATE);
    }
  else
    {
      struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

      /* Remove the task from the blocked task list */

      sched_removeblocked(tcb);

      /* Reset its timeslice.  This is only meaningful for round
       * robin tasks but it doesn't hurt to do it for everything
       */

#if CONFIG_RR_INTERVAL > 0
      tcb->timeslice = CONFIG_RR_INTERVAL / MSEC_PER_TICK;
#endif

      /* Add the task in the correct location in the prioritized
       * g_readytorun task list
       */

      if (sched_addreadytorun(tcb))
        {
          /* The currently active task has changed! We need to do
           * a context switch to the new task.
           *
           * Are we in an interrupt handler? 
           */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

              up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head 
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;

              /* Then switch contexts */

              up_restorestate(rtcb->xcp.regs);
            }

          /* No, then we will need to perform the user context switch */

          else
            {
              /* Switch context to the context of the task at the head of the
               * ready to run list.
               */

              struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
              up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

              /* up_switchcontext forces a context switch to the task at the
               * head of the ready-to-run list.  It does not 'return' in the
               * normal sense.  When it does return, it is because the blocked
               * task is again ready to run and has execution priority.
               */
            }
        }
    }
}
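Examples #22 and #23 reset the timeslice with MSEC2TICK(CONFIG_RR_INTERVAL), while Example #24 divides by MSEC_PER_TICK directly. The results differ when the round-robin interval is not a multiple of the tick period; the sketch below assumes a round-up definition of MSEC2TICK similar in spirit to NuttX's and uses made-up numbers.

#include <stdio.h>

#define MSEC_PER_TICK   10   /* made-up tick period: 10 ms per tick */

/* Round-up millisecond-to-tick conversion, similar in spirit to
 * NuttX's MSEC2TICK(); the exact definition is an assumption here.
 */

#define MSEC2TICK(msec) (((msec) + (MSEC_PER_TICK - 1)) / MSEC_PER_TICK)

int main(void)
{
  int interval = 25;   /* made-up CONFIG_RR_INTERVAL in milliseconds */

  printf("plain division: %d ticks\n", interval / MSEC_PER_TICK);  /* 2 */
  printf("round-up macro: %d ticks\n", MSEC2TICK(interval));       /* 3 */
  return 0;
}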
Example #25
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
  /* Refuse to handle nested signal actions */

  sdbg("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);

  if (!tcb->xcp.sigdeliver)
    {
      irqstate_t flags;

      /* Make sure that interrupts are disabled */

      flags = irqsave();

      /* First, handle some special cases when the signal is being delivered
       * to the currently executing task.
       */

      sdbg("rtcb=0x%p current_regs=0x%p\n", g_readytorun.head, current_regs);

      if (tcb == (struct tcb_s*)g_readytorun.head)
        {
          /* CASE 1:  We are not in an interrupt handler and a task is
           * signalling itself for some reason.
           */

          if (!current_regs)
            {
              /* In this case just deliver the signal now. */

              sigdeliver(tcb);
            }

          /* CASE 2:  We are in an interrupt handler AND the interrupted
           * task is the same as the one that must receive the signal, then
           * we will have to modify the return state as well as the state in
           * the TCB.
           */

          else
            {
              /* Save the return PC, xPSR, and PRIMASK registers (and
               * perhaps the LR).  These will be restored by the signal
               * trampoline after the signal has been delivered.
               */

              tcb->xcp.sigdeliver       = sigdeliver;
              tcb->xcp.saved_pc         = current_regs[REG_PC];
              tcb->xcp.saved_primask    = current_regs[REG_PRIMASK];
              tcb->xcp.saved_xpsr       = current_regs[REG_XPSR];
#ifdef CONFIG_NUTTX_KERNEL
              tcb->xcp.saved_lr         = current_regs[REG_LR];
#endif
              /* Then set up to vector to the trampoline with interrupts
               * disabled.  The kernel-space trampoline must run in
               * privileged thread mode.
               */

              current_regs[REG_PC]      = (uint32_t)up_sigdeliver;
              current_regs[REG_PRIMASK] = 1;
              current_regs[REG_XPSR]    = ARMV6M_XPSR_T;
#ifdef CONFIG_NUTTX_KERNEL
              current_regs[REG_LR]      = EXC_RETURN_PRIVTHR;
#endif
              /* And make sure that the saved context in the TCB is the same
               * as the interrupt return context.
               */

              up_savestate(tcb->xcp.regs);
            }
        }

      /* Otherwise, we are (1) signalling a task that is not running from an
       * interrupt handler, or (2) we are not in an interrupt handler and the
       * running task is signalling some non-running task.
       */

      else
        {
          /* Save the return PC, xPSR, and PRIMASK registers (and perhaps
           * also the LR).  These will be restored by the signal trampoline
           * after the signal has been delivered.
           */

          tcb->xcp.sigdeliver       = sigdeliver;
          tcb->xcp.saved_pc         = tcb->xcp.regs[REG_PC];
          tcb->xcp.saved_primask    = tcb->xcp.regs[REG_PRIMASK];
          tcb->xcp.saved_xpsr       = tcb->xcp.regs[REG_XPSR];
#ifdef CONFIG_NUTTX_KERNEL
          tcb->xcp.saved_lr         = tcb->xcp.regs[REG_LR];
#endif

          /* Then set up to vector to the trampoline with interrupts
           * disabled.  We must already be in privileged thread mode to be
           * here.
           */

          tcb->xcp.regs[REG_PC]      = (uint32_t)up_sigdeliver;
          tcb->xcp.regs[REG_PRIMASK] = 1;
          tcb->xcp.regs[REG_XPSR]    = ARMV6M_XPSR_T;
#ifdef CONFIG_NUTTX_KERNEL
          tcb->xcp.regs[REG_LR]      = EXC_RETURN_PRIVTHR;
#endif
        }

      irqrestore(flags);
    }
}
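The heart of up_schedule_sigaction() is redirecting the saved PC: remember where the task would have resumed, point the saved PC at a trampoline, and let the trampoline run the handler before continuing at the original address. The following is a heavily simplified, hypothetical model of that redirection (no real registers or interrupt masking; all names are made up):

#include <stdio.h>

/* Hypothetical, heavily simplified "saved context": just a resume
 * address and a pending handler.  A real port saves the whole frame.
 */

struct fake_context
{
  void (*pc)(void);          /* where the task would resume */
  void (*saved_pc)(void);    /* original resume point */
  void (*sigdeliver)(void);  /* deferred signal handler */
};

static struct fake_context ctx;

static void task_work(void)
{
  puts("task resumes its normal work");
}

static void trampoline(void)
{
  /* Run the deferred handler first (the role of up_sigdeliver()) ... */

  ctx.sigdeliver();

  /* ... then arrange to continue where the task was originally headed */

  ctx.pc = ctx.saved_pc;
}

static void handler(void)
{
  puts("signal handler runs before the task continues");
}

static void schedule_sigaction(void (*deliver)(void))
{
  /* Same idea as the examples above: remember the original PC and
   * redirect execution through the trampoline.
   */

  ctx.sigdeliver = deliver;
  ctx.saved_pc   = ctx.pc;
  ctx.pc         = trampoline;
}

int main(void)
{
  void (*resume)(void);

  ctx.pc = task_work;
  schedule_sigaction(handler);

  resume = ctx.pc;           /* dispatcher: run whatever PC points at */
  resume();                  /* trampoline -> handler */

  resume = ctx.pc;
  resume();                  /* original task work */

  return 0;
}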