Example #1
static uint32_t *common_handler(int irq, uint32_t *regs)
{
  board_led_on(LED_INIRQ);

  /* Current regs non-zero indicates that we are processing an interrupt;
   * current_regs is also used to manage interrupt level context switches.
   *
   * Nested interrupts are not supported.
   */

  DEBUGASSERT(current_regs == NULL);
  current_regs = regs;

  /* Deliver the IRQ */

  irq_dispatch(irq, regs);

#if defined(CONFIG_ARCH_FPU) || defined(CONFIG_ARCH_ADDRENV)
  /* Check for a context switch.  If a context switch occurred, then
   * current_regs will have a different value than it did on entry.  If an
   * interrupt level context switch has occurred, then restore the floating
   * point state and establish the correct address environment before
   * returning from the interrupt.
   */

  if (regs != current_regs)
    {
#ifdef CONFIG_ARCH_FPU
      /* Restore floating point registers */

      up_restorefpu((uint32_t*)current_regs);
#endif

#ifdef CONFIG_ARCH_ADDRENV
      /* Make sure that the address environment for the previously
       * running task is closed down gracefully (data caches dump,
       * MMU flushed) and set up the address environment for the new
       * thread at the head of the ready-to-run list.
       */

      (void)group_addrenv(NULL);
#endif
    }
#endif

  /* If a context switch occurred while processing the interrupt then
   * current_regs may have changed value.  If we return any value different
   * from the input regs, then the lower level will know that a context
   * switch occurred during interrupt processing.
   */

  regs = (uint32_t*)current_regs;

  /* Set current_regs to NULL to indicate that we are no longer in an
   * interrupt handler.
   */

  current_regs = NULL;
  return regs;
}
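The essential trick in the handler above is that irq_dispatch() may indirectly re-point current_regs at a different task's saved register area (through the scheduler paths shown in the later examples), so comparing the pointer captured on entry with current_regs after dispatch is enough to detect an interrupt-level context switch. The following host-compilable sketch isolates just that pointer comparison; fake_dispatch(), fake_handler(), NREGS and the register arrays are hypothetical stand-ins, not NuttX code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NREGS 16

static uint32_t *current_regs;          /* NULL means "not in an interrupt" */
static uint32_t task_b_regs[NREGS];     /* saved context of some other task (stub) */

/* Hypothetical dispatcher: when 'switch_wanted' is set it "schedules" the other
 * task by re-pointing current_regs at that task's saved register area, which is
 * exactly the side effect irq_dispatch() can have in the real handlers.
 */

static void fake_dispatch(int switch_wanted)
{
  if (switch_wanted)
    {
      current_regs = task_b_regs;
    }
}

static uint32_t *fake_handler(uint32_t *regs, int switch_wanted)
{
  assert(current_regs == NULL);         /* Nested interrupts are not supported */
  current_regs = regs;

  fake_dispatch(switch_wanted);

  if (regs != current_regs)
    {
      /* An interrupt-level context switch occurred.  This is the point where
       * the real handlers restore the FPU state and call group_addrenv(NULL).
       */

      printf("interrupt-level context switch detected\n");
    }

  regs = current_regs;                  /* Return the (possibly new) context */
  current_regs = NULL;                  /* No longer in an interrupt */
  return regs;
}

int main(void)
{
  uint32_t live_regs[NREGS] = {0};

  assert(fake_handler(live_regs, 0) == live_regs);    /* No context switch */
  assert(fake_handler(live_regs, 1) == task_b_regs);  /* Switched contexts */
  return 0;
}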
Example #2
File: nuttx.c Project: acassis/ros2_nuttx
void _exit(int status)
{
    struct tcb_s* tcb;

    /* Destroy the task at the head of the ready to run list. */

    (void)task_exit();

    /* Now, perform the context switch to the new ready-to-run task at the
     * head of the list.
     */

    tcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
    /* Make sure that the address environment for the previously running
     * task is closed down gracefully (data caches dump, MMU flushed) and
     * set up the address environment for the new thread at the head of
     * the ready-to-run list.
     */

    (void)group_addrenv(tcb);
#endif

    /* Then switch contexts */

    up_switchcontext(NULL, tcb);
}
Example #3
File: xtensa_exit.c Project: a1ien/nuttx
void _exit(int status)
{
    struct tcb_s *tcb;

    /* Disable interrupts.  They will be restored when the next task is
     * started.
     */

    (void)up_irq_save();

    sinfo("TCB=%p exiting\n", this_task());

#ifdef CONFIG_DUMP_ON_EXIT
    sinfo("Other tasks:\n");
    sched_foreach(_xtensa_dumponexit, NULL);
#endif

#if XCHAL_CP_NUM > 0
    /* Disable co-processor support for the task that is exiting. */

    tcb = this_task();
    xtensa_coproc_disable(&tcb->xcp.cpstate, XTENSA_CP_ALLSET);
#endif

    /* Destroy the task at the head of the ready to run list. */

    (void)task_exit();

    /* Now, perform the context switch to the new ready-to-run task at the
     * head of the list.
     */

    tcb = this_task();

#if XCHAL_CP_NUM > 0
    /* Set up the co-processor state for the newly started thread. */

    xtensa_coproc_restorestate(&tcb->xcp.cpstate);
#endif

#ifdef CONFIG_ARCH_ADDRENV
    /* Make sure that the address environment for the previously running
     * task is closed down gracefully (data caches dump, MMU flushed) and
     * set up the address environment for the new thread at the head of
     * the ready-to-run list.
     */

    (void)group_addrenv(tcb);
#endif

    /* Then switch contexts */

    xtensa_context_restore(tcb->xcp.regs);

    /* xtensa_context_restore() should not return but could if the software
     * interrupts are disabled.
     */

    PANIC();
}
Example #4
void _exit(int status)
{
  struct tcb_s *tcb;

  /* Disable interrupts.  They will be restored when the next
   * task is started.
   */

  (void)irqsave();

  slldbg("TCB=%p exiting\n", this_task());

#if defined(CONFIG_DUMP_ON_EXIT) && defined(CONFIG_DEBUG)
  slldbg("Other tasks:\n");
  sched_foreach(_up_dumponexit, NULL);
#endif

  /* Destroy the task at the head of the ready to run list. */

  (void)task_exit();

  /* Now, perform the context switch to the new ready-to-run task at the
   * head of the list.
   */

  tcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
  /* Make sure that the address environment for the previously running
   * task is closed down gracefully (data caches dump, MMU flushed) and
   * set up the address environment for the new thread at the head of
   * the ready-to-run list.
   */

  (void)group_addrenv(tcb);
#endif

  /* Then switch contexts */

  up_fullcontextrestore(tcb->xcp.regs);

  /* up_fullcontextrestore() should not return but could if the software
   * interrupts are disabled.
   */

  PANIC();
}
Example #5
void _exit(int status)
{
  FAR struct tcb_s* tcb;

  /* Disable interrupts.  Interrupts will remain disabled until
   * the new task is resumed below.
   */

  (void)irqsave();

  slldbg("TCB=%p exiting\n", (FAR struct tcb_s*)g_readytorun.head);

#if defined(CONFIG_DUMP_ON_EXIT) && defined(CONFIG_DEBUG)
  lldbg("Other tasks:\n");
  sched_foreach(_up_dumponexit, NULL);
#endif

  /* Destroy the task at the head of the ready to run list. */

  (void)task_exit();

  /* Now, perform the context switch to the new ready-to-run task at the
   * head of the list.
   */

  tcb = (FAR struct tcb_s*)g_readytorun.head;
  slldbg("New Active Task TCB=%p\n", tcb);

#ifdef CONFIG_ARCH_ADDRENV
  /* Make sure that the address environment for the previously running
   * task is closed down gracefully (data caches dump, MMU flushed) and
   * set up the address environment for the new thread at the head of
   * the ready-to-run list.
   */

  (void)group_addrenv(tcb);
#endif

  /* Then switch contexts */

  RESTORE_USERCONTEXT(tcb);
}
Example #6
void _exit(int status)
{
  struct tcb_s *tcb;

  /* Make sure that we are in a critical section with local interrupts
   * disabled.  The IRQ state will be restored when the next task is
   * started.
   */

  (void)enter_critical_section();

  sinfo("TCB=%p exiting\n", this_task());

#ifdef CONFIG_DUMP_ON_EXIT
  sinfo("Other tasks:\n");
  sched_foreach(_up_dumponexit, NULL);
#endif

  /* Destroy the task at the head of the ready to run list. */

  (void)task_exit();

  /* Now, perform the context switch to the new ready-to-run task at the
   * head of the list.
   */

  tcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
  /* Make sure that the address environment for the previously running
   * task is closed down gracefully (data caches dump, MMU flushed) and
   * set up the address environment for the new thread at the head of
   * the ready-to-run list.
   */

  (void)group_addrenv(tcb);
#endif

  /* Then switch contexts */

  up_fullcontextrestore(tcb->xcp.regs);
}
Example #7
File: nuttx.c Project: acassis/ros2_nuttx
void up_release_pending(void)
{
  struct tcb_s *rtcb = current_task;

  /* Merge the g_pendingtasks list into the ready-to-run task list */

  if (sched_mergepending())
    {
      struct tcb_s *nexttcb = this_task();

      /* The currently active task has changed!  We will need to switch
       * contexts.
       *
       * Update scheduler parameters.
       */

      sched_suspend_scheduler(rtcb);

#ifdef CONFIG_ARCH_ADDRENV
      /* Make sure that the address environment for the previously
       * running task is closed down gracefully (data caches dump,
       * MMU flushed) and set up the address environment for the new
       * thread at the head of the ready-to-run list.
       */

      (void)group_addrenv(nexttcb);
#endif
      /* Update scheduler parameters */

      sched_resume_scheduler(nexttcb);

      /* context switch */

      up_switchcontext(rtcb, nexttcb);
    }
}
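Examples #2 and #7 (and #21/#22 further below) share the same context-switch tail: suspend the scheduler state of the outgoing task, switch the address environment to the incoming task with group_addrenv(), resume the scheduler state of the incoming task, and only then perform the register-level switch. Below is a minimal sketch of that ordering in which every kernel call is replaced by a hypothetical stub so it can be compiled and traced on a host; the point it illustrates is that the address environment must already belong to the incoming task before its registers are restored.

#include <stdio.h>

struct tcb_s                            /* stub TCB, not the real NuttX type */
{
  const char *name;
};

/* Hypothetical stand-ins for the kernel services used in the examples */

static void sched_suspend_scheduler(struct tcb_s *tcb)
{
  printf("suspend scheduler state of %s\n", tcb->name);
}

static int group_addrenv(struct tcb_s *tcb)
{
  printf("address environment -> %s\n", tcb ? tcb->name : "(deferred)");
  return 0;
}

static void sched_resume_scheduler(struct tcb_s *tcb)
{
  printf("resume scheduler state of %s\n", tcb->name);
}

static void up_switchcontext(struct tcb_s *from, struct tcb_s *to)
{
  printf("switch registers %s -> %s\n", from ? from->name : "(discarded)", to->name);
}

/* The shared tail: the incoming task's address environment is instantiated
 * before its registers are restored, so it never runs in the outgoing task's
 * address space.
 */

static void switch_tail(struct tcb_s *rtcb, struct tcb_s *nexttcb)
{
  sched_suspend_scheduler(rtcb);
  (void)group_addrenv(nexttcb);         /* only in CONFIG_ARCH_ADDRENV builds */
  sched_resume_scheduler(nexttcb);
  up_switchcontext(rtcb, nexttcb);
}

int main(void)
{
  struct tcb_s old_task = { "old task" };
  struct tcb_s new_task = { "new task" };

  switch_tail(&old_task, &new_task);
  return 0;
}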
Example #8
void up_release_pending(void)
{
  FAR struct tcb_s *rtcb = this_task();

  slldbg("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the ready-to-run task list */

  /* sched_lock(); */
  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to switch
       * contexts.
       *
       * Update scheduler parameters.
       */

      sched_suspend_scheduler(rtcb);

      /* Are we operating in interrupt context? */

      if (IN_INTERRUPT())
        {
          /* Yes, then we have to do things differently.
           * Just copy the current context into the OLD rtcb.
           */

           SAVE_IRQCONTEXT(rtcb);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then setup so that the context will be performed on exit
           * from the interrupt.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          SET_IRQCONTEXT(rtcb);
        }

      /* Copy the exception context into the TCB of the task that
       * was currently active. if SAVE_USERCONTEXT returns a non-zero
       * value, then this is really the previously running task
       * restarting!
       */

      else if (!SAVE_USERCONTEXT(rtcb))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(rtcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          RESTORE_USERCONTEXT(rtcb);
        }
    }
}
Example #9
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
       PANIC();
    }
  else
    {
      struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
      bool switch_needed;

      slldbg("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * remove the head of the ready to run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.  sched_addreadytorun
       * will return true if the task was added at the head of the list.  We
       * will need to perform a context switch only if the exclusive OR of
       * the two calls is non-zero (i.e., one and only one of the calls
       * changes the head of the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run task
           * list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Are we in an interrupt handler? */

          if (current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;

              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then switch contexts.  Any necessary address environment
               * changes will be made when the interrupt returns.
               */

              up_restorestate(rtcb->xcp.regs);
            }

          /* Copy the exception context into the TCB at the (old) head of the
           * g_readytorun Task list. if up_saveusercontext returns a non-zero
           * value, then this is really the previously running task restarting!
           */

          else if (!up_saveusercontext(rtcb->xcp.regs))
            {
              /* Restore the exception context of the rtcb at the (new) head
               * of the g_readytorun task list.
               */

              rtcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
              /* Make sure that the address environment for the previously
               * running task is closed down gracefully (data caches dump,
               * MMU flushed) and set up the address environment for the new
               * thread at the head of the ready-to-run list.
               */

              (void)group_addrenv(rtcb);
#endif
              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then switch contexts */

              up_fullcontextrestore(rtcb->xcp.regs);
            }
        }
    }
}
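The switch_needed ^= sched_addreadytorun(tcb) idiom above follows directly from the comment: a context switch is required exactly when one, but not both, of the remove/add operations changed the head of the ready-to-run list (if both changed it, the reprioritized task simply ended up back at the head). A tiny stand-alone sketch of that truth table, with plain booleans standing in for the two scheduler calls:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
  /* removed_head and added_head model the return values of
   * sched_removereadytorun() and sched_addreadytorun(): true when the call
   * changed the task at the head of the ready-to-run list.
   */

  for (int removed_head = 0; removed_head <= 1; removed_head++)
    {
      for (int added_head = 0; added_head <= 1; added_head++)
        {
          bool switch_needed = (removed_head != 0);

          switch_needed ^= (added_head != 0);

          printf("removed_head=%d added_head=%d -> switch_needed=%d\n",
                 removed_head, added_head, (int)switch_needed);
        }
    }

  return 0;
}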
Example #10
void up_release_pending(void)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

  slldbg("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the g_readytorun task list */

  /* sched_lock(); */
  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to switch
       * contexts.
       *
       * Update scheduler parameters.
       */

      sched_suspend_scheduler(rtcb);

      /* Are we operating in interrupt context? */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

           up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* Copy the exception context into the TCB of the task that
       * was currently active. if up_saveusercontext returns a non-zero
       * value, then this is really the previously running task
       * restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(rtcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
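Examples #8, #9 and #10 all rely on the same idiom: up_saveusercontext(rtcb->xcp.regs) (or SAVE_USERCONTEXT(rtcb)) returns zero when the context is first saved and a non-zero value when that saved context is later restored, so the body of the else-if branch runs only on the saving pass. The behaviour is closely analogous to the standard setjmp()/longjmp() pair, which the runnable sketch below uses purely as an illustration; it is an analogy, not the NuttX implementation.

#include <setjmp.h>
#include <stdio.h>

static jmp_buf saved_context;           /* plays the role of rtcb->xcp.regs */

static void restore_saved_context(void)
{
  /* Analogous to up_fullcontextrestore()/RESTORE_USERCONTEXT(): resume at the
   * point where the context was saved, which makes the original "save" call
   * appear to return a non-zero value.
   */

  longjmp(saved_context, 1);
}

int main(void)
{
  if (setjmp(saved_context) == 0)
    {
      /* First pass: the context was just saved (the "!up_saveusercontext()"
       * case).  This is where the examples pick the new head of the
       * ready-to-run list and switch to it.
       */

      printf("context saved, switching away\n");
      restore_saved_context();
    }
  else
    {
      /* Second pass: the previously saved context is running again, i.e.
       * "this is really the previously running task restarting".
       */

      printf("previously running task restarting\n");
    }

  return 0;
}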
Example #11
void up_decodeirq(uint32_t* regs)
{
#ifdef CONFIG_SUPPRESS_INTERRUPTS
  lowsyslog("Unexpected IRQ\n");
  current_regs = regs;
  PANIC();
#else
  /* Decode the interrupt.  First, fetch the interrupt id register. */

  uint16_t irqentry = getreg16(DM320_INTC_IRQENTRY0);

  /* The irqentry value is an offset into a table.  Zero means no interrupt. */

  if (irqentry != 0)
    {
      /* If non-zero, then we can map the table offset into an IRQ number */

      int irq = (irqentry >> 2) - 1;

      /* Verify that the resulting IRQ number is valid */

      if ((unsigned)irq < NR_IRQS)
        {
          /* Mask and acknowledge the interrupt */

          up_maskack_irq(irq);

          /* Current regs non-zero indicates that we are processing an interrupt;
           * current_regs is also used to manage interrupt level context switches.
           *
           * Nested interrupts are not supported.
           */

          DEBUGASSERT(current_regs == NULL);
          current_regs = regs;

          /* Deliver the IRQ */

          irq_dispatch(irq, regs);

#if defined(CONFIG_ARCH_FPU) || defined(CONFIG_ARCH_ADDRENV)
          /* Check for a context switch.  If a context switch occurred, then
           * current_regs will have a different value than it did on entry.
           * If an interrupt level context switch has occurred, then restore
           * the floating point state and establish the correct address
           * environment before returning from the interrupt.
           */

          if (regs != current_regs)
            {
#ifdef CONFIG_ARCH_FPU
              /* Restore floating point registers */

              up_restorefpu((uint32_t*)current_regs);
#endif

#ifdef CONFIG_ARCH_ADDRENV
              /* Make sure that the address environment for the previously
               * running task is closed down gracefully (data caches dump,
               * MMU flushed) and set up the address environment for the new
               * thread at the head of the ready-to-run list.
               */

              (void)group_addrenv(NULL);
#endif
            }
#endif

          /* Set current_regs to NULL to indicate that we are no longer in
           * an interrupt handler.
           */

          current_regs = NULL;

          /* Unmask the last interrupt (global interrupts are still
           * disabled).
           */

          up_enable_irq(irq);
        }
    }
#endif
}
Example #12
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb = this_task();
  bool switch_needed;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
         (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  /* Remove the tcb task from the ready-to-run list.  If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed. In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the ready-to-run
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any new address environment needed by
           * the new thread will be instantiated before the return from
           * interrupt.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Get the context of the task at the head of the ready to
           * run list.
           */

          struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Reset scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
Example #13
FAR chipreg_t *up_doirq(uint8_t irq, FAR chipreg_t *regs)
{
  board_led_on(LED_INIRQ);

#ifdef CONFIG_SUPPRESS_INTERRUPTS

  lowsyslog(LOG_ERR, "Unexpected IRQ\n");
  IRQ_ENTER(regs);
  PANIC();
  return NULL; /* Won't get here */

#else
#ifdef CONFIG_ARCH_ADDRENV
  FAR chipreg_t *newregs;
#endif

  if (irq < NR_IRQS)
    {
      DECL_SAVESTATE();

      /* Indicate that we have entered IRQ processing logic */

      IRQ_ENTER(irq, regs);

      /* Deliver the IRQ */

      irq_dispatch(irq, regs);

#ifdef CONFIG_ARCH_ADDRENV
      /* If a context switch occurred, 'newregs' will hold the new context */

      newregs = IRQ_STATE();

      if (newregs != regs)
        {
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully and set up the
           * address environment for the new thread at the head of the
           * ready-to-run list.
           */

          (void)group_addrenv(NULL);
        }

      regs = newregs;

#else
      /* If a context switch occurred, 'regs' will hold the new context */

      regs = IRQ_STATE();
#endif

      /* Indicate that we are no longer in interrupt processing logic */

      IRQ_LEAVE(irq);
    }

  board_led_off(LED_INIRQ);
  return regs;
#endif
}
Example #14
void up_block_task(FAR struct tcb_s *tcb, tstate_t task_state)
{
  FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;
  bool switch_needed;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
         (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  /* dbg("Blocking TCB=%p\n", tcb); */

  /* Remove the tcb task from the ready-to-run list.  If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed. In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the g_readytorun
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Are we in an interrupt handler? */

      if (IN_INTERRUPT())
        {
          /* Yes, then we have to do things differently.
           * Just copy the current registers into the OLD rtcb.
           */

          SAVE_IRQCONTEXT(rtcb);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (FAR struct tcb_s*)g_readytorun.head;
          /* dbg("New Active Task TCB=%p\n", rtcb); */

          /* Then setup so that the context will be performed on exit
           * from the interrupt.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          SET_IRQCONTEXT(rtcb);
        }

      /* Copy the user C context into the TCB at the (old) head of the
       * g_readytorun Task list. if SAVE_USERCONTEXT returns a non-zero
       * value, then this is really the previously running task restarting!
       */

      else if (!SAVE_USERCONTEXT(rtcb))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (FAR struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
         /* Make sure that the address environment for the previously
          * running task is closed down gracefully (data caches dump,
          * MMU flushed) and set up the address environment for the new
          * thread at the head of the ready-to-run list.
          */

         (void)group_addrenv(rtcb);
#endif
          /* Then switch contexts */

          RESTORE_USERCONTEXT(rtcb);
        }
    }
}
Example #15
uint32_t *up_doirq(int irq, uint32_t* regs)
{
  board_autoled_on(LED_INIRQ);
#ifdef CONFIG_SUPPRESS_INTERRUPTS
  PANIC();
#else
  if ((unsigned)irq < NR_IRQS)
    {
      /* Current regs non-zero indicates that we are processing
       * an interrupt; g_current_regs is also used to manage
       * interrupt level context switches.
       *
       * Nested interrupts are not supported.
       */

      DEBUGASSERT(g_current_regs == NULL);
      g_current_regs = regs;

      /* Deliver the IRQ */

      irq_dispatch(irq, regs);

#if defined(CONFIG_ARCH_FPU) || defined(CONFIG_ARCH_ADDRENV)
      /* Check for a context switch.  If a context switch occurred, then
       * g_current_regs will have a different value than it did on entry.  If an
       * interrupt level context switch has occurred, then restore the floating
       * point state and establish the correct address environment before
       * returning from the interrupt.
       */

      if (regs != g_current_regs)
        {
#ifdef CONFIG_ARCH_FPU
          /* Restore floating point registers */

          up_restorefpu((uint32_t*)g_current_regs);
#endif

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(NULL);
#endif
        }
#endif
      /* Get the current value of regs... it may have changed because
       * of a context switch performed during interrupt processing.
       */

      regs = g_current_regs;

      /* Set g_current_regs to NULL to indicate that we are no longer in an
       * interrupt handler.
       */

      g_current_regs = NULL;
    }

  board_autoled_off(LED_INIRQ);
#endif
  return regs;
}
Example #16
uint32_t *arm_doirq(int irq, uint32_t *regs)
{
  board_led_on(LED_INIRQ);
#ifdef CONFIG_SUPPRESS_INTERRUPTS
  PANIC();
#else
  /* Nested interrupts are not supported */

  DEBUGASSERT(current_regs == NULL);

  /* Current regs non-zero indicates that we are processing an interrupt;
   * current_regs is also used to manage interrupt level context switches.
   */

  current_regs = regs;

  /* Mask and acknowledge the interrupt */

  up_maskack_irq(irq);

  /* Deliver the IRQ */

  irq_dispatch(irq, regs);

#if defined(CONFIG_ARCH_FPU) || defined(CONFIG_ARCH_ADDRENV)
  /* Check for a context switch.  If a context switch occurred, then
   * current_regs will have a different value than it did on entry.  If an
   * interrupt level context switch has occurred, then restore the floating
   * point state and establish the correct address environment before
   * returning from the interrupt.
   */

  if (regs != current_regs)
    {
#ifdef CONFIG_ARCH_FPU
      /* Restore floating point registers */

      up_restorefpu((uint32_t*)current_regs);
#endif

#ifdef CONFIG_ARCH_ADDRENV
      /* Make sure that the address environment for the previously
       * running task is closed down gracefully (data caches dump,
       * MMU flushed) and set up the address environment for the new
       * thread at the head of the ready-to-run list.
       */

      (void)group_addrenv(NULL);
#endif
    }
#endif

  /* Set current_regs to NULL to indicate that we are no longer in an
   * interrupt handler.
   */

  regs         = (uint32_t *)current_regs;
  current_regs = NULL;

  /* Unmask the last interrupt (global interrupts are still disabled) */

  up_enable_irq(irq);
#endif

  board_led_off(LED_INIRQ);
  return regs;
}
Example #17
void up_unblock_task(struct tcb_s *tcb)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
         (tcb->task_state <= LAST_BLOCKED_STATE));

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Reset its timeslice.  This is only meaningful for round
   * robin tasks but it doesn't hurt to do it for everything
   */

#if CONFIG_RR_INTERVAL > 0
  tcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
#endif

  /* Add the task in the correct location in the prioritized
   * g_readytorun task list
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed! We need to do
       * a context switch to the new task.
       *
       * Are we in an interrupt handler?
       */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

          /* Then switch contexts */

          up_restorestate(rtcb->xcp.regs);

#ifdef CONFIG_ARCH_ADDRENV
         /* Make sure that the address environment for the previously
          * running task is closed down gracefully (data caches dump,
          * MMU flushed) and set up the address environment for the new
          * thread at the head of the ready-to-run list.
          */

         (void)group_addrenv(rtcb);
#endif
        }

      /* We are not in an interrupt handler.  Copy the user C context
       * into the TCB of the task that was previously active.  if
       * up_saveusercontext returns a non-zero value, then this is really the
       * previously running task restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the new task that is ready to
           * run (probably tcb).  This is the new rtcb at the head of the
           * g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
         /* Make sure that the address environment for the previously
          * running task is closed down gracefully (data caches dump,
          * MMU flushed) and set up the address environment for the new
          * thread at the head of the ready-to-run list.
          */

         (void)group_addrenv(rtcb);
#endif
          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
Example #18
void up_unblock_task(struct tcb_s *tcb)
{
  struct tcb_s *rtcb = this_task();

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
         (tcb->task_state <= LAST_BLOCKED_STATE));

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Add the task in the correct location in the prioritized
   * ready-to-run task list
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed! We need to do
       * a context switch to the new task.
       */

      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_copystate(rtcb->xcp.regs, current_regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          current_regs = rtcb->xcp.regs;
        }

      /* We are not in an interrupt handler.  Copy the user C context
       * into the TCB of the task that was previously active.  if
       * up_saveusercontext returns a non-zero value, then this is really the
       * previously running task restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the new task that is ready to
           * run (probably tcb).  This is the new rtcb at the head of the
           * ready-to-run task list.
           */

          rtcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
         /* Make sure that the address environment for the previously
          * running task is closed down gracefully (data caches dump,
          * MMU flushed) and set up the address environment for the new
          * thread at the head of the ready-to-run list.
          */

         (void)group_addrenv(rtcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
Example #19
void up_release_pending(void)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

  slldbg("From TCB=%p\n", rtcb);

  /* Merge the g_pendingtasks list into the g_readytorun task list */

  /* sched_lock(); */
  if (sched_mergepending())
    {
      /* The currently active task has changed!  We will need to
       * switch contexts.  First check if we are operating in
       * interrupt context:
       */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

           up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;
          slldbg("New Active Task TCB=%p\n", rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Switch context to the context of the task at the head of the
           * ready to run list.
           */

          struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
Example #20
void up_decodeirq(uint32_t *regs)
{
#ifdef CONFIG_SUPPRESS_INTERRUPTS
    lowsyslog("Unexpected IRQ\n");
    current_regs = regs;
    PANIC();
#else
    int index;
    int irq;

    /* Read the IRQ vector status register.  Bits 3-10 provide the IRQ number
     * of the interrupt (the TABLE_ADDR was initialized to zero, so the
     * following masking should be unnecessary)
     */

    index = getreg32(LPC31_INTC_VECTOR0) & INTC_VECTOR_INDEX_MASK;
    if (index != 0)
    {
        /* Shift the index so that the range of IRQ numbers is in bits 0-7 (values
         * 1-127) and back off the IRQ number by 1 so that the numbering is zero-based
         */

        irq = (index >> INTC_VECTOR_INDEX_SHIFT) -1;

        /* Verify that the resulting IRQ number is valid */

        if ((unsigned)irq < NR_IRQS)
        {
            /* Mask and acknowledge the interrupt */

            up_maskack_irq(irq);

            /* Current regs non-zero indicates that we are processing an interrupt;
             * current_regs is also used to manage interrupt level context switches.
             *
             * Nested interrupts are not supported.
             */

            DEBUGASSERT(current_regs == NULL);
            current_regs = regs;

            /* Deliver the IRQ */

            irq_dispatch(irq, regs);

#if defined(CONFIG_ARCH_FPU) || defined(CONFIG_ARCH_ADDRENV)
            /* Check for a context switch.  If a context switch occurred, then
             * current_regs will have a different value than it did on entry.
             * If an interrupt level context switch has occurred, then restore
             * the floating point state and establish the correct address
             * environment before returning from the interrupt.
             */

            if (regs != current_regs)
            {
#ifdef CONFIG_ARCH_FPU
                /* Restore floating point registers */

                up_restorefpu((uint32_t*)current_regs);
#endif

#ifdef CONFIG_ARCH_ADDRENV
                /* Make sure that the address environment for the previously
                 * running task is closed down gracefully (data caches dump,
                 * MMU flushed) and set up the address environment for the new
                 * thread at the head of the ready-to-run list.
                 */

                (void)group_addrenv(NULL);
#endif
            }
#endif
            /* Set current_regs to NULL to indicate that we are no longer in an
             * interrupt handler.
             */

            current_regs = NULL;

            /* Unmask the last interrupt (global interrupts are still
             * disabled).
             */

            up_enable_irq(irq);
        }
    }
#endif
}
Example #21
File: nuttx.c Project: acassis/ros2_nuttx
/****************************************************************************
 * Name: up_block_task
 *
 * Description:
 *   The currently executing task at the head of
 *   the ready to run list must be stopped.  Save its context
 *   and move it to the inactive list specified by task_state.
 *
 *   This function is called only from the NuttX scheduling
 *   logic.  Interrupts will always be disabled when this
 *   function is called.
 *
 * Inputs:
 *   tcb: Refers to a task in the ready-to-run list (normally
 *     the task at the head of the list).  It must be
 *     stopped, its context saved and moved into one of the
 *     waiting task lists.  If it was the task at the head
 *     of the ready-to-run list, then a context switch to the
 *     new ready-to-run task must be performed.
 *   task_state: Specifies which waiting task list should
 *     hold the blocked task's TCB.
 *
 ****************************************************************************/
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  /* Verify that the context switch can be performed */

  if ((tcb->task_state < FIRST_READY_TO_RUN_STATE) ||
      (tcb->task_state > LAST_READY_TO_RUN_STATE))
    {
      warn("%s: task sched error\n", __func__);
      return;
    }
  else
    {
      struct tcb_s *rtcb = current_task;
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list.  If we
       * are blocking the task at the head of the task list (the
       * most likely case), then a context switch to the next
       * ready-to-run task is needed. In this case, it should
       * also be true that rtcb == tcb.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Add the task to the specified blocked task list */

      sched_addblocked(tcb, (tstate_t)task_state);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          struct tcb_s *nexttcb;

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* this part should not be executed in interrupt context */

          if (up_interrupt_context())
            {
              panic("%s: %d\n", __func__, __LINE__);
            }

          /* If there are any pending tasks, then add them to the
           * ready-to-run task list now.  Normally up_release_pending(),
           * called from sched_unlock(), would do this when preemption is
           * re-enabled; but since the task is blocking itself, it is OK to
           * do it here.
           */

          if (g_pendingtasks.head)
            {
              warn("Disable preemption failed for task block itself\n");
              sched_mergepending();
            }

          nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Reset scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* context switch */

          up_switchcontext(rtcb, nexttcb);
        }
    }
}
Example #22
File: nuttx.c Project: acassis/ros2_nuttx
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
      )
    {
      warn("%s: task sched error\n", __func__);
      return;
    }
  else
    {
      struct tcb_s *rtcb = current_task;
      bool switch_needed;

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * remove the head of the ready to run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Return the task to the ready-to-run task list.  sched_addreadytorun
       * will return true if the task was added at the head of the list.  We
       * will need to perform a context switch only if the exclusive OR of
       * the two calls is non-zero (i.e., one and only one of the calls
       * changes the head of the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed && !up_interrupt_context())
        {
          struct tcb_s *nexttcb;

          /* If there are any pending tasks, then add them to the
           * ready-to-run task list now.  Normally up_release_pending(),
           * called from sched_unlock(), would do this when preemption is
           * re-enabled; doing the merge here is OK as well.
           */

          if (g_pendingtasks.head)
            {
              warn("Disable preemption failed for reprioritize task\n");
              sched_mergepending();
            }

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Get the TCB of the new task to run */

          nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
          /* Make sure that the address environment for the previously
           * running task is closed down gracefully (data caches dump,
           * MMU flushed) and set up the address environment for the new
           * thread at the head of the ready-to-run list.
           */

          (void)group_addrenv(nexttcb);
#endif
          /* Update scheduler parameters */

          sched_resume_scheduler(nexttcb);

          /* context switch */

          up_switchcontext(rtcb, nexttcb);
        }
    }
}
Example #23
File: up_blocktask.c Project: dagar/NuttX
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
  struct tcb_s *rtcb = this_task();
  bool switch_needed;

  /* Verify that the context switch can be performed */

  DEBUGASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
              (tcb->task_state <= LAST_READY_TO_RUN_STATE));

  /* Remove the tcb task from the ready-to-run list.  If we
   * are blocking the task at the head of the task list (the
   * most likely case), then a context switch to the next
   * ready-to-run task is needed. In this case, it should
   * also be true that rtcb == tcb.
   */

  switch_needed = sched_removereadytorun(tcb);

  /* Add the task to the specified blocked task list */

  sched_addblocked(tcb, (tstate_t)task_state);

  /* If there are any pending tasks, then add them to the ready-to-run
   * task list now
   */

  if (g_pendingtasks.head)
    {
      switch_needed |= sched_mergepending();
    }

  /* Now, perform the context switch if one is needed */

  if (switch_needed)
    {
      /* Update scheduler parameters */

      sched_suspend_scheduler(rtcb);

      /* Are we in an interrupt handler? */

      if (g_current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the g_current_regs into the OLD rtcb.
           */

          up_copystate(rtcb->xcp.regs, g_current_regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          g_current_regs = rtcb->xcp.regs;
        }

      /* Copy the user C context into the TCB at the (old) head of the
       * ready-to-run Task list. if up_saveusercontext returns a non-zero
       * value, then this is really the previously running task restarting!
       */

      else if (!up_saveusercontext(rtcb->xcp.regs))
        {
          /* Restore the exception context of the rtcb at the (new) head
           * of the ready-to-run task list.
           */

          rtcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
         /* Make sure that the address environment for the previously
          * running task is closed down gracefully (data caches dump,
          * MMU flushed) and set up the address environment for the new
          * thread at the head of the ready-to-run list.
          */

         (void)group_addrenv(rtcb);
#endif
          /* Reset scheduler parameters */

          sched_resume_scheduler(rtcb);

          /* Then switch contexts */

          up_fullcontextrestore(rtcb->xcp.regs);
        }
    }
}
Example #24
void up_unblock_task(struct tcb_s *tcb)
{
  struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;

  /* Verify that the context switch can be performed */

  ASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
         (tcb->task_state <= LAST_BLOCKED_STATE));

  /* Remove the task from the blocked task list */

  sched_removeblocked(tcb);

  /* Reset its timeslice.  This is only meaningful for round
   * robin tasks but it doesn't hurt to do it for everything
   */

#if CONFIG_RR_INTERVAL > 0
  tcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
#endif

  /* Add the task in the correct location in the prioritized
   * g_readytorun task list
   */

  if (sched_addreadytorun(tcb))
    {
      /* The currently active task has changed! We need to do
       * a context switch to the new task.
       *
       * Are we in an interrupt handler?
       */

      if (current_regs)
        {
          /* Yes, then we have to do things differently.
           * Just copy the current_regs into the OLD rtcb.
           */

          up_savestate(rtcb->xcp.regs);

          /* Restore the exception context of the rtcb at the (new) head
           * of the g_readytorun task list.
           */

          rtcb = (struct tcb_s*)g_readytorun.head;

          /* Then switch contexts.  Any necessary address environment
           * changes will be made when the interrupt returns.
           */

          up_restorestate(rtcb->xcp.regs);
        }

      /* No, then we will need to perform the user context switch */

      else
        {
          /* Restore the exception context of the new task that is ready to
           * run (probably tcb).  This is the new rtcb at the head of the
           * g_readytorun task list.
           */

          struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;

#ifdef CONFIG_ARCH_ADDRENV
         /* Make sure that the address environment for the previously
          * running task is closed down gracefully (data caches dump,
          * MMU flushed) and set up the address environment for the new
          * thread at the head of the ready-to-run list.
          */

         (void)group_addrenv(nexttcb);
#endif
          /* Then switch contexts */

          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

          /* up_switchcontext forces a context switch to the task at the
           * head of the ready-to-run list.  It does not 'return' in the
           * normal sense.  When it does return, it is because the blocked
           * task is again ready to run and has execution priority.
           */
        }
    }
}
Example #25
void up_decodeirq(uint32_t* regs)
{
#ifdef CONFIG_SUPPRESS_INTERRUPTS
  lowsyslog(LOG_ERR, "Unexpected IRQ\n");
  current_regs = regs;
  PANIC();
#else
  uint32_t regval;
  int irq;

  /* Current regs non-zero indicates that we are processing an interrupt;
   * current_regs is also used to manage interrupt level context switches.
   *
   * Nested interrupts are not supported.
   */

  DEBUGASSERT(current_regs == NULL);
  current_regs = regs;

  /* Loop while there are pending interrupts to be processed */

  do
    {
      /* Decode the interrupt.  First, fetch the NIVECSR register. */

      regval = getreg32(IMX_AITC_NIVECSR);

      /* The MS 16 bits of the NIVECSR register contains vector index for the
       * highest pending normal interrupt.
       */

      irq = regval >> AITC_NIVECSR_NIVECTOR_SHIFT;

      /* If irq < 64, then this is the IRQ.  If there is no pending interrupt,
       * then irq will be >= 64 (it will be 0xffff for illegal source).
       */

      if (irq < NR_IRQS)
        {
          /* Mask and acknowledge the interrupt */

          up_maskack_irq(irq);

          /* Deliver the IRQ */

          irq_dispatch(irq, regs);

#if defined(CONFIG_ARCH_FPU) || defined(CONFIG_ARCH_ADDRENV)
          /* Check for a context switch.  If a context switch occurred, then
           * current_regs will have a different value than it did on entry.
           * If an interrupt level context switch has occurred, then restore
           * the floating point state and establish the correct address
           * environment before returning from the interrupt.
           */

          if (regs != current_regs)
            {
#ifdef CONFIG_ARCH_FPU
              /* Restore floating point registers */

              up_restorefpu((uint32_t*)current_regs);
#endif

#ifdef CONFIG_ARCH_ADDRENV
              /* Make sure that the address environment for the previously
               * running task is closed down gracefully (data caches dump,
               * MMU flushed) and set up the address environment for the new
               * thread at the head of the ready-to-run list.
               */

              (void)group_addrenv(NULL);
#endif
            }
#endif
          /* Unmask the last interrupt (global interrupts are still
           * disabled).
           */

          up_enable_irq(irq);
        }
    }
  while (irq < NR_IRQS);

  /* Set current_regs to NULL to indicate that we are no longer in
   * an interrupt handler.
   */

  current_regs = NULL;
#endif
}
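The do/while loop above keeps re-reading the interrupt controller's vector register until no normal interrupt is pending, so several IRQs can be serviced in one exception entry while current_regs stays set for the whole batch. The following host-compilable sketch shows only that drain-loop shape; fake_pending_irq() and its queue are hypothetical stand-ins for the NIVECSR read.

#include <stdio.h>

#define NR_IRQS  64
#define NO_IRQ   0xffffu                /* mimics the "no pending interrupt" case */

/* Hypothetical interrupt controller: hands out pending IRQ numbers from a
 * small queue and then reports NO_IRQ once the queue is empty.
 */

static unsigned int fake_pending_irq(void)
{
  static const unsigned int pending[] = { 3, 17, 42 };
  static unsigned int next = 0;

  return (next < sizeof(pending) / sizeof(pending[0])) ? pending[next++] : NO_IRQ;
}

int main(void)
{
  unsigned int irq;

  /* Drain every pending interrupt before leaving the handler */

  do
    {
      irq = fake_pending_irq();
      if (irq < NR_IRQS)
        {
          printf("dispatching irq %u\n", irq);  /* irq_dispatch() in the real code */
        }
    }
  while (irq < NR_IRQS);

  return 0;
}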
Example #26
File: lm32_swint.c Project: a1ien/nuttx
int lm32_swint(int irq, FAR void *context)
{
  uint32_t *regs = (uint32_t *)context;

  DEBUGASSERT(g_current_regs == NULL);
  g_current_regs = regs;
 
  /* Software interrupt 0 is invoked with REG_A0 (REG_X10) = system call
   * command and REG_A1-6 = variable number of
   * arguments depending on the system call.
   */

#ifdef CONFIG_DEBUG_SYSCALL_INFO
  svcinfo("Entry: regs: %p cmd: %d\n", regs, regs[REG_A0]);
  up_registerdump(regs);
#endif

  /* Handle the SWInt according to the command in $a0 */

  switch (regs[REG_A0])
    {
      /* A0=SYS_restore_context: This is a restore context command:
       *
       *   void up_fullcontextrestore(uint32_t *restoreregs) noreturn_function;
       *
       * At this point, the following values are saved in context:
       *
       *   A0 = SYS_restore_context
       *   A1 = restoreregs
       *
       * In this case, we simply need to set g_current_regs to restore register
       * area referenced in the saved R1. context == g_current_regs is the normal
       * exception return.  By setting g_current_regs = context[R1], we force
       * the return to the saved context referenced in $a1.
       */

      case SYS_restore_context:
        {
          DEBUGASSERT(regs[REG_A1] != 0);
          g_current_regs = (uint32_t *)regs[REG_A1];
        }
        break;

      /* A0=SYS_switch_context: This is a switch context command:
       *
       *   void up_switchcontext(uint32_t *saveregs, uint32_t *restoreregs);
       *
       * At this point, the following values are saved in context:
       *
       *   A0 = SYS_switch_context
       *   A1 = saveregs
       *   A2 = restoreregs
       *
       * In this case, we save the context registers to the save register
       * area reference by the saved contents of R5 and then set
       * g_current_regs to the save register area referenced by the saved
       * contents of R6.
       */

      case SYS_switch_context:
        {
          DEBUGASSERT(regs[REG_A1] != 0 && regs[REG_A2] != 0);
          lm32_copystate((uint32_t *)regs[REG_A1], regs);
          g_current_regs = (uint32_t *)regs[REG_A2];
        }
        break;

      /* A0=SYS_syscall_return: This is a syscall return command:
       *
       *   void up_syscall_return(void);
       *
       * At this point, the following values are saved in context:
       *
       *   A0 = SYS_syscall_return
       *
       * We need to restore the saved return address and return in
       * unprivileged thread mode.
       */

#ifdef CONFIG_BUILD_KERNEL
      case SYS_syscall_return:
        {
          struct tcb_s *rtcb = sched_self();
          int index = (int)rtcb->xcp.nsyscalls - 1;

          /* Make sure that there is a saved syscall return address. */

          DEBUGASSERT(index >= 0);

          /* Setup to return to the saved syscall return address in
           * the original mode.
           */

          g_current_regs[REG_EPC] = rtcb->xcp.syscall[index].sysreturn;
#error "Missing logic -- need to restore the original mode"
          rtcb->xcp.nsyscalls   = index;
        }
        break;
#endif

      /* This is not an architecture-specific system call.  If NuttX is built
       * as a standalone kernel with a system call interface, then all of the
       * additional system calls must be handled as in the default case.
       */

      default:
        {
#ifdef CONFIG_BUILD_KERNEL
          FAR struct tcb_s *rtcb = sched_self();
          int index = rtcb->xcp.nsyscalls;

          /* Verify that the SYS call number is within range */

          DEBUGASSERT(g_current_regs[REG_A0] < SYS_maxsyscall);

          /* Make sure that there is no saved syscall return address.  We
           * cannot yet handle nested system calls.
           */

          DEBUGASSERT(index < CONFIG_SYS_NNEST);

          /* Setup to return to dispatch_syscall in privileged mode. */

          rtcb->xcp.syscall[index].sysreturn = regs[REG_EPC];
#error "Missing logic -- Need to save mode"
          rtcb->xcp.nsyscalls  = index + 1;

          regs[REG_EPC] = (uint32_t)dispatch_syscall;
#error "Missing logic -- Need to set privileged mode"

          /* Offset R0 to account for the reserved values */

          g_current_regs[REG_A0] -= CONFIG_SYS_RESERVED;
#else
          svcerr("ERROR: Bad SYS call: %d\n", regs[REG_A0]);
#endif
        }
        break;
    }

  /* Report what happened.  That might be difficult in the case of a context switch */

#ifdef CONFIG_DEBUG_SYSCALL_INFO
  if (regs != g_current_regs)
    {
      svcinfo("SWInt Return: Context switch!\n");
      up_registerdump((const uint32_t *)g_current_regs);
    }
  else
    {
      svcinfo("SWInt Return: %d\n", regs[REG_A0]);
    }
#endif


#if defined(CONFIG_ARCH_FPU) || defined(CONFIG_ARCH_ADDRENV)
  /* Check for a context switch.  If a context switch occurred, then
   * g_current_regs will have a different value than it did on entry.  If an
   * interrupt level context switch has occurred, then restore the floating
   * point state and establish the correct address environment before
   * returning from the interrupt.
   */

  if (regs != g_current_regs)
    {
#ifdef CONFIG_ARCH_FPU
      /* Restore floating point registers */

      up_restorefpu((uint32_t *)g_current_regs);
#endif

#ifdef CONFIG_ARCH_ADDRENV
      /* Make sure that the address environment for the previously
       * running task is closed down gracefully (data caches dump,
       * MMU flushed) and set up the address environment for the new
       * thread at the head of the ready-to-run list.
       */

      (void)group_addrenv(NULL);
#endif
    }
#endif

  return OK;
}
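The SYS_switch_context case above performs the switch purely by manipulating register areas: the live context is copied into the area named by A1, and g_current_regs is re-pointed at the area named by A2 so that the normal exception return restores the other task. A small host-compilable sketch of that mechanism, using plain arrays as stand-in register save areas (copy_state() and the array names are hypothetical):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define XCPTCONTEXT_REGS 32             /* size of a register save area (stub) */

static uint32_t *g_current_regs;                 /* registers restored on exception return */
static uint32_t  task_a_area[XCPTCONTEXT_REGS];  /* stand-in for task A's xcp.regs */
static uint32_t  task_b_area[XCPTCONTEXT_REGS];  /* stand-in for task B's xcp.regs */

/* Stand-in for lm32_copystate(): save the live context into 'dest' */

static void copy_state(uint32_t *dest, const uint32_t *src)
{
  memcpy(dest, src, XCPTCONTEXT_REGS * sizeof(uint32_t));
}

int main(void)
{
  uint32_t live_regs[XCPTCONTEXT_REGS] = { 0 };

  /* "SYS_switch_context": save the current context into task A's area, then
   * arrange for the exception return to restore task B's saved context.
   */

  g_current_regs = live_regs;
  copy_state(task_a_area, g_current_regs);
  g_current_regs = task_b_area;

  /* The exception return path would now restore task B's registers */

  assert(g_current_regs == task_b_area);
  return 0;
}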