Example #1
int task_restart(pid_t pid)
{
  FAR struct tcb_s *rtcb;
  FAR struct task_tcb_s *tcb;
  FAR dq_queue_t *tasklist;
  irqstate_t flags;
  int errcode;
#ifdef CONFIG_SMP
  int cpu;
#endif
  int ret;

  /* Check if the task to restart is the calling task */

  rtcb = this_task();
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      errcode = ENOSYS;
      goto errout;
    }

  /* We are restarting some other task than ourselves.  Make sure that the
   * task does not change its state while we are executing.  In the single
   * CPU case this could be done by disabling pre-emption.  But we will use
   * a little stronger medicine in the SMP case:  The task may be running
   * on another CPU.
   */

  flags = enter_critical_section();

  /* Find the TCB associated with the matching pid */

  tcb = (FAR struct task_tcb_s *)sched_gettcb(pid);
#ifndef CONFIG_DISABLE_PTHREAD
  if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
#else
  if (!tcb)
#endif
    {
      /* There is no TCB with this pid or, if there is, it is not a task. */

      errcode = ESRCH;
      goto errout_with_lock;
    }

#ifdef CONFIG_SMP
  /* If the task is running on another CPU, then pause that CPU.  We can
   * then manipulate the TCB of the restarted task and, when we resume
   * that CPU, the restart will take effect.
   */

  cpu = sched_cpu_pause(&tcb->cmn);
#endif /* CONFIG_SMP */

  /* Try to recover from any bad states */

  task_recover((FAR struct tcb_s *)tcb);

  /* Kill any children of this thread */

#ifdef HAVE_GROUP_MEMBERS
  (void)group_killchildren(tcb);
#endif

  /* Remove the TCB from whatever list it is in.  After this point, the TCB
   * should no longer be accessible to the system
   */

#ifdef CONFIG_SMP
  tasklist = TLIST_HEAD(tcb->cmn.task_state, tcb->cmn.cpu);
#else
  tasklist = TLIST_HEAD(tcb->cmn.task_state);
#endif

  dq_rem((FAR dq_entry_t *)tcb, tasklist);
  tcb->cmn.task_state = TSTATE_TASK_INVALID;

  /* Deallocate anything left in the TCB's queues */

  sig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */

  /* Reset the current task priority  */

  tcb->cmn.sched_priority = tcb->cmn.init_priority;

  /* The task should restart with pre-emption disabled and not in a critical
   * section.
   */

  tcb->cmn.lockcount = 0;
#ifdef CONFIG_SMP
  tcb->cmn.irqcount  = 0;
#endif

  /* Reset the base task priority and the number of pending reprioritizations */

#ifdef CONFIG_PRIORITY_INHERITANCE
  tcb->cmn.base_priority = tcb->cmn.init_priority;
#  if CONFIG_SEM_NNESTPRIO > 0
  tcb->cmn.npend_reprio = 0;
#  endif
#endif

  /* Re-initialize the processor-specific portion of the TCB.  This will
   * reset the entry point and the start-up parameters
   */

  up_initial_state((FAR struct tcb_s *)tcb);

  /* Add the task to the inactive task list */

  dq_addfirst((FAR dq_entry_t *)tcb, (FAR dq_queue_t *)&g_inactivetasks);
  tcb->cmn.task_state = TSTATE_TASK_INACTIVE;

#ifdef CONFIG_SMP
  /* Resume the paused CPU (if any) */

  if (cpu >= 0)
    {
      ret = up_cpu_resume(cpu);
      if (ret < 0)
        {
          errcode = -ret;
          goto errout_with_lock;
        }
    }
#endif /* CONFIG_SMP */

  leave_critical_section(flags);

  /* Activate the task. */

  ret = task_activate((FAR struct tcb_s *)tcb);
  if (ret != OK)
    {
      (void)task_terminate(pid, true);
      errcode = -ret;
      goto errout;
    }

  return OK;

errout_with_lock:
  leave_critical_section(flags);
errout:
  set_errno(errcode);
  return ERROR;
}
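
The sketch below is not part of the NuttX sources; the supervisor helper and worker_pid are hypothetical. It illustrates the calling convention visible above: task_restart() reports failure by returning ERROR and setting errno, and restarting the calling task itself fails with ENOSYS in this implementation.

#include <sched.h>
#include <errno.h>
#include <stdio.h>

/* Hypothetical supervisor helper: try to restart a (non-self) worker
 * task, reporting the errno-based failure reason on error.
 */

static int restart_worker(pid_t worker_pid)
{
  if (task_restart(worker_pid) != OK)
    {
      /* Per the listing above, errno will be ENOSYS for a self-restart,
       * ESRCH for a stale pid, or a negated task_activate() or
       * up_cpu_resume() error code.
       */

      fprintf(stderr, "task_restart(%d) failed: errno=%d\n",
              (int)worker_pid, errno);
      return ERROR;
    }

  return OK;
}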
Example #2
bool sched_removereadytorun(FAR struct tcb_s *rtcb)
{
  FAR dq_queue_t *tasklist;
  bool doswitch = false;
  int cpu;

  /* Which CPU (if any) is the task running on?  Which task list holds the
   * TCB?
   */

  cpu      = rtcb->cpu;
  tasklist = TLIST_HEAD(rtcb->task_state, cpu);

  /* Check if the TCB to be removed is at the head of a ready-to-run list.
   * For the case of SMP, there are two lists involved:  (1) the
   * g_readytorun list that holds non-running tasks that have not been
   * assigned to a CPU, and (2) the g_assignedtasks[] lists which hold
   * tasks assigned a CPU, including the task that is currently running on
   * that CPU.  Only the latter list contains the currently active task,
   * and only removing the head of that list can result in a context
   * switch.
   *
   * rtcb->blink == NULL will tell us if the TCB is at the head of the
   * ready-to-run list and, hence, a candidate for the new running task.
   *
   * If so, then the tasklist RUNNABLE attribute will inform us if the list
   * holds the currently executing task and, hence, if a context switch
   * should occur.
   */

  if (rtcb->blink == NULL && TLIST_ISRUNNABLE(rtcb->task_state))
    {
      FAR struct tcb_s *nxttcb;
      FAR struct tcb_s *rtrtcb;
      int me;

      /* There must always be at least one task in the list (the IDLE task)
       * after the TCB being removed.
       */

      nxttcb = (FAR struct tcb_s *)rtcb->flink;
      DEBUGASSERT(nxttcb != NULL);

      /* If we are modifying the head of some assigned task list other than
       * our own, we will need to stop that CPU.
       */

      me = this_cpu();
      if (cpu != me)
        {
          DEBUGVERIFY(up_cpu_pause(cpu));
        }

      /* The task is running but the CPU that it was running on has been
       * paused.  We can now safely remove its TCB from the ready-to-run
       * task list.  In the SMP case this may be either the g_readytorun
       * list or the g_assignedtasks[cpu] list.
       */

      dq_rem((FAR dq_entry_t *)rtcb, tasklist);

      /* Which task will go at the head of the list?  It will be either the
       * next tcb in the assigned task list (nxttcb) or a TCB in the
       * g_readytorun list.  We can only select a task from that list if
       * the affinity mask includes the current CPU.
       *
       * REVISIT: What should we do, if anything, if pre-emption is locked
       * by another CPU?  Should we just use nxttcb?  Should we select
       * from the pending task list instead of the g_readytorun list?
       */

      for (rtrtcb = (FAR struct tcb_s *)g_readytorun.head;
           rtrtcb != NULL && !CPU_ISSET(cpu, &rtrtcb->affinity);
           rtrtcb = (FAR struct tcb_s *)rtrtcb->flink);

      /* Did we find a task in the g_readytorun list?  Which task should
       * we use?  We decide strictly by the priority of the two tasks:
       * Either (1) the task currently at the head of the g_assignedtasks[cpu]
       * list (nxttcb) or (2) the highest priority task from the
       * g_readytorun list with matching affinity (rtrtcb).
       */

      if (rtrtcb != NULL && rtrtcb->sched_priority >= nxttcb->sched_priority)
        {
          FAR struct tcb_s *tmptcb;

          /* The TCB at the head of the ready to run list has the higher
           * priority.  Remove that task from the head of the g_readytorun
           * list and add to the head of the g_assignedtasks[cpu] list.
           */

          tmptcb = (FAR struct tcb_s *)
            dq_remfirst((FAR dq_queue_t *)&g_readytorun);

          DEBUGASSERT(tmptcb == rtrtcb);

          dq_addfirst((FAR dq_entry_t *)tmptcb, tasklist);

          tmptcb->cpu = cpu;
          nxttcb = tmptcb;
        }

      /* Will pre-emption be disabled after the switch?  If the lockcount is
       * greater than zero, then this task/this CPU holds the scheduler lock.
       */

      if (nxttcb->lockcount > 0)
        {
          /* Yes... make sure that scheduling logic knows about this */

          spin_setbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
                      &g_cpu_schedlock);
        }
      else
        {
          /* No.. we may need to release our hold on the lock. */

          spin_clrbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
                      &g_cpu_schedlock);
        }

      /* Interrupts may be disabled after the switch.  If irqcount is greater
       * than zero, then this task/this CPU holds the IRQ lock
       */

      if (nxttcb->irqcount > 0)
        {
          /* Yes... make sure that scheduling logic knows about this */

          spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                      &g_cpu_irqlock);
        }
      else
        {
          /* No.. we may need to release our hold on the irq state. */

          spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                      &g_cpu_irqlock);
        }

      nxttcb->task_state = TSTATE_TASK_RUNNING;

      /* All done, restart the other CPU (if it was paused). */

      doswitch = true;
      if (cpu != me)
        {
          /* In this case we will not want to report a context switch to
           * this CPU.  Only the other CPU is affected.
           */

          DEBUGVERIFY(up_cpu_resume(cpu));
          doswitch = false;
        }
    }
  else
    {
      /* The task is not running.  Just remove its TCB from the ready-to-run
       * list.  In the SMP case this may be either the g_readytorun list or
       * the g_assignedtasks[cpu] list.
       */

      dq_rem((FAR dq_entry_t *)rtcb, tasklist);
    }

  /* Since the TCB is no longer in any list, it is now invalid */

  rtcb->task_state = TSTATE_TASK_INVALID;
  return doswitch;
}
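
For context, here is a condensed sketch of how the doswitch return value is typically consumed, modeled loosely on the arch-level task-blocking logic; the function name and the elided context-switch step are illustrative only.

/* Illustrative only: move a TCB from the ready-to-run list to a blocked
 * list, switching context only if the running task was displaced.
 */

static void block_task_sketch(FAR struct tcb_s *tcb, tstate_t task_state)
{
  /* Remove the TCB from the ready-to-run list.  The return value tells
   * us whether the head of a runnable list changed.
   */

  bool switch_needed = sched_removereadytorun(tcb);

  /* Add the TCB to the blocked task list matching the new task state */

  sched_addblocked(tcb, task_state);

  if (switch_needed)
    {
      /* ... perform the arch-specific context switch to the new head of
       * the ready-to-run list ...
       */
    }
}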
Example #3
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
  irqstate_t flags;
  int cpu;
  int me;

  sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);

  /* Make sure that interrupts are disabled */

  flags = enter_critical_section();

  /* Refuse to handle nested signal actions */

  if (!tcb->xcp.sigdeliver)
    {
      /* First, handle some special cases when the signal is being delivered
       * to a task that is currently executing on any CPU.
       */

      sinfo("rtcb=0x%p CURRENT_REGS=0x%p\n", this_task(), CURRENT_REGS);

      if (tcb->task_state == TSTATE_TASK_RUNNING)
        {
          me  = this_cpu();
          cpu = tcb->cpu;

          /* CASE 1:  We are not in an interrupt handler and a task is
           * signaling itself for some reason.
           */

          if (cpu == me && !CURRENT_REGS)
            {
              /* In this case just deliver the signal now.
               * REVISIT:  Signal handler will run in a critical section!
               */

              sigdeliver(tcb);
            }

          /* CASE 2:  The task that needs to receive the signal is running.
           * This could happen if the task is running on another CPU OR if
           * we are in an interrupt handler and the task is running on this
           * CPU.  In the former case, we will have to PAUSE the other CPU
           * first.  But in either case, we will have to modify the return
           * state as well as the state in the TCB.
           */

          else
            {
              /* If we are signaling a task running on the other CPU, we
               * have to PAUSE the other CPU.
               */

              if (cpu != me)
                {
                  /* Pause the CPU */

                  up_cpu_pause(cpu);

                  /* Wait while the pause request is pending */

                  while (up_cpu_pausereq(cpu))
                    {
                    }

                  /* Now tcb on the other CPU can be accessed safely */

                  /* Save the return PC, XPSR and either the BASEPRI or
                   * PRIMASK registers (and perhaps also the LR).  These
                   * will be restored by the signal trampoline after the
                   * signal has been delivered.
                   */

                  tcb->xcp.sigdeliver       = (FAR void *)sigdeliver;
                  tcb->xcp.saved_pc         = tcb->xcp.regs[REG_PC];
#ifdef CONFIG_ARMV7M_USEBASEPRI
                  tcb->xcp.saved_basepri    = tcb->xcp.regs[REG_BASEPRI];
#else
                  tcb->xcp.saved_primask    = tcb->xcp.regs[REG_PRIMASK];
#endif
                  tcb->xcp.saved_xpsr       = tcb->xcp.regs[REG_XPSR];
#ifdef CONFIG_BUILD_PROTECTED
                  tcb->xcp.saved_lr         = tcb->xcp.regs[REG_LR];
#endif

                  /* Then set up vector to the trampoline with interrupts
                   * disabled.  We must already be in privileged thread mode
                   * to be here.
                   */

                  tcb->xcp.regs[REG_PC]      = (uint32_t)up_sigdeliver;
#ifdef CONFIG_ARMV7M_USEBASEPRI
                  tcb->xcp.regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY;
#else
                  tcb->xcp.regs[REG_PRIMASK] = 1;
#endif
                  tcb->xcp.regs[REG_XPSR]    = ARMV7M_XPSR_T;
#ifdef CONFIG_BUILD_PROTECTED
                  tcb->xcp.regs[REG_LR]      = EXC_RETURN_PRIVTHR;
#endif
                }
              else
                {
                  /* tcb is running on the same CPU */

                  /* Save the return PC, XPSR and either the BASEPRI or
                   * PRIMASK registers (and perhaps also the LR).  These
                   * will be restored by the signal trampoline after the
                   * signal has been delivered.
                   */

                  tcb->xcp.sigdeliver       = (FAR void *)sigdeliver;
                  tcb->xcp.saved_pc         = CURRENT_REGS[REG_PC];
#ifdef CONFIG_ARMV7M_USEBASEPRI
                  tcb->xcp.saved_basepri    = CURRENT_REGS[REG_BASEPRI];
#else
                  tcb->xcp.saved_primask    = CURRENT_REGS[REG_PRIMASK];
#endif
                  tcb->xcp.saved_xpsr       = CURRENT_REGS[REG_XPSR];
#ifdef CONFIG_BUILD_PROTECTED
                  tcb->xcp.saved_lr         = CURRENT_REGS[REG_LR];
#endif

                  /* Then set up vector to the trampoline with interrupts
                   * disabled.  The kernel-space trampoline must run in
                   * privileged thread mode.
                   */

                  CURRENT_REGS[REG_PC]      = (uint32_t)up_sigdeliver;
#ifdef CONFIG_ARMV7M_USEBASEPRI
                  CURRENT_REGS[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY;
#else
                  CURRENT_REGS[REG_PRIMASK] = 1;
#endif
                  CURRENT_REGS[REG_XPSR]    = ARMV7M_XPSR_T;
#ifdef CONFIG_BUILD_PROTECTED
                  CURRENT_REGS[REG_LR]      = EXC_RETURN_PRIVTHR;
#endif

                  /* And make sure that the saved context in the TCB is the same
                   * as the interrupt return context.
                   */

                  up_savestate(tcb->xcp.regs);
                }

              /* Increment the IRQ lock count so that when the task is restarted,
               * it will hold the IRQ spinlock.
               */

              DEBUGASSERT(tcb->irqcount < INT16_MAX);
              tcb->irqcount++;

              /* In an SMP configuration, the interrupt disable logic also
               * involves spinlocks that are configured per the TCB irqcount
               * field.  This is logically equivalent to enter_critical_section().
               * The matching call to leave_critical_section() will be
               * performed in up_sigdeliver().
               */

              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                          &g_cpu_irqlock);

              /* RESUME the other CPU if it was PAUSED */

              if (cpu != me)
                {
                  up_cpu_resume(cpu);
                }
            }
        }

      /* Otherwise, we are (1) signaling a task that is not running from an
       * interrupt handler or (2) we are not in an interrupt handler and the
       * running task is signaling some other non-running task.
       */

      else
        {
          /* Save the return PC, XPSR and either the BASEPRI or PRIMASK
           * registers (and perhaps also the LR).  These will be restored
           * by the signal trampoline after the signal has been delivered.
           */

          tcb->xcp.sigdeliver       = (FAR void *)sigdeliver;
          tcb->xcp.saved_pc         = tcb->xcp.regs[REG_PC];
#ifdef CONFIG_ARMV7M_USEBASEPRI
          tcb->xcp.saved_basepri    = tcb->xcp.regs[REG_BASEPRI];
#else
          tcb->xcp.saved_primask    = tcb->xcp.regs[REG_PRIMASK];
#endif
          tcb->xcp.saved_xpsr       = tcb->xcp.regs[REG_XPSR];
#ifdef CONFIG_BUILD_PROTECTED
          tcb->xcp.saved_lr         = tcb->xcp.regs[REG_LR];
#endif
          /* Increment the IRQ lock count so that when the task is restarted,
           * it will hold the IRQ spinlock.
           */

          DEBUGASSERT(tcb->irqcount < INT16_MAX);
          tcb->irqcount++;

          /* Then set up to vector to the trampoline with interrupts
           * disabled.  We must already be in privileged thread mode to be
           * here.
           */

          tcb->xcp.regs[REG_PC]      = (uint32_t)up_sigdeliver;
#ifdef CONFIG_ARMV7M_USEBASEPRI
          tcb->xcp.regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY;
#else
          tcb->xcp.regs[REG_PRIMASK] = 1;
#endif
          tcb->xcp.regs[REG_XPSR]    = ARMV7M_XPSR_T;
#ifdef CONFIG_BUILD_PROTECTED
          tcb->xcp.regs[REG_LR]      = EXC_RETURN_PRIVTHR;
#endif
        }
    }

  leave_critical_section(flags);
}
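
The registers saved above are consumed by the up_sigdeliver() trampoline. What follows is only a conceptual sketch of that pairing; the real trampoline is arch-specific and is not shown in this example.

/* Conceptual sketch of the trampoline side (not the actual ARMv7-M
 * up_sigdeliver()): it runs on the signaled task with interrupts
 * disabled, as arranged above.
 */

static void sigdeliver_trampoline_sketch(void)
{
  FAR struct tcb_s *rtcb = this_task();
  sig_deliver_t deliver = (sig_deliver_t)rtcb->xcp.sigdeliver;

  /* Invoke the deferred signal delivery callback */

  deliver(rtcb);

  /* Allow new signal actions to be scheduled, drop the extra irqcount
   * taken above (the matching leave_critical_section()), and restore the
   * saved PC/xPSR/BASEPRI-or-PRIMASK (and LR) so that the task resumes
   * where it was interrupted.
   */

  rtcb->xcp.sigdeliver = NULL;

  /* ... restore rtcb->xcp.saved_* into the return context ... */
}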
Example #4
int task_terminate(pid_t pid, bool nonblocking)
{
  FAR struct tcb_s *dtcb;
  FAR dq_queue_t *tasklist;
  irqstate_t flags;
#ifdef CONFIG_SMP
  int cpu;
#endif
  int ret;

  /* Make sure the task does not become ready-to-run while we are futzing
   * with its TCB.  Within the critical section, no new task may be started
   * or terminated (even in the SMP case).
   */

  flags = enter_critical_section();

  /* Find the TCB associated with the matching PID */

  dtcb = sched_gettcb(pid);
  if (!dtcb)
    {
      /* This PID does not correspond to any known task */

      ret = -ESRCH;
      goto errout_with_lock;
    }

  /* Verify our internal sanity */

#ifdef CONFIG_SMP
  DEBUGASSERT(dtcb->task_state < NUM_TASK_STATES);
#else
  DEBUGASSERT(dtcb->task_state != TSTATE_TASK_RUNNING &&
              dtcb->task_state < NUM_TASK_STATES);
#endif

  /* Remove the task from the OS's task lists.  We must be in a critical
   * section and the task must not be running to do this.
   */

#ifdef CONFIG_SMP
  /* In the SMP case, the thread may be running on another CPU.  If that is
   * the case, then we will pause the CPU that the thread is running on.
   */

  cpu = sched_cpu_pause(dtcb);

  /* Get the task list associated with the thread's state and CPU */

  tasklist = TLIST_HEAD(dtcb->task_state, cpu);
#else
  /* In the non-SMP case, we can be assured that the task to be terminated
   * is not running.  Get the task list associated with the task state.
   */

  tasklist = TLIST_HEAD(dtcb->task_state);
#endif

  /* Remove the task from the task list */

  dq_rem((FAR dq_entry_t *)dtcb, tasklist);
  dtcb->task_state = TSTATE_TASK_INVALID;

  /* At this point, the TCB should no longer be accessible to the system */

#ifdef CONFIG_SMP
  /* Resume the paused CPU (if any) */

  if (cpu >= 0)
    {
      /* I am not yet sure how to handle a failure here. */

      DEBUGVERIFY(up_cpu_resume(cpu));
    }
#endif /* CONFIG_SMP */

  leave_critical_section(flags);

  /* Perform common task termination logic (flushing streams, calling
   * functions registered by at_exit/on_exit, etc.).  We need to do
   * this as early as possible so that higher level clean-up logic
   * can run in a healthy tasking environment.
   *
   * In the case where the task exits via exit(), task_exithook()
   * may be called twice.
   *
   * I suppose EXIT_SUCCESS is an appropriate return value???
   */

  task_exithook(dtcb, EXIT_SUCCESS, nonblocking);

  /* Since all tasks pass through this function as the final step in their
   * exit sequence, this is an appropriate place to inform any instrumentation
   * layer that the task no longer exists.
   */

  sched_note_stop(dtcb);

  /* Deallocate its TCB */

  return sched_releasetcb(dtcb, dtcb->flags & TCB_FLAG_TTYPE_MASK);

errout_with_lock:
  leave_critical_section(flags);
  return ret;
}
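
Finally, a usage sketch modeled loosely on how a task_delete()-style wrapper consumes this function; the helper name is illustrative. task_terminate() returns a negated errno value, so a POSIX-style caller translates it as shown, and self-termination must go through exit() instead.

#include <stdlib.h>
#include <errno.h>
#include <unistd.h>

/* Illustrative wrapper: terminate another task, translating the negated
 * errno return into the ERROR/errno convention.
 */

static int delete_task_sketch(pid_t pid)
{
  int ret;

  /* Terminating the calling task must go through exit(), which performs
   * the necessary self-termination handling.
   */

  if (pid == getpid())
    {
      exit(EXIT_SUCCESS);
    }

  ret = task_terminate(pid, false);
  if (ret < 0)
    {
      set_errno(-ret);
      return ERROR;
    }

  return OK;
}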