Example #1
int up_cpu_pause(int cpu)
{
  int ret;

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the pause event */

  sched_note_cpu_pause(this_task(), cpu);
#endif

  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

  /* Take both spinlocks.  The g_cpu_wait spinlock will prevent the SGI2
   * handler from returning until up_cpu_resume() is called; g_cpu_paused
   * is a handshake that will prevent this function from returning until
   * the CPU is actually paused.
   */

  DEBUGASSERT(!spin_islocked(&g_cpu_wait[cpu]) &&
              !spin_islocked(&g_cpu_paused[cpu]));

  spin_lock(&g_cpu_wait[cpu]);
  spin_lock(&g_cpu_paused[cpu]);

  /* Execute SGI2 */

  ret = xtensa_intercpu_interrupt(cpu, CPU_INTCODE_PAUSE);
  if (ret < 0)
    {
      /* What happened?  Unlock the g_cpu_wait spinlock */

      spin_unlock(&g_cpu_wait[cpu]);
    }
  else
    {
      /* Wait for the other CPU to unlock g_cpu_paused, meaning that
       * it is fully paused and ready for up_cpu_resume().
       */

      spin_lock(&g_cpu_paused[cpu]);
    }

  spin_unlock(&g_cpu_paused[cpu]);

  /* On successful return, g_cpu_wait will be locked, the other CPU will
   * be spinning on g_cpu_wait and will not continue until up_cpu_resume()
   * is called.  g_cpu_paused will be unlocked in any case.
   */

  return ret;
}
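
For context, the comments above imply a matching handshake on the paused
CPU: the SGI2 handler must release g_cpu_paused once the CPU is parked
and then spin on g_cpu_wait.  A minimal sketch of that receiving side,
using the same spinlocks (an illustration of the protocol, not the
verbatim NuttX handler):

static void pause_handshake(int cpu)
{
  /* Release g_cpu_paused so that up_cpu_pause() on the requesting CPU
   * stops spinning:  this CPU is now effectively paused.
   */

  spin_unlock(&g_cpu_paused[cpu]);

  /* Spin on g_cpu_wait until up_cpu_resume() releases it, then release
   * it again and return from the interrupt.
   */

  spin_lock(&g_cpu_wait[cpu]);
  spin_unlock(&g_cpu_wait[cpu]);
}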
Example #2
int up_cpu_resume(int cpu)
{
#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the resume event */

  sched_note_cpu_resume(this_task(), cpu);
#endif

  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

  /* Release the spinlock.  Releasing the spinlock will cause the SGI2
   * handler on 'cpu' to continue and return from interrupt to the newly
   * established thread.
   */

  DEBUGASSERT(spin_islocked(&g_cpu_wait[cpu]) &&
              !spin_islocked(&g_cpu_paused[cpu]));

  spin_unlock(&g_cpu_wait[cpu]);
  return OK;
}
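
Taken together, Examples #1 and #2 form a stop/restart primitive for a
remote CPU.  A hedged usage sketch follows; pause_and_modify() and
modify_shared_state() are hypothetical names, while up_cpu_pause() and
up_cpu_resume() are the functions shown above:

int pause_and_modify(int cpu)
{
  /* Park the other CPU; this blocks until it is actually paused */

  int ret = up_cpu_pause(cpu);
  if (ret < 0)
    {
      return ret;  /* SGI2 delivery failed; 'cpu' was never paused */
    }

  /* Safely mutate state that 'cpu' might otherwise touch concurrently */

  modify_shared_state();

  /* Let the paused CPU continue from its SGI2 handler */

  return up_cpu_resume(cpu);
}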
Example #3
void leave_critical_section(irqstate_t flags)
{
  /* Do nothing if called from an interrupt handler */

  if (!up_interrupt_context())
    {
      FAR struct tcb_s *rtcb = this_task();
      DEBUGASSERT(rtcb != 0 && rtcb->irqcount > 0);

      /* Will we still have interrupts disabled after decrementing the
       * count?
       */

      if (rtcb->irqcount > 1)
        {
          /* Yes... make sure that the spinlock is set */

          DEBUGASSERT(g_cpu_irqlock == SP_LOCKED);
          rtcb->irqcount--;
        }
      else
        {
#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
          /* No.. Note that we have left the critical section */

          sched_note_csection(rtcb, false);
#endif
          /* Decrement our count on the lock.  If all CPUs have released,
           * then unlock the spinlock.
           */

          rtcb->irqcount = 0;
          spin_clrbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
                      &g_cpu_irqlock);

          /* Have all CPUs released the lock? */

          if (!spin_islocked(&g_cpu_irqlock))
            {
              /* Check if there are pending tasks and if pre-emption is
               * also enabled.
               */

              if (g_pendingtasks.head != NULL &&
                  !spin_islocked(&g_cpu_schedlock))
                {
                  /* Release any ready-to-run tasks that have collected in
                   * g_pendingtasks if the scheduler is not locked.
                   *
                   * NOTE: This operation has a very high likelihood of causing
                   * this task to be switched out!
                   */

                  up_release_pending();
                }
            }
        }

      /* Restore the previous interrupt state, which may still have
       * interrupts disabled (but we don't have a mechanism to verify
       * that now)
       */

      up_irq_restore(flags);
    }
}
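
leave_critical_section() is the exit half of a pair; callers bracket
access to state shared across CPUs and interrupt handlers with
enter_critical_section().  A minimal sketch of typical usage (the
protected update is hypothetical; the enter/leave API is NuttX's):

#include <nuttx/irq.h>

void update_shared_state(void)
{
  /* Disable interrupts (and, under SMP, take the global IRQ lock) */

  irqstate_t flags = enter_critical_section();

  /* ... hypothetical update of state shared with interrupt handlers
   * and other CPUs ...
   */

  /* Re-enable interrupts only if this is the outermost section */

  leave_critical_section(flags);
}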
Example #4
bool up_cpu_pausereq(int cpu)
{
  return spin_islocked(&g_cpu_paused[cpu]);
}
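
up_cpu_pausereq() is the query side of the handshake from Example #1: a
locked g_cpu_paused[cpu] means a pause has been requested but not yet
acknowledged.  A sketch of how an inter-CPU interrupt handler might use
it (the handler shape is hypothetical; it assumes a handler-side helper
such as NuttX's up_cpu_paused() to complete the handshake):

static int intercpu_interrupt_handler(int irq, FAR void *context,
                                      FAR void *arg)
{
  int cpu = this_cpu();

  /* Was this interrupt a pause request from another CPU? */

  if (up_cpu_pausereq(cpu))
    {
      /* Yes.. complete the g_cpu_paused/g_cpu_wait handshake before
       * returning from the interrupt.
       */

      up_cpu_paused(cpu);
    }

  return OK;
}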