Example 1
int vnc_start_updater(FAR struct vnc_session_s *session)
{
  pthread_attr_t attr;
  struct sched_param param;
  int status;

  gvdbg("Starting updater for Display %d\n", session->display);

  /* Create the thread that will send rectangles to the client */

  session->state = VNCSERVER_RUNNING;

  DEBUGVERIFY(pthread_attr_init(&attr));
  DEBUGVERIFY(pthread_attr_setstacksize(&attr, CONFIG_VNCSERVER_UPDATER_STACKSIZE));

  param.sched_priority = CONFIG_VNCSERVER_UPDATER_PRIO;
  DEBUGVERIFY(pthread_attr_setschedparam(&attr, &param));

  status = pthread_create(&session->updater, &attr, vnc_updater,
                          (pthread_addr_t)session);
  if (status != 0)
    {
      return -status;
    }

  return OK;
}
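All of these examples use the NuttX DEBUGVERIFY() macro on calls whose failure is considered a programming error. A minimal sketch of such a macro, assuming a CONFIG_DEBUG_ASSERTIONS-style build option (an illustrative approximation, not the exact NuttX definition):

#include <assert.h>

/* Illustrative approximation of a DEBUGVERIFY-style macro: in debug
 * builds the expression's result is checked to be non-negative; in
 * release builds the expression is still evaluated for its side effects
 * but the result is discarded.
 */

#ifdef CONFIG_DEBUG_ASSERTIONS
#  define DEBUGVERIFY(f) assert((f) >= 0)
#else
#  define DEBUGVERIFY(f) ((void)(f))
#endif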
Example 2
void arm_gic0_initialize(void)
{
  unsigned int nlines = arm_gic_nlines();
  unsigned int irq;

  arm_gic_dump("Entry arm_gic0_initialize", true, 0);

  /* Initialize SPIs.  The following should be done only by CPU0. */

  /* A processor in Secure State sets:
   *
   * 1. Which interrupts are non-secure (ICDISR).  All set to zero (group
   *    0).
   * 2. Trigger mode of the SPI (ICDICFR). All fields set to 0b01->Level
   *    sensitive, 1-N model.
   * 3. Interrupt Clear-Enable (ICDICER)
   * 4. Priority of the SPI using the priority set register (ICDIPR).
   *    Priority values are 8-bit unsigned binary. A GIC supports a
   *    minimum of 16 and a maximum of 256 priority levels. Here all
   *    are set to the middle priority 128 (0x80).
   * 5. Target that receives the SPI interrupt (ICDIPTR).  Set all to
   *    CPU0.
   */

  /* Registers with 1-bit per interrupt */

  for (irq = GIC_IRQ_SPI; irq < nlines; irq += 32)
    {
      putreg32(0x00000000, GIC_ICDISR(irq));   /* SPIs group 0 */
      putreg32(0xffffffff, GIC_ICDICER(irq));  /* SPIs disabled */
    }

  /* Registers with 2-bits per interrupt */

  for (irq = GIC_IRQ_SPI; irq < nlines; irq += 16)
    {
    //putreg32(0xffffffff, GIC_ICDICFR(irq));  /* SPIs edge sensitive */
      putreg32(0x55555555, GIC_ICDICFR(irq));  /* SPIs level sensitive */
    }

  /* Registers with 8-bits per interrupt */

  for (irq = GIC_IRQ_SPI; irq < nlines; irq += 4)
    {
      putreg32(0x80808080, GIC_ICDIPR(irq));   /* SPI priority */
      putreg32(0x01010101, GIC_ICDIPTR(irq));  /* SPI on CPU0 */
    }

#ifdef CONFIG_SMP
  /* Attach SGI interrupt handlers */

  DEBUGVERIFY(irq_attach(GIC_IRQ_SGI1, arm_start_handler));
  DEBUGVERIFY(irq_attach(GIC_IRQ_SGI2, arm_pause_handler));
#endif

  arm_gic_dump("Exit arm_gic0_initialize", true, 0);
}
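The loop strides above follow from the field widths: a 32-bit distributor register holds 32 one-bit fields (ICDISR/ICDICER), 16 two-bit fields (ICDICFR), or 4 eight-bit fields (ICDIPR/ICDIPTR). A hedged sketch of the offset arithmetic that the GIC_ICD* macros are assumed to encapsulate (the helper name is illustrative):

/* Illustrative only: byte offset of the 32-bit distributor register that
 * contains the field for interrupt 'irq', given 'bits' bits per field
 * (1 for ICDISR/ICDICER, 2 for ICDICFR, 8 for ICDIPR/ICDIPTR).
 */

static inline unsigned int gic_field_regoffset(unsigned int irq,
                                               unsigned int bits)
{
  unsigned int fields_per_reg = 32 / bits;   /* 32, 16 or 4 */
  return (irq / fields_per_reg) * 4;         /* 4 bytes per 32-bit register */
}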
Example 3
static int nsh_waiter(int argc, char *argv[])
{
  bool connected = false;

  message("nsh_waiter: Running\n");
  for (;;)
    {
      /* Wait for the device to change state */

      DEBUGVERIFY(CONN_WAIT(g_usbconn, &connected));

      connected = !connected;
      message("nsh_waiter: %s\n", connected ? "connected" : "disconnected");

      /* Did we just become connected? */

      if (connected)
        {
          /* Yes.. enumerate the newly connected device */

          (void)CONN_ENUMERATE(g_usbconn, 0);
        }
    }

  /* Keep the compiler from complaining */

  return 0;
}
Example 4
static int ehci_waiter(int argc, char *argv[])
{
  FAR struct usbhost_hubport_s *hport;

  uinfo("ehci_waiter:  Running\n");
  for (;;)
    {
      /* Wait for the device to change state */

      DEBUGVERIFY(CONN_WAIT(g_ehciconn, &hport));
      syslog(LOG_INFO, "ehci_waiter: %s\n",
             hport->connected ? "connected" : "disconnected");

      /* Did we just become connected? */

      if (hport->connected)
        {
          /* Yes.. enumerate the newly connected device */

          (void)CONN_ENUMERATE(g_ehciconn, hport);
        }
    }

  /* Keep the compiler from complaining */

  return 0;
}
Example 5
void sched_suspend_scheduler(FAR struct tcb_s *tcb)
{
  if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC)
    {
      DEBUGVERIFY(sched_sporadic_suspend(tcb));
    }
}
Example 6
static int usbhost_waiter(struct usbhost_connection_s *dev)
{
  struct usbhost_hubport_s *hport;

  uinfo("Running\n");
  for (;;)
    {
      /* Wait for the device to change state */

      DEBUGVERIFY(CONN_WAIT(dev, &hport));
      uinfo("%s\n", hport->connected ? "connected" : "disconnected");

      /* Did we just become connected? */

      if (hport->connected)
        {
          /* Yes.. enumerate the newly connected device */

          (void)CONN_ENUMERATE(dev, hport);
        }
    }

  /* Keep the compiler from complaining */

  return 0;
}
Example 7
FAR struct tcp_wrbuffer_s *tcp_wrbuffer_alloc(void)
{
  FAR struct tcp_wrbuffer_s *wrb;

  /* We need to allocate two things:  (1) A write buffer structure and (2)
   * at least one I/O buffer to start the chain.
   *
   * Allocate the write buffer structure first, then the IOB.  In order to
   * avoid deadlocks, we will need to free the IOB first, then the write
   * buffer.
   */

  DEBUGVERIFY(net_lockedwait(&g_wrbuffer.sem));

  /* Now, we are guaranteed to have a write buffer structure reserved
   * for us in the free list.
   */

  wrb = (FAR struct tcp_wrbuffer_s *)sq_remfirst(&g_wrbuffer.freebuffers);
  DEBUGASSERT(wrb);
  memset(wrb, 0, sizeof(struct tcp_wrbuffer_s));

  /* Now get the first I/O buffer for the write buffer structure */

  wrb->wb_iob = iob_alloc(false);
  if (!wrb->wb_iob)
    {
      ndbg("ERROR: Failed to allocate I/O buffer\n");
      tcp_wrbuffer_release(wrb);
      return NULL;
    }

  return wrb;
}
Example 8
void efm32_gpioirqinitialize(void)
{
  /* Initialize GPIO interrupt registers, disabling GPIO interrupts at the source */

  putreg32(0, EFM32_GPIO_EXTIRISE);
  putreg32(0, EFM32_GPIO_EXTIFALL);
  putreg32(0, EFM32_GPIO_IEN);

  /* Attach the even and odd interrupt handlers */

  DEBUGVERIFY(irq_attach(EFM32_IRQ_GPIO_EVEN, efm32_even_interrupt));
  DEBUGVERIFY(irq_attach(EFM32_IRQ_GPIO_ODD, efm32_odd_interrupt));

  /* Enable GPIO even and odd interrupts at the NVIC */

  up_enable_irq(EFM32_IRQ_GPIO_ODD);
  up_enable_irq(EFM32_IRQ_GPIO_EVEN);
}
Example 9
int sem_reset(FAR sem_t *sem, int16_t count)
{
  irqstate_t flags;

  DEBUGASSERT(sem != NULL && count >= 0);

  /* Don't allow any context switches that may result from the following
   * sem_post() operations.
   */

  sched_lock();

  /* Prevent any access to the semaphore by interrupt handlers while we are
   * performing this operation.
   */

  flags = enter_critical_section();

  /* A negative count indicates that the negated number of threads are
   * waiting to take a count from the semaphore.  Loop here, handing
   * out counts to any waiting threads.
   */

  while (sem->semcount < 0 && count > 0)
    {
      /* Give out one count, waking up one of the waiting threads
       * and, perhaps, kicking off a lot of priority inheritance
       * logic (REVISIT).
       */

      DEBUGVERIFY(sem_post(sem));
      count--;
    }

  /* We exit the above loop either (1) with no threads waiting for the
   * semaphore (i.e., with sem->semcount >= 0).  In this case, 'count'
   * holds the new value of the semaphore count.  OR (2) with threads still
   * waiting but all of the semaphore counts exhausted:  The current
   * value of sem->semcount is already correct in this case.
   */

  if (sem->semcount >= 0)
    {
      sem->semcount = count;
    }

  /* Allow any pending context switches to occur now */

  leave_critical_section(flags);
  sched_unlock();
  return OK;
}
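A plausible use of sem_reset() (a NuttX-specific extension) is to return a counting semaphore to a known value after the resource it tracks has been re-initialized; threads already waiting are handed counts first, and the remainder becomes the new count. A hedged sketch with illustrative names:

#include <semaphore.h>

/* Hypothetical example: after re-initializing a pool of NBUFFERS buffers,
 * reset the "buffer available" semaphore to the full count.  The names,
 * and the assumptions that g_bufavail was set up elsewhere with sem_init()
 * and that sem_reset() is declared via semaphore.h, are illustrative only.
 */

#define NBUFFERS 8

static sem_t g_bufavail;

static void mypool_reinit(void)
{
  DEBUGVERIFY(sem_reset(&g_bufavail, NBUFFERS));
}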
Example 10
int arp_wait(FAR struct arp_notify_s *notify, FAR struct timespec *timeout)
{
  struct timespec abstime;
  irqstate_t flags;
  int errcode;
  int ret;

  /* And wait for the ARP response (or a timeout).  Interrupts will be re-
   * enabled while we wait.
   */

  flags = irqsave();
  DEBUGVERIFY(clock_gettime(CLOCK_REALTIME, &abstime));

  abstime.tv_sec  += timeout->tv_sec;
  abstime.tv_nsec += timeout->tv_nsec;
  if (abstime.tv_nsec >= 1000000000)
    {
      abstime.tv_sec++;
      abstime.tv_nsec -= 1000000000;
    }

  /* Wait to get either the correct response or a timeout. */

  do
    {
      /* The only errors that we expect would be if the abstime timeout
       * expires or if the wait were interrupted by a signal.
       */

      ret     = net_timedwait(&notify->nt_sem, &abstime);
      errcode = ((ret < 0) ? get_errno() : 0);
    }
  while (ret < 0 && errcode == EINTR);

  /* Then get the real result of the transfer */

  ret = notify->nt_result;

  /* Remove our wait structure from the list (we may no longer be at the
   * head of the list).
   */

  (void)arp_wait_cancel(notify);

  /* Re-enable interrupts and return the result of the wait */

  irqrestore(flags);
  return ret;
}
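The absolute-timeout computation above (repeated in Examples 11 and 13) follows a common pattern: read the current CLOCK_REALTIME time, add the relative timeout, and carry any nanosecond overflow into the seconds field. A stand-alone sketch of just that step, using plain POSIX types and a hypothetical helper name (the NuttX FAR qualifier is omitted):

#include <time.h>

/* Illustrative helper: abstime = now (CLOCK_REALTIME) + timeout, with
 * tv_nsec normalized into the range [0, 1000000000).
 */

static void make_abstime(const struct timespec *timeout,
                         struct timespec *abstime)
{
  clock_gettime(CLOCK_REALTIME, abstime);

  abstime->tv_sec  += timeout->tv_sec;
  abstime->tv_nsec += timeout->tv_nsec;
  if (abstime->tv_nsec >= 1000000000)
    {
      abstime->tv_sec++;
      abstime->tv_nsec -= 1000000000;
    }
}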
Example 11
int icmpv6_rwait(FAR struct icmpv6_rnotify_s *notify,
                 FAR struct timespec *timeout)
{
  struct timespec abstime;
  irqstate_t flags;
  int ret;

  ninfo("Waiting...\n");

  /* And wait for the Neighbor Advertisement (or a timeout).  Interrupts will
   * be re-enabled while we wait.
   */

  flags = enter_critical_section();
  DEBUGVERIFY(clock_gettime(CLOCK_REALTIME, &abstime));

  abstime.tv_sec  += timeout->tv_sec;
  abstime.tv_nsec += timeout->tv_nsec;
  if (abstime.tv_nsec >= 1000000000)
    {
      abstime.tv_sec++;
      abstime.tv_nsec -= 1000000000;
    }

  /* REVISIT:  If net_timedwait() is awakened with a signal, we will return
   * the wrong error code.
   */

  (void)net_timedwait(&notify->rn_sem, &abstime);
  ret = notify->rn_result;

  /* Remove our wait structure from the list (we may no longer be at the
   * head of the list).
   */

  (void)icmpv6_rwait_cancel(notify);

  /* Re-enable interrupts and return the result of the wait */

  leave_critical_section(flags);
  return ret;
}
Example 12
File: mm_sem.c Project: dagar/NuttX
void mm_givesemaphore(FAR struct mm_heap_s *heap)
{
#ifdef CONFIG_SMP
  irqstate_t flags = enter_critical_section();
#endif
#if defined(CONFIG_DEBUG_ASSERTIONS) || \
   (defined(MONITOR_MM_SEMAPHORE) && defined(CONFIG_DEBUG_INFO))
  pid_t my_pid = getpid();
#endif

  /* I better be holding at least one reference to the semaphore */

  DEBUGASSERT(heap->mm_holder == my_pid);

  /* Do I hold multiple references to the semaphore? */

  if (heap->mm_counts_held > 1)
    {
      /* Yes, just release one count and return */

      heap->mm_counts_held--;
      mseminfo("Holder=%d count=%d\n", heap->mm_holder,
               heap->mm_counts_held);
    }
  else
    {
      /* Nope, this is the last reference I have */

      mseminfo("PID=%d giving\n", my_pid);

      heap->mm_holder      = -1;
      heap->mm_counts_held = 0;
      DEBUGVERIFY(_SEM_POST(&heap->mm_semaphore));
    }

#ifdef CONFIG_SMP
  leave_critical_section(flags);
#endif
}
Example 13
int arp_wait(FAR struct arp_notify_s *notify, FAR struct timespec *timeout)
{
  struct timespec abstime;
  irqstate_t flags;
  int ret;

  /* And wait for the ARP response (or a timeout).  Interrupts will be re-
   * enabled while we wait.
   */

  flags = irqsave();
  DEBUGVERIFY(clock_gettime(CLOCK_REALTIME, &abstime));

  abstime.tv_sec  += timeout->tv_sec;
  abstime.tv_nsec += timeout->tv_nsec;
  if (abstime.tv_nsec >= 1000000000)
    {
      abstime.tv_sec++;
      abstime.tv_nsec -= 1000000000;
    }

  /* REVISIT:  If sem_timedwait() is awakened with a signal, we will return
   * the wrong error code.
   */

  (void)sem_timedwait(&notify->nt_sem, &abstime);
  ret = notify->nt_result;

  /* Remove our wait structure from the list (we may no longer be at the
   * head of the list).
   */

  (void)arp_wait_cancel(notify);

  /* Re-enable interrupts and return the result of the wait */

  irqrestore(flags);
  return ret;
}
Example 14
int mq_dosend(mqd_t mqdes, FAR struct mqueue_msg_s *mqmsg, FAR const char *msg,
              size_t msglen, int prio)
{
  FAR struct tcb_s *btcb;
  FAR struct mqueue_inode_s *msgq;
  FAR struct mqueue_msg_s *next;
  FAR struct mqueue_msg_s *prev;
  irqstate_t saved_state;

  /* Get a pointer to the message queue */

  sched_lock();
  msgq = mqdes->msgq;

  /* Construct the message header info */

  mqmsg->priority = prio;
  mqmsg->msglen   = msglen;

  /* Copy the message data into the message */

  memcpy((FAR void *)mqmsg->mail, (FAR const void *)msg, msglen);

  /* Insert the new message in the message queue */

  saved_state = irqsave();

  /* Search the message list to find the location to insert the new
   * message.  The list is maintained in priority order, with the highest
   * priority message at the head.
   */

  for (prev = NULL, next = (FAR struct mqueue_msg_s *)msgq->msglist.head;
       next && prio <= next->priority;
       prev = next, next = next->next);

  /* Add the message at the right place */

  if (prev)
    {
      sq_addafter((FAR sq_entry_t *)prev, (FAR sq_entry_t *)mqmsg,
                  &msgq->msglist);
    }
  else
    {
      sq_addfirst((FAR sq_entry_t *)mqmsg, &msgq->msglist);
    }

  /* Increment the count of messages in the queue */

  msgq->nmsgs++;
  irqrestore(saved_state);

  /* Check if we need to notify any tasks that are attached to the
   * message queue
   */

#ifndef CONFIG_DISABLE_SIGNALS
  if (msgq->ntmqdes)
    {
      struct sigevent event;
      pid_t pid;

      /* Remove the message notification data from the message queue. */

      memcpy(&event, &msgq->ntevent, sizeof(struct sigevent));
      pid = msgq->ntpid;

      /* Detach the notification */

      memset(&msgq->ntevent, 0, sizeof(struct sigevent));
      msgq->ntpid   = INVALID_PROCESS_ID;
      msgq->ntmqdes = NULL;

      /* Notify the client via a signal? */

      if (event.sigev_notify == SIGEV_SIGNAL)
        {
          /* Yes... Queue the signal -- What if this returns an error? */

#ifdef CONFIG_CAN_PASS_STRUCTS
          DEBUGVERIFY(sig_mqnotempty(pid, event.sigev_signo,
                      event.sigev_value));
#else
          DEBUGVERIFY(sig_mqnotempty(pid, event.sigev_signo,
                      event.sigev_value.sival_ptr));
#endif
        }

#ifdef CONFIG_SIG_EVTHREAD
      /* Notify the client via a function call */

      else if (event.sigev_notify == SIGEV_THREAD)
        {
          DEBUGVERIFY(sig_notification(pid, &event));
        }
#endif

    }
#endif

  /* Check if any tasks are waiting for the MQ not empty event. */

  saved_state = irqsave();
  if (msgq->nwaitnotempty > 0)
    {
      /* Find the highest priority task that is waiting for this queue
       * to be non-empty in the g_waitingformqnotempty list.
       * sched_lock() should give us sufficient protection since
       * interrupts should never cause a change in this list.
       */

      for (btcb = (FAR struct tcb_s *)g_waitingformqnotempty.head;
           btcb && btcb->msgwaitq != msgq;
           btcb = btcb->flink);

      /* If one was found, unblock it */

      ASSERT(btcb);

      btcb->msgwaitq = NULL;
      msgq->nwaitnotempty--;
      up_unblock_task(btcb);
    }

  irqrestore(saved_state);
  sched_unlock();
  return OK;
}
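The insertion loop above keeps the message list sorted with the highest priority at the head, and places a new message after any existing messages of equal priority, so equal-priority messages remain FIFO. A self-contained sketch of the same idea on a plain singly linked list (the types and names are illustrative, not the NuttX sq_* API):

#include <stddef.h>

/* Illustrative only: insert 'node' into a singly linked list kept in
 * descending priority order, after any existing nodes of equal priority.
 */

struct pnode_s
{
  struct pnode_s *next;
  int priority;
};

static void pnode_insert(struct pnode_s **head, struct pnode_s *node)
{
  struct pnode_s *prev = NULL;
  struct pnode_s *next = *head;

  while (next != NULL && node->priority <= next->priority)
    {
      prev = next;
      next = next->next;
    }

  node->next = next;
  if (prev != NULL)
    {
      prev->next = node;
    }
  else
    {
      *head = node;
    }
}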
Example 15
void os_start(void)
{
  int i;

  slldbg("Entry\n");

  /* Initialize RTOS Data ***************************************************/
  /* Initialize all task lists */

  dq_init(&g_readytorun);
  dq_init(&g_pendingtasks);
  dq_init(&g_waitingforsemaphore);
#ifndef CONFIG_DISABLE_SIGNALS
  dq_init(&g_waitingforsignal);
#endif
#ifndef CONFIG_DISABLE_MQUEUE
  dq_init(&g_waitingformqnotfull);
  dq_init(&g_waitingformqnotempty);
#endif
#ifdef CONFIG_PAGING
  dq_init(&g_waitingforfill);
#endif
  dq_init(&g_inactivetasks);
  sq_init(&g_delayed_kufree);
#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
     defined(CONFIG_MM_KERNEL_HEAP)
  sq_init(&g_delayed_kfree);
#endif

  /* Initialize the logic that determines unique process IDs. */

  g_lastpid = 0;
  for (i = 0; i < CONFIG_MAX_TASKS; i++)
    {
      g_pidhash[i].tcb = NULL;
      g_pidhash[i].pid = INVALID_PROCESS_ID;
    }

  /* Assign the process ID of ZERO to the idle task */

  g_pidhash[PIDHASH(0)].tcb = &g_idletcb.cmn;
  g_pidhash[PIDHASH(0)].pid = 0;

  /* Initialize the IDLE task TCB *******************************************/
  /* Initialize a TCB for this thread of execution.  NOTE:  The default
   * value for most components of the g_idletcb are zero.  The entire
   * structure is set to zero.  Then only the (potentially) non-zero
   * elements are initialized. NOTE:  The idle task is the only task
   * that has pid == 0 and sched_priority == 0.
   */

  bzero((void*)&g_idletcb, sizeof(struct task_tcb_s));
  g_idletcb.cmn.task_state = TSTATE_TASK_RUNNING;
  g_idletcb.cmn.entry.main = (main_t)os_start;
  g_idletcb.cmn.flags      = TCB_FLAG_TTYPE_KERNEL;

  /* Set the IDLE task name */

#if CONFIG_TASK_NAME_SIZE > 0
  strncpy(g_idletcb.cmn.name, g_idlename, CONFIG_TASK_NAME_SIZE);
  g_idletcb.cmn.name[CONFIG_TASK_NAME_SIZE] = '\0';
#endif /* CONFIG_TASK_NAME_SIZE */

  /* Configure the task name in the argument list.  The IDLE task does
   * not really have an argument list, but this name is still useful
   * for things like the NSH PS command.
   *
   * In the kernel mode build, the arguments are saved on the task's stack
   * and there is no support for that yet.
   */

#if CONFIG_TASK_NAME_SIZE > 0
  g_idleargv[0]  = g_idletcb.cmn.name;
#else
  g_idleargv[0]  = (FAR char *)g_idlename;
#endif /* CONFIG_TASK_NAME_SIZE */
  g_idleargv[1]  = NULL;
  g_idletcb.argv = g_idleargv;

  /* Then add the idle task's TCB to the head of the ready to run list */

  dq_addfirst((FAR dq_entry_t*)&g_idletcb, (FAR dq_queue_t*)&g_readytorun);

  /* Initialize the processor-specific portion of the TCB */

  up_initial_state(&g_idletcb.cmn);

  /* Initialize RTOS facilities *********************************************/
  /* Initialize the semaphore facility.  This has to be done very early
   * because many subsystems depend upon fully functional semaphores.
   */

  sem_initialize();

#if defined(MM_KERNEL_USRHEAP_INIT) || defined(CONFIG_MM_KERNEL_HEAP) || defined(CONFIG_MM_PGALLOC)
  /* Initialize the memory manager */

  {
    FAR void *heap_start;
    size_t heap_size;

#ifdef MM_KERNEL_USRHEAP_INIT
    /* Get the user-mode heap from the platform specific code and configure
     * the user-mode memory allocator.
     */

    up_allocate_heap(&heap_start, &heap_size);
    kumm_initialize(heap_start, heap_size);
#endif

#ifdef CONFIG_MM_KERNEL_HEAP
    /* Get the kernel-mode heap from the platform specific code and configure
     * the kernel-mode memory allocator.
     */

    up_allocate_kheap(&heap_start, &heap_size);
    kmm_initialize(heap_start, heap_size);
#endif

#ifdef CONFIG_MM_PGALLOC
    /* If there is a page allocator in the configuration, then get the page
     * heap information from the platform-specific code and configure the
     * page allocator.
     */

    up_allocate_pgheap(&heap_start, &heap_size);
    mm_pginitialize(heap_start, heap_size);
#endif
  }
#endif

#if defined(CONFIG_SCHED_HAVE_PARENT) && defined(CONFIG_SCHED_CHILD_STATUS)
  /* Initialize tasking data structures */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (task_initialize != NULL)
#endif
    {
      task_initialize();
    }
#endif

  /* Initialize the interrupt handling subsystem (if included) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (irq_initialize != NULL)
#endif
    {
      irq_initialize();
    }

  /* Initialize the watchdog facility (if included in the link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (wd_initialize != NULL)
#endif
    {
      wd_initialize();
    }

  /* Initialize the POSIX timer facility (if included in the link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (clock_initialize != NULL)
#endif
    {
      clock_initialize();
    }

#ifndef CONFIG_DISABLE_POSIX_TIMERS
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (timer_initialize != NULL)
#endif
    {
      timer_initialize();
    }
#endif

#ifndef CONFIG_DISABLE_SIGNALS
  /* Initialize the signal facility (if in link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (sig_initialize != NULL)
#endif
    {
      sig_initialize();
    }
#endif

#ifndef CONFIG_DISABLE_MQUEUE
  /* Initialize the named message queue facility (if in link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (mq_initialize != NULL)
#endif
    {
      mq_initialize();
    }
#endif

#ifndef CONFIG_DISABLE_PTHREAD
  /* Initialize the thread-specific data facility (if in link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (pthread_initialize != NULL)
#endif
    {
      pthread_initialize();
    }
#endif

#if CONFIG_NFILE_DESCRIPTORS > 0
  /* Initialize the file system (needed to support device drivers) */

  fs_initialize();
#endif

#ifdef CONFIG_NET
  /* Initialize the networking system.  Network initialization is
   * performed in two steps:  (1) net_setup() initializes static
   * configuration of the network support.  This must be done prior
   * to registering network drivers by up_initialize().  This step
   * cannot depend upon any hardware-dependent features such as
   * timers or interrupts.
   */

  net_setup();
#endif

  /* The processor specific details of running the operating system
   * will be handled here.  Such things as setting up interrupt
   * service routines and starting the clock are some of the things
   * that are different for each  processor and hardware platform.
   */

  up_initialize();

#ifdef CONFIG_NET
  /* Complete initialization of the networking system now that interrupts
   * and timers have been configured by up_initialize().
   */

  net_initialize();
#endif

#ifdef CONFIG_MM_SHM
  /* Initialize shared memory support */

  shm_initialize();
#endif

  /* Initialize the C libraries.  This is done last because the libraries
   * may depend on the above.
   */

  lib_initialize();

  /* IDLE Group Initialization **********************************************/
#ifdef HAVE_TASK_GROUP
  /* Allocate the IDLE group */

  DEBUGVERIFY(group_allocate(&g_idletcb, g_idletcb.cmn.flags));
#endif

#if CONFIG_NFILE_DESCRIPTORS > 0 || CONFIG_NSOCKET_DESCRIPTORS > 0
  /* Create stdout, stderr, stdin on the IDLE task.  These will be
   * inherited by all of the threads created by the IDLE task.
   */

  DEBUGVERIFY(group_setupidlefiles(&g_idletcb));
#endif

#ifdef HAVE_TASK_GROUP
  /* Complete initialization of the IDLE group.  Suppress retention
   * of child status in the IDLE group.
   */

  DEBUGVERIFY(group_initialize(&g_idletcb));
  g_idletcb.cmn.group->tg_flags = GROUP_FLAG_NOCLDWAIT;
#endif

  /* Bring Up the System ****************************************************/
  /* Create initial tasks and bring-up the system */

  DEBUGVERIFY(os_bringup());

  /* The IDLE Loop **********************************************************/
  /* When control returns to this point, the system is idle. */

  sdbg("Beginning Idle Loop\n");
  for (;;)
    {
      /* Perform garbage collection (if it is not being done by the worker
       * thread).  This cleans-up memory de-allocations that were queued
       * because they could not be freed in that execution context (for
       * example, if the memory was freed from an interrupt handler).
       */

#ifndef CONFIG_SCHED_WORKQUEUE
      /* We must have exclusive access to the memory manager to do this
       * BUT the idle task cannot wait on a semaphore.  So we only do
       * the cleanup now if we can get the semaphore -- this should be
       * possible because if the IDLE thread is running, no other task is!
       *
       * WARNING: This logic could have undesirable side-effects if priority
       * inheritance is enabled.  Imagine the possible issues if the
       * priority of the IDLE thread were to get boosted!  Moral: If you
       * use priority inheritance, then you should also enable the work
       * queue so that this clean-up is done in a safer context.
       */

      if (kmm_trysemaphore() == 0)
        {
          sched_garbagecollection();
          kmm_givesemaphore();
        }
#endif

      /* Perform any processor-specific idle state operations */

      up_idle();
    }
}
Example 16
static int pty_ioctl(FAR struct file *filep, int cmd, unsigned long arg)
{
  FAR struct inode *inode;
  FAR struct pty_dev_s *dev;
  FAR struct pty_devpair_s *devpair;
  int ret;

  DEBUGASSERT(filep != NULL && filep->f_inode != NULL);
  inode   = filep->f_inode;
  dev     = inode->i_private;
  DEBUGASSERT(dev != NULL && dev->pd_devpair != NULL);
  devpair = dev->pd_devpair;

  /* Get exclusive access */

  pty_semtake(devpair);

  /* Handle IOCTL commands */

  switch (cmd)
    {
      /* PTY IOCTL commands would be handled here */

      case TIOCGPTN:    /* Get Pty Number (of pty-mux device): FAR int* */
        {
#ifdef CONFIG_DISABLE_PSEUDOFS_OPERATIONS
          ret = -ENOSYS;
#else
          FAR int *ptyno = (FAR int *)((uintptr_t)arg);
          if (ptyno == NULL)
            {
              ret = -EINVAL;
            }
          else
            {
              *ptyno = (int)devpair->pp_minor;
              ret = OK;
            }
#endif
        }
        break;

      case TIOCSPTLCK:  /* Lock/unlock Pty: int */
        {
          if (arg == 0)
            {
               int sval;

               /* Unlocking */

               sched_lock();
               devpair->pp_locked = false;

               /* Release any waiting threads */

               do
                 {
                   DEBUGVERIFY(nxsem_getvalue(&devpair->pp_slavesem, &sval));
                   if (sval < 0)
                     {
                       nxsem_post(&devpair->pp_slavesem);
                     }
                 }
               while (sval < 0);

               sched_unlock();
               ret = OK;
            }
          else
            {
              /* Locking */

               devpair->pp_locked = true;
               ret = OK;
            }
        }
        break;

      case TIOCGPTLCK:  /* Get Pty lock state: FAR int* */
        {
          FAR int *ptr = (FAR int *)((uintptr_t)arg);
          if (ptr == NULL)
            {
              ret = -EINVAL;
            }
          else
            {
              *ptr = (int)devpair->pp_locked;
              ret = OK;
            }
        }
        break;

#ifdef CONFIG_SERIAL_TERMIOS
      case TCGETS:
        {
          FAR struct termios *termiosp = (FAR struct termios *)arg;

          if (!termiosp)
            {
              ret = -EINVAL;
              break;
            }

          /* And update with flags from this layer */

          termiosp->c_iflag = dev->pd_iflag;
          termiosp->c_oflag = dev->pd_oflag;
          termiosp->c_lflag = 0;
          ret = OK;
        }
        break;

      case TCSETS:
        {
          FAR struct termios *termiosp = (FAR struct termios *)arg;

          if (!termiosp)
            {
              ret = -EINVAL;
              break;
            }

          /* Update the flags we keep at this layer */

          dev->pd_iflag = termiosp->c_iflag;
          dev->pd_oflag = termiosp->c_oflag;
          ret = OK;
        }
        break;
#endif

      /* Get the number of bytes that are immediately available for reading
       * from the source pipe.
       */

      case FIONREAD:
        {
          ret = file_ioctl(&dev->pd_src, cmd, arg);
        }
        break;

      /* Get the number of bytes waiting in the sink pipe (FIONWRITE) or the
       * number of unused bytes in the sink pipe (FIONSPACE).
       */

      case FIONWRITE:
      case FIONSPACE:
        {
          ret = file_ioctl(&dev->pd_sink, cmd, arg);
        }
        break;

      /* Any unrecognized IOCTL commands will be passed to the contained
       * pipe driver.
       *
       * REVISIT:  We know for a fact that the pipe driver only supports
       * FIONREAD, FIONWRITE, FIONSPACE and PIPEIOC_POLICY.  The first two
       * are handled above and PIPEIOC_POLICY should not be managed by
       * applications -- it can break the PTY!
       */

      default:
        {
#if 0
          ret = file_ioctl(&dev->pd_src, cmd, arg);
          if (ret >= 0 || ret == -ENOTTY)
            {
              ret = file_ioctl(&dev->pd_sink, cmd, arg);
            }
#else
          ret = -ENOTTY;
#endif
        }
        break;
    }

  pty_semgive(devpair);
  return ret;
}
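For context, the TIOCGPTN command handled above is what a master-side caller would use to discover the slave device number. A hedged usage sketch; the function name, the error handling, and the assumption that TIOCGPTN is visible through sys/ioctl.h are illustrative only:

#include <stdio.h>
#include <sys/ioctl.h>

static int print_pty_minor(int fd)
{
  int ptyno;

  /* Ask the PTY master driver for the slave's minor number */

  if (ioctl(fd, TIOCGPTN, (unsigned long)&ptyno) < 0)
    {
      return -1;
    }

  printf("slave pty number: %d\n", ptyno);
  return 0;
}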
Example 17
FAR struct aio_container_s *aio_contain(FAR struct aiocb *aiocbp)
{
  FAR struct aio_container_s *aioc;
  union
  {
#ifdef AIO_HAVE_FILEP
    FAR struct file *filep;
#endif
#ifdef AIO_HAVE_PSOCK
    FAR struct socket *psock;
#endif
    FAR void *ptr;
  } u;
#ifdef CONFIG_PRIORITY_INHERITANCE
  struct sched_param param;
#endif

#if defined(AIO_HAVE_FILEP) && defined(AIO_HAVE_PSOCK)
  if (aiocbp->aio_fildes < CONFIG_NFILE_DESCRIPTORS)
#endif
#ifdef AIO_HAVE_FILEP
    {
      /* Get the file structure corresponding to the file descriptor. */

      u.filep = fs_getfilep(aiocbp->aio_fildes);
      if (!u.filep)
        {
          /* The errno value has already been set */

          return NULL;
        }
    }
#endif
#if defined(AIO_HAVE_FILEP) && defined(AIO_HAVE_PSOCK)
  else
#endif
#ifdef AIO_HAVE_PSOCK
    {
      /* Get the socket structure corresponding to the socket descriptor */

      u.psock = sockfd_socket(aiocbp->aio_fildes);
      if (!u.psock)
        {
          /* Does not set the errno.  EBADF is the most likely explanation. */

          set_errno(EBADF);
          return NULL;
        }
    }
#endif

  /* Allocate the AIO control block container, waiting for one to become
   * available if necessary.  This should never fail.
   */

  aioc = aioc_alloc();
  DEBUGASSERT(aioc);

  /* Initialize the container */

  memset(aioc, 0, sizeof(struct aio_container_s));
  aioc->aioc_aiocbp = aiocbp;
  aioc->u.aioc_filep = u.ptr;
  aioc->aioc_pid = getpid();

#ifdef CONFIG_PRIORITY_INHERITANCE
  DEBUGVERIFY(sched_getparam(aioc->aioc_pid, &param));
  aioc->aioc_prio = param.sched_priority;
#endif

  /* Add the container to the pending transfer list. */

  aio_lock();
  dq_addlast(&aioc->aioc_link, &g_aio_pending);
  aio_unlock();
  return aioc;
}
Example 18
bool sched_removereadytorun(FAR struct tcb_s *rtcb)
{
  FAR dq_queue_t *tasklist;
  bool doswitch = false;
  int cpu;

  /* Which CPU (if any) is the task running on?  Which task list holds the
   * TCB?
   */

  cpu      = rtcb->cpu;
  tasklist = TLIST_HEAD(rtcb->task_state, cpu);

  /* Check if the TCB to be removed is at the head of a ready-to-run list.
   * For the case of SMP, there are two lists involved:  (1) the
   * g_readytorun list that holds non-running tasks that have not been
   * assigned to a CPU, and (2) the g_assignedtasks[] lists which hold
   * tasks assigned a CPU, including the task that is currently running on
   * that CPU.  Only this latter list contains the currently active task,
   * and only removing the head of that list can result in a context
   * switch.
   *
   * rtcb->blink == NULL will tell us if the TCB is at the head of the
   * ready-to-run list and, hence, a candidate for the new running task.
   *
   * If so, then the tasklist RUNNABLE attribute will inform us if the list
   * holds the currently executing task and, hence, if a context switch
   * should occur.
   */

  if (rtcb->blink == NULL && TLIST_ISRUNNABLE(rtcb->task_state))
    {
      FAR struct tcb_s *nxttcb;
      FAR struct tcb_s *rtrtcb;
      int me;

      /* There must always be at least one task in the list (the IDLE task)
       * after the TCB being removed.
       */

      nxttcb = (FAR struct tcb_s *)rtcb->flink;
      DEBUGASSERT(nxttcb != NULL);

      /* If we are modifying the head of some assigned task list other than
       * our own, we will need to stop that CPU.
       */

      me = this_cpu();
      if (cpu != me)
        {
          DEBUGVERIFY(up_cpu_pause(cpu));
        }

      /* The task is running but the CPU that it was running on has been
       * paused.  We can now safely remove its TCB from the ready-to-run
       * task list.  In the SMP case this may be either the g_readytorun()
       * or the g_assignedtasks[cpu] list.
       */

      dq_rem((FAR dq_entry_t *)rtcb, tasklist);

      /* Which task will go at the head of the list?  It will be either the
       * next tcb in the assigned task list (nxttcb) or a TCB in the
       * g_readytorun list.  We can only select a task from that list if
       * the affinity mask includes the current CPU.
       *
       * REVISIT: What should we do, if anything, if pre-emption is locked
       * by another CPU?  Should we just use nxttcb?  Should we select
       * from the pending task list instead of the g_readytorun list?
       */

      for (rtrtcb = (FAR struct tcb_s *)g_readytorun.head;
           rtrtcb != NULL && !CPU_ISSET(cpu, &rtrtcb->affinity);
           rtrtcb = (FAR struct tcb_s *)rtrtcb->flink);

      /* Did we find a task in the g_readytorun list?  Which task should
       * we use?  We decide strictly by the priority of the two tasks:
       * Either (1) the task currently at the head of the g_assignedtasks[cpu]
       * list (nxttcb) or (2) the highest priority task from the
       * g_readytorun list with matching affinity (rtrtcb).
       */

      if (rtrtcb != NULL && rtrtcb->sched_priority >= nxttcb->sched_priority)
        {
          FAR struct tcb_s *tmptcb;

          /* The TCB at the head of the ready to run list has the higher
           * priority.  Remove that task from the head of the g_readytorun
           * list and add to the head of the g_assignedtasks[cpu] list.
           */

          tmptcb = (FAR struct tcb_s *)
            dq_remfirst((FAR dq_queue_t *)&g_readytorun);

          DEBUGASSERT(tmptcb == rtrtcb);

          dq_addfirst((FAR dq_entry_t *)tmptcb, tasklist);

          tmptcb->cpu = cpu;
          nxttcb = tmptcb;
        }

      /* Will pre-emption be disabled after the switch?  If the lockcount is
       * greater than zero, then this task/this CPU holds the scheduler lock.
       */

      if (nxttcb->lockcount > 0)
        {
          /* Yes... make sure that scheduling logic knows about this */

          spin_setbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
                      &g_cpu_schedlock);
        }
      else
        {
          /* No.. we may need to release our hold on the lock. */

          spin_clrbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
                      &g_cpu_schedlock);
        }

      /* Interrupts may be disabled after the switch.  If irqcount is greater
       * than zero, then this task/this CPU holds the IRQ lock
       */

      if (nxttcb->irqcount > 0)
        {
          /* Yes... make sure that scheduling logic knows about this */

          spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                      &g_cpu_irqlock);
        }
      else
        {
          /* No.. we may need to release our hold on the irq state. */

          spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                      &g_cpu_irqlock);
        }

      nxttcb->task_state = TSTATE_TASK_RUNNING;

      /* All done, restart the other CPU (if it was paused). */

      doswitch = true;
      if (cpu != me)
        {
          /* In this case we will not want to report a context switch to
           * this CPU.  Only the other CPU is affected.
           */

          DEBUGVERIFY(up_cpu_resume(cpu));
          doswitch = false;
        }
    }
  else
    {
      /* The task is not running.  Just remove its TCB from the ready-to-run
       * list.  In the SMP case this may be either the g_readytorun() or the
       * g_assignedtasks[cpu] list.
       */

      dq_rem((FAR dq_entry_t *)rtcb, tasklist);
    }

  /* Since the TCB is no longer in any list, it is now invalid */

  rtcb->task_state = TSTATE_TASK_INVALID;
  return doswitch;
}
Example 19
static int pty_ioctl(FAR struct file *filep, int cmd, unsigned long arg)
{
  FAR struct inode *inode;
  FAR struct pty_dev_s *dev;
  FAR struct pty_devpair_s *devpair;
  int ret;

  DEBUGASSERT(filep != NULL && filep->f_inode != NULL);
  inode   = filep->f_inode;
  dev     = inode->i_private;
  DEBUGASSERT(dev != NULL && dev->pd_devpair != NULL);
  devpair = dev->pd_devpair;

  /* Get exclusive access */

  pty_semtake(devpair);

  /* Handle IOCTL commands */

  switch (cmd)
    {
      /* PTY IOCTL commands would be handled here */

      case TIOCGPTN:    /* Get Pty Number (of pty-mux device): FAR int* */
        {
#ifdef CONFIG_DISABLE_PSEUDOFS_OPERATIONS
          ret = -ENOSYS;
#else
          FAR int *ptyno = (FAR int *)((uintptr_t)arg);
          if (ptyno == NULL)
            {
              ret = -EINVAL;
            }
          else
            {
              *ptyno = (int)devpair->pp_minor;
              ret = OK;
            }
#endif
        }
        break;

      case TIOCSPTLCK:  /* Lock/unlock Pty: int */
        {
          if (arg == 0)
            {
               int sval;

               /* Unlocking */

               sched_lock();
               devpair->pp_locked = false;

               /* Release any waiting threads */

               do
                 {
                   DEBUGVERIFY(sem_getvalue(&devpair->pp_slavesem, &sval));
                   if (sval < 0)
                     {
                       sem_post(&devpair->pp_slavesem);
                     }
                 }
               while (sval < 0);

               sched_unlock();
               ret = OK;
            }
          else
            {
              /* Locking */

               devpair->pp_locked = true;
               ret = OK;
            }
        }
        break;

      case TIOCGPTLCK:  /* Get Pty lock state: FAR int* */
        {
          FAR int *ptr = (FAR int *)((uintptr_t)arg);
          if (ptr == NULL)
            {
              ret = -EINVAL;
            }
          else
            {
              *ptr = (int)devpair->pp_locked;
              ret = OK;
            }
        }
        break;

      /* Any unrecognized IOCTL commands will be passed to the contained
       * pipe driver.
       */

      default:
        {
          ret = file_ioctl(&dev->pd_src, cmd, arg);
          if (ret >= 0 || ret == -ENOTTY)
            {
              ret = file_ioctl(&dev->pd_sink, cmd, arg);
            }
        }
        break;
    }

  pty_semgive(devpair);
  return ret;
}
Example 20
void imxrt_gpioirq_initialize(void)
{
  /* Disable all GPIO interrupts at the source */

  putreg32(0, IMXRT_GPIO1_IMR);
  putreg32(0, IMXRT_GPIO2_IMR);
  putreg32(0, IMXRT_GPIO3_IMR);
  putreg32(0, IMXRT_GPIO4_IMR);
  putreg32(0, IMXRT_GPIO5_IMR);

  /* Disable all unconfigured GPIO interrupts at the NVIC */

#ifndef CONFIG_IMXRT_GPIO1_0_15_IRQ
  up_disable_irq(IMXRT_IRQ_GPIO1_0_15);
#endif
#ifndef CONFIG_IMXRT_GPIO1_16_31_IRQ
  up_disable_irq(IMXRT_IRQ_GPIO1_16_31);
#endif
#ifndef CONFIG_IMXRT_GPIO2_0_15_IRQ
  up_disable_irq(IMXRT_IRQ_GPIO2_0_15);
#endif
#ifndef CONFIG_IMXRT_GPIO2_16_31_IRQ
  up_disable_irq(IMXRT_IRQ_GPIO2_16_31);
#endif
#ifndef CONFIG_IMXRT_GPIO3_0_15_IRQ
  up_disable_irq(IMXRT_IRQ_GPIO3_0_15);
#endif
#ifndef CONFIG_IMXRT_GPIO3_16_31_IRQ
  up_disable_irq(IMXRT_IRQ_GPIO3_16_31);
#endif
#ifndef CONFIG_IMXRT_GPIO4_0_15_IRQ
  up_disable_irq(IMXRT_IRQ_GPIO4_0_15);
#endif
#ifndef CONFIG_IMXRT_GPIO4_16_31_IRQ
  up_disable_irq(IMXRT_IRQ_GPIO4_16_31);
#endif
#ifndef CONFIG_IMXRT_GPIO5_0_15_IRQ
  up_disable_irq(IMXRT_IRQ_GPIO5_0_15);
#endif
#ifndef CONFIG_IMXRT_GPIO5_16_31_IRQ
  up_disable_irq(IMXRT_IRQ_GPIO5_16_31);
#endif

  /* Attach all configured GPIO interrupts and enable the interrupt at the
   * NVIC
   */

#ifdef CONFIG_IMXRT_GPIO1_0_15_IRQ
  DEBUGVERIFY(irq_attach(IMXRT_IRQ_GPIO1_0_15,
                         imxrt_gpio1_0_15_interrupt, NULL));
  up_enable_irq(IMXRT_IRQ_GPIO1_0_15);
#endif

#ifdef CONFIG_IMXRT_GPIO1_16_31_IRQ
  DEBUGVERIFY(irq_attach(IMXRT_IRQ_GPIO1_16_31,
                         imxrt_gpio1_16_31_interrupt, NULL));
  up_enable_irq(IMXRT_IRQ_GPIO1_16_31);
#endif

#ifdef CONFIG_IMXRT_GPIO2_0_15_IRQ
  DEBUGVERIFY(irq_attach(IMXRT_IRQ_GPIO2_0_15,
                         imxrt_gpio2_0_15_interrupt, NULL));
  up_enable_irq(IMXRT_IRQ_GPIO2_0_15);
#endif

#ifdef CONFIG_IMXRT_GPIO2_16_31_IRQ
  DEBUGVERIFY(irq_attach(IMXRT_IRQ_GPIO2_16_31,
                         imxrt_gpio2_16_31_interrupt, NULL));
  up_enable_irq(IMXRT_IRQ_GPIO2_16_31);
#endif

#ifdef CONFIG_IMXRT_GPIO3_0_15_IRQ
  DEBUGVERIFY(irq_attach(IMXRT_IRQ_GPIO3_0_15,
                         imxrt_gpio3_0_15_interrupt, NULL));
  up_enable_irq(IMXRT_IRQ_GPIO3_0_15);
#endif

#ifdef CONFIG_IMXRT_GPIO3_16_31_IRQ
  DEBUGVERIFY(irq_attach(IMXRT_IRQ_GPIO3_16_31,
                         imxrt_gpio3_16_31_interrupt, NULL));
  up_enable_irq(IMXRT_IRQ_GPIO3_16_31);
#endif

#ifdef CONFIG_IMXRT_GPIO4_0_15_IRQ
  DEBUGVERIFY(irq_attach(IMXRT_IRQ_GPIO4_0_15,
                         imxrt_gpio4_0_15_interrupt, NULL));
  up_enable_irq(IMXRT_IRQ_GPIO4_0_15);
#endif

#ifdef CONFIG_IMXRT_GPIO4_16_31_IRQ
  DEBUGVERIFY(irq_attach(IMXRT_IRQ_GPIO4_16_31,
                         imxrt_gpio4_16_31_interrupt, NULL));
  up_enable_irq(IMXRT_IRQ_GPIO4_16_31);
#endif

#ifdef CONFIG_IMXRT_GPIO5_0_15_IRQ
  DEBUGVERIFY(irq_attach(IMXRT_IRQ_GPIO5_0_15,
                         imxrt_gpio5_0_15_interrupt, NULL));
  up_enable_irq(IMXRT_IRQ_GPIO5_0_15);
#endif

#ifdef CONFIG_IMXRT_GPIO5_16_31_IRQ
  DEBUGVERIFY(irq_attach(IMXRT_IRQ_GPIO5_16_31,
                         imxrt_gpio5_16_31_interrupt, NULL));
  up_enable_irq(IMXRT_IRQ_GPIO5_16_31);
#endif
}
Example 21
static ssize_t cpuload_read(FAR struct file *filep, FAR char *buffer,
                           size_t buflen)
{
  FAR struct cpuload_file_s *attr;
  size_t linesize;
  off_t offset;
  ssize_t ret;

  fvdbg("buffer=%p buflen=%d\n", buffer, (int)buflen);

  /* Recover our private data from the struct file instance */

  attr = (FAR struct cpuload_file_s *)filep->f_priv;
  DEBUGASSERT(attr);

  /* If f_pos is zero, then sample the system time.  Otherwise, use
   * the cached system time from the previous read().  It is necessary to
   * save the cached value in case, for example, the user is reading
   * the time one byte at a time.  In that case, the time must remain
   * stable throughout the reads.
   */

  if (filep->f_pos == 0)
    {
      struct cpuload_s cpuload;
      uint32_t intpart;
      uint32_t fracpart;

      /* Sample the counts for the IDLE thread.  clock_cpuload should only
       * fail if the PID is not valid.  This, however, should never happen
       * for the IDLE thread.
       */

      DEBUGVERIFY(clock_cpuload(0, &cpuload));

      /* On the simulator, you may hit cpuload.total == 0, but probably never on
       * real hardware.
       */

      if (cpuload.total > 0)
        {
          uint32_t tmp;

          tmp      = 1000 - (1000 * cpuload.active) / cpuload.total;
          intpart  = tmp / 10;
          fracpart = tmp - 10 * intpart;
        }
      else
        {
          intpart  = 0;
          fracpart = 0;
        }

      linesize = snprintf(attr->line, CPULOAD_LINELEN, "%3d.%01d%%",
                          intpart, fracpart);

      /* Save the linesize in case we are re-entered with f_pos > 0 */

      attr->linesize = linesize;
    }

  /* Transfer the formatted load value to the user receive buffer */

  offset = filep->f_pos;
  ret    = procfs_memcpy(attr->line, attr->linesize, buffer, buflen, &offset);

  /* Update the file offset */

  if (ret > 0)
    {
      filep->f_pos += ret;
    }

  return ret;
}
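The load is formatted with scaled integer arithmetic: the counts for the IDLE thread give the idle fraction, so 1000 - (1000 * active) / total is the load in tenths of a percent, which is then split into an integer part and one fractional digit. A small worked example with made-up counter values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  /* Hypothetical sample: 750 of 1000 ticks were spent in the IDLE thread */

  uint32_t total  = 1000;
  uint32_t active = 750;

  uint32_t tmp      = 1000 - (1000 * active) / total;  /* 250 tenths of % */
  uint32_t intpart  = tmp / 10;                         /* 25 */
  uint32_t fracpart = tmp - 10 * intpart;               /* 0  */

  printf("%3u.%01u%%\n", intpart, fracpart);            /* prints " 25.0%" */
  return 0;
}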
Example 22
void work_process(FAR struct kwork_wqueue_s *wqueue, systime_t period, int wndx)
{
  volatile FAR struct work_s *work;
  worker_t  worker;
  irqstate_t flags;
  FAR void *arg;
  systime_t elapsed;
  systime_t remaining;
  systime_t stick;
  systime_t ctick;
  systime_t next;

  /* Then process queued work.  We need to keep interrupts disabled while
   * we process items in the work list.
   */

  next  = period;
  flags = irqsave();

  /* Get the time that we started this polling cycle in clock ticks. */

  stick = clock_systimer();

  /* And check each entry in the work queue.  Since we have disabled
   * interrupts we know:  (1) we will not be suspended unless we do
   * so ourselves, and (2) there will be no changes to the work queue
   */

  work = (FAR struct work_s *)wqueue->q.head;
  while (work)
    {
      /* Is this work ready?  It is ready if there is no delay or if
       * the delay has elapsed. qtime is the time that the work was added
       * to the work queue.  It will always be greater than or equal to
       * zero.  Therefore a delay of zero will always execute immediately.
       */

      ctick   = clock_systimer();
      elapsed = ctick - work->qtime;
      if (elapsed >= work->delay)
        {
          /* Remove the ready-to-execute work from the list */

          (void)dq_rem((struct dq_entry_s *)work, &wqueue->q);

          /* Extract the work description from the entry (in case the work
           * instance is re-used after it has been de-queued).
           */

          worker = work->worker;

          /* Check for a race condition where the work may be nullified
           * before it is removed from the queue.
           */

          if (worker != NULL)
            {
              /* Extract the work argument (before re-enabling interrupts) */

              arg = work->arg;

              /* Mark the work as no longer being queued */

              work->worker = NULL;

              /* Do the work.  Re-enable interrupts while the work is being
               * performed... we don't have any idea how long this will take!
               */

              irqrestore(flags);
              worker(arg);

              /* Now, unfortunately, since we re-enabled interrupts we don't
               * know the state of the work list and we will have to start
               * back at the head of the list.
               */

              flags = irqsave();
              work  = (FAR struct work_s *)wqueue->q.head;
            }
          else
            {
              /* Cancelled.. Just move to the next work in the list with
               * interrupts still disabled.
               */

              work = (FAR struct work_s *)work->dq.flink;
            }
        }
      else /* elapsed < work->delay */
        {
          /* This one is not ready.
           *
           * NOTE that elapsed is relative to the current time,
           * not the time of beginning of this queue processing pass.
           * So it may need an adjustment.
           */

          elapsed += (ctick - stick);
          if (elapsed > work->delay)
            {
              /* The delay has expired while we are processing */

              elapsed = work->delay;
            }

          /* Will it be ready before the next scheduled wakeup interval? */

          remaining = work->delay - elapsed;
          if (remaining < next)
            {
              /* Yes.. Then schedule to wake up when the work is ready */

              next = remaining;
            }

          /* Then try the next in the list. */

          work = (FAR struct work_s *)work->dq.flink;
        }
    }

#if defined(CONFIG_SCHED_LPWORK) && CONFIG_SCHED_LPNTHREADS > 0
  /* Value of zero for period means that we should wait indefinitely until
   * signalled.  This option is used only for the case where there are
   * multiple, low-priority worker threads.  In that case, only one of
   * the threads does the poll... the others simply wait.  In all other
   * cases period will be non-zero and equal to wqueue->delay.
   */

   if (period == 0)
     {
       sigset_t set;

       /* Wait indefinitely until signalled with SIGWORK */

       sigemptyset(&set);
       sigaddset(&set, SIGWORK);

       wqueue->worker[wndx].busy = false;
       DEBUGVERIFY(sigwaitinfo(&set, NULL));
       wqueue->worker[wndx].busy = true;
     }
   else
#endif
    {
      /* Get the delay (in clock ticks) since we started the sampling */

      elapsed = clock_systimer() - stick;
      if (elapsed < period && next > 0)
        {
          /* How much time would we need to delay to get to the end of the
           * sampling period?  The amount of time we delay should be the smaller
           * of the time to the end of the sampling period and the time to the
           * next work expiry.
           */

          remaining = period - elapsed;
          next      = MIN(next, remaining);

          /* Wait awhile to check the work list.  We will wait here until
           * either the time elapses or until we are awakened by a signal.
           * Interrupts will be re-enabled while we wait.
           */

          wqueue->worker[wndx].busy = false;
          usleep(next * USEC_PER_TICK);
          wqueue->worker[wndx].busy = true;
        }
    }

  irqrestore(flags);
}
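For context, items processed by this loop are normally placed on the queue with work_queue(). A hedged usage sketch, assuming the standard NuttX work queue API and the HPWORK queue identifier; the driver names below are illustrative only:

#include <nuttx/wqueue.h>

/* Hypothetical driver snippet: defer interrupt bottom-half processing to
 * the high-priority work queue with zero delay.
 */

static struct work_s g_mydev_work;

static void mydev_worker(FAR void *arg)
{
  /* Runs on the worker thread with interrupts enabled */
}

static void mydev_schedule_bottom_half(FAR void *priv)
{
  DEBUGVERIFY(work_queue(HPWORK, &g_mydev_work, mydev_worker, priv, 0));
}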
Example 23
int task_terminate(pid_t pid, bool nonblocking)
{
  FAR struct tcb_s *dtcb;
  FAR dq_queue_t *tasklist;
  irqstate_t flags;
#ifdef CONFIG_SMP
  int cpu;
#endif
  int ret;

  /* Make sure the task does not become ready-to-run while we are futzing
   * with its TCB.  Within the critical section, no new task may be started
   * or terminated (even in the SMP case).
   */

  flags = enter_critical_section();

  /* Find the TCB associated with the matching PID */

  dtcb = sched_gettcb(pid);
  if (!dtcb)
    {
      /* This PID does not correspond to any known task */

      ret = -ESRCH;
      goto errout_with_lock;
    }

  /* Verify our internal sanity */

#ifdef CONFIG_SMP
  DEBUGASSERT(dtcb->task_state < NUM_TASK_STATES);
#else
  DEBUGASSERT(dtcb->task_state != TSTATE_TASK_RUNNING &&
              dtcb->task_state < NUM_TASK_STATES);
#endif

  /* Remove the task from the OS's task lists.  We must be in a critical
   * section and the task must not be running to do this.
   */

#ifdef CONFIG_SMP
  /* In the SMP case, the thread may be running on another CPU.  If that is
   * the case, then we will pause the CPU that the thread is running on.
   */

  cpu = sched_cpu_pause(dtcb);

  /* Get the task list associated with the thread's state and CPU */

  tasklist = TLIST_HEAD(dtcb->task_state, cpu);
#else
  /* In the non-SMP case, we can be assured that the task to be terminated
   * is not running.  Get the task list associated with the task state.
   */

  tasklist = TLIST_HEAD(dtcb->task_state);
#endif

  /* Remove the task from the task list */

  dq_rem((FAR dq_entry_t *)dtcb, tasklist);
  dtcb->task_state = TSTATE_TASK_INVALID;

  /* At this point, the TCB should no longer be accessible to the system */

#ifdef CONFIG_SMP
  /* Resume the paused CPU (if any) */

  if (cpu >= 0)
    {
      /* I am not yet sure how to handle a failure here. */

      DEBUGVERIFY(up_cpu_resume(cpu));
    }
#endif /* CONFIG_SMP */

  leave_critical_section(flags);

  /* Perform common task termination logic (flushing streams, calling
   * functions registered by at_exit/on_exit, etc.).  We need to do
   * this as early as possible so that higher level clean-up logic
   * can run in a healthy tasking environment.
   *
   * In the case where the task exits via exit(), task_exithook()
   * may be called twice.
   *
   * I suppose EXIT_SUCCESS is an appropriate return value???
   */

  task_exithook(dtcb, EXIT_SUCCESS, nonblocking);

  /* Since all tasks pass through this function as the final step in their
   * exit sequence, this is an appropriate place to inform any instrumentation
   * layer that the task no longer exists.
   */

  sched_note_stop(dtcb);

  /* Deallocate its TCB */

  return sched_releasetcb(dtcb, dtcb->flags & TCB_FLAG_TTYPE_MASK);

errout_with_lock:
  leave_critical_section(flags);
  return ret;
}