Example 1
int work_hpthread(int argc, char *argv[])
{
  /* Loop forever */

  for (;;)
    {
      /* First, perform garbage collection.  This cleans up memory de-allocations
       * that were queued because they could not be freed in that execution
       * context (for example, if the memory was freed from an interrupt handler).
       * NOTE: If the work thread is disabled, this clean-up is performed by
       * the IDLE thread (at a very, very low priority).
       */

#ifndef CONFIG_SCHED_LPWORK
      sched_garbagecollection();
#endif

      /* Then process queued work.  We need to keep interrupts disabled while
       * we process items in the work list.
       */

      work_process(&g_work[HPWORK]);
    }

  return OK; /* To keep some compilers happy */
}
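For context, a minimal sketch of how a driver might put an item on the high-priority queue that this thread drains. It assumes the work_queue() interface from include/nuttx/wqueue.h; the my_irq_handler and my_deferred_work names, and the choice of a zero delay, are illustrative assumptions rather than code from this listing.

#include <nuttx/wqueue.h>

static struct work_s g_mywork;   /* Must persist until the worker has run */

static void my_deferred_work(FAR void *arg)
{
  /* Runs on the high-priority worker thread, outside of interrupt context */
}

static int my_irq_handler(int irq, FAR void *context)
{
  /* Defer the heavy lifting: a zero delay queues the item so the worker
   * handles it on its next pass through work_process().
   */

  return work_queue(HPWORK, &g_mywork, my_deferred_work, NULL, 0);
}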
Example 2
void os_start(void)
{
  int i;

  slldbg("Entry\n");

  /* Initialize all task lists */

  dq_init(&g_readytorun);
  dq_init(&g_pendingtasks);
  dq_init(&g_waitingforsemaphore);
#ifndef CONFIG_DISABLE_SIGNALS
  dq_init(&g_waitingforsignal);
#endif
#ifndef CONFIG_DISABLE_MQUEUE
  dq_init(&g_waitingformqnotfull);
  dq_init(&g_waitingformqnotempty);
#endif
#ifdef CONFIG_PAGING
  dq_init(&g_waitingforfill);
#endif
  dq_init(&g_inactivetasks);
  sq_init(&g_delayeddeallocations);

  /* Initialize the logic that determines unique process IDs. */

  g_lastpid = 0;
  for (i = 0; i < CONFIG_MAX_TASKS; i++)
    {
      g_pidhash[i].tcb = NULL;
      g_pidhash[i].pid = INVALID_PROCESS_ID;
    }

  /* Assign the process ID of ZERO to the idle task */

  g_pidhash[PIDHASH(0)].tcb = &g_idletcb;
  g_pidhash[PIDHASH(0)].pid = 0;

  /* Initialize a TCB for this thread of execution.  NOTE:  The default
   * value for most components of g_idletcb is zero.  The entire
   * structure is set to zero.  Then only the (potentially) non-zero
   * elements are initialized.  NOTE:  The idle task is the only task
   * that has pid == 0 and sched_priority == 0.
   */

  bzero((void*)&g_idletcb, sizeof(_TCB));
  g_idletcb.task_state = TSTATE_TASK_RUNNING;
  g_idletcb.entry.main = (main_t)os_start;

#if CONFIG_TASK_NAME_SIZE > 0
  strncpy(g_idletcb.name, g_idlename, CONFIG_TASK_NAME_SIZE-1);
  g_idletcb.argv[0] = g_idletcb.name;
#else
  g_idletcb.argv[0] = (char*)g_idlename;
#endif /* CONFIG_TASK_NAME_SIZE */

  /* Then add the idle task's TCB to the head of the ready to run list */

  dq_addfirst((FAR dq_entry_t*)&g_idletcb, (FAR dq_queue_t*)&g_readytorun);

  /* Initialize the processor-specific portion of the TCB */

  g_idletcb.flags = TCB_FLAG_TTYPE_KERNEL;
  up_initial_state(&g_idletcb);

  /* Initialize the semaphore facility (if in link).  This has to be done
   * very early because many subsystems depend upon fully functional
   * semaphores.
   */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (sem_initialize != NULL)
#endif
    {
      sem_initialize();
    }

  /* Initialize the memory manager */

#ifndef CONFIG_HEAP_BASE
  {
    FAR void *heap_start;
    size_t heap_size;
    up_allocate_heap(&heap_start, &heap_size);
    kmm_initialize(heap_start, heap_size);
  }
#else
  kmm_initialize((void*)CONFIG_HEAP_BASE, CONFIG_HEAP_SIZE);
#endif

  /* Initialize the interrupt handling subsystem (if included) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (irq_initialize != NULL)
#endif
    {
      irq_initialize();
    }

  /* Initialize the watchdog facility (if included in the link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (wd_initialize != NULL)
#endif
    {
      wd_initialize();
    }

  /* Initialize the POSIX timer facility (if included in the link) */

#ifndef CONFIG_DISABLE_CLOCK
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (clock_initialize != NULL)
#endif
    {
      clock_initialize();
    }
#endif

#ifndef CONFIG_DISABLE_POSIX_TIMERS
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (timer_initialize != NULL)
#endif
    {
      timer_initialize();
    }
#endif

  /* Initialize the signal facility (if in link) */

#ifndef CONFIG_DISABLE_SIGNALS
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (sig_initialize != NULL)
#endif
    {
      sig_initialize();
    }
#endif

  /* Initialize the named message queue facility (if in link) */

#ifndef CONFIG_DISABLE_MQUEUE
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (mq_initialize != NULL)
#endif
    {
      mq_initialize();
    }
#endif

  /* Initialize the thread-specific data facility (if in link) */

#ifndef CONFIG_DISABLE_PTHREAD
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (pthread_initialize != NULL)
#endif
    {
      pthread_initialize();
    }
#endif

  /* Initialize the file system (needed to support device drivers) */

#if CONFIG_NFILE_DESCRIPTORS > 0
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (fs_initialize != NULL)
#endif
    {
      fs_initialize();
    }
#endif

  /* Initialize the network system */

#ifdef CONFIG_NET
#if 0
  if (net_initialize != NULL)
#endif
    {
      net_initialize();
    }
#endif

  /* The processor specific details of running the operating system
   * will be handled here.  Such things as setting up interrupt
   * service routines and starting the clock are some of the things
   * that are different for each processor and hardware platform.
   */

  up_initialize();

  /* Initialize the C libraries (if included in the link).  This
   * is done last because the libraries may depend on the above.
   */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (lib_initialize != NULL)
#endif
    {
      lib_initialize();
    }

  /* Create stdout, stderr, stdin on the IDLE task.  These will be
   * inherited by all of the threads created by the IDLE task.
   */

  (void)sched_setupidlefiles(&g_idletcb);

  /* Create initial tasks and bring-up the system */

  (void)os_bringup();

  /* When control is returned to this point, the system is idle. */

  sdbg("Beginning Idle Loop\n");
  for (;;)
    {
      /* Perform garbage collection (if it is not being done by the worker
       * thread).  This cleans up memory de-allocations that were queued
       * because they could not be freed in that execution context (for
       * example, if the memory was freed from an interrupt handler).
       */

#ifndef CONFIG_SCHED_WORKQUEUE
      /* We must have exclusive access to the memory manager to do this
       * BUT the idle task cannot wait on a semaphore.  So we only do
       * the cleanup now if we can get the semaphore -- this should be
       * possible because if the IDLE thread is running, no other task is!
       */

      if (kmm_trysemaphore() == 0)
        {
          sched_garbagecollection();
          kmm_givesemaphore();
        }
#endif

      /* Perform any processor-specific idle state operations */

      up_idle();
    }
}
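A note on the g_pidhash[]/PIDHASH() pairing used above: it maps a pid to the slot that holds its TCB. The sketch below assumes the common convention that CONFIG_MAX_TASKS is a power of two and that PIDHASH() masks the pid into the table; the pidhash_s layout and the my_gettcb helper are assumptions for illustration, not definitions from this listing.

/* Assumed hash: with CONFIG_MAX_TASKS a power of two, the low bits of the
 * pid index the table directly.
 */

#define PIDHASH(pid)  ((pid) & (CONFIG_MAX_TASKS - 1))

struct pidhash_s        /* Hypothetical slot layout matching the usage above */
{
  FAR _TCB *tcb;        /* TCB assigned to this slot, or NULL */
  pid_t     pid;        /* pid occupying the slot, or INVALID_PROCESS_ID */
};

static FAR _TCB *my_gettcb(pid_t pid)
{
  FAR struct pidhash_s *slot = &g_pidhash[PIDHASH(pid)];

  /* The slot is valid only if the stored pid matches; otherwise it was
   * never assigned or has since been re-used by another task.
   */

  return (slot->pid == pid) ? slot->tcb : NULL;
}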
Example 3
int work_thread(int argc, char *argv[])
{
  volatile FAR struct work_s *work;
  worker_t  worker;
  FAR void *arg;
  uint32_t elapsed;
  uint32_t remaining;
  uint32_t next;
  int usec;
  irqstate_t flags;

  /* Loop forever */

  usec = CONFIG_SCHED_WORKPERIOD;
  flags = irqsave();
  for (;;)
    {
      /* Wait awhile to check the work list.  We will wait here until either
       * the time elapses or until we are awakened by a signal.
       */

      usleep(usec);
      irqrestore(flags);

      /* First, perform garbage collection.  This cleans up memory de-allocations
       * that were queued because they could not be freed in that execution
       * context (for example, if the memory was freed from an interrupt handler).
       * NOTE: If the work thread is disabled, this clean-up is performed by
       * the IDLE thread (at a very, very low priority).
       */

      sched_garbagecollection();

      /* Then process queued work.  We need to keep interrupts disabled while
       * we process items in the work list.
       */

      next  = CONFIG_SCHED_WORKPERIOD / USEC_PER_TICK;
      flags = irqsave();
      work  = (FAR struct work_s *)g_work.head;
      while (work)
        {
          /* Is this work ready?  It is ready if there is no delay or if
           * the delay has elapsed. qtime is the time that the work was added
           * to the work queue.  It will always be greater than or equal to
           * zero.  Therefore a delay of zero will always execute immediately.
           */

          elapsed = clock_systimer() - work->qtime;
          if (elapsed >= work->delay)
            {
              /* Remove the ready-to-execute work from the list */

              (void)dq_rem((struct dq_entry_s *)work, &g_work);

              /* Extract the work description from the entry (in case the work
               * instance is re-used after it has been de-queued).
               */

              worker = work->worker;
              arg    = work->arg;

              /* Mark the work as no longer being queued */

              work->worker = NULL;

              /* Do the work.  Re-enable interrupts while the work is being
               * performed... we don't have any idea how long that will take!
               */

              irqrestore(flags);
              worker(arg);

              /* Now, unfortunately, since we re-enabled interrupts we don't
               * know the state of the work list and we will have to start
               * back at the head of the list.
               */

              flags = irqsave();
              work  = (FAR struct work_s *)g_work.head;
            }
          else
            {
              /* This one is not ready.. will it be ready before the next
               * scheduled wakeup interval?
               */

              remaining = work->delay - elapsed;
              if (remaining < next)
                {
                  /* Yes.. Then schedule to wake up when the work is ready */

                  next = remaining;
                }
              
              /* Then try the next in the list. */

              work = (FAR struct work_s *)work->dq.flink;
            }
        }

      /* Now calculate the microsecond delay we should wait */

      usec = next * USEC_PER_TICK;
    }

  return OK; /* To keep some compilers happy */
}
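The loop above touches only a few fields of struct work_s; the layout it implies is sketched below. Field order and types are assumptions inferred from this usage, not copied from wqueue.h.

struct work_s
{
  struct dq_entry_s dq;       /* Links the entry into the g_work list */
  worker_t          worker;   /* Callback to run; NULL while not queued */
  FAR void         *arg;      /* Argument handed to the callback */
  uint32_t          qtime;    /* System tick when the work was queued */
  uint32_t          delay;    /* Ticks to wait before running the work */
};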
Example 4
void os_start(void)
{
  int i;

  slldbg("Entry\n");

  /* Initialize RTOS Data ***************************************************/
  /* Initialize all task lists */

  dq_init(&g_readytorun);
  dq_init(&g_pendingtasks);
  dq_init(&g_waitingforsemaphore);
#ifndef CONFIG_DISABLE_SIGNALS
  dq_init(&g_waitingforsignal);
#endif
#ifndef CONFIG_DISABLE_MQUEUE
  dq_init(&g_waitingformqnotfull);
  dq_init(&g_waitingformqnotempty);
#endif
#ifdef CONFIG_PAGING
  dq_init(&g_waitingforfill);
#endif
  dq_init(&g_inactivetasks);
  sq_init(&g_delayed_kufree);
#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
     defined(CONFIG_MM_KERNEL_HEAP)
  sq_init(&g_delayed_kfree);
#endif

  /* Initialize the logic that determines unique process IDs. */

  g_lastpid = 0;
  for (i = 0; i < CONFIG_MAX_TASKS; i++)
    {
      g_pidhash[i].tcb = NULL;
      g_pidhash[i].pid = INVALID_PROCESS_ID;
    }

  /* Assign the process ID of ZERO to the idle task */

  g_pidhash[PIDHASH(0)].tcb = &g_idletcb.cmn;
  g_pidhash[PIDHASH(0)].pid = 0;

  /* Initialize the IDLE task TCB *******************************************/
  /* Initialize a TCB for this thread of execution.  NOTE:  The default
   * value for most components of g_idletcb is zero.  The entire
   * structure is set to zero.  Then only the (potentially) non-zero
   * elements are initialized.  NOTE:  The idle task is the only task
   * that has pid == 0 and sched_priority == 0.
   */

  bzero((void*)&g_idletcb, sizeof(struct task_tcb_s));
  g_idletcb.cmn.task_state = TSTATE_TASK_RUNNING;
  g_idletcb.cmn.entry.main = (main_t)os_start;
  g_idletcb.cmn.flags      = TCB_FLAG_TTYPE_KERNEL;

  /* Set the IDLE task name */

#if CONFIG_TASK_NAME_SIZE > 0
  strncpy(g_idletcb.cmn.name, g_idlename, CONFIG_TASK_NAME_SIZE);
  g_idletcb.cmn.name[CONFIG_TASK_NAME_SIZE] = '\0';
#endif /* CONFIG_TASK_NAME_SIZE */

  /* Configure the task name in the argument list.  The IDLE task does
   * not really have an argument list, but this name is still useful
   * for things like the NSH PS command.
   *
   * In the kernel mode build, the arguments are saved on the task's stack
   * and there is no support for that yet.
   */

#if CONFIG_TASK_NAME_SIZE > 0
  g_idleargv[0]  = g_idletcb.cmn.name;
#else
  g_idleargv[0]  = (FAR char *)g_idlename;
#endif /* CONFIG_TASK_NAME_SIZE */
  g_idleargv[1]  = NULL;
  g_idletcb.argv = g_idleargv;

  /* Then add the idle task's TCB to the head of the ready to run list */

  dq_addfirst((FAR dq_entry_t*)&g_idletcb, (FAR dq_queue_t*)&g_readytorun);

  /* Initialize the processor-specific portion of the TCB */

  up_initial_state(&g_idletcb.cmn);

  /* Initialize RTOS facilities *********************************************/
  /* Initialize the semaphore facility.  This has to be done very early
   * because many subsystems depend upon fully functional semaphores.
   */

  sem_initialize();

#if defined(MM_KERNEL_USRHEAP_INIT) || defined(CONFIG_MM_KERNEL_HEAP) || defined(CONFIG_MM_PGALLOC)
  /* Initialize the memory manager */

  {
    FAR void *heap_start;
    size_t heap_size;

#ifdef MM_KERNEL_USRHEAP_INIT
    /* Get the user-mode heap from the platform specific code and configure
     * the user-mode memory allocator.
     */

    up_allocate_heap(&heap_start, &heap_size);
    kumm_initialize(heap_start, heap_size);
#endif

#ifdef CONFIG_MM_KERNEL_HEAP
    /* Get the kernel-mode heap from the platform specific code and configure
     * the kernel-mode memory allocator.
     */

    up_allocate_kheap(&heap_start, &heap_size);
    kmm_initialize(heap_start, heap_size);
#endif

#ifdef CONFIG_MM_PGALLOC
    /* If there is a page allocator in the configuration, then get the page
     * heap information from the platform-specific code and configure the
     * page allocator.
     */

    up_allocate_pgheap(&heap_start, &heap_size);
    mm_pginitialize(heap_start, heap_size);
#endif
  }
#endif

#if defined(CONFIG_SCHED_HAVE_PARENT) && defined(CONFIG_SCHED_CHILD_STATUS)
  /* Initialize tasking data structures */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (task_initialize != NULL)
#endif
    {
      task_initialize();
    }
#endif

  /* Initialize the interrupt handling subsystem (if included) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (irq_initialize != NULL)
#endif
    {
      irq_initialize();
    }

  /* Initialize the watchdog facility (if included in the link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (wd_initialize != NULL)
#endif
    {
      wd_initialize();
    }

  /* Initialize the POSIX timer facility (if included in the link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (clock_initialize != NULL)
#endif
    {
      clock_initialize();
    }

#ifndef CONFIG_DISABLE_POSIX_TIMERS
#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (timer_initialize != NULL)
#endif
    {
      timer_initialize();
    }
#endif

#ifndef CONFIG_DISABLE_SIGNALS
  /* Initialize the signal facility (if in link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (sig_initialize != NULL)
#endif
    {
      sig_initialize();
    }
#endif

#ifndef CONFIG_DISABLE_MQUEUE
  /* Initialize the named message queue facility (if in link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (mq_initialize != NULL)
#endif
    {
      mq_initialize();
    }
#endif

#ifndef CONFIG_DISABLE_PTHREAD
  /* Initialize the thread-specific data facility (if in link) */

#ifdef CONFIG_HAVE_WEAKFUNCTIONS
  if (pthread_initialize != NULL)
#endif
    {
      pthread_initialize();
    }
#endif

#if CONFIG_NFILE_DESCRIPTORS > 0
  /* Initialize the file system (needed to support device drivers) */

  fs_initialize();
#endif

#ifdef CONFIG_NET
  /* Initialize the networking system.  Network initialization is
   * performed in two steps:  (1) net_setup() initializes static
   * configuration of the network support.  This must be done prior
   * to registering network drivers by up_initialize().  This step
   * cannot rely upon any hardware-dependent features such as
   * timers or interrupts.
   */

  net_setup();
#endif

  /* The processor specific details of running the operating system
   * will be handled here.  Such things as setting up interrupt
   * service routines and starting the clock are some of the things
   * that are different for each processor and hardware platform.
   */

  up_initialize();

#ifdef CONFIG_NET
  /* Complete initialization of the networking system now that interrupts
   * and timers have been configured by up_initialize().
   */

  net_initialize();
#endif

#ifdef CONFIG_MM_SHM
  /* Initialize shared memory support */

  shm_initialize();
#endif

  /* Initialize the C libraries.  This is done last because the libraries
   * may depend on the above.
   */

  lib_initialize();

  /* IDLE Group Initialization **********************************************/
#ifdef HAVE_TASK_GROUP
  /* Allocate the IDLE group */

  DEBUGVERIFY(group_allocate(&g_idletcb, g_idletcb.cmn.flags));
#endif

#if CONFIG_NFILE_DESCRIPTORS > 0 || CONFIG_NSOCKET_DESCRIPTORS > 0
  /* Create stdout, stderr, stdin on the IDLE task.  These will be
   * inherited by all of the threads created by the IDLE task.
   */

  DEBUGVERIFY(group_setupidlefiles(&g_idletcb));
#endif

#ifdef HAVE_TASK_GROUP
  /* Complete initialization of the IDLE group.  Suppress retention
   * of child status in the IDLE group.
   */

  DEBUGVERIFY(group_initialize(&g_idletcb));
  g_idletcb.cmn.group->tg_flags = GROUP_FLAG_NOCLDWAIT;
#endif

  /* Bring Up the System ****************************************************/
  /* Create initial tasks and bring-up the system */

  DEBUGVERIFY(os_bringup());

  /* The IDLE Loop **********************************************************/
  /* When control is returned to this point, the system is idle. */

  sdbg("Beginning Idle Loop\n");
  for (;;)
    {
      /* Perform garbage collection (if it is not being done by the worker
       * thread).  This cleans up memory de-allocations that were queued
       * because they could not be freed in that execution context (for
       * example, if the memory was freed from an interrupt handler).
       */

#ifndef CONFIG_SCHED_WORKQUEUE
      /* We must have exclusive access to the memory manager to do this
       * BUT the idle task cannot wait on a semaphore.  So we only do
       * the cleanup now if we can get the semaphore -- this should be
       * possible because if the IDLE thread is running, no other task is!
       *
       * WARNING: This logic could have undesirable side-effects if priority
       * inheritance is enabled.  Imagine the possible issues if the
       * priority of the IDLE thread were to get boosted!  Moral: If you
       * use priority inheritance, then you should also enable the work
       * queue so that the garbage collection is done in a safer context.
       */

      if (kmm_trysemaphore() == 0)
        {
          sched_garbagecollection();
          kmm_givesemaphore();
        }
#endif

      /* Perform any processor-specific idle state operations */

      up_idle();
    }
}
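When MM_KERNEL_USRHEAP_INIT is defined, the user heap region comes from platform code through up_allocate_heap(). A hypothetical flat-build implementation is sketched below; the g_idle_topstack marker and the CONFIG_RAM_END symbol are assumptions borrowed from typical ports, not from this listing.

void up_allocate_heap(FAR void **heap_start, size_t *heap_size)
{
  /* Hand back everything between the end of the IDLE stack/static data
   * and the end of RAM as the user-mode heap.
   */

  *heap_start = (FAR void *)g_idle_topstack;
  *heap_size  = CONFIG_RAM_END - g_idle_topstack;
}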
Example 5
static int work_lpthread(int argc, char *argv[])
{
#if CONFIG_SCHED_LPNTHREADS > 0
  int wndx;
  pid_t me = getpid();
  int i;

  /* Find out the thread index by searching the workers in g_lpwork */

  for (wndx = 0, i = 0; i < CONFIG_SCHED_LPNTHREADS; i++)
    {
      if (g_lpwork.worker[i].pid == me)
        {
          wndx = i;
          break;
        }
    }

  DEBUGASSERT(i < CONFIG_SCHED_LPNTHREADS);
#endif

  /* Loop forever */

  for (;;)
    {
#if CONFIG_SCHED_LPNTHREADS > 0
      /* Thread 0 is special.  Only thread 0 performs periodic garbage collection */

      if (wndx > 0)
        {
          /* The other threads will perform work, waiting indefinitely until
           * signalled for the next work availability.
           *
           * The special value of zero for the poll period instructs work_process
           * to wait indefinitely until a signal is received.
           */

          work_process((FAR struct kwork_wqueue_s *)&g_lpwork, 0, wndx);
        }
      else
#endif
        {
          /* Perform garbage collection.  This cleans up memory de-allocations
           * that were queued because they could not be freed in that execution
           * context (for example, if the memory was freed from an interrupt handler).
           * NOTE: If the work thread is disabled, this clean-up is performed by
           * the IDLE thread (at a very, very low priority).
           *
           * In the event of multiple low priority threads, only the thread with
           * index == 0 will do the garbage collection.
           */

          sched_garbagecollection();

          /* Then process queued work.  work_process will not return until:
           * (1) there is no further work in the work queue, and (2) the polling
           * period provided by g_lpwork.delay expires.
           */

          work_process((FAR struct kwork_wqueue_s *)&g_lpwork, g_lpwork.delay, 0);
        }
    }

  return OK; /* To keep some compilers happy */
}
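The pid search at the top of work_lpthread implies a per-thread worker[] array inside the low-priority queue structure. A rough sketch of what that usage assumes follows; the struct and field names are guesses for illustration, not definitions from this listing.

struct kworker_s
{
  pid_t         pid;        /* Thread ID of one low-priority worker */
  volatile bool busy;       /* True while that worker runs a callback */
};

struct lp_wqueue_s          /* Assumed type of g_lpwork in this sketch */
{
  uint32_t          delay;  /* Polling period in system ticks */
  struct dq_queue_s q;      /* Pending struct work_s entries */
  struct kworker_s  worker[CONFIG_SCHED_LPNTHREADS];
};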