Example No. 1
int sig_dispatch(pid_t pid, FAR siginfo_t *info)
{
#ifdef HAVE_GROUP_MEMBERS

  FAR struct tcb_s *stcb;
  FAR struct task_group_s *group;

  /* Get the TCB associated with the pid */

  stcb = sched_gettcb(pid);
  if (stcb)
    {
      /* The task/thread associated with this PID is still active.  Get its
       * task group.
       */

      group = stcb->group;
    }
  else
    {
      /* The task/thread associated with this PID has exited.  In the normal
       * usage model, the PID should correspond to the PID of the task that
       * created the task group.  Try looking it up.
       */

      group = group_findbypid(pid);
    }

  /* Did we locate the group? */

  if (group)
    {
      /* Yes.. call group_signal() to send the signal to the correct group
       * member.
       */

      return group_signal(group, info);
    }
  else
    {
      return -ESRCH;
    }

#else

  FAR struct tcb_s *stcb;

  /* Get the TCB associated with the pid */

  stcb = sched_gettcb(pid);
  if (!stcb)
    {
      return -ESRCH;
    }

  return sig_tcbdispatch(stcb, info);

#endif
}
Example No. 2
static void sem_timeout(int argc, uint32_t pid)
{
  FAR struct tcb_s *wtcb;
  irqstate_t flags;

  /* Disable interrupts to avoid race conditions */

  flags = irqsave();

  /* Get the TCB associated with this pid.  It is possible that the task
   * may no longer be active when this watchdog goes off.
   */

  wtcb = sched_gettcb((pid_t)pid);

  /* It is also possible that an interrupt/context switch beat us to the
   * punch and already changed the task's state.
   */

  if (wtcb && wtcb->task_state == TSTATE_WAIT_SEM)
    {
      /* Cancel the semaphore wait */
 
      sem_waitirq(wtcb, ETIMEDOUT);
    }

  /* Interrupts may now be enabled. */

  irqrestore(flags);
}
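For context, here is a minimal sketch of how a timeout callback like sem_timeout() is typically armed before the caller blocks on the semaphore. It assumes the classic argc-style NuttX watchdog API (wd_create()/wd_start()/wd_cancel()/wd_delete() and the wdentry_t cast) and that the sketch lives in the same source file as the static sem_timeout() above; it is not taken from the example.

#include <nuttx/config.h>
#include <nuttx/wdog.h>
#include <semaphore.h>
#include <unistd.h>
#include <errno.h>

/* Hypothetical helper: wait on a semaphore with a timeout given in system
 * ticks.  If the wait does not complete in time, the watchdog fires and
 * sem_timeout() cancels the wait via sem_waitirq(ETIMEDOUT).
 */

static int sem_wait_with_timeout(sem_t *sem, int ticks)
{
  WDOG_ID wdog = wd_create();          /* Allocate a watchdog timer */
  int ret;

  if (wdog == NULL)
    {
      return -ENOMEM;
    }

  /* Arm the watchdog so that sem_timeout() runs if we block too long */

  (void)wd_start(wdog, ticks, (wdentry_t)sem_timeout, 1, (uint32_t)getpid());

  /* Block on the semaphore (returns 0, or -1 with errno on cancellation) */

  ret = sem_wait(sem);

  /* Stop the watchdog (a no-op if it already expired) and clean up */

  (void)wd_cancel(wdog);
  wd_delete(wdog);
  return ret;
}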
Example No. 3
static void mq_rcvtimeout(int argc, uint32_t pid)
{
  FAR struct tcb_s *wtcb;
  irqstate_t saved_state;

  /* Disable interrupts.  This is necessary because an interrupt handler may
   * attempt to send a message while we are doing this.
   */

  saved_state = irqsave();

  /* Get the TCB associated with this pid.  It is possible that the task
   * may no longer be active when this watchdog goes off.
   */

  wtcb = sched_gettcb((pid_t)pid);

  /* It is also possible that an interrupt/context switch beat us to the
   * punch and already changed the task's state.
   */

  if (wtcb && wtcb->task_state == TSTATE_WAIT_MQNOTEMPTY)
    {
      /* Restart the task with a timeout error */

      mq_waitirq(wtcb, ETIMEDOUT);
    }

  /* Interrupts may now be re-enabled. */

  irqrestore(saved_state);
}
Example No. 4
int sched_getscheduler(pid_t pid)
{
  struct tcb_s *tcb;

  /* Verify that the pid corresponds to a real task */

  if (!pid)
    {
      tcb = (struct tcb_s*)g_readytorun.head;
    }
  else
    {
      tcb = sched_gettcb(pid);
    }

  if (!tcb)
    {
      set_errno(ESRCH);
      return ERROR;
    }
#if CONFIG_RR_INTERVAL > 0
  else if ((tcb->flags & TCB_FLAG_ROUND_ROBIN) != 0)
    {
      return SCHED_RR;
    }
#endif
  else
    {
      return SCHED_FIFO;
    }
}
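A minimal usage sketch of the function above, assuming it is called from application code; the helper name is made up. A pid of zero selects the calling task.

#include <sched.h>
#include <stdio.h>

/* Hypothetical usage: query the scheduling policy of the calling task */

static void show_my_policy(void)
{
  int policy = sched_getscheduler(0);

  if (policy < 0)
    {
      printf("sched_getscheduler failed\n");
    }
  else
    {
      printf("policy: %s\n", policy == SCHED_RR ? "SCHED_RR" : "SCHED_FIFO");
    }
}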
Example No. 5
void cpuload_initialize_once()
{
	system_load.start_time = hrt_absolute_time();
	int i;

	for (i = 0; i < CONFIG_MAX_TASKS; i++) {
		system_load.tasks[i].valid = false;
	}

	int static_tasks_count = 2;	// there are at least 2 threads that should be initialized statically - "idle" and "init"

#ifdef CONFIG_PAGING
	static_tasks_count++;	// include paging thread in initialization
#endif /* CONFIG_PAGING */
#if CONFIG_SCHED_WORKQUEUE
	static_tasks_count++;	// include high priority work0 thread in initialization
#endif /* CONFIG_SCHED_WORKQUEUE */
#if CONFIG_SCHED_LPWORK
	static_tasks_count++;	// include low priority work1 thread in initialization
#endif /* CONFIG_SCHED_LPWORK */

	// perform static initialization of "system" threads
	for (system_load.total_count = 0; system_load.total_count < static_tasks_count; system_load.total_count++) {
		system_load.tasks[system_load.total_count].total_runtime = 0;
		system_load.tasks[system_load.total_count].curr_start_time = 0;
		system_load.tasks[system_load.total_count].tcb = sched_gettcb(
					system_load.total_count);	// it is assumed that these static threads have consecutive PIDs
		system_load.tasks[system_load.total_count].valid = true;
	}
}
Example No. 6
int sched_getscheduler(pid_t pid)
{
  FAR struct tcb_s *tcb;
  int policy;

  /* Verify that the PID corresponds to a real task */

  if (!pid)
    {
      tcb = (struct tcb_s*)g_readytorun.head;
    }
  else
    {
      tcb = sched_gettcb(pid);
    }

  if (!tcb)
    {
      set_errno(ESRCH);
      return ERROR;
    }

  /* Return the scheduling policy from the TCB.  NOTE that the user-
   * interpretable values are 1 based; the TCB values are zero-based.
   */

  policy = (tcb->flags & TCB_FLAG_POLICY_MASK) >> TCB_FLAG_POLICY_SHIFT;
  return policy + 1;
}
Example No. 7
static void pthread_condtimedout(int argc, uint32_t pid, uint32_t signo)
{
#ifdef HAVE_GROUP_MEMBERS

  FAR struct tcb_s *tcb;
  siginfo_t info;

  /* The logic below is equivalent to sigqueue(), but uses sig_tcbdispatch()
   * instead of sig_dispatch().  This avoids the group signal delivery logic
   * and assures, instead, that the signal is delivered specifically to this
   * thread that is known to be waiting on the signal.
   */

  /* Get the waiting TCB.  sched_gettcb() might return NULL if the task has
   * exited for some reason.
   */

  tcb = sched_gettcb((pid_t)pid);
  if (tcb)
    {
      /* Create the siginfo structure */

      info.si_signo           = signo;
      info.si_code            = SI_QUEUE;
      info.si_errno           = ETIMEDOUT;
      info.si_value.sival_ptr = NULL;
#ifdef CONFIG_SCHED_HAVE_PARENT
      info.si_pid             = (pid_t)pid;
      info.si_status          = OK;
#endif

      /* Process the receipt of the signal.  The scheduler is not locked as
       * is normally the case when this function is called because we are in
       * a watchdog timer interrupt handler.
       */

      (void)sig_tcbdispatch(tcb, &info);
    }

#else /* HAVE_GROUP_MEMBERS */

  /* Things are a little easier if there are no group members.  We can just
   * use sigqueue().
   */

#ifdef CONFIG_CAN_PASS_STRUCTS
  union sigval value;

  /* Send the specified signal to the specified task. */

  value.sival_ptr = NULL;
  (void)sigqueue((int)pid, (int)signo, value);
#else
  (void)sigqueue((int)pid, (int)signo, NULL);
#endif

#endif /* HAVE_GROUP_MEMBERS */
}
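For reference, a hedged application-level sketch of the pthread_cond_timedwait() call whose watchdog expiry is serviced by pthread_condtimedout() above. The mutex, condition variable, flag, and two-second timeout are made-up illustration values.

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

/* Hypothetical waiter: if the condition is not signalled within two
 * seconds, the watchdog expires, pthread_condtimedout() delivers the
 * timeout signal, and the wait returns ETIMEDOUT.
 */

static int wait_for_flag(pthread_mutex_t *lock, pthread_cond_t *cond,
                         volatile bool *flag)
{
  struct timespec abstime;
  int ret = 0;

  clock_gettime(CLOCK_REALTIME, &abstime);
  abstime.tv_sec += 2;          /* Give up after two seconds */

  pthread_mutex_lock(lock);
  while (!*flag && ret == 0)
    {
      ret = pthread_cond_timedwait(cond, lock, &abstime);
    }

  pthread_mutex_unlock(lock);
  return ret;                   /* 0 on success, ETIMEDOUT on timeout */
}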
Example No. 8
void pg_miss(void)
{
  FAR struct tcb_s *ftcb = (FAR struct tcb_s*)g_readytorun.head;
  FAR struct tcb_s *wtcb;

  /* Sanity checking
   *
   * ASSERT if the currently executing task is the page fill worker thread.
   * The page fill worker thread is how the page fault is resolved and
   * all logic associated with the page fill worker must be "locked" and
   * always present in memory.
   */

  pglldbg("Blocking TCB: %p PID: %d\n", ftcb, ftcb->pid);
  DEBUGASSERT(g_pgworker != ftcb->pid);

  /* Block the currently executing task
   * - Call up_block_task() to block the task at the head of the ready-
   *   to-run list.  This should cause an interrupt level context switch
   *   to the next highest priority task.
   * - The blocked task will be marked with state TSTATE_WAIT_PAGEFILL
   *   and will be retained in the g_waitingforfill prioritized task list.
   */

  up_block_task(ftcb, TSTATE_WAIT_PAGEFILL);

  /* Boost the page fill worker thread priority.
   * - Check the priority of the task at the head of the g_waitingforfill
   *   list.  If the priority of that task is higher than the current
   *   priority of the page fill worker thread, then boost the priority
   *   of the page fill worker thread to that priority.
   */

  wtcb = sched_gettcb(g_pgworker);
  DEBUGASSERT(wtcb != NULL);

  if (wtcb->sched_priority < ftcb->sched_priority)
    {
      /* Reprioritize the page fill worker thread */

      pgllvdbg("New worker priority. %d->%d\n",
               wtcb->sched_priority, ftcb->sched_priority);
      sched_setpriority(wtcb, ftcb->sched_priority);
    }

  /* Signal the page fill worker thread.
   * - Is there a page fill pending?  If not then signal the worker
   *   thread to start working on the queued page fill requests.
   */

  if (!g_pftcb)
    {
      pglldbg("Signaling worker. PID: %d\n", g_pgworker);
      kill(g_pgworker, SIGWORK);
    }
}
Example No. 9
static unsigned long prv_fetch_taskaddr(int pid)
{
	struct tcb_s *tcbptr = sched_gettcb(pid);
	if (tcbptr != NULL) {
		entry_t e = tcbptr->entry;
		if ((tcbptr->flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD) {
			return (unsigned long)e.pthread;
		} else {
			return (unsigned long)e.main;
		}
	}
	return 0;
}
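A small usage sketch of the helper above; since prv_fetch_taskaddr() is static, this assumes the caller lives in the same source file, and the helper name show_my_entry() is hypothetical.

#include <stdio.h>
#include <unistd.h>

/* Hypothetical usage: print the entry address of the calling task */

static void show_my_entry(void)
{
  unsigned long addr = prv_fetch_taskaddr((int)getpid());

  if (addr != 0)
    {
      printf("task entry point: %#lx\n", addr);
    }
}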
Example No. 10
int sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
#if CONFIG_RR_INTERVAL > 0
  FAR struct tcb_s *rrtcb;

  /* If pid is zero, the timeslice for the calling process is written
   * into 'interval.'
   */

  if (!pid)
    {
      rrtcb = (FAR struct tcb_s*)g_readytorun.head;
    }

  /* Return a special error code on invalid PID */

  else if (pid < 0)
    {
      set_errno(EINVAL);
      return ERROR;
    }

  /* Otherwise, lookup the TCB associated with this PID */

  else
    {
      rrtcb = sched_gettcb(pid);
      if (!rrtcb)
        {
          set_errno(ESRCH);
          return ERROR;
        }
    }

  if (!interval)
    {
      set_errno(EFAULT);
      return ERROR;
    }

  /* Convert the timeslice value from ticks to timespec */

  interval->tv_sec  =  CONFIG_RR_INTERVAL / MSEC_PER_SEC;
  interval->tv_nsec = (CONFIG_RR_INTERVAL % MSEC_PER_SEC) * NSEC_PER_MSEC;

  return OK;
#else
  set_errno(ENOSYS);
  return ERROR;
#endif
}
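A minimal usage sketch of the function above; the wrapper name is made up. Note that it fails with ENOSYS unless CONFIG_RR_INTERVAL is greater than zero.

#include <sched.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical usage: read the round-robin timeslice of the calling task
 * (pid == 0).
 */

static void show_timeslice(void)
{
  struct timespec slice;

  if (sched_rr_get_interval(0, &slice) == 0)
    {
      printf("timeslice: %ld.%09ld s\n",
             (long)slice.tv_sec, (long)slice.tv_nsec);
    }
}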
Example No. 11
static void pg_callback(FAR struct tcb_s *tcb, int result)
{
  /* Verify that g_pftcb is non-NULL */

  pgllvdbg("g_pftcb: %p\n", g_pftcb);
  if (g_pftcb)
    {
      FAR struct tcb_s *htcb = (FAR struct tcb_s *)g_waitingforfill.head;
      FAR struct tcb_s *wtcb = sched_gettcb(g_pgworker);

      /* Find the higher priority between the task waiting for the fill to
       * complete in g_pftcb and the task waiting at the head of the
       * g_waitingforfill list.  That will be the priority of the highest
       * priority task waiting for a fill.
       */

      int priority = g_pftcb->sched_priority;
      if (htcb && priority < htcb->sched_priority)
        {
          priority = htcb->sched_priority;
        }

      /* If this priority is higher than the current priority of the page
       * fill worker thread, then boost the worker thread's priority to that
       * level.  Thus, the page fill worker thread will always run at the
       * priority of the highest priority task that is waiting for a fill.
       */

      if (priority > wtcb->sched_priority)
        {
          pgllvdbg("New worker priority. %d->%d\n",
                   wtcb->sched_priority, priority);
          sched_setpriority(wtcb, priority);
        }

      /* Save the page fill result (don't permit the value -EBUSY) */

      if (result == -EBUSY)
        {
          result = -ENOSYS;
        }

      g_fillresult = result;
    }

  /* Signal the page fill worker thread (in any event) */

  pglldbg("Signaling worker. PID: %d\n", g_pgworker);
  kill(g_pgworker, SIGWORK);
}
Example No. 12
static int taskmgr_task_init(pid_t pid)
{
	struct tcb_s *tcb;
	struct sigaction act;

	tcb = sched_gettcb(pid);
	if (!tcb) {
		tmdbg("[TM] tcb is invalid. pid = %d.\n", pid);
		return ERROR;
	}

	memset(&act, '\0', sizeof(act));
	act.sa_handler = (_sa_handler_t)&taskmgr_pause_handler;

	return sig_sethandler(tcb, SIGTM_PAUSE, &act);
}
Example No. 13
int sched_getparam (pid_t pid, struct sched_param * param)
{
  FAR _TCB *rtcb;
  FAR _TCB *tcb;
  int ret = OK;

  if (!param)
    {
      return ERROR;
    }

  /* Check if the task to restart is the calling task */

  rtcb = (FAR _TCB*)g_readytorun.head;
  if ((pid == 0) || (pid == rtcb->pid))
    {
       /* Return the priority of the calling task. */

       param->sched_priority = (int)rtcb->sched_priority;
    }

  /* This pid is not for the calling task; we will have to look it up */

  else
    {
      /* Get the TCB associated with this pid */

      sched_lock();
      tcb = sched_gettcb(pid);
      if (!tcb)
        {
          /* This pid does not correspond to any known task */

          ret = ERROR;
        }
      else
        {
          /* Return the priority of the task */

          param->sched_priority = (int)tcb->sched_priority;
        }
      sched_unlock();
    }

  return ret;
}
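A minimal usage sketch of sched_getparam() as implemented above; the wrapper name is hypothetical, and pid == 0 selects the calling task.

#include <sched.h>
#include <stdio.h>

/* Hypothetical usage: read the priority of the calling task */

static void show_my_priority(void)
{
  struct sched_param param;

  if (sched_getparam(0, &param) == 0)
    {
      printf("my priority: %d\n", param.sched_priority);
    }
}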
Example No. 14
static void *setschedprio_test_thread(void *param)
{
	volatile struct tcb_s *set_tcb;

	/* If this thread's priority is changed, we can terminate the loop */
	while (1) {
		set_tcb = sched_gettcb((pid_t)pthread_self());
		if (set_tcb != NULL && set_tcb->sched_priority == 101) {
			break;
		}
		sleep(1);
	}

	check_prio = set_tcb->sched_priority;
	pthread_exit(0);
	return NULL;
}
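A hedged sketch of the driver side of this test, not taken from the test suite: it raises the worker's priority to 101 so the polling loop above terminates, assuming the standard POSIX pthread_setschedprio() interface is available.

#include <pthread.h>

/* Hypothetical test driver: create the worker, bump its priority to 101
 * so setschedprio_test_thread() observes the change, then join it.
 */

static void run_setschedprio_test(void)
{
  pthread_t worker;

  pthread_create(&worker, NULL, setschedprio_test_thread, NULL);
  pthread_setschedprio(worker, 101);
  pthread_join(worker, NULL);
}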
Example No. 15
static inline void task_signalparent(FAR struct tcb_s *ctcb, int status)
{
#ifdef HAVE_GROUP_MEMBERS
  DEBUGASSERT(ctcb && ctcb->group);

  /* Keep things stationary throughout the following */

  sched_lock();

  /* Send SIGCHLD to all members of the parent's task group */

  task_sigchild(ctcb->group->tg_pgid, ctcb, status);
  sched_unlock();
#else
  FAR struct tcb_s *ptcb;

  /* Keep things stationary throughout the following */

  sched_lock();

  /* Get the TCB of the receiving, parent task.  We do this early to
   * handle multiple calls to task_signalparent.  ctcb->ppid is set to an
   * invalid value below and the following call will fail if we are
   * called again.
   */

  ptcb = sched_gettcb(ctcb->ppid);
  if (!ptcb)
    {
      /* The parent no longer exists... bail */

      sched_unlock();
      return;
    }

  /* Send SIGCHLD to all members of the parent's task group */

  task_sigchild(ptcb, ctcb, status);

  /* Forget who our parent was */

  ctcb->ppid = INVALID_PROCESS_ID;
  sched_unlock();
#endif
}
Example No. 16
int sig_mqnotempty (int pid, int signo, void *sival_ptr)
#endif
{
  FAR _TCB *stcb;
  siginfo_t info;
  int       ret = ERROR;

  sched_lock();

  /* Get the TCB of the receiving task */

  stcb = sched_gettcb(pid);

#ifdef CONFIG_CAN_PASS_STRUCTS
  sdbg("TCB=%p signo=%d value=%d\n", stcb, signo, value.sival_int);
#else
  sdbg("TCB=%p signo=%d sival_ptr=%p\n", stcb, signo, sival_ptr);
#endif

  /* Create the siginfo structure */

  info.si_signo           = signo;
  info.si_code            = SI_MESGQ;
#ifdef CONFIG_CAN_PASS_STRUCTS
  info.si_value           = value;
#else
  info.si_value.sival_ptr = sival_ptr;
#endif

  /* Verify that we can perform the signalling operation */

  if ((stcb) && (GOOD_SIGNO(signo)))
   {
     /* Process the receipt of the signal */
     ret = sig_received(stcb, &info);
   }

  sched_unlock();
  return ret;
}
Example No. 17
int waitid(idtype_t idtype, id_t id, siginfo_t *info, int options)
{
  FAR _TCB *rtcb = (FAR _TCB *)g_readytorun.head;
  sigset_t sigset;
  int err;
  int ret;

  /* MISSING LOGIC:  If WNOHANG is provided in the options, then this function
   * should return immediately.  However, there is no mechanism available now
   * to know if the thread has children:  The children remember their parents
   * (if CONFIG_SCHED_HAVE_PARENT) but the parents do not remember their
   * children.
   */

  /* None of the options are supported except for WEXITED (which must be
   * provided).  Currently SIGCHLD always reports CLD_EXITED so we cannot
   * distinguish any other events.
   */

#ifdef CONFIG_DEBUG
  if (options != WEXITED)
    {
      set_errno(ENOSYS);
      return ERROR;
    }
#endif

  /* Create a signal set that contains only SIGCHLD */

  (void)sigemptyset(&sigset);
  (void)sigaddset(&sigset, SIGCHLD);

  /* Disable pre-emption so that nothing changes while the loop executes */

  sched_lock();

  /* Verify that this task actually has children and that the requested
   * TCB is actually a child of this task.
   */

  if (rtcb->nchildren == 0)
    {
      err = ECHILD;
      goto errout_with_errno;
    }
  else if (idtype == P_PID)
    {
     /* Get the TCB corresponding to this PID and make sure it is our child. */

      FAR _TCB *ctcb = sched_gettcb((pid_t)id);
      if (!ctcb || ctcb->parent != rtcb->pid)
        {
          err = ECHILD;
          goto errout_with_errno;
        }
    }

  /* Loop until the child that we are waiting for dies */

  for (;;)
    {
      /* Check if the task has already died. Signals are not queued in
       * NuttX.  So a possibility is that the child has died and we
       * missed the death of child signal (we got some other signal
       * instead).
       */

      if (rtcb->nchildren == 0 ||
          (idtype == P_PID && (ret = kill((pid_t)id, 0)) < 0))
        {
          /* We know that the child task was running okay when we started,
           * so we must have lost the signal.  What can we do?
           * Let's claim we were interrupted by a signal.
           */

          err = EINTR;
          goto errout_with_errno;
        }

      /* Wait for any death-of-child signal */

      ret = sigwaitinfo(&sigset, info);
      if (ret < 0)
        {
          goto errout;
        }

      /* Make sure this was SIGCHLD */

      if (info->si_signo == SIGCHLD)
        {
          /* Yes.. Are we waiting for the death of a specific child? */

          if (idtype == P_PID)
            {
              /* Was this the death of the thread we were waiting for? */

              if (info->si_pid == (pid_t)id)
                {
                   /* Yes... return success */

                   break;
                }
            }

          /* Are we waiting for any child to change state? */

          else if (idtype == P_ALL)
            {
              /* Return success */

              break;
            }

          /* Other ID types are not supported */

          else /* if (idtype == P_PGID) */
            {
              set_errno(ENOSYS);
              goto errout;
            }
        }
    }

  sched_unlock();
  return OK;

errout_with_errno:
  set_errno(err);
errout:
  sched_unlock();
  return ERROR;
}
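A minimal usage sketch of the waitid() implementation above; only WEXITED is supported, and the child PID and wrapper name are made-up illustration values. The si_status field is filled in when CONFIG_SCHED_HAVE_PARENT is enabled.

#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>

/* Hypothetical usage: wait for a specific child to exit */

static void reap_child(pid_t child_pid)
{
  siginfo_t info;

  if (waitid(P_PID, (id_t)child_pid, &info, WEXITED) == 0)
    {
      printf("child %d exited, status %d\n",
             (int)info.si_pid, info.si_status);
    }
}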
Example No. 18
int task_restart(pid_t pid)
{
  FAR _TCB  *rtcb;
  FAR _TCB  *tcb;
  int        status;
  irqstate_t state;

  /* Make sure this task does not become ready-to-run while
   * we are futzing with its TCB
   */

  sched_lock();

  /* Check if the task to restart is the calling task */

  rtcb = (FAR _TCB*)g_readytorun.head;
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      sched_unlock();
      return ERROR;
    }

  /* We are restarting some other task than ourselves */

  else
    {
      /* Look up the TCB associated with the matching pid */

      tcb = sched_gettcb(pid);
      if (!tcb)
        {
          /* There is no TCB with this pid */

          sched_unlock();
          return ERROR;
        }

      /* Remove the TCB from whatever list it is in.  At this point, the
       * TCB should no longer be accessible to the system 
       */

      state = irqsave();
      dq_rem((FAR dq_entry_t*)tcb, (dq_queue_t*)g_tasklisttable[tcb->task_state].list);
      tcb->task_state = TSTATE_TASK_INVALID;
      irqrestore(state);

      /* Deallocate anything left in the TCB's queues */

      sig_cleanup(tcb); /* Deallocate Signal lists */

      /* Reset the current task priority  */

      tcb->sched_priority = tcb->init_priority;

      /* Reset the base task priority and the number of pending reprioritizations */

#ifdef CONFIG_PRIORITY_INHERITANCE
      tcb->base_priority  = tcb->init_priority;
#  if CONFIG_SEM_NNESTPRIO > 0
      tcb->npend_reprio   = 0;
#  endif
#endif

      /* Re-initialize the processor-specific portion of the TCB
       * This will reset the entry point and the start-up parameters
       */

      up_initial_state(tcb);

      /* Add the task to the inactive task list */

      dq_addfirst((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
      tcb->task_state = TSTATE_TASK_INACTIVE;

      /* Activate the task */

      status = task_activate(tcb);
      if (status != OK)
        {
          dq_rem((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
          sched_releasetcb(tcb);
          return ERROR;
        }
    }

  sched_unlock();
  return OK;
}
Example No. 19
int prctl(int option, ...)
{
  va_list ap;
  int err;

  va_start(ap, option);
  switch (option)
    {
    case PR_SET_NAME:
    case PR_GET_NAME:
#if CONFIG_TASK_NAME_SIZE > 0
      {
        /* Get the prctl arguments */

        char     *name = va_arg(ap, char *);
        int       pid  = va_arg(ap, int);
        FAR _TCB *tcb;

        /* Get the TCB associated with the PID (handling the special case of
         * pid==0 meaning "this thread")
         */

        if (!pid)
          {
            tcb = (FAR _TCB *)g_readytorun.head;
          }
        else
          {
            tcb = sched_gettcb(pid);
          }

        /* An invalid pid will be indicated by a NULL TCB returned from
         * sched_gettcb()
         */

        if (!tcb)
          {
            sdbg("Pid does not correspond to a task: %d\n", pid);
            err = ESRCH;
            goto errout;
          }

        /* A pointer to the task name storage must also be provided */

        if (!name)
          {
            sdbg("No name provide\n");
            err = EFAULT;
            goto errout;
          }

        /* Now get or set the task name */

        if (option == PR_SET_NAME)
          {
            /* tcb->name may not be null-terminated */

            strncpy(tcb->name, name, CONFIG_TASK_NAME_SIZE);
          }
        else
          {
            /* The returned value will be null-terminated, truncating if necessary */

            strncpy(name, tcb->name, CONFIG_TASK_NAME_SIZE-1);
            name[CONFIG_TASK_NAME_SIZE-1] = '\0';
          }
      }
      break;
#else
      sdbg("Option not enabled: %d\n", option);
      err = ENOSYS;
      goto errout;
#endif

    default:
      sdbg("Unrecognized option: %d\n", option);
      err = EINVAL;
      goto errout;
    }

  va_end(ap);
  return OK;

errout:
  va_end(ap);
  set_errno(err);
  return ERROR;
}
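A minimal usage sketch matching the argument order read above, prctl(option, name, pid), with pid == 0 meaning the calling thread; the thread name "worker" and the wrapper are made up, and CONFIG_TASK_NAME_SIZE must be greater than zero.

#include <nuttx/config.h>
#include <sys/prctl.h>
#include <stdio.h>

/* Hypothetical usage: rename the calling thread, then read the name back */

static void rename_self(void)
{
  char name[CONFIG_TASK_NAME_SIZE + 1];

  (void)prctl(PR_SET_NAME, "worker", 0);    /* Set this thread's name */

  if (prctl(PR_GET_NAME, name, 0) == 0)     /* Read the name back     */
    {
      printf("thread name: %s\n", name);
    }
}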
Example No. 20
int task_reparent(pid_t ppid, pid_t chpid)
{
#ifdef CONFIG_SCHED_CHILD_STATUS
  FAR struct child_status_s *child;
#endif
  struct tcb_s *ptcb;
  struct tcb_s *chtcb;
  struct tcb_s *otcb;
  pid_t opid;
  irqstate_t flags;
  int ret;

  /* Disable interrupts so that nothing can change in the relationship of
   * the three tasks:  Child, current parent, and new parent.
   */

  flags = irqsave();

  /* Get the child task's TCB (chtcb) */

  chtcb = sched_gettcb(chpid);
  if (!chtcb)
    {
      ret = -ECHILD;
      goto errout_with_ints;
    }

  /* Get the PID of the child task's parent (opid) */

  opid = chtcb->ppid;

  /* Get the TCB of the child task's parent (otcb) */

  otcb = sched_gettcb(opid);
  if (!otcb)
    {
      ret = -ESRCH;
      goto errout_with_ints;
    }

  /* If the new parent task's PID (ppid) is zero, then the grandparent will
   * be the new parent, i.e., the parent of the current parent task.
   */

  if (ppid == 0)
    {
      ppid = otcb->ppid;
    }

  /* Get the new parent task's TCB (ptcb) */

  ptcb = sched_gettcb(ppid);
  if (!ptcb)
    {
      ret = -ESRCH;
      goto errout_with_ints;
    }

  /* Then reparent the child */

  chtcb->ppid = ppid;  /* The task specified by ppid is the new parent */

#ifdef CONFIG_SCHED_CHILD_STATUS
  /* Remove the child status entry from old parent TCB */

  child = group_removechild(otcb->group, chpid);
  if (child)
    {
      /* Has the new parent's task group suppressed child exit status? */

      if ((ptcb->group->tg_flags & GROUP_FLAG_NOCLDWAIT) == 0)
        {
          /* No.. Add the child status entry to the new parent's task group */

          group_addchild(ptcb->group, child);
        }
      else
        {
          /* Yes.. Discard the child status entry */

          group_freechild(child);
        }

      /* Either case is a success */

      ret = OK;
    }
  else
    {
      /* This would not be an error if the original parent's task group has
       * suppressed child exit status.
       */

      ret = ((otcb->group->tg_flags & GROUP_FLAG_NOCLDWAIT) == 0) ? -ENOENT : OK;
    }

#else /* CONFIG_SCHED_CHILD_STATUS */

  DEBUGASSERT(otcb->nchildren > 0);

  otcb->nchildren--;     /* The original parent now has one fewer child */
  ptcb->nchildren++;     /* The new parent has one additional child */
  ret = OK;

#endif /* CONFIG_SCHED_CHILD_STATUS */

errout_with_ints:
  irqrestore(flags);
  return ret;
}
Example No. 21
int task_restart(pid_t pid)
{
  FAR struct tcb_s *rtcb;
  FAR struct task_tcb_s *tcb;
  FAR dq_queue_t *tasklist;
  irqstate_t state;
  int status;

  /* Make sure this task does not become ready-to-run while
   * we are futzing with its TCB
   */

  sched_lock();

  /* Check if the task to restart is the calling task */

  rtcb = this_task();
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      sched_unlock();
      set_errno(ENOSYS);
      return ERROR;
    }

#ifdef CONFIG_SMP
  /* There is currently no capability to restart a task that is actively
   * running on another CPU either.  This is not the calling task, so if it
   * is running, then it could only be running on a different CPU.
   *
   * Also, we will need some interlocks to assure that no tasks are
   * rescheduled on any other CPU while we do this.
   */

#warning Missing SMP logic
  if (rtcb->task_state == TSTATE_TASK_RUNNING)
    {
      /* Not implemented */

      sched_unlock();
      set_errno(ENOSYS);
      return ERROR;
    }
#endif

  /* We are restarting some other task than ourselves */
  /* Look up the TCB associated with the matching pid */

  tcb = (FAR struct task_tcb_s *)sched_gettcb(pid);
#ifndef CONFIG_DISABLE_PTHREAD
  if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
#else
  if (!tcb)
#endif
    {
      /* There is no TCB with this pid or, if there is, it is not a task. */

      sched_unlock();
      set_errno(ESRCH);
      return ERROR;
    }

  /* Try to recover from any bad states */

  task_recover((FAR struct tcb_s *)tcb);

  /* Kill any children of this thread */

#ifdef HAVE_GROUP_MEMBERS
  (void)group_killchildren(tcb);
#endif

  /* Remove the TCB from whatever list it is in.  After this point, the TCB
   * should no longer be accessible to the system
   */

#ifdef CONFIG_SMP
  tasklist = TLIST_HEAD(tcb->cmn.task_state, tcb->cmn.cpu);
#else
  tasklist = TLIST_HEAD(tcb->cmn.task_state);
#endif

  state = irqsave();
  dq_rem((FAR dq_entry_t *)tcb, tasklist);
  tcb->cmn.task_state = TSTATE_TASK_INVALID;
  irqrestore(state);

  /* Deallocate anything left in the TCB's queues */

  sig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */

  /* Reset the current task priority  */

  tcb->cmn.sched_priority = tcb->init_priority;

  /* Reset the base task priority and the number of pending reprioritizations */

#ifdef CONFIG_PRIORITY_INHERITANCE
  tcb->cmn.base_priority = tcb->init_priority;
#  if CONFIG_SEM_NNESTPRIO > 0
  tcb->cmn.npend_reprio = 0;
#  endif
#endif

  /* Re-initialize the processor-specific portion of the TCB.  This will
   * reset the entry point and the start-up parameters
   */

  up_initial_state((FAR struct tcb_s *)tcb);

  /* Add the task to the inactive task list */

  dq_addfirst((FAR dq_entry_t *)tcb, (FAR dq_queue_t *)&g_inactivetasks);
  tcb->cmn.task_state = TSTATE_TASK_INACTIVE;

  /* Activate the task */

  status = task_activate((FAR struct tcb_s *)tcb);
  if (status != OK)
    {
      (void)task_delete(pid);
      sched_unlock();
      set_errno(-status);
      return ERROR;
    }

  sched_unlock();
  return OK;
}
Example No. 22
int task_restart(pid_t pid)
{
  FAR struct tcb_s *rtcb;
  FAR struct task_tcb_s *tcb;
  FAR dq_queue_t *tasklist;
  irqstate_t flags;
  int errcode;
#ifdef CONFIG_SMP
  int cpu;
#endif
  int ret;

  /* Check if the task to restart is the calling task */

  rtcb = this_task();
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      errcode = ENOSYS;
      goto errout;
    }

  /* We are restarting some other task than ourselves.  Make sure that the
   * task does not change its state while we are executing.  In the single
   * CPU case this could be done by disabling pre-emption.  But we will use
   * a little stronger medicine in the SMP case:  The task may be running
   * on another CPU.
   */

  flags = enter_critical_section();

  /* Look up the TCB associated with the matching pid */

  tcb = (FAR struct task_tcb_s *)sched_gettcb(pid);
#ifndef CONFIG_DISABLE_PTHREAD
  if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
#else
  if (!tcb)
#endif
    {
      /* There is no TCB with this pid or, if there is, it is not a task. */

      errcode = ESRCH;
      goto errout_with_lock;
    }

#ifdef CONFIG_SMP
  /* If the task is running on another CPU, then pause that CPU.  We can
   * then manipulate the TCB of the restarted task and, when we resume that
   * CPU, the restart will take effect.
   */

  cpu = sched_cpu_pause(&tcb->cmn);
#endif /* CONFIG_SMP */

  /* Try to recover from any bad states */

  task_recover((FAR struct tcb_s *)tcb);

  /* Kill any children of this thread */

#ifdef HAVE_GROUP_MEMBERS
  (void)group_killchildren(tcb);
#endif

  /* Remove the TCB from whatever list it is in.  After this point, the TCB
   * should no longer be accessible to the system
   */

#ifdef CONFIG_SMP
  tasklist = TLIST_HEAD(tcb->cmn.task_state, tcb->cmn.cpu);
#else
  tasklist = TLIST_HEAD(tcb->cmn.task_state);
#endif

  dq_rem((FAR dq_entry_t *)tcb, tasklist);
  tcb->cmn.task_state = TSTATE_TASK_INVALID;

  /* Deallocate anything left in the TCB's queues */

  sig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */

  /* Reset the current task priority  */

  tcb->cmn.sched_priority = tcb->cmn.init_priority;

  /* The task should restart with pre-emption disabled and not in a critical
   * section.
   */

  tcb->cmn.lockcount = 0;
#ifdef CONFIG_SMP
  tcb->cmn.irqcount  = 0;
#endif

  /* Reset the base task priority and the number of pending reprioritizations */

#ifdef CONFIG_PRIORITY_INHERITANCE
  tcb->cmn.base_priority = tcb->cmn.init_priority;
#  if CONFIG_SEM_NNESTPRIO > 0
  tcb->cmn.npend_reprio = 0;
#  endif
#endif

  /* Re-initialize the processor-specific portion of the TCB.  This will
   * reset the entry point and the start-up parameters
   */

  up_initial_state((FAR struct tcb_s *)tcb);

  /* Add the task to the inactive task list */

  dq_addfirst((FAR dq_entry_t *)tcb, (FAR dq_queue_t *)&g_inactivetasks);
  tcb->cmn.task_state = TSTATE_TASK_INACTIVE;

#ifdef CONFIG_SMP
  /* Resume the paused CPU (if any) */

  if (cpu >= 0)
    {
      ret = up_cpu_resume(cpu);
      if (ret < 0)
        {
          errcode = -ret;
          goto errout_with_lock;
        }
    }
#endif /* CONFIG_SMP */

  leave_critical_section(flags);

  /* Activate the task. */

  ret = task_activate((FAR struct tcb_s *)tcb);
  if (ret != OK)
    {
      (void)task_terminate(pid, true);
      errcode = -ret;
      goto errout;
    }

  return OK;

errout_with_lock:
  leave_critical_section(flags);
errout:
  set_errno(errcode);
  return ERROR;
}
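A minimal usage sketch of task_restart(); the wrapper name is hypothetical. Per the implementations above, restarting the calling task itself (pid == 0 or the caller's own PID) is not supported.

#include <sys/types.h>
#include <sched.h>
#include <errno.h>
#include <stdio.h>

/* Hypothetical usage: restart another task by PID and report any failure */

static void restart_task(pid_t pid)
{
  if (task_restart(pid) != 0)
    {
      printf("task_restart(%d) failed: errno %d\n", (int)pid, errno);
    }
}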
Example No. 23
static int proc_open(FAR struct file *filep, FAR const char *relpath,
                     int oflags, mode_t mode)
{
  FAR struct proc_file_s *procfile;
  FAR const struct proc_node_s *node;
  FAR struct tcb_s *tcb;
  FAR char *ptr;
  irqstate_t flags;
  unsigned long tmp;
  pid_t pid;

  fvdbg("Open '%s'\n", relpath);

  /* PROCFS is read-only.  Any attempt to open with any kind of write
   * access is not permitted.
   *
   * REVISIT:  Write-able proc files could be quite useful.
   */

  if ((oflags & O_WRONLY) != 0 || (oflags & O_RDONLY) == 0)
    {
      fdbg("ERROR: Only O_RDONLY supported\n");
      return -EACCES;
    }

  /* The first segment of the relative path should be a task/thread ID */

  ptr = NULL;
  tmp = strtoul(relpath, &ptr, 10);

  if (!ptr || *ptr != '/')
    {
      fdbg("ERROR: Invalid path \"%s\"\n", relpath);
      return -ENOENT;
    }

  /* Skip over the slash */

  ptr++;

  /* A valid PID would be in the range of 0-32767 (0 is reserved for the
   * IDLE thread).
   */

  if (tmp >= 32768)
    {
      fdbg("ERROR: Invalid PID %ld\n", tmp);
      return -ENOENT;
    }

  /* Now verify that a task with this task/thread ID exists */

  pid = (pid_t)tmp;

  flags = irqsave();
  tcb = sched_gettcb(pid);
  irqrestore(flags);

  if (!tcb)
    {
      fdbg("ERROR: PID %d is no longer valid\n", (int)pid);
      return -ENOENT;
    }

  /* The remaining segments of the relpath should be a well known node in
   * the task/thread tree.
   */

  node = proc_findnode(ptr);
  if (!node)
    {
      fdbg("ERROR: Invalid path \"%s\"\n", relpath);
      return -ENOENT;
    }

  /* The node must be a file, not a directory */

  if (node->dtype != DTYPE_FILE)
    {
      fdbg("ERROR: Path \"%s\" is a directory\n", relpath);
      return -EISDIR;
    }

  /* Allocate a container to hold the task and node selection */

  procfile = (FAR struct proc_file_s *)kzalloc(sizeof(struct proc_file_s));
  if (!procfile)
    {
      fdbg("ERROR: Failed to allocate file container\n");
      return -ENOMEM;
    }

  /* Initialize the file container */

  procfile->pid  = pid;
  procfile->node = node;

  /* Save the index as the open-specific state in filep->f_priv */

  filep->f_priv = (FAR void *)procfile;
  return OK;
}
Example No. 24
static int proc_stat(const char *relpath, struct stat *buf)
{
  FAR const struct proc_node_s *node;
  FAR struct tcb_s *tcb;
  unsigned long tmp;
  FAR char *ptr;
  irqstate_t flags;
  pid_t pid;

  /* Two path forms are accepted:
   *
   * "<pid>" - If <pid> refers to a currently active task/thread, then it
   *   is a directory
   * "<pid>/<node>" - If <node> is a recognized node then, then it
   *   is a file or directory.
   */

  ptr = NULL;
  tmp = strtoul(relpath, &ptr, 10);

  if (!ptr)
    {
      fdbg("ERROR: Invalid path \"%s\"\n", relpath);
      return -ENOENT;
   }

  /* A valid PID would be in the range of 0-32767 (0 is reserved for the
   * IDLE thread).
   */

  if (tmp >= 32768)
    {
      fdbg("ERROR: Invalid PID %ld\n", tmp);
      return -ENOENT;
    }

  /* Now verify that a task with this task/thread ID exists */

  pid = (pid_t)tmp;

  flags = irqsave();
  tcb = sched_gettcb(pid);
  irqrestore(flags);

  if (!tcb)
    {
      fdbg("ERROR: PID %d is no longer valid\n", (int)pid);
      return -ENOENT;
    }

  /* Was the <pid> the final element of the path? */

  if (*ptr == '\0' || strcmp(ptr, "/") == 0)
    {
      /* Yes ... It's a read-only directory */

      buf->st_mode = S_IFDIR|S_IROTH|S_IRGRP|S_IRUSR;
    }

  /* Verify that the process ID is followed by valid path segment delimiter */

  else if (*ptr != '/')
    {
      /* We are required to return -ENOENT for all invalid paths */

      fdbg("ERROR: Bad delimiter '%c' in relpath '%s'\n", *ptr, relpath);
      return -ENOENT;
    }
  else
    {
      /* Otherwise, the second segment of the relpath should be a well
       * known node of the task/thread directory structure.
       */

      /* Skip over the path segment delimiter */

      ptr++;

      /* Lookup the well-known node associated with the relative path. */

      node = proc_findnode(ptr);
      if (!node)
        {
          fdbg("ERROR: Invalid path \"%s\"\n", relpath);
          return -ENOENT;
        }

      /* If the node exists, it is the name for a read-only file or
       * directory.
       */

      if (node->dtype == DTYPE_FILE)
        {
          buf->st_mode = S_IFREG|S_IROTH|S_IRGRP|S_IRUSR;
        }
      else
        {
          buf->st_mode = S_IFDIR|S_IROTH|S_IRGRP|S_IRUSR;
        }
    }

  /* File/directory size, access block size */

  buf->st_size    = 0;
  buf->st_blksize = 0;
  buf->st_blocks  = 0;
  return OK;
}
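From user space, the nodes handled above are reached with ordinary stat() calls. A hedged sketch, assuming procfs is mounted at /proc, that a task with PID 1 exists, and that it exposes a "status" node.

#include <sys/stat.h>
#include <stdio.h>

/* Hypothetical user-space view of the handler above: stat a task
 * directory and one of its nodes.
 */

static void probe_proc_entries(void)
{
  struct stat st;

  if (stat("/proc/1", &st) == 0 && S_ISDIR(st.st_mode))
    {
      printf("/proc/1 is a read-only task directory\n");
    }

  if (stat("/proc/1/status", &st) == 0 && S_ISREG(st.st_mode))
    {
      printf("/proc/1/status is a read-only file\n");
    }
}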
Example No. 25
int task_reparent(pid_t ppid, pid_t chpid)
{
#ifdef CONFIG_SCHED_CHILD_STATUS
  FAR struct child_status_s *child;
#endif
  FAR struct task_group_s *chgrp;
  FAR struct task_group_s *ogrp;
  FAR struct task_group_s *pgrp;
  struct tcb_s *tcb;
  gid_t ogid;
  gid_t pgid;
  irqstate_t flags;
  int ret;

  /* Disable interrupts so that nothing can change in the relationship of
   * the three tasks:  Child, current parent, and new parent.
   */

  flags = irqsave();

  /* Get the child task's task group */

  tcb = sched_gettcb(chpid);
  if (!tcb)
    {
      ret = -ECHILD;
      goto errout_with_ints;
    }

  DEBUGASSERT(tcb->group);
  chgrp = tcb->group;

  /* Get the GID of the old parent task's task group (ogid) */

  ogid = chgrp->tg_pgid;

  /* Get the old parent task's task group (ogrp) */

  ogrp = group_findbygid(ogid);
  if (!ogrp)
    {
      ret = -ESRCH;
      goto errout_with_ints;
    }

  /* If the new parent task's PID (ppid) is zero, then the grandparent will
   * be the new parent, i.e., the parent of the current parent task.
   */

  if (ppid == 0)
    {
      /* Get the grandparent task's task group (pgrp) */

      pgid = ogrp->tg_pgid;
      pgrp = group_findbygid(pgid);
    }
  else
    {
      /* Get the new parent task's task group (pgrp) */

      tcb = sched_gettcb(ppid);
      if (!tcb)
        {
          ret = -ESRCH;
          goto errout_with_ints;
        }

      pgrp = tcb->group;
      pgid = pgrp->tg_gid;
    }

  if (!pgrp)
    {
      ret = -ESRCH;
      goto errout_with_ints;
    }

  /* Then reparent the child.  Notice that we don't actually change the
   * parent of the task. Rather, we change the parent task group for
   * all members of the child's task group.
   */

  chgrp->tg_pgid = pgid;

#ifdef CONFIG_SCHED_CHILD_STATUS
  /* Remove the child status entry from old parent task group */

  child = group_removechild(ogrp, chpid);
  if (child)
    {
      /* Has the new parent's task group suppressed child exit status? */

      if ((pgrp->tg_flags & GROUP_FLAG_NOCLDWAIT) == 0)
        {
          /* No.. Add the child status entry to the new parent's task group */

          group_addchild(pgrp, child);
        }
      else
        {
          /* Yes.. Discard the child status entry */

          group_freechild(child);
        }

      /* Either case is a success */

      ret = OK;
    }
  else
    {
      /* This would not be an error if the original parent's task group has
       * suppressed child exit status.
       */

      ret = ((ogrp->tg_flags & GROUP_FLAG_NOCLDWAIT) == 0) ? -ENOENT : OK;
    }

#else /* CONFIG_SCHED_CHILD_STATUS */

  DEBUGASSERT(otcb->nchildren > 0);

  otcb->nchildren--;     /* The original parent now has one fewer child */
  ptcb->nchildren++;     /* The new parent has one additional child */
  ret = OK;

#endif /* CONFIG_SCHED_CHILD_STATUS */

errout_with_ints:
  irqrestore(flags);
  return ret;
}
Example No. 26
static ssize_t proc_read(FAR struct file *filep, FAR char *buffer,
                         size_t buflen)
{
  FAR struct proc_file_s *procfile;
  FAR struct tcb_s *tcb;
  irqstate_t flags;
  ssize_t ret;

  fvdbg("buffer=%p buflen=%d\n", buffer, (int)buflen);

  /* Recover our private data from the struct file instance */

  procfile = (FAR struct proc_file_s *)filep->f_priv;
  DEBUGASSERT(procfile);

  /* Verify that the thread is still valid */

  flags = irqsave();
  tcb = sched_gettcb(procfile->pid);

  if (!tcb)
    {
      fdbg("ERROR: PID %d is not valid\n", (int)procfile->pid);
      irqrestore(flags);
      return -ENODEV;
    }

  /* Provide the requested data */

  switch (procfile->node->node)
    {
    case PROC_STATUS: /* Task/thread status */
      ret = proc_status(procfile, tcb, buffer, buflen, filep->f_pos);
      break;

    case PROC_CMDLINE: /* Task command line */
      ret = proc_cmdline(procfile, tcb, buffer, buflen, filep->f_pos);
      break;

#ifdef CONFIG_SCHED_CPULOAD
    case PROC_LOADAVG: /* Average CPU utilization */
      ret = proc_loadavg(procfile, tcb, buffer, buflen, filep->f_pos);
      break;
#endif
    case PROC_STACK: /* Task stack info */
      ret = proc_stack(procfile, tcb, buffer, buflen, filep->f_pos);
      break;

    case PROC_GROUP_STATUS: /* Task group status */
      ret = proc_groupstatus(procfile, tcb, buffer, buflen, filep->f_pos);
      break;

    case PROC_GROUP_FD: /* Group file descriptors */
      ret = proc_groupfd(procfile, tcb, buffer, buflen, filep->f_pos);
      break;

     default:
      ret = -EINVAL;
      break;
    }

  irqrestore(flags);

  /* Update the file offset */

  if (ret > 0)
    {
      filep->f_pos += ret;
    }

  return ret;
}
Example No. 27
int sched_getparam (pid_t pid, FAR struct sched_param *param)
{
  FAR struct tcb_s *rtcb;
  FAR struct tcb_s *tcb;
  int ret = OK;

  if (!param)
    {
      return ERROR;
    }

  /* Check if the task to restart is the calling task */

  rtcb = (FAR struct tcb_s *)g_readytorun.head;
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Return the priority of the calling task. */

      param->sched_priority = (int)rtcb->sched_priority;
    }

  /* This pid is not for the calling task; we will have to look it up */

  else
    {
      /* Get the TCB associated with this pid */

      sched_lock();
      tcb = sched_gettcb(pid);
      if (!tcb)
        {
          /* This pid does not correspond to any known task */

          ret = ERROR;
        }
      else
        {
#ifdef CONFIG_SCHED_SPORADIC
#endif
          /* Return the priority of the task */

          param->sched_priority = (int)tcb->sched_priority;

#ifdef CONFIG_SCHED_SPORADIC
          if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC)
            {
              FAR struct sporadic_s *sporadic = tcb->sporadic;
              DEBUGASSERT(sporadic != NULL);

              /* Return parameters associated with SCHED_SPORADIC */

              param->sched_ss_low_priority = (int)sporadic->low_priority;
              param->sched_ss_max_repl     = (int)sporadic->max_repl;

              clock_ticks2time((int)sporadic->repl_period, &param->sched_ss_repl_period);
              clock_ticks2time((int)sporadic->budget, &param->sched_ss_init_budget);
            }
          else
            {
              param->sched_ss_low_priority        = 0;
              param->sched_ss_max_repl            = 0;
              param->sched_ss_repl_period.tv_sec  = 0;
              param->sched_ss_repl_period.tv_nsec = 0;
              param->sched_ss_init_budget.tv_sec  = 0;
              param->sched_ss_init_budget.tv_nsec = 0;
            }
#endif
        }

      sched_unlock();
    }

  return ret;
}
Example No. 28
static int proc_opendir(FAR const char *relpath, FAR struct fs_dirent_s *dir)
{
  FAR struct proc_dir_s *procdir;
  FAR const struct proc_node_s *node;
  FAR struct tcb_s *tcb;
  irqstate_t flags;
  unsigned long tmp;
  FAR char *ptr;
  pid_t pid;

  fvdbg("relpath: \"%s\"\n", relpath ? relpath : "NULL");
  DEBUGASSERT(relpath && dir && !dir->u.procfs);

  /* The relative path must be either:
   *
   *  (1) "<pid>" - The sub-directory of task/thread attributes, or
   *  (2) The name of a directory node under <pid>
   */

  /* Otherwise, the relative path should be a valid task/thread ID */

  ptr = NULL;
  tmp = strtoul(relpath, &ptr, 10);

  if (!ptr || (*ptr != '\0' && *ptr != '/'))
    {
      /* strtoul failed or there is something in the path after the pid */

      fdbg("ERROR: Invalid path \"%s\"\n", relpath);
      return -ENOENT;
   }

  /* A valid PID would be in the range of 0-32767 (0 is reserved for the
   * IDLE thread).
   */

  if (tmp >= 32768)
    {
      fdbg("ERROR: Invalid PID %ld\n", tmp);
      return -ENOENT;
    }

  /* Now verify that a task with this task/thread ID exists */

  pid = (pid_t)tmp;

  flags = irqsave();
  tcb = sched_gettcb(pid);
  irqrestore(flags);

  if (!tcb)
    {
      fdbg("ERROR: PID %d is not valid\n", (int)pid);
      return -ENOENT;
    }

  /* Allocate the directory structure.  Note that the index and procentry
   * pointer are implicitly nullified by kzalloc().  Only the remaining,
   * non-zero entries will need be initialized.
   */

  procdir = (FAR struct proc_dir_s *)kzalloc(sizeof(struct proc_dir_s));
  if (!procdir)
    {
      fdbg("ERROR: Failed to allocate the directory structure\n");
      return -ENOMEM;
    }

  /* Was the <pid> the final element of the path? */

  if (*ptr != '\0' && strcmp(ptr, "/") != 0)
    {
      /* There is something in the path after the pid.  Skip over the path
       * segment delimiter and see if we can identify the node of interest.
       */

      ptr++;
      node = proc_findnode(ptr);
      if (!node)
        {
          fdbg("ERROR: Invalid path \"%s\"\n", relpath);
          kfree(procdir);
          return -ENOENT;
        }

      /* The node must be a directory, not a file */

      if (node->dtype != DTYPE_DIRECTORY)
        {
          fdbg("ERROR: Path \"%s\" is not a directory\n", relpath);
          kfree(procdir);
          return -ENOTDIR;
        }

      /* This is a second level directory */

      procdir->base.level    = 2;
      procdir->base.nentries = PROC_NGROUPNODES;
      procdir->node          = node;
    }
  else
    {
      /* Use the special level0 node */

      procdir->base.level    = 1;
      procdir->base.nentries = PROC_NLEVEL0NODES;
      procdir->node          = &g_level0node;
    }

   procdir->pid  = pid;
   dir->u.procfs = (FAR void *)procdir;
   return OK;
}
Example No. 29
static int procfs_readdir(struct inode *mountpt, struct fs_dirent_s *dir)
{
  FAR struct procfs_dir_priv_s *priv;
  FAR struct procfs_level0_s *level0;
  FAR struct tcb_s *tcb;
  FAR const char *name = NULL;
  unsigned int index;
  irqstate_t flags;
  pid_t pid;
  int ret = -ENOENT;

  DEBUGASSERT(mountpt && dir && dir->u.procfs);
  priv = dir->u.procfs;

  /* Are we reading the 1st directory level with dynamic PID and static
   * entries?
   */

  if (priv->level == 0)
    {
      level0 = (FAR struct procfs_level0_s *)priv;

      /* Have we reached the end of the PID information? */

      index = priv->index;
      if (index >= priv->nentries)
        {
          /* We must report the next static entry ... no more PID entries.
           * Skip any entries with wildcards in the first segment of the
           * directory name.
           */

          while (index < priv->nentries + g_procfsentrycount)
            {
              name = g_procfsentries[index - priv->nentries].pathpattern;
              while (*name != '/' && *name != '\0')
                {
                  if (*name == '*' || *name == '[' || *name == '?')
                    {
                      /* Wildcard found.  Skip this entry */

                      index++;
                      name = NULL;
                      break;
                    }

                  name++;
                }

              /* Test if we skipped this entry */

              if (name != NULL)
              {
                /* This entry is okay to report. Test if it has a duplicate
                 * first level name as the one we just reported.  This could
                 * happen in the event of procfs_entry_s such as:
                 *
                 *    fs/smartfs
                 *    fs/nfs
                 *    fs/nxffs
                 */

                name = g_procfsentries[index - priv->nentries].pathpattern;
                if (!level0->lastlen || (strncmp(name, level0->lastread,
                      level0->lastlen) != 0))
                  {
                    /* Not a duplicate, return the first segment of this
                     * entry
                     */

                    break;
                  }
                else
                  {
                    /* Skip this entry ... duplicate 1st level name found */

                    index++;
                  }
              }
            }

          /* Test if we are at the end of the directory */

          if (index >= priv->nentries + g_procfsentrycount)
            {
              /* We signal the end of the directory by returning the special
               * error -ENOENT
               */

              fvdbg("Entry %d: End of directory\n", index);
              ret = -ENOENT;
            }
          else
            {
              /* Report the next static entry */

              level0->lastlen = strcspn(name, "/");
              level0->lastread = name;
              strncpy(dir->fd_dir.d_name, name, level0->lastlen);
              dir->fd_dir.d_name[level0->lastlen] = '\0';

              if (name[level0->lastlen] == '/')
                {
                  dir->fd_dir.d_type = DTYPE_DIRECTORY;
                }
              else
                {
                  dir->fd_dir.d_type = DTYPE_FILE;
                }

              /* Advance to next entry for the next read */

              priv->index = index;
              ret = OK;
            }
        }
#ifndef CONFIG_FS_PROCFS_EXCLUDE_PROCESS
      else
        {
          /* Verify that the pid still refers to an active task/thread */

          pid = level0->pid[index];

          flags = irqsave();
          tcb = sched_gettcb(pid);
          irqrestore(flags);

          if (!tcb)
            {
              fdbg("ERROR: PID %d is no longer valid\n", (int)pid);
              return -ENOENT;
            }

          /* Save the filename=pid and file type=directory */

          dir->fd_dir.d_type = DTYPE_DIRECTORY;
          snprintf(dir->fd_dir.d_name, NAME_MAX+1, "%d", (int)pid);

          /* Set up the next directory entry offset.  NOTE that we could use the
           * standard f_pos instead of our own private index.
           */

          level0->base.index = index + 1;
          ret = OK;
        }
#endif /* CONFIG_FS_PROCFS_EXCLUDE_PROCESS */
    }

    /* Are we reading an intermediate subdirectory? */

  else if (priv->level > 0 && priv->procfsentry == NULL)
    {
      FAR struct procfs_level1_s *level1;

      level1 = (FAR struct procfs_level1_s *) priv;

      /* Test if this entry matches.  We assume all entries of the same
       * subdirectory are listed in order in the procfs_entry array.
       */

      if (strncmp(g_procfsentries[level1->base.index].pathpattern,
              g_procfsentries[level1->firstindex].pathpattern,
              level1->subdirlen) == 0)
        {
          /* This entry matches.  Report the subdir entry */

          name = &g_procfsentries[level1->base.index].pathpattern[
                    level1->subdirlen + 1];
          level1->lastlen = strcspn(name, "/");
          level1->lastread = name;
          strncpy(dir->fd_dir.d_name, name, level1->lastlen);

          /* Some of the search entries contain '**' wildcards.  When we
           * report the entry name, we must remove this wildcard search
           * specifier.
           */

          while (dir->fd_dir.d_name[level1->lastlen - 1] == '*')
            {
              level1->lastlen--;
            }

          dir->fd_dir.d_name[level1->lastlen] = '\0';

          if (name[level1->lastlen] == '/')
            {
              dir->fd_dir.d_type = DTYPE_DIRECTORY;
            }
          else
            {
              dir->fd_dir.d_type = DTYPE_FILE;
            }

          level1->base.index++;
          ret = OK;
        }
      else
        {
          /* No more entries in the subdirectory */

          ret = -ENOENT;
        }
    }
  else
    {
      /* We are performing a directory search of one of the subdirectories
       * and we must let the handler perform the read.
       */

      DEBUGASSERT(priv->procfsentry && priv->procfsentry->ops->readdir);
      ret = priv->procfsentry->ops->readdir(dir);
    }

  return ret;
}
Example No. 30
static int proc_readdir(struct fs_dirent_s *dir)
{
  FAR struct proc_dir_s *procdir;
  FAR const struct proc_node_s *node = NULL;
  FAR struct tcb_s *tcb;
  unsigned int index;
  irqstate_t flags;
  pid_t pid;
  int ret;

  DEBUGASSERT(dir && dir->u.procfs);
  procdir = dir->u.procfs;

  /* Have we reached the end of the directory? */

  index = procdir->base.index;
  if (index >= procdir->base.nentries)
    {
      /* We signal the end of the directory by returning the special
       * error -ENOENT
       */

      fvdbg("Entry %d: End of directory\n", index);
      ret = -ENOENT;
    }

  /* No, we are not at the end of the directory */

  else
    {
      /* Verify that the pid still refers to an active task/thread */

      pid = procdir->pid;

      flags = irqsave();
      tcb = sched_gettcb(pid);
      irqrestore(flags);

      if (!tcb)
        {
          fdbg("ERROR: PID %d is no longer valid\n", (int)pid);
          return -ENOENT;
        }

      /* The TCB is still valid (or at least was when we entered this function) */
      /* Handle the directory listing by the node type */

      switch (procdir->node->node)
        {
         case PROC_LEVEL0: /* Top level directory */
           DEBUGASSERT(procdir->base.level == 1);
           node = g_level0info[index];
           break;

         case PROC_GROUP:  /* Group sub-directory */
           DEBUGASSERT(procdir->base.level == 2);
           node = g_groupinfo[index];
           break;

          default:
            /* Unknown node type; report no more entries */

            return -ENOENT;
        }

      /* Save the filename and file type */

      dir->fd_dir.d_type = node->dtype;
      strncpy(dir->fd_dir.d_name, node->name, NAME_MAX+1);

      /* Set up the next directory entry offset.  NOTE that we could use the
       * standard f_pos instead of our own private index.
       */

      procdir->base.index = index + 1;
      ret = OK;
    }

  return ret;
}