Example No. 1
static int work_qcancel(FAR struct usr_wqueue_s *wqueue, FAR struct work_s *work)
{
  int ret = -ENOENT;

  DEBUGASSERT(work != NULL);

  /* Get exclusive access to the work queue */

  while (work_lock() < 0);

  /* Cancelling the work is simply a matter of removing the work structure
   * from the work queue.  This must be done with interrupts disabled because
   * new work is typically added to the work queue from interrupt handlers.
   */

  if (work->worker != NULL)
    {
      /* A little test of the integrity of the work queue */

      DEBUGASSERT(work->dq.flink || (FAR dq_entry_t *)work == wqueue->q.tail);
      DEBUGASSERT(work->dq.blink || (FAR dq_entry_t *)work == wqueue->q.head);

      /* Remove the entry from the work queue and make sure that it is
       * marked as available (i.e., the worker field is nullified).
       */

      dq_rem((FAR dq_entry_t *)work, &wqueue->q);
      work->worker = NULL;
      ret = OK;
    }

  work_unlock();
  return ret;
}
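Every example in this collection ultimately relies on the same operation: unlinking an entry from a doubly-linked queue. The sketch below is illustration only; the names my_dq_entry_s, my_dq_queue_s and my_dq_rem() are hypothetical stand-ins, not the NuttX <queue.h> API. It shows what a dq_rem()-style removal does with the flink/blink pointers and the queue head/tail, and why a NULL blink identifies the head of a list in the scheduler examples further down.

#include <stddef.h>

struct my_dq_entry_s
{
  struct my_dq_entry_s *flink;   /* Next entry in the queue (NULL at the tail) */
  struct my_dq_entry_s *blink;   /* Previous entry in the queue (NULL at the head) */
};

struct my_dq_queue_s
{
  struct my_dq_entry_s *head;
  struct my_dq_entry_s *tail;
};

static void my_dq_rem(struct my_dq_entry_s *node, struct my_dq_queue_s *queue)
{
  struct my_dq_entry_s *prev = node->blink;
  struct my_dq_entry_s *next = node->flink;

  /* Unlink from the previous entry, or advance the head if node was first */

  if (prev == NULL)
    {
      queue->head = next;
    }
  else
    {
      prev->flink = next;
    }

  /* Unlink from the next entry, or back up the tail if node was last */

  if (next == NULL)
    {
      queue->tail = prev;
    }
  else
    {
      next->blink = prev;
    }

  /* The removed entry no longer belongs to any list */

  node->flink = NULL;
  node->blink = NULL;
}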
Example No. 2
void hrt_work_cancel(struct work_s *work)
{
	struct wqueue_s *wqueue = &g_hrt_work;

	//DEBUGASSERT(work != NULL && (unsigned)qid < NWORKERS);

	/* Cancelling the work is simply a matter of removing the work structure
	 * from the work queue.  This must be done with interrupts disabled because
	 * new work is typically added to the work queue from interrupt handlers.
	 */

	hrt_work_lock();

	if (work->worker != NULL) {
		/* A little test of the integrity of the work queue */

		//DEBUGASSERT(work->dq.flink ||(dq_entry_t *)work == wqueue->q.tail);
		//DEBUGASSERT(work->dq.blink ||(dq_entry_t *)work == wqueue->q.head);

		/* Remove the entry from the work queue and make sure that it is
		 * marked as available (i.e., the worker field is nullified).
		 */

		dq_rem((dq_entry_t *)work, &wqueue->q);
		work->worker = NULL;
	}

	hrt_work_unlock();
}
Example No. 3
bool sched_removereadytorun(FAR struct tcb_s *rtcb)
{
  bool doswitch = false;

  /* Check if the TCB to be removed is at the head of the ready to run list.
   * There is only one list, g_readytorun, and it always contains the
   * currently running task.  If we are removing the head of this list,
   * then we are removing the currently active task.
   */

  if (rtcb->blink == NULL)
    {
      /* There must always be at least one task in the list (the IDLE task)
       * after the TCB being removed.
       */

      FAR struct tcb_s *nxttcb = (FAR struct tcb_s *)rtcb->flink;
      DEBUGASSERT(nxttcb != NULL);

      nxttcb->task_state = TSTATE_TASK_RUNNING;
      doswitch = true;
    }

  /* Remove the TCB from the ready-to-run list.  In the non-SMP case, this
   * is always the g_readytorun list.
   */

  dq_rem((FAR dq_entry_t *)rtcb, (FAR dq_queue_t *)&g_readytorun);

  /* Since the TCB is not in any list, it is now invalid */

  rtcb->task_state = TSTATE_TASK_INVALID;
  return doswitch;
}
Example No. 4
bool sched_removereadytorun(FAR _TCB *rtcb)
{
  bool ret = false;

  /* Check if the TCB to be removed is at the head of the ready
   * to run list.  In this case, we are removing the currently
   * active task.
   */

  if (!rtcb->blink)
    {
      /* There must always be at least one task in the list (the idle task) */

      ASSERT(rtcb->flink != NULL);

      /* Inform the instrumentation layer that we are switching tasks */

      sched_note_switch(rtcb, rtcb->flink);

      rtcb->flink->task_state = TSTATE_TASK_RUNNING;
      ret = true;
    }

  /* Remove the TCB from the ready-to-run list */

  dq_rem((FAR dq_entry_t*)rtcb, (dq_queue_t*)&g_readytorun);

  rtcb->task_state = TSTATE_TASK_INVALID;
  return ret;
}
Example No. 5
bool sched_removereadytorun(FAR struct tcb_s *rtcb)
{
  FAR struct tcb_s *ntcb = NULL;
  bool ret = false;

  /* Check if the TCB to be removed is at the head of the ready to run list.
   * In this case, we are removing the currently active task.
   */

  if (!rtcb->blink)
    {
      /* There must always be at least one task in the list (the idle task) */

      ntcb = (FAR struct tcb_s *)rtcb->flink;
      DEBUGASSERT(ntcb != NULL);

      /* Inform the instrumentation layer that we are switching tasks */

      sched_note_switch(rtcb, ntcb);
      ntcb->task_state = TSTATE_TASK_RUNNING;
      ret = true;
    }

  /* Remove the TCB from the ready-to-run list */

  dq_rem((FAR dq_entry_t *)rtcb, (FAR dq_queue_t *)&g_readytorun);

  /* Since the TCB is not in any list, it is now invalid */

  rtcb->task_state = TSTATE_TASK_INVALID;
  return ret;
}
Example No. 6
void uip_tcpfree(struct uip_conn *conn)
{
#if CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0
  struct uip_readahead_s *readahead;
#endif
  uip_lock_t flags;

  /* Because g_free_tcp_connections is accessed from user level and interrupt
   * level code, it is necessary to keep interrupts disabled during this
   * operation.
   */

  DEBUGASSERT(conn->crefs == 0);
  flags = uip_lock();

  /* UIP_ALLOCATED means that the connection is not in the active list
   * yet.
   */

  if (conn->tcpstateflags != UIP_ALLOCATED)
    {
      /* Remove the connection from the active list */

      dq_rem(&conn->node, &g_active_tcp_connections);
    }

  /* Release any read-ahead buffers attached to the connection */

#if CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0
  while ((readahead = (struct uip_readahead_s *)sq_remfirst(&conn->readahead)) != NULL)
    {
      uip_tcpreadaheadrelease(readahead);
    }
#endif

  /* Remove any backlog attached to this connection */

#ifdef CONFIG_NET_TCPBACKLOG
  if (conn->backlog)
    {
      uip_backlogdestroy(conn);
    }

  /* If this connection is, itself, backlogged, then remove it from the
   * parent connection's backlog list.
   */

  if (conn->blparent)
    {
      uip_backlogdelete(conn->blparent, conn);
    }
#endif

  /* Mark the connection available and put it into the free list */

  conn->tcpstateflags = UIP_CLOSED;
  dq_addlast(&conn->node, &g_free_tcp_connections);
  uip_unlock(flags);
}
Example No. 7
void task_vforkabort(FAR struct task_tcb_s *child, int errcode)
{
  /* The TCB was added to the inactive task list by task_schedsetup() */

  dq_rem((FAR dq_entry_t *)child, (FAR dq_queue_t *)&g_inactivetasks);

  /* Release the TCB */

  sched_releasetcb((FAR struct tcb_s *)child,
                   child->cmn.flags & TCB_FLAG_TTYPE_MASK);
  set_errno(errcode);
}
Example No. 8
FAR struct aiocb *aioc_decant(FAR struct aio_container_s *aioc)
{
  FAR struct aiocb *aiocbp;

  DEBUGASSERT(aioc);

  /* Remove the container from the pending transfer list. */

  aio_lock();
  dq_rem(&aioc->aioc_link, &g_aio_pending);

  /* De-cant the AIO control block and return the container to the free list */

  aiocbp = aioc->aioc_aiocbp;
  aioc_free(aioc);

  aio_unlock();
  return aiocbp;
}
Example No. 9
void uip_udpfree(struct uip_udp_conn *conn)
{
  /* The free list is only accessed from user, non-interrupt level and
   * is protected by a semaphore (that behaves like a mutex).
   */

  DEBUGASSERT(conn->crefs == 0);

  _uip_semtake(&g_free_sem);
  conn->lport = 0;

  /* Remove the connection from the active list */

  dq_rem(&conn->node, &g_active_udp_connections);

  /* Free the connection */

  dq_addlast(&conn->node, &g_free_udp_connections);
  _uip_semgive(&g_free_sem);
}
Example No. 10
int sem_close(FAR sem_t *sem)
{
  FAR nsem_t *psem;
  int ret = ERROR;

  /* Verify the inputs */

  if (sem)
    {
      sched_lock();

      /* Search the list of named semaphores */

      for (psem = (FAR nsem_t*)g_nsems.head;
           ((psem) && (sem != &psem->sem));
           psem = psem->flink);

      /* Check if we found it */

      if (psem)
        {
          /* Decrement the count of sem_open connections to this semaphore */

          if (psem->nconnect) psem->nconnect--;

          /* If the semaphore is no longer connected to any processes AND the
           * semaphore was previously unlinked, then deallocate it.
           */

          if (!psem->nconnect && psem->unlinked)
            {
              dq_rem((FAR dq_entry_t*)psem, &g_nsems);
              sched_free(psem);
            }
          ret = OK;
        }
      sched_unlock();
    }

  return ret;
}
Example No. 11
void netlink_free(FAR struct netlink_conn_s *conn)
{
  /* The free list is protected by a semaphore (that behaves like a mutex). */

  DEBUGASSERT(conn->crefs == 0);

  _netlink_semtake(&g_free_sem);

  /* Remove the connection from the active list */

  dq_rem(&conn->node, &g_active_netlink_connections);

  /* Reset structure */

  memset(conn, 0, sizeof(*conn));

  /* Free the connection */

  dq_addlast(&conn->node, &g_free_netlink_connections);
  _netlink_semgive(&g_free_sem);
}
Example No. 12
void sched_removeblocked(FAR struct tcb_s *btcb)
{
    tstate_t task_state = btcb->task_state;

    /* Make sure the TCB is in a valid blocked state */

    ASSERT(task_state >= FIRST_BLOCKED_STATE &&
           task_state <= LAST_BLOCKED_STATE);

    /* Remove the TCB from the blocked task list associated
     * with this state
     */

    dq_rem((FAR dq_entry_t*)btcb, (dq_queue_t*)g_tasklisttable[task_state].list);

    /* Make sure the TCB's state corresponds to not being in
     * any list
     */

    btcb->task_state = TSTATE_TASK_INVALID;
}
Example No. 13
static void hrt_queue_refresh(void)
{
  int elapsed;
  dq_entry_t *pent;
  struct hrt_s *tmp;
  irqstate_t flags;

  flags = spin_lock_irqsave();
  elapsed = (uint64_t)getreg32(rMT20CNT) * (1000 * 1000) * 10 / XT1OSC_CLK;

  for (pent = hrt_timer_queue.head; pent; pent = dq_next(pent))
    {
      tmp = container_of(pent, struct hrt_s, ent);
      tmp->usec -= elapsed;
    }

cont:
  /* search for expired entries */

  for (pent = hrt_timer_queue.head; pent; pent = dq_next(pent))
    {
      tmp = container_of(pent, struct hrt_s, ent);
      if (tmp->usec <= 0)
        {
          dq_rem(pent, &hrt_timer_queue);
          spin_unlock_irqrestore(flags);
          nxsem_post(&tmp->sem);
          flags = spin_lock_irqsave();
          goto cont;
        }
      else
        {
          break;
        }
    }

  spin_unlock_irqrestore(flags);
}
Example No. 14
static int work_qqueue(FAR struct usr_wqueue_s *wqueue,
                       FAR struct work_s *work, worker_t worker,
                       FAR void *arg, clock_t delay)
{
  DEBUGASSERT(work != NULL);

  /* Get exclusive access to the work queue */

  while (work_lock() < 0);

  /* Is there already pending work? */

  if (work->worker != NULL)
    {
      /* Remove the entry from the work queue.  It will be requeued at the
       * end of the work queue.
       */

      dq_rem((FAR dq_entry_t *)work, &wqueue->q);
    }

  /* Initialize the work structure */

  work->worker = worker;           /* Work callback. non-NULL means queued */
  work->arg    = arg;              /* Callback argument */
  work->delay  = delay;            /* Delay until work performed */

  /* Now, time-tag that entry and put it in the work queue. */

  work->qtime  = clock(); /* Time work queued */

  dq_addlast((FAR dq_entry_t *)work, &wqueue->q);
  kill(wqueue->pid, SIGWORK);   /* Wake up the worker thread */

  work_unlock();
  return OK;
}
Example No. 15
int work_cancel(struct work_s *work)
{
  irqstate_t flags;

  DEBUGASSERT(work != NULL);

  /* Cancelling the work is simply a matter of removing the work structure
   * from the work queue.  This must be done with interrupts disabled because
   * new work is typically added to the work queue from interrupt handlers.
   */

  flags = irqsave();
  if (work->worker != NULL)
    {
      DEBUGASSERT(work->dq.flink || (FAR dq_entry_t *)work == g_work.tail);
      DEBUGASSERT(work->dq.blink || (FAR dq_entry_t *)work == g_work.head);
      dq_rem((FAR dq_entry_t *)work, &g_work);

      work->worker = NULL;
    }

  irqrestore(flags);
  return OK;
}
Example No. 16
bool sched_removereadytorun(FAR struct tcb_s *rtcb)
{
  FAR dq_queue_t *tasklist;
  bool doswitch = false;
  int cpu;

  /* Which CPU (if any) is the task running on?  Which task list holds the
   * TCB?
   */

  cpu      = rtcb->cpu;
  tasklist = TLIST_HEAD(rtcb->task_state, cpu);

  /* Check if the TCB to be removed is at the head of a ready-to-run list.
   * For the case of SMP, there are two lists involved:  (1) the
   * g_readytorun list that holds non-running tasks that have not been
   * assigned to a CPU, and (2) the g_assignedtasks[] lists which hold
   * tasks assigned to a CPU, including the task that is currently running on
   * that CPU.  Only this latter list contains the currently active task and
   * only removing the head of that list can result in a context
   * switch.
   *
   * rtcb->blink == NULL will tell us if the TCB is at the head of the
   * ready-to-run list and, hence, a candidate for the new running task.
   *
   * If so, then the tasklist RUNNABLE attribute will inform us if the list
   * holds the currently executing task and, hence, if a context switch
   * should occur.
   */

  if (rtcb->blink == NULL && TLIST_ISRUNNABLE(rtcb->task_state))
    {
      FAR struct tcb_s *nxttcb;
      FAR struct tcb_s *rtrtcb;
      int me;

      /* There must always be at least one task in the list (the IDLE task)
       * after the TCB being removed.
       */

      nxttcb = (FAR struct tcb_s *)rtcb->flink;
      DEBUGASSERT(nxttcb != NULL);

      /* If we are modifying the head of some assigned task list other than
       * our own, we will need to stop that CPU.
       */

      me = this_cpu();
      if (cpu != me)
        {
          DEBUGVERIFY(up_cpu_pause(cpu));
        }

      /* The task is running but the CPU that it was running on has been
       * paused.  We can now safely remove its TCB from the ready-to-run
       * task list.  In the SMP case this may be either the g_readytorun()
       * or the g_assignedtasks[cpu] list.
       */

      dq_rem((FAR dq_entry_t *)rtcb, tasklist);

      /* Which task will go at the head of the list?  It will be either the
       * next tcb in the assigned task list (nxttcb) or a TCB in the
       * g_readytorun list.  We can only select a task from that list if
       * the affinity mask includes the current CPU.
       *
       * REVISIT: What should we do, if anything, if pre-emption is locked
       * by another CPU?  Should we just use nxttcb?  Should we select
       * from the pending task list instead of the g_readytorun list?
       */

      for (rtrtcb = (FAR struct tcb_s *)g_readytorun.head;
           rtrtcb != NULL && !CPU_ISSET(cpu, &rtrtcb->affinity);
           rtrtcb = (FAR struct tcb_s *)rtrtcb->flink);

      /* Did we find a task in the g_readytorun list?  Which task should
       * we use?  We decide strictly by the priority of the two tasks:
       * Either (1) the task currently at the head of the g_assignedtasks[cpu]
       * list (nexttcb) or (2) the highest priority task from the
       * g_readytorun list with matching affinity (rtrtcb).
       */

      if (rtrtcb != NULL && rtrtcb->sched_priority >= nxttcb->sched_priority)
        {
          FAR struct tcb_s *tmptcb;

          /* The TCB at the head of the ready to run list has the higher
           * priority.  Remove that task from the head of the g_readytorun
           * list and add to the head of the g_assignedtasks[cpu] list.
           */

          tmptcb = (FAR struct tcb_s *)
            dq_remfirst((FAR dq_queue_t *)&g_readytorun);

          DEBUGASSERT(tmptcb == rtrtcb);

          dq_addfirst((FAR dq_entry_t *)tmptcb, tasklist);

          tmptcb->cpu = cpu;
          nxttcb = tmptcb;
        }

      /* Will pre-emption be disabled after the switch?  If the lockcount is
       * greater than zero, then this task/this CPU holds the scheduler lock.
       */

      if (nxttcb->lockcount > 0)
        {
          /* Yes... make sure that scheduling logic knows about this */

          spin_setbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
                      &g_cpu_schedlock);
        }
      else
        {
          /* No.. we may need to release our hold on the lock. */

          spin_clrbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
                      &g_cpu_schedlock);
        }

      /* Interrupts may be disabled after the switch.  If irqcount is greater
       * than zero, then this task/this CPU holds the IRQ lock
       */

      if (nxttcb->irqcount > 0)
        {
          /* Yes... make sure that scheduling logic knows about this */

          spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                      &g_cpu_irqlock);
        }
      else
        {
          /* No.. we may need to release our hold on the irq state. */

          spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                      &g_cpu_irqlock);
        }

      nxttcb->task_state = TSTATE_TASK_RUNNING;

      /* All done, restart the other CPU (if it was paused). */

      doswitch = true;
      if (cpu != me)
        {
          /* In this case we will not want to report a context switch to this
           * CPU.  Only the other CPU is affected.
           */

          DEBUGVERIFY(up_cpu_resume(cpu));
          doswitch = false;
        }
    }
  else
    {
      /* The task is not running.  Just remove its TCB from the ready-to-run
       * list.  In the SMP case this may be either the g_readytorun() or the
       * g_assignedtasks[cpu] list.
       */

      dq_rem((FAR dq_entry_t *)rtcb, tasklist);
    }

  /* Since the TCB is no longer in any list, it is now invalid */

  rtcb->task_state = TSTATE_TASK_INVALID;
  return doswitch;
}
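The affinity search in the example above is packed into a for-loop with an empty body, which is easy to misread. The following self-contained restatement of that search is a sketch under simplified assumptions: the node_s type, the find_affine() helper, and the plain uint32_t bit mask standing in for cpu_set_t/CPU_ISSET() are all hypothetical, not NuttX definitions.

#include <stddef.h>
#include <stdint.h>

struct node_s
{
  struct node_s *flink;   /* Next node in the (priority-ordered) ready list */
  uint32_t affinity;      /* Bit n set: the task may run on CPU n */
};

static struct node_s *find_affine(struct node_s *head, int cpu)
{
  struct node_s *node;

  /* Walk the list in priority order and return the first entry whose
   * affinity mask includes the target CPU, or NULL if none qualifies.
   */

  for (node = head; node != NULL; node = node->flink)
    {
      if ((node->affinity & (1u << cpu)) != 0)
        {
          return node;
        }
    }

  return NULL;
}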
Example No. 17
int pthread_create(FAR pthread_t *thread, FAR pthread_attr_t *attr,
                   pthread_startroutine_t start_routine, pthread_addr_t arg)
{
  FAR _TCB *ptcb;
  FAR join_t *pjoin;
  int status;
  int priority;
#if CONFIG_RR_INTERVAL > 0
  int policy;
#endif
  pid_t pid;

  /* If attributes were not supplied, use the default attributes */

  if (!attr)
    {
      attr = &g_default_pthread_attr;
    }

  /* Allocate a TCB for the new task. */

  ptcb = (FAR _TCB*)kzalloc(sizeof(_TCB));
  if (!ptcb)
    {
      return ENOMEM;
    }

  /* Associate file descriptors with the new task */

  status = sched_setuppthreadfiles(ptcb);
  if (status != OK)
    {
      sched_releasetcb(ptcb);
      return status;
    }

  /* Share the parent's environment */

  (void)env_share(ptcb);

  /* Allocate a detachable structure to support pthread_join logic */

  pjoin = (FAR join_t*)kzalloc(sizeof(join_t));
  if (!pjoin)
    {
      sched_releasetcb(ptcb);
      return ENOMEM;
    }

  /* Allocate the stack for the TCB */

  status = up_create_stack(ptcb, attr->stacksize);
  if (status != OK)
    {
      sched_releasetcb(ptcb);
      sched_free(pjoin);
      return ENOMEM;
    }

  /* Should we use the priority and scheduler specified in the
   * pthread attributes?  Or should we use the current thread's
   * priority and scheduler?
   */

  if (attr->inheritsched == PTHREAD_INHERIT_SCHED)
    {
      /* Get the priority for this thread. */

      struct sched_param param;
      status = sched_getparam(0, &param);
      if (status == OK)
        {
          priority = param.sched_priority;
        }
      else
        {
          priority = SCHED_FIFO;
        }

      /* Get the scheduler policy for this thread */

#if CONFIG_RR_INTERVAL > 0
      policy = sched_getscheduler(0);
      if (policy == ERROR)
        {
          policy = SCHED_FIFO;
        }
#endif
    }
  else
    {
      /* Use the priority and scheduler from the attributes */

      priority = attr->priority;
#if CONFIG_RR_INTERVAL > 0
      policy   = attr->policy;
#endif
    }

  /* Mark this task as a pthread (this setting will be needed in
   * task_schedsetup() when up_initial_state() is called).
   */

  ptcb->flags |= TCB_FLAG_TTYPE_PTHREAD;

  /* Initialize the task control block */

  status  = task_schedsetup(ptcb, priority, pthread_start,
                            (main_t)start_routine);
  if (status != OK)
    {

      sched_releasetcb(ptcb);
      sched_free(pjoin);
      return EBUSY;
    }

  /* Configure the TCB for a pthread receiving one parameter
   * passed by value
   */

  pthread_argsetup(ptcb, arg);

  /* Attach the join info to the TCB. */

  ptcb->joininfo = (void*)pjoin;

  /* If round robin scheduling is selected, set the appropriate flag
   * in the TCB.
   */

#if CONFIG_RR_INTERVAL > 0
  if (policy == SCHED_RR)
    {
      ptcb->flags    |= TCB_FLAG_ROUND_ROBIN;
      ptcb->timeslice = CONFIG_RR_INTERVAL / MSEC_PER_TICK;
    }
#endif

  /* Get the assigned pid before we start the task (who knows what
   * could happen to ptcb after this!).  Copy this ID into the join structure
   * as well.
   */

  pid = (int)ptcb->pid;
  pjoin->thread = (pthread_t)pid;

  /* Initialize the semaphores in the join structure to zero. */

  status = sem_init(&pjoin->data_sem, 0, 0);
  if (status == OK)
    {
      status = sem_init(&pjoin->exit_sem, 0, 0);
    }

  /* Activate the task */

  sched_lock();
  if (status == OK)
    {
      status = task_activate(ptcb);
    }

  if (status == OK)
    {
      /* Wait for the task to actually get running and to register
       * its join_t
       */

      (void)pthread_takesemaphore(&pjoin->data_sem);

      /* Return the thread information to the caller */

      if (thread) *thread = (pthread_t)pid;
      if (!pjoin->started) status = ERROR;

      sched_unlock();
      (void)sem_destroy(&pjoin->data_sem);
    }
  else
    {
      sched_unlock();
      dq_rem((FAR dq_entry_t*)ptcb, (dq_queue_t*)&g_inactivetasks);
      (void)sem_destroy(&pjoin->data_sem);
      (void)sem_destroy(&pjoin->exit_sem);
      sched_releasetcb(ptcb);
      sched_free(pjoin);
      return EIO;
    }

  return OK;
}
Example No. 18
int task_restart(pid_t pid)
{
  FAR _TCB  *rtcb;
  FAR _TCB  *tcb;
  int        status;
  irqstate_t state;

  /* Make sure this task does not become ready-to-run while
   * we are futzing with its TCB
   */

  sched_lock();

  /* Check if the task to restart is the calling task */

  rtcb = (FAR _TCB*)g_readytorun.head;
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      return ERROR;
    }

  /* We are restarting some other task than ourselves */

  else
    {
      /* Find the TCB associated with the matching pid  */

      tcb = sched_gettcb(pid);
      if (!tcb)
        {
          /* There is no TCB with this pid */

          return ERROR;
        }

      /* Remove the TCB from whatever list it is in.  At this point, the
       * TCB should no longer be accessible to the system 
       */

      state = irqsave();
      dq_rem((FAR dq_entry_t*)tcb, (dq_queue_t*)g_tasklisttable[tcb->task_state].list);
      tcb->task_state = TSTATE_TASK_INVALID;
      irqrestore(state);

      /* Deallocate anything left in the TCB's queues */

      sig_cleanup(tcb); /* Deallocate Signal lists */

      /* Reset the current task priority  */

      tcb->sched_priority = tcb->init_priority;

      /* Reset the base task priority and the number of pending reprioritizations */

#ifdef CONFIG_PRIORITY_INHERITANCE
      tcb->base_priority  = tcb->init_priority;
#  if CONFIG_SEM_NNESTPRIO > 0
      tcb->npend_reprio   = 0;
#  endif
#endif

      /* Re-initialize the processor-specific portion of the TCB
       * This will reset the entry point and the start-up parameters
       */

      up_initial_state(tcb);

      /* Add the task to the inactive task list */

      dq_addfirst((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
      tcb->task_state = TSTATE_TASK_INACTIVE;

      /* Activate the task */

      status = task_activate(tcb);
      if (status != OK)
        {
          dq_rem((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
          sched_releasetcb(tcb);
          return ERROR;
        }
    }

  sched_unlock();
  return OK;
}
Example No. 19
int task_restart(pid_t pid)
{
  FAR struct tcb_s *rtcb;
  FAR struct task_tcb_s *tcb;
  FAR dq_queue_t *tasklist;
  irqstate_t state;
  int status;

  /* Make sure this task does not become ready-to-run while
   * we are futzing with its TCB
   */

  sched_lock();

  /* Check if the task to restart is the calling task */

  rtcb = this_task();
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      set_errno(ENOSYS);
      return ERROR;
    }

#ifdef CONFIG_SMP
  /* There is currently no capability to restart a task that is actively
   * running on another CPU either.  This is not the calling task so if it
   * is running, then it could only be running on a different CPU.
   *
   * Also, we will need some interlocks to assure that no tasks are rescheduled
   * on any other CPU while we do this.
   */

#warning Missing SMP logic
  if (rtcb->task_state == TSTATE_TASK_RUNNING)
    {
      /* Not implemented */

      set_errno(ENOSYS);
      return ERROR;
    }
#endif

  /* We are restarting some other task than ourselves */
  /* Find the TCB associated with the matching pid  */

  tcb = (FAR struct task_tcb_s *)sched_gettcb(pid);
#ifndef CONFIG_DISABLE_PTHREAD
  if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
#else
  if (!tcb)
#endif
    {
      /* There is no TCB with this pid or, if there is, it is not a task. */

      set_errno(ESRCH);
      return ERROR;
    }

  /* Try to recover from any bad states */

  task_recover((FAR struct tcb_s *)tcb);

  /* Kill any children of this thread */

#ifdef HAVE_GROUP_MEMBERS
  (void)group_killchildren(tcb);
#endif

  /* Remove the TCB from whatever list it is in.  After this point, the TCB
   * should no longer be accessible to the system
   */

#ifdef CONFIG_SMP
  tasklist = TLIST_HEAD(tcb->cmn.task_state, tcb->cmn.cpu);
#else
  tasklist = TLIST_HEAD(tcb->cmn.task_state);
#endif

  state = irqsave();
  dq_rem((FAR dq_entry_t *)tcb, tasklist);
  tcb->cmn.task_state = TSTATE_TASK_INVALID;
  irqrestore(state);

  /* Deallocate anything left in the TCB's queues */

  sig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */

  /* Reset the current task priority  */

  tcb->cmn.sched_priority = tcb->init_priority;

  /* Reset the base task priority and the number of pending reprioritizations */

#ifdef CONFIG_PRIORITY_INHERITANCE
  tcb->cmn.base_priority = tcb->init_priority;
#  if CONFIG_SEM_NNESTPRIO > 0
  tcb->cmn.npend_reprio = 0;
#  endif
#endif

  /* Re-initialize the processor-specific portion of the TCB.  This will
   * reset the entry point and the start-up parameters
   */

  up_initial_state((FAR struct tcb_s *)tcb);

  /* Add the task to the inactive task list */

  dq_addfirst((FAR dq_entry_t *)tcb, (FAR dq_queue_t *)&g_inactivetasks);
  tcb->cmn.task_state = TSTATE_TASK_INACTIVE;

  /* Activate the task */

  status = task_activate((FAR struct tcb_s *)tcb);
  if (status != OK)
    {
      (void)task_delete(pid);
      set_errno(-status);
      return ERROR;
    }

  sched_unlock();
  return OK;
}
Example No. 20
int task_restart(pid_t pid)
{
  FAR struct tcb_s *rtcb;
  FAR struct task_tcb_s *tcb;
  FAR dq_queue_t *tasklist;
  irqstate_t flags;
  int errcode;
#ifdef CONFIG_SMP
  int cpu;
#endif
  int ret;

  /* Check if the task to restart is the calling task */

  rtcb = this_task();
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      errcode = ENOSYS;
      goto errout;
    }

  /* We are restarting some other task than ourselves.  Make sure that the
   * task does not change its state while we are executing.  In the single
   * CPU case this could be done by disabling pre-emption.  But we will use
   * a little stronger medicine in the SMP case:  The task may be running
   * on another CPU.
   */

  flags = enter_critical_section();

  /* Find the TCB associated with the matching pid  */

  tcb = (FAR struct task_tcb_s *)sched_gettcb(pid);
#ifndef CONFIG_DISABLE_PTHREAD
  if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
#else
  if (!tcb)
#endif
    {
      /* There is no TCB with this pid or, if there is, it is not a task. */

      errcode = ESRCH;
      goto errout_with_lock;
    }

#ifdef CONFIG_SMP
  /* If the task is running on another CPU, then pause that CPU.  We can
   * then manipulate the TCB of the restarted task and when we resume
   * that CPU, the restart will take effect.
   */

  cpu = sched_cpu_pause(&tcb->cmn);
#endif /* CONFIG_SMP */

  /* Try to recover from any bad states */

  task_recover((FAR struct tcb_s *)tcb);

  /* Kill any children of this thread */

#ifdef HAVE_GROUP_MEMBERS
  (void)group_killchildren(tcb);
#endif

  /* Remove the TCB from whatever list it is in.  After this point, the TCB
   * should no longer be accessible to the system
   */

#ifdef CONFIG_SMP
  tasklist = TLIST_HEAD(tcb->cmn.task_state, tcb->cmn.cpu);
#else
  tasklist = TLIST_HEAD(tcb->cmn.task_state);
#endif

  dq_rem((FAR dq_entry_t *)tcb, tasklist);
  tcb->cmn.task_state = TSTATE_TASK_INVALID;

  /* Deallocate anything left in the TCB's queues */

  sig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */

  /* Reset the current task priority  */

  tcb->cmn.sched_priority = tcb->cmn.init_priority;

  /* The task should restart with pre-emption disabled and not in a critical
   * section.
   */

  tcb->cmn.lockcount = 0;
#ifdef CONFIG_SMP
  tcb->cmn.irqcount  = 0;
#endif

  /* Reset the base task priority and the number of pending reprioritizations */

#ifdef CONFIG_PRIORITY_INHERITANCE
  tcb->cmn.base_priority = tcb->cmn.init_priority;
#  if CONFIG_SEM_NNESTPRIO > 0
  tcb->cmn.npend_reprio = 0;
#  endif
#endif

  /* Re-initialize the processor-specific portion of the TCB.  This will
   * reset the entry point and the start-up parameters
   */

  up_initial_state((FAR struct tcb_s *)tcb);

  /* Add the task to the inactive task list */

  dq_addfirst((FAR dq_entry_t *)tcb, (FAR dq_queue_t *)&g_inactivetasks);
  tcb->cmn.task_state = TSTATE_TASK_INACTIVE;

#ifdef CONFIG_SMP
  /* Resume the paused CPU (if any) */

  if (cpu >= 0)
    {
      ret = up_cpu_resume(cpu);
      if (ret < 0)
        {
          errcode = -ret;
          goto errout_with_lock;
        }
    }
#endif /* CONFIG_SMP */

  leave_critical_section(flags);

  /* Activate the task. */

  ret = task_activate((FAR struct tcb_s *)tcb);
  if (ret != OK)
    {
      (void)task_terminate(pid, true);
      errcode = -ret;
      goto errout;
    }

  return OK;

errout_with_lock:
  leave_critical_section(flags);
errout:
  set_errno(errcode);
  return ERROR;
}
Example No. 21
int work_thread(int argc, char *argv[])
{
  volatile FAR struct work_s *work;
  worker_t  worker;
  FAR void *arg;
  uint32_t elapsed;
  uint32_t remaining;
  uint32_t next;
  int usec;
  irqstate_t flags;

  /* Loop forever */

  usec = CONFIG_SCHED_WORKPERIOD;
  flags = irqsave();
  for (;;)
    {
      /* Wait awhile to check the work list.  We will wait here until either
       * the time elapses or until we are awakened by a signal.
       */

      usleep(usec);
      irqrestore(flags);

      /* First, perform garbage collection.  This cleans up memory de-allocations
       * that were queued because they could not be freed in that execution
       * context (for example, if the memory was freed from an interrupt handler).
       * NOTE: If the work thread is disabled, this clean-up is performed by
       * the IDLE thread (at a very, very low priority).
       */

      sched_garbagecollection();

      /* Then process queued work.  We need to keep interrupts disabled while
       * we process items in the work list.
       */

      next  = CONFIG_SCHED_WORKPERIOD / USEC_PER_TICK;
      flags = irqsave();
      work  = (FAR struct work_s *)g_work.head;
      while (work)
        {
          /* Is this work ready?  It is ready if there is no delay or if
           * the delay has elapsed. qtime is the time that the work was added
           * to the work queue.  It will always be greater than or equal to
           * zero.  Therefore a delay of zero will always execute immediately.
           */

          elapsed = clock_systimer() - work->qtime;
          if (elapsed >= work->delay)
            {
              /* Remove the ready-to-execute work from the list */

              (void)dq_rem((struct dq_entry_s *)work, &g_work);

              /* Extract the work description from the entry (in case the work
               * instance is re-used after it has been de-queued).
               */

              worker = work->worker;
              arg    = work->arg;

              /* Mark the work as no longer being queued */

              work->worker = NULL;

              /* Do the work.  Re-enable interrupts while the work is being
               * performed... we don't have any idea how long that will take!
               */

              irqrestore(flags);
              worker(arg);

              /* Now, unfortunately, since we re-enabled interrupts we don't
               * know the state of the work list and we will have to start
               * back at the head of the list.
               */

              flags = irqsave();
              work  = (FAR struct work_s *)g_work.head;
            }
          else
            {
              /* This one is not ready.. will it be ready before the next
               * scheduled wakeup interval?
               */

              remaining = work->delay - elapsed;
              if (remaining < next)
                {
                  /* Yes.. Then schedule to wake up when the work is ready */

                  next = remaining;
                }
              
              /* Then try the next in the list. */

              work = (FAR struct work_s *)work->dq.flink;
            }
        }

      /* Now calculate the microsecond delay we should wait */

      usec = next * USEC_PER_TICK;
    }

  return OK; /* To keep some compilers happy */
}
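The worker loops in this collection all do the same per-entry time accounting: the ticks elapsed since work->qtime are compared against work->delay, and the smallest remaining delay decides how long to sleep before the next pass. The sketch below restates that arithmetic in isolation; remaining_ticks() and next_wakeup() are hypothetical helper names, and a plain uint32_t tick counter stands in for clock_systimer().

#include <stdint.h>

/* Ticks remaining before a queued item is due.  Unsigned subtraction keeps
 * the elapsed-time computation correct across tick-counter wrap-around.
 */

static uint32_t remaining_ticks(uint32_t qtime, uint32_t delay, uint32_t now)
{
  uint32_t elapsed = now - qtime;

  return (elapsed >= delay) ? 0 : (delay - elapsed);
}

/* Smallest remaining delay over a set of queued items, bounded above by the
 * default polling period.  This is the value the loops above sleep for.
 */

static uint32_t next_wakeup(const uint32_t *qtime, const uint32_t *delay,
                            int count, uint32_t now, uint32_t period)
{
  uint32_t next = period;
  int i;

  for (i = 0; i < count; i++)
    {
      uint32_t remaining = remaining_ticks(qtime[i], delay[i], now);
      if (remaining < next)
        {
          next = remaining;
        }
    }

  return next;
}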
Example No. 22
void work_process(FAR struct usr_wqueue_s *wqueue)
{
  volatile FAR struct work_s *work;
  worker_t  worker;
  FAR void *arg;
  uint32_t elapsed;
  uint32_t remaining;
  uint32_t stick;
  uint32_t ctick;
  uint32_t next;
  int ret;

  /* Then process queued work.  Lock the work queue while we process items
   * in the work list.
   */

  next = wqueue->delay;
  ret = work_lock();
  if (ret < 0)
    {
      /* Break out earlier if we were awakened by a signal */

      return;
    }

  /* Get the time that we started this polling cycle in clock ticks. */

  stick = clock_systimer();

  /* And check each entry in the work queue.  Since we have locked the
   * work queue we know:  (1) we will not be suspended unless we do
   * so ourselves, and (2) there will be no changes to the work queue
   */

  work = (FAR struct work_s *)wqueue->q.head;
  while (work)
    {
      /* Is this work ready?  It is ready if there is no delay or if
       * the delay has elapsed. qtime is the time that the work was added
       * to the work queue.  It will always be greater than or equal to
       * zero.  Therefore a delay of zero will always execute immediately.
       */

      ctick   = clock_systimer();
      elapsed = ctick - work->qtime;
      if (elapsed >= work->delay)
        {
          /* Remove the ready-to-execute work from the list */

          (void)dq_rem((struct dq_entry_s *)work, &wqueue->q);

          /* Extract the work description from the entry (in case the work
           * instance is re-used after it has been de-queued).
           */

          worker = work->worker;

          /* Check for a race condition where the work may be nullified
           * before it is removed from the queue.
           */

          if (worker != NULL)
            {
              /* Extract the work argument (before unlocking the work queue) */

              arg = work->arg;

              /* Mark the work as no longer being queued */

              work->worker = NULL;

              /* Do the work.  Unlock the work queue while the work is being
               * performed... we don't have any idea how long this will take!
               */

              work_unlock();
              worker(arg);

              /* Now, unfortunately, since we unlocked the work queue we don't
               * know the state of the work list and we will have to start
               * back at the head of the list.
               */

              ret = work_lock();
              if (ret < 0)
                {
                  /* Break out earlier if we were awakened by a signal */

                  return;
                }

              work = (FAR struct work_s *)wqueue->q.head;
            }
          else
            {
              /* Cancelled.. Just move to the next work in the list with
               * the work queue still locked.
               */

              work = (FAR struct work_s *)work->dq.flink;
            }
        }
      else /* elapsed < work->delay */
        {
          /* This one is not ready.
           *
           * NOTE that elapsed is relative to the current time,
           * not the time of beginning of this queue processing pass.
           * So it may need an adjustment.
           */

          elapsed += (ctick - stick);
          if (elapsed > work->delay)
            {
              /* The delay has expired while we are processing */

              elapsed = work->delay;
            }

          /* Will it be ready before the next scheduled wakeup interval? */

          remaining = work->delay - elapsed;
          if (remaining < next)
            {
              /* Yes.. Then schedule to wake up when the work is ready */

              next = remaining;
            }

          /* Then try the next in the list. */

          work = (FAR struct work_s *)work->dq.flink;
        }
    }

  /* Get the delay (in clock ticks) since we started the sampling */

  elapsed = clock_systimer() - stick;
  if (elapsed < wqueue->delay && next > 0)
    {
      /* How much time would we need to delay to get to the end of the
       * sampling period?  The amount of time we delay should be the smaller
       * of the time to the end of the sampling period and the time to the
       * next work expiry.
       */

      remaining = wqueue->delay - elapsed;
      next      = MIN(next, remaining);

      /* Wait awhile to check the work list.  We will wait here until
       * either the time elapses or until we are awakened by a signal.
       * Interrupts will be re-enabled while we wait.
       */

      usleep(next * USEC_PER_TICK);
    }

  work_unlock();
}
Example No. 23
static int thread_create(FAR const char *name, uint8_t ttype, int priority,
                         int stack_size, main_t entry, FAR char * const argv[])
{
  FAR struct task_tcb_s *tcb;
  pid_t pid;
  int errcode;
  int ret;

  /* Allocate a TCB for the new task. */

  tcb = (FAR struct task_tcb_s *)kmm_zalloc(sizeof(struct task_tcb_s));
  if (!tcb)
    {
      sdbg("ERROR: Failed to allocate TCB\n");
      errcode = ENOMEM;
      goto errout;
    }

  /* Allocate a new task group with privileges appropriate for the parent
   * thread type.
   */

#ifdef HAVE_TASK_GROUP
  ret = group_allocate(tcb, ttype);
  if (ret < 0)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }
#endif

  /* Associate file descriptors with the new task */

#if CONFIG_NFILE_DESCRIPTORS > 0 || CONFIG_NSOCKET_DESCRIPTORS > 0
  ret = group_setuptaskfiles(tcb);
  if (ret < OK)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }
#endif

  /* Allocate the stack for the TCB */

  ret = up_create_stack((FAR struct tcb_s *)tcb, stack_size, ttype);
  if (ret < OK)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }

  /* Initialize the task control block */

  ret = task_schedsetup(tcb, priority, task_start, entry, ttype);
  if (ret < OK)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }

  /* Setup to pass parameters to the new task */

  (void)task_argsetup(tcb, name, argv);

  /* Now we have enough in place that we can join the group */

#ifdef HAVE_TASK_GROUP
  ret = group_initialize(tcb);
  if (ret < 0)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }
#endif

  /* Get the assigned pid before we start the task */

  pid = (int)tcb->cmn.pid;

  /* Activate the task */

  ret = task_activate((FAR struct tcb_s *)tcb);
  if (ret < OK)
    {
      errcode = get_errno();

      /* The TCB was added to the inactive task list by task_schedsetup() */

      dq_rem((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
      goto errout_with_tcb;
    }

  return pid;

errout_with_tcb:
  sched_releasetcb((FAR struct tcb_s *)tcb, ttype);

errout:
  set_errno(errcode);
  return ERROR;
}
Example No. 24
struct uip_conn *uip_tcpalloc(void)
{
  struct uip_conn *conn;
  uip_lock_t flags;

  /* Because this routine is called from both interrupt level and
   * from user level, we have no option but to disable interrupts
   * while accessing g_free_tcp_connections[];
   */

  flags = uip_lock();

  /* Return the entry from the head of the free list */

  conn = (struct uip_conn *)dq_remfirst(&g_free_tcp_connections);

#if 0 /* Revisit */
  /* Is the free list empty? */

  if (!conn)
    {
      /* As a fallback, check for connection structures in the TIME_WAIT
       * state.  If no CLOSED connections are found, then take the oldest
       */

      struct uip_conn *tmp = g_active_tcp_connections.head;
      while (tmp)
        {
          /* Is this connection in the UIP_TIME_WAIT state? */

          if (tmp->tcpstateflags == UIP_TIME_WAIT)
            {
              /* Is it the oldest one we have seen so far? */

              if (!conn || tmp->timer > conn->timer)
                {
                  /* Yes.. remember it */

                  conn = tmp;
                }
            }

          /* Look at the next active connection */

          tmp = tmp->node.flink;
        }

      /* If we found one, remove it from the active connection list */

      if (conn)
        {
          dq_rem(&conn->node, &g_active_tcp_connections);
        }
    }
#endif

  uip_unlock(flags);

  /* Mark the connection allocated */

  if (conn)
    {
      conn->tcpstateflags = UIP_ALLOCATED;
    }

  return conn;
}
Example No. 25
void work_process(FAR struct kwork_wqueue_s *wqueue, systime_t period, int wndx)
{
  volatile FAR struct work_s *work;
  worker_t  worker;
  irqstate_t flags;
  FAR void *arg;
  systime_t elapsed;
  systime_t remaining;
  systime_t stick;
  systime_t ctick;
  systime_t next;

  /* Then process queued work.  We need to keep interrupts disabled while
   * we process items in the work list.
   */

  next  = period;
  flags = irqsave();

  /* Get the time that we started this polling cycle in clock ticks. */

  stick = clock_systimer();

  /* And check each entry in the work queue.  Since we have disabled
   * interrupts we know:  (1) we will not be suspended unless we do
   * so ourselves, and (2) there will be no changes to the work queue
   */

  work = (FAR struct work_s *)wqueue->q.head;
  while (work)
    {
      /* Is this work ready?  It is ready if there is no delay or if
       * the delay has elapsed. qtime is the time that the work was added
       * to the work queue.  It will always be greater than or equal to
       * zero.  Therefore a delay of zero will always execute immediately.
       */

      ctick   = clock_systimer();
      elapsed = ctick - work->qtime;
      if (elapsed >= work->delay)
        {
          /* Remove the ready-to-execute work from the list */

          (void)dq_rem((struct dq_entry_s *)work, &wqueue->q);

          /* Extract the work description from the entry (in case the work
           * instance is re-used after it has been de-queued).
           */

          worker = work->worker;

          /* Check for a race condition where the work may be nullified
           * before it is removed from the queue.
           */

          if (worker != NULL)
            {
              /* Extract the work argument (before re-enabling interrupts) */

              arg = work->arg;

              /* Mark the work as no longer being queued */

              work->worker = NULL;

              /* Do the work.  Re-enable interrupts while the work is being
               * performed... we don't have any idea how long this will take!
               */

              irqrestore(flags);
              worker(arg);

              /* Now, unfortunately, since we re-enabled interrupts we don't
               * know the state of the work list and we will have to start
               * back at the head of the list.
               */

              flags = irqsave();
              work  = (FAR struct work_s *)wqueue->q.head;
            }
          else
            {
              /* Cancelled.. Just move to the next work in the list with
               * interrupts still disabled.
               */

              work = (FAR struct work_s *)work->dq.flink;
            }
        }
      else /* elapsed < work->delay */
        {
          /* This one is not ready.
           *
           * NOTE that elapsed is relative to the current time,
           * not the time of beginning of this queue processing pass.
           * So it may need an adjustment.
           */

          elapsed += (ctick - stick);
          if (elapsed > work->delay)
            {
              /* The delay has expired while we are processing */

              elapsed = work->delay;
            }

          /* Will it be ready before the next scheduled wakeup interval? */

          remaining = work->delay - elapsed;
          if (remaining < next)
            {
              /* Yes.. Then schedule to wake up when the work is ready */

              next = remaining;
            }

          /* Then try the next in the list. */

          work = (FAR struct work_s *)work->dq.flink;
        }
    }

#if defined(CONFIG_SCHED_LPWORK) && CONFIG_SCHED_LPNTHREADS > 0
  /* Value of zero for period means that we should wait indefinitely until
   * signalled.  This option is used only for the case where there are
   * multiple, low-priority worker threads.  In that case, only one of
   * the threads does the poll... the others simply wait.  In all other cases
   * period will be non-zero and equal to wqueue->delay.
   */

   if (period == 0)
     {
       sigset_t set;

       /* Wait indefinitely until signalled with SIGWORK */

       sigemptyset(&set);
       sigaddset(&set, SIGWORK);

       wqueue->worker[wndx].busy = false;
       DEBUGVERIFY(sigwaitinfo(&set, NULL));
       wqueue->worker[wndx].busy = true;
     }
   else
#endif
    {
      /* Get the delay (in clock ticks) since we started the sampling */

      elapsed = clock_systimer() - stick;
      if (elapsed < period && next > 0)
        {
          /* How much time would we need to delay to get to the end of the
           * sampling period?  The amount of time we delay should be the smaller
           * of the time to the end of the sampling period and the time to the
           * next work expiry.
           */

          remaining = period - elapsed;
          next      = MIN(next, remaining);

          /* Wait awhile to check the work list.  We will wait here until
           * either the time elapses or until we are awakened by a signal.
           * Interrupts will be re-enabled while we wait.
           */

          wqueue->worker[wndx].busy = false;
          usleep(next * USEC_PER_TICK);
          wqueue->worker[wndx].busy = true;
        }
    }

  irqrestore(flags);
}
Example No. 26
static void work_process(struct wqueue_s *wqueue, int lock_id)
{
	volatile struct work_s *work;
	worker_t  worker;
	void *arg;
	uint64_t elapsed;
	uint32_t remaining;
	uint32_t next;

	/* Then process queued work.  Lock the work queue while we process items
	 * in the work list.
	 */

	next  = CONFIG_SCHED_WORKPERIOD;

	work_lock(lock_id);

	work  = (struct work_s *)wqueue->q.head;

	while (work) {
		/* Is this work ready?  It is ready if there is no delay or if
		 * the delay has elapsed. qtime is the time that the work was added
		 * to the work queue.  It will always be greater than or equal to
		 * zero.  Therefore a delay of zero will always execute immediately.
		 */

		elapsed = USEC2TICK(clock_systimer() - work->qtime);

		//printf("work_process: in ticks elapsed=%lu delay=%u\n", elapsed, work->delay);
		if (elapsed >= work->delay) {
			/* Remove the ready-to-execute work from the list */

			(void)dq_rem((struct dq_entry_s *)work, &wqueue->q);

			/* Extract the work description from the entry (in case the work
			 * instance is re-used after it has been de-queued).
			 */

			worker = work->worker;
			arg    = work->arg;

			/* Mark the work as no longer being queued */

			work->worker = NULL;

			/* Do the work.  Unlock the work queue while the work is being
			 * performed... we don't have any idea how long that will take!
			 */

			work_unlock(lock_id);

			if (!worker) {
				PX4_WARN("MESSED UP: worker = 0\n");

			} else {
				worker(arg);
			}

			/* Now, unfortunately, since we unlocked the work queue we don't
			 * know the state of the work list and we will have to start
			 * back at the head of the list.
			 */

			work_lock(lock_id);
			work  = (struct work_s *)wqueue->q.head;

		} else {
			/* This one is not ready.. will it be ready before the next
			 * scheduled wakeup interval?
			 */

			/* Here: elapsed < work->delay */
			remaining = USEC_PER_TICK * (work->delay - elapsed);

			if (remaining < next) {
				/* Yes.. Then schedule to wake up when the work is ready */

				next = remaining;
			}

			/* Then try the next in the list. */

			work = (struct work_s *)work->dq.flink;
		}
	}

	/* Wait awhile to check the work list.  We will wait here until either
	 * the time elapses or until we are awakened by a signal.
	 */
	work_unlock(lock_id);

	usleep(next);
}
Example No. 27
int task_restart(pid_t pid)
{
  FAR struct tcb_s *rtcb;
  FAR struct task_tcb_s *tcb;
  irqstate_t state;
  int status;

  /* Make sure this task does not become ready-to-run while
   * we are futzing with its TCB
   */

  sched_lock();

  /* Check if the task to restart is the calling task */

  rtcb = (FAR struct tcb_s *)g_readytorun.head;
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      set_errno(ENOSYS);
      return ERROR;
    }

  /* We are restarting some other task than ourselves */

  else
    {
      /* Find the TCB associated with the matching pid  */

      tcb = (FAR struct task_tcb_s *)sched_gettcb(pid);
#ifndef CONFIG_DISABLE_PTHREAD
      if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
#else
      if (!tcb)
#endif
        {
          /* There is no TCB with this pid or, if there is, it is not a
           * task.
           */

          set_errno(ESRCH);
          return ERROR;
        }

      /* Try to recover from any bad states */

      task_recover((FAR struct tcb_s *)tcb);

      /* Kill any children of this thread */

#if HAVE_GROUP_MEMBERS
      (void)group_killchildren(tcb);
#endif

      /* Remove the TCB from whatever list it is in.  At this point, the
       * TCB should no longer be accessible to the system
       */

      state = irqsave();
      dq_rem((FAR dq_entry_t*)tcb,
             (dq_queue_t*)g_tasklisttable[tcb->cmn.task_state].list);
      tcb->cmn.task_state = TSTATE_TASK_INVALID;
      irqrestore(state);

      /* Deallocate anything left in the TCB's queues */

      sig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */

      /* Reset the current task priority  */

      tcb->cmn.sched_priority = tcb->init_priority;

      /* Reset the base task priority and the number of pending reprioritizations */

#ifdef CONFIG_PRIORITY_INHERITANCE
      tcb->cmn.base_priority = tcb->init_priority;
#  if CONFIG_SEM_NNESTPRIO > 0
      tcb->cmn.npend_reprio = 0;
#  endif
#endif

      /* Re-initialize the processor-specific portion of the TCB
       * This will reset the entry point and the start-up parameters
       */

      up_initial_state((FAR struct tcb_s *)tcb);

      /* Add the task to the inactive task list */

      dq_addfirst((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
      tcb->cmn.task_state = TSTATE_TASK_INACTIVE;

      /* Activate the task */

      status = task_activate((FAR struct tcb_s *)tcb);
      if (status != OK)
        {
          (void)task_delete(pid);
          set_errno(-status);
          return ERROR;
        }
    }

  sched_unlock();
  return OK;
}
Example No. 28
int sched_setpriority(FAR struct tcb_s *tcb, int sched_priority)
{
  FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;
  tstate_t task_state;
  irqstate_t saved_state;

  /* Verify that the requested priority is in the valid range */

  if (sched_priority < SCHED_PRIORITY_MIN ||
      sched_priority > SCHED_PRIORITY_MAX)
    {
      set_errno(EINVAL);
      return ERROR;
    }

  /* We need to assure that there is no interrupt activity while
   * performing the following.
   */

  saved_state = irqsave();

  /* There are four cases that must be considered: */

  task_state = tcb->task_state;
  switch (task_state)
    {
      /* CASE 1. The task is running or ready-to-run and a context switch
       * may be caused by the re-prioritization
       */

      case TSTATE_TASK_RUNNING:

        /* A context switch will occur if the new priority of the running
         * task becomes less than OR EQUAL TO the next highest priority
         * ready to run task.
         */

        if (sched_priority <= tcb->flink->sched_priority)
          {
            /* A context switch will occur. */

            up_reprioritize_rtr(tcb, (uint8_t)sched_priority);
          }

        /* Otherwise, we can just change priority since it has no effect */

        else
          {
            /* Change the task priority */

            tcb->sched_priority = (uint8_t)sched_priority;
          }
        break;

      /* CASE 2. The task is running or ready-to-run and a context switch
       * may be caused by the re-prioritization
       */

      case TSTATE_TASK_READYTORUN:

        /* A context switch will occur if the new priority of the ready-to
         * run task is (strictly) greater than the current running task
         */

        if (sched_priority > rtcb->sched_priority)
          {
            /* A context switch will occur. */

            up_reprioritize_rtr(tcb, (uint8_t)sched_priority);
          }

        /* Otherwise, we can just change priority and re-schedule (since it
         * has no other effect).
         */

        else
          {
            /* Remove the TCB from the ready-to-run task list */

            ASSERT(!sched_removereadytorun(tcb));

            /* Change the task priority */

            tcb->sched_priority = (uint8_t)sched_priority;

            /* Put it back into the ready-to-run task list */

            ASSERT(!sched_addreadytorun(tcb));
          }
        break;

      /* CASE 3. The task is not in the ready to run list.  Changing its
       * priority cannot affect the currently executing task.
       */

      default:

        /* CASE 3a. The task resides in a prioritized list. */

        if (g_tasklisttable[task_state].prioritized)
          {
            /* Remove the TCB from the prioritized task list */

            dq_rem((FAR dq_entry_t*)tcb, (FAR dq_queue_t*)g_tasklisttable[task_state].list);

            /* Change the task priority */

            tcb->sched_priority = (uint8_t)sched_priority;

            /* Put it back into the prioritized list at the correct
             * position
             */

            sched_addprioritized(tcb, (FAR dq_queue_t*)g_tasklisttable[task_state].list);
          }

        /* CASE 3b. The task resides in a non-prioritized list. */

        else
          {
            /* Just change the task's priority */

            tcb->sched_priority = (uint8_t)sched_priority;
          }
        break;
    }

  irqrestore(saved_state);
  return OK;
}
Example No. 29
void uip_tcpfree(struct uip_conn *conn)
{
  FAR struct uip_callback_s *cb;
  FAR struct uip_callback_s *next;
#ifdef CONFIG_NET_TCP_READAHEAD
  FAR struct uip_readahead_s *readahead;
#endif
#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
  FAR struct uip_wrbuffer_s *wrbuffer;
#endif
  uip_lock_t flags;

  /* Because g_free_tcp_connections is accessed from user level and interrupt
   * level code, it is necessary to keep interrupts disabled during this
   * operation.
   */

  DEBUGASSERT(conn->crefs == 0);
  flags = uip_lock();

  /* Free remaining callbacks, actually there should be only the close callback
   * left.
   */

  for (cb = conn->list; cb; cb = next)
    {
      next = cb->flink;
      uip_tcpcallbackfree(conn, cb);
    }

  /* UIP_ALLOCATED means that the connection is not in the active list
   * yet.
   */

  if (conn->tcpstateflags != UIP_ALLOCATED)
    {
      /* Remove the connection from the active list */

      dq_rem(&conn->node, &g_active_tcp_connections);
    }

#ifdef CONFIG_NET_TCP_READAHEAD
  /* Release any read-ahead buffers attached to the connection */

  while ((readahead = (struct uip_readahead_s *)sq_remfirst(&conn->readahead)) != NULL)
    {
      uip_tcpreadahead_release(readahead);
    }
#endif

#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
  /* Release any write buffers attached to the connection */

  while ((wrbuffer = (struct uip_wrbuffer_s *)sq_remfirst(&conn->write_q)) != NULL)
    {
      uip_tcpwrbuffer_release(wrbuffer);
    }

  while ((wrbuffer = (struct uip_wrbuffer_s *)sq_remfirst(&conn->unacked_q)) != NULL)
    {
      uip_tcpwrbuffer_release(wrbuffer);
    }
#endif

#ifdef CONFIG_NET_TCPBACKLOG
  /* Remove any backlog attached to this connection */

  if (conn->backlog)
    {
      uip_backlogdestroy(conn);
    }

  /* If this connection is, itself, backlogged, then remove it from the
   * parent connection's backlog list.
   */

  if (conn->blparent)
    {
      uip_backlogdelete(conn->blparent, conn);
    }
#endif

  /* Mark the connection available and put it into the free list */

  conn->tcpstateflags = UIP_CLOSED;
  dq_addlast(&conn->node, &g_free_tcp_connections);
  uip_unlock(flags);
}
Example No. 30
static void work_process(FAR struct wqueue_s *wqueue)
{
  volatile FAR struct work_s *work;
  worker_t  worker;
  irqstate_t flags;
  FAR void *arg;
  uint32_t elapsed;
  uint32_t remaining;
  uint32_t next;

  /* Then process queued work.  We need to keep interrupts disabled while
   * we process items in the work list.
   */

  next  = CONFIG_SCHED_WORKPERIOD / USEC_PER_TICK;
  flags = irqsave();
  work  = (FAR struct work_s *)wqueue->q.head;
  while (work)
    {
      /* Is this work ready?  It is ready if there is no delay or if
       * the delay has elapsed. qtime is the time that the work was added
       * to the work queue.  It will always be greater than or equal to
       * zero.  Therefore a delay of zero will always execute immediately.
       */

      elapsed = clock_systimer() - work->qtime;
      if (elapsed >= work->delay)
        {
          /* Remove the ready-to-execute work from the list */

          (void)dq_rem((struct dq_entry_s *)work, &wqueue->q);

          /* Extract the work description from the entry (in case the work
           * instance is re-used after it has been de-queued).
           */

          worker = work->worker;

          /* Check for a race condition where the work may be nullified
           * before it is removed from the queue.
           */

          if (worker != NULL)
            {
              /* Extract the work argument (before re-enabling interrupts) */

              arg = work->arg;

              /* Mark the work as no longer being queued */

              work->worker = NULL;

              /* Do the work.  Re-enable interrupts while the work is being
               * performed... we don't have any idea how long that will take!
               */

              irqrestore(flags);
              worker(arg);

              /* Now, unfortunately, since we re-enabled interrupts we don't
               * know the state of the work list and we will have to start
               * back at the head of the list.
               */

              flags = irqsave();
              work  = (FAR struct work_s *)wqueue->q.head;
            }
          else
            {
              /* Cancelled.. Just move to the next work in the list with
               * interrupts still disabled.
               */

              work = (FAR struct work_s *)work->dq.flink;
            }
        }
      else
        {
          /* This one is not ready.. will it be ready before the next
           * scheduled wakeup interval?
           */

          remaining = work->delay - elapsed;
          if (remaining < next)
            {
              /* Yes.. Then schedule to wake up when the work is ready */

              next = remaining;
            }

          /* Then try the next in the list. */

          work = (FAR struct work_s *)work->dq.flink;
        }
    }

  /* Wait awhile to check the work list.  We will wait here until either
   * the time elapses or until we are awakened by a signal.
   */

  usleep(next * USEC_PER_TICK);
  irqrestore(flags);
}