void pg_miss(void)
{
  FAR struct tcb_s *ftcb = (FAR struct tcb_s*)g_readytorun.head;
  FAR struct tcb_s *wtcb;

  /* Sanity checking
   *
   * ASSERT if the currently executing task is the page fill worker thread.
   * The page fill worker thread is how the page fault is resolved and
   * all logic associated with the page fill worker must be "locked" and
   * always present in memory.
   */

  pglldbg("Blocking TCB: %p PID: %d\n", ftcb, ftcb->pid);
  DEBUGASSERT(g_pgworker != ftcb->pid);

  /* Block the currently executing task
   * - Call up_block_task() to block the task at the head of the ready-
   *   to-run list.  This should cause an interrupt level context switch
   *   to the next highest priority task.
   * - The blocked task will be marked with state TSTATE_WAIT_PAGEFILL
   *   and will be retained in the g_waitingforfill prioritized task list.
   */

  up_block_task(ftcb, TSTATE_WAIT_PAGEFILL);

  /* Boost the page fill worker thread priority.
   * - Check the priority of the task at the head of the g_waitingforfill
   *   list.  If the priority of that task is higher than the current
   *   priority of the page fill worker thread, then boost the priority
   *   of the page fill worker thread to that priority.
   */

  wtcb = sched_gettcb(g_pgworker);
  DEBUGASSERT(wtcb != NULL);

  if (wtcb->sched_priority < ftcb->sched_priority)
    {
      /* Reprioritize the page fill worker thread */

      pgllvdbg("New worker priority. %d->%d\n",
               wtcb->sched_priority, ftcb->sched_priority);
      sched_setpriority(wtcb, ftcb->sched_priority);
    }

  /* Signal the page fill worker thread.
   * - Is there a page fill pending?  If not then signal the worker
   *   thread to start working on the queued page fill requests.
   */

  if (!g_pftcb)
    {
      pglldbg("Signaling worker. PID: %d\n", g_pgworker);
      kill(g_pgworker, SIGWORK);
    }
}
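For reference, the other half of this transaction lives in the page fill worker: once a fill completes, the task blocked above must be made ready-to-run again. The sketch below (hypothetical function name) only illustrates that pairing, assuming the worker takes the waiting TCB from the head of g_waitingforfill; the real logic in pg_worker.c tracks the in-progress fill in g_pftcb and handles additional queued requests.

static void pg_fill_complete_sketch(void)
{
  /* Hypothetical sketch: up_block_task() left the faulting task in the
   * g_waitingforfill prioritized list.  When its fill is complete, the
   * worker simply makes it ready-to-run again.
   */

  FAR struct tcb_s *wtcb = (FAR struct tcb_s *)g_waitingforfill.head;

  if (wtcb != NULL)
    {
      /* up_unblock_task() removes the TCB from its blocked list, adds it
       * back to g_readytorun, and performs a context switch if the
       * unblocked task now has the highest priority.
       */

      up_unblock_task(wtcb);
    }
}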
Example #2
FAR mqmsg_t *mq_waitreceive(mqd_t mqdes)
{
  FAR struct tcb_s *rtcb;
  FAR msgq_t *msgq;
  FAR mqmsg_t *rcvmsg;

  /* Get a pointer to the message queue */

  msgq = mqdes->msgq;

  /* Get the message from the head of the queue */

  while ((rcvmsg = (FAR mqmsg_t*)sq_remfirst(&msgq->msglist)) == NULL)
    {
      /* The queue is empty!  Should we block until the above condition
       * has been satisfied?
       */

      if ((mqdes->oflags & O_NONBLOCK) == 0)
        {
          /* Yes.. Block and try again */

          rtcb = (FAR struct tcb_s*)g_readytorun.head;
          rtcb->msgwaitq = msgq;
          msgq->nwaitnotempty++;

          set_errno(OK);
          up_block_task(rtcb, TSTATE_WAIT_MQNOTEMPTY);

          /* When we resume at this point, either (1) the message queue
           * is no longer empty, or (2) the wait has been interrupted by
           * a signal.  We can detect the latter case by examining the
           * errno value (should be either EINTR or ETIMEDOUT).
           */

          if (get_errno() != OK)
            {
              break;
            }
        }
      else
        {
          /* The queue was empty, and the O_NONBLOCK flag was set for the
           * message queue description referred to by 'mqdes'.
           */

          set_errno(EAGAIN);
          break;
        }
    }

  /* If we got a message, then decrement the number of messages in
   * the queue while we are still in the critical section
   */

  if (rcvmsg)
    {
      msgq->nmsgs--;
    }

  return rcvmsg;
}
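Because a message may be posted by mq_send() running from an interrupt handler, mq_waitreceive() must be entered with interrupts disabled. The sketch below (hypothetical function name) shows roughly how a caller such as mq_receive() drives it; parameter checking and the copy-out step (handled by a separate helper, mq_doreceive(), in this generation of NuttX) are elided, so treat the details as assumptions.

FAR mqmsg_t *mq_receive_critical_sketch(mqd_t mqdes)
{
  FAR mqmsg_t *mqmsg;
  irqstate_t saved_state;

  /* The empty-queue test, the (possibly blocking) wait, and the dequeue in
   * mq_waitreceive() must be atomic with respect to interrupt-level
   * senders, so interrupts stay disabled across the call.
   */

  saved_state = irqsave();
  mqmsg = mq_waitreceive(mqdes);
  irqrestore(saved_state);

  /* On success the caller would now copy the payload and priority out of
   * the returned structure and release it; on failure errno holds EAGAIN,
   * EINTR, or ETIMEDOUT.
   */

  return mqmsg;
}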
Example #3
int sigsuspend(FAR const sigset_t *set)
{
  FAR struct tcb_s *rtcb = (FAR struct tcb_s *)g_readytorun.head;
  sigset_t intersection;
  sigset_t saved_sigprocmask;
  FAR sigpendq_t *sigpend;
  irqstate_t saved_state;
  int unblocksigno;

  /* Several operations must be performed below:  We must determine if any
   * signal is pending and, if not, wait for the signal.  Since signals can
   * be posted from the interrupt level, there is a race condition that
   * can only be eliminated by disabling interrupts!
   */

  sched_lock();  /* Not necessary */
  saved_state = irqsave();

  /* Check if there is a pending signal corresponding to one of the
   * signals that will be unblocked by the new sigprocmask.
   */

  intersection = ~(*set) & sig_pendingset(rtcb);
  if (intersection != NULL_SIGNAL_SET)
    {
      /* One or more of the signals in the intersection is sufficient to cause
       * us to not wait.  Pick the lowest numbered signal and mark it not
       * pending.
       */

      unblocksigno = sig_lowest(&intersection);
      sigpend = sig_removependingsignal(rtcb, unblocksigno);
      ASSERT(sigpend);

      sig_releasependingsignal(sigpend);
      irqrestore(saved_state);
    }
  else
    {
      /* It's time to wait.  Save a copy of the old sigprocmask and install
       * the new (temporary) sigprocmask
       */

      saved_sigprocmask = rtcb->sigprocmask;
      rtcb->sigprocmask = *set;
      rtcb->sigwaitmask = NULL_SIGNAL_SET;

      /* And wait until one of the unblocked signals is posted */

      up_block_task(rtcb, TSTATE_WAIT_SIG);

      /* We are running again, restore the original sigprocmask */

      rtcb->sigprocmask = saved_sigprocmask;
      irqrestore(saved_state);

      /* Now, handle the (rare?) case where (a) a blocked signal was received
       * while the task was suspended but (b) restoring the original
       * sigprocmask will unblock the signal.
       */

      sig_unmaskpendingsignal();
    }

  sched_unlock();
  return ERROR;
}
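At the application level, the value of the atomic mask swap shown above is that a signal cannot arrive in the gap between testing a flag and suspending. A typical (hypothetical) POSIX usage pattern looks like this:

#include <signal.h>

static volatile sig_atomic_t g_usr1_received;

static void usr1_handler(int signo)
{
  g_usr1_received = 1;
}

static void wait_for_usr1(void)
{
  struct sigaction act;
  sigset_t blockset;
  sigset_t waitmask;

  /* Install the handler and block SIGUSR1 so that it can only be
   * delivered while we are parked inside sigsuspend().
   */

  act.sa_handler = usr1_handler;
  act.sa_flags   = 0;
  sigemptyset(&act.sa_mask);
  sigaction(SIGUSR1, &act, NULL);

  sigemptyset(&blockset);
  sigaddset(&blockset, SIGUSR1);
  sigprocmask(SIG_BLOCK, &blockset, &waitmask);

  /* waitmask is the previous mask (with SIGUSR1 unblocked).  sigsuspend()
   * installs it atomically, blocks in TSTATE_WAIT_SIG, and returns -1
   * with errno == EINTR after the handler has run.
   */

  while (!g_usr1_received)
    {
      sigsuspend(&waitmask);
    }
}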
Example #4
int mq_waitsend(mqd_t mqdes)
{
  FAR struct tcb_s *rtcb;
  FAR struct mqueue_inode_s *msgq;

  /* Get a pointer to the message queue */

  msgq = mqdes->msgq;

  /* Verify that the queue is indeed full as the caller thinks */

  if (msgq->nmsgs >= msgq->maxmsgs)
    {
      /* Should we block until there is sufficient space in the
       * message queue?
       */

      if ((mqdes->oflags & O_NONBLOCK) != 0)
        {
          /* No... We will return an error to the caller. */

          set_errno(EAGAIN);
          return ERROR;
        }

      /* Yes... We will not return control until the message queue is
       * available or we receive a signal or a timeout occurs.
       */

      else
        {
          /* Loop until there are fewer than max allowable messages in the
           * receiving message queue
           */

          while (msgq->nmsgs >= msgq->maxmsgs)
            {
              /* Block until the message queue is no longer full.
               * When we are unblocked, we will try again
               */

              rtcb = (FAR struct tcb_s *)g_readytorun.head;
              rtcb->msgwaitq = msgq;
              msgq->nwaitnotfull++;

              set_errno(OK);
              up_block_task(rtcb, TSTATE_WAIT_MQNOTFULL);

              /* When we resume at this point, either (1) the message queue
               * is no longer full, or (2) the wait has been interrupted by
               * a signal.  We can detect the latter case by examining the
               * errno value (should be EINTR or ETIMEDOUT).
               */

              if (get_errno() != OK)
                {
                  return ERROR;
                }
            }
        }
    }

  return OK;
}
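As with the receive path, the full-queue test and the blocking wait must not race with mq_send() calls made from interrupt handlers, so the caller invokes mq_waitsend() with interrupts disabled. Below is a rough caller-side sketch (hypothetical function name); allocation and enqueueing of the message, handled by a separate helper in NuttX, are elided.

int mq_send_sketch(mqd_t mqdes, FAR const char *msg, size_t msglen, int prio)
{
  irqstate_t saved_state;
  int ret = ERROR;

  saved_state = irqsave();

  /* Wait, if necessary, until there is room in the queue.  mq_waitsend()
   * returns ERROR with errno set to EAGAIN, EINTR, or ETIMEDOUT if the
   * wait cannot be or was not completed.
   */

  if (mq_waitsend(mqdes) == OK)
    {
      /* There is now room: an internal message structure would be
       * allocated, filled from 'msg', and queued here, waking any reader
       * blocked in TSTATE_WAIT_MQNOTEMPTY.
       */

      ret = OK;
    }

  irqrestore(saved_state);
  return ret;
}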
Example #5
int sigtimedwait(FAR const sigset_t *set, FAR struct siginfo *info,
                 FAR const struct timespec *timeout)
{
    FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;
    sigset_t intersection;
    FAR sigpendq_t *sigpend;
    irqstate_t saved_state;
    int32_t waitticks;
    int ret = ERROR;

    DEBUGASSERT(rtcb->waitdog == NULL);

    sched_lock();  /* Not necessary */

    /* Several operations must be performed below:  We must determine if any
     * signal is pending and, if not, wait for the signal.  Since signals can
     * be posted from the interrupt level, there is a race condition that
     * can only be eliminated by disabling interrupts!
     */

    saved_state = irqsave();

    /* Check if there is a pending signal corresponding to one of the
     * signals in the pending signal set argument.
     */

    intersection = *set & sig_pendingset(rtcb);
    if (intersection != NULL_SIGNAL_SET)
    {
        /* One or more of the signals in the intersection is sufficient to cause
         * us to not wait.  Pick the lowest numbered signal and mark it not
         * pending.
         */

        sigpend = sig_removependingsignal(rtcb, sig_lowest(&intersection));
        ASSERT(sigpend);

        /* Return the signal info to the caller if so requested */

        if (info)
        {
            memcpy(info, &sigpend->info, sizeof(struct siginfo));
        }

        /* The return value is the number of the signal that awakened us */

        ret = sigpend->info.si_signo;

        /* Then dispose of the pending signal structure properly */

        sig_releasependingsignal(sigpend);
        irqrestore(saved_state);
    }

    /* We will have to wait for a signal to be posted to this task. */

    else
    {
        /* Save the set of pending signals to wait for */

        rtcb->sigwaitmask = *set;

        /* Check if we should wait for the timeout */

        if (timeout)
        {
            /* Convert the timespec to system clock ticks, making sure that
             * the resulting delay is greater than or equal to the requested
             * time in nanoseconds.
             */

#ifdef CONFIG_HAVE_LONG_LONG
            uint64_t waitticks64 = ((uint64_t)timeout->tv_sec * NSEC_PER_SEC +
                                    (uint64_t)timeout->tv_nsec + NSEC_PER_TICK - 1) /
                                   NSEC_PER_TICK;
            DEBUGASSERT(waitticks64 <= UINT32_MAX);
            waitticks = (uint32_t)waitticks64;
#else
            uint32_t waitmsec;

            DEBUGASSERT(timeout->tv_sec < UINT32_MAX / MSEC_PER_SEC);
            waitmsec = timeout->tv_sec * MSEC_PER_SEC +
                       (timeout->tv_nsec + NSEC_PER_MSEC - 1) / NSEC_PER_MSEC;
            waitticks = MSEC2TICK(waitmsec);
#endif

            /* Create a watchdog */

            rtcb->waitdog = wd_create();
            DEBUGASSERT(rtcb->waitdog);

            if (rtcb->waitdog)
            {
                /* This little bit of nonsense is necessary for some
                 * processors where sizeof(pointer) < sizeof(uint32_t).
                 * see wdog.h.
                 */

                wdparm_t wdparm;
                wdparm.pvarg = (FAR void *)rtcb;

                /* Start the watchdog */

                wd_start(rtcb->waitdog, waitticks, (wdentry_t)sig_timeout, 1,
                         wdparm.dwarg);

                /* Now wait for either the signal or the watchdog */

                up_block_task(rtcb, TSTATE_WAIT_SIG);

                /* We no longer need the watchdog */

                wd_delete(rtcb->waitdog);
                rtcb->waitdog = NULL;
            }

            /* REVISIT: What do we do if no watchdog timer is available?  The wait
             * will fail and we will return something bogus.
             */
        }

        /* No timeout, just wait */

        else
        {
            /* And wait until one of the unblocked signals is posted */

            up_block_task(rtcb, TSTATE_WAIT_SIG);
        }

        /* We are running again, clear the sigwaitmask */

        rtcb->sigwaitmask = NULL_SIGNAL_SET;

        /* When we awaken, the cause will be in the TCB.  Get the signal number
         * (or timeout) that awakened us.
         */

        if (GOOD_SIGNO(rtcb->sigunbinfo.si_signo))
        {
            /* We were awakened by a signal... but is it one of the signals that
             * we were waiting for?
             */

            if (sigismember(set, rtcb->sigunbinfo.si_signo))
            {
                /* Yes.. the return value is the number of the signal that
                 * awakened us.
                 */

                ret = rtcb->sigunbinfo.si_signo;
            }
            else
            {
                /* No... then set EINTR and report an error */

                set_errno(EINTR);
                ret = ERROR;
            }
        }
        else
        {
            /* Otherwise, we must have been awakened by the timeout.  Set EAGAIN
             * and return an error.
             */

            DEBUGASSERT(rtcb->sigunbinfo.si_signo == SIG_WAIT_TIMEOUT);
            set_errno(EAGAIN);
            ret = ERROR;
        }

        /* Return the signal info to the caller if so requested */

        if (info)
        {
            memcpy(info, &rtcb->sigunbinfo, sizeof(struct siginfo));
        }

        irqrestore(saved_state);
    }

    sched_unlock();
    return ret;
}
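From the caller's perspective, the errno conventions above map directly onto standard POSIX usage: a return of -1 with EAGAIN means the timeout expired, while EINTR means some other, unblocked signal interrupted the wait. A hypothetical application-side example:

#include <signal.h>
#include <time.h>
#include <errno.h>

/* Wait up to two seconds for SIGUSR1.  The signal must be blocked before
 * calling sigtimedwait() or it may be delivered to a handler instead.
 */

static int wait_usr1_timed(void)
{
  struct timespec timeout;
  siginfo_t info;
  sigset_t set;
  int signo;

  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  sigprocmask(SIG_BLOCK, &set, NULL);

  timeout.tv_sec  = 2;
  timeout.tv_nsec = 0;

  signo = sigtimedwait(&set, &info, &timeout);
  if (signo < 0)
    {
      /* EAGAIN: the timeout expired; EINTR: interrupted by another signal */

      return (errno == EAGAIN) ? 0 : -1;
    }

  /* signo is the number of the signal that awakened us */

  return signo;
}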
Example #6
int sem_wait(FAR sem_t *sem)
{
  FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;
  irqstate_t saved_state;
  int ret  = ERROR;

  /* This API should not be called from interrupt handlers */

  DEBUGASSERT(up_interrupt_context() == false);

  /* Assume any errors reported are due to invalid arguments. */

  errno = EINVAL;

  if (sem)
    {
      /* The following operations must be performed with interrupts
       * disabled because sem_post() may be called from an interrupt
       * handler.
       */

      saved_state = irqsave();

      /* Check if the lock is available */

      if (sem->semcount > 0)
        {
          /* It is, let the task take the semaphore. */

          sem->semcount--;
          sem_addholder(sem);
          rtcb->waitsem = NULL;
          ret = OK;
        }

      /* The semaphore is NOT available.  We will have to block the
       * current thread of execution.
       */

      else
        {
          /* First, verify that the task is not already waiting on a
           * semaphore
           */

          ASSERT(rtcb->waitsem == NULL);

          /* Handle the POSIX semaphore (but don't set the owner yet) */

          sem->semcount--;

          /* Save the waited on semaphore in the TCB */

          rtcb->waitsem = sem;

          /* If priority inheritance is enabled, then check the priority of
           * the holder of the semaphore.
           */

#ifdef CONFIG_PRIORITY_INHERITANCE
          /* Disable context switching.  The following operations must be
           * atomic with regard to the scheduler.
           */

          sched_lock();

          /* Boost the priority of any threads holding a count on the
           * semaphore.
           */

          sem_boostpriority(sem);
#endif
          /* Add the TCB to the prioritized semaphore wait queue */

          errno = 0;
          up_block_task(rtcb, TSTATE_WAIT_SEM);

          /* When we resume at this point, either (1) the semaphore has been
           * assigned to this thread of execution, or (2) the semaphore wait
           * has been interrupted by a signal or a timeout.  We can detect these
           * latter cases by examining the errno value.
           *
           * In the event that the semaphore wait was interrupted by a signal or
           * a timeout, certain semaphore clean-up operations have already been
           * performed (see sem_waitirq.c).  Specifically:
           *
           * - sem_canceled() was called to restore the priority of all threads
           *   that hold a reference to the semaphore,
           * - The semaphore count was decremented, and
           * - tcb->waitsem was nullified.
           *
           * It is necessary to do these things in sem_waitirq.c because a
           * long time may elapse between the time that the signal was issued
           * and the time that this thread is awakened, and that delay leaves
           * the door open to several race conditions.
           */

          if (errno != EINTR && errno != ETIMEDOUT)
            {
              /* Not awakened by a signal or a timeout... We hold the semaphore */

              sem_addholder(sem);
              ret = OK;
            }

#ifdef CONFIG_PRIORITY_INHERITANCE
          sched_unlock();
#endif
        }

      /* Interrupts may now be enabled. */

      irqrestore(saved_state);
    }

  return ret;
}
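The EINTR handling visible in the implementation matters to callers too: a POSIX sem_wait() may return -1 with errno == EINTR when a signal is delivered while the task is in TSTATE_WAIT_SEM, so portable code retries. A small, hypothetical usage sketch:

#include <semaphore.h>
#include <errno.h>

static sem_t g_lock;   /* Assume sem_init(&g_lock, 0, 1) was done at startup */

static int protected_operation(void)
{
  int ret;

  /* Retry the wait if it is interrupted by a signal */

  do
    {
      ret = sem_wait(&g_lock);
    }
  while (ret < 0 && errno == EINTR);

  if (ret < 0)
    {
      return -1;       /* Some other failure, e.g. EINVAL */
    }

  /* ... critical section protected by g_lock ... */

  sem_post(&g_lock);   /* Releases the count and wakes one waiter, if any */
  return 0;
}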
Example #7
int mq_waitsend(mqd_t mqdes)
{
  FAR struct tcb_s *rtcb;
  FAR struct mqueue_inode_s *msgq;

  /* mq_waitsend() is not a cancellation point, but it is always called from
   * a cancellation point.
   */

  if (enter_cancellation_point())
    {
#ifdef CONFIG_CANCELLATION_POINTS
      /* If there is a pending cancellation, then do not perform
       * the wait.  Exit now with ECANCELED.
       */

      set_errno(ECANCELED);
      leave_cancellation_point();
      return ERROR;
#endif
    }

  /* Get a pointer to the message queue */

  msgq = mqdes->msgq;

  /* Verify that the queue is indeed full as the caller thinks */

  if (msgq->nmsgs >= msgq->maxmsgs)
    {
      /* Should we block until there is sufficient space in the
       * message queue?
       */

      if ((mqdes->oflags & O_NONBLOCK) != 0)
        {
          /* No... We will return an error to the caller. */

          set_errno(EAGAIN);
          leave_cancellation_point();
          return ERROR;
        }

      /* Yes... We will not return control until the message queue is
       * available or we receive a signal or a timeout occurs.
       */

      else
        {
          /* Loop until there are fewer than max allowable messages in the
           * receiving message queue
           */

          while (msgq->nmsgs >= msgq->maxmsgs)
            {
              /* Block until the message queue is no longer full.
               * When we are unblocked, we will try again
               */

              rtcb = this_task();
              rtcb->msgwaitq = msgq;
              msgq->nwaitnotfull++;

              set_errno(OK);
              up_block_task(rtcb, TSTATE_WAIT_MQNOTFULL);

              /* When we resume at this point, either (1) the message queue
               * is no longer full, or (2) the wait has been interrupted by
               * a signal.  We can detect the latter case by examining the
               * errno value (should be EINTR or ETIMEDOUT).
               */

              if (get_errno() != OK)
                {
                  leave_cancellation_point();
                  return ERROR;
                }
            }
        }
    }

  leave_cancellation_point();
  return OK;
}
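The enter_cancellation_point()/leave_cancellation_point() bracket is what allows a thread blocked on a full queue to be cancelled rather than sleeping forever. A hypothetical illustration of that behavior from the caller's side:

#include <pthread.h>
#include <mqueue.h>

static FAR void *sender_thread(FAR void *arg)
{
  mqd_t mq = *(FAR mqd_t *)arg;
  char msg = 'x';

  for (; ; )
    {
      /* mq_send() is a cancellation point.  If the queue stays full and a
       * cancellation is pending, the thread terminates here (or returns
       * ERROR with ECANCELED) instead of remaining blocked in
       * TSTATE_WAIT_MQNOTFULL.
       */

      (void)mq_send(mq, &msg, 1, 0);
    }

  return NULL;
}

/* Elsewhere: pthread_cancel(tid); followed by pthread_join(tid, NULL); */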