Example no. 1
/**
 * @brief   Wakes up a thread.
 * @details The thread is inserted into the ready list or immediately made
 *          running depending on its relative priority compared to the current
 *          thread.
 * @pre     The thread must not already be inserted in any list through its
 *          @p p_next and @p p_prev fields, or list corruption would occur.
 * @note    It is equivalent to a @p chSchReadyI() followed by a
 *          @p chSchRescheduleS() but much more efficient.
 * @note    The function assumes that the current thread has the highest
 *          priority.
 *
 * @param[in] ntp       the thread to be made ready
 * @param[in] msg       the wakeup message
 *
 * @sclass
 */
void chSchWakeupS(thread_t *ntp, msg_t msg) {

  chDbgCheckClassS();

  /* Storing the message to be retrieved by the target thread when it
     restarts execution.*/
  ntp->p_u.rdymsg = msg;

  /* If the woken thread has a priority not greater than the current
     thread's then it is just inserted into the ready list, else it is
     made running immediately and the invoking thread is moved to the
     ready list instead.*/
  if (ntp->p_prio <= currp->p_prio) {
    chSchReadyI(ntp);
  }
  else {
    thread_t *otp = chSchReadyI(currp);
    setcurrp(ntp);
#if defined(CH_CFG_IDLE_LEAVE_HOOK)
    if (otp->p_prio == IDLEPRIO) {
      CH_CFG_IDLE_LEAVE_HOOK();
    }
#endif
    ntp->p_state = CH_STATE_CURRENT;
    chSysSwitch(ntp, otp);
  }
}
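
A minimal usage sketch, not part of the original listing: waking a previously parked thread from another thread's context. The waiting_tp variable and the notify_waiter() helper are hypothetical; chSchWakeupS() is S-class, so it must be called between chSysLock() and chSysUnlock().

static thread_t *waiting_tp;   /* assumed to be set by the sleeping thread */

void notify_waiter(msg_t msg) {

  chSysLock();
  if (waiting_tp != NULL) {
    thread_t *tp = waiting_tp;
    waiting_tp = NULL;
    /* Equivalent to chSchReadyI() + chSchRescheduleS() but cheaper: the
       woken thread either becomes current or goes into the ready list.*/
    chSchWakeupS(tp, msg);
  }
  chSysUnlock();
}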
Example no. 2
/**
 * @brief   Signals one thread that is waiting on the condition variable.
 * @post    This function does not reschedule so a call to a rescheduling
 *          function must be performed before unlocking the kernel. Note that
 *          interrupt handlers always reschedule on exit so an explicit
 *          reschedule must not be performed in ISRs.
 *
 * @param[in] cp        pointer to the @p CondVar structure
 *
 * @iclass
 */
void chCondSignalI(CondVar *cp) {

  chDbgCheck(cp != NULL, "chCondSignalI");

  if (notempty(&cp->c_queue))
    chSchReadyI(fifo_remove(&cp->c_queue))->p_u.rdymsg = RDY_OK;
}
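
A usage sketch (assumed, not from the listing) for the I-class signal, using the 2.x-era API of the example above. The data_cv variable and both helper functions are illustrative; the condition variable is assumed to be initialized elsewhere with chCondInit().

static CondVar data_cv;   /* assumed initialized with chCondInit(&data_cv) */

/* From an ISR: interrupt handlers reschedule on exit, so no explicit
   reschedule is performed here.*/
void data_signal_from_isr(void) {

  chSysLockFromIsr();
  chCondSignalI(&data_cv);
  chSysUnlockFromIsr();
}

/* From thread context: an explicit reschedule is required before
   unlocking the kernel.*/
void data_signal_from_thread(void) {

  chSysLock();
  chCondSignalI(&data_cv);
  chSchRescheduleS();
  chSysUnlock();
}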
Example no. 3
File: chmtx.c Project: Kreyl/Candle
/**
 * @brief   Unlocks all mutexes owned by the invoking thread.
 * @post    The stack of owned mutexes is emptied and all the found
 *          mutexes are unlocked.
 * @note    This function is <b>MUCH MORE</b> efficient than releasing the
 *          mutexes one by one, and not just because of the reduced call
 *          overhead: this function has no overhead related to the priority
 *          inheritance mechanism.
 *
 * @api
 */
void chMtxUnlockAll(void) {
  thread_t *ctp = currp;

  chSysLock();
  if (ctp->p_mtxlist != NULL) {
    do {
      mutex_t *mp = ctp->p_mtxlist;
      ctp->p_mtxlist = mp->m_next;
      if (chMtxQueueNotEmptyS(mp)) {
#if CH_CFG_USE_MUTEXES_RECURSIVE == TRUE
        mp->m_cnt = (cnt_t)1;
#endif
        thread_t *tp = queue_fifo_remove(&mp->m_queue);
        mp->m_owner = tp;
        mp->m_next = tp->p_mtxlist;
        tp->p_mtxlist = mp;
        (void) chSchReadyI(tp);
      }
      else {
#if CH_CFG_USE_MUTEXES_RECURSIVE == TRUE
        mp->m_cnt = (cnt_t)0;
#endif
        mp->m_owner = NULL;
      }
    } while (ctp->p_mtxlist != NULL);
    ctp->p_prio = ctp->p_realprio;
    chSchRescheduleS();
  }
  chSysUnlock();
}
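
A hedged sketch of the intended use: releasing every owned mutex in a single call instead of unlocking them one by one in reverse order. The mutex names and the worker_cleanup() helper are illustrative only.

static MUTEX_DECL(cfg_mtx);
static MUTEX_DECL(log_mtx);

void worker_cleanup(void) {

  chMtxLock(&cfg_mtx);
  chMtxLock(&log_mtx);
  /* ... work on both protected resources ... */
  chMtxUnlockAll();   /* both mutexes released, base priority restored */
}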
Example no. 4
/**
 * @brief   Unlocks all mutexes owned by the invoking thread.
 * @post    The stack of owned mutexes is emptied and all the found
 *          mutexes are unlocked.
 * @post    This function does not reschedule so a call to a rescheduling
 *          function must be performed before unlocking the kernel.
 * @note    This function is <b>MUCH MORE</b> efficient than releasing the
 *          mutexes one by one, and not just because of the reduced call
 *          overhead: this function has no overhead related to the priority
 *          inheritance mechanism.
 *
 * @sclass
 */
void chMtxUnlockAllS(void) {
  thread_t *ctp = currp;

  while (ctp->mtxlist != NULL) {
    mutex_t *mp = ctp->mtxlist;
    ctp->mtxlist = mp->next;
    if (chMtxQueueNotEmptyS(mp)) {
#if CH_CFG_USE_MUTEXES_RECURSIVE == TRUE
      mp->cnt = (cnt_t)1;
#endif
      thread_t *tp = queue_fifo_remove(&mp->queue);
      mp->owner = tp;
      mp->next = tp->mtxlist;
      tp->mtxlist = mp;
      (void) chSchReadyI(tp);
    }
    else {
#if CH_CFG_USE_MUTEXES_RECURSIVE == TRUE
      mp->cnt = (cnt_t)0;
#endif
      mp->owner = NULL;
    }
  }
  ctp->prio = ctp->realprio;
}
Example no. 5
/**
 * @brief   Performs a reset operation on the semaphore.
 * @post    After invoking this function all the threads waiting on the
 *          semaphore, if any, are released and the semaphore counter is set
 *          to the specified, non-negative, value.
 * @post    This function does not reschedule so a call to a rescheduling
 *          function must be performed before unlocking the kernel. Note that
 *          interrupt handlers always reschedule on exit so an explicit
 *          reschedule must not be performed in ISRs.
 *
 * @param[in] sp        pointer to a @p semaphore_t structure
 * @param[in] n         the new value of the semaphore counter. The value must
 *                      be non-negative.
 *
 * @iclass
 */
void chSemResetI(semaphore_t *sp, cnt_t n) {
  thread_t *tp;
  cnt_t cnt;

  chDbgCheckClassI();
  chDbgCheck((sp != NULL) && (n >= (cnt_t)0));

  cnt = sp->cnt;
  sp->cnt = n;
  tp = nil.threads;
  while (cnt < (cnt_t)0) {

    chDbgAssert(tp < &nil.threads[CH_CFG_NUM_THREADS],
                "pointer out of range");

    /* Is this thread waiting on this semaphore?*/
    if (tp->u1.semp == sp) {

      chDbgAssert(NIL_THD_IS_WTSEM(tp), "not waiting");

      cnt++;
      (void) chSchReadyI(tp, MSG_RESET);
    }
    tp++;
  }
}
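
An illustrative sketch, not from the listing: resetting a semaphore from ISR context so that every waiting thread is released with MSG_RESET. The rx_sem variable and the rx_abort_isr() name are assumptions.

static SEMAPHORE_DECL(rx_sem, 0);

void rx_abort_isr(void) {

  chSysLockFromISR();
  /* All threads waiting on rx_sem are readied with MSG_RESET and the
     counter is reinitialized to zero; the ISR exit path reschedules.*/
  chSemResetI(&rx_sem, (cnt_t)0);
  chSysUnlockFromISR();
}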
Example no. 6
/**
 * @brief   Performs atomic signal and wait operations on two semaphores.
 * @pre     The configuration option @p CH_USE_SEMSW must be enabled in order
 *          to use this function.
 *
 * @param[in] sps       pointer to a @p Semaphore structure to be signaled
 * @param[in] spw       pointer to a @p Semaphore structure to wait on
 * @return              A message specifying how the invoking thread has been
 *                      released from the semaphore.
 * @retval RDY_OK       if the thread has not stopped on the semaphore or the
 *                      semaphore has been signaled.
 * @retval RDY_RESET    if the semaphore has been reset using @p chSemReset().
 *
 * @api
 */
msg_t chSemSignalWait(Semaphore *sps, Semaphore *spw) {
  msg_t msg;

  chDbgCheck((sps != NULL) && (spw != NULL), "chSemSignalWait");

  chDbgAssert(((sps->s_cnt >= 0) && isempty(&sps->s_queue)) ||
              ((sps->s_cnt < 0) && notempty(&sps->s_queue)),
              "chSemSignalWait(), #1",
              "inconsistent semaphore");

  chDbgAssert(((spw->s_cnt >= 0) && isempty(&spw->s_queue)) ||
              ((spw->s_cnt < 0) && notempty(&spw->s_queue)),
              "chSemSignalWait(), #2",
              "inconsistent semaphore");

  chSysLock();
  if (++sps->s_cnt <= 0)
    chSchReadyI(fifo_remove(&sps->s_queue))->p_u.rdymsg = RDY_OK;
  if (--spw->s_cnt < 0) {
    Thread *ctp = currp;
    sem_insert(ctp, &spw->s_queue);
    ctp->p_u.wtobjp = spw;
    chSchGoSleepS(THD_STATE_WTSEM);
    msg = ctp->p_u.rdymsg;
  }
  else {
    chSchRescheduleS();
    msg = RDY_OK;
  }
  chSysUnlock();
  return msg;
}
Example no. 7
/**
 * @brief   Performs atomic signal and wait operations on two semaphores.
 *
 * @param[in] sps       pointer to a @p semaphore_t structure to be signaled
 * @param[in] spw       pointer to a @p semaphore_t structure to wait on
 * @return              A message specifying how the invoking thread has been
 *                      released from the semaphore.
 * @retval MSG_OK       if the thread has not stopped on the semaphore or the
 *                      semaphore has been signaled.
 * @retval MSG_RESET    if the semaphore has been reset using @p chSemReset().
 *
 * @api
 */
msg_t chSemSignalWait(semaphore_t *sps, semaphore_t *spw) {
  msg_t msg;

  chDbgCheck((sps != NULL) && (spw != NULL));
  chDbgAssert(((sps->s_cnt >= (cnt_t)0) && queue_isempty(&sps->s_queue)) ||
              ((sps->s_cnt < (cnt_t)0) && queue_notempty(&sps->s_queue)),
              "inconsistent semaphore");
  chDbgAssert(((spw->s_cnt >= (cnt_t)0) && queue_isempty(&spw->s_queue)) ||
              ((spw->s_cnt < (cnt_t)0) && queue_notempty(&spw->s_queue)),
              "inconsistent semaphore");

  chSysLock();
  if (++sps->s_cnt <= (cnt_t)0) {
    chSchReadyI(queue_fifo_remove(&sps->s_queue))->p_u.rdymsg = MSG_OK;
  }
  if (--spw->s_cnt < (cnt_t)0) {
    thread_t *ctp = currp;
    sem_insert(ctp, &spw->s_queue);
    ctp->p_u.wtsemp = spw;
    chSchGoSleepS(CH_STATE_WTSEM);
    msg = ctp->p_u.rdymsg;
  }
  else {
    chSchRescheduleS();
    msg = MSG_OK;
  }
  chSysUnlock();

  return msg;
}
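
A sketch with assumed names of the lock-step exchange this primitive enables: each side signals the peer's semaphore and waits on its own in one atomic step, so no wakeup can be lost in between.

static SEMAPHORE_DECL(ping_sem, 0);
static SEMAPHORE_DECL(pong_sem, 0);

void ping_step(void) {
  msg_t msg;

  /* Atomically signals pong_sem and waits on ping_sem.*/
  msg = chSemSignalWait(&pong_sem, &ping_sem);
  if (msg == MSG_RESET) {
    /* ping_sem was reset while this thread was waiting.*/
  }
}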
Example no. 8
/**
 * @brief   Unlocks the specified mutex.
 * @note    Mutexes must be unlocked in reverse lock order. Violating this
 *          rule will result in a panic if assertions are enabled.
 * @pre     The invoking thread <b>must</b> have at least one owned mutex.
 * @post    The mutex is unlocked and removed from the per-thread stack of
 *          owned mutexes.
 * @post    This function does not reschedule so a call to a rescheduling
 *          function must be performed before unlocking the kernel.
 *
 * @param[in] mp        pointer to the @p mutex_t structure
 *
 * @sclass
 */
void chMtxUnlockS(mutex_t *mp) {
    thread_t *ctp = currp;
    mutex_t *lmp;

    chDbgCheckClassS();
    chDbgCheck(mp != NULL);

    chDbgAssert(ctp->p_mtxlist != NULL, "owned mutexes list empty");
    chDbgAssert(ctp->p_mtxlist->m_owner == ctp, "ownership failure");
#if CH_CFG_USE_MUTEXES_RECURSIVE
    chDbgAssert(mp->m_cnt >= 1, "counter is not positive");

    if (--mp->m_cnt == 0) {
#endif

        chDbgAssert(ctp->p_mtxlist == mp, "not next in list");

        /* Removes the top mutex from the thread's owned mutexes list and marks
           it as not owned. Note, it is assumed to be the same mutex passed as
           parameter of this function.*/
        ctp->p_mtxlist = mp->m_next;

        /* If a thread is waiting on the mutex then the fun part begins.*/
        if (chMtxQueueNotEmptyS(mp)) {
            thread_t *tp;

            /* Recalculates the optimal thread priority by scanning the owned
               mutexes list.*/
            tprio_t newprio = ctp->p_realprio;
            lmp = ctp->p_mtxlist;
            while (lmp != NULL) {
                /* If the highest priority thread waiting in the mutexes list has a
                   greater priority than the current thread base priority then the
                   final priority will have at least that priority.*/
                if (chMtxQueueNotEmptyS(lmp) && (lmp->m_queue.p_next->p_prio > newprio))
                    newprio = lmp->m_queue.p_next->p_prio;
                lmp = lmp->m_next;
            }

            /* Assigns to the current thread the highest priority among all the
               waiting threads.*/
            ctp->p_prio = newprio;

            /* Awakens the highest priority thread waiting for the unlocked mutex and
               assigns the mutex to it.*/
#if CH_CFG_USE_MUTEXES_RECURSIVE
            mp->m_cnt = 1;
#endif
            tp = queue_fifo_remove(&mp->m_queue);
            mp->m_owner = tp;
            mp->m_next = tp->p_mtxlist;
            tp->p_mtxlist = mp;
            chSchReadyI(tp);
        }
        else
            mp->m_owner = NULL;
#if CH_CFG_USE_MUTEXES_RECURSIVE
    }
#endif
}
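
A small sketch, assuming res_mtx was previously locked by the caller: because the S-class unlock does not reschedule, the reschedule is performed explicitly before leaving the locked state. The names are illustrative.

static MUTEX_DECL(res_mtx);

void release_resource(void) {

  chSysLock();
  /* res_mtx is assumed to be the most recently locked mutex.*/
  chMtxUnlockS(&res_mtx);
  chSchRescheduleS();    /* hand the CPU to a possibly readied waiter */
  chSysUnlock();
}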
Example no. 9
/*
 * Timeout wakeup callback.
 */
static void wakeup(void *p) {
  Thread *tp = (Thread *)p;

  chSysLockFromIsr();
  switch (tp->p_state) {
  case THD_STATE_READY:
    /* Handling the special case where the thread has been made ready by
       another thread with higher priority.*/
    chSysUnlockFromIsr();
    return;
#if CH_USE_SEMAPHORES || CH_USE_QUEUES ||                                   \
    (CH_USE_CONDVARS && CH_USE_CONDVARS_TIMEOUT)
#if CH_USE_SEMAPHORES
  case THD_STATE_WTSEM:
    chSemFastSignalI((Semaphore *)tp->p_u.wtobjp);
    /* Falls through, intentional. */
#endif
#if CH_USE_QUEUES
  case THD_STATE_WTQUEUE:
#endif
#if CH_USE_CONDVARS && CH_USE_CONDVARS_TIMEOUT
  case THD_STATE_WTCOND:
#endif
    /* States requiring dequeuing.*/
    dequeue(tp);
#endif
  }
  tp->p_u.rdymsg = RDY_TIMEOUT;
  chSchReadyI(tp);
  chSysUnlockFromIsr();
}
Example no. 10
/**
 * @brief   Reads from a dedicated packet buffer.
 *
 * @param[in] udp       pointer to a @p stm32_usb_descriptor_t
 * @param[in] iqp       pointer to an @p InputQueue object
 * @param[in] n         maximum number of bytes to copy. This value must
 *                      not exceed the maximum packet size for this endpoint.
 *
 * @notapi
 */
static void usb_packet_read_to_queue(stm32_usb_descriptor_t *udp,
                                     InputQueue *iqp, size_t n) {
  size_t nhw;
  uint32_t *pmap = USB_ADDR2PTR(udp->RXADDR0);

  nhw = n / 2;
  while (nhw > 0) {
    uint32_t w;

    w = *pmap++;
    *iqp->q_wrptr++ = (uint8_t)w;
    if (iqp->q_wrptr >= iqp->q_top)
      iqp->q_wrptr = iqp->q_buffer;
    *iqp->q_wrptr++ = (uint8_t)(w >> 8);
    if (iqp->q_wrptr >= iqp->q_top)
      iqp->q_wrptr = iqp->q_buffer;
    nhw--;
  }
  /* Last byte for odd numbers.*/
  if ((n & 1) != 0) {
    *iqp->q_wrptr++ = (uint8_t)*pmap;
    if (iqp->q_wrptr >= iqp->q_top)
      iqp->q_wrptr = iqp->q_buffer;
  }

  /* Updating queue.*/
  chSysLockFromIsr();
  iqp->q_counter += n;
  while (notempty(&iqp->q_waiting))
    chSchReadyI(fifo_remove(&iqp->q_waiting))->p_u.rdymsg = Q_OK;
  chSysUnlockFromIsr();
}
Example no. 11
/*
 * Timeout wakeup callback.
 */
static void wakeup(void *p) {
  thread_t *tp = (thread_t *)p;

  chSysLockFromISR();
  switch (tp->p_state) {
  case CH_STATE_READY:
    /* Handling the special case where the thread has been made ready by
       another thread with higher priority.*/
    chSysUnlockFromISR();
    return;
  case CH_STATE_SUSPENDED:
    *(thread_reference_t *)tp->p_u.wtobjp = NULL;
    break;
#if CH_CFG_USE_SEMAPHORES
  case CH_STATE_WTSEM:
    chSemFastSignalI((semaphore_t *)tp->p_u.wtobjp);
    /* Falls through, intentional. */
#endif
#if CH_CFG_USE_CONDVARS && CH_CFG_USE_CONDVARS_TIMEOUT
  case CH_STATE_WTCOND:
#endif
  case CH_STATE_QUEUED:
    /* States requiring dequeuing.*/
    queue_dequeue(tp);
  }
  tp->p_u.rdymsg = MSG_TIMEOUT;
  chSchReadyI(tp);
  chSysUnlockFromISR();
}
Example no. 12
/**
 * @brief   Locks the specified mutex.
 *
 * @param[in] mp        pointer to the @p Mutex structure
 */
void chMtxLockS(Mutex *mp) {
  Thread *ctp = currp;

  chDbgCheck(mp != NULL, "chMtxLockS");

  /* Is the mutex already locked? */
  if (mp->m_owner != NULL) {
    /* Priority inheritance protocol; explores the thread-mutex dependencies
       boosting the priority of all the affected threads to equal the priority
       of the running thread requesting the mutex.*/
    Thread *tp = mp->m_owner;
    /* Does the running thread have higher priority than the mutex
       owning thread? */
    while (tp->p_prio < ctp->p_prio) {
      /* Make priority of thread tp match the running thread's priority.*/
      tp->p_prio = ctp->p_prio;
      /* The following states need priority queues reordering.*/
      switch (tp->p_state) {
      case THD_STATE_WTMTX:
        /* Re-enqueues the mutex owner with its new priority.*/
        prio_insert(dequeue(tp), (ThreadsQueue *)tp->p_u.wtobjp);
        tp = ((Mutex *)tp->p_u.wtobjp)->m_owner;
        continue;
#if CH_USE_CONDVARS | CH_USE_SEMAPHORES_PRIORITY | CH_USE_MESSAGES_PRIORITY
#if CH_USE_CONDVARS
      case THD_STATE_WTCOND:
#endif
#if CH_USE_SEMAPHORES_PRIORITY
      case THD_STATE_WTSEM:
#endif
#if CH_USE_MESSAGES_PRIORITY
      case THD_STATE_SNDMSG:
#endif
        /* Re-enqueues tp with its new priority on the queue.*/
        prio_insert(dequeue(tp), (ThreadsQueue *)tp->p_u.wtobjp);
        break;
#endif
      case THD_STATE_READY:
        /* Re-enqueues tp with its new priority on the ready list.*/
        chSchReadyI(dequeue(tp));
      }
      break;
    }
    /* Sleep on the mutex.*/
    prio_insert(ctp, &mp->m_queue);
    ctp->p_u.wtobjp = mp;
    chSchGoSleepS(THD_STATE_WTMTX);
    /* It is assumed that the thread performing the unlock operation assigns
       the mutex to this thread.*/
    chDbgAssert(mp->m_owner == ctp, "chMtxLockS(), #1", "not owner");
    chDbgAssert(ctp->p_mtxlist == mp, "chMtxLockS(), #2", "not owned");
  }
  else {
    /* It was not owned, inserted in the owned mutexes list.*/
    mp->m_owner = ctp;
    mp->m_next = ctp->p_mtxlist;
    ctp->p_mtxlist = mp;
  }
}
Example no. 13
/**
 * @brief   Adds a set of event flags directly to the specified @p thread_t.
 * @post    This function does not reschedule so a call to a rescheduling
 *          function must be performed before unlocking the kernel. Note that
 *          interrupt handlers always reschedule on exit so an explicit
 *          reschedule must not be performed in ISRs.
 *
 * @param[in] tp        the thread to be signaled
 * @param[in] mask      the event flags set to be ORed
 *
 * @iclass
 */
void chEvtSignalI(thread_t *tp, eventmask_t mask) {

  tp->epmask |= mask;
  if (NIL_THD_IS_WTOREVT(tp) &&
      ((tp->epmask & tp->u1.ewmask) != (eventmask_t)0)) {
    (void) chSchReadyI(tp, MSG_OK);
  }
}
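
A usage sketch with assumed names (tp_worker, EVT_DATA_RDY, worker_loop): an ISR flags an event to a known thread, which collects it with chEvtWaitAny().

#define EVT_DATA_RDY  EVENT_MASK(0)

static thread_t *tp_worker;   /* assumed set when the worker thread starts */

void data_ready_isr(void) {

  chSysLockFromISR();
  chEvtSignalI(tp_worker, EVT_DATA_RDY);
  chSysUnlockFromISR();
}

/* Worker thread body.*/
void worker_loop(void) {

  for (;;) {
    eventmask_t evt = chEvtWaitAny(EVT_DATA_RDY);
    (void)evt;
    /* ... process the data ... */
  }
}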
Example no. 14
void chSchWakeupS(Thread *ntp, msg_t msg) {

  chDbgCheckClassS();

  ntp->p_u.rdymsg = msg;
  /* If the woken thread has a priority not greater than the current
     thread's then it is just inserted into the ready list, else it is
     made running immediately and the invoking thread is moved to the
     ready list instead.*/
  if (ntp->p_prio <= currp->p_prio)
    chSchReadyI(ntp);
  else {
    Thread *otp = chSchReadyI(currp);
    setcurrp(ntp);
    ntp->p_state = THD_STATE_CURRENT;
    chSysSwitch(ntp, otp);
  }
}
Example no. 15
/**
 * @brief   Handling of stalled I2C transactions.
 *
 * @param[in] p         pointer to the @p I2CDriver object
 *
 * @notapi
 */
static void i2c_lld_safety_timeout(void *p) {
  I2CDriver *i2cp = (I2CDriver *)p;

  if (i2cp->thread) {
    i2c_lld_abort_operation(i2cp);
    i2cp->thread->p_u.rdymsg = RDY_TIMEOUT;
    chSchReadyI(i2cp->thread);
  }
}
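
A sketch (2.x-era API assumed) of how such a callback is typically armed with a virtual timer before starting a transfer; if the timer fires, the callback above aborts the operation and readies the waiting thread with RDY_TIMEOUT. The helper name and the tmo parameter are illustrative.

static VirtualTimer i2c_vt;

void start_transfer_with_timeout(I2CDriver *i2cp, systime_t tmo) {

  chSysLock();
  chVTSetI(&i2c_vt, tmo, i2c_lld_safety_timeout, (void *)i2cp);
  /* ... start the transfer, record the requesting thread and sleep
     waiting for completion or for the safety timeout ... */
  chSysUnlock();
}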
Example no. 16
/**
 * @brief   Resets an output queue.
 * @details All the data in the output queue is erased and lost, any waiting
 *          thread is resumed with status @p Q_RESET.
 * @note    A reset operation can be used by a low level driver in order to
 *          obtain immediate attention from the high level layers.
 *
 * @param[in] oqp       pointer to an @p OutputQueue structure
 *
 * @iclass
 */
void chOQResetI(OutputQueue *oqp) {

    chDbgCheckClassI();

    oqp->q_rdptr = oqp->q_wrptr = oqp->q_buffer;
    oqp->q_counter = chQSizeI(oqp);
    while (notempty(&oqp->q_waiting))
        chSchReadyI(fifo_remove(&oqp->q_waiting))->p_u.rdymsg = Q_RESET;
}
Example no. 17
/**
 * @brief   Resets an input queue.
 * @details All the data in the input queue is erased and lost, any waiting
 *          thread is resumed with status @p Q_RESET.
 * @note    A reset operation can be used by a low level driver in order to
 *          obtain immediate attention from the high level layers.
 *
 * @param[in] iqp       pointer to an @p InputQueue structure
 *
 * @iclass
 */
void chIQResetI(InputQueue *iqp) {

    chDbgCheckClassI();

    iqp->q_rdptr = iqp->q_wrptr = iqp->q_buffer;
    iqp->q_counter = 0;
    while (notempty(&iqp->q_waiting))
        chSchReadyI(fifo_remove(&iqp->q_waiting))->p_u.rdymsg = Q_RESET;
}
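
An assumed sketch of a driver forcing the attention of the upper layers: resetting its input queue from an ISR releases any thread blocked on a read with Q_RESET (2.x-era API; rx_iq is illustrative and assumed initialized with chIQInit()).

static InputQueue rx_iq;   /* assumed initialized elsewhere with chIQInit() */

void rx_error_isr(void) {

  chSysLockFromIsr();
  chIQResetI(&rx_iq);
  chSysUnlockFromIsr();
}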
Example no. 18
/**
 * @brief   Change thread priority.
 * @note    This can interfere with the priority inheritance mechanism.
 */
osStatus osThreadSetPriority(osThreadId thread_id, osPriority newprio) {
  osPriority oldprio;
  thread_t * tp = (thread_t *)thread_id;

  chSysLock();

  /* Changing priority.*/
#if CH_CFG_USE_MUTEXES
  oldprio = (osPriority)tp->p_realprio;
  if ((tp->p_prio == tp->p_realprio) || ((tprio_t)newprio > tp->p_prio))
    tp->p_prio = (tprio_t)newprio;
  tp->p_realprio = (tprio_t)newprio;
#else
  oldprio = tp->p_prio;
  tp->p_prio = (tprio_t)newprio;
#endif

  /* The following states need priority queues reordering.*/
  switch (tp->p_state) {
#if CH_CFG_USE_MUTEXES |                                                    \
    CH_CFG_USE_CONDVARS |                                                   \
    (CH_CFG_USE_SEMAPHORES && CH_CFG_USE_SEMAPHORES_PRIORITY) |             \
    (CH_CFG_USE_MESSAGES && CH_CFG_USE_MESSAGES_PRIORITY)
#if CH_CFG_USE_MUTEXES
  case CH_STATE_WTMTX:
#endif
#if CH_CFG_USE_CONDVARS
  case CH_STATE_WTCOND:
#endif
#if CH_CFG_USE_SEMAPHORES && CH_CFG_USE_SEMAPHORES_PRIORITY
  case CH_STATE_WTSEM:
#endif
#if CH_CFG_USE_MESSAGES && CH_CFG_USE_MESSAGES_PRIORITY
  case CH_STATE_SNDMSGQ:
#endif
    /* Re-enqueues tp with its new priority on the queue.*/
    queue_prio_insert(queue_dequeue(tp),
                      (threads_queue_t *)tp->p_u.wtobjp);
    break;
#endif
  case CH_STATE_READY:
#if CH_DBG_ENABLE_ASSERTS
    /* Prevents an assertion in chSchReadyI().*/
    tp->p_state = CH_STATE_CURRENT;
#endif
    /* Re-enqueues tp with its new priority on the ready list.*/
    chSchReadyI(queue_dequeue(tp));
    break;
  }

  /* Rescheduling.*/
  chSchRescheduleS();

  chSysUnlock();

  return oldprio;
}
Example no. 19
/**
 * @brief   Signals all threads that are waiting on the condition variable.
 * @post    This function does not reschedule so a call to a rescheduling
 *          function must be performed before unlocking the kernel. Note that
 *          interrupt handlers always reschedule on exit so an explicit
 *          reschedule must not be performed in ISRs.
 *
 * @param[in] cp        pointer to the @p CondVar structure
 *
 * @iclass
 */
void chCondBroadcastI(CondVar *cp) {

  chDbgCheck(cp != NULL, "chCondBroadcastI");

  /* Empties the condition variable queue and inserts all the Threads into the
     ready list in FIFO order. The wakeup message is set to @p RDY_RESET in
     order to make a chCondBroadcast() detectable from a chCondSignal().*/
  while (cp->c_queue.p_next != (void *)&cp->c_queue)
    chSchReadyI(fifo_remove(&cp->c_queue))->p_u.rdymsg = RDY_RESET;
}
Example no. 20
bool ThreadWait::wake_from_interrupt(const int value) {
	if( thread_to_wake ) {
		thread_to_wake->p_u.rdymsg = value;
		chSchReadyI(thread_to_wake);
		thread_to_wake = nullptr;
		return true;
	} else {
		return false;
	}
}
Example no. 21
// =============================== I2C =========================================
void i2cDmaIrqHandler(void *p, uint32_t flags) {
    chSysLockFromIsr();
    //Uart.Printf("===T===");
    Thread *PThd = ((i2c_t*)p)->PRequestingThread;
    if (PThd != NULL) {
        ((i2c_t*)p)->PRequestingThread = NULL;
        chSchReadyI(PThd);
    }
    chSysUnlockFromIsr();
}
Example no. 22
void AdcTxIrq(void *p, uint32_t flags) {
    dmaStreamDisable(ADC_DMA);
    // Resume thread if any
    chSysLockFromIsr();
    if(PAdcThread != NULL) {
        if(PAdcThread->p_state == THD_STATE_SUSPENDED) chSchReadyI(PAdcThread);
        PAdcThread = NULL;
    }
    chSysUnlockFromIsr();
}
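
A sketch of the matching thread side (2.x API): the caller records itself in PAdcThread, suspends, and is readied by AdcTxIrq() when the DMA completes. The adc_wait_conversion() name is illustrative.

void adc_wait_conversion(void) {

  chSysLock();
  PAdcThread = chThdSelf();
  chSchGoSleepS(THD_STATE_SUSPENDED);   /* readied by the DMA ISR above */
  chSysUnlock();
}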
Example no. 23
//-----------------------------------------------------------------------------
void
kbed_dataReadyI(void)
{
	if (eDisplayThreadForSleep)
	{
		eDisplayThreadForSleep->p_u.rdymsg = (msg_t)1;
		chSchReadyI(eDisplayThreadForSleep);
		eDisplayThreadForSleep = NULL;
	}
}
Example no. 24
void chSchDoRescheduleBehind(void) {
  Thread *otp;

  otp = currp;
  /* Picks the first thread from the ready queue and makes it current.*/
  setcurrp(fifo_remove(&rlist.r_queue));
  currp->p_state = THD_STATE_CURRENT;
  otp->p_preempt = CH_TIME_QUANTUM;
  chSchReadyI(otp);
  chSysSwitch(currp, otp);
}
Example no. 25
void chSchWakeupS(Thread *ntp, msg_t msg) {

  ntp->p_u.rdymsg = msg;
  /* If the woken thread has a priority not greater than the current
     thread's then it is just inserted into the ready list, else it is
     made running immediately and the invoking thread is moved to the
     ready list instead.*/
  if (ntp->p_prio <= currp->p_prio)
    chSchReadyI(ntp);
  else {
    Thread *otp = chSchReadyI(currp);
#if CH_TIME_QUANTUM > 0
    rlist.r_preempt = CH_TIME_QUANTUM;
#endif
    setcurrp(ntp);
    ntp->p_state = THD_STATE_CURRENT;
    chDbgTrace(otp);
    chSysSwitchI(ntp, otp);
  }
}
Example no. 26
/**
 * @brief   Signals all threads that are waiting on the condition variable.
 * @post    This function does not reschedule so a call to a rescheduling
 *          function must be performed before unlocking the kernel. Note that
 *          interrupt handlers always reschedule on exit so an explicit
 *          reschedule must not be performed in ISRs.
 *
 * @param[in] cp        pointer to the @p condition_variable_t structure
 *
 * @iclass
 */
void chCondBroadcastI(condition_variable_t *cp) {

  chDbgCheckClassI();
  chDbgCheck(cp != NULL);

  /* Empties the condition variable queue and inserts all the threads into the
     ready list in FIFO order. The wakeup message is set to @p MSG_RESET in
     order to make a chCondBroadcast() detectable from a chCondSignal().*/
  while (cp->c_queue.p_next != (void *)&cp->c_queue)
    chSchReadyI(queue_fifo_remove(&cp->c_queue))->p_u.rdymsg = MSG_RESET;
}
Example no. 27
/**
 * @brief   Wakes up a thread waiting on a thread reference object.
 * @note    This function must not reschedule because it can be called from
 *          ISR context.
 *
 * @param[in] trp       a pointer to a thread reference object
 * @param[in] msg       the message code
 *
 * @iclass
 */
void chThdResumeI(thread_reference_t *trp, msg_t msg) {

  if (*trp != NULL) {
    thread_reference_t tr = *trp;

    chDbgAssert(NIL_THD_IS_SUSP(tr), "not suspended");

    *trp = NULL;
    (void) chSchReadyI(tr, msg);
  }
}
Example no. 28
/**
 * @brief   Signals one thread that is waiting on the condition variable.
 * @post    This function does not reschedule so a call to a rescheduling
 *          function must be performed before unlocking the kernel. Note that
 *          interrupt handlers always reschedule on exit so an explicit
 *          reschedule must not be performed in ISRs.
 *
 * @param[in] cp        pointer to the @p condition_variable_t structure
 *
 * @iclass
 */
void chCondSignalI(condition_variable_t *cp) {

  chDbgCheckClassI();
  chDbgCheck(cp != NULL);

  if (queue_notempty(&cp->c_queue)) {
    thread_t *tp = queue_fifo_remove(&cp->c_queue);
    tp->p_u.rdymsg = MSG_OK;
    chSchReadyI(tp);
  }
}
Example no. 29
/**
 * @brief   Adds a set of event flags directly to the specified @p thread_t.
 * @post    This function does not reschedule so a call to a rescheduling
 *          function must be performed before unlocking the kernel. Note that
 *          interrupt handlers always reschedule on exit so an explicit
 *          reschedule must not be performed in ISRs.
 *
 * @param[in] tp        the thread to be signaled
 * @param[in] mask      the event flags set to be ORed
 *
 * @iclass
 */
void chEvtSignalI(thread_t *tp, eventmask_t mask) {

  chDbgCheckClassI();
  chDbgCheck(tp != NULL);

  tp->epmask |= mask;
  if (NIL_THD_IS_WTOREVT(tp) &&
      ((tp->epmask & tp->u1.ewmask) != (eventmask_t)0)) {
    (void) chSchReadyI(tp, MSG_OK);
  }
}
Example no. 30
/**
 * @brief   Wakes up a thread waiting on a thread reference object.
 * @note    This function must not reschedule because it can be called from
 *          ISR context.
 *
 * @param[in] trp       a pointer to a thread reference object
 * @param[in] msg       the message code
 *
 * @iclass
 */
void chThdResumeI(thread_reference_t *trp, msg_t msg) {

  if (*trp != NULL) {
    thread_t *tp = *trp;

    chDbgAssert(tp->state == CH_STATE_SUSPENDED, "not CH_STATE_SUSPENDED");

    *trp = NULL;
    tp->u.rdymsg = msg;
    (void) chSchReadyI(tp);
  }
}
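
A sketch of the suspend/resume pairing this function supports (names are assumptions): a thread parks itself on a thread reference and an ISR later releases it through chThdResumeI().

static thread_reference_t rx_ref = NULL;

/* Thread side: sleeps until the ISR resumes it, returning the message.*/
msg_t wait_for_data(void) {
  msg_t msg;

  chSysLock();
  msg = chThdSuspendTimeoutS(&rx_ref, TIME_INFINITE);
  chSysUnlock();
  return msg;
}

/* ISR side.*/
void data_isr(void) {

  chSysLockFromISR();
  chThdResumeI(&rx_ref, MSG_OK);
  chSysUnlockFromISR();
}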