/* Process one system clock tick: decrement all armed time events, post
* expired ones to their host active objects, rearm periodic ones, and
* unlink expired one-shot ones from the global list QF_timeEvtListHead_.
* NOTE: the head of this definition (#ifdef with the Q_SPY signature) and
* the final QF_CRIT_EXIT_()/closing brace lie outside this chunk.
*/
void QF_tick(void) {                                          /* see NOTE01 */
#else
void QF_tick(void const *sender) {
#endif
    QTimeEvt *t;
    QF_CRIT_STAT_

    QF_CRIT_ENTRY_();

    QS_BEGIN_NOCRIT_(QS_QF_TICK, (void *)0, (void *)0)
        QS_TEC_((QTimeEvtCtr)(++QS_tickCtr_));           /* the tick counter */
    QS_END_NOCRIT_()

    /* walk the doubly-linked list of armed time events */
    t = QF_timeEvtListHead_;
    while (t != (QTimeEvt *)0) {
        --t->ctr;
        if (t->ctr == (QTimeEvtCtr)0) {    /* is time evt about to expire? */
            if (t->interval != (QTimeEvtCtr)0) { /* is it periodic timeout? */
                t->ctr = t->interval;           /* rearm the time event */
            }
            else { /* one-shot timeout, disarm by removing it from the list */
                if (t == QF_timeEvtListHead_) {
                    QF_timeEvtListHead_ = t->next;
                }
                else {
                    if (t->next != (QTimeEvt *)0) { /* not the last event? */
                        t->next->prev = t->prev;
                    }
                    t->prev->next = t->next;
                }
                t->prev = (QTimeEvt *)0;    /* mark the event disarmed */

                QS_BEGIN_NOCRIT_(QS_QF_TIMEEVT_AUTO_DISARM, QS_teObj_, t)
                    QS_OBJ_(t);             /* this time event object */
                    QS_OBJ_(t->act);        /* the active object */
                QS_END_NOCRIT_()
            }

            QS_BEGIN_NOCRIT_(QS_QF_TIMEEVT_POST, QS_teObj_, t)
                QS_TIME_();                 /* timestamp */
                QS_OBJ_(t);                 /* the time event object */
                QS_SIG_(t->super.sig);      /* signal of this time event */
                QS_OBJ_(t->act);            /* the active object */
            QS_END_NOCRIT_()

            QF_CRIT_EXIT_();/* exit crit. section before calling QF service */

            /* QACTIVE_POST() asserts internally if the queue overflows */
            QACTIVE_POST(t->act, &t->super, sender);
        }
        else {
            /* not expired: briefly open the critical section to bound
            * interrupt latency while traversing a long list
            */
            static uint8_t volatile dummy;
            QF_CRIT_EXIT_();
            dummy = (uint8_t)0; /* execute a few instructions, see NOTE02 */
        }

        QF_CRIT_ENTRY_(); /* enter crit. section again to advance the link */
        t = t->next;
    }
/*..........................................................................*/
/* Arm the time event 'me' to post its signal to active object 'act' after
* 'nTicks' clock ticks; links the event at the head of the global list.
* Precondition: the time event must be currently disarmed (prev == 0).
*/
void QTimeEvt_arm_(QTimeEvt *me, QActive *act, QTimeEvtCtr nTicks) {
    QF_CRIT_STAT_

    Q_REQUIRE((nTicks > (QTimeEvtCtr)0)  /* cannot arm a timer with 0 ticks */
              && (me->super.sig >= (QSignal)Q_USER_SIG)     /* valid signal */
              && (me->prev == (QTimeEvt *)0)      /* time evt must NOT be used */
              && (act != (QActive *)0));  /* active object must be provided */

    me->ctr  = nTicks;
    me->prev = me;                       /* mark the timer in use */
    me->act  = act;

    QF_CRIT_ENTRY_();

    QS_BEGIN_NOCRIT_(QS_QF_TIMEEVT_ARM, QS_teObj_, me)
        QS_TIME_();                      /* timestamp */
        QS_OBJ_(me);                     /* this time event object */
        QS_OBJ_(act);                    /* the active object */
        QS_TEC_(nTicks);                 /* the number of ticks */
        QS_TEC_(me->interval);           /* the interval */
    QS_END_NOCRIT_()

    /* insert at the head of the doubly-linked list of armed time events */
    me->next = QF_timeEvtListHead_;
    if (QF_timeEvtListHead_ != (QTimeEvt *)0) {
        QF_timeEvtListHead_->prev = me;
    }
    QF_timeEvtListHead_ = me;

    QF_CRIT_EXIT_();
}
/*..........................................................................*/
/* Recall one event from the deferred-event queue 'eq' and post it (LIFO)
* to the front of the active object's own queue.
* Returns 1 if an event was recalled, 0 if the deferred queue was empty.
*/
uint8_t QActive_recall(QActive *me, QEQueue *eq) {
    QEvent const *evt = QEQueue_get(eq); /* get an evt from deferred queue */

    if (evt == (QEvent const *)0) {      /* nothing deferred? */
        return (uint8_t)0;
    }

    {
        QF_CRIT_STAT_

        /* deliver the recalled event to the front of the AO's queue */
        QActive_postLIFO(me, evt);

        QF_CRIT_ENTRY_();
        if (QF_EVT_POOL_ID_(evt) != (uint8_t)0) { /* a dynamic event? */
            /* after posting to the AO's queue the event must be referenced
            * at least twice: once in the deferred event queue (eq->get()
            * did NOT decrement the reference counter) and once in the
            * AO's event queue.
            */
            Q_ASSERT(QF_EVT_REF_CTR_(evt) > (uint8_t)1);

            /* decrement the reference counter once, to account for
            * removing the event from the deferred event queue.
            */
            QF_EVT_REF_CTR_DEC_(evt);
        }
        QF_CRIT_EXIT_();
    }
    return (uint8_t)1;                   /* event was recalled */
}
//**************************************************************************** // @description // Starts execution of the AO and registers the AO with the framework. // // @param[in] prio priority at which to start the active object // @param[in] qSto pointer to the storage for the ring buffer of the // event queue (used only with the built-in QP::QEQueue) // @param[in] qLen length of the event queue (in events) // @param[in] stkSto pointer to the stack storage (used only when // per-AO stack is needed) // @param[in] stkSize stack size (in bytes) // @param[in] ie pointer to the optional initialization event // (might be NULL). // void QMActive::start(uint_fast8_t const prio, QEvt const *qSto[], uint_fast16_t const qLen, void * const stkSto, uint_fast16_t const stkSize, QEvt const * const ie) { Q_REQUIRE_ID(200, (!QXK_ISR_CONTEXT_()) /* don't start AO's in an ISR! */ && (prio <= (uint_fast8_t)QF_MAX_ACTIVE) && (qSto != static_cast<QEvt const **>(0)) && (qLen != static_cast<uint_fast16_t>(0)) && (stkSto != static_cast<void *>(0)) && (stkSize != static_cast<uint_fast16_t>(0))); m_eQueue.init(qSto, qLen); // initialize QEQueue of this AO // initialize the stack of the private thread QXK_stackInit_(this, static_cast<QXThreadHandler>(&thread_ao), stkSto, stkSize); m_prio = prio; // set the QF priority of this AO QF::add_(this); // make QF aware of this AO this->init(ie); // take the top-most initial tran. (virtual) QS_FLUSH(); // flush the trace buffer to the host QF_CRIT_STAT_ QF_CRIT_ENTRY_(); QXK_attr_.readySet.insert(m_prio); if (QXK_attr_.curr != static_cast<QMActive *>(0)) { // is QXK running? QXK_sched_(); } QF_CRIT_EXIT_(); }
// Post the event 'e' to this GUI active object by handing it to the Qt
// event loop (margin is ignored -- the Qt event queue is unbounded).
// NOTE: the matching #if/#else with the non-Q_SPY signature lies outside
// this chunk; the dangling #endif below closes it.
bool GuiQMActive::post_(QEvt const * const e, uint_fast16_t const /*margin*/,
                        void const * const sender)
#endif
{
    QF_CRIT_STAT_
    QF_CRIT_ENTRY_();

    QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_POST_FIFO, QS::priv_.aoObjFilter, this)
        QS_TIME_();                      // timestamp
        QS_OBJ_(sender);                 // the sender object
        QS_SIG_(e->sig);                 // the signal of the event
        QS_OBJ_(this);                   // this active object
        QS_2U8_(QF_EVT_POOL_ID_(e),      /* the poolID of the event */
                QF_EVT_REF_CTR_(e));     // the ref Ctr of the event
        QS_EQC_(0);                      // number of free entries (not used)
        QS_EQC_(0);                      // min number of free entries (not used)
    QS_END_NOCRIT_()

    // is it a dynamic event?
    if (QF_EVT_POOL_ID_(e) != static_cast<uint8_t>(0)) {
        QF_EVT_REF_CTR_INC_(e);          // increment the reference counter
    }
    QF_CRIT_EXIT_();

    // QCoreApplication::postEvent() is thread-safe per Qt documentation
    QCoreApplication::postEvent(QApplication::instance(), new QP_Event(e));
    return true;
}
/*..........................................................................*/
/* Rearm the time event 'me' to fire in 'nTicks' clock ticks, regardless of
* its current state; links it into the global list only if not yet linked.
* Returns 1 if the event was armed before the call, 0 if it was disarmed.
*/
uint8_t QTimeEvt_rearm(QTimeEvt * const me, QTimeEvtCtr const nTicks) {
    uint8_t isArmed;
    QF_CRIT_STAT_

    Q_REQUIRE((nTicks != (QTimeEvtCtr)0) /* cannot arm a timer with 0 ticks */
              && (me->act != (QActive *)0) /* active object must be valid */
              && (me->super.sig >= (QSignal)Q_USER_SIG)); /* valid signal */

    QF_CRIT_ENTRY_();
    if (me->ctr == (QTimeEvtCtr)0) {     /* is the time evt disarmed? */
        isArmed = (uint8_t)0;
        /* the refCtr_ of a time event doubles as the "linked" flag */
        if (QF_EVT_REF_CTR_(&me->super) == (uint8_t)0) { /* not linked? */
            me->next = QF_timeEvtListHead_;
            QF_timeEvtListHead_ = me;
            QF_EVT_REF_CTR_INC_(&me->super); /* mark as linked */
        }
    }
    else {                               /* the time event is armed */
        isArmed = (uint8_t)1;
    }
    me->ctr = nTicks; /* re-load the tick counter (shift the phasing) */

    QS_BEGIN_NOCRIT_(QS_QF_TIMEEVT_REARM, QS_teObj_, me)
        QS_TIME_();                      /* timestamp */
        QS_OBJ_(me);                     /* this time event object */
        QS_OBJ_(me->act);                /* the active object */
        QS_TEC_(me->ctr);                /* the number of ticks */
        QS_TEC_(me->interval);           /* the interval */
        QS_U8_(isArmed);                 /* was the timer armed? */
    QS_END_NOCRIT_()

    QF_CRIT_EXIT_();
    return isArmed;
}
/*..........................................................................*/
/* Unsubscribe the active object 'me' from all signals it subscribed to.
* Clears the AO's priority bit in every entry of the subscriber list.
* FIX: read QF_div8Lkup[] through Q_ROM_BYTE(), as done for QF_pwr2Lkup[]
* and QF_invPwr2Lkup[] below (and in QActive_subscribe()); a direct read
* of a ROM table is wrong on Harvard-architecture targets.
*/
void QActive_unsubscribeAll(QActive const *me) {
    uint8_t p = me->prio;
    uint8_t i;
    QSignal sig;

    Q_REQUIRE(((uint8_t)0 < p) && (p <= (uint8_t)QF_MAX_ACTIVE)
              && (QF_active_[p] == me));

    i = Q_ROM_BYTE(QF_div8Lkup[p]);  /* byte index for this priority */
    for (sig = (QSignal)Q_USER_SIG; sig < QF_maxSignal_; ++sig) {
        QF_CRIT_STAT_
        QF_CRIT_ENTRY_();
        /* is this AO subscribed to signal 'sig'? */
        if ((QF_PTR_AT_(QF_subscrList_, sig).bits[i]
             & Q_ROM_BYTE(QF_pwr2Lkup[p])) != (uint8_t)0)
        {
            QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_UNSUBSCRIBE, QS_aoObj_, me)
                QS_TIME_();              /* timestamp */
                QS_SIG_(sig);            /* the signal of this event */
                QS_OBJ_(me);             /* this active object */
            QS_END_NOCRIT_()

            /* clear the priority bit */
            QF_PTR_AT_(QF_subscrList_, sig).bits[i] &=
                Q_ROM_BYTE(QF_invPwr2Lkup[p]);
        }
        QF_CRIT_EXIT_();
    }
}
//**************************************************************************** void QXThread::start(uint_fast8_t const prio, QEvt const *qSto[], uint_fast16_t const qLen, void * const stkSto, uint_fast16_t const stkSize, QEvt const * const /*ie*/) { QF_CRIT_STAT_ Q_REQUIRE_ID(300, (!QXK_ISR_CONTEXT_()) /* don't start AO's in an ISR! */ && (prio <= static_cast<uint_fast8_t>(QF_MAX_ACTIVE)) && (stkSto != static_cast<void *>(0)) && (stkSize != static_cast<uint_fast16_t>(0)) && (m_state.act == static_cast<QActionHandler>(0))); // is storage for the queue buffer provided? if (qSto != static_cast<QEvt const **>(0)) { m_eQueue.init(qSto, qLen); } // "naked" threads provide their thread function in place of // the top-most initial transition 'me->super.temp.act' QXK_stackInit_(this, reinterpret_cast<QXThreadHandler>(m_temp.act), stkSto, stkSize); m_prio = prio; QF::add_(this); // make QF aware of this naked thread QF_CRIT_ENTRY_(); QXK_attr_.readySet.insert(m_prio); // is QXK running? if (QXK_attr_.curr != static_cast<QMActive *>(0)) { QXK_sched_(); } QF_CRIT_EXIT_(); }
//**************************************************************************** /// @description /// This function is part of the Publish-Subscribe event delivery mechanism /// available in QF. Un-subscribing from all events means that the framework /// will stop posting any published events to the event queue of the active /// object. /// /// @note Due to the latency of event queues, an active object should NOT /// assume that no events will ever be dispatched to the state machine of /// the active object after un-subscribing from all events. /// The events might be already in the queue, or just about to be posted /// and the un-subscribe operation will not flush such events. Also, the /// alternative event-delivery mechanisms, such as direct event posting or /// time events, can be still delivered to the event queue of the active /// object. /// /// @sa QP::QF::publish_(), QP::QMActive::subscribe(), and /// QP::QMActive::unsubscribe() /// void QMActive::unsubscribeAll(void) const { uint_fast8_t const p = m_prio; Q_REQUIRE_ID(500, (static_cast<uint_fast8_t>(0) < p) && (p <= static_cast<uint_fast8_t>(QF_MAX_ACTIVE)) && (QF::active_[p] == this)); uint_fast8_t const i = static_cast<uint_fast8_t>(QF_div8Lkup[p]); enum_t sig; for (sig = Q_USER_SIG; sig < QF_maxSignal_; ++sig) { QF_CRIT_STAT_ QF_CRIT_ENTRY_(); if ((QF_PTR_AT_(QF_subscrList_, sig).m_bits[i] & QF_pwr2Lkup[p]) != static_cast<uint8_t>(0)) { QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_UNSUBSCRIBE, QS::priv_.aoObjFilter, this) QS_TIME_(); // timestamp QS_SIG_(sig); // the signal of this event QS_OBJ_(this); // this active object QS_END_NOCRIT_() // clear the priority bit QF_PTR_AT_(QF_subscrList_, sig).m_bits[i] &= QF_invPwr2Lkup[p]; } QF_CRIT_EXIT_(); } }
/* NOTE: disarm a timer (no harm in disarming an already disarmed timer) */
/* Disarm the time event 'me' by zeroing its down-counter; actual removal
* from the linked list happens later inside the tick processing.
* Returns 1 if the event was armed at the time of the call, else 0.
*/
uint8_t QTimeEvt_disarm(QTimeEvt * const me) {
    uint8_t wasArmed;
    QF_CRIT_STAT_

    QF_CRIT_ENTRY_();
    if (me->ctr != (QTimeEvtCtr)0) {     /* is the time evt running? */
        wasArmed = (uint8_t)1;

        /* NOTE: trace BEFORE clearing ctr, so the record shows the
        * remaining ticks at the moment of disarming
        */
        QS_BEGIN_NOCRIT_(QS_QF_TIMEEVT_DISARM, QS_priv_.teObjFilter, me)
            QS_TIME_();                  /* timestamp */
            QS_OBJ_(me);                 /* this time event object */
            QS_OBJ_(me->act);            /* the target AO */
            QS_TEC_(me->ctr);            /* the number of ticks */
            QS_TEC_(me->interval);       /* the interval */
            QS_U8_((uint8_t)(me->super.refCtr_ & (uint8_t)0x7F));/*tick rate*/
        QS_END_NOCRIT_()

        me->ctr = (QTimeEvtCtr)0; /* schedule removal from the list */
    }
    else {                      /* the time event was already not running */
        wasArmed = (uint8_t)0;

        QS_BEGIN_NOCRIT_(QS_QF_TIMEEVT_DISARM_ATTEMPT,
                         QS_priv_.teObjFilter, me)
            QS_TIME_();                  /* timestamp */
            QS_OBJ_(me);                 /* this time event object */
            QS_OBJ_(me->act);            /* the target AO */
            QS_U8_((uint8_t)(me->super.refCtr_ & (uint8_t)0x7F));/*tick rate*/
        QS_END_NOCRIT_()
    }
    QF_CRIT_EXIT_();
    return wasArmed;
}
//****************************************************************************
/// @description
/// This function implements a simple garbage collector for dynamic events.
/// Only dynamic events are candidates for recycling. (A dynamic event is one
/// that is allocated from an event pool, which is determined as non-zero
/// e->poolId_ attribute.) Next, the function decrements the reference counter
/// of the event (e->refCtr_), and recycles the event only if the counter
/// drops to zero (meaning that no more references are outstanding for this
/// event). The dynamic event is recycled by returning it to the pool from
/// which it was originally allocated.
///
/// @param[in] e  pointer to the event to recycle
///
/// @note
/// QF invokes the garbage collector at all appropriate contexts, when
/// an event can become garbage (automatic garbage collection), so the
/// application code should have no need to call QP::QF::gc() directly.
/// The QP::QF::gc() function is exposed only for special cases when your
/// application sends dynamic events to the "raw" thread-safe queues
/// (see QP::QEQueue). Such queues are processed outside of QF and the
/// automatic garbage collection is **NOT** performed for these events.
/// In this case you need to call QP::QF::gc() explicitly.
///
void QF::gc(QEvt const * const e) {
    // is it a dynamic event?
    if (QF_EVT_POOL_ID_(e) != static_cast<uint8_t>(0)) {
        QF_CRIT_STAT_
        QF_CRIT_ENTRY_();

        // isn't this the last reference?
        if (e->refCtr_ > static_cast<uint8_t>(1)) {
            QF_EVT_REF_CTR_DEC_(e);      // decrement the ref counter

            QS_BEGIN_NOCRIT_(QS_QF_GC_ATTEMPT,
                             static_cast<void *>(0), static_cast<void *>(0))
                QS_TIME_();              // timestamp
                QS_SIG_(e->sig);         // the signal of the event
                QS_2U8_(e->poolId_, e->refCtr_);// pool Id & refCtr of the evt
            QS_END_NOCRIT_()

            QF_CRIT_EXIT_();
        }
        // this is the last reference to this event, recycle it
        else {
            // pool IDs are 1-based; convert to 0-based pool index
            uint_fast8_t idx = static_cast<uint_fast8_t>(e->poolId_)
                               - static_cast<uint_fast8_t>(1);

            QS_BEGIN_NOCRIT_(QS_QF_GC,
                             static_cast<void *>(0), static_cast<void *>(0))
                QS_TIME_();              // timestamp
                QS_SIG_(e->sig);         // the signal of the event
                QS_2U8_(e->poolId_, e->refCtr_);// pool Id & refCtr of the evt
            QS_END_NOCRIT_()

            QF_CRIT_EXIT_();

            // pool ID must be in range
            Q_ASSERT_ID(410, idx < QF_maxPool_);

#ifdef Q_EVT_VIRTUAL
            // explicitly execute the destructor
            // NOTE: casting 'const' away is legitimate,
            // because it's a pool event
            QF_EVT_CONST_CAST_(e)->~QEvt(); // xtor,
#endif
            // cast 'const' away, which is OK, because it's a pool event
            QF_EPOOL_PUT_(QF_pool_[idx], QF_EVT_CONST_CAST_(e));
        }
    }
}
/* Publish event 'e' to all active objects subscribed to e->sig (multicast,
* highest-priority subscribers first), then run the garbage collector.
* NOTE: the matching #if/#else with the non-Q_SPY signature lies outside
* this chunk; the dangling #endif below closes it.
*/
void QF_publish_(QEvt const * const e, void const * const sender)
#endif
{
    QF_CRIT_STAT_

    /* make sure that the published signal is within the configured range */
    Q_REQUIRE(e->sig < (QSignal)QF_maxSignal_);

    QF_CRIT_ENTRY_();

    QS_BEGIN_NOCRIT_(QS_QF_PUBLISH, (void *)0, (void *)0)
        QS_TIME_();                      /* the timestamp */
        QS_OBJ_(sender);                 /* the sender object */
        QS_SIG_(e->sig);                 /* the signal of the event */
        QS_2U8_(e->poolId_, e->refCtr_);/* pool Id & ref Count of the event */
    QS_END_NOCRIT_()

    if (e->poolId_ != (uint8_t)0) {      /* is it a dynamic event? */
        /* protect the event from being recycled while multicasting */
        QF_EVT_REF_CTR_INC_(e);  /* increment reference counter, NOTE01 */
    }
    QF_CRIT_EXIT_();

#if (QF_MAX_ACTIVE <= 8)
    {
        /* all subscribers fit in one byte of the subscriber list */
        uint8_t tmp = QF_subscrList_[e->sig].bits[0];
        while (tmp != (uint8_t)0) {
            uint8_t p = QF_LOG2(tmp);    /* highest-priority subscriber */
            tmp &= Q_ROM_BYTE(QF_invPwr2Lkup[p]); /* clear subscriber bit */
            Q_ASSERT(QF_active_[p] != (QActive *)0); /* must be registered */

            /* QACTIVE_POST() asserts internally if the queue overflows */
            QACTIVE_POST(QF_active_[p], e, sender);
        }
    }
#else
    {
        /* scan the subscriber bytes from the highest priorities down */
        uint_t i = (uint_t)Q_DIM(QF_subscrList_[0].bits);
        do {                 /* go through all bytes in the subscription list */
            uint8_t tmp;
            --i;
            tmp = QF_PTR_AT_(QF_subscrList_, e->sig).bits[i];
            while (tmp != (uint8_t)0) {
                uint8_t p = QF_LOG2(tmp);
                tmp &= Q_ROM_BYTE(QF_invPwr2Lkup[p]);/*clear subscriber bit */
                p = (uint8_t)(p + (uint8_t)(i << 3));/* adjust the priority */
                Q_ASSERT(QF_active_[p] != (QActive *)0);/*must be registered*/

                /* QACTIVE_POST() asserts internally if the queue overflows */
                QACTIVE_POST(QF_active_[p], e, sender);
            }
        } while (i != (uint_t)0);
    }
#endif

    QF_gc(e); /* run the garbage collector, see NOTE01 */
}
//**************************************************************************** //! cancel the delay bool QXThread::delayCancel(void) { QF_CRIT_STAT_ QF_CRIT_ENTRY_(); bool wasArmed = teDisarm_(); unblock_(); QF_CRIT_EXIT_(); return wasArmed; }
//**************************************************************************** //! unblock (resume) a given "naked" thread void QXThread::unblock(void) const { QF_CRIT_STAT_ // the unblocked thread must be a "naked" thread (no state) Q_REQUIRE_ID(800, m_state.act == (QActionHandler)0); QF_CRIT_ENTRY_(); unblock_(); QF_CRIT_EXIT_(); }
//**************************************************************************** //! block (suspend) the current "naked" thread void QXThread::block(void) { QF_CRIT_STAT_ QF_CRIT_ENTRY_(); QXThread *thr = static_cast<QXThread *>(QXK_attr_.curr); Q_REQUIRE_ID(700, (!QXK_ISR_CONTEXT_()) /* can't block inside an ISR */ /* this must be a "naked" thread (no state) */ && (thr->m_state.act == static_cast<QActionHandler>(0))); thr->block_(); QF_CRIT_EXIT_(); }
//............................................................................ void QF::gc(QEvt const * const e) { if (QF_EVT_POOL_ID_(e) != u8_0) { // is it a dynamic event? QF_CRIT_STAT_ QF_CRIT_ENTRY_(); if (QF_EVT_REF_CTR_(e) > u8_1) { // isn't this the last reference? QF_EVT_REF_CTR_DEC_(e); // decrement the ref counter QS_BEGIN_NOCRIT_(QS_QF_GC_ATTEMPT, null_void, null_void) QS_TIME_(); // timestamp QS_SIG_(e->sig); // the signal of the event QS_U8_(QF_EVT_POOL_ID_(e)); // the pool Id of the event QS_U8_(QF_EVT_REF_CTR_(e)); // the ref count of the event QS_END_NOCRIT_() QF_CRIT_EXIT_(); } else { // this is the last reference to this event, recycle it uint8_t idx = static_cast<uint8_t>(QF_EVT_POOL_ID_(e) - u8_1); QS_BEGIN_NOCRIT_(QS_QF_GC, null_void, null_void) QS_TIME_(); // timestamp QS_SIG_(e->sig); // the signal of the event QS_U8_(QF_EVT_POOL_ID_(e)); // the pool Id of the event QS_U8_(QF_EVT_REF_CTR_(e)); // the ref count of the event QS_END_NOCRIT_() QF_CRIT_EXIT_(); Q_ASSERT(idx < QF_maxPool_); #ifdef Q_EVT_VIRTUAL QF_EVT_CONST_CAST_(e)->~QEvt(); // xtor, cast 'const' away, // which is legitimate, because it's a pool event #endif // cast 'const' away, which is OK, because it's a pool event QF_EPOOL_PUT_(QF_pool_[idx], QF_EVT_CONST_CAST_(e)); } } }
/**
* \description
* Arms a time event to fire in a specified number of clock ticks and with
* a specified interval. If the interval is zero, the time event is armed for
* one shot ('one-shot' time event). The time event gets directly posted
* (using the FIFO policy) into the event queue of the host active object.
*
* \arguments
* \arg[in,out] \c me       pointer (see \ref derivation)
* \arg[in]     \c nTicks   number of clock ticks (at the associated rate)
*                          to rearm the time event with.
* \arg[in]     \c interval interval (in clock ticks) for periodic time event.
*
* \note After posting, a one-shot time event gets automatically disarmed
* while a periodic time event (interval != 0) is automatically re-armed.
*
* \note A time event can be disarmed at any time by calling the
* QTimeEvt_disarm() function. Also, a time event can be re-armed to fire
* in a different number of clock ticks by calling the QTimeEvt_rearm()
* function.
*
* \usage
* The following example shows how to arm a one-shot time event from a state
* machine of an active object:
* \include qf_state.c
*/
void QTimeEvt_armX(QTimeEvt * const me,
                   QTimeEvtCtr const nTicks, QTimeEvtCtr const interval)
{
    /* the tick rate is encoded in the low 7 bits of refCtr_ */
    uint_fast8_t tickRate = (uint_fast8_t)me->super.refCtr_
                            & (uint_fast8_t)0x7F;
    QTimeEvtCtr ctr = me->ctr;
    QF_CRIT_STAT_

    /** \pre the host AO must be valid, time evnet must be disarmed,
    * number of clock ticks cannot be zero, and the signal must be valid.
    */
    Q_REQUIRE_ID(100, (me->act != (void *)0)
                      && (ctr == (QTimeEvtCtr)0)
                      && (nTicks != (QTimeEvtCtr)0)
                      && (tickRate < (uint_fast8_t)QF_MAX_TICK_RATE)
                      && (me->super.sig >= (QSignal)Q_USER_SIG));

    QF_CRIT_ENTRY_();
    me->ctr = nTicks;
    me->interval = interval;

    /* is the time event unlinked?
    * NOTE: For the duration of a single clock tick of the specified tick
    * rate a time event can be disarmed and yet still linked into the list,
    * because un-linking is performed exclusively in the QF_tickX() function.
    */
    if ((me->super.refCtr_ & (uint8_t)0x80) == (uint8_t)0) {
        me->super.refCtr_ |= (uint8_t)0x80;  /* mark as linked */

        /* The time event is initially inserted into the separate
        * "freshly armed" link list based on QF_timeEvtHead_[tickRate].act.
        * Only later, inside the QF_tickX() function, the "freshly armed"
        * list is appended to the main list of armed time events based on
        * QF_timeEvtHead_[tickRate].next. Again, this is to keep any
        * changes to the main list exclusively inside the QF_tickX()
        * function.
        */
        me->next = (QTimeEvt *)QF_timeEvtHead_[tickRate].act;
        QF_timeEvtHead_[tickRate].act = me;
    }

    QS_BEGIN_NOCRIT_(QS_QF_TIMEEVT_ARM, QS_priv_.teObjFilter, me)
        QS_TIME_();                      /* timestamp */
        QS_OBJ_(me);                     /* this time event object */
        QS_OBJ_(me->act);                /* the active object */
        QS_TEC_(nTicks);                 /* the number of ticks */
        QS_TEC_(interval);               /* the interval */
        QS_U8_((uint8_t)tickRate);       /* tick rate */
    QS_END_NOCRIT_()

    QF_CRIT_EXIT_();
}
//**************************************************************************** //! obtain a message from the private message queue (block if no messages) void const *QXThread::queueGet(uint_fast16_t const nTicks, uint_fast8_t const tickRate) { QEQueueCtr nFree; QEvt const *e; QF_CRIT_STAT_ QF_CRIT_ENTRY_(); QXThread *thr = static_cast<QXThread *>(QXK_attr_.curr); Q_REQUIRE_ID(900, (!QXK_ISR_CONTEXT_()) /* can't block inside an ISR */ /* this must be a "naked" thread (no state) */ && (thr->m_state.act == (QActionHandler)0)); // is the queue empty? -- block and wait for event(s) if (thr->m_eQueue.m_frontEvt == static_cast<QEvt *>(0)) { thr->m_temp.obj = reinterpret_cast<QMState const *>(&thr->m_eQueue); thr->teArm_(static_cast<enum_t>(QXK_QUEUE_SIG), nTicks, tickRate); QXK_attr_.readySet.remove(thr->m_prio); QXK_sched_(); QF_CRIT_EXIT_(); QF_CRIT_EXIT_NOP(); QF_CRIT_ENTRY_(); } // is the queue not empty? if (thr->m_eQueue.m_frontEvt != static_cast<QEvt *>(0)) { e = thr->m_eQueue.m_frontEvt; // always remove from the front // volatile into tmp nFree= thr->m_eQueue.m_nFree + static_cast<QEQueueCtr>(1); thr->m_eQueue.m_nFree = nFree; // update the number of free // any events in the ring buffer? if (nFree <= thr->m_eQueue.m_end) { // remove event from the tail thr->m_eQueue.m_frontEvt = QF_PTR_AT_(thr->m_eQueue.m_ring, thr->m_eQueue.m_tail); if (thr->m_eQueue.m_tail == static_cast<QEQueueCtr>(0)) { thr->m_eQueue.m_tail = thr->m_eQueue.m_end; // wrap } --thr->m_eQueue.m_tail; QS_BEGIN_NOCRIT_(QP::QS_QF_ACTIVE_GET, QP::QS::priv_.aoObjFilter, thr) QS_TIME_(); // timestamp QS_SIG_(e->sig); // the signal of this event QS_OBJ_(&thr); // this active object QS_2U8_(e->poolId_, e->refCtr_); // pool Id & ref Count QS_EQC_(nFree); // number of free entries QS_END_NOCRIT_() }
// Post an event to this extended thread. Handles three cases: the thread's
// own private time event (timeout delivery), a normal post to the thread's
// event queue, and the error case of a thread without a queue.
// NOTE: the matching #if/#else with the non-Q_SPY signature lies outside
// this chunk; the dangling #endif below closes it.
bool QXThread::post_(QEvt const * const e, uint_fast16_t const margin,
                     void const * const sender)
#endif
{
    bool stat;
    QF_CRIT_STAT_

    // is it the private time event?
    if (e == &m_timeEvt) {
        QF_CRIT_ENTRY_();
        stat = true;
        // the private time event is disarmed and not in any queue,
        // so it is safe to change its signal. The signal of 0 means
        // that the time event __has__ expired.
        m_timeEvt.sig = static_cast<QSignal>(0);
        unblock();
        QF_CRIT_EXIT_();
    }
    // is the event queue provided?
    else if (m_eQueue.m_end != static_cast<QEQueueCtr>(0)) {
        // cancel a pending timeout before delivering the event
        QF_CRIT_ENTRY_();
        (void)teDisarm_();
        QF_CRIT_EXIT_();

#ifndef Q_SPY
        stat = QMActive::post_(e, margin);
#else
        stat = QMActive::post_(e, margin, sender);
#endif
    }
    else { // the queue is not available
        QF::gc(e); // make sure the event is not leaked
        stat = false;
        Q_ERROR_ID(410); // posting to a thread without a queue is an error
    }

    return stat;
}
//**************************************************************************** // @description // The preferred way of calling this function is from within the active // object that needs to stop. In other words, an active object should stop // itself rather than being stopped by someone else. This policy works // best, because only the active object itself "knows" when it has reached // the appropriate state for the shutdown. // // @note // By the time the AO calls QP::QActive::stop(), it should have unsubscribed // from all events and no more events should be directly-posted to it. // void QMActive::stop(void) { QF_CRIT_STAT_ /// @pre QActive_stop() must be called from the AO that wants to stop. Q_REQUIRE_ID(300, (!QXK_ISR_CONTEXT_()) /* don't stop AO's from an ISR! */ && (this == QXK_attr_.curr)); QF::remove_(this); // remove this active object from the QF QF_CRIT_ENTRY_(); QXK_attr_.readySet.remove(m_prio); QXK_sched_(); QF_CRIT_EXIT_(); }
//**************************************************************************** //! delay (timed block) the current "naked" thread bool QXThread::delay(uint_fast16_t const nTicks, uint_fast8_t const tickRate) { QF_CRIT_STAT_ QF_CRIT_ENTRY_(); QXThread *thr = static_cast<QXThread *>(QXK_attr_.curr); // remember the blocking object thr->m_temp.obj = reinterpret_cast<QMState const *>(&thr->m_timeEvt); thr->teArm_(static_cast<enum_t>(QXK_DELAY_SIG), nTicks, tickRate); thr->block_(); QF_CRIT_EXIT_(); // signal of zero means that the time event was posted without // being canceled. return (thr->m_timeEvt.sig == static_cast<QSignal>(0)); }
/*..........................................................................*/ QMutex QK_mutexLock(uint8_t prioCeiling) { uint8_t mutex; QF_CRIT_STAT_ QF_CRIT_ENTRY_(); mutex = QK_ceilingPrio_; /* the original QK priority ceiling to return */ if (QK_ceilingPrio_ < prioCeiling) { QK_ceilingPrio_ = prioCeiling; /* raise the QK priority ceiling */ } QS_BEGIN_NOCRIT_(QS_QK_MUTEX_LOCK, (void *)0, (void *)0) QS_TIME_(); /* timestamp */ QS_U8_(mutex); /* the original priority */ QS_U8_(QK_ceilingPrio_); /* the current priority ceiling */ QS_END_NOCRIT_() QF_CRIT_EXIT_(); return mutex; }
//............................................................................ void QActive::postLIFO(QEvt const * const e) { QF_CRIT_STAT_ QF_CRIT_ENTRY_(); QEQueueCtr nFree = m_eQueue.m_nFree;// tmp to avoid UB for volatile access // the queue must be able to accept the event (cannot overflow) Q_ASSERT(nFree != static_cast<QEQueueCtr>(0)); QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_POST_LIFO, QS::priv_.aoObjFilter, this) QS_TIME_(); // timestamp QS_SIG_(e->sig); // the signal of this event QS_OBJ_(this); // this active object QS_2U8_(e->poolId_, e->refCtr_); // pool Id & refCtr of the evt QS_EQC_(nFree); // number of free entries QS_EQC_(m_eQueue.m_nMin); // min number of free entries QS_END_NOCRIT_() if (e->poolId_ != u8_0) { // is it a dynamic event? QF_EVT_REF_CTR_INC_(e); // increment the reference counter } --nFree; // one free entry just used up m_eQueue.m_nFree = nFree; // update the volatile if (m_eQueue.m_nMin > nFree) { m_eQueue.m_nMin = nFree; // update minimum so far } QEvt const *frontEvt = m_eQueue.m_frontEvt;// read volatile into temporary m_eQueue.m_frontEvt = e; // deliver the event directly to the front if (frontEvt == null_evt) { // is the queue empty? QACTIVE_EQUEUE_SIGNAL_(this); // signal the event queue } else { // queue is not empty, leave event in the ring-buffer ++m_eQueue.m_tail; if (m_eQueue.m_tail == m_eQueue.m_end) { // need to wrap the tail? m_eQueue.m_tail = static_cast<QEQueueCtr>(0); // wrap around } QF_PTR_AT_(m_eQueue.m_ring, m_eQueue.m_tail) = frontEvt; } QF_CRIT_EXIT_(); }
/*..........................................................................*/
/* Return the current down-counter of the time event 'me' (number of clock
* ticks remaining until it fires; 0 means disarmed). The read is performed
* inside a critical section and traced via QS.
*/
QTimeEvtCtr QTimeEvt_ctr(QTimeEvt const * const me) {
    QTimeEvtCtr result;
    QF_CRIT_STAT_

    QF_CRIT_ENTRY_();
    result = me->ctr; /* snapshot the counter inside the crit. section */

    QS_BEGIN_NOCRIT_(QS_QF_TIMEEVT_CTR, QS_priv_.teObjFilter, me)
        QS_TIME_();                  /* timestamp */
        QS_OBJ_(me);                 /* this time event object */
        QS_OBJ_(me->act);            /* the target AO */
        QS_TEC_(result);             /* the current counter */
        QS_TEC_(me->interval);       /* the interval */
        QS_U8_((uint8_t)(me->super.refCtr_ & (uint8_t)0x7F)); /* tick rate */
    QS_END_NOCRIT_()

    QF_CRIT_EXIT_();
    return result;
}
/*..........................................................................*/
/* Unlock the QK priority-ceiling mutex by restoring the saved ceiling
* 'mutex' (as returned by QK_mutexLock()), then invoke the scheduler if a
* higher-priority task became eligible while the ceiling was raised.
*/
void QK_mutexUnlock(QMutex mutex) {
    QF_CRIT_STAT_
    QF_CRIT_ENTRY_();

    QS_BEGIN_NOCRIT_(QS_QK_MUTEX_UNLOCK, (void *)0, (void *)0)
        QS_TIME_();                  /* timestamp */
        QS_U8_(mutex);               /* the original priority */
        QS_U8_(QK_ceilingPrio_);     /* the current priority ceiling */
    QS_END_NOCRIT_()

    if (QK_ceilingPrio_ > mutex) {
        QK_ceilingPrio_ = mutex; /* restore the saved priority ceiling */
        mutex = QK_schedPrio_(); /* reuse 'mutex' to hold priority */
        if (mutex != (uint8_t)0) {
            QK_sched_(mutex);    /* run the highest-priority ready task */
        }
    }
    QF_CRIT_EXIT_();
}
/*..........................................................................*/
/* Return the memory block 'b' to the pool 'me' (push onto the free list).
* Preconditions: the pool cannot already have all blocks free (putting a
* block then would be a double-free), and 'b' must lie within the pool's
* managed memory range.
* FIX: the precondition used '<=', which let a put succeed even when
* nFree == nTot (all blocks already free); per the stated invariant
* ("# free blocks must be < total"), the check must be strict '<'.
*/
void QMPool_put(QMPool *me, void *b) {
    QF_CRIT_STAT_

    Q_REQUIRE(me->nFree < me->nTot);      /* # free blocks must be < total */
    Q_REQUIRE(QF_PTR_RANGE_(b, me->start, me->end)); /* b must be in range */

    QF_CRIT_ENTRY_();
    ((QFreeBlock *)b)->next = (QFreeBlock *)me->free_head;/* link into list */
    me->free_head = b;           /* set as new head of the free list */
    ++me->nFree;                 /* one more free block in this pool */

    QS_BEGIN_NOCRIT_(QS_QF_MPOOL_PUT, QS_mpObj_, me->start)
        QS_TIME_();              /* timestamp */
        QS_OBJ_(me->start);      /* the memory managed by this pool */
        QS_MPC_(me->nFree);      /* the number of free blocks in the pool */
    QS_END_NOCRIT_()

    QF_CRIT_EXIT_();
}
//**************************************************************************** /// @description /// This function removes a given active object from the active objects /// managed by the QF framework. It should not be called by the application /// directly, only through the function QP::QMActive::stop(). /// /// @param[in] a pointer to the active object to remove from the framework. /// /// @note The active object that is removed from the framework can no longer /// participate in the publish-subscribe event exchange. /// /// @sa QP::QF::add_() /// void QF::remove_(QMActive const * const a) { uint_fast8_t p = a->m_prio; Q_REQUIRE_ID(200, (static_cast<uint_fast8_t>(0) < p) && (p <= static_cast<uint_fast8_t>(QF_MAX_ACTIVE)) && (active_[p] == a)); QF_CRIT_STAT_ QF_CRIT_ENTRY_(); active_[p] = static_cast<QMActive *>(0); // free-up the priority level QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_REMOVE, QS::priv_.aoObjFilter, a) QS_TIME_(); // timestamp QS_OBJ_(a); // the active object QS_U8_(p); // the priority of the active object QS_END_NOCRIT_() QF_CRIT_EXIT_(); }
//............................................................................ void QActive::postLIFO(QEvt const * const e) { QF_CRIT_STAT_ QF_CRIT_ENTRY_(); QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_POST_LIFO, QS::aoObj_, this) QS_TIME_(); // timestamp QS_SIG_(e->sig); // the signal of this event QS_OBJ_(this); // this active object QS_U8_(QF_EVT_POOL_ID_(e)); // the pool Id of the event QS_U8_(QF_EVT_REF_CTR_(e)); // the ref count of the event QS_EQC_(m_eQueue.m_nFree); // number of free entries QS_EQC_(m_eQueue.m_nMin); // min number of free entries QS_END_NOCRIT_() if (QF_EVT_POOL_ID_(e) != u8_0) { // is it a dynamic event? QF_EVT_REF_CTR_INC_(e); // increment the reference counter } if (m_eQueue.m_frontEvt == null_evt) { // is the queue empty? m_eQueue.m_frontEvt = e; // deliver event directly QACTIVE_EQUEUE_SIGNAL_(this); // signal the event queue } else { // queue is not empty, leave event in the ring-buffer // queue must accept all posted events Q_ASSERT(m_eQueue.m_nFree != static_cast<QEQueueCtr>(0)); ++m_eQueue.m_tail; if (m_eQueue.m_tail == m_eQueue.m_end) { // need to wrap the tail? m_eQueue.m_tail = static_cast<QEQueueCtr>(0); // wrap around } QF_PTR_AT_(m_eQueue.m_ring, m_eQueue.m_tail) = m_eQueue.m_frontEvt; m_eQueue.m_frontEvt = e; // put event to front --m_eQueue.m_nFree; // update number of free events if (m_eQueue.m_nMin > m_eQueue.m_nFree) { m_eQueue.m_nMin = m_eQueue.m_nFree; // update minimum so far } } QF_CRIT_EXIT_(); }
/*..........................................................................*/
/* Subscribe the active object 'me' to the signal 'sig' by setting its
* priority bit in the subscriber list entry for that signal.
* FIX: compute the byte index 'i' only AFTER Q_REQUIRE() has validated the
* priority 'p'; previously QF_div8Lkup[p] was indexed with a possibly
* out-of-range priority before the precondition could catch it.
*/
void QActive_subscribe(QActive const * const me, enum_t const sig) {
    uint8_t p = me->prio;
    uint8_t i;
    QF_CRIT_STAT_

    Q_REQUIRE(((enum_t)Q_USER_SIG <= sig)   /* signal in the allowed range */
              && (sig < QF_maxSignal_)
              && ((uint8_t)0 < p) && (p <= (uint8_t)QF_MAX_ACTIVE)
              && (QF_active_[p] == me));    /* must be registered */

    i = Q_ROM_BYTE(QF_div8Lkup[p]);  /* byte index for this priority */

    QF_CRIT_ENTRY_();

    QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_SUBSCRIBE, QS_aoObj_, me)
        QS_TIME_();                  /* timestamp */
        QS_SIG_((QSignal)sig);       /* the signal of this event */
        QS_OBJ_(me);                 /* this active object */
    QS_END_NOCRIT_()

    /* set the priority bit */
    QF_PTR_AT_(QF_subscrList_, sig).bits[i] |= Q_ROM_BYTE(QF_pwr2Lkup[p]);

    QF_CRIT_EXIT_();
}
/*..........................................................................*/
/* Post the event 'e' to the FRONT of the "raw" thread-safe queue 'me'
* (LIFO policy). Asserts (cannot fail) if the queue overflows; increments
* the reference counter of pool-allocated events.
*/
void QEQueue_postLIFO(QEQueue *me, QEvent const *e) {
    QF_CRIT_STAT_
    QF_CRIT_ENTRY_();

    QS_BEGIN_NOCRIT_(QS_QF_EQUEUE_POST_LIFO, QS_eqObj_, me)
        QS_TIME_();                  /* timestamp */
        QS_SIG_(e->sig);             /* the signal of this event */
        QS_OBJ_(me);                 /* this queue object */
        QS_U8_(QF_EVT_POOL_ID_(e));  /* the pool Id of the event */
        QS_U8_(QF_EVT_REF_CTR_(e));  /* the ref count of the event */
        QS_EQC_(me->nFree);          /* number of free entries */
        QS_EQC_(me->nMin);           /* min number of free entries */
    QS_END_NOCRIT_()

    if (QF_EVT_POOL_ID_(e) != (uint8_t)0) {   /* is it a pool event? */
        QF_EVT_REF_CTR_INC_(e);      /* increment the reference counter */
    }

    if (me->frontEvt != (QEvent *)0) {        /* is the queue not empty? */
        /* the queue must be able to accept the event (cannot overflow) */
        Q_ASSERT(me->nFree != (QEQueueCtr)0);

        /* displace the current front event into the ring buffer */
        ++me->tail;
        if (me->tail == me->end) {   /* need to wrap the tail? */
            me->tail = (QEQueueCtr)0; /* wrap around */
        }
        QF_PTR_AT_(me->ring, me->tail) = me->frontEvt;/* save old front evt */

        --me->nFree;                 /* update number of free events */
        if (me->nMin > me->nFree) {
            me->nMin = me->nFree;    /* update minimum so far */
        }
    }

    me->frontEvt = e;                /* stick the new event to the front */
    QF_CRIT_EXIT_();
}