//****************************************************************************
/// @description
/// This function is part of the Publish-Subscribe event delivery mechanism
/// available in QF. Un-subscribing from all events means that the framework
/// will stop posting any published events to the event queue of the active
/// object.
///
/// @note Due to the latency of event queues, an active object should NOT
/// assume that no events will ever be dispatched to its state machine after
/// un-subscribing from all events. The events might already be in the queue,
/// or just about to be posted, and the un-subscribe operation will not flush
/// such events. Also, the alternative event-delivery mechanisms, such as
/// direct event posting or time events, can still deliver events to the
/// event queue of the active object.
///
/// @sa QP::QF::publish_(), QP::QMActive::subscribe(), and
/// QP::QMActive::unsubscribe()
///
void QMActive::unsubscribeAll(void) const {
    uint_fast8_t const p = m_prio;

    Q_REQUIRE_ID(500, (static_cast<uint_fast8_t>(0) < p)
                      && (p <= static_cast<uint_fast8_t>(QF_MAX_ACTIVE))
                      && (QF::active_[p] == this));

    uint_fast8_t const i = static_cast<uint_fast8_t>(QF_div8Lkup[p]);
    enum_t sig;

    for (sig = Q_USER_SIG; sig < QF_maxSignal_; ++sig) {
        QF_CRIT_STAT_
        QF_CRIT_ENTRY_();
        if ((QF_PTR_AT_(QF_subscrList_, sig).m_bits[i] & QF_pwr2Lkup[p])
            != static_cast<uint8_t>(0))
        {
            QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_UNSUBSCRIBE,
                             QS::priv_.aoObjFilter, this)
                QS_TIME_();    // timestamp
                QS_SIG_(sig);  // the signal of this event
                QS_OBJ_(this); // this active object
            QS_END_NOCRIT_()

            // clear the priority bit in the subscriber list for this signal
            QF_PTR_AT_(QF_subscrList_, sig).m_bits[i] &= QF_invPwr2Lkup[p];
        }
        QF_CRIT_EXIT_();
    }
}
/*..........................................................................*/
void QActive_unsubscribeAll(QActive const *me) {
    uint8_t p = me->prio;
    uint8_t i;
    QSignal sig;

    Q_REQUIRE(((uint8_t)0 < p) && (p <= (uint8_t)QF_MAX_ACTIVE)
              && (QF_active_[p] == me));

    i = QF_div8Lkup[p];
    for (sig = (QSignal)Q_USER_SIG; sig < QF_maxSignal_; ++sig) {
        QF_CRIT_STAT_
        QF_CRIT_ENTRY_();
        if ((QF_PTR_AT_(QF_subscrList_, sig).bits[i]
             & Q_ROM_BYTE(QF_pwr2Lkup[p])) != (uint8_t)0)
        {
            QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_UNSUBSCRIBE, QS_aoObj_, me)
                QS_TIME_();   /* timestamp */
                QS_SIG_(sig); /* the signal of this event */
                QS_OBJ_(me);  /* this active object */
            QS_END_NOCRIT_()

            /* clear the priority bit */
            QF_PTR_AT_(QF_subscrList_, sig).bits[i] &=
                Q_ROM_BYTE(QF_invPwr2Lkup[p]);
        }
        QF_CRIT_EXIT_();
    }
}
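/* Usage sketch (illustration only, not part of the QP source; the "Table"
* active object and its final state are hypothetical names): an active
* object typically un-subscribes from all events on entry to its final
* state, just before it stops.
*/
QState Table_final(Table * const me, QEvt const * const e) {
    QState status;
    switch (e->sig) {
        case Q_ENTRY_SIG: {
            /* stop receiving published events (queued events may remain) */
            QActive_unsubscribeAll(&me->super);
            status = Q_HANDLED();
            break;
        }
        default: {
            status = Q_SUPER(&QHsm_top);
            break;
        }
    }
    return status;
}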
QP_BEGIN_

//Q_DEFINE_THIS_MODULE("qeq_get")

//............................................................................
QEvt const *QEQueue::get(void) {
    QEvt const *e;
    QF_CRIT_STAT_

    QF_CRIT_ENTRY_();
    if (m_frontEvt == null_evt) { // is the queue empty?
        e = null_evt;             // no event available at this time
    }
    else {
        e = m_frontEvt;
        if (m_nFree != m_end) { // any events in the ring buffer?
            m_frontEvt = QF_PTR_AT_(m_ring, m_tail); // remove from the tail
            if (m_tail == static_cast<QEQueueCtr>(0)) { // need to wrap?
                m_tail = m_end;                         // wrap around
            }
            --m_tail;
            ++m_nFree; // one more free event in the ring buffer

            QS_BEGIN_NOCRIT_(QS_QF_EQUEUE_GET, QS::eqObj_, this)
                QS_TIME_();                 // timestamp
                QS_SIG_(e->sig);            // the signal of this event
                QS_OBJ_(this);              // this queue object
                QS_U8_(QF_EVT_POOL_ID_(e)); // the pool Id of the event
                QS_U8_(QF_EVT_REF_CTR_(e)); // the ref count of the event
                QS_EQC_(m_nFree);           // number of free entries
            QS_END_NOCRIT_()
        }
        else {
            m_frontEvt = null_evt; // the queue becomes empty

            QS_BEGIN_NOCRIT_(QS_QF_EQUEUE_GET_LAST, QS::eqObj_, this)
                QS_TIME_();                 // timestamp
                QS_SIG_(e->sig);            // the signal of this event
                QS_OBJ_(this);              // this queue object
                QS_U8_(QF_EVT_POOL_ID_(e)); // the pool Id of the event
                QS_U8_(QF_EVT_REF_CTR_(e)); // the ref count of the event
            QS_END_NOCRIT_()
        }
    }
    QF_CRIT_EXIT_();
    return e;
}
/*..........................................................................*/
QEvt const *QActive_get_(QActive * const me) {
    QEQueueCtr nFree;
    QEvt const *e;
    QF_CRIT_STAT_

    QF_CRIT_ENTRY_();
    QACTIVE_EQUEUE_WAIT_(me); /* wait for an event to arrive directly */

    e = me->eQueue.frontEvt; /* always remove the event from the front */
    nFree = me->eQueue.nFree + (QEQueueCtr)1; /* get volatile into tmp */
    me->eQueue.nFree = nFree; /* update the number of free entries */

    if (nFree <= me->eQueue.end) { /* any events in the ring buffer? */
        /* remove event from the tail */
        me->eQueue.frontEvt = QF_PTR_AT_(me->eQueue.ring, me->eQueue.tail);
        if (me->eQueue.tail == (QEQueueCtr)0) { /* need to wrap the tail? */
            me->eQueue.tail = me->eQueue.end;   /* wrap around */
        }
        --me->eQueue.tail;

        QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_GET, QS_priv_.aoObjFilter, me)
            QS_TIME_();                      /* timestamp */
            QS_SIG_(e->sig);                 /* the signal of this event */
            QS_OBJ_(me);                     /* this active object */
            QS_2U8_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
            QS_EQC_(nFree);                  /* number of free entries */
        QS_END_NOCRIT_()
    }
    else {
        me->eQueue.frontEvt = (QEvt const *)0; /* the queue becomes empty */

        QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_GET_LAST, QS_priv_.aoObjFilter, me)
            QS_TIME_();                      /* timestamp */
            QS_SIG_(e->sig);                 /* the signal of this event */
            QS_OBJ_(me);                     /* this active object */
            QS_2U8_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
        QS_END_NOCRIT_()
    }
    QF_CRIT_EXIT_();
    return e;
}
/*..........................................................................*/
QEvent const *QEQueue_get(QEQueue *me) {
    QEvent const *e;
    QF_CRIT_STAT_

    QF_CRIT_ENTRY_();
    if (me->frontEvt == (QEvent *)0) { /* is the queue empty? */
        e = (QEvent *)0;               /* no event available at this time */
    }
    else { /* the queue is not empty */
        e = me->frontEvt;
        if (me->nFree != me->end) { /* any events in the ring buffer? */
            me->frontEvt = QF_PTR_AT_(me->ring, me->tail); /* get from tail */
            if (me->tail == (QEQueueCtr)0) { /* need to wrap the tail? */
                me->tail = me->end;          /* wrap around */
            }
            --me->tail;
            ++me->nFree; /* one more free event in the ring buffer */

            QS_BEGIN_NOCRIT_(QS_QF_EQUEUE_GET, QS_eqObj_, me)
                QS_TIME_();                 /* timestamp */
                QS_SIG_(e->sig);            /* the signal of this event */
                QS_OBJ_(me);                /* this queue object */
                QS_U8_(QF_EVT_POOL_ID_(e)); /* the pool Id of the event */
                QS_U8_(QF_EVT_REF_CTR_(e)); /* the ref count of the event */
                QS_EQC_(me->nFree);         /* number of free entries */
            QS_END_NOCRIT_()
        }
        else {
            me->frontEvt = (QEvent *)0; /* the queue becomes empty */

            QS_BEGIN_NOCRIT_(QS_QF_EQUEUE_GET_LAST, QS_eqObj_, me)
                QS_TIME_();                 /* timestamp */
                QS_SIG_(e->sig);            /* the signal of this event */
                QS_OBJ_(me);                /* this queue object */
                QS_U8_(QF_EVT_POOL_ID_(e)); /* the pool Id of the event */
                QS_U8_(QF_EVT_REF_CTR_(e)); /* the ref count of the event */
            QS_END_NOCRIT_()
        }
    }
    QF_CRIT_EXIT_();
    return e;
}
#ifndef Q_SPY
void QF_publish_(QEvt const * const e)
#else
void QF_publish_(QEvt const * const e, void const * const sender)
#endif
{
    QF_CRIT_STAT_

    /* make sure that the published signal is within the configured range */
    Q_REQUIRE(e->sig < (QSignal)QF_maxSignal_);

    QF_CRIT_ENTRY_();

    QS_BEGIN_NOCRIT_(QS_QF_PUBLISH, (void *)0, (void *)0)
        QS_TIME_();                      /* the timestamp */
        QS_OBJ_(sender);                 /* the sender object */
        QS_SIG_(e->sig);                 /* the signal of the event */
        QS_2U8_(e->poolId_, e->refCtr_); /* pool Id & ref Count of the evt */
    QS_END_NOCRIT_()

    if (e->poolId_ != (uint8_t)0) { /* is it a dynamic event? */
        QF_EVT_REF_CTR_INC_(e);     /* increment reference counter, NOTE01 */
    }
    QF_CRIT_EXIT_();

#if (QF_MAX_ACTIVE <= 8)
    {
        uint8_t tmp = QF_subscrList_[e->sig].bits[0];
        while (tmp != (uint8_t)0) {
            uint8_t p = QF_LOG2(tmp);
            tmp &= Q_ROM_BYTE(QF_invPwr2Lkup[p]); /* clear subscriber bit */
            Q_ASSERT(QF_active_[p] != (QActive *)0); /* must be registered */

            /* QACTIVE_POST() asserts internally if the queue overflows */
            QACTIVE_POST(QF_active_[p], e, sender);
        }
    }
#else
    {
        uint_t i = (uint_t)Q_DIM(QF_subscrList_[0].bits);
        do { /* go through all bytes in the subscription list */
            uint8_t tmp;
            --i;
            tmp = QF_PTR_AT_(QF_subscrList_, e->sig).bits[i];
            while (tmp != (uint8_t)0) {
                uint8_t p = QF_LOG2(tmp);
                tmp &= Q_ROM_BYTE(QF_invPwr2Lkup[p]); /* clear subscr. bit */
                p = (uint8_t)(p + (uint8_t)(i << 3)); /* adjust priority */
                Q_ASSERT(QF_active_[p] != (QActive *)0); /* registered? */

                /* QACTIVE_POST() asserts internally if the queue overflows */
                QACTIVE_POST(QF_active_[p], e, sender);
            }
        } while (i != (uint_t)0);
    }
#endif

    QF_gc(e); /* run the garbage collector, see NOTE01 */
}
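/* Usage sketch (illustration only; the TableEvt type, its philoNum
* parameter, and DONE_SIG follow the DPP example and are hypothetical
* here): publishing a dynamic event allocated with Q_NEW(). Because
* poolId_ != 0 for pool events, QF_publish_() increments the reference
* counter before multicasting, and QF_gc() at the end decrements it, so
* the event is recycled only when no subscriber references it anymore --
* including the corner case of publishing with no subscribers at all.
*/
void Philo_signalDone(QActive * const me, uint8_t const n) {
    TableEvt *te = Q_NEW(TableEvt, DONE_SIG); /* allocate a dynamic event */
    te->philoNum = n;            /* set the event parameter */
    QF_PUBLISH(&te->super, me);  /* multicast to all DONE_SIG subscribers */
}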
/*..........................................................................*/
void QMPool_init(QMPool * const me, void * const poolSto,
                 uint32_t poolSize, QMPoolSize blockSize)
{
    QFreeBlock *fb;
    uint32_t nblocks;
    QS_CRIT_STAT_

    /* The memory block must be valid
    * and the poolSize must fit at least one free block
    * and the blockSize must not be too close to the top of the dynamic range
    */
    Q_REQUIRE((poolSto != (void *)0)
              && (poolSize >= (uint32_t)sizeof(QFreeBlock))
              && ((QMPoolSize)(blockSize + (QMPoolSize)sizeof(QFreeBlock))
                  > blockSize));

    me->free_head = poolSto;

    /* round up the blockSize to fit an integer # free blocks, no division */
    me->blockSize = (QMPoolSize)sizeof(QFreeBlock); /* start with just one */
    nblocks = (uint32_t)1; /* # free blocks that fit in one memory block */
    while (me->blockSize < blockSize) {
        me->blockSize += (QMPoolSize)sizeof(QFreeBlock);
        ++nblocks;
    }
    blockSize = me->blockSize; /* use the rounded-up value from now on */

    /* the pool buffer must fit at least one rounded-up block */
    Q_ASSERT(poolSize >= (uint32_t)blockSize);

    /* chain all blocks together in a free-list... */
    poolSize -= (uint32_t)blockSize;  /* don't count the last block */
    me->nTot = (QMPoolCtr)1;          /* the last block already in the pool */
    fb = (QFreeBlock *)me->free_head; /* start at the head of the free list */
    while (poolSize >= (uint32_t)blockSize) {
        fb->next = &QF_PTR_AT_(fb, nblocks); /* point next link to next blk */
        fb = fb->next;                   /* advance to the next block */
        poolSize -= (uint32_t)blockSize; /* reduce the available pool size */
        ++me->nTot;                  /* increment the number of blocks so far */
    }

    fb->next  = (QFreeBlock *)0; /* the last link points to NULL */
    me->nFree = me->nTot;        /* all blocks are free */
    me->nMin  = me->nTot;        /* the minimum number of free blocks */
    me->start = poolSto;         /* the original start of this pool buffer */
    me->end   = fb;              /* the last block in this pool */

    QS_BEGIN_(QS_QF_MPOOL_INIT, QS_priv_.mpObjFilter, me->start)
        QS_OBJ_(me->start); /* the memory managed by this pool */
        QS_MPC_(me->nTot);  /* the total number of blocks */
    QS_END_()
}
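/* Usage sketch (illustration only; the pool object, storage buffer, and
* sizes are hypothetical): initializing a pool over a statically allocated
* buffer. Note that the requested blockSize is rounded UP to a whole number
* of QFreeBlock free-list links, so the effective block size can be larger
* than requested and fewer blocks may fit than a naive division suggests.
*/
static QMPool l_smlPool;              /* the pool control object */
static uint8_t l_smlPoolSto[10 * 16]; /* storage for the small-event pool */

void BSP_initPool(void) {
    QMPool_init(&l_smlPool, l_smlPoolSto,
                (uint32_t)sizeof(l_smlPoolSto), (QMPoolSize)10);
}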
//****************************************************************************
//! obtain a message from the private message queue (block if no messages)
void const *QXThread::queueGet(uint_fast16_t const nTicks,
                               uint_fast8_t const tickRate)
{
    QEQueueCtr nFree;
    QEvt const *e;
    QF_CRIT_STAT_

    QF_CRIT_ENTRY_();
    QXThread *thr = static_cast<QXThread *>(QXK_attr_.curr);

    Q_REQUIRE_ID(900, (!QXK_ISR_CONTEXT_()) // can't block inside an ISR
        // this must be a "naked" thread (no state)
        && (thr->m_state.act == (QActionHandler)0));

    // is the queue empty? -- block and wait for event(s)
    if (thr->m_eQueue.m_frontEvt == static_cast<QEvt *>(0)) {
        thr->m_temp.obj = reinterpret_cast<QMState const *>(&thr->m_eQueue);

        thr->teArm_(static_cast<enum_t>(QXK_QUEUE_SIG), nTicks, tickRate);
        QXK_attr_.readySet.remove(thr->m_prio);
        QXK_sched_();
        QF_CRIT_EXIT_();
        QF_CRIT_EXIT_NOP();

        QF_CRIT_ENTRY_();
    }

    // is the queue not empty?
    if (thr->m_eQueue.m_frontEvt != static_cast<QEvt *>(0)) {
        e = thr->m_eQueue.m_frontEvt; // always remove from the front

        // volatile into tmp
        nFree = thr->m_eQueue.m_nFree + static_cast<QEQueueCtr>(1);
        thr->m_eQueue.m_nFree = nFree; // update the number of free entries

        // any events in the ring buffer?
        if (nFree <= thr->m_eQueue.m_end) {
            // remove event from the tail
            thr->m_eQueue.m_frontEvt =
                QF_PTR_AT_(thr->m_eQueue.m_ring, thr->m_eQueue.m_tail);
            if (thr->m_eQueue.m_tail == static_cast<QEQueueCtr>(0)) {
                thr->m_eQueue.m_tail = thr->m_eQueue.m_end; // wrap
            }
            --thr->m_eQueue.m_tail;

            QS_BEGIN_NOCRIT_(QP::QS_QF_ACTIVE_GET,
                             QP::QS::priv_.aoObjFilter, thr)
                QS_TIME_();                      // timestamp
                QS_SIG_(e->sig);                 // the signal of this event
                QS_OBJ_(thr);                    // this thread object
                QS_2U8_(e->poolId_, e->refCtr_); // pool Id & ref Count
                QS_EQC_(nFree);                  // number of free entries
            QS_END_NOCRIT_()
        }
        else {
            thr->m_eQueue.m_frontEvt = static_cast<QEvt *>(0); // now empty

            QS_BEGIN_NOCRIT_(QP::QS_QF_ACTIVE_GET_LAST,
                             QP::QS::priv_.aoObjFilter, thr)
                QS_TIME_();                      // timestamp
                QS_SIG_(e->sig);                 // the signal of this event
                QS_OBJ_(thr);                    // this thread object
                QS_2U8_(e->poolId_, e->refCtr_); // pool Id & ref Count
            QS_END_NOCRIT_()
        }
    }
    else { // the queue is still empty -- the wait must have timed out
        e = static_cast<QEvt *>(0); // no event available
    }
    QF_CRIT_EXIT_();

    return e;
}
/*..........................................................................*/
uint8_t QEQueue_post(QEQueue * const me, QEvt const * const e,
                     uint16_t const margin)
{
    QEQueueCtr nFree; /* temporary to avoid UB for volatile access */
    uint8_t status;
    QF_CRIT_STAT_

    Q_REQUIRE(e != (QEvt const *)0); /* the event must be valid */

    QF_CRIT_ENTRY_();
    nFree = me->nFree; /* get volatile into the temporary */

    if (nFree > (QEQueueCtr)margin) { /* required margin available? */

        QS_BEGIN_NOCRIT_(QS_QF_EQUEUE_POST_FIFO, QS_priv_.eqObjFilter, me)
            QS_TIME_();                      /* timestamp */
            QS_SIG_(e->sig);                 /* the signal of this event */
            QS_OBJ_(me);                     /* this queue object */
            QS_2U8_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
            QS_EQC_(nFree);                  /* number of free entries */
            QS_EQC_(me->nMin);               /* min number of free entries */
        QS_END_NOCRIT_()

        if (e->poolId_ != (uint8_t)0) { /* is it a pool event? */
            QF_EVT_REF_CTR_INC_(e);     /* increment the reference counter */
        }

        --nFree;           /* one free entry just used up */
        me->nFree = nFree; /* update the volatile */
        if (me->nMin > nFree) {
            me->nMin = nFree; /* update minimum so far */
        }

        if (me->frontEvt == (QEvt const *)0) { /* was the queue empty? */
            me->frontEvt = e;                  /* deliver event directly */
        }
        else { /* queue was not empty, insert event into the ring-buffer */
            /* insert event into the ring buffer (FIFO) */
            QF_PTR_AT_(me->ring, me->head) = e; /* insert e into buffer */
            if (me->head == (QEQueueCtr)0) { /* need to wrap the head? */
                me->head = me->end;          /* wrap around */
            }
            --me->head;
        }
        status = (uint8_t)1; /* event posted successfully */
    }
    else {
        /* dropping the event must be acceptable: margin must not be zero */
        Q_ASSERT(margin != (uint16_t)0);

        status = (uint8_t)0; /* event not posted */
    }
    QF_CRIT_EXIT_();

    return status;
}
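/* Usage sketch (illustration only; the l_txQueue object and the margin
* value are hypothetical): a margin of 4 means the event is posted only if
* at least 4 free entries remain in the queue afterwards, so bursts can be
* shed gracefully. A margin of 0 demands guaranteed delivery, and the queue
* must never overflow (see the assertion above). A failed post leaves the
* event un-consumed, so in this sketch the caller disposes of it.
*/
static QEQueue l_txQueue; /* raw queue, initialized with QEQueue_init() */

void TXmgr_send(QEvt const * const e) {
    if (QEQueue_post(&l_txQueue, e, (uint16_t)4) == (uint8_t)0) {
        QF_gc(e); /* not posted: let the garbage collector recycle it */
    }
}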
//............................................................................
void QActive::postLIFO(QEvt const * const e) {
    QF_CRIT_STAT_
    QF_CRIT_ENTRY_();

    QEQueueCtr nFree = m_eQueue.m_nFree; // tmp to avoid UB for volatile access

    // the queue must be able to accept the event (cannot overflow)
    Q_ASSERT(nFree != static_cast<QEQueueCtr>(0));

    QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_POST_LIFO, QS::priv_.aoObjFilter, this)
        QS_TIME_();                      // timestamp
        QS_SIG_(e->sig);                 // the signal of this event
        QS_OBJ_(this);                   // this active object
        QS_2U8_(e->poolId_, e->refCtr_); // pool Id & refCtr of the evt
        QS_EQC_(nFree);                  // number of free entries
        QS_EQC_(m_eQueue.m_nMin);        // min number of free entries
    QS_END_NOCRIT_()

    if (e->poolId_ != u8_0) {   // is it a dynamic event?
        QF_EVT_REF_CTR_INC_(e); // increment the reference counter
    }

    --nFree; // one free entry just used up
    m_eQueue.m_nFree = nFree; // update the volatile
    if (m_eQueue.m_nMin > nFree) {
        m_eQueue.m_nMin = nFree; // update minimum so far
    }

    QEvt const *frontEvt = m_eQueue.m_frontEvt; // read volatile into tmp
    m_eQueue.m_frontEvt = e; // deliver the event directly to the front

    if (frontEvt == null_evt) {       // is the queue empty?
        QACTIVE_EQUEUE_SIGNAL_(this); // signal the event queue
    }
    else { // queue is not empty, leave the old front event in the ring-buffer
        ++m_eQueue.m_tail;
        if (m_eQueue.m_tail == m_eQueue.m_end) { // need to wrap the tail?
            m_eQueue.m_tail = static_cast<QEQueueCtr>(0); // wrap around
        }
        QF_PTR_AT_(m_eQueue.m_ring, m_eQueue.m_tail) = frontEvt;
    }
    QF_CRIT_EXIT_();
}
//............................................................................
void QActive::postLIFO(QEvt const * const e) {
    QF_CRIT_STAT_
    QF_CRIT_ENTRY_();

    QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_POST_LIFO, QS::aoObj_, this)
        QS_TIME_();                 // timestamp
        QS_SIG_(e->sig);            // the signal of this event
        QS_OBJ_(this);              // this active object
        QS_U8_(QF_EVT_POOL_ID_(e)); // the pool Id of the event
        QS_U8_(QF_EVT_REF_CTR_(e)); // the ref count of the event
        QS_EQC_(m_eQueue.m_nFree);  // number of free entries
        QS_EQC_(m_eQueue.m_nMin);   // min number of free entries
    QS_END_NOCRIT_()

    if (QF_EVT_POOL_ID_(e) != u8_0) { // is it a dynamic event?
        QF_EVT_REF_CTR_INC_(e);       // increment the reference counter
    }

    if (m_eQueue.m_frontEvt == null_evt) { // is the queue empty?
        m_eQueue.m_frontEvt = e;           // deliver event directly
        QACTIVE_EQUEUE_SIGNAL_(this);      // signal the event queue
    }
    else { // queue is not empty, leave the old front event in the ring-buffer
        // the queue must accept all posted events
        Q_ASSERT(m_eQueue.m_nFree != static_cast<QEQueueCtr>(0));

        ++m_eQueue.m_tail;
        if (m_eQueue.m_tail == m_eQueue.m_end) { // need to wrap the tail?
            m_eQueue.m_tail = static_cast<QEQueueCtr>(0); // wrap around
        }
        QF_PTR_AT_(m_eQueue.m_ring, m_eQueue.m_tail) = m_eQueue.m_frontEvt;
        m_eQueue.m_frontEvt = e; // put the new event at the front

        --m_eQueue.m_nFree; // update the number of free events
        if (m_eQueue.m_nMin > m_eQueue.m_nFree) {
            m_eQueue.m_nMin = m_eQueue.m_nFree; // update minimum so far
        }
    }
    QF_CRIT_EXIT_();
}
/*..........................................................................*/
void QActive_subscribe(QActive const * const me, enum_t const sig) {
    uint8_t p = me->prio;
    uint8_t i = Q_ROM_BYTE(QF_div8Lkup[p]);
    QF_CRIT_STAT_

    Q_REQUIRE(((enum_t)Q_USER_SIG <= sig)
              && (sig < QF_maxSignal_)
              && ((uint8_t)0 < p) && (p <= (uint8_t)QF_MAX_ACTIVE)
              && (QF_active_[p] == me));

    QF_CRIT_ENTRY_();

    QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_SUBSCRIBE, QS_aoObj_, me)
        QS_TIME_();            /* timestamp */
        QS_SIG_((QSignal)sig); /* the signal of this event */
        QS_OBJ_(me);           /* this active object */
    QS_END_NOCRIT_()

    /* set the priority bit */
    QF_PTR_AT_(QF_subscrList_, sig).bits[i] |= Q_ROM_BYTE(QF_pwr2Lkup[p]);

    QF_CRIT_EXIT_();
}
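/* Usage sketch (illustration only; the "Table" active object, its serving
* state, and the signal names follow the DPP example and are hypothetical
* here): an active object typically subscribes to the published signals it
* wants to receive in its initial transition.
*/
QState Table_initial(Table * const me, QEvt const * const e) {
    (void)e; /* unused parameter */
    QActive_subscribe(&me->super, DONE_SIG);      /* Philo done eating */
    QActive_subscribe(&me->super, PAUSE_SIG);     /* pause the application */
    QActive_subscribe(&me->super, TERMINATE_SIG); /* shut down the app */
    return Q_TRAN(&Table_serving);
}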
/*..........................................................................*/
void QEQueue_postLIFO(QEQueue *me, QEvent const *e) {
    QF_CRIT_STAT_
    QF_CRIT_ENTRY_();

    QS_BEGIN_NOCRIT_(QS_QF_EQUEUE_POST_LIFO, QS_eqObj_, me)
        QS_TIME_();                 /* timestamp */
        QS_SIG_(e->sig);            /* the signal of this event */
        QS_OBJ_(me);                /* this queue object */
        QS_U8_(QF_EVT_POOL_ID_(e)); /* the pool Id of the event */
        QS_U8_(QF_EVT_REF_CTR_(e)); /* the ref count of the event */
        QS_EQC_(me->nFree);         /* number of free entries */
        QS_EQC_(me->nMin);          /* min number of free entries */
    QS_END_NOCRIT_()

    if (QF_EVT_POOL_ID_(e) != (uint8_t)0) { /* is it a pool event? */
        QF_EVT_REF_CTR_INC_(e);             /* increment the ref counter */
    }

    if (me->frontEvt != (QEvent *)0) { /* is the queue not empty? */
        /* the queue must be able to accept the event (cannot overflow) */
        Q_ASSERT(me->nFree != (QEQueueCtr)0);

        ++me->tail;
        if (me->tail == me->end) { /* need to wrap the tail? */
            me->tail = (QEQueueCtr)0; /* wrap around */
        }
        QF_PTR_AT_(me->ring, me->tail) = me->frontEvt; /* save old front evt */

        --me->nFree; /* update the number of free events */
        if (me->nMin > me->nFree) {
            me->nMin = me->nFree; /* update minimum so far */
        }
    }
    me->frontEvt = e; /* stick the new event at the front */

    QF_CRIT_EXIT_();
}
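/* Usage sketch (illustration only; the l_rawQueue object is hypothetical):
* LIFO posting places the event at the front of the queue, so it is
* fetched next, ahead of any events already waiting. QF itself uses the
* LIFO policy when recalling deferred events, so that a recalled event is
* not overtaken by events that arrived in the meantime.
*/
static QEQueue l_rawQueue; /* raw queue, initialized with QEQueue_init() */

void Sensor_remind(QEvent const * const e) {
    QEQueue_postLIFO(&l_rawQueue, e); /* e becomes the new front event */
}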
//****************************************************************************
/// @description
/// This function is part of the Publish-Subscribe event delivery mechanism
/// available in QF. Subscribing to an event means that the framework will
/// start posting all published events with a given signal @p sig to the
/// event queue of the active object.
///
/// @param[in] sig event signal to subscribe
///
/// The following example shows how the Table active object subscribes
/// to three signals in the initial transition:
/// @include qf_subscribe.c
///
/// @sa QP::QF::publish_(), QP::QMActive::unsubscribe(), and
/// QP::QMActive::unsubscribeAll()
///
void QMActive::subscribe(enum_t const sig) const {
    uint_fast8_t p = m_prio;

    Q_REQUIRE_ID(300, (Q_USER_SIG <= sig)
                      && (sig < QF_maxSignal_)
                      && (static_cast<uint_fast8_t>(0) < p)
                      && (p <= static_cast<uint_fast8_t>(QF_MAX_ACTIVE))
                      && (QF::active_[p] == this));

    uint_fast8_t const i = static_cast<uint_fast8_t>(QF_div8Lkup[p]);

    QF_CRIT_STAT_
    QF_CRIT_ENTRY_();

    QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_SUBSCRIBE, QS::priv_.aoObjFilter, this)
        QS_TIME_();    // timestamp
        QS_SIG_(sig);  // the signal of this event
        QS_OBJ_(this); // this active object
    QS_END_NOCRIT_()

    // set the priority bit in the subscriber list for this signal
    QF_PTR_AT_(QF_subscrList_, sig).m_bits[i] |= QF_pwr2Lkup[p];

    QF_CRIT_EXIT_();
}
#ifndef Q_SPY
void QF::publish_(QEvt const * const e) {
#else
void QF::publish_(QEvt const * const e, void const * const sender) {
#endif
    /// @pre the published signal must be within the configured range
    Q_REQUIRE_ID(100, static_cast<enum_t>(e->sig) < QF_maxSignal_);

    QF_CRIT_STAT_
    QF_CRIT_ENTRY_();

    QS_BEGIN_NOCRIT_(QS_QF_PUBLISH,
                     static_cast<void *>(0), static_cast<void *>(0))
        QS_TIME_();                      // the timestamp
        QS_OBJ_(sender);                 // the sender object
        QS_SIG_(e->sig);                 // the signal of the event
        QS_2U8_(e->poolId_, e->refCtr_); // pool Id & refCtr of the evt
    QS_END_NOCRIT_()

    // is it a dynamic event?
    if (e->poolId_ != static_cast<uint8_t>(0)) {
        QF_EVT_REF_CTR_INC_(e); // increment the reference counter, NOTE01
    }
    QF_CRIT_EXIT_();

    QF_SCHED_STAT_TYPE_ lockStat;
    lockStat.m_lockPrio = static_cast<uint_fast8_t>(0xFF); // uninitialized

#if (QF_MAX_ACTIVE <= 8)
    uint_fast8_t tmp = static_cast<uint_fast8_t>(
                           QF_PTR_AT_(QF_subscrList_, e->sig).m_bits[0]);
    while (tmp != static_cast<uint_fast8_t>(0)) {
        uint_fast8_t p = static_cast<uint_fast8_t>(QF_LOG2(tmp));

        // clear the subscriber bit
        tmp &= static_cast<uint_fast8_t>(QF_invPwr2Lkup[p]);

        // has the scheduler been locked yet?
        if (lockStat.m_lockPrio == static_cast<uint_fast8_t>(0xFF)) {
            QF_SCHED_LOCK_(&lockStat, p);
        }

        // the priority of the AO must be registered with the framework
        Q_ASSERT_ID(110, active_[p] != static_cast<QMActive *>(0));

        // POST() asserts internally if the queue overflows
        (void)active_[p]->POST(e, sender);
    }
#else
    uint_fast8_t i = static_cast<uint_fast8_t>(QF_SUBSCR_LIST_SIZE);

    // go through all bytes in the subscription list
    do {
        --i;
        uint_fast8_t tmp = static_cast<uint_fast8_t>(
                               QF_PTR_AT_(QF_subscrList_, e->sig).m_bits[i]);
        while (tmp != static_cast<uint_fast8_t>(0)) {
            uint_fast8_t p = static_cast<uint_fast8_t>(QF_LOG2(tmp));

            // clear the subscriber bit
            tmp &= static_cast<uint_fast8_t>(QF_invPwr2Lkup[p]);

            // adjust the priority
            p += static_cast<uint_fast8_t>(i << 3);

            // has the scheduler been locked yet?
            if (lockStat.m_lockPrio == static_cast<uint_fast8_t>(0xFF)) {
                QF_SCHED_LOCK_(&lockStat, p);
            }

            // the priority level must be registered with the framework
            Q_ASSERT(active_[p] != static_cast<QMActive *>(0));

            // POST() asserts internally if the queue overflows
            (void)active_[p]->POST(e, sender);
        }
    } while (i != static_cast<uint_fast8_t>(0));
#endif

    // was the scheduler locked?
    if (lockStat.m_lockPrio <= static_cast<uint_fast8_t>(QF_MAX_ACTIVE)) {
        QF_SCHED_UNLOCK_(&lockStat); // unlock the scheduler
    }

    // run the garbage collector, see NOTE01
    gc(e);
}

//****************************************************************************
// NOTE01:
// QP::QF::publish_() increments the reference counter to prevent premature
// recycling of the event while the multicasting is still in progress.
// At the end of the function, the garbage collector step decrements the
// reference counter and recycles the event if the counter drops to zero.
// This covers the case when the event was published without any subscribers.
//