// Post event 'e' to this active object's queue (FIFO), respecting 'margin'.
// This variant maps the AO event queue onto an embOS mailbox (m_eQueue).
// The '#endif' below closes a Q_SPY dual-signature block that starts above
// this view; 'sender' exists only for software tracing.
// NOTE(review): this block appears truncated here — the failure branch
// (margin not available) and the final return are not visible in this chunk.
bool QMActive::post_(QEvt const * const e, uint_fast16_t const margin,
                     void const * const sender)
#endif
{
    uint_fast16_t nFree;  // free entries in the embOS mailbox
    bool status;          // outcome reported to the caller
    QF_CRIT_STAT_

    QF_CRIT_ENTRY_();

    // free entries = mailbox capacity minus messages currently queued
    nFree = static_cast<uint_fast16_t>(m_eQueue.maxMsg - m_eQueue.nofMsg);

    if (nFree > margin) { // required margin available?

        // software-trace record for the FIFO post (inside critical section)
        QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_POST_FIFO, QS::priv_.aoObjFilter, this)
            QS_TIME_();                              // timestamp
            QS_OBJ_(sender);                         // the sender object
            QS_SIG_(e->sig);                         // the signal of the event
            QS_OBJ_(this);                           // this active object (recipient)
            QS_2U8_(e->poolId_, e->refCtr_);         // pool Id & ref Count
            QS_EQC_(static_cast<QEQueueCtr>(nFree)); // # free entries
            QS_EQC_(static_cast<QEQueueCtr>(0));     // min # free (unknown)
        QS_END_NOCRIT_()

        if (e->poolId_ != static_cast<uint8_t>(0)) { // is it a pool event?
            // keep the event alive while it sits in the mailbox
            QF_EVT_REF_CTR_INC_(e); // increment the reference counter
        }

        // posting to the embOS mailbox must succeed, see NOTE3;
        // OS_PutMailCond() returns 0 (char) on success
        Q_ALLEGE_ID(710,
            OS_PutMailCond(&m_eQueue,
                           static_cast<OS_CONST_PTR void *>(&e))
            == static_cast<char>(0));

        status = true; // return success
    }
// Post event 'e' to the GUI active object by forwarding it to the Qt event
// loop. Posting always succeeds (returns true), so 'margin' is ignored.
// The '#endif' below closes a Q_SPY dual-signature block that starts above
// this view; 'sender' exists only for software tracing.
bool GuiQMActive::post_(QEvt const * const e, uint_fast16_t const /*margin*/,
                        void const * const sender)
#endif
{
    QF_CRIT_STAT_
    QF_CRIT_ENTRY_();

    // software-trace record for the FIFO post (inside critical section)
    QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_POST_FIFO, QS::priv_.aoObjFilter, this)
        QS_TIME_();                      // timestamp
        QS_OBJ_(sender);                 // the sender object
        QS_SIG_(e->sig);                 // the signal of the event
        QS_OBJ_(this);                   // this active object
        QS_2U8_(QF_EVT_POOL_ID_(e),     // the poolID of the event
                QF_EVT_REF_CTR_(e));    // the ref Ctr of the event
        QS_EQC_(0);                      // number of free entries (not used)
        QS_EQC_(0);                      // min number of free entries (not used)
    QS_END_NOCRIT_()

    // is it a dynamic event?
    if (QF_EVT_POOL_ID_(e) != static_cast<uint8_t>(0)) {
        // keep the event alive until the Qt event loop delivers it
        QF_EVT_REF_CTR_INC_(e); // increment the reference counter
    }
    QF_CRIT_EXIT_();

    // QCoreApplication::postEvent() is thread-safe per Qt documentation;
    // the QEvt is wrapped in a heap-allocated QP_Event for Qt delivery
    QCoreApplication::postEvent(QApplication::instance(), new QP_Event(e));

    return true; // posting to the Qt event queue cannot fail
}
/* Post event 'e' to active object 'me' (FIFO), respecting 'margin'.
* This variant maps the AO event queue onto an embOS mailbox (me->eQueue).
* The '#endif' below closes a Q_SPY dual-signature block that starts above
* this view; 'sender' exists only for software tracing.
* NOTE(review): this block appears truncated here -- the failure branch
* (margin not available) and the final return are not visible in this chunk.
*/
bool QActive_post_(QActive * const me, QEvt const * const e,
                   uint_fast16_t const margin, void const * const sender)
#endif /* Q_SPY */
{
    uint_fast16_t nFree; /* free entries in the embOS mailbox */
    bool status;         /* outcome reported to the caller */
    QF_CRIT_STAT_

    QF_CRIT_ENTRY_();

    /* free entries = mailbox capacity minus messages currently queued */
    nFree = (uint_fast16_t)(me->eQueue.maxMsg - me->eQueue.nofMsg);

    if (nFree > margin) { /* required margin available? */

        /* software-trace record for the FIFO post (in critical section) */
        QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_POST_FIFO, QS_priv_.aoObjFilter, me)
            QS_TIME_();                      /* timestamp */
            QS_OBJ_(sender);                 /* the sender object */
            QS_SIG_(e->sig);                 /* the signal of the event */
            QS_OBJ_(me);                     /* this active object (recipient) */
            QS_2U8_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
            QS_EQC_((QEQueueCtr)nFree);      /* # free entries available */
            QS_EQC_((QEQueueCtr)0);          /* min # free entries (unknown) */
        QS_END_NOCRIT_()

        if (e->poolId_ != (uint8_t)0) { /* is it a pool event? */
            /* keep the event alive while it sits in the mailbox */
            QF_EVT_REF_CTR_INC_(e); /* increment the reference counter */
        }

        /* posting to the embOS mailbox must succeed, see NOTE3;
        * OS_PutMailCond() returns 0 (char) on success
        */
        Q_ALLEGE(OS_PutMailCond(&me->eQueue,
                                (OS_CONST_PTR void *)&e) == (char)0);

        status = true; /* return success */
    }
/*..........................................................................*/
/* Re-arm time event 'me' to fire in 'nTicks' clock ticks, shifting its
* phasing. If the event is currently disarmed, it is (re)linked into the
* framework's time-event list; if already armed, only its down-counter is
* re-loaded. Returns 1 if the event was armed on entry, 0 otherwise.
*/
uint8_t QTimeEvt_rearm(QTimeEvt * const me, QTimeEvtCtr const nTicks) {
    uint8_t wasArmed;
    QF_CRIT_STAT_

    /* preconditions: non-zero tick count, a valid active object to post to,
    * and a signal at or above the user signal offset
    */
    Q_REQUIRE((nTicks != (QTimeEvtCtr)0)
              && (me->act != (QActive *)0)
              && (me->super.sig >= (QSignal)Q_USER_SIG));

    QF_CRIT_ENTRY_();

    if (me->ctr != (QTimeEvtCtr)0) { /* counter running => already armed */
        wasArmed = (uint8_t)1;
    }
    else { /* the time event was disarmed */
        wasArmed = (uint8_t)0;
        /* the ref-counter of a time event doubles as the "linked" flag */
        if (QF_EVT_REF_CTR_(&me->super) == (uint8_t)0) { /* not linked? */
            /* push onto the head of the global time-event list */
            me->next = QF_timeEvtListHead_;
            QF_timeEvtListHead_ = me;
            QF_EVT_REF_CTR_INC_(&me->super); /* mark as linked */
        }
    }

    me->ctr = nTicks; /* re-load the tick counter (shift the phasing) */

    /* software-trace record for the rearm (inside critical section) */
    QS_BEGIN_NOCRIT_(QS_QF_TIMEEVT_REARM, QS_teObj_, me)
        QS_TIME_();            /* timestamp */
        QS_OBJ_(me);           /* this time event object */
        QS_OBJ_(me->act);      /* the active object */
        QS_TEC_(me->ctr);      /* the number of ticks */
        QS_TEC_(me->interval); /* the interval */
        QS_U8_(wasArmed);      /* was the timer armed? */
    QS_END_NOCRIT_()

    QF_CRIT_EXIT_();
    return wasArmed;
}
/* Publish event 'e' to every active object subscribed to e->sig.
* The '#endif' below closes a Q_SPY dual-signature block that starts above
* this view; 'sender' exists only for software tracing.
* The event's reference counter is bumped up-front so the event cannot be
* recycled while multicasting is in progress; QF_gc() at the end undoes
* this and recycles the event if no subscriber kept it (see NOTE01).
*/
void QF_publish_(QEvt const * const e, void const * const sender)
#endif
{
    QF_CRIT_STAT_

    /* make sure that the published signal is within the configured range */
    Q_REQUIRE(e->sig < (QSignal)QF_maxSignal_);

    QF_CRIT_ENTRY_();

    /* software-trace record for the publish (inside critical section) */
    QS_BEGIN_NOCRIT_(QS_QF_PUBLISH, (void *)0, (void *)0)
        QS_TIME_();                     /* the timestamp */
        QS_OBJ_(sender);                /* the sender object */
        QS_SIG_(e->sig);                /* the signal of the event */
        QS_2U8_(e->poolId_, e->refCtr_);/* pool Id & ref Count of the event */
    QS_END_NOCRIT_()

    if (e->poolId_ != (uint8_t)0) { /* is it a dynamic event? */
        QF_EVT_REF_CTR_INC_(e);     /* increment reference counter, NOTE01 */
    }
    QF_CRIT_EXIT_();

#if (QF_MAX_ACTIVE <= 8)
    /* <= 8 active objects: the subscriber list fits in one byte */
    {
        uint8_t tmp = QF_subscrList_[e->sig].bits[0];
        while (tmp != (uint8_t)0) {
            /* extract the highest-priority subscriber from the bitmask */
            uint8_t p = QF_LOG2(tmp);
            tmp &= Q_ROM_BYTE(QF_invPwr2Lkup[p]); /* clear subscriber bit */
            Q_ASSERT(QF_active_[p] != (QActive *)0); /* must be registered */

            /* QACTIVE_POST() asserts internally if the queue overflows */
            QACTIVE_POST(QF_active_[p], e, sender);
        }
    }
#else
    /* > 8 active objects: walk every byte of the subscriber bitmask,
    * from the highest byte (highest priorities) down to byte 0
    */
    {
        uint_t i = (uint_t)Q_DIM(QF_subscrList_[0].bits);
        do { /* go through all bytes in the subscription list */
            uint8_t tmp;
            --i;
            tmp = QF_PTR_AT_(QF_subscrList_, e->sig).bits[i];
            while (tmp != (uint8_t)0) {
                uint8_t p = QF_LOG2(tmp);
                tmp &= Q_ROM_BYTE(QF_invPwr2Lkup[p]);/*clear subscriber bit */
                /* byte index contributes bit-position * 8 to the priority */
                p = (uint8_t)(p + (uint8_t)(i << 3));/* adjust the priority */
                Q_ASSERT(QF_active_[p] != (QActive *)0);/*must be registered*/

                /* QACTIVE_POST() asserts internally if the queue overflows */
                QACTIVE_POST(QF_active_[p], e, sender);
            }
        } while (i != (uint_t)0);
    }
#endif

    QF_gc(e); /* run the garbage collector, see NOTE01 */
}
/*..........................................................................*/
/* Post event 'e' into queue 'me' (FIFO), succeeding only when more than
* 'margin' free entries remain. The ring buffer stores events behind the
* separate 'frontEvt' slot; 'head' walks downward and wraps via 'end'.
* NOTE(review): this block appears truncated here -- the failure branch
* (margin not available) and the final return are not visible in this chunk.
*/
uint8_t QEQueue_post(QEQueue * const me, QEvt const * const e,
                     uint16_t const margin)
{
    QEQueueCtr nFree; /* temporary to avoid UB for volatile access */
    uint8_t status;   /* outcome reported to the caller */
    QF_CRIT_STAT_

    Q_REQUIRE(e != (QEvt const *)0); /* event must be valid */

    QF_CRIT_ENTRY_();
    nFree = me->nFree; /* get volatile into the temporary */

    if (nFree > (QEQueueCtr)margin) { /* required margin available? */

        /* software-trace record for the FIFO post (in critical section) */
        QS_BEGIN_NOCRIT_(QS_QF_EQUEUE_POST_FIFO, QS_priv_.eqObjFilter, me)
            QS_TIME_();                      /* timestamp */
            QS_SIG_(e->sig);                 /* the signal of this event */
            QS_OBJ_(me);                     /* this queue object */
            QS_2U8_(e->poolId_, e->refCtr_); /* pool Id & ref Count */
            QS_EQC_(nFree);                  /* number of free entries */
            QS_EQC_(me->nMin);               /* min number of free entries */
        QS_END_NOCRIT_()

        if (e->poolId_ != (uint8_t)0) { /* is it a pool event? */
            /* keep the event alive while it sits in the queue */
            QF_EVT_REF_CTR_INC_(e); /* increment the reference counter */
        }

        --nFree;           /* one free entry just used up */
        me->nFree = nFree; /* update the volatile */
        if (me->nMin > nFree) {
            me->nMin = nFree; /* update minimum so far */
        }

        if (me->frontEvt == (QEvt const *)0) { /* was the queue empty? */
            me->frontEvt = e; /* deliver event directly */
        }
        else { /* queue was not empty, insert event into the ring-buffer */
            /* insert event into the ring buffer (FIFO) */
            QF_PTR_AT_(me->ring, me->head) = e; /* insert e into buffer */
            if (me->head == (QEQueueCtr)0) { /* need to wrap the head? */
                me->head = me->end; /* wrap around */
            }
            --me->head; /* head walks downward through the buffer */
        }

        status = (uint8_t)1; /* event posted successfully */
    }
//............................................................................ void QActive::postLIFO(QEvt const * const e) { QF_CRIT_STAT_ QF_CRIT_ENTRY_(); QEQueueCtr nFree = m_eQueue.m_nFree;// tmp to avoid UB for volatile access // the queue must be able to accept the event (cannot overflow) Q_ASSERT(nFree != static_cast<QEQueueCtr>(0)); QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_POST_LIFO, QS::priv_.aoObjFilter, this) QS_TIME_(); // timestamp QS_SIG_(e->sig); // the signal of this event QS_OBJ_(this); // this active object QS_2U8_(e->poolId_, e->refCtr_); // pool Id & refCtr of the evt QS_EQC_(nFree); // number of free entries QS_EQC_(m_eQueue.m_nMin); // min number of free entries QS_END_NOCRIT_() if (e->poolId_ != u8_0) { // is it a dynamic event? QF_EVT_REF_CTR_INC_(e); // increment the reference counter } --nFree; // one free entry just used up m_eQueue.m_nFree = nFree; // update the volatile if (m_eQueue.m_nMin > nFree) { m_eQueue.m_nMin = nFree; // update minimum so far } QEvt const *frontEvt = m_eQueue.m_frontEvt;// read volatile into temporary m_eQueue.m_frontEvt = e; // deliver the event directly to the front if (frontEvt == null_evt) { // is the queue empty? QACTIVE_EQUEUE_SIGNAL_(this); // signal the event queue } else { // queue is not empty, leave event in the ring-buffer ++m_eQueue.m_tail; if (m_eQueue.m_tail == m_eQueue.m_end) { // need to wrap the tail? m_eQueue.m_tail = static_cast<QEQueueCtr>(0); // wrap around } QF_PTR_AT_(m_eQueue.m_ring, m_eQueue.m_tail) = frontEvt; } QF_CRIT_EXIT_(); }
//............................................................................ void QActive::postLIFO(QEvt const * const e) { QF_CRIT_STAT_ QF_CRIT_ENTRY_(); QS_BEGIN_NOCRIT_(QS_QF_ACTIVE_POST_LIFO, QS::aoObj_, this) QS_TIME_(); // timestamp QS_SIG_(e->sig); // the signal of this event QS_OBJ_(this); // this active object QS_U8_(QF_EVT_POOL_ID_(e)); // the pool Id of the event QS_U8_(QF_EVT_REF_CTR_(e)); // the ref count of the event QS_EQC_(m_eQueue.m_nFree); // number of free entries QS_EQC_(m_eQueue.m_nMin); // min number of free entries QS_END_NOCRIT_() if (QF_EVT_POOL_ID_(e) != u8_0) { // is it a dynamic event? QF_EVT_REF_CTR_INC_(e); // increment the reference counter } if (m_eQueue.m_frontEvt == null_evt) { // is the queue empty? m_eQueue.m_frontEvt = e; // deliver event directly QACTIVE_EQUEUE_SIGNAL_(this); // signal the event queue } else { // queue is not empty, leave event in the ring-buffer // queue must accept all posted events Q_ASSERT(m_eQueue.m_nFree != static_cast<QEQueueCtr>(0)); ++m_eQueue.m_tail; if (m_eQueue.m_tail == m_eQueue.m_end) { // need to wrap the tail? m_eQueue.m_tail = static_cast<QEQueueCtr>(0); // wrap around } QF_PTR_AT_(m_eQueue.m_ring, m_eQueue.m_tail) = m_eQueue.m_frontEvt; m_eQueue.m_frontEvt = e; // put event to front --m_eQueue.m_nFree; // update number of free events if (m_eQueue.m_nMin > m_eQueue.m_nFree) { m_eQueue.m_nMin = m_eQueue.m_nFree; // update minimum so far } } QF_CRIT_EXIT_(); }
/*..........................................................................*/
/* Post event 'e' into queue 'me' at the FRONT (LIFO policy).
* LIFO posting cannot fail: when the queue is non-empty it is asserted to
* have room. The new event becomes the front event; the previous front
* event (if any) is pushed back into the ring buffer at the advanced tail.
*/
void QEQueue_postLIFO(QEQueue *me, QEvent const *e) {
    QF_CRIT_STAT_
    QF_CRIT_ENTRY_();

    /* software-trace record for the LIFO post (inside critical section) */
    QS_BEGIN_NOCRIT_(QS_QF_EQUEUE_POST_LIFO, QS_eqObj_, me)
        QS_TIME_();                 /* timestamp */
        QS_SIG_(e->sig);            /* the signal of this event */
        QS_OBJ_(me);                /* this queue object */
        QS_U8_(QF_EVT_POOL_ID_(e)); /* the pool Id of the event */
        QS_U8_(QF_EVT_REF_CTR_(e)); /* the ref count of the event */
        QS_EQC_(me->nFree);         /* number of free entries */
        QS_EQC_(me->nMin);          /* min number of free entries */
    QS_END_NOCRIT_()

    if (QF_EVT_POOL_ID_(e) != (uint8_t)0) { /* is it a pool event? */
        /* keep the event alive while it sits in the queue */
        QF_EVT_REF_CTR_INC_(e); /* increment the reference counter */
    }

    if (me->frontEvt != (QEvent *)0) { /* is the queue not empty? */
        /* the queue must be able to accept the event (cannot overflow) */
        Q_ASSERT(me->nFree != (QEQueueCtr)0);

        ++me->tail;
        if (me->tail == me->end) { /* need to wrap the tail? */
            me->tail = (QEQueueCtr)0; /* wrap around */
        }

        QF_PTR_AT_(me->ring, me->tail) = me->frontEvt;/* save old front evt */

        --me->nFree; /* update number of free events */
        if (me->nMin > me->nFree) {
            me->nMin = me->nFree; /* update minimum so far */
        }
    }

    me->frontEvt = e; /* stick the new event to the front */

    QF_CRIT_EXIT_();
}
// Publish event 'e' to every active object subscribed to e->sig.
// The '#else' below belongs to a Q_SPY conditional whose '#if' is above
// this view: the two signatures differ only in the trace-only 'sender'.
// The scheduler is locked (at the priority of the first subscriber found)
// for the duration of the multicast and unlocked afterwards.
void QF::publish_(QEvt const * const e) {
#else
void QF::publish_(QEvt const * const e,
                  void const * const sender)
{
#endif
    /// @pre the published signal must be within the configured range
    Q_REQUIRE_ID(100, static_cast<enum_t>(e->sig) < QF_maxSignal_);

    QF_CRIT_STAT_
    QF_CRIT_ENTRY_();

    // software-trace record for the publish (inside critical section)
    QS_BEGIN_NOCRIT_(QS_QF_PUBLISH,
                     static_cast<void *>(0), static_cast<void *>(0))
        QS_TIME_();                      // the timestamp
        QS_OBJ_(sender);                 // the sender object
        QS_SIG_(e->sig);                 // the signal of the event
        QS_2U8_(e->poolId_, e->refCtr_); // pool Id & refCtr of the evt
    QS_END_NOCRIT_()

    // is it a dynamic event?
    if (e->poolId_ != static_cast<uint8_t>(0)) {
        QF_EVT_REF_CTR_INC_(e); // increment the reference counter, NOTE01
    }
    QF_CRIT_EXIT_();

    // 0xFF marks "scheduler not locked yet"; the lock is taken lazily at
    // the priority of the first subscriber found
    QF_SCHED_STAT_TYPE_ lockStat;
    lockStat.m_lockPrio = static_cast<uint_fast8_t>(0xFF); // uninitialized

#if (QF_MAX_ACTIVE <= 8)
    // <= 8 active objects: the subscriber list fits in one byte
    uint_fast8_t tmp = static_cast<uint_fast8_t>(
        QF_PTR_AT_(QF_subscrList_, e->sig).m_bits[0]);
    while (tmp != static_cast<uint8_t>(0)) {
        // extract the highest-priority subscriber from the bitmask
        uint_fast8_t p = static_cast<uint_fast8_t>(QF_LOG2(tmp));

        // clear the subscriber bit
        tmp &= static_cast<uint_fast8_t>(QF_invPwr2Lkup[p]);

        // has the scheduler been locked yet?
        if (lockStat.m_lockPrio == static_cast<uint_fast8_t>(0xFF)) {
            QF_SCHED_LOCK_(&lockStat, p);
        }

        // the priority of the AO must be registered with the framework
        Q_ASSERT_ID(110, active_[p] != static_cast<QMActive *>(0));

        // POST() asserts internally if the queue overflows
        (void)active_[p]->POST(e, sender);
    }
#else
    // > 8 active objects: walk every byte of the subscriber bitmask,
    // from the highest byte (highest priorities) down to byte 0
    uint_fast8_t i = static_cast<uint_fast8_t>(QF_SUBSCR_LIST_SIZE);

    // go through all bytes in the subscription list
    do {
        --i;
        uint_fast8_t tmp = static_cast<uint_fast8_t>(
            QF_PTR_AT_(QF_subscrList_, e->sig).m_bits[i]);
        while (tmp != static_cast<uint_fast8_t>(0)) {
            uint_fast8_t p = static_cast<uint_fast8_t>(QF_LOG2(tmp));

            // clear the subscriber bit
            tmp &= static_cast<uint_fast8_t>(QF_invPwr2Lkup[p]);

            // adjust the priority: byte index contributes bit-pos * 8
            p += static_cast<uint_fast8_t>(i << 3);

            // has the scheduler been locked yet?
            if (lockStat.m_lockPrio == static_cast<uint_fast8_t>(0xFF)) {
                QF_SCHED_LOCK_(&lockStat, p);
            }

            // the priority level be registered with the framework
            Q_ASSERT(active_[p] != static_cast<QMActive *>(0));

            // POST() asserts internally if the queue overflows
            (void)active_[p]->POST(e, sender);
        }
    } while (i != static_cast<uint_fast8_t>(0));
#endif

    // was the scheduler locked?
    if (lockStat.m_lockPrio <= static_cast<uint_fast8_t>(QF_MAX_ACTIVE)) {
        QF_SCHED_UNLOCK_(&lockStat); // unlock the scheduler
    }

    // run the garbage collector
    gc(e);

    // NOTE: QP::QF::publish_() increments the reference counter to prevent
    // premature recycling of the event while the multicasting is still
    // in progress. At the end of the function, the garbage collector step
    // decrements the reference counter and recycles the event if the
    // counter drops to zero. This covers the case when the event was
    // published without any subscribers.
}