/**
 * @brief   Locks the specified mutex.
 * @post    The mutex is locked and inserted in the per-thread stack of owned
 *          mutexes.
 *
 * @param[in] mp        pointer to the @p mutex_t structure
 *
 * @api
 */
void chMtxLock(mutex_t *mp) {

  /* API-level wrapper: brackets the S-class primitive with the system
     lock/unlock pair so it can be called from normal thread context.*/
  chSysLock();
  chMtxLockS(mp);
  chSysUnlock();
}
/**
 * @brief   Locks the specified mutex.
 * @post    The mutex is locked and inserted in the per-thread stack of owned
 *          mutexes.
 *
 * @param[in] mp        pointer to the @p Mutex structure
 *
 * @api
 */
void chMtxLock(Mutex *mp) {

  /* API-level wrapper: enters the kernel lock state around the S-class
     variant, then leaves it (rescheduling happens inside chSysUnlock()).*/
  chSysLock();
  chMtxLockS(mp);
  chSysUnlock();
}
/************************************************************************ * NAME: fnet_os_mutex_lock; * * DESCRIPTION: *************************************************************************/ void fnet_os_mutex_lock(void) { chSysLock(); if (chThdSelf() != FnetMutex.m_owner) { // Not owned. Lock. chMtxLockS(&FnetMutex); } FnetMutexCount++; chSysUnlock(); }
/* Coverage helper thread: after a delay, exercises the S-class lock and
   unlock variants on mutex m2 from within a system-locked section.*/
static THD_FUNCTION(thread4B, p) {

  (void)p;

  chThdSleepMilliseconds(150);
  chSysLock();
  chMtxLockS(&m2);      /* For coverage of the chMtxLockS() function variant.*/
  chMtxUnlockS(&m2);    /* For coverage of the chMtxUnlockS() function variant.*/
  chSchRescheduleS();   /* S-class unlock does not reschedule; do it explicitly.*/
  chSysUnlock();
}
/**
 * @brief   Gains exclusive access to the ILI9341 module.
 * @details This function tries to gain ownership to the ILI9341 module, if the
 *          module is already being used then the invoking thread is queued.
 * @pre     In order to use this function the option
 *          @p ILI9341_USE_MUTUAL_EXCLUSION must be enabled.
 * @pre     ILI9341 is ready.
 *
 * @param[in] driverp   pointer to the @p ILI9341Driver object
 *
 * @sclass
 */
void ili9341AcquireBusS(ILI9341Driver *driverp) {

  osalDbgCheckClassS();
  /* Only the singleton driver instance is supported by this module.*/
  osalDbgCheck(driverp == &ILI9341D1);
  osalDbgAssert(driverp->state == ILI9341_READY, "not ready");

  /* The lock field is a mutex or a semaphore depending on the kernel
     configuration; both acquire calls block the caller when contended.*/
#if (TRUE == CH_CFG_USE_MUTEXES)
  chMtxLockS(&driverp->lock);
#else
  chSemWaitS(&driverp->lock);
#endif
}
/**
 * @brief   Locks a recursive mutex.
 *
 * @param[in] mtx        pointer to instance of @p struct pios_recursive_mutex
 * @param[in] timeout_ms timeout for acquiring the lock in milliseconds
 *
 * @note    The underlying ChibiOS mutex primitive has no timed-lock variant,
 *          so @p timeout_ms is currently NOT honored: the call blocks until
 *          the mutex is acquired and always reports success.
 *
 * @returns true on success or false on timeout or failure
 */
bool PIOS_Recursive_Mutex_Lock(struct pios_recursive_mutex *mtx,
                               uint32_t timeout_ms)
{
    PIOS_Assert(mtx != NULL);

    /* Timeout is not implementable on top of chMtxLockS() (see note);
       explicitly discard it to silence unused-parameter warnings.*/
    (void) timeout_ms;

    chSysLock();

    /* Only the outermost acquisition by this thread takes the kernel
       mutex; nested calls merely bump the recursion counter.*/
    if (chThdSelf() != mtx->mtx.m_owner)
        chMtxLockS(&mtx->mtx);

    ++mtx->count;

    chSysUnlock();

    return true;
}
/**
 * @brief   Waits on the condition variable releasing the mutex lock.
 * @details Releases the currently owned mutex, waits on the condition
 *          variable, and finally acquires the mutex again. All the sequence
 *          is performed atomically.
 * @pre     The invoking thread <b>must</b> have at least one owned mutex.
 * @pre     The configuration option @p CH_USE_CONDVARS_TIMEOUT must be enabled
 *          in order to use this function.
 * @post    Exiting the function because a timeout does not re-acquire the
 *          mutex, the mutex ownership is lost.
 *
 * @param[in] cp        pointer to the @p CondVar structure
 * @param[in] time      the number of ticks before the operation timeouts, the
 *                      special values are handled as follow:
 *                      - @a TIME_INFINITE no timeout.
 *                      - @a TIME_IMMEDIATE this value is not allowed.
 *                      .
 * @return              A message specifying how the invoking thread has been
 *                      released from the condition variable.
 * @retval RDY_OK       if the condvar has been signaled using
 *                      @p chCondSignal().
 * @retval RDY_RESET    if the condvar has been signaled using
 *                      @p chCondBroadcast().
 * @retval RDY_TIMEOUT  if the condvar has not been signaled within the
 *                      specified timeout.
 *
 * @sclass
 */
msg_t chCondWaitTimeoutS(CondVar *cp, systime_t time) {
  Mutex *mp;
  msg_t msg;

  chDbgCheck((cp != NULL) && (time != TIME_IMMEDIATE), "chCondWaitTimeoutS");
  chDbgAssert(currp->p_mtxlist != NULL,
              "chCondWaitTimeoutS(), #1", "not owning a mutex");

  /* Releasing the last acquired mutex; the pointer is kept in order to
     re-acquire it on wakeup.*/
  mp = chMtxUnlockS();

  /* Enqueuing on the condvar by priority, then going to sleep; the whole
     release-and-wait sequence is atomic because the kernel is locked.*/
  currp->p_u.wtobjp = cp;
  prio_insert(currp, &cp->c_queue);
  msg = chSchGoSleepTimeoutS(THD_STATE_WTCOND, time);

  /* On timeout the mutex is deliberately NOT re-acquired, as stated in the
     @post clause above; the caller must handle the lost ownership.*/
  if (msg != RDY_TIMEOUT)
    chMtxLockS(mp);
  return msg;
}
/**
 * @brief   Waits on the condition variable releasing the mutex lock.
 * @details Releases the currently owned mutex, waits on the condition
 *          variable, and finally acquires the mutex again. All the sequence
 *          is performed atomically.
 * @pre     The invoking thread <b>must</b> have at least one owned mutex.
 * @pre     The configuration option @p CH_CFG_USE_CONDVARS_TIMEOUT must be enabled
 *          in order to use this function.
 * @post    Exiting the function because a timeout does not re-acquire the
 *          mutex, the mutex ownership is lost.
 *
 * @param[in] cp        pointer to the @p condition_variable_t structure
 * @param[in] time      the number of ticks before the operation timeouts, the
 *                      special values are handled as follow:
 *                      - @a TIME_INFINITE no timeout.
 *                      - @a TIME_IMMEDIATE this value is not allowed.
 *                      .
 * @return              A message specifying how the invoking thread has been
 *                      released from the condition variable.
 * @retval MSG_OK       if the condition variable has been signaled using
 *                      @p chCondSignal().
 * @retval MSG_RESET    if the condition variable has been signaled using
 *                      @p chCondBroadcast().
 * @retval MSG_TIMEOUT  if the condition variable has not been signaled within
 *                      the specified timeout.
 *
 * @sclass
 */
msg_t chCondWaitTimeoutS(condition_variable_t *cp, systime_t time) {
  mutex_t *mp;
  msg_t msg;

  chDbgCheckClassS();
  chDbgCheck((cp != NULL) && (time != TIME_IMMEDIATE));
  chDbgAssert(currp->p_mtxlist != NULL, "not owning a mutex");

  /* Fetching the most recently acquired mutex and releasing it; the
     pointer is kept so it can be re-acquired on wakeup.*/
  mp = chMtxGetNextMutexS();
  chMtxUnlockS(mp);

  /* Queuing on the condvar by priority and sleeping; atomic with the
     release above because the kernel is locked.*/
  currp->p_u.wtobjp = cp;
  queue_prio_insert(currp, &cp->c_queue);
  msg = chSchGoSleepTimeoutS(CH_STATE_WTCOND, time);

  /* On timeout the mutex is deliberately NOT re-acquired (see @post).*/
  if (msg != MSG_TIMEOUT)
    chMtxLockS(mp);
  return msg;
}
/**
 * @brief   Waits on the condition variable releasing the mutex lock.
 * @details Releases the currently owned mutex, waits on the condition
 *          variable, and finally acquires the mutex again. All the sequence
 *          is performed atomically.
 * @pre     The invoking thread <b>must</b> have at least one owned mutex.
 *
 * @param[in] cp        pointer to the @p CondVar structure
 * @return              A message specifying how the invoking thread has been
 *                      released from the condition variable.
 * @retval RDY_OK       if the condvar has been signaled using
 *                      @p chCondSignal().
 * @retval RDY_RESET    if the condvar has been signaled using
 *                      @p chCondBroadcast().
 *
 * @sclass
 */
msg_t chCondWaitS(CondVar *cp) {
  Thread *ctp = currp;
  Mutex *mp;
  msg_t msg;

  chDbgCheck(cp != NULL, "chCondWaitS");
  chDbgAssert(ctp->p_mtxlist != NULL,
              "chCondWaitS(), #1", "not owning a mutex");

  /* Releasing the last acquired mutex; the pointer is kept so that the
     same mutex can be re-acquired after the wait.*/
  mp = chMtxUnlockS();

  /* Enqueuing on the condvar by priority, then sleeping without timeout;
     atomic with the release because the kernel is locked.*/
  ctp->p_u.wtobjp = cp;
  prio_insert(ctp, &cp->c_queue);
  chSchGoSleepS(THD_STATE_WTCOND);

  /* The wakeup message is sampled before re-acquiring the mutex because
     chMtxLockS() may sleep and overwrite the ready message.*/
  msg = ctp->p_u.rdymsg;
  chMtxLockS(mp);
  return msg;
}
/**
 * @brief   Waits on the condition variable releasing the mutex lock.
 * @details Releases the currently owned mutex, waits on the condition
 *          variable, and finally acquires the mutex again. All the sequence
 *          is performed atomically.
 * @pre     The invoking thread <b>must</b> have at least one owned mutex.
 *
 * @param[in] cp        pointer to the @p condition_variable_t structure
 * @return              A message specifying how the invoking thread has been
 *                      released from the condition variable.
 * @retval MSG_OK       if the condition variable has been signaled using
 *                      @p chCondSignal().
 * @retval MSG_RESET    if the condition variable has been signaled using
 *                      @p chCondBroadcast().
 *
 * @sclass
 */
msg_t chCondWaitS(condition_variable_t *cp) {
  thread_t *ctp = currp;
  mutex_t *mp;
  msg_t msg;

  chDbgCheckClassS();
  chDbgCheck(cp != NULL);
  chDbgAssert(ctp->p_mtxlist != NULL, "not owning a mutex");

  /* Fetching the most recently acquired mutex and releasing it; the
     pointer is kept for re-acquisition after the wait.*/
  mp = chMtxGetNextMutexS();
  chMtxUnlockS(mp);

  /* Queuing on the condvar by priority and sleeping without timeout;
     atomic with the release because the kernel is locked.*/
  ctp->p_u.wtobjp = cp;
  queue_prio_insert(ctp, &cp->c_queue);
  chSchGoSleepS(CH_STATE_WTCOND);

  /* Sampling the wakeup message before re-taking the mutex, which may
     itself sleep and overwrite the ready message.*/
  msg = ctp->p_u.rdymsg;
  chMtxLockS(mp);
  return msg;
}
/**
 * @brief   Waits on the condition variable releasing the mutex lock.
 * @details Releases the currently owned mutex, waits on the condition
 *          variable, and finally acquires the mutex again. All the sequence
 *          is performed atomically.
 * @pre     The invoking thread <b>must</b> have at least one owned mutex.
 * @pre     The configuration option @p CH_CFG_USE_CONDVARS_TIMEOUT must be enabled
 *          in order to use this function.
 * @post    Exiting the function because a timeout does not re-acquire the
 *          mutex, the mutex ownership is lost.
 *
 * @param[in] cp        pointer to the @p condition_variable_t structure
 * @param[in] time      the number of ticks before the operation timeouts, the
 *                      special values are handled as follow:
 *                      - @a TIME_INFINITE no timeout.
 *                      - @a TIME_IMMEDIATE this value is not allowed.
 *                      .
 * @return              A message specifying how the invoking thread has been
 *                      released from the condition variable.
 * @retval MSG_OK       if the condition variable has been signaled using
 *                      @p chCondSignal().
 * @retval MSG_RESET    if the condition variable has been signaled using
 *                      @p chCondBroadcast().
 * @retval MSG_TIMEOUT  if the condition variable has not been signaled within
 *                      the specified timeout.
 *
 * @sclass
 */
msg_t chCondWaitTimeoutS(condition_variable_t *cp, systime_t time) {
  mutex_t *mp;
  msg_t msg;

  chDbgCheckClassS();
  chDbgCheck((cp != NULL) && (time != TIME_IMMEDIATE));
  chDbgAssert(currp->p_mtxlist != NULL, "not owning a mutex");

  /* Getting "current" mutex and releasing it.*/
  mp = chMtxGetNextMutexS();
  chMtxUnlockS(mp);

  /* Start waiting on the condition variable, on exit the mutex is taken
     again.*/
  currp->p_u.wtobjp = cp;
  queue_prio_insert(currp, &cp->c_queue);
  msg = chSchGoSleepTimeoutS(CH_STATE_WTCOND, time);

  /* On timeout the mutex is deliberately NOT re-acquired, matching the
     @post clause above; the caller must handle the lost ownership.*/
  if (msg != MSG_TIMEOUT) {
    chMtxLockS(mp);
  }

  return msg;
}
/**
 * @brief   Waits on the condition variable releasing the mutex lock.
 * @details Releases the currently owned mutex, waits on the condition
 *          variable, and finally acquires the mutex again. All the sequence
 *          is performed atomically.
 * @pre     The invoking thread <b>must</b> have at least one owned mutex.
 *
 * @param[in] cp        pointer to the @p condition_variable_t structure
 * @return              A message specifying how the invoking thread has been
 *                      released from the condition variable.
 * @retval MSG_OK       if the condition variable has been signaled using
 *                      @p chCondSignal().
 * @retval MSG_RESET    if the condition variable has been signaled using
 *                      @p chCondBroadcast().
 *
 * @sclass
 */
msg_t chCondWaitS(condition_variable_t *cp) {
  thread_t *tp = currp;
  mutex_t *mtxp;
  msg_t result;

  chDbgCheckClassS();
  chDbgCheck(cp != NULL);
  chDbgAssert(tp->p_mtxlist != NULL, "not owning a mutex");

  /* Release the most recently acquired mutex, remembering it so it can
     be taken again after the wait completes.*/
  mtxp = chMtxGetNextMutexS();
  chMtxUnlockS(mtxp);

  /* Enqueue on the condition variable by priority and go to sleep; the
     kernel lock makes release-and-wait a single atomic sequence.*/
  tp->p_u.wtobjp = cp;
  queue_prio_insert(tp, &cp->c_queue);
  chSchGoSleepS(CH_STATE_WTCOND);

  /* Capture the wakeup message first, then re-acquire the mutex.*/
  result = tp->p_u.rdymsg;
  chMtxLockS(mtxp);

  return result;
}
/**
 * @brief   Locks the mutex (S-class variant).
 * @details Thin wrapper forwarding to @p chMtxLockS() on the wrapped
 *          kernel mutex object.
 *
 * @sclass
 */
void Mutex::lockS() {

  chMtxLockS(&mutex);
}
/* Test case [5.6]: exercises recursive mutex lock/trylock/unlock variants
   and verifies owner, wait queue, recursion counter and caller priority
   after each step.*/
static void test_005_006_execute(void) {
  bool b;
  tprio_t prio;

  /* [5.6.1] Getting current thread priority for later checks.*/
  test_set_step(1);
  {
    prio = chThdGetPriorityX();
  }

  /* [5.6.2] Locking the mutex first time, it must be possible because
     it is not owned.*/
  test_set_step(2);
  {
    b = chMtxTryLock(&m1);
    test_assert(b, "already locked");
  }

  /* [5.6.3] Locking the mutex second time, it must be possible because
     it is recursive.*/
  test_set_step(3);
  {
    b = chMtxTryLock(&m1);
    test_assert(b, "already locked");
  }

  /* [5.6.4] Unlocking the mutex then it must be still owned because
     recursivity.*/
  test_set_step(4);
  {
    chMtxUnlock(&m1);
    test_assert(m1.owner != NULL, "not owned");
  }

  /* [5.6.5] Unlocking the mutex then it must not be owned anymore and
     the queue must be empty.*/
  test_set_step(5);
  {
    chMtxUnlock(&m1);
    test_assert(m1.owner == NULL, "still owned");
    test_assert(queue_isempty(&m1.queue), "queue not empty");
  }

  /* [5.6.6] Testing that priority has not changed after operations.*/
  test_set_step(6);
  {
    test_assert(chThdGetPriorityX() == prio, "wrong priority level");
  }

  /* [5.6.7] Testing consecutive chMtxTryLock()/chMtxTryLockS() calls and
     a final chMtxUnlockAllS().*/
  test_set_step(7);
  {
    b = chMtxTryLock(&m1);
    test_assert(b, "already locked");
    /* The S-class variant requires an explicit system lock zone.*/
    chSysLock();
    b = chMtxTryLockS(&m1);
    chSysUnlock();
    test_assert(b, "already locked");
    test_assert(m1.cnt == 2, "invalid recursion counter");
    chSysLock();
    chMtxUnlockAllS();
    chSysUnlock();
    test_assert(m1.owner == NULL, "still owned");
    test_assert(queue_isempty(&m1.queue), "queue not empty");
    test_assert(m1.cnt == 0, "invalid recursion counter");
  }

  /* [5.6.8] Testing consecutive chMtxLock()/chMtxLockS() calls and a
     final chMtxUnlockAll().*/
  test_set_step(8);
  {
    chMtxLock(&m1);
    test_assert(m1.owner != NULL, "not owned");
    chSysLock();
    chMtxLockS(&m1);
    chSysUnlock();
    test_assert(m1.owner != NULL, "not owned");
    test_assert(m1.cnt == 2, "invalid recursion counter");
    chMtxUnlockAll();
    test_assert(m1.owner == NULL, "still owned");
    test_assert(queue_isempty(&m1.queue), "queue not empty");
    test_assert(m1.cnt == 0, "invalid recursion counter");
  }

  /* [5.6.9] Testing that priority has not changed after operations.*/
  test_set_step(9);
  {
    test_assert(chThdGetPriorityX() == prio, "wrong priority level");
  }
}