/*
*  OSTimeDly()
*
*  Delay the calling task for 'dly' clock ticks (or until the tick counter
*  matches 'dly' when OS_OPT_TIME_MATCH is used).
*
*  Arguments : dly     number of ticks to delay, or absolute tick value to
*                      match (OS_OPT_TIME_MATCH).
*              opt     OS_OPT_TIME_DLY / OS_OPT_TIME_TIMEOUT /
*                      OS_OPT_TIME_PERIODIC / OS_OPT_TIME_MATCH.
*              p_err   pointer to an error code:
*                        OS_ERR_NONE, OS_ERR_TIME_DLY_ISR,
*                        OS_ERR_SCHED_LOCKED, OS_ERR_TIME_ZERO_DLY,
*                        OS_ERR_OPT_INVALID, or an error from
*                        OS_TickListInsert().
*
*  Note      : Must be called from task level; the caller is removed from the
*              ready list and the scheduler is invoked to pick the next task.
*/
void  OSTimeDly (OS_TICK   dly,
                 OS_OPT    opt,
                 OS_ERR   *p_err)
{
    CPU_SR_ALLOC();



#ifdef OS_SAFETY_CRITICAL
    if (p_err == (OS_ERR *)0) {                              /* Safety-critical build: trap NULL error pointer       */
        OS_SAFETY_CRITICAL_EXCEPTION();
        return;
    }
#endif

#if OS_CFG_CALLED_FROM_ISR_CHK_EN > 0u
    if (OSIntNestingCtr > (OS_NESTING_CTR)0u) {              /* Not allowed to call from an ISR                      */
       *p_err = OS_ERR_TIME_DLY_ISR;
        return;
    }
#endif

    if (OSSchedLockNestingCtr > (OS_NESTING_CTR)0u) {        /* Can't delay when the scheduler is locked             */
       *p_err = OS_ERR_SCHED_LOCKED;
        return;
    }

    switch (opt) {                                           /* Validate the time option                             */
        case OS_OPT_TIME_DLY:
        case OS_OPT_TIME_TIMEOUT:
        case OS_OPT_TIME_PERIODIC:
             if (dly == (OS_TICK)0u) {                       /* 0 means no delay!                                    */
                *p_err = OS_ERR_TIME_ZERO_DLY;
                 return;
             }
             break;

        case OS_OPT_TIME_MATCH:                              /* Absolute match: a 0 value is legal here              */
             break;

        default:
            *p_err = OS_ERR_OPT_INVALID;
             return;
    }

    OS_CRITICAL_ENTER();
    OSTCBCurPtr->TaskState = OS_TASK_STATE_DLY;              /* Mark current task as delayed                         */
    OS_TickListInsert(OSTCBCurPtr, dly, opt, p_err);         /* Place task on the tick (delta) list                  */
    if (*p_err != OS_ERR_NONE) {                             /* Insertion failed: leave task ready, propagate error  */
         OS_CRITICAL_EXIT_NO_SCHED();
         return;
    }
    OS_RdyListRemove(OSTCBCurPtr);                           /* Remove current task from ready list                  */
    OS_CRITICAL_EXIT_NO_SCHED();
    OSSched();                                               /* Find next task to run!                               */
   *p_err = OS_ERR_NONE;
}
/*
*  OSTimeDlyHMSM()
*
*  Delay the calling task for the specified hours, minutes, seconds and
*  milliseconds, converted to clock ticks (rounded to the nearest tick).
*
*  Arguments : hours    number of hours   (strict mode: 0..99,  non-strict: 0..999)
*              minutes  number of minutes (strict mode: 0..59,  non-strict: 0..9999)
*              seconds  number of seconds (strict mode: 0..59)
*              milli    number of milliseconds (strict mode: 0..999)
*              opt      time option (OS_OPT_TIME_DLY / _TIMEOUT / _PERIODIC /
*                       _MATCH), optionally OR'ed with OS_OPT_TIME_HMSM_NON_STRICT
*                       to relax the range checks above.
*              p_err    pointer to an error code (OS_ERR_NONE on success; see
*                       the range/option error codes assigned below).
*
*  Note      : Must be called from task level.  The total delay must fit in an
*              OS_TICK; very large H:M:S values can overflow the tick math.
*/
void  OSTimeDlyHMSM (CPU_INT16U   hours,
                     CPU_INT16U   minutes,
                     CPU_INT16U   seconds,
                     CPU_INT32U   milli,
                     OS_OPT       opt,
                     OS_ERR      *p_err)
{
#if OS_CFG_ARG_CHK_EN > 0u
    CPU_BOOLEAN  opt_invalid;
    CPU_BOOLEAN  opt_non_strict;
#endif
    OS_OPT       opt_time;
    OS_RATE_HZ   tick_rate;
    OS_TICK      ticks;
    CPU_SR_ALLOC();



#ifdef OS_SAFETY_CRITICAL
    if (p_err == (OS_ERR *)0) {                              /* Safety-critical build: trap NULL error pointer       */
        OS_SAFETY_CRITICAL_EXCEPTION();
        return;
    }
#endif

#if OS_CFG_CALLED_FROM_ISR_CHK_EN > 0u
    if (OSIntNestingCtr > (OS_NESTING_CTR)0u) {              /* Not allowed to call from an ISR                      */
       *p_err = OS_ERR_TIME_DLY_ISR;
        return;
    }
#endif

    if (OSSchedLockNestingCtr > (OS_NESTING_CTR)0u) {        /* Can't delay when the scheduler is locked             */
       *p_err = OS_ERR_SCHED_LOCKED;
        return;
    }

    opt_time = opt & OS_OPT_TIME_MASK;                       /* Retrieve time options only.                          */
    switch (opt_time) {
        case OS_OPT_TIME_DLY:
        case OS_OPT_TIME_TIMEOUT:
        case OS_OPT_TIME_PERIODIC:
             if (milli == (CPU_INT32U)0u) {                  /* Make sure we didn't specify a 0 delay                */
                 if (seconds == (CPU_INT16U)0u) {
                     if (minutes == (CPU_INT16U)0u) {
                         if (hours == (CPU_INT16U)0u) {
                            *p_err = OS_ERR_TIME_ZERO_DLY;
                             return;
                         }
                     }
                 }
             }
             break;

        case OS_OPT_TIME_MATCH:                              /* Absolute match: all-zero arguments are legal         */
             break;

        default:
            *p_err = OS_ERR_OPT_INVALID;
             return;
    }

#if OS_CFG_ARG_CHK_EN > 0u                                   /* Validate arguments to be within range                */
    opt_invalid = DEF_BIT_IS_SET_ANY(opt, ~OS_OPT_TIME_OPTS_MASK);
    if (opt_invalid == DEF_YES) {                            /* Reject any bit outside the defined option mask       */
       *p_err = OS_ERR_OPT_INVALID;
        return;
    }

    opt_non_strict = DEF_BIT_IS_SET(opt, OS_OPT_TIME_HMSM_NON_STRICT);
    if (opt_non_strict != DEF_YES) {                         /* Strict mode: wall-clock style ranges                 */
         if (milli   > (CPU_INT32U)999u) {
            *p_err = OS_ERR_TIME_INVALID_MILLISECONDS;
             return;
         }
         if (seconds > (CPU_INT16U)59u) {
            *p_err = OS_ERR_TIME_INVALID_SECONDS;
             return;
         }
         if (minutes > (CPU_INT16U)59u) {
            *p_err = OS_ERR_TIME_INVALID_MINUTES;
             return;
         }
         if (hours   > (CPU_INT16U)99u) {
            *p_err = OS_ERR_TIME_INVALID_HOURS;
             return;
         }
    } else {                                                 /* Non-strict mode: wider ranges allowed                */
         if (minutes > (CPU_INT16U)9999u) {
            *p_err = OS_ERR_TIME_INVALID_MINUTES;
             return;
         }
         if (hours   > (CPU_INT16U)999u) {
            *p_err = OS_ERR_TIME_INVALID_HOURS;
             return;
         }
    }
#endif

                                                             /* Compute the total number of clock ticks required..   */
                                                             /* .. (rounded to the nearest tick)                     */
                                                             /* 500u/tick_rate is half a tick period expressed in ms */
    tick_rate = OSCfg_TickRate_Hz;
    ticks     = ((OS_TICK)hours * (OS_TICK)3600u + (OS_TICK)minutes * (OS_TICK)60u + (OS_TICK)seconds) * tick_rate
              + (tick_rate * ((OS_TICK)milli + (OS_TICK)500u / tick_rate)) / (OS_TICK)1000u;

    if (ticks > (OS_TICK)0u) {
        OS_CRITICAL_ENTER();
        OSTCBCurPtr->TaskState = OS_TASK_STATE_DLY;          /* Mark current task as delayed                         */
        OS_TickListInsert(OSTCBCurPtr, ticks, opt_time, p_err);
        if (*p_err != OS_ERR_NONE) {                         /* Insertion failed: leave task ready, propagate error  */
             OS_CRITICAL_EXIT_NO_SCHED();
             return;
        }
        OS_RdyListRemove(OSTCBCurPtr);                       /* Remove current task from ready list                  */
        OS_CRITICAL_EXIT_NO_SCHED();
        OSSched();                                           /* Find next task to run!                               */
       *p_err = OS_ERR_NONE;
    } else {                                                 /* Requested time rounded down to zero ticks            */
       *p_err = OS_ERR_TIME_ZERO_DLY;
    }
}
/*
*  OSTimeDly()
*
*  Delay the calling task for 'dly' clock ticks (or until the tick counter
*  matches 'dly' when OS_OPT_TIME_MATCH is used).  This variant additionally
*  verifies that the kernel is running and compiles the blocking path only
*  when the tick task is enabled (OS_CFG_TASK_TICK_EN).
*
*  Arguments : dly     number of ticks to delay, or absolute tick value to
*                      match (OS_OPT_TIME_MATCH).
*              opt     OS_OPT_TIME_DLY / OS_OPT_TIME_TIMEOUT /
*                      OS_OPT_TIME_PERIODIC / OS_OPT_TIME_MATCH.
*              p_err   pointer to an error code:
*                        OS_ERR_NONE, OS_ERR_TIME_DLY_ISR,
*                        OS_ERR_OS_NOT_RUNNING, OS_ERR_SCHED_LOCKED,
*                        OS_ERR_TIME_ZERO_DLY, OS_ERR_OPT_INVALID, or an
*                        error from OS_TickListInsertDly().
*
*  Note      : With OS_CFG_TASK_TICK_EN disabled the function validates its
*              arguments but performs no delay (and leaves *p_err untouched
*              after validation) — NOTE(review): confirm this is the intended
*              behavior for tickless configurations.
*/
void  OSTimeDly (OS_TICK   dly,
                 OS_OPT    opt,
                 OS_ERR   *p_err)
{
#if (OS_CFG_TASK_TICK_EN == DEF_ENABLED)
    CPU_SR_ALLOC();
#endif


#ifdef OS_SAFETY_CRITICAL
    if (p_err == DEF_NULL) {                                 /* Safety-critical build: trap NULL error pointer       */
        OS_SAFETY_CRITICAL_EXCEPTION();
        return;
    }
#endif

#if (OS_CFG_CALLED_FROM_ISR_CHK_EN == DEF_ENABLED)
    if (OSIntNestingCtr > 0u) {                              /* Not allowed to call from an ISR                      */
       *p_err = OS_ERR_TIME_DLY_ISR;
        return;
    }
#endif

#if (OS_CFG_INVALID_OS_CALLS_CHK_EN == DEF_ENABLED)          /* Is the kernel running?                               */
    if (OSRunning != OS_STATE_OS_RUNNING) {
       *p_err = OS_ERR_OS_NOT_RUNNING;
        return;
    }
#endif

    if (OSSchedLockNestingCtr > 0u) {                        /* Can't delay when the scheduler is locked             */
       *p_err = OS_ERR_SCHED_LOCKED;
        return;
    }

    switch (opt) {                                           /* Validate the time option                             */
        case OS_OPT_TIME_DLY:
        case OS_OPT_TIME_TIMEOUT:
        case OS_OPT_TIME_PERIODIC:
             if (dly == 0u) {                                /* 0 means no delay!                                    */
                *p_err = OS_ERR_TIME_ZERO_DLY;
                 return;
             }
             break;

        case OS_OPT_TIME_MATCH:                              /* Absolute match: a 0 value is legal here              */
             break;

        default:
            *p_err = OS_ERR_OPT_INVALID;
             return;
    }

#if (OS_CFG_TASK_TICK_EN == DEF_ENABLED)
    CPU_CRITICAL_ENTER();
    OS_TickListInsertDly(OSTCBCurPtr,                        /* Place task on the delay (delta) list                 */
                         dly,
                         opt,
                         p_err);
    if (*p_err != OS_ERR_NONE) {                             /* Insertion failed: leave task ready, propagate error  */
         CPU_CRITICAL_EXIT();
         return;
    }
    OS_TRACE_TASK_DLY(dly);
    OS_RdyListRemove(OSTCBCurPtr);                           /* Remove current task from ready list                  */
    CPU_CRITICAL_EXIT();
    OSSched();                                               /* Find next task to run!                               */
#endif
}
/*
*  OSMutexPost()
*
*  Release (signal) a mutex.  Only the current owner may post.  Handles
*  ownership-nesting, restores the owner's original priority if it was
*  raised through priority inheritance, and hands the mutex to the highest
*  priority pending task, if any.
*
*  Arguments : p_mutex  pointer to the mutex.
*              opt      OS_OPT_POST_NONE, or OS_OPT_POST_NO_SCHED to suppress
*                       the scheduler call after the post.
*              p_err    pointer to an error code:
*                         OS_ERR_NONE, OS_ERR_POST_ISR, OS_ERR_OBJ_PTR_NULL,
*                         OS_ERR_OPT_INVALID, OS_ERR_OBJ_TYPE,
*                         OS_ERR_MUTEX_NOT_OWNER, OS_ERR_MUTEX_NESTING.
*
*  Returns   : none.
*/
void  OSMutexPost (OS_MUTEX  *p_mutex,
                   OS_OPT     opt,
                   OS_ERR    *p_err)
{
    OS_PEND_LIST  *p_pend_list;
    OS_TCB        *p_tcb;
    CPU_TS         ts;
    CPU_SR_ALLOC();



#ifdef OS_SAFETY_CRITICAL
    if (p_err == (OS_ERR *)0) {
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_POST_FAILED(p_mutex);                 /* Record the event.                                    */
#endif
        OS_SAFETY_CRITICAL_EXCEPTION();
        return;
    }
#endif

#if OS_CFG_CALLED_FROM_ISR_CHK_EN > 0u
    if (OSIntNestingCtr > (OS_NESTING_CTR)0) {               /* Not allowed to call from an ISR                      */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_POST_FAILED(p_mutex);                 /* Record the event.                                    */
#endif
       *p_err = OS_ERR_POST_ISR;
        return;
    }
#endif

#if OS_CFG_ARG_CHK_EN > 0u
    if (p_mutex == (OS_MUTEX *)0) {                          /* Validate 'p_mutex'                                   */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_POST_FAILED(p_mutex);                 /* Record the event.                                    */
#endif
       *p_err = OS_ERR_OBJ_PTR_NULL;
        return;
    }
    switch (opt) {                                           /* Validate 'opt'                                       */
        case OS_OPT_POST_NONE:
        case OS_OPT_POST_NO_SCHED:
             break;

        default:
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_MUTEX_POST_FAILED(p_mutex);            /* Record the event.                                    */
#endif
            *p_err = OS_ERR_OPT_INVALID;
             return;
    }
#endif

#if OS_CFG_OBJ_TYPE_CHK_EN > 0u
    if (p_mutex->Type != OS_OBJ_TYPE_MUTEX) {                /* Make sure mutex was created                          */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_POST_FAILED(p_mutex);                 /* Record the event.                                    */
#endif
       *p_err = OS_ERR_OBJ_TYPE;
        return;
    }
#endif

    CPU_CRITICAL_ENTER();
    if (OSTCBCurPtr != p_mutex->OwnerTCBPtr) {               /* Make sure the mutex owner is releasing the mutex     */
        CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_POST_FAILED(p_mutex);                 /* Record the event.                                    */
#endif
       *p_err = OS_ERR_MUTEX_NOT_OWNER;
        return;
    }

#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
    TRACE_OS_MUTEX_POST(p_mutex);                            /* Record the event.                                    */
#endif

    OS_CRITICAL_ENTER_CPU_EXIT();                            /* Lock scheduler, re-enable interrupts                 */
    ts          = OS_TS_GET();                               /* Get timestamp                                        */
    p_mutex->TS = ts;
    p_mutex->OwnerNestingCtr--;                              /* Decrement owner's nesting counter                    */
    if (p_mutex->OwnerNestingCtr > (OS_NESTING_CTR)0) {      /* Are we done with all nestings?                       */
        OS_CRITICAL_EXIT();                                  /* No                                                   */
       *p_err = OS_ERR_MUTEX_NESTING;
        return;
    }

    p_pend_list = &p_mutex->PendList;
    if (p_pend_list->NbrEntries == (OS_OBJ_QTY)0) {          /* Any task waiting on mutex?                           */
        p_mutex->OwnerTCBPtr     = (OS_TCB       *)0;        /* No                                                   */
        p_mutex->OwnerNestingCtr = (OS_NESTING_CTR)0;
        OS_CRITICAL_EXIT();
       *p_err = OS_ERR_NONE;
        return;
    }
                                                             /* Yes                                                  */
    if (OSTCBCurPtr->Prio != p_mutex->OwnerOriginalPrio) {   /* Priority was inherited: undo it now                  */
        OS_RdyListRemove(OSTCBCurPtr);
        OSTCBCurPtr->Prio = p_mutex->OwnerOriginalPrio;      /* Lower owner's priority back to its original one      */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_TASK_PRIO_DISINHERIT(OSTCBCurPtr, OSTCBCurPtr->Prio);
#endif
        OS_PrioInsert(OSTCBCurPtr->Prio);
        OS_RdyListInsertTail(OSTCBCurPtr);                   /* Insert owner in ready list at new priority           */
        OSPrioCur         = OSTCBCurPtr->Prio;
    }
                                                             /* Get TCB from head of pend list                       */
    p_tcb                      = p_pend_list->HeadPtr->TCBPtr;
    p_mutex->OwnerTCBPtr       = p_tcb;                      /* Give mutex to new owner                              */
    p_mutex->OwnerOriginalPrio = p_tcb->Prio;
    p_mutex->OwnerNestingCtr   = (OS_NESTING_CTR)1;
                                                             /* Post to mutex                                        */
    OS_Post((OS_PEND_OBJ *)((void *)p_mutex),
            (OS_TCB      *)p_tcb,
            (void        *)0,
            (OS_MSG_SIZE  )0,
            (CPU_TS       )ts);

    OS_CRITICAL_EXIT_NO_SCHED();

    if ((opt & OS_OPT_POST_NO_SCHED) == (OS_OPT)0) {
        OSSched();                                           /* Run the scheduler                                    */
    }

   *p_err = OS_ERR_NONE;
}
/*
*  OSMutexPend()
*
*  Wait on (acquire) a mutex.  If the mutex is free the caller becomes the
*  owner immediately.  If it is owned by a lower-priority task, that owner's
*  priority is raised to the caller's (priority inheritance) before the
*  caller blocks.
*
*  Arguments : p_mutex  pointer to the mutex.
*              timeout  ticks to wait before giving up (0 = wait forever).
*              opt      OS_OPT_PEND_BLOCKING or OS_OPT_PEND_NON_BLOCKING.
*              p_ts     optional pointer receiving the timestamp of the
*                       last post/abort/delete (may be NULL; set to 0 on
*                       timeout).
*              p_err    pointer to an error code:
*                         OS_ERR_NONE, OS_ERR_PEND_ISR, OS_ERR_OBJ_PTR_NULL,
*                         OS_ERR_OPT_INVALID, OS_ERR_OBJ_TYPE,
*                         OS_ERR_MUTEX_OWNER, OS_ERR_PEND_WOULD_BLOCK,
*                         OS_ERR_SCHED_LOCKED, OS_ERR_STATE_INVALID,
*                         OS_ERR_PEND_ABORT, OS_ERR_TIMEOUT, OS_ERR_OBJ_DEL,
*                         OS_ERR_STATUS_INVALID.
*
*  Returns   : none.
*/
void  OSMutexPend (OS_MUTEX  *p_mutex,
                   OS_TICK    timeout,
                   OS_OPT     opt,
                   CPU_TS    *p_ts,
                   OS_ERR    *p_err)
{
    OS_PEND_DATA  pend_data;
    OS_TCB       *p_tcb;
    CPU_SR_ALLOC();



#ifdef OS_SAFETY_CRITICAL
    if (p_err == (OS_ERR *)0) {
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_PEND_FAILED(p_mutex);                 /* Record the event.                                    */
#endif
        OS_SAFETY_CRITICAL_EXCEPTION();
        return;
    }
#endif

#if OS_CFG_CALLED_FROM_ISR_CHK_EN > 0u
    if (OSIntNestingCtr > (OS_NESTING_CTR)0) {               /* Not allowed to call from an ISR                      */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_PEND_FAILED(p_mutex);                 /* Record the event.                                    */
#endif
       *p_err = OS_ERR_PEND_ISR;
        return;
    }
#endif

#if OS_CFG_ARG_CHK_EN > 0u
    if (p_mutex == (OS_MUTEX *)0) {                          /* Validate arguments                                   */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_PEND_FAILED(p_mutex);                 /* Record the event.                                    */
#endif
       *p_err = OS_ERR_OBJ_PTR_NULL;
        return;
    }
    switch (opt) {                                           /* Validate 'opt'                                       */
        case OS_OPT_PEND_BLOCKING:
        case OS_OPT_PEND_NON_BLOCKING:
             break;

        default:
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_MUTEX_PEND_FAILED(p_mutex);            /* Record the event.                                    */
#endif
            *p_err = OS_ERR_OPT_INVALID;
             return;
    }
#endif

#if OS_CFG_OBJ_TYPE_CHK_EN > 0u
    if (p_mutex->Type != OS_OBJ_TYPE_MUTEX) {                /* Make sure mutex was created                          */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_PEND_FAILED(p_mutex);                 /* Record the event.                                    */
#endif
       *p_err = OS_ERR_OBJ_TYPE;
        return;
    }
#endif

    if (p_ts != (CPU_TS *)0) {
       *p_ts = (CPU_TS)0;                                    /* Initialize the returned timestamp                    */
    }

    CPU_CRITICAL_ENTER();
    if (p_mutex->OwnerNestingCtr == (OS_NESTING_CTR)0) {     /* Resource available?                                  */
        p_mutex->OwnerTCBPtr     =  OSTCBCurPtr;             /* Yes, caller may proceed                              */
        p_mutex->OwnerOriginalPrio = OSTCBCurPtr->Prio;
        p_mutex->OwnerNestingCtr = (OS_NESTING_CTR)1;
        if (p_ts != (CPU_TS *)0) {
           *p_ts = p_mutex->TS;
        }
        CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_PEND(p_mutex);                        /* Record the event.                                    */
#endif
       *p_err = OS_ERR_NONE;
        return;
    }

    if (OSTCBCurPtr == p_mutex->OwnerTCBPtr) {               /* See if current task is already the owner of the mutex */
        p_mutex->OwnerNestingCtr++;                          /* Recursive acquisition: just bump the nesting count   */
        if (p_ts != (CPU_TS *)0) {
           *p_ts = p_mutex->TS;
        }
        CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_PEND_FAILED(p_mutex);                 /* Record the event.                                    */
#endif
       *p_err = OS_ERR_MUTEX_OWNER;                          /* Indicate that current task already owns the mutex    */
        return;
    }

    if ((opt & OS_OPT_PEND_NON_BLOCKING) != (OS_OPT)0) {     /* Caller wants to block if not available?              */
        CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_PEND_FAILED(p_mutex);                 /* Record the event.                                    */
#endif
       *p_err = OS_ERR_PEND_WOULD_BLOCK;                     /* No                                                   */
        return;
    } else {
        if (OSSchedLockNestingCtr > (OS_NESTING_CTR)0) {     /* Can't pend when the scheduler is locked              */
            CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
            TRACE_OS_MUTEX_PEND_FAILED(p_mutex);             /* Record the event.                                    */
#endif
           *p_err = OS_ERR_SCHED_LOCKED;
            return;
        }
    }
                                                             /* Lock the scheduler/re-enable interrupts              */
    OS_CRITICAL_ENTER_CPU_EXIT();
    p_tcb = p_mutex->OwnerTCBPtr;                            /* Point to the TCB of the Mutex owner                  */
    if (p_tcb->Prio > OSTCBCurPtr->Prio) {                   /* See if mutex owner has a lower priority than current */
        switch (p_tcb->TaskState) {
            case OS_TASK_STATE_RDY:
                 OS_RdyListRemove(p_tcb);                    /* Remove from ready list at current priority           */
                 p_tcb->Prio = OSTCBCurPtr->Prio;            /* Raise owner's priority                               */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
                 TRACE_OS_MUTEX_TASK_PRIO_INHERIT(p_tcb, p_tcb->Prio);
#endif
                 OS_PrioInsert(p_tcb->Prio);
                 OS_RdyListInsertHead(p_tcb);                /* Insert in ready list at new priority                 */
                 break;

            case OS_TASK_STATE_DLY:
            case OS_TASK_STATE_DLY_SUSPENDED:
            case OS_TASK_STATE_SUSPENDED:
                 p_tcb->Prio = OSTCBCurPtr->Prio;            /* Only need to raise the owner's priority              */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
                 TRACE_OS_MUTEX_TASK_PRIO_INHERIT(p_tcb, p_tcb->Prio);
#endif
                 break;

            case OS_TASK_STATE_PEND:                         /* Change the position of the task in the wait list     */
            case OS_TASK_STATE_PEND_TIMEOUT:
            case OS_TASK_STATE_PEND_SUSPENDED:
            case OS_TASK_STATE_PEND_TIMEOUT_SUSPENDED:
                 OS_PendListChangePrio(p_tcb,
                                       OSTCBCurPtr->Prio);
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
                 TRACE_OS_MUTEX_TASK_PRIO_INHERIT(p_tcb, p_tcb->Prio);
#endif
                 break;

            default:
                 OS_CRITICAL_EXIT();
                *p_err = OS_ERR_STATE_INVALID;
                 return;
        }
    }

    OS_Pend(&pend_data,                                      /* Block task pending on Mutex                          */
            (OS_PEND_OBJ *)((void *)p_mutex),
             OS_TASK_PEND_ON_MUTEX,
             timeout);
    OS_CRITICAL_EXIT_NO_SCHED();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
    TRACE_OS_MUTEX_PEND_BLOCK(p_mutex);                      /* Record the event.                                    */
#endif
    OSSched();                                               /* Find the next highest priority task ready to run     */

    CPU_CRITICAL_ENTER();                                    /* We run again here once unblocked                     */
    switch (OSTCBCurPtr->PendStatus) {
        case OS_STATUS_PEND_OK:                              /* We got the mutex                                     */
             if (p_ts != (CPU_TS *)0) {
                *p_ts = OSTCBCurPtr->TS;
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_MUTEX_PEND(p_mutex);                   /* Record the event.                                    */
#endif
            *p_err = OS_ERR_NONE;
             break;

        case OS_STATUS_PEND_ABORT:                           /* Indicate that we aborted                             */
             if (p_ts != (CPU_TS *)0) {
                *p_ts = OSTCBCurPtr->TS;
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_MUTEX_PEND_FAILED(p_mutex);            /* Record the event.                                    */
#endif
            *p_err = OS_ERR_PEND_ABORT;
             break;

        case OS_STATUS_PEND_TIMEOUT:                         /* Indicate that we didn't get mutex within timeout     */
             if (p_ts != (CPU_TS *)0) {
                *p_ts = (CPU_TS)0;
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_MUTEX_PEND_FAILED(p_mutex);            /* Record the event.                                    */
#endif
            *p_err = OS_ERR_TIMEOUT;
             break;

        case OS_STATUS_PEND_DEL:                             /* Indicate that object pended on has been deleted      */
             if (p_ts != (CPU_TS *)0) {
                *p_ts = OSTCBCurPtr->TS;
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_MUTEX_PEND_FAILED(p_mutex);            /* Record the event.                                    */
#endif
            *p_err = OS_ERR_OBJ_DEL;
             break;

        default:
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_MUTEX_PEND_FAILED(p_mutex);            /* Record the event.                                    */
#endif
            *p_err = OS_ERR_STATUS_INVALID;
             break;
    }
    CPU_CRITICAL_EXIT();
}
/*
*  OSMutexDel()
*
*  Delete a mutex.  With OS_OPT_DEL_NO_PEND the mutex is deleted only if no
*  task is waiting on it; with OS_OPT_DEL_ALWAYS any inherited priority of
*  the owner is first undone, every pending task is released, and the mutex
*  is cleared.
*
*  Arguments : p_mutex  pointer to the mutex to delete.
*              opt      OS_OPT_DEL_NO_PEND or OS_OPT_DEL_ALWAYS.
*              p_err    pointer to an error code:
*                         OS_ERR_NONE, OS_ERR_DEL_ISR, OS_ERR_OBJ_PTR_NULL,
*                         OS_ERR_OPT_INVALID, OS_ERR_OBJ_TYPE,
*                         OS_ERR_TASK_WAITING, OS_ERR_STATE_INVALID.
*
*  Returns   : the number of tasks that were waiting on the mutex (0 on
*              error).
*
*  Fix       : the three TRACE_OS_MUTEX_TASK_PRIO_DISINHERIT() invocations
*              were missing their terminating ';', inconsistent with the
*              same macro family's use in OSMutexPost() and a syntax error
*              whenever the trace macro expands to an expression.
*/
OS_OBJ_QTY  OSMutexDel (OS_MUTEX  *p_mutex,
                        OS_OPT     opt,
                        OS_ERR    *p_err)
{
    OS_OBJ_QTY     cnt;
    OS_OBJ_QTY     nbr_tasks;
    OS_PEND_DATA  *p_pend_data;
    OS_PEND_LIST  *p_pend_list;
    OS_TCB        *p_tcb;
    OS_TCB        *p_tcb_owner;
    CPU_TS         ts;
    CPU_SR_ALLOC();



#ifdef OS_SAFETY_CRITICAL
    if (p_err == (OS_ERR *)0) {
        OS_SAFETY_CRITICAL_EXCEPTION();
        return ((OS_OBJ_QTY)0);
    }
#endif

#if OS_CFG_CALLED_FROM_ISR_CHK_EN > 0u
    if (OSIntNestingCtr > (OS_NESTING_CTR)0) {               /* Not allowed to delete a mutex from an ISR            */
       *p_err = OS_ERR_DEL_ISR;
        return ((OS_OBJ_QTY)0);
    }
#endif

#if OS_CFG_ARG_CHK_EN > 0u
    if (p_mutex == (OS_MUTEX *)0) {                          /* Validate 'p_mutex'                                   */
       *p_err = OS_ERR_OBJ_PTR_NULL;
        return ((OS_OBJ_QTY)0);
    }
    switch (opt) {                                           /* Validate 'opt'                                       */
        case OS_OPT_DEL_NO_PEND:
        case OS_OPT_DEL_ALWAYS:
             break;

        default:
            *p_err = OS_ERR_OPT_INVALID;
             return ((OS_OBJ_QTY)0);
    }
#endif

#if OS_CFG_OBJ_TYPE_CHK_EN > 0u
    if (p_mutex->Type != OS_OBJ_TYPE_MUTEX) {                /* Make sure mutex was created                          */
       *p_err = OS_ERR_OBJ_TYPE;
        return ((OS_OBJ_QTY)0);
    }
#endif

    OS_CRITICAL_ENTER();
    p_pend_list = &p_mutex->PendList;
    cnt         = p_pend_list->NbrEntries;
    nbr_tasks   = cnt;
    switch (opt) {
        case OS_OPT_DEL_NO_PEND:                             /* Delete mutex only if no task waiting                 */
             if (nbr_tasks == (OS_OBJ_QTY)0) {
#if OS_CFG_DBG_EN > 0u
                 OS_MutexDbgListRemove(p_mutex);
#endif
                 OSMutexQty--;
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
                 TRACE_OS_MUTEX_DEL(p_mutex);                /* Record the event.                                    */
#endif
                 OS_MutexClr(p_mutex);
                 OS_CRITICAL_EXIT();
                *p_err = OS_ERR_NONE;
             } else {
                 OS_CRITICAL_EXIT();
                *p_err = OS_ERR_TASK_WAITING;
             }
             break;

        case OS_OPT_DEL_ALWAYS:                              /* Always delete the mutex                              */
             p_tcb_owner = p_mutex->OwnerTCBPtr;             /* Did we have to change the prio of the owner?         */
             if ((p_tcb_owner       != (OS_TCB *)0) &&
                 (p_tcb_owner->Prio != p_mutex->OwnerOriginalPrio)) {
                 switch (p_tcb_owner->TaskState) {           /* Yes: undo the priority inheritance                   */
                     case OS_TASK_STATE_RDY:
                          OS_RdyListRemove(p_tcb_owner);
                          p_tcb_owner->Prio = p_mutex->OwnerOriginalPrio;  /* Lower owner's prio back                */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
                          TRACE_OS_MUTEX_TASK_PRIO_DISINHERIT(p_tcb_owner, p_tcb_owner->Prio);
#endif
                          OS_PrioInsert(p_tcb_owner->Prio);
                          OS_RdyListInsertTail(p_tcb_owner); /* Insert owner in ready list at new prio               */
                          break;

                     case OS_TASK_STATE_DLY:
                     case OS_TASK_STATE_SUSPENDED:
                     case OS_TASK_STATE_DLY_SUSPENDED:
                          p_tcb_owner->Prio = p_mutex->OwnerOriginalPrio;  /* Not in any pend list, change the prio  */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
                          TRACE_OS_MUTEX_TASK_PRIO_DISINHERIT(p_tcb_owner, p_tcb_owner->Prio);
#endif
                          break;

                     case OS_TASK_STATE_PEND:
                     case OS_TASK_STATE_PEND_TIMEOUT:
                     case OS_TASK_STATE_PEND_SUSPENDED:
                     case OS_TASK_STATE_PEND_TIMEOUT_SUSPENDED:
                          OS_PendListChangePrio(p_tcb_owner, /* Owner is pending on another object                   */
                                                p_mutex->OwnerOriginalPrio);
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
                          TRACE_OS_MUTEX_TASK_PRIO_DISINHERIT(p_tcb_owner, p_tcb_owner->Prio);
#endif
                          break;

                     default:
                          OS_CRITICAL_EXIT();
                         *p_err = OS_ERR_STATE_INVALID;
                          return ((OS_OBJ_QTY)0);
                 }
             }

             ts = OS_TS_GET();                               /* Get timestamp                                        */
             while (cnt > 0u) {                              /* Remove all tasks from the pend list                  */
                 p_pend_data = p_pend_list->HeadPtr;
                 p_tcb       = p_pend_data->TCBPtr;
                 OS_PendObjDel((OS_PEND_OBJ *)((void *)p_mutex),
                               p_tcb,
                               ts);
                 cnt--;
             }
#if OS_CFG_DBG_EN > 0u
             OS_MutexDbgListRemove(p_mutex);
#endif
             OSMutexQty--;
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_MUTEX_DEL(p_mutex);                    /* Record the event.                                    */
#endif
             OS_MutexClr(p_mutex);
             OS_CRITICAL_EXIT_NO_SCHED();
             OSSched();                                      /* Find highest priority task ready to run              */
            *p_err = OS_ERR_NONE;
             break;

        default:
             OS_CRITICAL_EXIT();
            *p_err = OS_ERR_OPT_INVALID;
             break;
    }
    return (nbr_tasks);
}