/*
*********************************************************************************************************
*                                             OSQPend()
*
* Description: Waits for a message to arrive at the specified message queue.
*
* Arguments  : p_q          Pointer to the message queue to pend on.
*              timeout      Ticks to wait for a message (0 = wait forever) when blocking is allowed.
*              opt          OS_OPT_PEND_BLOCKING or OS_OPT_PEND_NON_BLOCKING.
*              p_msg_size   Where the size of the received message is stored.
*              p_ts         Optional; receives the timestamp of when the message was posted
*                           (0 on timeout, untouched semantics otherwise).  May be NULL.
*              p_err        Where the result code is stored (OS_ERR_NONE on success).
*
* Returns    : Pointer to the received message, or NULL on any failure.
*
* Note       : Must not be called from an ISR (checked when OS_CFG_CALLED_FROM_ISR_CHK_EN > 0u).
*********************************************************************************************************
*/

void  *OSQPend (OS_Q         *p_q,
                OS_TICK       timeout,
                OS_OPT        opt,
                OS_MSG_SIZE  *p_msg_size,
                CPU_TS       *p_ts,
                OS_ERR       *p_err)
{
    OS_PEND_DATA  pend_data;
    void         *p_void;
    CPU_SR_ALLOC();


#ifdef OS_SAFETY_CRITICAL
    if (p_err == (OS_ERR *)0) {                             /* A NULL error pointer cannot report anything back      */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_Q_PEND_FAILED(p_q);                        /* Record the event.                                     */
#endif
        OS_SAFETY_CRITICAL_EXCEPTION();
        return ((void *)0);
    }
#endif

#if OS_CFG_CALLED_FROM_ISR_CHK_EN > 0u
    if (OSIntNestingCtr > (OS_NESTING_CTR)0) {              /* Not allowed to call from an ISR                       */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_Q_PEND_FAILED(p_q);                        /* Record the event.                                     */
#endif
        *p_err = OS_ERR_PEND_ISR;
        return ((void *)0);
    }
#endif

#if OS_CFG_ARG_CHK_EN > 0u
    if (p_q == (OS_Q *)0) {                                 /* Validate arguments                                    */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_Q_PEND_FAILED(p_q);                        /* Record the event.                                     */
#endif
        *p_err = OS_ERR_OBJ_PTR_NULL;
        return ((void *)0);
    }
    if (p_msg_size == (OS_MSG_SIZE *)0) {                   /* Caller must supply storage for the message size       */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_Q_PEND_FAILED(p_q);                        /* Record the event.                                     */
#endif
        *p_err = OS_ERR_PTR_INVALID;
        return ((void *)0);
    }
    switch (opt) {                                          /* Only the two pend modes are legal                     */
        case OS_OPT_PEND_BLOCKING:
        case OS_OPT_PEND_NON_BLOCKING:
             break;

        default:
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_Q_PEND_FAILED(p_q);                   /* Record the event.                                     */
#endif
             *p_err = OS_ERR_OPT_INVALID;
             return ((void *)0);
    }
#endif

#if OS_CFG_OBJ_TYPE_CHK_EN > 0u
    if (p_q->Type != OS_OBJ_TYPE_Q) {                       /* Make sure message queue was created                   */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_Q_PEND_FAILED(p_q);                        /* Record the event.                                     */
#endif
        *p_err = OS_ERR_OBJ_TYPE;
        return ((void *)0);
    }
#endif

    if (p_ts != (CPU_TS *)0) {
        *p_ts = (CPU_TS )0;                                 /* Initialize the returned timestamp                     */
    }

    CPU_CRITICAL_ENTER();
    p_void = OS_MsgQGet(&p_q->MsgQ,                         /* Any message waiting in the message queue?             */
                        p_msg_size,
                        p_ts,
                        p_err);
    if (*p_err == OS_ERR_NONE) {
        CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_Q_PEND(p_q);                               /* Record the event.                                     */
#endif
        return (p_void);                                    /* Yes, Return message received                          */
    }
    /* Queue is empty at this point; decide whether to block.                                                        */
    if ((opt & OS_OPT_PEND_NON_BLOCKING) != (OS_OPT)0) {    /* Caller wants to block if not available?               */
        CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_Q_PEND_FAILED(p_q);                        /* Record the event.                                     */
#endif
        *p_err = OS_ERR_PEND_WOULD_BLOCK;                   /* No                                                    */
        return ((void *)0);
    } else {
        if (OSSchedLockNestingCtr > (OS_NESTING_CTR)0) {    /* Can't pend when the scheduler is locked               */
            CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
            TRACE_OS_Q_PEND_FAILED(p_q);                    /* Record the event.                                     */
#endif
            *p_err = OS_ERR_SCHED_LOCKED;
            return ((void *)0);
        }
    }
                                                            /* Lock the scheduler/re-enable interrupts               */
    OS_CRITICAL_ENTER_CPU_EXIT();
    OS_Pend(&pend_data,                                     /* Block task pending on Message Queue                   */
            (OS_PEND_OBJ *)((void *)p_q),
            OS_TASK_PEND_ON_Q,
            timeout);
    OS_CRITICAL_EXIT_NO_SCHED();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
    TRACE_OS_Q_PEND_BLOCK(p_q);                             /* Record the event.                                     */
#endif
    OSSched();                                              /* Find the next highest priority task ready to run      */

    /* We run again here only after a post, abort, timeout, or deletion woke us up.                                  */
    CPU_CRITICAL_ENTER();
    switch (OSTCBCurPtr->PendStatus) {
        case OS_STATUS_PEND_OK:                             /* Extract message from TCB (Put there by Post)          */
             p_void      = OSTCBCurPtr->MsgPtr;
             *p_msg_size = OSTCBCurPtr->MsgSize;
             if (p_ts != (CPU_TS *)0) {
                 *p_ts = OSTCBCurPtr->TS;
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_Q_PEND(p_q);                          /* Record the event.                                     */
#endif
             *p_err = OS_ERR_NONE;
             break;

        case OS_STATUS_PEND_ABORT:                          /* Indicate that we aborted                              */
             p_void      = (void *)0;
             *p_msg_size = (OS_MSG_SIZE)0;
             if (p_ts != (CPU_TS *)0) {
                 *p_ts = OSTCBCurPtr->TS;
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_Q_PEND_FAILED(p_q);                   /* Record the event.                                     */
#endif
             *p_err = OS_ERR_PEND_ABORT;
             break;

        case OS_STATUS_PEND_TIMEOUT:                        /* Indicate that we didn't get event within TO           */
             p_void      = (void *)0;
             *p_msg_size = (OS_MSG_SIZE)0;
             if (p_ts != (CPU_TS *)0) {
                 *p_ts = (CPU_TS )0;                        /* No post occurred, so no valid timestamp               */
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_Q_PEND_FAILED(p_q);                   /* Record the event.                                     */
#endif
             *p_err = OS_ERR_TIMEOUT;
             break;

        case OS_STATUS_PEND_DEL:                            /* Indicate that object pended on has been deleted       */
             p_void      = (void *)0;
             *p_msg_size = (OS_MSG_SIZE)0;
             if (p_ts != (CPU_TS *)0) {
                 *p_ts = OSTCBCurPtr->TS;
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_Q_PEND_FAILED(p_q);                   /* Record the event.                                     */
#endif
             *p_err = OS_ERR_OBJ_DEL;
             break;

        default:
             p_void      = (void *)0;
             *p_msg_size = (OS_MSG_SIZE)0;
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_Q_PEND_FAILED(p_q);                   /* Record the event.                                     */
#endif
             *p_err = OS_ERR_STATUS_INVALID;
             break;
    }
    CPU_CRITICAL_EXIT();
    return (p_void);
}
/*
*********************************************************************************************************
*                                             OSMonOp()
*
* Description: Performs an operation on a monitor (condition-variable style object): optionally runs an
*              "on enter" callback, blocks the caller if that callback requests it, then evaluates every
*              task currently pending on the monitor and wakes those whose "on eval" callback no longer
*              requests blocking.
*
* Arguments  : p_mon       Pointer to the monitor.
*              timeout     Ticks to wait when this call blocks (0 = wait forever).
*              p_arg       Argument passed to the enter/eval callbacks; also stored in the caller's TCB
*                          as its evaluation data.
*              p_on_enter  Callback run on entry; its result bits (OS_MON_RES_BLOCK / OS_MON_RES_STOP_EVAL)
*                          drive the operation.  NULL means "block and do not evaluate others".
*              p_on_eval   Callback stored in the caller's TCB, used when other tasks later evaluate us.
*              opt         OS_OPT_POST_NO_SCHED suppresses the scheduler call after waking tasks.
*              p_err       Where the result code is stored.
*
* Returns    : none
*
* NOTE(review): this function uses the newer Micrium API style (DEF_NULL / DEF_ENABLED) and calls
*               OS_Pend() with 3 arguments, while the other services in this file use the older
*               4-argument form with an OS_PEND_DATA — the file appears to mix kernel versions; confirm
*               against the OS_Pend() prototype actually compiled in.
*********************************************************************************************************
*/

void  OSMonOp (OS_MON               *p_mon,
               OS_TICK               timeout,
               void                 *p_arg,
               OS_MON_ON_ENTER_PTR   p_on_enter,
               OS_MON_ON_EVAL_PTR    p_on_eval,
               OS_OPT                opt,
               OS_ERR               *p_err)
{
    CPU_INT32U     op_res;
    CPU_INT32U     mon_res;
    OS_PEND_LIST  *p_pend_list;
    OS_TCB        *p_tcb;
    OS_TCB        *p_tcb_next;
    void          *p_eval_data;
    CPU_BOOLEAN    sched;
    CPU_SR_ALLOC();


#ifdef OS_SAFETY_CRITICAL
    if (p_err == DEF_NULL) {                                /* A NULL error pointer cannot report anything back      */
        OS_SAFETY_CRITICAL_EXCEPTION();
        return;
    }
#endif

#if (OS_CFG_INVALID_OS_CALLS_CHK_EN == DEF_ENABLED)         /* Is the kernel running?                                */
    if (OSRunning != OS_STATE_OS_RUNNING) {
        *p_err = OS_ERR_OS_NOT_RUNNING;
        return;
    }
#endif

#if (OS_CFG_ARG_CHK_EN == DEF_ENABLED)
    if (p_mon == DEF_NULL) {                                /* Validate 'p_mon'                                      */
        *p_err = OS_ERR_OBJ_PTR_NULL;
        return;
    }
#endif

    sched = DEF_NO;

    CPU_CRITICAL_ENTER();
    if (p_on_enter != DEF_NULL) {
        op_res = (*p_on_enter)(p_mon, p_arg);               /* Let the enter callback decide block/evaluate          */
    } else {
        op_res = OS_MON_RES_BLOCK | OS_MON_RES_STOP_EVAL;   /* Default: block the caller, evaluate nobody            */
    }

    if (DEF_BIT_IS_SET(op_res, OS_MON_RES_BLOCK) == DEF_YES) {
        OS_Pend((OS_PEND_OBJ *)(p_mon),                     /* Block task pending on Condition Variable              */
                 OS_TASK_PEND_ON_COND_VAR,
                 timeout);
        sched = DEF_YES;
    }

    /* Store this task's eval callback/data so other tasks can evaluate us later.                                    */
    OSTCBCurPtr->MonData.p_eval_data = p_arg;
    OSTCBCurPtr->MonData.p_on_eval   = p_on_eval;

    if (DEF_BIT_IS_CLR(op_res, OS_MON_RES_STOP_EVAL) == DEF_YES) {
        p_pend_list = &p_mon->PendList;
        if (p_pend_list->HeadPtr != DEF_NULL) {
            p_tcb = p_pend_list->HeadPtr;
            while (p_tcb != DEF_NULL) {
                p_tcb_next  = p_tcb->PendNextPtr;           /* Saved first: OS_Post() unlinks p_tcb from the list    */
                p_on_eval   = p_tcb->MonData.p_on_eval;
                p_eval_data = p_tcb->MonData.p_eval_data;
                if (p_on_eval != DEF_NULL) {
                    mon_res = (*p_on_eval)(p_mon, p_eval_data, p_arg);
                } else {
                    mon_res = OS_MON_RES_STOP_EVAL;         /* No eval callback: leave task blocked, stop scanning   */
                }
                if (DEF_BIT_IS_CLR(mon_res, OS_MON_RES_BLOCK) == DEF_YES) {
                    OS_Post((OS_PEND_OBJ *)(p_mon), p_tcb, DEF_NULL, 0u, 0u);
                    if (DEF_BIT_IS_CLR(opt, OS_OPT_POST_NO_SCHED) == DEF_YES) {
                        sched = DEF_YES;
                    }
                }
                if (DEF_BIT_IS_SET(mon_res, OS_MON_RES_STOP_EVAL) == DEF_YES) {
                    break;
                }
                p_tcb = p_tcb_next;
            }
        }
    }
    CPU_CRITICAL_EXIT();

    if (sched == DEF_YES) {
        OSSched();                                          /* Find the next highest priority task ready to run      */
    }

    if (DEF_BIT_IS_SET(op_res, OS_MON_RES_BLOCK) == DEF_YES) {
        /* We blocked above; report how the pend ended.                                                              */
        CPU_CRITICAL_ENTER();
        switch (OSTCBCurPtr->PendStatus) {
            case OS_STATUS_PEND_OK:                         /* We got the monitor                                    */
                 *p_err = OS_ERR_NONE;
                 break;

            case OS_STATUS_PEND_ABORT:                      /* Indicate that we aborted                              */
                 *p_err = OS_ERR_PEND_ABORT;
                 break;

            case OS_STATUS_PEND_TIMEOUT:                    /* Indicate that we didn't get monitor within timeout    */
                 *p_err = OS_ERR_TIMEOUT;
                 break;

            case OS_STATUS_PEND_DEL:                        /* Indicate that object pended on has been deleted       */
                 *p_err = OS_ERR_OBJ_DEL;
                 break;

            default:
                 *p_err = OS_ERR_STATUS_INVALID;
        }
        CPU_CRITICAL_EXIT();
    } else {
        *p_err = OS_ERR_NONE;                               /* Did not block: the operation itself always succeeds   */
    }
}
/*
*********************************************************************************************************
*                                            OSSemPend()
*
* Description: Waits on a semaphore.  If the counter is non-zero it is decremented and the call returns
*              immediately; otherwise the caller either returns with OS_ERR_PEND_WOULD_BLOCK
*              (non-blocking) or is placed on the semaphore's pend list until a post, abort, timeout,
*              or deletion.
*
* Arguments  : p_sem    Pointer to the semaphore.
*              timeout  Ticks to wait (0 = wait forever) when blocking is allowed.
*              opt      OS_OPT_PEND_BLOCKING or OS_OPT_PEND_NON_BLOCKING.
*              p_ts     Optional; receives the timestamp of the last post (0 on timeout).  May be NULL.
*              p_err    Where the result code is stored (OS_ERR_NONE on success).
*
* Returns    : The semaphore counter AFTER the pend resolved, or 0 on failure paths.
*
* Note       : Must not be called from an ISR (checked when OS_CFG_CALLED_FROM_ISR_CHK_EN > 0u).
*********************************************************************************************************
*/

OS_SEM_CTR  OSSemPend (OS_SEM   *p_sem,
                       OS_TICK   timeout,
                       OS_OPT    opt,
                       CPU_TS   *p_ts,
                       OS_ERR   *p_err)
{
    OS_SEM_CTR    ctr;
    OS_PEND_DATA  pend_data;
    CPU_SR_ALLOC();


#ifdef OS_SAFETY_CRITICAL
    if (p_err == (OS_ERR *)0) {                             /* A NULL error pointer cannot report anything back      */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_SEM_PEND_FAILED(p_sem);                    /* Record the event.                                     */
#endif
        OS_SAFETY_CRITICAL_EXCEPTION();
        return ((OS_SEM_CTR)0);
    }
#endif

#if OS_CFG_CALLED_FROM_ISR_CHK_EN > 0u
    if (OSIntNestingCtr > (OS_NESTING_CTR)0) {              /* Not allowed to call from an ISR                       */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_SEM_PEND_FAILED(p_sem);                    /* Record the event.                                     */
#endif
        *p_err = OS_ERR_PEND_ISR;
        return ((OS_SEM_CTR)0);
    }
#endif

#if OS_CFG_ARG_CHK_EN > 0u
    if (p_sem == (OS_SEM *)0) {                             /* Validate 'p_sem'                                      */
        *p_err = OS_ERR_OBJ_PTR_NULL;                       /* NOTE(review): no trace macro here, unlike the other   */
        return ((OS_SEM_CTR)0);                             /* ..failure paths in this function                      */
    }
    switch (opt) {                                          /* Validate 'opt'                                        */
        case OS_OPT_PEND_BLOCKING:
        case OS_OPT_PEND_NON_BLOCKING:
             break;

        default:
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_SEM_PEND_FAILED(p_sem);               /* Record the event.                                     */
#endif
             *p_err = OS_ERR_OPT_INVALID;
             return ((OS_SEM_CTR)0);
    }
#endif

#if OS_CFG_OBJ_TYPE_CHK_EN > 0u
    if (p_sem->Type != OS_OBJ_TYPE_SEM) {                   /* Make sure semaphore was created                       */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_SEM_PEND_FAILED(p_sem);                    /* Record the event.                                     */
#endif
        *p_err = OS_ERR_OBJ_TYPE;
        return ((OS_SEM_CTR)0);
    }
#endif

    if (p_ts != (CPU_TS *)0) {
        *p_ts = (CPU_TS)0;                                  /* Initialize the returned timestamp                     */
    }

    CPU_CRITICAL_ENTER();
    if (p_sem->Ctr > (OS_SEM_CTR)0) {                       /* Resource available?                                   */
        p_sem->Ctr--;                                       /* Yes, caller may proceed                               */
        if (p_ts != (CPU_TS *)0) {
            *p_ts = p_sem->TS;                              /* get timestamp of last post                            */
        }
        ctr = p_sem->Ctr;
        CPU_CRITICAL_EXIT();
        *p_err = OS_ERR_NONE;
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_SEM_PEND(p_sem);                           /* Record the event.                                     */
#endif
        return (ctr);
    }

    if ((opt & OS_OPT_PEND_NON_BLOCKING) != (OS_OPT)0) {    /* Caller wants to block if not available?               */
        ctr = p_sem->Ctr;                                   /* No                                                    */
        CPU_CRITICAL_EXIT();
        *p_err = OS_ERR_PEND_WOULD_BLOCK;
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_SEM_PEND_FAILED(p_sem);                    /* Record the event.                                     */
#endif
        return (ctr);
    } else {                                                /* Yes                                                   */
        if (OSSchedLockNestingCtr > (OS_NESTING_CTR)0) {    /* Can't pend when the scheduler is locked               */
            CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
            TRACE_OS_SEM_PEND_FAILED(p_sem);                /* Record the event.                                     */
#endif
            *p_err = OS_ERR_SCHED_LOCKED;
            return ((OS_SEM_CTR)0);
        }
    }
                                                            /* Lock the scheduler/re-enable interrupts               */
    OS_CRITICAL_ENTER_CPU_EXIT();
    OS_Pend(&pend_data,                                     /* Block task pending on Semaphore                       */
            (OS_PEND_OBJ *)((void *)p_sem),
            OS_TASK_PEND_ON_SEM,
            timeout);
    OS_CRITICAL_EXIT_NO_SCHED();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
    TRACE_OS_SEM_PEND_BLOCK(p_sem);                         /* Record the event.                                     */
#endif
    OSSched();                                              /* Find the next highest priority task ready to run      */

    /* We run again here only after a post, abort, timeout, or deletion woke us up.                                  */
    CPU_CRITICAL_ENTER();
    switch (OSTCBCurPtr->PendStatus) {
        case OS_STATUS_PEND_OK:                             /* We got the semaphore                                  */
             if (p_ts != (CPU_TS *)0) {
                 *p_ts = OSTCBCurPtr->TS;
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_SEM_PEND(p_sem);                      /* Record the event.                                     */
#endif
             *p_err = OS_ERR_NONE;
             break;

        case OS_STATUS_PEND_ABORT:                          /* Indicate that we aborted                              */
             if (p_ts != (CPU_TS *)0) {
                 *p_ts = OSTCBCurPtr->TS;
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_SEM_PEND_FAILED(p_sem);               /* Record the event.                                     */
#endif
             *p_err = OS_ERR_PEND_ABORT;
             break;

        case OS_STATUS_PEND_TIMEOUT:                        /* Indicate that we didn't get semaphore within timeout  */
             if (p_ts != (CPU_TS *)0) {
                 *p_ts = (CPU_TS )0;                        /* No post occurred, so no valid timestamp               */
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_SEM_PEND_FAILED(p_sem);               /* Record the event.                                     */
#endif
             *p_err = OS_ERR_TIMEOUT;
             break;

        case OS_STATUS_PEND_DEL:                            /* Indicate that object pended on has been deleted       */
             if (p_ts != (CPU_TS *)0) {
                 *p_ts = OSTCBCurPtr->TS;
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_SEM_PEND_FAILED(p_sem);               /* Record the event.                                     */
#endif
             *p_err = OS_ERR_OBJ_DEL;
             break;

        default:
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_SEM_PEND_FAILED(p_sem);               /* Record the event.                                     */
#endif
             *p_err = OS_ERR_STATUS_INVALID;
             CPU_CRITICAL_EXIT();
             return ((OS_SEM_CTR)0);
    }
    ctr = p_sem->Ctr;
    CPU_CRITICAL_EXIT();
    return (ctr);
}
/*
*********************************************************************************************************
*                                           OSMutexPend()
*
* Description: Waits on a mutex.  If the mutex is free the caller becomes the owner immediately.  If the
*              caller already owns it the nesting counter is incremented (reported via
*              OS_ERR_MUTEX_OWNER).  Otherwise the caller blocks (unless non-blocking), and if the
*              current owner has a lower priority than the caller, the owner's priority is raised to the
*              caller's (priority inheritance) before blocking.
*
* Arguments  : p_mutex  Pointer to the mutex.
*              timeout  Ticks to wait (0 = wait forever) when blocking is allowed.
*              opt      OS_OPT_PEND_BLOCKING or OS_OPT_PEND_NON_BLOCKING.
*              p_ts     Optional; receives the timestamp of when the mutex was released (0 on timeout).
*                       May be NULL.
*              p_err    Where the result code is stored (OS_ERR_NONE on success).
*
* Returns    : none
*
* Note       : Must not be called from an ISR (checked when OS_CFG_CALLED_FROM_ISR_CHK_EN > 0u).
*********************************************************************************************************
*/

void  OSMutexPend (OS_MUTEX  *p_mutex,
                   OS_TICK    timeout,
                   OS_OPT     opt,
                   CPU_TS    *p_ts,
                   OS_ERR    *p_err)
{
    OS_PEND_DATA  pend_data;
    OS_TCB       *p_tcb;
    CPU_SR_ALLOC();


#ifdef OS_SAFETY_CRITICAL
    if (p_err == (OS_ERR *)0) {                             /* A NULL error pointer cannot report anything back      */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_PEND_FAILED(p_mutex);                /* Record the event.                                     */
#endif
        OS_SAFETY_CRITICAL_EXCEPTION();
        return;
    }
#endif

#if OS_CFG_CALLED_FROM_ISR_CHK_EN > 0u
    if (OSIntNestingCtr > (OS_NESTING_CTR)0) {              /* Not allowed to call from an ISR                       */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_PEND_FAILED(p_mutex);                /* Record the event.                                     */
#endif
        *p_err = OS_ERR_PEND_ISR;
        return;
    }
#endif

#if OS_CFG_ARG_CHK_EN > 0u
    if (p_mutex == (OS_MUTEX *)0) {                         /* Validate arguments                                    */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_PEND_FAILED(p_mutex);                /* Record the event.                                     */
#endif
        *p_err = OS_ERR_OBJ_PTR_NULL;
        return;
    }
    switch (opt) {                                          /* Validate 'opt'                                        */
        case OS_OPT_PEND_BLOCKING:
        case OS_OPT_PEND_NON_BLOCKING:
             break;

        default:
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_MUTEX_PEND_FAILED(p_mutex);           /* Record the event.                                     */
#endif
             *p_err = OS_ERR_OPT_INVALID;
             return;
    }
#endif

#if OS_CFG_OBJ_TYPE_CHK_EN > 0u
    if (p_mutex->Type != OS_OBJ_TYPE_MUTEX) {               /* Make sure mutex was created                           */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_PEND_FAILED(p_mutex);                /* Record the event.                                     */
#endif
        *p_err = OS_ERR_OBJ_TYPE;
        return;
    }
#endif

    if (p_ts != (CPU_TS *)0) {
        *p_ts = (CPU_TS )0;                                 /* Initialize the returned timestamp                     */
    }

    CPU_CRITICAL_ENTER();
    if (p_mutex->OwnerNestingCtr == (OS_NESTING_CTR)0) {    /* Resource available?                                   */
        p_mutex->OwnerTCBPtr       = OSTCBCurPtr;           /* Yes, caller may proceed                               */
        p_mutex->OwnerOriginalPrio = OSTCBCurPtr->Prio;     /* Remember original prio for inheritance restore        */
        p_mutex->OwnerNestingCtr   = (OS_NESTING_CTR)1;
        if (p_ts != (CPU_TS *)0) {
            *p_ts = p_mutex->TS;
        }
        CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_PEND(p_mutex);                       /* Record the event.                                     */
#endif
        *p_err = OS_ERR_NONE;
        return;
    }

    if (OSTCBCurPtr == p_mutex->OwnerTCBPtr) {              /* See if current task is already the owner of the mutex */
        p_mutex->OwnerNestingCtr++;                         /* Recursive acquisition: just bump the nesting counter  */
        if (p_ts != (CPU_TS *)0) {
            *p_ts = p_mutex->TS;
        }
        CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_PEND_FAILED(p_mutex);                /* Record the event.                                     */
#endif
        *p_err = OS_ERR_MUTEX_OWNER;                        /* Indicate that current task already owns the mutex     */
        return;
    }

    if ((opt & OS_OPT_PEND_NON_BLOCKING) != (OS_OPT)0) {    /* Caller wants to block if not available?               */
        CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
        TRACE_OS_MUTEX_PEND_FAILED(p_mutex);                /* Record the event.                                     */
#endif
        *p_err = OS_ERR_PEND_WOULD_BLOCK;                   /* No                                                    */
        return;
    } else {
        if (OSSchedLockNestingCtr > (OS_NESTING_CTR)0) {    /* Can't pend when the scheduler is locked               */
            CPU_CRITICAL_EXIT();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
            TRACE_OS_MUTEX_PEND_FAILED(p_mutex);            /* Record the event.                                     */
#endif
            *p_err = OS_ERR_SCHED_LOCKED;
            return;
        }
    }
                                                            /* Lock the scheduler/re-enable interrupts               */
    OS_CRITICAL_ENTER_CPU_EXIT();
    p_tcb = p_mutex->OwnerTCBPtr;                           /* Point to the TCB of the Mutex owner                   */
    if (p_tcb->Prio > OSTCBCurPtr->Prio) {                  /* See if mutex owner has a lower priority than current  */
        /* Priority inheritance: raise the owner to the caller's priority; how depends on the owner's state.         */
        switch (p_tcb->TaskState) {
            case OS_TASK_STATE_RDY:
                 OS_RdyListRemove(p_tcb);                   /* Remove from ready list at current priority            */
                 p_tcb->Prio = OSTCBCurPtr->Prio;           /* Raise owner's priority                                */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
                 TRACE_OS_MUTEX_TASK_PRIO_INHERIT(p_tcb, p_tcb->Prio);
#endif
                 OS_PrioInsert(p_tcb->Prio);
                 OS_RdyListInsertHead(p_tcb);               /* Insert in ready list at new priority                  */
                 break;

            case OS_TASK_STATE_DLY:
            case OS_TASK_STATE_DLY_SUSPENDED:
            case OS_TASK_STATE_SUSPENDED:
                 p_tcb->Prio = OSTCBCurPtr->Prio;           /* Only need to raise the owner's priority               */
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
                 TRACE_OS_MUTEX_TASK_PRIO_INHERIT(p_tcb, p_tcb->Prio);
#endif
                 break;

            case OS_TASK_STATE_PEND:                        /* Change the position of the task in the wait list      */
            case OS_TASK_STATE_PEND_TIMEOUT:
            case OS_TASK_STATE_PEND_SUSPENDED:
            case OS_TASK_STATE_PEND_TIMEOUT_SUSPENDED:
                 OS_PendListChangePrio(p_tcb, OSTCBCurPtr->Prio);
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
                 TRACE_OS_MUTEX_TASK_PRIO_INHERIT(p_tcb, p_tcb->Prio);
#endif
                 break;

            default:
                 OS_CRITICAL_EXIT();
                 *p_err = OS_ERR_STATE_INVALID;
                 return;
        }
    }

    OS_Pend(&pend_data,                                     /* Block task pending on Mutex                           */
            (OS_PEND_OBJ *)((void *)p_mutex),
            OS_TASK_PEND_ON_MUTEX,
            timeout);
    OS_CRITICAL_EXIT_NO_SCHED();
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
    TRACE_OS_MUTEX_PEND_BLOCK(p_mutex);                     /* Record the event.                                     */
#endif
    OSSched();                                              /* Find the next highest priority task ready to run      */

    /* We run again here only after a post, abort, timeout, or deletion woke us up.                                  */
    CPU_CRITICAL_ENTER();
    switch (OSTCBCurPtr->PendStatus) {
        case OS_STATUS_PEND_OK:                             /* We got the mutex                                      */
             if (p_ts != (CPU_TS *)0) {
                 *p_ts = OSTCBCurPtr->TS;
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_MUTEX_PEND(p_mutex);                  /* Record the event.                                     */
#endif
             *p_err = OS_ERR_NONE;
             break;

        case OS_STATUS_PEND_ABORT:                          /* Indicate that we aborted                              */
             if (p_ts != (CPU_TS *)0) {
                 *p_ts = OSTCBCurPtr->TS;
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_MUTEX_PEND_FAILED(p_mutex);           /* Record the event.                                     */
#endif
             *p_err = OS_ERR_PEND_ABORT;
             break;

        case OS_STATUS_PEND_TIMEOUT:                        /* Indicate that we didn't get mutex within timeout      */
             if (p_ts != (CPU_TS *)0) {
                 *p_ts = (CPU_TS )0;                        /* No release occurred, so no valid timestamp            */
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_MUTEX_PEND_FAILED(p_mutex);           /* Record the event.                                     */
#endif
             *p_err = OS_ERR_TIMEOUT;
             break;

        case OS_STATUS_PEND_DEL:                            /* Indicate that object pended on has been deleted       */
             if (p_ts != (CPU_TS *)0) {
                 *p_ts = OSTCBCurPtr->TS;
             }
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_MUTEX_PEND_FAILED(p_mutex);           /* Record the event.                                     */
#endif
             *p_err = OS_ERR_OBJ_DEL;
             break;

        default:
#if (defined(TRACE_CFG_EN) && (TRACE_CFG_EN > 0u))
             TRACE_OS_MUTEX_PEND_FAILED(p_mutex);           /* Record the event.                                     */
#endif
             *p_err = OS_ERR_STATUS_INVALID;
             break;
    }
    CPU_CRITICAL_EXIT();
}