nOS_Error nOS_SemDelete (nOS_Sem *sem)
{
    nOS_Error   err;

#if (NOS_CONFIG_SAFE > 0)
    if (sem == NULL) {
        err = NOS_E_INV_OBJ;
    } else if (sem->e.type != NOS_EVENT_SEM) {
        err = NOS_E_INV_OBJ;
    } else
#endif
    {
        nOS_EnterCritical();
        sem->count = 0;
        sem->max   = 0;
#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0) && (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
        if (nOS_DeleteEvent((nOS_Event*)sem)) {
            nOS_Schedule();
        }
#else
        nOS_DeleteEvent((nOS_Event*)sem);
#endif
        nOS_LeaveCritical();
        err = NOS_OK;
    }

    return err;
}
nOS_Error nOS_ThreadSuspendAll (void)
{
    nOS_Error       err;
    nOS_StatusReg   sr;

#if (NOS_CONFIG_SAFE > 0)
#if (NOS_CONFIG_SCHED_LOCK_ENABLE > 0)
    /* Can't suspend all threads from any thread (except idle) when scheduler is locked */
    if ((nOS_lockNestingCounter > 0) && (nOS_runningThread != &nOS_idleHandle)) {
        err = NOS_E_LOCKED;
    } else
#endif
#endif
    {
        nOS_EnterCritical(sr);
        nOS_WalkInList(&nOS_allThreadsList, _SuspendThread, NULL);
        if (nOS_runningThread != &nOS_idleHandle) {
            nOS_Schedule();
        }
        nOS_LeaveCritical(sr);
        err = NOS_OK;
    }

    return err;
}
nOS_Error nOS_FlagDelete (nOS_Flag *flag)
{
    nOS_Error       err;
    nOS_StatusReg   sr;

#if (NOS_CONFIG_SAFE > 0)
    if (flag == NULL) {
        err = NOS_E_INV_OBJ;
    } else
#endif
    {
        nOS_EnterCritical(sr);
#if (NOS_CONFIG_SAFE > 0)
        if (flag->e.type != NOS_EVENT_FLAG) {
            err = NOS_E_INV_OBJ;
        } else
#endif
        {
            flag->flags = NOS_FLAG_NONE;
#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0) && (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
            if (nOS_DeleteEvent((nOS_Event*)flag)) {
                nOS_Schedule();
            }
#else
            nOS_DeleteEvent((nOS_Event*)flag);
#endif
            err = NOS_OK;
        }
        nOS_LeaveCritical(sr);
    }

    return err;
}
nOS_Error nOS_QueueRead (nOS_Queue *queue, void *block, nOS_TickCounter timeout)
{
    nOS_Error       err;
    nOS_StatusReg   sr;
    nOS_Thread      *thread;

#if (NOS_CONFIG_SAFE > 0)
    if (queue == NULL) {
        err = NOS_E_INV_OBJ;
    } else if (block == NULL) {
        err = NOS_E_NULL;
    } else
#endif
    {
        nOS_EnterCritical(sr);
#if (NOS_CONFIG_SAFE > 0)
        if (queue->e.type != NOS_EVENT_QUEUE) {
            err = NOS_E_INV_OBJ;
        } else
#endif
        {
            /* No chance a thread is waiting to read from the queue if count is higher than 0 */
            if (queue->bcount > 0) {
                _Read(queue, block);
                /* Check if a thread is waiting to write to the queue */
                thread = nOS_SendEvent((nOS_Event*)queue, NOS_OK);
                if (thread != NULL) {
                    /* Write the thread's block into the queue */
                    _Write(queue, thread->ext);
#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0) && (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
                    if ((thread->state == NOS_THREAD_READY) && (thread->prio > nOS_runningThread->prio)) {
                        nOS_Schedule();
                    }
#endif
                }
                err = NOS_OK;
            } else if (timeout == NOS_NO_WAIT) {
                err = NOS_E_EMPTY;
            } else if (nOS_isrNestingCounter > 0) {
                err = NOS_E_ISR;
            }
#if (NOS_CONFIG_SCHED_LOCK_ENABLE > 0)
            else if (nOS_lockNestingCounter > 0) {
                err = NOS_E_LOCKED;
            }
#endif
            else if (nOS_runningThread == &nOS_idleHandle) {
                /* Main thread can't wait. */
                err = NOS_E_IDLE;
            } else {
                nOS_runningThread->ext = block;
                err = nOS_WaitForEvent((nOS_Event*)queue, NOS_THREAD_READING_QUEUE, timeout);
            }
        }
        nOS_LeaveCritical(sr);
    }

    return err;
}
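/*
 * Usage sketch (not from the original source): a consumer thread draining the
 * queue read by nOS_QueueRead above. The queue object is assumed to have been
 * set up elsewhere (e.g. with nOS_QueueCreate) for 16-byte blocks, and the
 * _ProcessMessage() helper, the queue name and the timeout value are
 * illustrative assumptions, not part of the nOS API.
 */
#include <stdint.h>
#include "nOS.h"

extern nOS_Queue msgQueue;              /* assumed initialized elsewhere */

static void _ProcessMessage (uint8_t *msg)
{
    (void)msg;                          /* application-specific handling */
}

static void _ConsumerLoop (void *arg)
{
    uint8_t   msg[16];
    nOS_Error err;

    (void)arg;
    while (1) {
        /* Block up to 100 ticks for the next message */
        err = nOS_QueueRead(&msgQueue, msg, 100);
        if (err == NOS_OK) {
            _ProcessMessage(msg);
        } else {
            /* timeout or queue deleted: nothing to process this round */
        }
    }
}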
nOS_Error nOS_QueueRead (nOS_Queue *queue, void *block, nOS_TickCounter timeout)
{
    nOS_Error       err;
    nOS_StatusReg   sr;
    nOS_Thread      *thread;

#if (NOS_CONFIG_SAFE > 0)
    if (queue == NULL) {
        err = NOS_E_INV_OBJ;
    } else if (block == NULL) {
        err = NOS_E_NULL;
    } else
#endif
    {
        nOS_EnterCritical(sr);
#if (NOS_CONFIG_SAFE > 0)
        if (queue->e.type != NOS_EVENT_QUEUE) {
            err = NOS_E_INV_OBJ;
        } else
#endif
        /* No chance a thread is waiting to read from the queue if count is higher than 0 */
        if (queue->bcount > 0) {
            _Read(queue, block);
            /* Check if a thread is waiting to write to the queue */
            thread = nOS_SendEvent((nOS_Event*)queue, NOS_OK);
            if (thread != NULL) {
                /* Write the thread's block into the queue */
                _Write(queue, thread->ext);
#if (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
                /* Verify if a higher priority thread is ready to run */
                nOS_Schedule();
#endif
            }
            err = NOS_OK;
        } else if (timeout == NOS_NO_WAIT) {
            err = NOS_E_EMPTY;
        } else {
            nOS_runningThread->ext = block;
            err = nOS_WaitForEvent((nOS_Event*)queue, NOS_THREAD_READING_QUEUE
#if (NOS_CONFIG_WAITING_TIMEOUT_ENABLE > 0)
                                   ,timeout
#elif (NOS_CONFIG_SLEEP_ENABLE > 0) || (NOS_CONFIG_SLEEP_UNTIL_ENABLE > 0)
                                   ,NOS_WAIT_INFINITE
#endif
                                   );
        }
        nOS_LeaveCritical(sr);
    }

    return err;
}
nOS_Error nOS_ThreadSuspend (nOS_Thread *thread)
{
    nOS_Error       err;
    nOS_StatusReg   sr;

    if (thread == NULL) {
        thread = nOS_runningThread;
    }
#if (NOS_CONFIG_SAFE > 0)
    if (thread == &nOS_idleHandle) {
        err = NOS_E_INV_OBJ;
    } else if (thread == nOS_runningThread) {
#if (NOS_CONFIG_SCHED_LOCK_ENABLE > 0)
        if (nOS_lockNestingCounter > 0) {
            /* Can't switch context if scheduler is locked */
            err = NOS_E_LOCKED;
        } else
#endif
        {
            err = NOS_OK;
        }
    } else {
        err = NOS_OK;
    }
    if (err == NOS_OK)
#endif
    {
        nOS_EnterCritical(sr);
#if (NOS_CONFIG_SAFE > 0)
        if (thread->state == NOS_THREAD_STOPPED) {
            err = NOS_E_INV_OBJ;
        } else if (thread->state & NOS_THREAD_SUSPENDED) {
            err = NOS_E_INV_STATE;
        } else
#endif
        {
            _SuspendThread(thread, NULL);
            if (thread == nOS_runningThread) {
                nOS_Schedule();
            }
            err = NOS_OK;
        }
        nOS_LeaveCritical(sr);
    }

    return err;
}
nOS_Error nOS_ThreadResume (nOS_Thread *thread)
{
    nOS_Error       err;
    nOS_StatusReg   sr;
#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0) && (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
    bool            sched = false;
#endif

#if (NOS_CONFIG_SAFE > 0)
    if (thread == NULL) {
        err = NOS_E_INV_OBJ;
    } else if (thread == nOS_runningThread) {
        err = NOS_E_INV_OBJ;
    } else if (thread == &nOS_idleHandle) {
        err = NOS_E_INV_OBJ;
    } else
#endif
    {
        nOS_EnterCritical(sr);
#if (NOS_CONFIG_SAFE > 0)
        if (thread->state == NOS_THREAD_STOPPED) {
            err = NOS_E_INV_OBJ;
        } else if ( !(thread->state & NOS_THREAD_SUSPENDED) ) {
            err = NOS_E_INV_STATE;
        } else
#endif
        {
#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0) && (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
            _ResumeThread(thread, &sched);
            if (sched) {
                nOS_Schedule();
            }
#else
            _ResumeThread(thread, NULL);
#endif
            err = NOS_OK;
        }
        nOS_LeaveCritical(sr);
    }

    return err;
}
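/*
 * Usage sketch (not from the original source): parking a worker thread while
 * the peripheral it drives is powered down, then waking it again. The thread
 * object and helper names are illustrative. Note from the code above that,
 * with NOS_CONFIG_SAFE enabled, a thread cannot resume itself, and that
 * suspending the calling thread (thread == NULL or nOS_runningThread)
 * schedules away immediately.
 */
#include "nOS.h"

extern nOS_Thread sensorThread;     /* assumed created elsewhere */

static void _SensorPowerDown (void)
{
    (void)nOS_ThreadSuspend(&sensorThread);   /* worker stops running after this */
    /* ... gate clocks / power rails ... */
}

static void _SensorPowerUp (void)
{
    /* ... restore power ... */
    (void)nOS_ThreadResume(&sensorThread);    /* worker becomes ready again */
}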
nOS_Error nOS_SemGive (nOS_Sem *sem)
{
    nOS_Error       err;
    nOS_StatusReg   sr;
    nOS_Thread      *thread;

#if (NOS_CONFIG_SAFE > 0)
    if (sem == NULL) {
        err = NOS_E_INV_OBJ;
    } else
#endif
    {
        nOS_EnterCritical(sr);
#if (NOS_CONFIG_SAFE > 0)
        if (sem->e.type != NOS_EVENT_SEM) {
            err = NOS_E_INV_OBJ;
        } else
#endif
        {
            thread = nOS_SendEvent((nOS_Event*)sem, NOS_OK);
            if (thread != NULL) {
#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0) && (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
                if ((thread->state == NOS_THREAD_READY) && (thread->prio > nOS_runningThread->prio)) {
                    nOS_Schedule();
                }
#endif
                err = NOS_OK;
            } else if (sem->count < sem->max) {
                sem->count++;
                err = NOS_OK;
            } else if (sem->max > 0) {
                err = NOS_E_OVERFLOW;
            } else {
                /* No thread waiting to consume sem, inform producer */
                err = NOS_E_NO_CONSUMER;
            }
        }
        nOS_LeaveCritical(sr);
    }

    return err;
}
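/*
 * Usage sketch (not from the original source): signalling a semaphore from an
 * interrupt handler. The semaphore is assumed to have been created elsewhere
 * (e.g. with nOS_SemCreate); the handler name is illustrative, and depending
 * on the port the handler may also need nOS's ISR entry/exit handling (e.g.
 * the NOS_ISR() wrapper). Per the nOS_SemGive chain above, NOS_E_NO_CONSUMER
 * is only returned for a zero-max semaphore with no thread waiting.
 */
#include "nOS.h"

extern nOS_Sem uartRxSem;   /* assumed initialized elsewhere */

void UART_RxHandler (void)  /* hypothetical interrupt handler body */
{
    nOS_Error err = nOS_SemGive(&uartRxSem);

    if (err == NOS_E_OVERFLOW) {
        /* counter already at max: the consumer is falling behind */
    }
    /* A waiting thread, if any, is made ready inside nOS_SemGive;
       preemption happens there when the configuration enables it. */
}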
nOS_Error nOS_FlagSend (nOS_Flag *flag, nOS_FlagBits flags, nOS_FlagBits mask)
{
    nOS_Error       err;
    nOS_StatusReg   sr;
    nOS_FlagResult  res;

#if (NOS_CONFIG_SAFE > 0)
    if (flag == NULL) {
        err = NOS_E_INV_OBJ;
    } else
#endif
    {
        res.rflags = NOS_FLAG_NONE;
#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0) && (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
        res.sched = false;
#endif

        nOS_EnterCritical(sr);
#if (NOS_CONFIG_SAFE > 0)
        if (flag->e.type != NOS_EVENT_FLAG) {
            err = NOS_E_INV_OBJ;
        } else
#endif
        {
            flag->flags ^= ((flag->flags ^ flags) & mask);
            nOS_WalkInList(&flag->e.waitList, _TestFlag, &res);

            /* Clear all flags that have awoken the waiting threads. */
            flag->flags &=~ res.rflags;

#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0) && (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
            /* Schedule only if one of the awoken threads has a higher priority. */
            if (res.sched) {
                nOS_Schedule();
            }
#endif
            err = NOS_OK;
        }
        nOS_LeaveCritical(sr);
    }

    return err;
}
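/*
 * Usage sketch (not from the original source): reporting event bits to a flag
 * object assumed to have been created elsewhere. The bit names are
 * illustrative. Per the flags ^= ((flags ^ value) & mask) update above,
 * passing the same value as flags and mask sets those bits, while a zero bit
 * in flags with the bit selected in mask clears it.
 */
#include "nOS.h"

#define APP_EVT_RX_DONE   ((nOS_FlagBits)0x01)   /* illustrative bit names */
#define APP_EVT_TX_DONE   ((nOS_FlagBits)0x02)

extern nOS_Flag appEvents;    /* assumed initialized elsewhere */

static void _NotifyRxDone (void)
{
    /* Set only the RX bit, leave the others untouched */
    (void)nOS_FlagSend(&appEvents, APP_EVT_RX_DONE, APP_EVT_RX_DONE);
}

static void _ClearTxDone (void)
{
    /* Clear the TX bit: flags = 0 with the bit selected in mask */
    (void)nOS_FlagSend(&appEvents, NOS_FLAG_NONE, APP_EVT_TX_DONE);
}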
nOS_Error nOS_ThreadAbort (nOS_Thread *thread)
{
    nOS_Error       err;
    nOS_StatusReg   sr;

#if (NOS_CONFIG_SAFE > 0)
    if (thread == NULL) {
        err = NOS_E_INV_OBJ;
    } else if (thread == nOS_runningThread) {
        err = NOS_E_INV_OBJ;
    } else if (thread == &nOS_idleHandle) {
        err = NOS_E_INV_OBJ;
    } else
#endif
    {
        nOS_EnterCritical(sr);
#if (NOS_CONFIG_SAFE > 0)
        if (thread->state == NOS_THREAD_STOPPED) {
            err = NOS_E_INV_OBJ;
        } else if (thread->state & NOS_THREAD_SUSPENDED) {
            err = NOS_E_INV_STATE;
        } else if ( !(thread->state & NOS_THREAD_WAITING_MASK) ) {
            err = NOS_E_INV_STATE;
        } else
#endif
        {
            nOS_WakeUpThread(thread, NOS_E_ABORT);
#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0) && (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
            if (thread->prio > nOS_runningThread->prio) {
                nOS_Schedule();
            }
#endif
            err = NOS_OK;
        }
        nOS_LeaveCritical(sr);
    }

    return err;
}
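/*
 * Usage sketch (not from the original source): kicking a worker out of a
 * blocking wait so it can notice a shutdown request. The worker thread and
 * the flag are illustrative. As the checks above show, the call only succeeds
 * if the target is currently waiting and not suspended; inside the worker,
 * the interrupted wait returns NOS_E_ABORT.
 */
#include <stdbool.h>
#include "nOS.h"

extern nOS_Thread     workerThread;       /* assumed created elsewhere */
extern volatile bool  shutdownRequested;  /* set by the application */

static void _RequestWorkerShutdown (void)
{
    shutdownRequested = true;
    if (nOS_ThreadAbort(&workerThread) == NOS_E_INV_STATE) {
        /* Worker wasn't blocked (or is suspended); it will see the flag
           the next time it reaches its wait point. */
    }
}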
nOS_Error nOS_ThreadResumeAll (void)
{
    nOS_StatusReg   sr;
#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0) && (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
    bool            sched = false;
#endif

    nOS_EnterCritical(sr);
#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0) && (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
    nOS_WalkInList(&nOS_allThreadsList, _ResumeThread, &sched);
    if (sched) {
        nOS_Schedule();
    }
#else
    nOS_WalkInList(&nOS_allThreadsList, _ResumeThread, NULL);
#endif
    nOS_LeaveCritical(sr);

    return NOS_OK;
}
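/*
 * Usage sketch (not from the original source): pausing all application
 * threads from the main context while shared state is reconfigured, then
 * releasing them. As nOS_ThreadSuspendAll() above shows, a regular thread
 * calling it suspends itself as well and schedules away, so this pattern is
 * only useful from the main/idle context. _ReloadConfiguration() is a
 * hypothetical application helper.
 */
#include "nOS.h"

static void _ReloadConfiguration (void)
{
    /* application-specific, potentially long-running update */
}

void App_ReconfigureFromMain (void)     /* expected to run in the main context */
{
    if (nOS_ThreadSuspendAll() == NOS_OK) {
        _ReloadConfiguration();          /* no application thread can run here */
        (void)nOS_ThreadResumeAll();
    }
}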
nOS_Error nOS_QueueDelete (nOS_Queue *queue)
{
    nOS_Error       err;
    nOS_StatusReg   sr;

#if (NOS_CONFIG_SAFE > 0)
    if (queue == NULL) {
        err = NOS_E_INV_OBJ;
    } else
#endif
    {
        nOS_EnterCritical(sr);
#if (NOS_CONFIG_SAFE > 0)
        if (queue->e.type != NOS_EVENT_QUEUE) {
            err = NOS_E_INV_OBJ;
        } else
#endif
        {
            queue->buffer = NULL;
            queue->bsize  = 0;
            queue->bcount = 0;
            queue->bmax   = 0;
            queue->r      = 0;
            queue->w      = 0;
#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0) && (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
            if (nOS_DeleteEvent((nOS_Event*)queue)) {
                nOS_Schedule();
            }
#else
            nOS_DeleteEvent((nOS_Event*)queue);
#endif
            err = NOS_OK;
        }
        nOS_LeaveCritical(sr);
    }

    return err;
}
nOS_Error nOS_ThreadSetPriority (nOS_Thread *thread, uint8_t prio)
{
    nOS_Error       err;
    nOS_StatusReg   sr;

    if (thread == NULL) {
        thread = nOS_runningThread;
    }
#if (NOS_CONFIG_SAFE > 0)
    if (prio > NOS_CONFIG_HIGHEST_THREAD_PRIO) {
        err = NOS_E_INV_VAL;
    } else
#endif
    {
        nOS_EnterCritical(sr);
#if (NOS_CONFIG_SAFE > 0)
        if (thread->state == NOS_THREAD_STOPPED) {
            err = NOS_E_INV_OBJ;
        } else
#endif
        {
            if (prio != thread->prio) {
                nOS_SetThreadPrio(thread, prio);
#if (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
                if (prio > nOS_runningThread->prio) {
                    nOS_Schedule();
                }
#endif
            }
            err = NOS_OK;
        }
        nOS_LeaveCritical(sr);
    }

    return err;
}
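/*
 * Usage sketch (not from the original source): temporarily boosting a worker
 * thread before a deadline-sensitive burst, then restoring its priority. The
 * thread object and the priority values are illustrative; as the code above
 * shows, passing NULL targets the calling thread.
 */
#include "nOS.h"

extern nOS_Thread workerThread;     /* assumed created elsewhere */

static void _BoostWorkerForBurst (void)
{
    /* Raise the worker above its normal level (values are illustrative) */
    if (nOS_ThreadSetPriority(&workerThread, 7) == NOS_OK) {
        /* ... burst happens; the scheduler preempts if the worker now
           outranks the running thread ... */
        (void)nOS_ThreadSetPriority(&workerThread, 3);   /* restore */
    }
}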
nOS_Error nOS_FlagSend (nOS_Flag *flag, nOS_FlagBits flags, nOS_FlagBits mask)
{
    nOS_Error       err;
    nOS_StatusReg   sr;
    nOS_FlagBits    res;

#if (NOS_CONFIG_SAFE > 0)
    if (flag == NULL) {
        err = NOS_E_INV_OBJ;
    } else
#endif
    {
        nOS_EnterCritical(sr);
#if (NOS_CONFIG_SAFE > 0)
        if (flag->e.type != NOS_EVENT_FLAG) {
            err = NOS_E_INV_OBJ;
        } else
#endif
        {
            flag->flags ^= ((flag->flags ^ flags) & mask);
            res = NOS_FLAG_NONE;
            nOS_WalkInList(&flag->e.waitList, _TestFlag, &res);

            /* Clear all flags that have awoken the waiting threads. */
            flag->flags &=~ res;

#if (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
            nOS_Schedule();
#endif
            err = NOS_OK;
        }
        nOS_LeaveCritical(sr);
    }

    return err;
}
nOS_Error nOS_QueueWrite (nOS_Queue *queue, void *block, nOS_TickCounter timeout)
{
    nOS_Error       err;
    nOS_StatusReg   sr;
    nOS_Thread      *thread;

#if (NOS_CONFIG_SAFE > 0)
    if (queue == NULL) {
        err = NOS_E_INV_OBJ;
    } else if (block == NULL) {
        err = NOS_E_NULL;
    } else
#endif
    {
        nOS_EnterCritical(sr);
#if (NOS_CONFIG_SAFE > 0)
        if (queue->e.type != NOS_EVENT_QUEUE) {
            err = NOS_E_INV_OBJ;
        } else
#endif
        /* If count equals 0, some threads may be waiting to read from the queue */
        if (queue->bcount == 0) {
            /* Check if a thread is waiting to read from the queue */
            thread = nOS_SendEvent((nOS_Event*)queue, NOS_OK);
            if (thread != NULL) {
                /* Direct copy between thread buffers */
                memcpy(thread->ext, block, queue->bsize);
#if (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
                /* Verify if a higher priority thread is ready to run */
                nOS_Schedule();
#endif
                err = NOS_OK;
            } else if (queue->buffer != NULL) {
                /* No thread waiting to read from the queue, so store the block */
                _Write(queue, block);
                err = NOS_OK;
            } else {
                /* No thread waiting to consume the message, inform the producer */
                err = NOS_E_NO_CONSUMER;
            }
        } else if (queue->bcount < queue->bmax) {
            /* No chance a thread is waiting to read from the queue if count is higher than 0 */
            _Write(queue, block);
            err = NOS_OK;
        } else if (timeout == NOS_NO_WAIT) {
            err = NOS_E_FULL;
        } else {
            nOS_runningThread->ext = block;
            err = nOS_WaitForEvent((nOS_Event*)queue, NOS_THREAD_WRITING_QUEUE
#if (NOS_CONFIG_WAITING_TIMEOUT_ENABLE > 0)
                                   ,timeout
#elif (NOS_CONFIG_SLEEP_ENABLE > 0) || (NOS_CONFIG_SLEEP_UNTIL_ENABLE > 0)
                                   ,NOS_WAIT_INFINITE
#endif
                                   );
        }
        nOS_LeaveCritical(sr);
    }

    return err;
}
nOS_Error nOS_ThreadDelete (nOS_Thread *thread)
{
    nOS_Error       err;
    nOS_StatusReg   sr;

    if (thread == NULL) {
        thread = nOS_runningThread;
    }
#if (NOS_CONFIG_SAFE > 0)
    /* Main thread can't be deleted */
    if (thread == &nOS_idleHandle) {
        err = NOS_E_INV_OBJ;
    } else if (thread == nOS_runningThread) {
#if (NOS_CONFIG_SCHED_LOCK_ENABLE > 0)
        /* Can't switch context if scheduler is locked */
        if (nOS_lockNestingCounter > 0) {
            err = NOS_E_LOCKED;
        } else
#endif
        {
            err = NOS_OK;
        }
    } else
#endif
    {
        err = NOS_OK;
    }

    if (err == NOS_OK) {
        nOS_EnterCritical(sr);
#if (NOS_CONFIG_SAFE > 0)
        if (thread->state == NOS_THREAD_STOPPED) {
            err = NOS_E_INV_OBJ;
        } else
#endif
        {
#if (NOS_CONFIG_THREAD_SUSPEND_ALL_ENABLE > 0)
            nOS_RemoveFromList(&nOS_allThreadsList, &thread->node);
#endif
            if (thread->state == NOS_THREAD_READY) {
                nOS_RemoveThreadFromReadyList(thread);
            } else if (thread->state & NOS_THREAD_WAITING_MASK) {
                if (thread->event != NULL) {
                    nOS_RemoveFromList(&thread->event->waitList, &thread->readyWait);
                }
#if (NOS_CONFIG_WAITING_TIMEOUT_ENABLE > 0) || (NOS_CONFIG_SLEEP_ENABLE > 0) || (NOS_CONFIG_SLEEP_UNTIL_ENABLE > 0)
                if (thread->state & NOS_THREAD_WAIT_TIMEOUT) {
                    nOS_RemoveFromList(&nOS_timeoutThreadsList, &thread->tout);
                }
#endif
            }
            thread->state   = NOS_THREAD_STOPPED;
            thread->event   = NULL;
            thread->ext     = NULL;
#if (NOS_CONFIG_WAITING_TIMEOUT_ENABLE > 0) || (NOS_CONFIG_SLEEP_ENABLE > 0) || (NOS_CONFIG_SLEEP_UNTIL_ENABLE > 0)
            thread->timeout = 0;
#endif
            thread->error   = NOS_OK;
            if (thread == nOS_runningThread) {
                /* Will never return */
                nOS_Schedule();
            }
        }
        nOS_LeaveCritical(sr);
    }

    return err;
}
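/*
 * Usage sketch (not from the original source): a run-to-completion worker
 * that deletes itself when its job is done, and an alternative cleanup from
 * another context. The thread object and helper names are illustrative. As
 * the code above shows, nOS_ThreadDelete(NULL) applies to the calling thread
 * and the final nOS_Schedule() never returns to it.
 */
#include "nOS.h"

extern nOS_Thread oneShotThread;    /* assumed created elsewhere */

static void _OneShotEntry (void *arg)
{
    (void)arg;
    /* ... perform the one-time job ... */
    (void)nOS_ThreadDelete(NULL);   /* self-delete; does not return */
}

void App_ReapOneShot (void)         /* alternative: delete it from another context */
{
    if (nOS_ThreadDelete(&oneShotThread) == NOS_E_INV_OBJ) {
        /* already stopped or never created */
    }
}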
nOS_Error nOS_QueueWrite (nOS_Queue *queue, void *block, nOS_TickCounter timeout)
{
    nOS_Error       err;
    nOS_StatusReg   sr;
    nOS_Thread      *thread;

#if (NOS_CONFIG_SAFE > 0)
    if (queue == NULL) {
        err = NOS_E_INV_OBJ;
    } else if (block == NULL) {
        err = NOS_E_NULL;
    } else
#endif
    {
        nOS_EnterCritical(sr);
#if (NOS_CONFIG_SAFE > 0)
        if (queue->e.type != NOS_EVENT_QUEUE) {
            err = NOS_E_INV_OBJ;
        } else
#endif
        {
            /* If count equals 0, some threads may be waiting to read from the queue */
            if (queue->bcount == 0) {
                /* Check if a thread is waiting to read from the queue */
                thread = nOS_SendEvent((nOS_Event*)queue, NOS_OK);
                if (thread != NULL) {
                    /* Direct copy between thread buffers */
                    memcpy(thread->ext, block, queue->bsize);
#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0) && (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
                    if ((thread->state == NOS_THREAD_READY) && (thread->prio > nOS_runningThread->prio)) {
                        nOS_Schedule();
                    }
#endif
                    err = NOS_OK;
                } else if (queue->buffer != NULL) {
                    /* No thread waiting to read from the queue, so store the block */
                    _Write(queue, block);
                    err = NOS_OK;
                } else {
                    /* No thread waiting to consume the message, inform the producer */
                    err = NOS_E_NO_CONSUMER;
                }
            } else if (queue->bcount < queue->bmax) {
                /* No chance a thread is waiting to read from the queue if count is higher than 0 */
                _Write(queue, block);
                err = NOS_OK;
            } else if (timeout == NOS_NO_WAIT) {
                err = NOS_E_FULL;
            } else if (nOS_isrNestingCounter > 0) {
                err = NOS_E_ISR;
            }
#if (NOS_CONFIG_SCHED_LOCK_ENABLE > 0)
            else if (nOS_lockNestingCounter > 0) {
                err = NOS_E_LOCKED;
            }
#endif
            else if (nOS_runningThread == &nOS_idleHandle) {
                /* Main thread can't wait. */
                err = NOS_E_IDLE;
            } else {
                nOS_runningThread->ext = block;
                err = nOS_WaitForEvent((nOS_Event*)queue, NOS_THREAD_WRITING_QUEUE, timeout);
            }
        }
        nOS_LeaveCritical(sr);
    }

    return err;
}
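/*
 * Usage sketch (not from the original source): a producer posting fixed-size
 * messages without blocking, distinguishing the error paths visible in
 * nOS_QueueWrite above. The queue, block size and payload layout are
 * illustrative and assumed to be set up elsewhere (e.g. with nOS_QueueCreate).
 */
#include <stdint.h>
#include <string.h>
#include "nOS.h"

extern nOS_Queue msgQueue;          /* assumed initialized elsewhere, 16-byte blocks */

static nOS_Error _PostMessage (const uint8_t *payload, size_t len)
{
    uint8_t   msg[16];
    nOS_Error err;

    memset(msg, 0, sizeof(msg));
    memcpy(msg, payload, len < sizeof(msg) ? len : sizeof(msg));

    /* Try to post without blocking */
    err = nOS_QueueWrite(&msgQueue, msg, NOS_NO_WAIT);
    if (err == NOS_E_FULL) {
        /* Queue full: caller may retry later, or pass a real timeout to block */
    } else if (err == NOS_E_NO_CONSUMER) {
        /* Bufferless queue and no reader waiting: the message was dropped */
    }
    return err;
}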
nOS_Error nOS_ThreadCreate (nOS_Thread *thread, nOS_ThreadEntry entry, void *arg, nOS_Stack *stack, size_t ssize
#ifdef NOS_USE_SEPARATE_CALL_STACK
                            ,size_t cssize
#endif
#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0)
                            ,uint8_t prio
#endif
#if (NOS_CONFIG_THREAD_SUSPEND_ENABLE > 0)
                            ,nOS_ThreadState state
#endif
#if (NOS_CONFIG_THREAD_NAME_ENABLE > 0)
                            ,const char *name
#endif
                            )
{
    nOS_Error       err;
    nOS_StatusReg   sr;

#if (NOS_CONFIG_SAFE > 0)
    if (thread == NULL) {
        err = NOS_E_INV_OBJ;
    } else if (thread == &nOS_idleHandle) {
        err = NOS_E_INV_OBJ;
    } else if (entry == NULL) {
        err = NOS_E_INV_VAL;
    } else if (stack == NULL) {
        err = NOS_E_INV_VAL;
    } else
#ifndef NOS_SIMULATED_STACK
    if (ssize == 0) {
        err = NOS_E_INV_VAL;
    } else
#endif
#ifdef NOS_USE_SEPARATE_CALL_STACK
    if (cssize == 0) {
        err = NOS_E_INV_VAL;
    } else
#endif
#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0)
    if (prio > NOS_CONFIG_HIGHEST_THREAD_PRIO) {
        err = NOS_E_INV_PRIO;
    } else
#endif
#if (NOS_CONFIG_THREAD_SUSPEND_ENABLE > 0)
    if ((state != NOS_THREAD_READY) && (state != NOS_THREAD_SUSPENDED)) {
        err = NOS_E_INV_STATE;
    } else
#endif
#endif
    {
        nOS_EnterCritical(sr);
#if (NOS_CONFIG_SAFE > 0)
        if (thread->state != NOS_THREAD_STOPPED) {
            err = NOS_E_INV_OBJ;
        } else
#endif
        {
#if (NOS_CONFIG_HIGHEST_THREAD_PRIO > 0)
            thread->prio = prio;
#endif
            thread->state = NOS_THREAD_READY;
#if (NOS_CONFIG_THREAD_SUSPEND_ENABLE > 0)
            if (state == NOS_THREAD_SUSPENDED) {
                thread->state = (nOS_ThreadState)(thread->state | NOS_THREAD_SUSPENDED);
            }
#endif
            thread->event = NULL;
            thread->ext   = NULL;
#if (NOS_CONFIG_THREAD_NAME_ENABLE > 0)
            thread->name  = name;
#endif
            thread->error = NOS_OK;
            thread->readyWait.payload = thread;
#if (NOS_CONFIG_WAITING_TIMEOUT_ENABLE > 0) || (NOS_CONFIG_SLEEP_ENABLE > 0) || (NOS_CONFIG_SLEEP_UNTIL_ENABLE > 0)
            thread->tout.payload = thread;
            thread->timeout = 0;
#endif
#if (NOS_CONFIG_THREAD_SUSPEND_ALL_ENABLE > 0)
            thread->node.payload = thread;
            nOS_AppendToList(&nOS_allThreadsList, &thread->node);
#endif
            nOS_InitContext(thread, stack, ssize
#ifdef NOS_USE_SEPARATE_CALL_STACK
                            ,cssize
#endif
                            ,entry, arg);
#if (NOS_CONFIG_THREAD_SUSPEND_ENABLE > 0)
            if (thread->state == NOS_THREAD_READY)
#endif
            {
                nOS_AppendThreadToReadyList(thread);
#if (NOS_CONFIG_SCHED_PREEMPTIVE_ENABLE > 0)
                if (prio > nOS_runningThread->prio) {
                    nOS_Schedule();
                }
#endif
            }
            err = NOS_OK;
        }
        nOS_LeaveCritical(sr);
    }

    return err;
}
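/*
 * Usage sketch (not from the original source): creating a worker thread,
 * assuming a configuration with NOS_CONFIG_HIGHEST_THREAD_PRIO > 0,
 * NOS_CONFIG_THREAD_SUSPEND_ENABLE > 0 and NOS_CONFIG_THREAD_NAME_ENABLE > 0,
 * and without NOS_USE_SEPARATE_CALL_STACK, so the argument list matches the
 * prototype above. Stack size and priority values are illustrative.
 */
#include "nOS.h"

static nOS_Thread   workerThread;
static nOS_Stack    workerStack[128];

static void _WorkerEntry (void *arg)
{
    (void)arg;
    while (1) {
        /* application work, typically blocking on an nOS event object */
    }
}

void App_StartWorker (void)
{
    nOS_Error err = nOS_ThreadCreate(&workerThread,
                                     _WorkerEntry,
                                     NULL,                                          /* entry argument */
                                     workerStack,
                                     sizeof(workerStack)/sizeof(workerStack[0]),    /* stack size in nOS_Stack units */
                                     3,                                             /* priority <= NOS_CONFIG_HIGHEST_THREAD_PRIO */
                                     NOS_THREAD_READY,                              /* start immediately */
                                     "worker");
    if (err != NOS_OK) {
        /* invalid arguments or thread object already in use */
    }
}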