/*
 * Suspend a thread: park it on the blocked list and invoke the scheduler.
 *
 * Two cases succeed: the calling thread suspending itself, or suspending
 * a READY thread whose priority value is numerically larger (i.e. lower
 * priority) than the caller's.  Any other combination returns RET_ERR.
 *
 * @param tid  identifier of the thread to suspend
 * @retval RET_NO_ERR  thread was moved to the blocked list
 * @retval RET_ERR     bad tid, empty slot, or non-suspendable target
 */
err_t thread_suspend(tid_t tid)
{
	cpu_sr_t sr;
	thread_struct *target;

	if (tid < IDLE_THREAD_TID || tid > MAX_THREAD)
		return RET_ERR;
	if (thread_table[tid] == NULL)
		return RET_ERR;

	sr = save_cpu_sr();
	target = thread_table[tid];

	if (current_thread->tid == tid) {
		/* Self-suspend: the running thread sits on no queue, so it
		 * only needs to be appended to the blocked list. */
		target->state = BLOCK;
		insert_back_list(&blocked_list, &target->node);
		restore_cpu_sr(sr);
		schedule(SCHED_THREAD_REQUEST);
		return RET_NO_ERR;
	}
	if (target->prio > current_thread->prio && target->state == READY) {
		/* A ready, lower-priority thread: unlink it from its ready
		 * queue before parking it on the blocked list. */
		target->state = BLOCK;
		delete_list(&target->node);
		insert_back_list(&blocked_list, &target->node);
		restore_cpu_sr(sr);
		schedule(SCHED_THREAD_REQUEST);
		return RET_NO_ERR;
	}
	restore_cpu_sr(sr);
	return RET_ERR;
}
/*
 * Wake a blocked thread: move it from the blocked list back onto the
 * ready queue for its priority and trigger a reschedule.
 *
 * @param tid  identifier of the thread to resume
 * @retval RET_NO_ERR  thread was made READY and the scheduler was invoked
 * @retval RET_ERR     bad tid, empty slot, or thread not in BLOCK state
 */
err_t thread_resume(tid_t tid)
{
	cpu_sr_t sr;
	thread_struct *target;

	if (tid < IDLE_THREAD_TID || tid > MAX_THREAD)
		return RET_ERR;
	if (thread_table[tid] == NULL)
		return RET_ERR;

	sr = save_cpu_sr();
	target = thread_table[tid];
	if (target->state != BLOCK) {
		restore_cpu_sr(sr);
		return RET_ERR;
	}
	target->state = READY;
	delete_list(&target->node);
	insert_back_list(&ready_list[target->prio], &target->node);
	prio_exist_flag[target->prio] = true;
	restore_cpu_sr(sr);
	schedule(SCHED_THREAD_REQUEST);
	return RET_NO_ERR;
}
/**
 * @brief Acquire (P operation) a counting semaphore.
 *
 * If the count is positive it is decremented and the caller continues.
 * Otherwise the caller is queued on the semaphore's wait list, kept
 * sorted by priority (smaller prio value = higher priority first), and
 * the scheduler is invoked to block it.
 *
 * @param sem  the semaphore to acquire; must be initialized
 */
void sem_pend(sem_struct *sem)
{
	cpu_sr_t cpu_sr;
	list_node_t *pnode;
	thread_struct *pthread;

	cpu_sr = save_cpu_sr();
	if (sem->value == 0) {
		/* Insert the caller before the first waiter with a worse
		 * (numerically larger) priority value, keeping the wait
		 * list priority-ordered.
		 * BUGFIX: restored '&current_thread' — the address-of
		 * expressions had been corrupted into mojibake by an
		 * HTML-entity mangle and did not compile. */
		if (!is_empty_list(&sem->wait_list)) {
			for (pnode = begin_list(&sem->wait_list);
			     pnode != end_list(&sem->wait_list);
			     pnode = next_list(pnode)) {
				pthread = entry_list(pnode, thread_struct, node);
				if (current_thread->prio < pthread->prio) {
					current_thread->state = EVENT_WAIT;
					insert_before_list(pnode,
							   &current_thread->node);
					break;
				}
			}
		}
		/* Not inserted above: append behind all existing waiters. */
		if (current_thread->state != EVENT_WAIT) {
			current_thread->state = EVENT_WAIT;
			insert_back_list(&sem->wait_list,
					 &current_thread->node);
		}
		/* BUGFIX: restore the saved CPU state before blocking; the
		 * original returned from this path with interrupts still
		 * disabled, unlike every other function in this file. */
		restore_cpu_sr(cpu_sr);
		schedule(SCHED_THREAD_REQUEST);
		return;
	}
	sem->value--;
	restore_cpu_sr(cpu_sr);
}
/** * @brief time tick advanced * Maintain time_quantum */ void advance_time_tick() { cpu_sr_t cpu_sr; list_node_t *pnode; thread_struct *pthread; thread_struct *readyed_thread = NULL; cpu_sr = save_cpu_sr(); os_time_tick++; /* If there are delays in the list of threads... */ if (!is_empty_list(&delayed_list)) { for (pnode = begin_list(&delayed_list); pnode != end_list(&delayed_list); pnode = next_list(pnode) ) { pthread = entry_list(pnode, thread_struct, node); pthread->delayed_time--; /* ready to change the status */ if (readyed_thread != NULL) { delete_list(&readyed_thread->node); readyed_thread->state = READY; readyed_thread->time_quantum = TIME_QUANTUM; insert_back_list( &ready_list[readyed_thread->prio], &readyed_thread->node); prio_exist_flag[readyed_thread->prio] = true; readyed_thread = NULL; } if (pthread->delayed_time <= 0) { readyed_thread = pthread; } } if (readyed_thread != NULL) { delete_list(&readyed_thread->node); readyed_thread->state = READY; readyed_thread->time_quantum = TIME_QUANTUM; insert_back_list( &ready_list[readyed_thread->prio], &readyed_thread->node); prio_exist_flag[readyed_thread->prio] = true; } } current_thread->time_quantum--; restore_cpu_sr(cpu_sr); }
/*
 * Put the currently executing thread back on the ready list.
 *
 * The thread is appended at the tail of its priority's queue, which is
 * what gives CuRT its priority round-robin policy (skipping this
 * re-queueing would yield plain FIFO within a priority).
 *
 * Caller must have interrupts disabled: this touches the shared ready
 * lists without any locking of its own.
 */
static void put_current_ready()
{
	current_thread->state = READY;
	/* BUGFIX: restored '&current_thread' — the address-of expression
	 * had been corrupted into mojibake by an HTML-entity mangle and
	 * did not compile. */
	insert_back_list(&ready_list[current_thread->prio],
			 &current_thread->node);
	prio_exist_flag[current_thread->prio] = true;
}
/**
 * @brief Create thread
 *
 * Builds the thread control block, initializes its stack, registers it
 * in the global thread table, and places it on the ready queue for its
 * priority.  If the OS is already running and the new thread outranks
 * the caller, the scheduler is invoked immediately.
 *
 * @param thread - thread control block to initialize
 * @param thread_stk - pointer to the thread's stack space
 * @param func - entry function the thread will run
 * @param name - thread name
 * @param prio - thread priority (must be < MAX_PRIO)
 * @param pdata - argument passed to the thread function
 * @retval tid of the new thread on success
 * @retval RET_ERR on bad arguments or no free tid
 */
tid_t thread_create(thread_struct *thread, stk_t *thread_stk,
		    THREAD_FUNC func, char *name, u8_t prio, void *pdata)
{
	cpu_sr_t sr;
	tid_t new_tid;

	/* Reject missing arguments and out-of-range priorities. */
	if (thread == NULL || thread_stk == NULL || func == NULL ||
	    name == NULL || prio >= MAX_PRIO)
		return RET_ERR;

	/* Allocate a slot; there is no fallback if the table is full. */
	new_tid = get_tid();
	if (new_tid == RET_ERR)
		return RET_ERR;

	/* Fill in the control block before publishing it. */
	thread->tid = new_tid;
	thread->stack_ptr = init_thread_stack(func, pdata, thread_stk);
	thread->name = name;
	thread->prio = prio;
	thread->time_quantum = TIME_QUANTUM;
	thread->delayed_time = 0;
	thread->state = READY;

	sr = save_cpu_sr();
	thread_table[new_tid] = thread;
	prio_exist_flag[prio] = true;
	total_thread_cnt++;
	insert_back_list(&ready_list[prio], &thread->node);
	restore_cpu_sr(sr);

	/* Preempt if the new thread has a higher priority (smaller value)
	 * than the one currently running. */
	if (is_start_os == true && current_thread->prio > prio)
		schedule(SCHED_THREAD_REQUEST);

	printf("thread_create:%d\n", prio);
	return new_tid;
}
/**
 * @brief Block the calling thread for a number of system ticks.
 *
 * The caller is marked DELAY, appended to the delayed list (serviced by
 * advance_time_tick), and the scheduler is invoked.
 *
 * @param tick  number of ticks to sleep; must be greater than 0
 * @retval RET_NO_ERR  thread was queued and rescheduled
 * @retval RET_ERR     tick was 0
 */
err_t thread_delay(u32_t tick)
{
	cpu_sr_t cpu_sr;

	if (tick > 0) { /* Delay must be greater than 0. */
		cpu_sr = save_cpu_sr();
		current_thread->state = DELAY;
		current_thread->delayed_time = tick;
		/* BUGFIX: restored '&current_thread' — the address-of
		 * expression had been corrupted into mojibake by an
		 * HTML-entity mangle and did not compile. */
		insert_back_list(&delayed_list, &current_thread->node);
		restore_cpu_sr(cpu_sr);
		schedule(SCHED_THREAD_REQUEST);
		return RET_NO_ERR;
	}
	return RET_ERR;
}
/*
 * Release (V operation) a semaphore.
 *
 * If any thread is waiting, the one at the head of the wait list is
 * dequeued, made READY, and the scheduler is invoked; otherwise the
 * counter is simply incremented.
 *
 * @param sem  the semaphore to release; must be initialized
 */
void sem_post(sem_struct *sem)
{
	cpu_sr_t sr;
	thread_struct *waiter;

	sr = save_cpu_sr();
	if (is_empty_list(&sem->wait_list)) {
		/* No one waiting: just bump the count. */
		sem->value++;
		restore_cpu_sr(sr);
		return;
	}
	waiter = entry_list(delete_front_list(&sem->wait_list),
			    thread_struct, node);
	waiter->state = READY;
	insert_back_list(&ready_list[waiter->prio], &waiter->node);
	prio_exist_flag[waiter->prio] = true;
	restore_cpu_sr(sr);
	schedule(SCHED_THREAD_REQUEST);
}
/**
 * @brief Terminate a thread.
 *
 * Only self-deletion is implemented: the calling thread is marked
 * TERMINATE, queued on the termination wait list, and rescheduled.
 * Requesting deletion of any other thread returns RET_ERR.
 *
 * @param tid  identifier of the thread to delete
 * @retval RET_NO_ERR  calling thread queued for termination
 * @retval RET_ERR     bad tid, empty slot, or tid is not the caller
 */
err_t thread_delete(tid_t tid)
{
	cpu_sr_t cpu_sr;

	if (tid < IDLE_THREAD_TID || tid > MAX_THREAD)
		return RET_ERR;
	if (thread_table[tid] == NULL)
		return RET_ERR;
	cpu_sr = save_cpu_sr();
	if (tid == current_thread->tid) {
		current_thread->state = TERMINATE;
		/* BUGFIX: restored '&current_thread' — the address-of
		 * expression had been corrupted into mojibake by an
		 * HTML-entity mangle and did not compile. */
		insert_back_list(&termination_wait_list,
				 &current_thread->node);
		restore_cpu_sr(cpu_sr);
		schedule(SCHED_THREAD_REQUEST);
		return RET_NO_ERR;
	}
	/* BUGFIX: the original returned here without restoring the saved
	 * CPU state, leaving interrupts disabled on the failure path. */
	restore_cpu_sr(cpu_sr);
	return RET_ERR;
}