/*
 * Post an event to the global event queue. This function may be called
 * from interrupt context.
 *
 * p_q semantics: p is the receiving process (PROCESS_BROADCAST for all),
 * ev the event id, data an opaque payload pointer.
 * Returns PROCESS_ERR_OK on success; asserts if the queue is full.
 */
int process_post(struct process *p, process_event_t ev, process_data_t data)
{
    /* Fix: slot index is a stack local, not `static` — a static scratch
       variable is clobbered when an ISR preempts a task inside this
       function, corrupting the queue. */
    process_num_events_t snum;

    RAW_SR_ALLOC();

    /* Advisory overflow check; nevents is read outside the critical
       section here, so this is a best-effort debug trap only. */
    if (nevents == PROCESS_CONF_NUMEVENTS) {
        RAW_ASSERT(0);
    }

    RAW_CRITICAL_ENTER();
    snum = (process_num_events_t)(fevent + nevents) & (PROCESS_CONF_NUMEVENTS - 1);
    /* Fix: fill the slot BEFORE publishing it via ++nevents, and inside
       the critical section — the original incremented nevents first and
       wrote the slot after RAW_CRITICAL_EXIT, so a consumer (do_event)
       could dequeue an unfilled entry. */
    events[snum].ev = ev;
    events[snum].data = data;
    events[snum].p = p;
    ++nevents;
    /* High-water mark, also shared state, so keep it protected. */
    if (nevents > process_maxevents) {
        process_maxevents = nevents;
    }
    RAW_CRITICAL_EXIT();

    return PROCESS_ERR_OK;
}
/*
 * Delete a queue object. All tasks blocked on the queue are woken up
 * (they return with a block-deleted status) and the object is
 * invalidated so later operations fail the type check.
 * Must not be called from an ISR (ends with raw_sched()).
 */
RAW_U16 raw_queue_delete(RAW_QUEUE *p_q)
{
    LIST *block_list_head;

    RAW_SR_ALLOC();

#if (RAW_QUEUE_FUNCTION_CHECK > 0)
    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
#endif

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    block_list_head = &p_q->common_block_obj.block_list;
    /* Consistency fix: use the named constant (as raw_semaphore_delete /
       raw_mutex_delete do) instead of a bare 0u. */
    p_q->common_block_obj.object_type = RAW_OBJ_TYPE_NONE;

    /* All tasks blocked on this queue are woken up. */
    while (!is_list_empty(block_list_head)) {
        /* Consistency fix: raw_list_entry, matching the sibling delete
           services. */
        delete_pend_obj(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
    }

    RAW_CRITICAL_EXIT();

    TRACE_QUEUE_DELETE(raw_task_active, p_q);

    raw_sched();
    return RAW_SUCCESS;
}
/*
 * Delete a semaphore object. Every task pended on it is woken up and the
 * object type is cleared so stale handles fail the type check.
 * Not callable from an ISR.
 */
RAW_OS_ERROR raw_semaphore_delete(RAW_SEMAPHORE *semaphore_ptr)
{
    LIST *pend_list;

    RAW_SR_ALLOC();

#if (RAW_SEMA_FUNCTION_CHECK > 0)
    if (semaphore_ptr == 0) {
        return RAW_NULL_OBJECT;
    }
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
#endif

    RAW_CRITICAL_ENTER();

    if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* Invalidate the object first so no new task can pend on it. */
    pend_list = &semaphore_ptr->common_block_obj.block_list;
    semaphore_ptr->common_block_obj.object_type = RAW_OBJ_TYPE_NONE;

    /* Wake every task that was blocked on this semaphore. */
    for (;;) {
        if (is_list_empty(pend_list)) {
            break;
        }
        delete_pend_obj(raw_list_entry(pend_list->next, RAW_TASK_OBJ, task_list));
    }

    RAW_CRITICAL_EXIT();

    TRACE_SEMAPHORE_DELETE(raw_task_active, semaphore_ptr);

    /* Woken tasks may outrank the caller — reschedule. */
    raw_sched();
    return RAW_SUCCESS;
}
/*
 * Delete a mutex object. If the mutex is currently owned it is released
 * first (restoring the owner's priority), then every blocked task is
 * woken and the object is invalidated.
 * Must not be called from an ISR (ends with raw_sched()).
 */
RAW_OS_ERROR raw_mutex_delete(RAW_MUTEX *mutex_ptr)
{
    LIST *block_list_head;

    RAW_SR_ALLOC();

#if (RAW_MUTEX_FUNCTION_CHECK > 0)
    if (mutex_ptr == 0) {
        return RAW_NULL_OBJECT;
    }
    /* Consistency fix: the other *_delete services reject ISR callers,
       and this function also ends with raw_sched(), which must not run
       in interrupt context. */
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
#endif

    RAW_CRITICAL_ENTER();

    if (mutex_ptr->common_block_obj.object_type != RAW_MUTEX_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    block_list_head = &mutex_ptr->common_block_obj.block_list;
    mutex_ptr->common_block_obj.object_type = RAW_OBJ_TYPE_NONE;

    /* If someone owns the mutex, release it on their behalf so their
       priority is restored before the object disappears. */
    if (mutex_ptr->mtxtsk) {
        release_mutex(mutex_ptr->mtxtsk, mutex_ptr);
    }

    /* All tasks blocked on this mutex are woken up. */
    while (!is_list_empty(block_list_head)) {
        delete_pend_obj(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
    }

    RAW_CRITICAL_EXIT();

    TRACE_MUTEX_DELETE(raw_task_active, mutex_ptr);

    raw_sched();
    return RAW_SUCCESS;
}
/*
 * Snapshot a queue-size object's counters.
 *
 * Outputs: *queue_free_msg_size = remaining capacity,
 *          *queue_peak_msg_size = high-water mark,
 *          *queue_current_msg   = messages currently queued.
 * Returns RAW_SUCCESS, or a null/type error code.
 */
RAW_OS_ERROR raw_queue_size_get_information(RAW_QUEUE_SIZE *p_q, MSG_SIZE_TYPE *queue_free_msg_size, MSG_SIZE_TYPE *queue_peak_msg_size, MSG_SIZE_TYPE *queue_current_msg)
{
    RAW_SR_ALLOC();

#if (RAW_QUEUE_SIZE_FUNCTION_CHECK > 0)
    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }
    if (queue_free_msg_size == 0) {
        return RAW_NULL_POINTER;
    }
    /* Bug fix: queue_peak_msg_size is dereferenced below but the original
       never null-checked it, unlike the other two output pointers. */
    if (queue_peak_msg_size == 0) {
        return RAW_NULL_POINTER;
    }
    if (queue_current_msg == 0) {
        return RAW_NULL_POINTER;
    }
#endif

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_SIZE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* Read all three counters under one critical section so the snapshot
       is internally consistent. */
    *queue_free_msg_size = p_q->queue_msg_size - p_q->queue_current_msg;
    *queue_current_msg = p_q->queue_current_msg;
    *queue_peak_msg_size = p_q->peak_numbers;

    RAW_CRITICAL_EXIT();

    return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Check whether a queue-size object is full
*
* Description: This function is called to check whether a queue-size object is full or not.
*
* Arguments  : p_q is the address of the queue-size object.
*
* Returns
*            RAW_QUEUE_SIZE_CHECK_FULL: queue_size obj is full
*            RAW_QUEUE_SIZE_CHECK_NOT_FULL: queue_size obj is not full
*
* Note(s)    :
************************************************************************************************************************
*/
RAW_OS_ERROR raw_queue_size_full_check(RAW_QUEUE_SIZE *p_q)
{
    RAW_OS_ERROR full_check_ret;

    RAW_SR_ALLOC();

/* Consistency fix: this is a queue-SIZE service, so it is guarded by
   RAW_QUEUE_SIZE_FUNCTION_CHECK like the other queue-size functions —
   the original used RAW_QUEUE_FUNCTION_CHECK. */
#if (RAW_QUEUE_SIZE_FUNCTION_CHECK > 0)
    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }
#endif

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_SIZE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    if (p_q->queue_current_msg >= p_q->queue_msg_size) {
        full_check_ret = RAW_QUEUE_SIZE_CHECK_FULL;
    } else {
        full_check_ret = RAW_QUEUE_SIZE_CHECK_NOT_FULL;
    }

    RAW_CRITICAL_EXIT();

    return full_check_ret;
}
/*
 * Discard every message currently stored in the queue.
 * Blocked senders/receivers are NOT affected; only the message storage
 * is reset. Not callable from an ISR.
 */
RAW_U16 raw_queue_flush(RAW_QUEUE *p_q)
{
    RAW_SR_ALLOC();

#if (RAW_QUEUE_FUNCTION_CHECK > 0)
    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
#endif

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* Rewind both ring cursors to the start of the buffer and zero the
       message count — the queue is now empty. */
    p_q->msg_q.read = p_q->msg_q.queue_start;
    p_q->msg_q.write = p_q->msg_q.queue_start;
    p_q->msg_q.current_numbers = 0u;

    RAW_CRITICAL_EXIT();

    TRACE_QUEUE_FLUSH(raw_task_active, p_q);

    return RAW_SUCCESS;
}
/*
 * Empty a queue buffer: all stored bytes are discarded and the ring is
 * returned to its initial state. Not callable from an ISR.
 */
RAW_U16 raw_queue_buffer_flush(RAW_QUEUE_BUFFER *q_b)
{
    RAW_SR_ALLOC();

#if (RAW_QUEUE_BUFFER_FUNCTION_CHECK > 0)
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
    if (q_b == 0) {
        return RAW_NULL_OBJECT;
    }
#endif

    RAW_CRITICAL_ENTER();

    if (q_b->common_block_obj.object_type != RAW_QUEUE_BUFFER_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* Reset the ring: both indices restart at the beginning and the
       whole buffer is free again. */
    q_b->head = 0;
    q_b->tail = 0;
    q_b->frbufsz = q_b->bufsz;

    RAW_CRITICAL_EXIT();

    return RAW_SUCCESS;
}
/*
 * Request that process p be polled. Safe to call from interrupt context:
 * the shared flag and counter are updated inside a critical section.
 * Ignored for a null pointer or a process that is not running/called.
 */
void process_poll(struct process *p)
{
    RAW_SR_ALLOC();

    if (p == 0) {
        return;
    }

    /* Only a live process can be polled. */
    if ((p->state != PROCESS_STATE_RUNNING) && (p->state != PROCESS_STATE_CALLED)) {
        return;
    }

    RAW_CRITICAL_ENTER();
    p->needspoll = 1;
    poll_requested++;
    RAW_CRITICAL_EXIT();
}
/*
************************************************************************************************************************
* Set system time
*
* Description: This function is called to set the system tick counter.
*
* Arguments  : time is the new value for the system tick counter.
*
* Returns
*            RAW_SUCCESS: system time was updated.
*            RAW_NOT_CALLED_BY_ISR: called from interrupt context; nothing was changed.
* Note(s)    : Must not be called from an ISR.
************************************************************************************************************************
*/
RAW_OS_ERROR raw_system_time_set(RAW_TICK_TYPE time)
{
    RAW_SR_ALLOC();

    /* Task-level service only. */
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

    /* raw_tick_count is shared with the tick handler, so write it inside
       a critical section. */
    RAW_CRITICAL_ENTER();
    raw_tick_count = time;
    RAW_CRITICAL_EXIT();

    return RAW_SUCCESS;
}
/*
 * Service a poll request: consume one pending request, then deliver
 * PROCESS_EVENT_POLL to every process whose needspoll flag is set.
 */
static void do_poll(void)
{
    struct process *p;

    RAW_SR_ALLOC();

    /* poll_requested is also incremented from process_poll(), possibly in
       interrupt context, so adjust it inside a critical section.
       NOTE(review): only ONE request is consumed here while the loop
       below clears ALL pending needspoll flags; a surplus count would
       just trigger extra (empty) do_poll passes — confirm this is the
       intended accounting. */
    RAW_CRITICAL_ENTER();
    poll_requested--;
    RAW_CRITICAL_EXIT();

    /* Call the processes that needs to be polled. */
    for(p = process_list; p != 0; p = p->next) {
        if(p->needspoll) {
            p->state = PROCESS_STATE_RUNNING;
            p->needspoll = 0;
            call_process(p, PROCESS_EVENT_POLL, 0);
        }
    }
}
/*
 * Mark a thread as detached. For a live thread the detach state is set
 * under a critical section and the join semaphore is deleted (waking any
 * joiner); an already-deleted thread needs no state change.
 * Always returns 0, per the POSIX signature used here.
 */
int pthread_detach(pthread_t thread)
{
    RAW_TASK_OBJ *task;

    /* Fix: RAW_CRITICAL_ENTER/EXIT are used below, so RAW_SR_ALLOC()
       must be declared first — every other function in this file that
       enters a critical section declares it. */
    RAW_SR_ALLOC();

    task = &thread->task_obj;

    /* Nothing to do for a task that has already been deleted (the
       original expressed this as an empty if-branch). */
    if (task->task_state != RAW_DELETED) {
        RAW_CRITICAL_ENTER();
        thread->attr.detachstate = PTHREAD_CREATE_DETACHED;
        RAW_CRITICAL_EXIT();

        /* A detached thread is never joined, so its join semaphore can
           go away; deletion also releases any task blocked in join. */
        raw_semaphore_delete(&thread->task_sem);
    }

    return 0;
}
/*
************************************************************************************************************************
* Receive a msg
*
* Description: This function is called to receive a msg.
*
* Arguments  : p_q is the address of the queue object
*              -----
*              msg is the address of a pointer; on success it is filled with the address of the received msg.
*              -----
*              wait_option: how the service behaves when the queue is empty:
*                           RAW_NO_WAIT (0x00000000)
*                           RAW_WAIT_FOREVER (0xFFFFFFFF)
*                           timeout value (0x00000001 through 0xFFFFFFFE)
*
* Returns
*            RAW_SUCCESS: raw os return success
*            RAW_BLOCK_DEL: this queue was deleted while waiting.
*            RAW_BLOCK_TIMEOUT: queue stayed empty for the whole waiting time.
*            RAW_BLOCK_ABORT: the wait was aborted.
*            RAW_STATE_UNKNOWN: possibly system error.
* Note(s)    : If no msg is received then *msg is set to the null pointer (0).
*              An ISR may call this function only with wait_option == RAW_NO_WAIT.
************************************************************************************************************************
*/
RAW_U16 raw_queue_receive(RAW_QUEUE *p_q, RAW_TICK_TYPE wait_option, RAW_VOID **msg)
{
    RAW_VOID *pmsg;
    RAW_U16 result;

    RAW_SR_ALLOC();

#if (RAW_QUEUE_FUNCTION_CHECK > 0)
    /* Blocking receive is not allowed in interrupt context. */
    if (raw_int_nesting && (wait_option != RAW_NO_WAIT)) {
        return RAW_NOT_CALLED_BY_ISR;
    }
    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }
    if (msg == 0) {
        return RAW_NULL_POINTER;
    }
#endif

#if (CONFIG_RAW_ZERO_INTERRUPT > 0)
    /* In zero-interrupt configuration ISRs may not call this at all. */
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
#endif

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* If the queue has msgs, just take the oldest one. */
    if (p_q->msg_q.current_numbers) {
        pmsg = *p_q->msg_q.read++;
        if (p_q->msg_q.read == p_q->msg_q.queue_end) {
            /* wrap around to start */
            p_q->msg_q.read = p_q->msg_q.queue_start;
        }
        *msg = pmsg;
        p_q->msg_q.current_numbers--;
        RAW_CRITICAL_EXIT();
        TRACE_QUEUE_GET_MSG(raw_task_active, p_q, wait_option, *msg);
        return RAW_SUCCESS;
    }

    /* Queue empty and the caller declines to wait. */
    if (wait_option == RAW_NO_WAIT) {
        *msg = (RAW_VOID *)0;
        RAW_CRITICAL_EXIT();
        return RAW_NO_PEND_WAIT;
    }

    /* If the system is locked, blocking is not allowed. */
    SYSTEM_LOCK_PROCESS_QUEUE();

    raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)p_q, raw_task_active, wait_option);
    RAW_CRITICAL_EXIT();

    TRACE_QUEUE_GET_BLOCK(raw_task_active, p_q, wait_option);

    /* Switch away; execution resumes here after a wake-up (msg arrival,
       timeout, abort or queue deletion). */
    raw_sched();

    *msg = (RAW_VOID *)0;
    /* Translate the wake-up reason into a return code; on success the
       delivered msg is stored through msg. */
    result = block_state_post_process(raw_task_active, msg);
    return result;
}
/*
 * Internal helper: update an event group's flags and wake every pended
 * task whose wait condition is now satisfied.
 *
 * set_option with RAW_FLAGS_AND_MASK clears flags (flags &= flags_to_set)
 * and returns immediately; otherwise flags are OR-ed in and the pend
 * list is scanned.
 */
RAW_U16 event_set(RAW_EVENT *event_ptr, RAW_U32 flags_to_set, RAW_U8 set_option)
{
    LIST *iter;
    LIST *event_head_ptr;
    LIST *iter_temp;
    RAW_TASK_OBJ *task_ptr;
    RAW_U8 status;

    RAW_SR_ALLOC();

    status = RAW_FALSE;

    RAW_CRITICAL_ENTER();

    if (event_ptr->common_block_obj.object_type != RAW_EVENT_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    event_head_ptr = &event_ptr->common_block_obj.block_list;

    /* If the set_option is AND_MASK, it just clears the flags and returns
       immediately — no task can become runnable by clearing flags. */
    if (set_option & RAW_FLAGS_AND_MASK) {
        event_ptr->flags &= flags_to_set;
        RAW_CRITICAL_EXIT();
        return RAW_SUCCESS;
    }
    /* If it is OR mask then set the flags and continue. */
    else {
        event_ptr->flags |= flags_to_set;
    }

    iter = event_head_ptr->next;

    /* Walk the pend list; iter_temp is saved first because waking a task
       unlinks it from the list. */
    while (iter != event_head_ptr) {
        task_ptr = list_entry(iter, RAW_TASK_OBJ, task_list);
        iter_temp = iter->next;

        /* Re-evaluate this task's stored wait condition (AND: all of its
           requested flags present; OR: any of them). */
        if (task_ptr->raw_suspend_option & RAW_FLAGS_AND_MASK) {
            if ((event_ptr->flags & task_ptr->raw_suspend_flags) == task_ptr->raw_suspend_flags) {
                status = RAW_TRUE;
            } else {
                status = RAW_FALSE;
            }
        } else {
            if (event_ptr->flags & task_ptr->raw_suspend_flags) {
                status = RAW_TRUE;
            } else {
                status = RAW_FALSE;
            }
        }

        if (status == RAW_TRUE) {
            /* Hand the current flag snapshot back through the pointer the
               waiter stashed in raw_additional_suspend_info. */
            (*(RAW_U32 *)(task_ptr->raw_additional_suspend_info)) = event_ptr->flags;
            /* The task's condition is met — wake it. */
            raw_wake_object(task_ptr);
            TRACE_EVENT_WAKE(raw_task_active, task_ptr);
        }

        iter = iter_temp;
    }

    RAW_CRITICAL_EXIT();

    raw_sched();
    return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Get an event
*
* Description: This service retrieves event flags from the specified event flags group.
*              Each event flags group contains 32 event flags, each represented by a
*              single bit. This service can retrieve a variety of event flag
*              combinations, as selected by the input parameters.
*
* Arguments  : event_ptr: is the address of the event object
*              -----
*              requested_flags: 32-bit value of the requested event flags.
*              -----
*              get_option: whether all or any of the requested flags are required:
*                          RAW_AND, RAW_AND_CLEAR, RAW_OR, RAW_OR_CLEAR.
*                          RAW_AND / RAW_AND_CLEAR: all requested flags must be present.
*                          RAW_OR / RAW_OR_CLEAR: any requested flag is satisfactory.
*                          Flags that satisfy the request are cleared (set to zero)
*                          if RAW_AND_CLEAR or RAW_OR_CLEAR is specified.
*              -----
*              wait_option: behaviour when the selected flags are not set:
*                           RAW_NO_WAIT (0x00000000)
*                           RAW_WAIT_FOREVER (0xFFFFFFFF)
*                           timeout value (0x00000001 through 0xFFFFFFFE)
*              -----
*              actual_flags_ptr: filled with the actual flags when the function returns.
*
* Returns    RAW_SUCCESS: got the event.
*            RAW_BLOCK_ABORT: wait aborted by another task or ISR.
*            RAW_NO_PEND_WAIT: flags not set and option is RAW_NO_WAIT.
*            RAW_SCHED_DISABLE: system is locked, task is not allowed to block.
*            RAW_BLOCK_DEL: this event was deleted.
* Note(s)    : RAW_STATE_UNKNOWN means a wrong task state — probably a system error.
************************************************************************************************************************
*/
RAW_U16 raw_event_get(RAW_EVENT *event_ptr, RAW_U32 requested_flags, RAW_U8 get_option, RAW_U32 *actual_flags_ptr, RAW_TICK_TYPE wait_option)
{
    RAW_U16 error_status;
    RAW_U8 status;

    RAW_SR_ALLOC();

#if (RAW_EVENT_FUNCTION_CHECK > 0)
    if (event_ptr == 0) {
        return RAW_NULL_OBJECT;
    }
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
    if ((get_option != RAW_AND) && (get_option != RAW_OR) && (get_option != RAW_AND_CLEAR) && (get_option != RAW_OR_CLEAR)) {
        return RAW_NO_THIS_OPTION;
    }
#endif

    RAW_CRITICAL_ENTER();

    if (event_ptr->common_block_obj.object_type != RAW_EVENT_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* AND option: all requested flags must be present. */
    if (get_option & RAW_FLAGS_AND_MASK) {
        if ((event_ptr->flags & requested_flags) == requested_flags) {
            status = RAW_TRUE;
        } else {
            status = RAW_FALSE;
        }
    }
    /* OR option: any one requested flag is enough. */
    else {
        if (event_ptr->flags & requested_flags) {
            status = RAW_TRUE;
        } else {
            status = RAW_FALSE;
        }
    }

    /* Condition already satisfied — return without blocking. */
    if (status == RAW_TRUE) {
        *actual_flags_ptr = event_ptr->flags;
        /* Clear the satisfied flags when a *_CLEAR option was given. */
        if (get_option & RAW_FLAGS_CLEAR_MASK) {
            event_ptr->flags &= ~requested_flags;
        }
        RAW_CRITICAL_EXIT();
        TRACE_EVENT_GET(raw_task_active, event_ptr);
        return RAW_SUCCESS;
    }

    /* Can't get the event; return immediately if wait_option is RAW_NO_WAIT. */
    if (wait_option == RAW_NO_WAIT) {
        RAW_CRITICAL_EXIT();
        return RAW_NO_PEND_WAIT;
    }

    /* System is locked, so the task can not block — return immediately. */
    SYSTEM_LOCK_PROCESS();

    /* Stash the request so event_set() can re-evaluate it at wake time. */
    raw_task_active->raw_suspend_option = get_option;
    raw_task_active->raw_suspend_flags = requested_flags;
    raw_task_active->raw_additional_suspend_info = actual_flags_ptr;
    raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)event_ptr, raw_task_active, wait_option);
    RAW_CRITICAL_EXIT();

    TRACE_EVENT_GET_BLOCK(raw_task_active, event_ptr, wait_option);

    raw_sched();

    /* The task was woken up — find out which reason caused the wake-up. */
    error_status = block_state_post_process(raw_task_active, 0);

    /* On failure do not clear the flags; just return the error status. */
    if (error_status != RAW_SUCCESS) {
        return error_status;
    }

    RAW_CRITICAL_ENTER();
    /* Successful blocking get: clear the flags if a *_CLEAR option was given. */
    if (get_option & RAW_FLAGS_CLEAR_MASK) {
        event_ptr->flags &= ~requested_flags;
    }
    RAW_CRITICAL_EXIT();

    return RAW_SUCCESS;
}
/*
 * Internal helper: post a sized message to a RAW_QUEUE_SIZE object.
 *
 * opt_send_method selects FIFO (SEND_TO_END) or LIFO insertion when the
 * message is stored; opt_wake_all selects whether all pending receivers
 * or only the highest-priority one are woken when receivers are waiting.
 */
RAW_OS_ERROR msg_size_post(RAW_QUEUE_SIZE *p_q, RAW_MSG_SIZE *p_void, MSG_SIZE_TYPE size, RAW_U8 opt_send_method, RAW_U8 opt_wake_all)
{
    LIST *block_list_head;
    RAW_MSG_SIZE *msg_temp;
    RAW_MSG_SIZE *p_msg_in;

    RAW_SR_ALLOC();

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_SIZE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    block_list_head = &p_q->common_block_obj.block_list;

    /* Queue is full. */
    if (p_q->queue_current_msg >= p_q->queue_msg_size) {
        RAW_CRITICAL_EXIT();
        TRACE_QUEUE_SIZE_MSG_MAX(raw_task_active, p_q, p_void, size, opt_send_method);
        /* NOTE(review): the full-callback runs outside the critical
           section, so the queue state may change before it fires. */
        if (p_q->queue_size_full_callback) {
            p_q->queue_size_full_callback(p_q, p_void, size);
        }
        return RAW_MSG_MAX;
    }

    /* Queue is not full here. If there is no blocked receive task, store
       the message in the queue. */
    if (is_list_empty(block_list_head)) {
        /* Take a node from the free msg list. */
        msg_temp = p_q->free_msg;
        p_q->free_msg = msg_temp->next;

        if (p_q->queue_current_msg == 0) {
            /* First message placed in the queue. */
            p_q->write = msg_temp;
            p_q->read = msg_temp;
        } else {
            if (opt_send_method == SEND_TO_END) {
                /* Append at the write end (FIFO). */
                p_msg_in = p_q->write;
                p_msg_in->next = msg_temp;
                msg_temp->next = 0;
                p_q->write = msg_temp;
            } else {
                /* Push in front of the read end (LIFO). */
                msg_temp->next = p_q->read;
                p_q->read = msg_temp;
            }
        }

        p_q->queue_current_msg++;
        /* Track the high-water mark. */
        if (p_q->queue_current_msg > p_q->peak_numbers) {
            p_q->peak_numbers = p_q->queue_current_msg;
        }

        /* Fill in the message payload. */
        msg_temp->msg_ptr = p_void;
        msg_temp->msg_size = size;

        RAW_CRITICAL_EXIT();
        TRACE_QUEUE_SIZE_MSG_POST(raw_task_active, p_q, p_void, size, opt_send_method);
        return RAW_SUCCESS;
    }

    /* Wake all the tasks blocked on this queue, handing each the msg.
       NOTE(review): the TRACE call runs after the wake removed that task
       from the list, so it names the NEXT blocked entry (or the list head
       on the last pass) — confirm this is acceptable for tracing. */
    if (opt_wake_all) {
        while (!is_list_empty(block_list_head)) {
            wake_send_msg_size(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, size);
            TRACE_QUEUE_SIZE_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, size, opt_wake_all);
        }
    }
    /* Wake only the highest-priority task blocked on this queue and send
       the msg to it. */
    else {
        wake_send_msg_size(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, size);
        TRACE_QUEUE_SIZE_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, size, opt_wake_all);
    }

    RAW_CRITICAL_EXIT();

    /* A woken task may outrank the caller — reschedule. */
    raw_sched();
    return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Release a mutex
*
* Description: This function is called to release a mutex.
*
* Arguments  : mutex_ptr is the address of the mutex object to be released.
*
* Returns    RAW_SUCCESS: released (or ownership handed to a waiting task).
*            RAW_MUTEX_OWNER_NESTED: nesting count not yet zero, still owned by the caller.
*            RAW_MUTEX_NOT_RELEASE_BY_OCCYPY: the caller does not own the mutex.
* Note(s)    : A mutex must be released by the task that owns it.
************************************************************************************************************************
*/
RAW_OS_ERROR raw_mutex_put(RAW_MUTEX *mutex_ptr)
{
    LIST *block_list_head;
    RAW_TASK_OBJ *tcb;

    RAW_SR_ALLOC();

#if (RAW_MUTEX_FUNCTION_CHECK > 0)
    if (mutex_ptr == 0) {
        return RAW_NULL_OBJECT;
    }
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
#endif

    RAW_CRITICAL_ENTER();

    if (mutex_ptr->common_block_obj.object_type != RAW_MUTEX_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* Must release the mutex by self (owner). */
    if (raw_task_active != mutex_ptr->mtxtsk) {
        RAW_CRITICAL_EXIT();
        return RAW_MUTEX_NOT_RELEASE_BY_OCCYPY;
    }

    /* Unwind one level of recursive ownership; keep holding the mutex
       while the nesting count is still non-zero. */
    mutex_ptr->owner_nested--;
    if (mutex_ptr->owner_nested) {
        RAW_CRITICAL_EXIT();
        return RAW_MUTEX_OWNER_NESTED;
    }

    /* Remove the mutex from the owner's held list (this also restores the
       owner's priority under the inherit/ceiling policies). */
    release_mutex(raw_task_active, mutex_ptr);

    block_list_head = &mutex_ptr->common_block_obj.block_list;

    /* If no task is blocked on this mutex it simply becomes free. */
    if (is_list_empty(block_list_head)) {
        /* No wait task */
        mutex_ptr->mtxtsk = 0;
        RAW_CRITICAL_EXIT();
        TRACE_MUTEX_RELEASE_SUCCESS(raw_task_active, mutex_ptr);
        return RAW_SUCCESS;
    }

    /* There is at least one task blocked on this mutex object; the head
       of the list is the highest-priority waiter. */
    tcb = raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);

    /* Wake it up and transfer ownership to it. */
    raw_wake_object(tcb);

    /* Change mutex owner to the woken task. */
    mutex_ptr->mtxtsk = tcb;
    mutex_ptr->mtxlist = tcb->mtxlist;
    tcb->mtxlist = mutex_ptr;
    mutex_ptr->owner_nested = 1u;

    if (mutex_ptr->policy == RAW_MUTEX_CEILING_POLICY) {
        if (tcb->priority > mutex_ptr->ceiling_prio) {
            /* Raise the priority of the task that got the lock to the
               ceiling priority. */
            change_internal_task_priority(tcb, mutex_ptr->ceiling_prio);
        }
    }

    TRACE_MUTEX_WAKE_TASK(raw_task_active, tcb);

    RAW_CRITICAL_EXIT();

    /* The new owner may outrank the caller — reschedule. */
    raw_sched();
    return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Get a mutex
*
* Description: This function is called to get a mutex.
*
* Arguments  : mutex_ptr: is the address of the mutex object to acquire.
*              -----
*              wait_option: 0 returns immediately, 0xffffffff waits forever,
*              other values are a timeout:
*                           RAW_NO_WAIT (0x00000000)
*                           RAW_WAIT_FOREVER (0xFFFFFFFF)
*                           timeout value (0x00000001 through 0xFFFFFFFE)
*
* Returns    RAW_SUCCESS: got the mutex.
*            RAW_MUTEX_OWNER_NESTED: caller already owns it; nesting count incremented.
*            RAW_EXCEED_CEILING_PRIORITY: caller's base priority violates the ceiling.
*            RAW_BLOCK_ABORT: wait aborted by another task or ISR.
*            RAW_NO_PEND_WAIT: mutex not available and option is RAW_NO_WAIT.
*            RAW_SCHED_DISABLE: system is locked, task is not allowed to block.
*            RAW_BLOCK_DEL: this mutex was deleted while waiting.
************************************************************************************************************************
*/
RAW_OS_ERROR raw_mutex_get(RAW_MUTEX *mutex_ptr, RAW_TICK_TYPE wait_option)
{
    RAW_OS_ERROR error_status;
    RAW_TASK_OBJ *mtxtsk;

    RAW_SR_ALLOC();

#if (RAW_MUTEX_FUNCTION_CHECK > 0)
    if (mutex_ptr == 0) {
        return RAW_NULL_OBJECT;
    }
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
#endif

    RAW_CRITICAL_ENTER();

    if (mutex_ptr->common_block_obj.object_type != RAW_MUTEX_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* If the same task gets the same mutex again, the ownership nests. */
    if (raw_task_active == mutex_ptr->mtxtsk) {
        /* Saturating check: nesting counter about to wrap. */
        if (mutex_ptr->owner_nested == (RAW_MUTEX_NESTED_TYPE)-1) {
            /* Likely a design error — the system must be stopped here! */
            port_system_error_process(RAW_MUTEX_NESTED_OVERFLOW, 0, 0, 0, 0, 0, 0);
        } else {
            mutex_ptr->owner_nested++;
        }
        RAW_CRITICAL_EXIT();
        return RAW_MUTEX_OWNER_NESTED;
    }

    if (mutex_ptr->policy == RAW_MUTEX_CEILING_POLICY) {
        if (raw_task_active->bpriority < mutex_ptr->ceiling_prio) {
            /* Violation of the ceiling priority limit. */
            RAW_CRITICAL_EXIT();
            TRACE_MUTEX_EX_CE_PRI(raw_task_active, mutex_ptr, wait_option);
            return RAW_EXCEED_CEILING_PRIORITY;
        }
    }

    mtxtsk = mutex_ptr->mtxtsk;

    /* Mutex is free — take it. */
    if (mtxtsk == 0) {
        mutex_ptr->mtxtsk = raw_task_active;
        mutex_ptr->mtxlist = raw_task_active->mtxlist;
        raw_task_active->mtxlist = mutex_ptr;
        mutex_ptr->owner_nested = 1u;

        if (mutex_ptr->policy == RAW_MUTEX_CEILING_POLICY) {
            if (raw_task_active->priority > mutex_ptr->ceiling_prio) {
                /* Raise the new owner to the ceiling priority. */
                change_internal_task_priority(raw_task_active, mutex_ptr->ceiling_prio);
            }
        }

        RAW_CRITICAL_EXIT();
        TRACE_MUTEX_GET(raw_task_active, mutex_ptr, wait_option);
        return RAW_SUCCESS;
    }

    /* Can't get the mutex; return immediately if wait_option is RAW_NO_WAIT. */
    if (wait_option == RAW_NO_WAIT) {
        RAW_CRITICAL_EXIT();
        return RAW_NO_PEND_WAIT;
    }

    /* System is locked, so the task can not block — return immediately. */
    SYSTEM_LOCK_PROCESS();

    /* A higher-priority task is about to block on a mutex held by a
       lower-priority task: priority inversion. Under the inherit policy,
       boost the owner to the caller's priority. */
    if (mutex_ptr->policy == RAW_MUTEX_INHERIT_POLICY) {
        if (raw_task_active->priority < mtxtsk->priority) {
            TRACE_TASK_PRI_INV(raw_task_active, mtxtsk);
            change_internal_task_priority(mtxtsk, raw_task_active->priority);
        }
    }

    /* Block the current task on the mutex. */
    raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)mutex_ptr, raw_task_active, wait_option);
    RAW_CRITICAL_EXIT();

    TRACE_MUTEX_GET_BLOCK(raw_task_active, mutex_ptr, wait_option);

    /* Find the next highest-priority ready task to run. */
    raw_sched();

    /* The task was woken up — find out which reason caused the wake-up. */
    error_status = block_state_post_process(raw_task_active, 0);
    return error_status;
}
/*
************************************************************************************************************************
* Receive a msg with size
*
* Description: This function is called to receive a msg with size.
*
* Arguments  : p_q is the address of the queue object
*              -----
*              msg_ptr is the address of a pointer; on success it is filled with the address of the msg.
*              -----
*              wait_option: how the service behaves when the queue is empty:
*                           RAW_NO_WAIT (0x00000000)
*                           RAW_WAIT_FOREVER (0xFFFFFFFF)
*                           timeout value (0x00000001 through 0xFFFFFFFE)
*              -----
*              receive_size: filled with the size of the received message.
*
* Returns
*            RAW_SUCCESS: raw os return success
*            RAW_BLOCK_DEL: this queue was deleted while waiting.
*            RAW_BLOCK_TIMEOUT: queue stayed empty for the whole waiting time.
*            RAW_BLOCK_ABORT: the wait was aborted.
*            RAW_STATE_UNKNOWN: possibly system error.
* Note(s)    : If no msg is received, *msg_ptr is set to the null pointer (0).
*              An ISR may call this function only with wait_option == RAW_NO_WAIT.
************************************************************************************************************************
*/
RAW_OS_ERROR raw_queue_size_receive(RAW_QUEUE_SIZE *p_q, RAW_TICK_TYPE wait_option, void **msg_ptr, MSG_SIZE_TYPE *receive_size)
{
    RAW_OS_ERROR result;
    RAW_MSG_SIZE *msg_tmp;

    RAW_SR_ALLOC();

#if (RAW_QUEUE_SIZE_FUNCTION_CHECK > 0)
    /* Blocking receive is not allowed in interrupt context. */
    if (raw_int_nesting && (wait_option != RAW_NO_WAIT)) {
        return RAW_NOT_CALLED_BY_ISR;
    }
    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }
    if (msg_ptr == 0) {
        return RAW_NULL_POINTER;
    }
    if (receive_size == 0) {
        return RAW_NULL_POINTER;
    }
#endif

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_SIZE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* If the queue has a msg, take the node at the read end. */
    if (p_q->queue_current_msg) {
        msg_tmp = p_q->read;
        *msg_ptr = msg_tmp->msg_ptr;
        *receive_size = msg_tmp->msg_size;

        p_q->read = msg_tmp->next;
        if (p_q->read) {
            p_q->queue_current_msg--;
        } else {
            /* That was the last node — the queue is now empty. */
            p_q->write = 0;
            p_q->queue_current_msg = 0;
        }

        /* Return the node to the free msg list. */
        msg_tmp->next = p_q->free_msg;
        p_q->free_msg = msg_tmp;

        RAW_CRITICAL_EXIT();
        TRACE_QUEUE_SIZE_GET_MSG(raw_task_active, p_q, wait_option, *msg_ptr, *receive_size);
        return RAW_SUCCESS;
    }

    /* Queue empty and the caller declines to wait. */
    if (wait_option == RAW_NO_WAIT) {
        *msg_ptr = 0;
        *receive_size = 0;
        RAW_CRITICAL_EXIT();
        return RAW_NO_PEND_WAIT;
    }

    /* If the system is locked, blocking is not allowed. */
    SYSTEM_LOCK_PROCESS_QUEUE_SIZE();

    raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)p_q, raw_task_active, wait_option);
    RAW_CRITICAL_EXIT();

    TRACE_QUEUE_SIZE_GET_BLOCK(raw_task_active, p_q, wait_option);

    /* Switch away; execution resumes here after a wake-up. */
    raw_sched();

    result = block_state_post_process(raw_task_active, 0);

    /* On a successful wake the sender stored the msg in the task object. */
    if (result == RAW_SUCCESS) {
        *receive_size = raw_task_active->msg_size;
        *msg_ptr = raw_task_active->msg;
    } else {
        *msg_ptr = 0;
        *receive_size = 0;
    }

    return result;
}
/*
************************************************************************************************************************
* Receive a msg
*
* Description: This function is called to receive a msg from a queue buffer.
*
* Arguments  : q_b is the address of the queue buffer object
*              -----
*              msg is the address of a buffer which will be filled with the data within this api.
*              If you want to use the extension memcpy, make sure the msg address is 4 bytes aligned.
*              -----
*              wait_option: how the service behaves when the buffer is empty:
*                           RAW_NO_WAIT (0x00000000)
*                           RAW_WAIT_FOREVER (0xFFFFFFFF)
*                           timeout value (0x00000001 through 0xFFFFFFFE)
*              -----
*              receive_size: filled with the size of the msg received.
*
* Returns
*            RAW_SUCCESS: raw os return success
*            RAW_BLOCK_DEL: this queue was deleted while waiting.
*            RAW_BLOCK_TIMEOUT: buffer stayed empty for the whole waiting time.
*            RAW_BLOCK_ABORT: the wait was aborted.
*            RAW_STATE_UNKNOWN: possibly system error.
* Note(s)    : Not callable from an ISR.
************************************************************************************************************************
*/
RAW_U16 raw_queue_buffer_receive(RAW_QUEUE_BUFFER *q_b, RAW_TICK_TYPE wait_option, RAW_VOID *msg, MSG_SIZE_TYPE *receive_size)
{
    RAW_U16 result;

    RAW_SR_ALLOC();

#if (RAW_QUEUE_BUFFER_FUNCTION_CHECK > 0)
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
    if (q_b == 0) {
        return RAW_NULL_OBJECT;
    }
    if (msg == 0) {
        return RAW_NULL_POINTER;
    }
#endif

    RAW_CRITICAL_ENTER();

    if (q_b->common_block_obj.object_type != RAW_QUEUE_BUFFER_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* Data available — copy one message out of the ring into msg. */
    if (!is_buffer_empty(q_b)) {
        *receive_size = buffer_to_msg(q_b, msg);
        RAW_CRITICAL_EXIT();
        return RAW_SUCCESS;
    }

    /* Buffer empty and the caller declines to wait. */
    if (wait_option == RAW_NO_WAIT) {
        RAW_CRITICAL_EXIT();
        return RAW_NO_PEND_WAIT;
    }

    /* If the system is locked, blocking is not allowed. */
    SYSTEM_LOCK_PROCESS();

    /* Stash the destination buffer so the sender can copy directly into it. */
    raw_task_active->msg = msg;
    raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)q_b, raw_task_active, wait_option);
    RAW_CRITICAL_EXIT();

    /* Switch away; execution resumes here after a wake-up. */
    raw_sched();

    result = block_state_post_process(raw_task_active, 0);

    /* On a successful wake the sender recorded the copied size. */
    if (result == RAW_SUCCESS) {
        *receive_size = raw_task_active->qb_msg_size;
    }

    return result;
}
void idle_run(void) { ACTIVE_EVENT_STRUCT *a; STATE_EVENT temp; ACTIVE_EVENT_STRUCT_CB *acb; RAW_U8 x; RAW_U8 y; RAW_U8 idle_high_priority; RAW_SR_ALLOC(); while (1) { RAW_CRITICAL_ENTER(); /*if get events then process it*/ if (raw_idle_rdy_grp) { y = raw_idle_map_table[raw_idle_rdy_grp]; x = y >> 3; idle_high_priority = (y + raw_idle_map_table[raw_rdy_tbl[x]]); acb = &active_idle_task[idle_high_priority]; a = active_idle_task[idle_high_priority].act; --a->nUsed; if (a->nUsed == 0) { raw_rdy_tbl[a->priority_y] &= (RAW_U8)~a->priority_bit_x; if (raw_rdy_tbl[a->priority_y] == 0) { /* Clear event grp bit if this was only task pending */ raw_idle_rdy_grp &= (RAW_U8)~a->priority_bit_y; } } temp.sig = acb->queue[a->tail].sig; temp.which_pool = acb->queue[a->tail].para; a->tail++; if (a->tail == acb->end) { a->tail = 0; } RAW_CRITICAL_EXIT(); #if (RAW_FSM_ACTIVE > 0) fsm_exceute(&a->super, &temp); #else hsm_exceute(&a->super, &temp); #endif } else { RAW_CRITICAL_EXIT(); RAW_CPU_DISABLE(); if (raw_idle_rdy_grp == 0) { idle_event_user(); } RAW_CPU_ENABLE(); } }
/*
* Post an event (sig + para) to an idle active object's ring queue.
*
* SEND_TO_END stores at the head index and advances it; any other send
* method steps the tail backwards (with wrap) and stores there, so the
* consumer -- which pops from tail -- sees it first.  On the 0 -> 1
* transition of the pending count the object is flagged ready in the
* idle bitmap tables.
*
* Returns RAW_IDLE_EVENT_EXHAUSTED when the queue is full, RAW_SUCCESS
* otherwise.  The whole operation runs inside one critical section.
*/
RAW_OS_ERROR event_post(ACTIVE_EVENT_STRUCT *me, RAW_U16 sig, void *para, RAW_U8 opt_send_method)
{
	ACTIVE_EVENT_STRUCT_CB *cb;

	RAW_SR_ALLOC();

	cb = &active_idle_task[me->prio];

	RAW_CRITICAL_ENTER();

	/* Queue full? nUsed counts occupied slots, cb->end is the capacity. */
	if (me->nUsed == cb->end) {
		RAW_CRITICAL_EXIT();
		return RAW_IDLE_EVENT_EXHAUSTED;
	}

	if (opt_send_method != SEND_TO_END) {
		/* Urgent send: move tail backwards (wrapping) and store there. */
		if (me->tail == 0) {
			me->tail = cb->end;
		}
		me->tail--;
		cb->queue[me->tail].sig = sig;
		cb->queue[me->tail].para = para;
	}
	else {
		/* Normal send: store at head, advance it, wrap at capacity. */
		cb->queue[me->head].sig = sig;
		cb->queue[me->head].para = para;
		me->head++;
		if (me->head == cb->end) {
			me->head = 0;
		}
	}

	++me->nUsed;

	/* First pending event: mark this object ready in the idle bitmap. */
	if (me->nUsed == 1) {
		raw_idle_rdy_grp |= cb->act->priority_bit_y;
		raw_rdy_tbl[cb->act->priority_y] |= cb->act->priority_bit_x;
	}

	RAW_CRITICAL_EXIT();

	return RAW_SUCCESS;
}
/*---------------------------------------------------------------------------*/ static void do_event(void) { static process_event_t ev; static process_data_t data; static struct process *receiver; static struct process *p; RAW_SR_ALLOC(); /* * If there are any events in the queue, take the first one and walk * through the list of processes to see if the event should be * delivered to any of them. If so, we call the event handler * function for the process. We only process one event at a time and * call the poll handlers inbetween. */ RAW_CRITICAL_ENTER(); if (nevents > 0) { /* There are events that we should deliver. */ ev = events[fevent].ev; data = events[fevent].data; receiver = events[fevent].p; /* Since we have seen the new event, we move pointer upwards and decrese the number of events. */ fevent = (fevent + 1) & (PROCESS_CONF_NUMEVENTS - 1); --nevents; RAW_CRITICAL_EXIT(); /* If this is a broadcast event, we deliver it to all events, in order of their priority. */ if(receiver == PROCESS_BROADCAST) { for(p = process_list; p != 0; p = p->next) { /* If we have been requested to poll a process, we do this in between processing the broadcast event. */ if(poll_requested) { do_poll(); } call_process(p, ev, data); } } else { /* This is not a broadcast event, so we deliver it to the specified process. */ /* If the event was an INIT event, we should also update the state of the process. */ if(ev == PROCESS_EVENT_INIT) { receiver->state = PROCESS_STATE_RUNNING; } /* Make sure that the process actually is running. */ call_process(receiver, ev, data); } } else { RAW_CRITICAL_EXIT(); } }
/*
* Post a msg pointer to a queue.
*
* p_q             : the queue object (NOTE(review): not null-checked here,
*                   unlike the public delete/receive APIs -- presumably the
*                   public wrappers validate it; confirm at call sites).
* p_void          : the msg pointer to deliver.
* opt_send_method : SEND_TO_END appends at the write pointer; anything else
*                   prepends at the read pointer (delivered first).
* opt_wake_all    : non-zero wakes every blocked receiver with this msg,
*                   zero wakes only the first (highest priority) one.
*
* Returns RAW_ERROR_OBJECT_TYPE, RAW_MSG_MAX (queue full), or RAW_SUCCESS.
*/
RAW_U16 msg_post(RAW_QUEUE *p_q, RAW_VOID *p_void, RAW_U8 opt_send_method, RAW_U8 opt_wake_all)
{
	LIST *block_list_head;

	RAW_SR_ALLOC();

	RAW_CRITICAL_ENTER();

	if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &p_q->common_block_obj.block_list;

	/* Reject when the ring buffer already holds its maximum msg count. */
	if (p_q->msg_q.current_numbers >= p_q->msg_q.size) {
		RAW_CRITICAL_EXIT();
		TRACE_QUEUE_MSG_MAX(raw_task_active, p_q, p_void, opt_send_method);
		return RAW_MSG_MAX;
	}

	/*Queue is not full here, if there is no blocked receive task*/
	if (is_list_empty(block_list_head)) {

		p_q->msg_q.current_numbers++;

		/*update peak_numbers for debug*/
		if (p_q->msg_q.current_numbers > p_q->msg_q.peak_numbers) {
			p_q->msg_q.peak_numbers = p_q->msg_q.current_numbers;
		}

		if (opt_send_method == SEND_TO_END) {
			/* Append at write pointer, wrapping at the end of storage. */
			*p_q->msg_q.write++ = p_void;
			if (p_q->msg_q.write == p_q->msg_q.queue_end) {
				p_q->msg_q.write = p_q->msg_q.queue_start;
			}
		}
		else {
			/* Wrap read pointer to end if we are at the 1st queue entry */
			if (p_q->msg_q.read == p_q->msg_q.queue_start) {
				p_q->msg_q.read = p_q->msg_q.queue_end;
			}
			p_q->msg_q.read--;
			*p_q->msg_q.read = p_void;   /* Insert message into queue */
		}

		RAW_CRITICAL_EXIT();

		/*if queue is registered with notify function just call it*/
		if (p_q->queue_send_notify) {
			p_q->queue_send_notify(p_q);
		}

		TRACE_QUEUE_MSG_POST(raw_task_active, p_q, p_void, opt_send_method);
		return RAW_SUCCESS;
	}

	/* Receivers are blocked: hand the msg straight to them, bypassing
	   the ring buffer. */
	/*wake all the task blocked on this queue*/
	if (opt_wake_all) {
		while (!is_list_empty(block_list_head)) {
			wake_send_msg(list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void);
			TRACE_QUEUE_WAKE_TASK(raw_task_active, list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, opt_wake_all);
		}
	}
	/*wake highest priority task blocked on this queue and send msg to it*/
	else {
		wake_send_msg(list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void);
		TRACE_QUEUE_WAKE_TASK(raw_task_active, list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, opt_wake_all);
	}

	RAW_CRITICAL_EXIT();

	/* A receiver became ready: give the scheduler a chance to run it. */
	raw_sched();
	return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Update system tick time
*
* Description: This function is called to update system tick time.
*
* Arguments  : None
*
* Returns    : None
*
* Note(s)    : This function is called by internal, users should not touch this function.
*
************************************************************************************************************************
*/
void tick_list_update(void)
{
	LIST *tick_head_ptr;
	RAW_TASK_OBJ *p_tcb;
	LIST *iter;
	LIST *iter_temp;
	RAW_U16 spoke;

	RAW_SR_ALLOC();

	RAW_CRITICAL_ENTER();

	raw_tick_count++;

	/* Hash the new tick count onto one spoke of the timer wheel
	   (TICK_HEAD_ARRAY must be a power of two for this mask to work). */
	spoke = (RAW_U16)(raw_tick_count & (TICK_HEAD_ARRAY - 1));
	tick_head_ptr = &tick_head[spoke];
	iter = tick_head_ptr->next;

	while (RAW_TRUE) {

		/*search all the time list if possible*/
		if (iter != tick_head_ptr) {

			/* Save the successor first: expiring the task unlinks iter. */
			iter_temp = iter->next;
			p_tcb = list_entry(iter, RAW_TASK_OBJ, tick_list);

			/*Since time list is sorted by remain time, so just campare the absolute time*/
			if (raw_tick_count == p_tcb->tick_match) {

				switch (p_tcb->task_state) {

					case RAW_DLY:
						/* Pure delay expired: task goes straight back to ready. */
						p_tcb->block_status = RAW_B_OK;
						p_tcb->task_state = RAW_RDY;
						tick_list_remove(p_tcb);
						add_ready_list(&raw_ready_queue, p_tcb);
						break;

					case RAW_PEND_TIMEOUT:
						tick_list_remove(p_tcb);
						/*remove task on the block list because task is timeout*/
						list_delete(&p_tcb->task_list);
						add_ready_list(&raw_ready_queue, p_tcb);

						#if (CONFIG_RAW_MUTEX > 0)
						/* Blocking on a mutex may have boosted priorities;
						   let the mutex code undo that. */
						mutex_state_change(p_tcb);
						#endif

						p_tcb->block_status = RAW_B_TIMEOUT;
						p_tcb->task_state = RAW_RDY;
						p_tcb->block_obj = 0;
						break;

					case RAW_PEND_TIMEOUT_SUSPENDED:
						tick_list_remove(p_tcb);
						/*remove task on the block list because task is timeout*/
						list_delete(&p_tcb->task_list);

						#if (CONFIG_RAW_MUTEX > 0)
						mutex_state_change(p_tcb);
						#endif

						p_tcb->block_status = RAW_B_TIMEOUT;
						/* Task was also suspended: it times out but is NOT
						   made ready until resumed. */
						p_tcb->task_state = RAW_SUSPENDED;
						p_tcb->block_obj = 0;
						break;

					case RAW_DLY_SUSPENDED:
						p_tcb->task_state = RAW_SUSPENDED;
						p_tcb->block_status = RAW_B_OK;
						tick_list_remove(p_tcb);
						break;

					default:
						/* Any other state on the tick list is a kernel bug. */
						RAW_ASSERT(0);
				}

				iter = iter_temp;
			}

			/*if current task time out absolute time is not equal current system time, just break because timer list is sorted*/
			else {
				break;
			}
		}

		/*finish all the time list search */
		else {
			break;
		}
	}

	RAW_CRITICAL_EXIT();
}
/*
* Release a semaphore.
*
* semaphore_ptr : the semaphore object (NOTE(review): not null-checked here,
*                 unlike raw_semaphore_delete -- presumably validated by the
*                 public wrappers; confirm at call sites).
* opt_wake_all  : non-zero wakes every blocked task; zero wakes only the
*                 first (highest priority) blocked task.
*
* If no task is blocked the count is incremented (overflow-checked against
* the max value of RAW_PROCESSOR_UINT); otherwise blocked task(s) are woken
* and the count is left unchanged.
*
* Returns RAW_ERROR_OBJECT_TYPE, RAW_SEMAPHORE_OVERFLOW, or RAW_SUCCESS.
*/
RAW_OS_ERROR semaphore_put(RAW_SEMAPHORE *semaphore_ptr, RAW_U8 opt_wake_all)
{
	LIST *block_list_head;

	RAW_SR_ALLOC();

	RAW_CRITICAL_ENTER();

	if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &semaphore_ptr->common_block_obj.block_list;

	/*if no block task on this list just return*/
	if (is_list_empty(block_list_head)) {

		/* Count is at the type's maximum: refuse to wrap around. */
		if (semaphore_ptr->count == (RAW_PROCESSOR_UINT) - 1) {
			RAW_CRITICAL_EXIT();
			TRACE_SEMAPHORE_OVERFLOW(raw_task_active, semaphore_ptr);
			return RAW_SEMAPHORE_OVERFLOW;
		}

		/*increase resource*/
		semaphore_ptr->count++;

		/* Track the high-water mark for debugging. */
		if (semaphore_ptr->count > semaphore_ptr->peak_count) {
			semaphore_ptr->peak_count = semaphore_ptr->count;
		}

		RAW_CRITICAL_EXIT();

		/*if semphore is registered with notify function just call it*/
		if (semaphore_ptr->semphore_send_notify) {
			semaphore_ptr->semphore_send_notify(semaphore_ptr);
		}

		TRACE_SEMAPHORE_COUNT_INCREASE(raw_task_active, semaphore_ptr);
		return RAW_SUCCESS;
	}

	/*wake all the task blocked on this semphore*/
	if (opt_wake_all) {
		while (!is_list_empty(block_list_head)) {
			raw_wake_object(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
			TRACE_SEM_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), opt_wake_all);
		}
	}
	else {
		/*Wake up the highest priority task block on the semaphore*/
		raw_wake_object(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
		TRACE_SEM_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), opt_wake_all);
	}

	RAW_CRITICAL_EXIT();

	/* A waiter became ready: give the scheduler a chance to run it. */
	raw_sched();
	return RAW_SUCCESS;
}
/*
* Post a msg of msg_size bytes to a queue buffer.
*
* If a receiver is blocked, the msg is memcpy'd directly into the buffer the
* receiver parked in task_ptr->msg and the receiver is woken; otherwise the
* msg is copied into the ring buffer (space permitting).
*
* NOTE(review): the else-branch for opt_send_method != SEND_TO_END is EMPTY:
* a front-send with no blocked receiver stores nothing yet still returns
* RAW_SUCCESS, silently dropping the msg.  Either an msg_to_front_buffer()
* call is missing or non-end sends should be rejected -- confirm intent.
*
* Returns RAW_ERROR_OBJECT_TYPE, RAW_QUEUE_BUFFER_FULL, or RAW_SUCCESS.
*/
RAW_U16 queue_buffer_post(RAW_QUEUE_BUFFER *q_b, RAW_VOID *p_void, MSG_SIZE_TYPE msg_size, RAW_U8 opt_send_method)
{
	LIST *block_list_head;
	RAW_TASK_OBJ *task_ptr;

	RAW_SR_ALLOC();

	RAW_CRITICAL_ENTER();

	if (q_b->common_block_obj.object_type != RAW_QUEUE_BUFFER_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &q_b->common_block_obj.block_list;

	/* Not enough free space in the buffer for this msg. */
	if (!is_queue_buffer_free(q_b, msg_size)) {
		RAW_CRITICAL_EXIT();
		TRACE_QUEUE_BUFFER_MAX(raw_task_active, q_b, p_void, msg_size, opt_send_method);
		return RAW_QUEUE_BUFFER_FULL;
	}

	/*Queue buffer is not full here, if there is no blocked receive task*/
	if (is_list_empty(block_list_head)) {

		if (opt_send_method == SEND_TO_END) {
			msg_to_end_buffer(q_b, p_void, msg_size);
		}
		else {
			/* NOTE(review): intentionally-looking but suspicious no-op; see
			   header comment -- the msg is dropped on this path. */
		}

		RAW_CRITICAL_EXIT();
		TRACE_QUEUE_BUFFER_POST(raw_task_active, q_b, p_void, msg_size, opt_send_method);
		return RAW_SUCCESS;
	}

	/* A receiver is blocked: copy the msg straight into its parked buffer
	   and wake it, bypassing the ring buffer. */
	task_ptr = list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);
	raw_memcpy(task_ptr->msg, p_void, msg_size);
	task_ptr->qb_msg_size = msg_size;

	raw_wake_object(task_ptr);

	RAW_CRITICAL_EXIT();

	TRACE_QUEUE_BUFFER_WAKE_TASK(raw_task_active, list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, msg_size, opt_send_method);

	/* The woken receiver may preempt us. */
	raw_sched();
	return RAW_SUCCESS;
}