/*
************************************************************************************************************************
*                                               Get a semaphore
*
* Description: This function is called to get (acquire) a semaphore.
*
* Arguments  : semaphore_ptr is the address of an initialized semaphore object.
*              -----
*              wait_option: how the service behaves when the semaphore count is 0:
*                           RAW_NO_WAIT       (0x00000000) return immediately
*                           RAW_WAIT_FOREVER  (0xFFFFFFFF) block until available
*                           timeout value     (0x00000001 through 0xFFFFFFFE) block up to that many ticks
*
* Returns    : RAW_SUCCESS       : Got the semaphore successfully.
*              RAW_BLOCK_ABORT   : The pend was aborted by another task or ISR.
*              RAW_NO_PEND_WAIT  : Semaphore unavailable and option is RAW_NO_WAIT.
*              RAW_SCHED_DISABLE : Scheduler is locked and the task is not allowed to block.
*              RAW_BLOCK_DEL     : The semaphore was deleted while the task was pending.
*
* Note(s)    : Must not be called from an ISR (checked when RAW_SEMA_FUNCTION_CHECK is enabled).
*
************************************************************************************************************************
*/
RAW_OS_ERROR raw_semaphore_get(RAW_SEMAPHORE *semaphore_ptr, RAW_TICK_TYPE wait_option)
{
    RAW_OS_ERROR error_status;

    /* Declares the CPU status word used by the critical-section macros. */
    RAW_SR_ALLOC();

    #if (RAW_SEMA_FUNCTION_CHECK > 0)

    /* Reject a null object pointer. */
    if (semaphore_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    /* This service may block, so it is forbidden at interrupt level. */
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
    #endif

    RAW_CRITICAL_ENTER();

    /* Guard against an uninitialized or already-deleted object. */
    if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* Fast path: a count is available, consume one and return at once. */
    if (semaphore_ptr->count) {
        semaphore_ptr->count--;
        RAW_CRITICAL_EXIT();
        TRACE_SEMAPHORE_GET_SUCCESS(raw_task_active, semaphore_ptr);
        return RAW_SUCCESS;
    }

    /* Cannot get the semaphore; return immediately if wait_option is RAW_NO_WAIT. */
    if (wait_option == RAW_NO_WAIT) {
        RAW_CRITICAL_EXIT();
        return RAW_NO_PEND_WAIT;
    }

    /* If the scheduler is locked, blocking is not allowed (macro returns RAW_SCHED_DISABLE). */
    SYSTEM_LOCK_PROCESS();

    /* Block the current task on the semaphore's pend list for up to wait_option ticks. */
    raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)semaphore_ptr, raw_task_active, wait_option);

    RAW_CRITICAL_EXIT();

    TRACE_SEMAPHORE_GET_BLOCK(raw_task_active, semaphore_ptr, wait_option);

    /* Hand the CPU to the next ready task; we resume here once woken. */
    raw_sched();

    /* The task was woken up: translate the wake reason into a return code. */
    error_status = block_state_post_process(raw_task_active, 0);

    return error_status;
}
/*
 * Delete an event object.
 *
 * Invalidates the object type, wakes every task pending on the event
 * (each woken task returns RAW_BLOCK_DEL from its blocking call),
 * clears the flag word, and reschedules.
 *
 * Returns RAW_SUCCESS on success, RAW_NULL_OBJECT / RAW_NOT_CALLED_BY_ISR /
 * RAW_ERROR_OBJECT_TYPE on the corresponding failure.
 */
RAW_U16 raw_event_delete(RAW_EVENT *event_ptr)
{
    LIST *pend_list;

    RAW_SR_ALLOC();

    #if (RAW_EVENT_FUNCTION_CHECK > 0)

    if (event_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
    #endif

    RAW_CRITICAL_ENTER();

    if (event_ptr->common_block_obj.object_type != RAW_EVENT_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    pend_list = &event_ptr->common_block_obj.block_list;

    /* Mark the object invalid so no task can pend on it from now on. */
    event_ptr->common_block_obj.object_type = 0u;

    /* Drain the pend list: every blocked task is woken until the list is empty. */
    for (;;) {

        if (is_list_empty(pend_list)) {
            break;
        }

        delete_pend_obj(list_entry(pend_list->next, RAW_TASK_OBJ, task_list));
    }

    event_ptr->flags = 0u;

    RAW_CRITICAL_EXIT();

    TRACE_EVENT_DELETE(raw_task_active, event_ptr);

    /* A woken task may outrank the caller; give the scheduler a chance to run it. */
    raw_sched();

    return RAW_SUCCESS;
}
/*
 * Delete a message queue object.
 *
 * Invalidates the object type, wakes every task pending on the queue
 * (each woken task returns RAW_BLOCK_DEL from its blocking call),
 * and reschedules.
 *
 * Returns RAW_SUCCESS on success, RAW_NULL_OBJECT / RAW_NOT_CALLED_BY_ISR /
 * RAW_ERROR_OBJECT_TYPE on the corresponding failure.
 */
RAW_U16 raw_queue_delete(RAW_QUEUE *p_q)
{
    LIST *pend_list;

    RAW_SR_ALLOC();

    #if (RAW_QUEUE_FUNCTION_CHECK > 0)

    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
    #endif

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    pend_list = &p_q->common_block_obj.block_list;

    /* Invalidate the object so no task can pend on it from now on. */
    p_q->common_block_obj.object_type = 0u;

    /* Wake every task blocked on this queue until the pend list is empty. */
    for (;;) {

        if (is_list_empty(pend_list)) {
            break;
        }

        delete_pend_obj(list_entry(pend_list->next, RAW_TASK_OBJ, task_list));
    }

    RAW_CRITICAL_EXIT();

    TRACE_QUEUE_DELETE(raw_task_active, p_q);

    /* A woken task may outrank the caller; give the scheduler a chance to run it. */
    raw_sched();

    return RAW_SUCCESS;
}
/*
 * Delete a semaphore object.
 *
 * Invalidates the object type, wakes every task pending on the semaphore
 * (each woken task returns RAW_BLOCK_DEL from its blocking call),
 * and reschedules.
 *
 * Returns RAW_SUCCESS on success, RAW_NULL_OBJECT / RAW_NOT_CALLED_BY_ISR /
 * RAW_ERROR_OBJECT_TYPE on the corresponding failure.
 */
RAW_OS_ERROR raw_semaphore_delete(RAW_SEMAPHORE *semaphore_ptr)
{
    LIST *pend_list;

    RAW_SR_ALLOC();

    #if (RAW_SEMA_FUNCTION_CHECK > 0)

    if (semaphore_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
    #endif

    RAW_CRITICAL_ENTER();

    if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    pend_list = &semaphore_ptr->common_block_obj.block_list;

    /* Invalidate the object so no task can pend on it from now on. */
    semaphore_ptr->common_block_obj.object_type = RAW_OBJ_TYPE_NONE;

    /* Wake every task blocked on this semaphore until the pend list is empty. */
    for (;;) {

        if (is_list_empty(pend_list)) {
            break;
        }

        delete_pend_obj(raw_list_entry(pend_list->next, RAW_TASK_OBJ, task_list));
    }

    RAW_CRITICAL_EXIT();

    TRACE_SEMAPHORE_DELETE(raw_task_active, semaphore_ptr);

    /* A woken task may outrank the caller; give the scheduler a chance to run it. */
    raw_sched();

    return RAW_SUCCESS;
}
/*
 * Delete a mutex object.
 *
 * Invalidates the object type, undoes any priority change granted to the
 * current owner, wakes every task pending on the mutex (each woken task
 * returns RAW_BLOCK_DEL from its blocking call), and reschedules.
 *
 * Returns RAW_SUCCESS on success, RAW_NULL_OBJECT / RAW_ERROR_OBJECT_TYPE
 * on the corresponding failure.
 */
RAW_OS_ERROR raw_mutex_delete(RAW_MUTEX *mutex_ptr)
{
    LIST *block_list_head;

    RAW_SR_ALLOC();

    #if (RAW_MUTEX_FUNCTION_CHECK > 0)

    if (mutex_ptr == 0) {
        return RAW_NULL_OBJECT;
    }
    #endif

    RAW_CRITICAL_ENTER();

    if (mutex_ptr->common_block_obj.object_type != RAW_MUTEX_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    block_list_head = &mutex_ptr->common_block_obj.block_list;

    /* Invalidate the object so no task can pend on it from now on. */
    mutex_ptr->common_block_obj.object_type = RAW_OBJ_TYPE_NONE;

    /* If a task still owns the mutex, detach it from that task's mutex list
       and restore the owner's priority before anyone is woken. */
    if (mutex_ptr->mtxtsk) {
        release_mutex(mutex_ptr->mtxtsk, mutex_ptr);
    }

    /* All tasks blocked on this mutex are woken up. */
    while (!is_list_empty(block_list_head)) {
        delete_pend_obj(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
    }

    RAW_CRITICAL_EXIT();

    TRACE_MUTEX_DELETE(raw_task_active, mutex_ptr);

    /* A woken task may outrank the caller; give the scheduler a chance to run it. */
    raw_sched();

    return RAW_SUCCESS;
}
/*
 * Release (signal) a semaphore.
 *
 * If no task is pending: increments the count (checking for overflow at the
 * maximum RAW_PROCESSOR_UINT value), updates the debug peak count, and runs
 * the registered send-notify callback if any.
 * Otherwise wakes either the single highest-priority pending task or, when
 * opt_wake_all is nonzero, every pending task, then reschedules.
 *
 * Returns RAW_SUCCESS, RAW_ERROR_OBJECT_TYPE, or RAW_SEMAPHORE_OVERFLOW.
 *
 * FIX: the wake paths previously re-evaluated
 * raw_list_entry(block_list_head->next, ...) for the TRACE macro AFTER
 * raw_wake_object() had already removed that task from the pend list, so the
 * trace recorded the wrong task — and a bogus pointer derived from the list
 * head itself once the list was empty. The task pointer is now captured once
 * before waking and reused for the trace.
 */
RAW_OS_ERROR semaphore_put(RAW_SEMAPHORE *semaphore_ptr, RAW_U8 opt_wake_all)
{
    LIST *block_list_head;
    RAW_TASK_OBJ *wake_task;

    RAW_SR_ALLOC();

    RAW_CRITICAL_ENTER();

    if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    block_list_head = &semaphore_ptr->common_block_obj.block_list;

    /* If no task is blocked on this semaphore, just bank the count and return. */
    if (is_list_empty(block_list_head)) {

        /* Count is already at the maximum representable value: overflow. */
        if (semaphore_ptr->count == (RAW_PROCESSOR_UINT) - 1) {
            RAW_CRITICAL_EXIT();
            TRACE_SEMAPHORE_OVERFLOW(raw_task_active, semaphore_ptr);
            return RAW_SEMAPHORE_OVERFLOW;
        }

        /* Increase the available resource count. */
        semaphore_ptr->count++;

        /* Track the high-water mark for debugging. */
        if (semaphore_ptr->count > semaphore_ptr->peak_count) {
            semaphore_ptr->peak_count = semaphore_ptr->count;
        }

        RAW_CRITICAL_EXIT();

        /* If the semaphore is registered with a notify function, call it. */
        if (semaphore_ptr->semphore_send_notify) {
            semaphore_ptr->semphore_send_notify(semaphore_ptr);
        }

        TRACE_SEMAPHORE_COUNT_INCREASE(raw_task_active, semaphore_ptr);

        return RAW_SUCCESS;
    }

    if (opt_wake_all) {
        /* Wake every task blocked on this semaphore. */
        while (!is_list_empty(block_list_head)) {
            /* Capture the task before raw_wake_object() unlinks it from the list. */
            wake_task = raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);
            raw_wake_object(wake_task);
            TRACE_SEM_WAKE_TASK(raw_task_active, wake_task, opt_wake_all);
        }
    }
    else {
        /* Wake only the highest-priority task blocked on the semaphore
           (the pend list is priority-ordered; it is the first entry). */
        wake_task = raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);
        raw_wake_object(wake_task);
        TRACE_SEM_WAKE_TASK(raw_task_active, wake_task, opt_wake_all);
    }

    RAW_CRITICAL_EXIT();

    /* A woken task may outrank the caller; give the scheduler a chance to run it. */
    raw_sched();

    return RAW_SUCCESS;
}
/*
 * Post a message to a queue.
 *
 * If no task is pending: stores p_void at the write end (SEND_TO_END) or the
 * read end (send-to-front) of the ring buffer, updates the debug peak count,
 * and runs the registered send-notify callback if any.
 * Otherwise delivers the message directly to the single highest-priority
 * pending task or, when opt_wake_all is nonzero, to every pending task, then
 * reschedules.
 *
 * Returns RAW_SUCCESS, RAW_ERROR_OBJECT_TYPE, or RAW_MSG_MAX when full.
 *
 * FIX: the wake paths previously re-evaluated
 * list_entry(block_list_head->next, ...) for the TRACE macro AFTER
 * wake_send_msg() had already removed that task from the pend list, so the
 * trace recorded the wrong task — and a bogus pointer derived from the list
 * head itself once the list was empty. The task pointer is now captured once
 * before waking and reused for the trace.
 */
RAW_U16 msg_post(RAW_QUEUE *p_q, RAW_VOID *p_void, RAW_U8 opt_send_method, RAW_U8 opt_wake_all)
{
    LIST *block_list_head;
    RAW_TASK_OBJ *wake_task;

    RAW_SR_ALLOC();

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    block_list_head = &p_q->common_block_obj.block_list;

    /* Queue full: reject the message. */
    if (p_q->msg_q.current_numbers >= p_q->msg_q.size) {
        RAW_CRITICAL_EXIT();
        TRACE_QUEUE_MSG_MAX(raw_task_active, p_q, p_void, opt_send_method);
        return RAW_MSG_MAX;
    }

    /* Queue is not full here; if there is no blocked receive task, buffer the message. */
    if (is_list_empty(block_list_head)) {

        p_q->msg_q.current_numbers++;

        /* Update peak_numbers for debug. */
        if (p_q->msg_q.current_numbers > p_q->msg_q.peak_numbers) {
            p_q->msg_q.peak_numbers = p_q->msg_q.current_numbers;
        }

        if (opt_send_method == SEND_TO_END) {

            *p_q->msg_q.write++ = p_void;

            /* Wrap the write pointer back to the start of the ring. */
            if (p_q->msg_q.write == p_q->msg_q.queue_end) {
                p_q->msg_q.write = p_q->msg_q.queue_start;
            }
        }
        else {
            /* Wrap read pointer to end if we are at the 1st queue entry. */
            if (p_q->msg_q.read == p_q->msg_q.queue_start) {
                p_q->msg_q.read = p_q->msg_q.queue_end;
            }

            p_q->msg_q.read--;
            *p_q->msg_q.read = p_void;    /* Insert message into queue */
        }

        RAW_CRITICAL_EXIT();

        /* If the queue is registered with a notify function, call it. */
        if (p_q->queue_send_notify) {
            p_q->queue_send_notify(p_q);
        }

        TRACE_QUEUE_MSG_POST(raw_task_active, p_q, p_void, opt_send_method);

        return RAW_SUCCESS;
    }

    if (opt_wake_all) {
        /* Wake every task blocked on this queue, handing each the message. */
        while (!is_list_empty(block_list_head)) {
            /* Capture the task before wake_send_msg() unlinks it from the list. */
            wake_task = list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);
            wake_send_msg(wake_task, p_void);
            TRACE_QUEUE_WAKE_TASK(raw_task_active, wake_task, p_void, opt_wake_all);
        }
    }
    else {
        /* Wake the highest-priority task blocked on this queue and send the msg to it. */
        wake_task = list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);
        wake_send_msg(wake_task, p_void);
        TRACE_QUEUE_WAKE_TASK(raw_task_active, wake_task, p_void, opt_wake_all);
    }

    RAW_CRITICAL_EXIT();

    /* A woken task may outrank the caller; give the scheduler a chance to run it. */
    raw_sched();

    return RAW_SUCCESS;
}
/*
************************************************************************************************************************
*                                               Receive a msg
*
* Description: This function is called to receive a msg from a queue.
*
* Arguments  : p_q is the address of the queue object.
*              -----
*              msg is the address of a pointer; on success it is filled with the address of the received msg.
*              -----
*              wait_option: how the service behaves if the queue is empty:
*                           RAW_NO_WAIT       (0x00000000)
*                           RAW_WAIT_FOREVER  (0xFFFFFFFF)
*                           timeout value     (0x00000001 through 0xFFFFFFFE)
*
* Returns    : RAW_SUCCESS       : raw os returns success.
*              RAW_BLOCK_DEL     : the queue was deleted while the task was pending.
*              RAW_BLOCK_TIMEOUT : the queue stayed empty for the whole waiting time.
*              RAW_BLOCK_ABORT   : the pend was aborted while waiting.
*              RAW_STATE_UNKNOWN : possibly a system error.
*
* Note(s)    : If no msg is received then *msg is set to the null pointer (0). An ISR may call this function
*              only with wait_option equal to RAW_NO_WAIT.
*
************************************************************************************************************************
*/
RAW_U16 raw_queue_receive(RAW_QUEUE *p_q, RAW_TICK_TYPE wait_option, RAW_VOID **msg)
{
    RAW_VOID *pmsg;
    RAW_U16 result;

    /* Declares the CPU status word used by the critical-section macros. */
    RAW_SR_ALLOC();

    #if (RAW_QUEUE_FUNCTION_CHECK > 0)

    /* An ISR may only poll (RAW_NO_WAIT); it must never block. */
    if (raw_int_nesting && (wait_option != RAW_NO_WAIT)) {
        return RAW_NOT_CALLED_BY_ISR;
    }

    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }

    if (msg == 0) {
        return RAW_NULL_POINTER;
    }
    #endif

    #if (CONFIG_RAW_ZERO_INTERRUPT > 0)

    /* In zero-interrupt configuration this service is forbidden at interrupt level entirely. */
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
    #endif

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* Fast path: the queue holds at least one msg, take it from the read end. */
    if (p_q->msg_q.current_numbers) {

        pmsg = *p_q->msg_q.read++;

        if (p_q->msg_q.read == p_q->msg_q.queue_end) {
            /* Wrap around to start. */
            p_q->msg_q.read = p_q->msg_q.queue_start;
        }

        *msg = pmsg;
        p_q->msg_q.current_numbers--;

        RAW_CRITICAL_EXIT();

        TRACE_QUEUE_GET_MSG(raw_task_active, p_q, wait_option, *msg);

        return RAW_SUCCESS;
    }

    /* Queue is empty and caller does not want to wait. */
    if (wait_option == RAW_NO_WAIT) {
        *msg = (RAW_VOID *)0;
        RAW_CRITICAL_EXIT();
        return RAW_NO_PEND_WAIT;
    }

    /* If the system is locked, blocking is not allowed (macro returns an error). */
    SYSTEM_LOCK_PROCESS_QUEUE();

    /* Block the current task on the queue's pend list for up to wait_option ticks. */
    raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)p_q, raw_task_active, wait_option);

    RAW_CRITICAL_EXIT();

    TRACE_QUEUE_GET_BLOCK(raw_task_active, p_q, wait_option);

    /* Hand the CPU to the next ready task; we resume here once woken. */
    raw_sched();

    /* Default to no msg; block_state_post_process fills *msg on a successful direct handoff. */
    *msg = (RAW_VOID *)0;
    result = block_state_post_process(raw_task_active, msg);

    return result;
}
/*
 * Set or mask event flags and wake tasks whose wait condition becomes true.
 *
 * With RAW_FLAGS_AND_MASK in set_option the flag word is ANDed with
 * flags_to_set (i.e. flags are cleared) and the function returns at once;
 * otherwise flags_to_set is ORed in and every pending task whose stored
 * AND/OR condition is now satisfied is woken, receiving the current flag
 * word through its raw_additional_suspend_info pointer.
 *
 * Returns RAW_SUCCESS or RAW_ERROR_OBJECT_TYPE.
 */
RAW_U16 event_set(RAW_EVENT *event_ptr, RAW_U32 flags_to_set, RAW_U8 set_option)
{
    LIST *iter;
    LIST *event_head_ptr;
    LIST *iter_temp;
    RAW_TASK_OBJ *task_ptr;
    RAW_U8 status;

    RAW_SR_ALLOC();

    status = RAW_FALSE;

    RAW_CRITICAL_ENTER();

    if (event_ptr->common_block_obj.object_type != RAW_EVENT_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    event_head_ptr = &event_ptr->common_block_obj.block_list;

    /* If the set_option is AND_MASK, it just clears the flags and returns immediately. */
    if (set_option & RAW_FLAGS_AND_MASK) {
        event_ptr->flags &= flags_to_set;
        RAW_CRITICAL_EXIT();
        return RAW_SUCCESS;
    }
    /* If it is OR mask then set the flags and continue. */
    else {
        event_ptr->flags |= flags_to_set;
    }

    iter = event_head_ptr->next;

    /* Walk the pend list (non-empty while iter != head). iter_temp is saved
       before any wake because raw_wake_object unlinks the current node. */
    while (iter != event_head_ptr) {

        task_ptr = list_entry(iter, RAW_TASK_OBJ, task_list);
        iter_temp = iter->next;

        /* Re-evaluate this task's stored wait condition against the new flags. */
        if (task_ptr->raw_suspend_option & RAW_FLAGS_AND_MASK) {
            /* AND wait: every requested flag must be present. */
            if ((event_ptr->flags & task_ptr->raw_suspend_flags) == task_ptr->raw_suspend_flags) {
                status = RAW_TRUE;
            }
            else {
                status = RAW_FALSE;
            }
        }
        else {
            /* OR wait: any requested flag suffices. */
            if (event_ptr->flags & task_ptr->raw_suspend_flags) {
                status = RAW_TRUE;
            }
            else {
                status = RAW_FALSE;
            }
        }

        if (status == RAW_TRUE) {

            /* Hand the current flag word back through the waiter's actual_flags pointer. */
            (*(RAW_U32 *)(task_ptr->raw_additional_suspend_info)) = event_ptr->flags;

            /* The task's condition is met, wake it. */
            raw_wake_object(task_ptr);

            TRACE_EVENT_WAKE(raw_task_active, task_ptr);
        }

        iter = iter_temp;
    }

    RAW_CRITICAL_EXIT();

    /* A woken task may outrank the caller; give the scheduler a chance to run it. */
    raw_sched();

    return RAW_SUCCESS;
}
/*
************************************************************************************************************************
*                                               Get an event
*
* Description: This service retrieves event flags from the specified event flags group.
*              Each event flags group contains 32 event flags. Each flag is represented
*              by a single bit. This service can retrieve a variety of event flag
*              combinations, as selected by the input parameters.
*
* Arguments  : event_ptr: is the address of the event object.
*              -----
*              requested_flags: is the 32-bit unsigned variable that represents the requested event flags.
*              -----
*              get_option: specifies whether all or any of the requested event flags are required.
*                          Valid selections:
*                              RAW_AND
*                              RAW_AND_CLEAR
*                              RAW_OR
*                              RAW_OR_CLEAR
*                          RAW_AND / RAW_AND_CLEAR require all requested flags to be present;
*                          RAW_OR / RAW_OR_CLEAR are satisfied by any one of them. Flags that
*                          satisfy the request are cleared (set to zero) if RAW_AND_CLEAR or
*                          RAW_OR_CLEAR is specified.
*              -----
*              wait_option: how the service behaves if the selected event flags are not set:
*                           RAW_NO_WAIT       (0x00000000)
*                           RAW_WAIT_FOREVER  (0xFFFFFFFF)
*                           timeout value     (0x00000001 through 0xFFFFFFFE)
*              -----
*              actual_flags_ptr: will be filled with the actual flags when the function returns.
*
* Returns    : RAW_SUCCESS       : Got the event successfully.
*              RAW_BLOCK_ABORT   : The pend was aborted by another task or ISR.
*              RAW_NO_PEND_WAIT  : Event not satisfied and option is RAW_NO_WAIT.
*              RAW_SCHED_DISABLE : System is locked and the task is not allowed to block.
*              RAW_BLOCK_DEL     : The event was deleted while the task was pending.
*
* Note(s)    : RAW_STATE_UNKNOWN means a wrong task state, probably a system error.
*
************************************************************************************************************************
*/
RAW_U16 raw_event_get(RAW_EVENT *event_ptr, RAW_U32 requested_flags, RAW_U8 get_option, RAW_U32 *actual_flags_ptr, RAW_TICK_TYPE wait_option)
{
    RAW_U16 error_status;
    RAW_U8 status;

    /* Declares the CPU status word used by the critical-section macros. */
    RAW_SR_ALLOC();

    #if (RAW_EVENT_FUNCTION_CHECK > 0)

    if (event_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    /* This service may block, so it is forbidden at interrupt level. */
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

    if ((get_option != RAW_AND) && (get_option != RAW_OR) && (get_option != RAW_AND_CLEAR) && (get_option != RAW_OR_CLEAR)) {
        return RAW_NO_THIS_OPTION;
    }
    #endif

    RAW_CRITICAL_ENTER();

    if (event_ptr->common_block_obj.object_type != RAW_EVENT_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* If option is an AND-type request: all requested flags must be present. */
    if (get_option & RAW_FLAGS_AND_MASK) {

        if ((event_ptr->flags & requested_flags) == requested_flags) {
            status = RAW_TRUE;
        }
        else {
            status = RAW_FALSE;
        }
    }
    /* If option is an OR-type request: any one requested flag suffices. */
    else {

        if (event_ptr->flags & requested_flags) {
            status = RAW_TRUE;
        }
        else {
            status = RAW_FALSE;
        }
    }

    /* Fast path: the condition is already satisfied. */
    if (status == RAW_TRUE) {

        *actual_flags_ptr = event_ptr->flags;

        /* Clear the consumed flags if a *_CLEAR option was chosen. */
        if (get_option & RAW_FLAGS_CLEAR_MASK) {
            event_ptr->flags &= ~requested_flags;
        }

        RAW_CRITICAL_EXIT();

        TRACE_EVENT_GET(raw_task_active, event_ptr);

        return RAW_SUCCESS;
    }

    /* Cannot get the event; return immediately if wait_option is RAW_NO_WAIT. */
    if (wait_option == RAW_NO_WAIT) {
        RAW_CRITICAL_EXIT();
        return RAW_NO_PEND_WAIT;
    }

    /* System is locked so the task cannot block; the macro returns immediately. */
    SYSTEM_LOCK_PROCESS();

    /* Remember the request so event_set() can re-evaluate it and report the
       actual flags back through raw_additional_suspend_info. */
    raw_task_active->raw_suspend_option = get_option;
    raw_task_active->raw_suspend_flags = requested_flags;
    raw_task_active->raw_additional_suspend_info = actual_flags_ptr;

    raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)event_ptr, raw_task_active, wait_option);

    RAW_CRITICAL_EXIT();

    TRACE_EVENT_GET_BLOCK(raw_task_active, event_ptr, wait_option);

    /* Hand the CPU to the next ready task; we resume here once woken. */
    raw_sched();

    /* The task was woken up; find out which reason caused the wake-up. */
    error_status = block_state_post_process(raw_task_active, 0);

    /* If it did not succeed then we do not need to clear the flags; just return the error status. */
    if (error_status != RAW_SUCCESS) {
        return error_status;
    }

    RAW_CRITICAL_ENTER();

    /* Clear the consumed flags if a *_CLEAR option was chosen. */
    if (get_option & RAW_FLAGS_CLEAR_MASK) {
        event_ptr->flags &= ~requested_flags;
    }

    RAW_CRITICAL_EXIT();

    return RAW_SUCCESS;
}
/*
************************************************************************************************************************
*                                               Release a mutex
*
* Description: This function is called to release a mutex.
*
* Arguments  : mutex_ptr is the address of the mutex object to be released.
*
* Returns    : RAW_SUCCESS                    : raw os returns success.
*              RAW_NULL_OBJECT                : mutex_ptr is null (when checking is enabled).
*              RAW_NOT_CALLED_BY_ISR          : called from an ISR (when checking is enabled).
*              RAW_ERROR_OBJECT_TYPE          : object is not an initialized mutex.
*              RAW_MUTEX_NOT_RELEASE_BY_OCCYPY: caller does not own the mutex.
*              RAW_MUTEX_OWNER_NESTED         : nested ownership count not yet zero.
*
* Note(s)    : Only the owning task may release the mutex. On release, ownership is
*              transferred directly to the highest-priority waiter, if any.
*
************************************************************************************************************************
*/
RAW_OS_ERROR raw_mutex_put(RAW_MUTEX *mutex_ptr)
{
    LIST *block_list_head;
    RAW_TASK_OBJ *tcb;

    /* Declares the CPU status word used by the critical-section macros. */
    RAW_SR_ALLOC();

    #if (RAW_MUTEX_FUNCTION_CHECK > 0)

    if (mutex_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    /* Mutexes are task-only objects; ISRs must not release them. */
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
    #endif

    RAW_CRITICAL_ENTER();

    if (mutex_ptr->common_block_obj.object_type != RAW_MUTEX_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* Must release the mutex by self (only the owner may release). */
    if (raw_task_active != mutex_ptr->mtxtsk) {
        RAW_CRITICAL_EXIT();
        return RAW_MUTEX_NOT_RELEASE_BY_OCCYPY;
    }

    /* Unwind one level of recursive ownership; still owned if the count is nonzero. */
    mutex_ptr->owner_nested--;

    if (mutex_ptr->owner_nested) {
        RAW_CRITICAL_EXIT();
        return RAW_MUTEX_OWNER_NESTED;
    }

    /* Detach the mutex from the caller's owned-mutex list and restore its priority. */
    release_mutex(raw_task_active, mutex_ptr);

    block_list_head = &mutex_ptr->common_block_obj.block_list;

    /* If no task is blocked on this mutex, it simply becomes free. */
    if (is_list_empty(block_list_head)) {
        /* No waiting task. */
        mutex_ptr->mtxtsk = 0;
        RAW_CRITICAL_EXIT();

        TRACE_MUTEX_RELEASE_SUCCESS(raw_task_active, mutex_ptr);

        return RAW_SUCCESS;
    }

    /* There must be a task blocked on this mutex object. */
    tcb = raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);

    /* Wake up the waiting task, which is the highest-priority task on the list. */
    raw_wake_object(tcb);

    /* Transfer ownership: link the mutex into the new owner's owned-mutex list. */
    mutex_ptr->mtxtsk = tcb;
    mutex_ptr->mtxlist = tcb->mtxlist;
    tcb->mtxlist = mutex_ptr;
    mutex_ptr->owner_nested = 1u;

    if (mutex_ptr->policy == RAW_MUTEX_CEILING_POLICY) {

        if (tcb->priority > mutex_ptr->ceiling_prio) {
            /* Raise the priority of the task that got the lock to the ceiling priority.
               (Numerically smaller value = higher priority.) */
            change_internal_task_priority(tcb, mutex_ptr->ceiling_prio);
        }
    }

    TRACE_MUTEX_WAKE_TASK(raw_task_active, tcb);

    RAW_CRITICAL_EXIT();

    /* The new owner may outrank the caller; give the scheduler a chance to run it. */
    raw_sched();

    return RAW_SUCCESS;
}
/*
************************************************************************************************************************
*                                               Get a mutex
*
* Description: This function is called to get (acquire) a mutex.
*
* Arguments  : mutex_ptr: is the address of the mutex object to be acquired.
*              wait_option: 0 returns immediately, 0xffffffff waits forever,
*              other values are a timeout in ticks:
*                           RAW_NO_WAIT       (0x00000000)
*                           RAW_WAIT_FOREVER  (0xFFFFFFFF)
*                           timeout value     (0x00000001 through 0xFFFFFFFE)
*
* Returns    : RAW_SUCCESS                 : Got the mutex successfully.
*              RAW_BLOCK_ABORT             : The pend was aborted by another task or ISR.
*              RAW_NO_PEND_WAIT            : Mutex not available and option is RAW_NO_WAIT.
*              RAW_SCHED_DISABLE           : System is locked and the task is not allowed to block.
*              RAW_BLOCK_DEL               : The mutex was deleted while the task was pending.
*              RAW_MUTEX_OWNER_NESTED      : Caller already owns the mutex (nesting count incremented).
*              RAW_EXCEED_CEILING_PRIORITY : Caller's base priority violates the ceiling priority.
*
* Note(s)    : Recursive acquisition by the owner is counted; nesting overflow halts
*              the system via port_system_error_process.
*
************************************************************************************************************************
*/
RAW_OS_ERROR raw_mutex_get(RAW_MUTEX *mutex_ptr, RAW_TICK_TYPE wait_option)
{
    RAW_OS_ERROR error_status;
    RAW_TASK_OBJ *mtxtsk;

    /* Declares the CPU status word used by the critical-section macros. */
    RAW_SR_ALLOC();

    #if (RAW_MUTEX_FUNCTION_CHECK > 0)

    if (mutex_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    /* Mutexes are task-only objects; ISRs must not acquire them. */
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
    #endif

    RAW_CRITICAL_ENTER();

    if (mutex_ptr->common_block_obj.object_type != RAW_MUTEX_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* If the same task gets the same mutex again, it causes nested ownership. */
    if (raw_task_active == mutex_ptr->mtxtsk) {

        if (mutex_ptr->owner_nested == (RAW_MUTEX_NESTED_TYPE) - 1) {
            /* Nesting counter would overflow: likely a design error, the system must be stopped here! */
            port_system_error_process(RAW_MUTEX_NESTED_OVERFLOW, 0, 0, 0, 0, 0, 0);
        }
        else {
            mutex_ptr->owner_nested++;
        }

        RAW_CRITICAL_EXIT();
        return RAW_MUTEX_OWNER_NESTED;
    }

    if (mutex_ptr->policy == RAW_MUTEX_CEILING_POLICY) {

        if (raw_task_active->bpriority < mutex_ptr->ceiling_prio) {
            /* Violation of the ceiling priority limit: the caller's base priority
               is higher (numerically smaller) than the ceiling. */
            RAW_CRITICAL_EXIT();

            TRACE_MUTEX_EX_CE_PRI(raw_task_active, mutex_ptr, wait_option);

            return RAW_EXCEED_CEILING_PRIORITY;
        }
    }

    mtxtsk = mutex_ptr->mtxtsk;

    /* Mutex is free: take ownership immediately. */
    if (mtxtsk == 0) {
        /* Get lock: link the mutex into the caller's owned-mutex list. */
        mutex_ptr->mtxtsk = raw_task_active;
        mutex_ptr->mtxlist = raw_task_active->mtxlist;
        raw_task_active->mtxlist = mutex_ptr;
        mutex_ptr->owner_nested = 1u;

        if (mutex_ptr->policy == RAW_MUTEX_CEILING_POLICY) {

            if (raw_task_active->priority > mutex_ptr->ceiling_prio) {
                /* Raise the caller's own priority to the ceiling priority. */
                change_internal_task_priority(raw_task_active, mutex_ptr->ceiling_prio);
            }
        }

        RAW_CRITICAL_EXIT();

        TRACE_MUTEX_GET(raw_task_active, mutex_ptr, wait_option);

        return RAW_SUCCESS;
    }

    /* Cannot get the mutex; return immediately if wait_option is RAW_NO_WAIT. */
    if (wait_option == RAW_NO_WAIT) {
        RAW_CRITICAL_EXIT();
        return RAW_NO_PEND_WAIT;
    }

    /* System is locked so the task cannot block; the macro returns immediately. */
    SYSTEM_LOCK_PROCESS();

    /* If the current task is a higher-priority task blocking on the mutex,
       a priority inversion condition has occurred; priority inheritance is used here. */
    if (mutex_ptr->policy == RAW_MUTEX_INHERIT_POLICY) {

        if (raw_task_active->priority < mtxtsk->priority) {

            TRACE_TASK_PRI_INV(raw_task_active, mtxtsk);

            /* Boost the owner to the caller's priority so it can finish and release. */
            change_internal_task_priority(mtxtsk, raw_task_active->priority);
        }
    }

    /* In any case block the current task. */
    raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)mutex_ptr, raw_task_active, wait_option);

    RAW_CRITICAL_EXIT();

    TRACE_MUTEX_GET_BLOCK(raw_task_active, mutex_ptr, wait_option);

    /* Find the next highest-priority task ready to run. */
    raw_sched();

    /* The task was woken up; find out which reason caused the wake-up. */
    error_status = block_state_post_process(raw_task_active, 0);

    return error_status;
}
/*
************************************************************************************************************************
*                                               Receive a msg
*
* Description: This function is called to receive a msg from a queue buffer.
*
* Arguments  : q_b is the address of the queue buffer object.
*              -----
*              msg is the address of the caller's buffer; it will be filled with data within this API.
*              If you want to use the extension memcpy, make sure the msg address is 4 bytes aligned.
*              -----
*              wait_option: how the service behaves if the queue buffer is empty:
*                           RAW_NO_WAIT       (0x00000000)
*                           RAW_WAIT_FOREVER  (0xFFFFFFFF)
*                           timeout value     (0x00000001 through 0xFFFFFFFE)
*              -----
*              receive_size: is filled with the size of the msg received.
*
* Returns    : RAW_SUCCESS       : raw os returns success.
*              RAW_BLOCK_DEL     : the queue buffer was deleted while the task was pending.
*              RAW_BLOCK_TIMEOUT : the queue buffer stayed empty for the whole waiting time.
*              RAW_BLOCK_ABORT   : the pend was aborted while waiting.
*              RAW_STATE_UNKNOWN : possibly a system error.
*
* Note(s)    : Must not be called from an ISR (checked when RAW_QUEUE_BUFFER_FUNCTION_CHECK is enabled).
*
************************************************************************************************************************
*/
RAW_U16 raw_queue_buffer_receive(RAW_QUEUE_BUFFER *q_b, RAW_TICK_TYPE wait_option, RAW_VOID *msg, MSG_SIZE_TYPE *receive_size)
{
    RAW_U16 result;

    /* Declares the CPU status word used by the critical-section macros. */
    RAW_SR_ALLOC();

    #if (RAW_QUEUE_BUFFER_FUNCTION_CHECK > 0)

    /* This service may block, so it is forbidden at interrupt level. */
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

    if (q_b == 0) {
        return RAW_NULL_OBJECT;
    }

    if (msg == 0) {
        return RAW_NULL_POINTER;
    }
    #endif

    RAW_CRITICAL_ENTER();

    if (q_b->common_block_obj.object_type != RAW_QUEUE_BUFFER_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* Fast path: data is available, copy one msg out of the buffer. */
    if (!is_buffer_empty(q_b)) {
        *receive_size = buffer_to_msg(q_b, msg);
        RAW_CRITICAL_EXIT();
        return RAW_SUCCESS;
    }

    /* Buffer is empty and caller does not want to wait. */
    if (wait_option == RAW_NO_WAIT) {
        RAW_CRITICAL_EXIT();
        return RAW_NO_PEND_WAIT;
    }

    /* If the system is locked, blocking is not allowed (macro returns an error). */
    SYSTEM_LOCK_PROCESS();

    /* Store the destination buffer so the poster can copy the msg directly into it. */
    raw_task_active->msg = msg;

    raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)q_b, raw_task_active, wait_option);

    RAW_CRITICAL_EXIT();

    /* Hand the CPU to the next ready task; we resume here once woken. */
    raw_sched();

    result = block_state_post_process(raw_task_active, 0);

    /* If we got the msg successfully, the poster recorded its size in the TCB; take it. */
    if (result == RAW_SUCCESS) {
        *receive_size = raw_task_active->qb_msg_size;
    }

    return result;
}
/*
 * Post a msg of msg_size bytes to a queue buffer.
 *
 * If no task is pending the msg is copied into the ring buffer (only
 * SEND_TO_END is implemented); otherwise the msg is copied directly into the
 * first pending receiver's buffer, the receiver is woken, and the scheduler
 * is invoked.
 *
 * Returns RAW_SUCCESS, RAW_ERROR_OBJECT_TYPE, or RAW_QUEUE_BUFFER_FULL.
 *
 * FIX: TRACE_QUEUE_BUFFER_WAKE_TASK previously re-evaluated
 * list_entry(block_list_head->next, ...) AFTER raw_wake_object(task_ptr) had
 * removed the task from the pend list, producing a stale or bogus pointer;
 * it now reuses the already-captured task_ptr.
 */
RAW_U16 queue_buffer_post(RAW_QUEUE_BUFFER *q_b, RAW_VOID *p_void, MSG_SIZE_TYPE msg_size, RAW_U8 opt_send_method)
{
    LIST *block_list_head;
    RAW_TASK_OBJ *task_ptr;

    RAW_SR_ALLOC();

    RAW_CRITICAL_ENTER();

    if (q_b->common_block_obj.object_type != RAW_QUEUE_BUFFER_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    block_list_head = &q_b->common_block_obj.block_list;

    /* Not enough free space in the buffer for a msg of this size. */
    if (!is_queue_buffer_free(q_b, msg_size)) {
        RAW_CRITICAL_EXIT();
        TRACE_QUEUE_BUFFER_MAX(raw_task_active, q_b, p_void, msg_size, opt_send_method);
        return RAW_QUEUE_BUFFER_FULL;
    }

    /* Queue buffer is not full here; if there is no blocked receive task, buffer the msg. */
    if (is_list_empty(block_list_head)) {

        if (opt_send_method == SEND_TO_END) {
            msg_to_end_buffer(q_b, p_void, msg_size);
        }
        else {
            /* NOTE(review): non-SEND_TO_END methods are silently dropped here yet
               RAW_SUCCESS is still returned — confirm whether send-to-front is
               intentionally unsupported for queue buffers. Behavior preserved. */
        }

        RAW_CRITICAL_EXIT();

        TRACE_QUEUE_BUFFER_POST(raw_task_active, q_b, p_void, msg_size, opt_send_method);

        return RAW_SUCCESS;
    }

    /* Deliver directly to the first pending receiver: copy into its buffer,
       record the size in its TCB, then wake it. */
    task_ptr = list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);
    raw_memcpy(task_ptr->msg, p_void, msg_size);
    task_ptr->qb_msg_size = msg_size;
    raw_wake_object(task_ptr);

    RAW_CRITICAL_EXIT();

    /* Use task_ptr, not the list head: the task is no longer on the pend list. */
    TRACE_QUEUE_BUFFER_WAKE_TASK(raw_task_active, task_ptr, p_void, msg_size, opt_send_method);

    /* The woken receiver may outrank the caller; give the scheduler a chance to run it. */
    raw_sched();

    return RAW_SUCCESS;
}
/*
************************************************************************************************************************
*                                               Receive a msg with size
*
* Description: This function is called to receive a msg together with its size.
*
* Arguments  : p_q is the address of the queue-size object.
*              -----
*              msg_ptr is the address of a pointer; on success it is filled with the address of the msg.
*              -----
*              wait_option: how the service behaves if the queue is empty:
*                           RAW_NO_WAIT       (0x00000000)
*                           RAW_WAIT_FOREVER  (0xFFFFFFFF)
*                           timeout value     (0x00000001 through 0xFFFFFFFE)
*              ------
*              receive_size: is filled with the size of the received message.
*
* Returns    : RAW_SUCCESS       : raw os returns success.
*              RAW_BLOCK_DEL     : the queue was deleted while the task was pending.
*              RAW_BLOCK_TIMEOUT : the queue stayed empty for the whole waiting time.
*              RAW_BLOCK_ABORT   : the pend was aborted while waiting.
*              RAW_STATE_UNKNOWN : possibly a system error.
*
* Note(s)    : If no msg is received then *msg_ptr is set to the null pointer (0). An ISR may call
*              this function only with wait_option equal to RAW_NO_WAIT.
*
************************************************************************************************************************
*/
RAW_OS_ERROR raw_queue_size_receive(RAW_QUEUE_SIZE *p_q, RAW_TICK_TYPE wait_option, void **msg_ptr, MSG_SIZE_TYPE *receive_size)
{
    RAW_OS_ERROR result;
    RAW_MSG_SIZE *msg_tmp;

    /* Declares the CPU status word used by the critical-section macros. */
    RAW_SR_ALLOC();

    #if (RAW_QUEUE_SIZE_FUNCTION_CHECK > 0)

    /* An ISR may only poll (RAW_NO_WAIT); it must never block. */
    if (raw_int_nesting && (wait_option != RAW_NO_WAIT)) {
        return RAW_NOT_CALLED_BY_ISR;
    }

    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }

    if (msg_ptr == 0) {
        return RAW_NULL_POINTER;
    }

    if (receive_size == 0) {
        return RAW_NULL_POINTER;
    }
    #endif

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_SIZE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* Fast path: the queue holds a msg, unlink the head node and take it. */
    if (p_q->queue_current_msg) {

        msg_tmp = p_q->read;

        *msg_ptr = msg_tmp->msg_ptr;
        *receive_size = msg_tmp->msg_size;

        p_q->read = msg_tmp->next;

        if (p_q->read) {
            p_q->queue_current_msg--;
        }
        else {
            /* That was the last node: the queue is now empty. */
            p_q->write = 0;
            p_q->queue_current_msg = 0;
        }

        /* Return the descriptor node to the free list. */
        msg_tmp->next = p_q->free_msg;
        p_q->free_msg = msg_tmp;

        RAW_CRITICAL_EXIT();

        TRACE_QUEUE_SIZE_GET_MSG(raw_task_active, p_q, wait_option, *msg_ptr, *receive_size);

        return RAW_SUCCESS;
    }

    /* Queue is empty and caller does not want to wait. */
    if (wait_option == RAW_NO_WAIT) {
        *msg_ptr = 0;
        *receive_size = 0;
        RAW_CRITICAL_EXIT();
        return RAW_NO_PEND_WAIT;
    }

    /* If the system is locked, blocking is not allowed (macro returns an error). */
    SYSTEM_LOCK_PROCESS_QUEUE_SIZE();

    raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)p_q, raw_task_active, wait_option);

    RAW_CRITICAL_EXIT();

    TRACE_QUEUE_SIZE_GET_BLOCK(raw_task_active, p_q, wait_option);

    /* Hand the CPU to the next ready task; we resume here once woken. */
    raw_sched();

    result = block_state_post_process(raw_task_active, 0);

    /* If we got the msg successfully, the poster recorded it in the TCB; take it. */
    if (result == RAW_SUCCESS) {
        *receive_size = raw_task_active->msg_size;
        *msg_ptr = raw_task_active->msg;
    }
    else {
        *msg_ptr = 0;
        *receive_size = 0;
    }

    return result;
}
RAW_OS_ERROR msg_size_post(RAW_QUEUE_SIZE *p_q, RAW_MSG_SIZE *p_void, MSG_SIZE_TYPE size, RAW_U8 opt_send_method, RAW_U8 opt_wake_all) { LIST *block_list_head; RAW_MSG_SIZE *msg_temp; RAW_MSG_SIZE *p_msg_in; RAW_SR_ALLOC(); RAW_CRITICAL_ENTER(); if (p_q->common_block_obj.object_type != RAW_QUEUE_SIZE_OBJ_TYPE) { RAW_CRITICAL_EXIT(); return RAW_ERROR_OBJECT_TYPE; } block_list_head = &p_q->common_block_obj.block_list; /*queue is full condition!*/ if (p_q->queue_current_msg >= p_q->queue_msg_size) { RAW_CRITICAL_EXIT(); TRACE_QUEUE_SIZE_MSG_MAX(raw_task_active, p_q, p_void, size, opt_send_method); if (p_q->queue_size_full_callback) { p_q->queue_size_full_callback(p_q, p_void, size); } return RAW_MSG_MAX; } /*Queue is not full here, If there is no blocked receive task*/ if (is_list_empty(block_list_head)) { /*delete msg from free msg list*/ msg_temp = p_q->free_msg; p_q->free_msg = msg_temp->next; /* If it is the first message placed in the queue*/ if (p_q->queue_current_msg == 0) { p_q->write = msg_temp; p_q->read = msg_temp; } else { if (opt_send_method == SEND_TO_END) { p_msg_in = p_q->write; p_msg_in->next = msg_temp; msg_temp->next = 0; p_q->write = msg_temp; } else { msg_temp->next = p_q->read; p_q->read = msg_temp; } } p_q->queue_current_msg++; if (p_q->queue_current_msg > p_q->peak_numbers) { p_q->peak_numbers = p_q->queue_current_msg; } /*Assign value to msg*/ msg_temp->msg_ptr = p_void; msg_temp->msg_size = size; RAW_CRITICAL_EXIT(); TRACE_QUEUE_SIZE_MSG_POST(raw_task_active, p_q, p_void, size, opt_send_method); return RAW_SUCCESS; } /*wake all the task blocked on this queue*/ if (opt_wake_all) { while (!is_list_empty(block_list_head)) { wake_send_msg_size(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, size); TRACE_QUEUE_SIZE_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, size, opt_wake_all); } } /*wake hignhest priority task blocked on this queue and send msg to it*/ else { 
wake_send_msg_size(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, size); TRACE_QUEUE_SIZE_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, size, opt_wake_all); } RAW_CRITICAL_EXIT(); raw_sched(); return RAW_SUCCESS; }