/*
 * Post an asynchronous event (ev, data) to process p.
 *
 * This function can be used in interrupt context. Fixes vs. the previous
 * version:
 *  - the slot index is now a stack local: the old `static` index could be
 *    clobbered by a nested interrupt or a concurrent post;
 *  - the full check, slot reservation AND the slot write are all inside one
 *    critical section, so a consumer can never see a reserved-but-unfilled
 *    slot and the full check cannot race with another poster.
 *
 * Returns PROCESS_ERR_OK. A full queue is treated as a design error and
 * trips RAW_ASSERT(0) (as before).
 */
int process_post(struct process *p, process_event_t ev, process_data_t data)
{
	process_num_events_t snum;

	RAW_SR_ALLOC();

	RAW_CRITICAL_ENTER();

	/* Queue full: design error, halt here (checked under the lock now). */
	if (nevents == PROCESS_CONF_NUMEVENTS) {
		RAW_CRITICAL_EXIT();
		RAW_ASSERT(0);
		RAW_CRITICAL_ENTER();
	}

	/* Reserve the next ring-buffer slot and fill it before releasing
	 * the critical section. */
	snum = (process_num_events_t)(fevent + nevents) & (PROCESS_CONF_NUMEVENTS - 1);

	events[snum].ev = ev;
	events[snum].data = data;
	events[snum].p = p;

	++nevents;

	/* Track the high-water mark for debugging. */
	if (nevents > process_maxevents) {
		process_maxevents = nevents;
	}

	RAW_CRITICAL_EXIT();

	return PROCESS_ERR_OK;
}
/*
************************************************************************************************************************
* Register a notify callback on a queue
*
* Description: Attaches notify_function to the queue object; it will be invoked whenever a message is sent to the
*              queue. Normally used to implement "pend on multiple objects" on top of queues.
*
* Arguments  : p_q             is the address of the queue object.
*              notify_function is the callback invoked on every queue send.
*
* Returns    : RAW_SUCCESS           on success
*              RAW_NULL_OBJECT       if p_q is NULL (when function checking is enabled)
*              RAW_NOT_CALLED_BY_ISR if called from interrupt context (when function checking is enabled)
*              RAW_ERROR_OBJECT_TYPE if p_q is not an initialized queue object
************************************************************************************************************************
*/
RAW_U16 raw_queue_send_notify(RAW_QUEUE *p_q, QUEUE_SEND_NOTIFY notify_function)
{
	RAW_U16 ret;

	RAW_SR_ALLOC();

#if (RAW_QUEUE_FUNCTION_CHECK > 0)

	if (p_q == 0) {
		return RAW_NULL_OBJECT;
	}

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	RAW_CRITICAL_ENTER();

	/* Validate the object and install the callback under one lock. */
	if (p_q->common_block_obj.object_type == RAW_QUEUE_OBJ_TYPE) {
		p_q->queue_send_notify = notify_function;
		ret = RAW_SUCCESS;
	}
	else {
		ret = RAW_ERROR_OBJECT_TYPE;
	}

	RAW_CRITICAL_EXIT();

	return ret;
}
/*
************************************************************************************************************************
* Register a notify callback on a semaphore
*
* Description: Attaches notify_function to the semaphore object; it will be invoked whenever the semaphore is put.
*              Normally used to implement "pend on multiple objects" on top of semaphores.
*
* Arguments  : semaphore_ptr   is the address of the semaphore object.
*              notify_function is the callback invoked on every semaphore put.
*
* Returns    : RAW_SUCCESS           on success
*              RAW_NULL_OBJECT       if semaphore_ptr is NULL (when function checking is enabled)
*              RAW_NOT_CALLED_BY_ISR if called from interrupt context (when function checking is enabled)
*              RAW_ERROR_OBJECT_TYPE if semaphore_ptr is not an initialized semaphore object
************************************************************************************************************************
*/
RAW_OS_ERROR raw_semphore_send_notify(RAW_SEMAPHORE *semaphore_ptr, SEMPHORE_SEND_NOTIFY notify_function)
{
	RAW_OS_ERROR ret;

	RAW_SR_ALLOC();

#if (RAW_SEMA_FUNCTION_CHECK > 0)

	if (semaphore_ptr == 0) {
		return RAW_NULL_OBJECT;
	}

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	RAW_CRITICAL_ENTER();

	/* Validate the object and install the callback under one lock. */
	if (semaphore_ptr->common_block_obj.object_type == RAW_SEM_OBJ_TYPE) {
		semaphore_ptr->semphore_send_notify = notify_function;
		ret = RAW_SUCCESS;
	}
	else {
		ret = RAW_ERROR_OBJECT_TYPE;
	}

	RAW_CRITICAL_EXIT();

	return ret;
}
/*
************************************************************************************************************************
* Get a semaphore
*
* Description: This function is called to get (take) a semaphore, blocking the caller if it is unavailable.
*
* Arguments  : semaphore_ptr is the address of the semaphore object.
*              wait_option   is how long to wait if the semaphore is unavailable:
*                              RAW_NO_WAIT      (0x00000000)
*                              RAW_WAIT_FOREVER (0xFFFFFFFF)
*                              timeout value    (0x00000001 through 0xFFFFFFFE)
*
* Returns    : RAW_SUCCESS       : got the semaphore.
*              RAW_BLOCK_ABORT   : wait was aborted by another task or ISR.
*              RAW_NO_PEND_WAIT  : semaphore unavailable and wait_option is RAW_NO_WAIT.
*              RAW_SCHED_DISABLE : scheduler is locked so the task is not allowed to block.
*              RAW_BLOCK_DEL     : the semaphore was deleted while the task was blocked.
*
* Note(s)    : Must not be called from an ISR.
************************************************************************************************************************
*/
RAW_OS_ERROR raw_semaphore_get(RAW_SEMAPHORE *semaphore_ptr, RAW_TICK_TYPE wait_option)
{
	RAW_OS_ERROR error_status;

	RAW_SR_ALLOC();

#if (RAW_SEMA_FUNCTION_CHECK > 0)

	if (semaphore_ptr == 0) {
		return RAW_NULL_OBJECT;
	}

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	/* Fast path: a count is available, consume one and return. */
	if (semaphore_ptr->count) {
		semaphore_ptr->count--;
		RAW_CRITICAL_EXIT();
		TRACE_SEMAPHORE_GET_SUCCESS(raw_task_active, semaphore_ptr);
		return RAW_SUCCESS;
	}

	/* Can't get the semaphore; return immediately if wait_option is RAW_NO_WAIT. */
	if (wait_option == RAW_NO_WAIT) {
		RAW_CRITICAL_EXIT();
		return RAW_NO_PEND_WAIT;
	}

	/* If the scheduler is locked, blocking is not allowed (macro returns an error). */
	SYSTEM_LOCK_PROCESS();

	/* Block the current task on the semaphore's wait list. */
	raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)semaphore_ptr, raw_task_active, wait_option);

	RAW_CRITICAL_EXIT();

	TRACE_SEMAPHORE_GET_BLOCK(raw_task_active, semaphore_ptr, wait_option);

	/* Switch to the next highest-priority ready task. */
	raw_sched();

	/* The task was woken up; translate the wakeup reason into a return code. */
	error_status = block_state_post_process(raw_task_active, 0);

	return error_status;
}
/*
 * Snapshot a queue's internal state (counters, ring-buffer pointers, and the
 * head of its blocked-task list) into *msg_information.
 *
 * Returns RAW_SUCCESS, or RAW_NULL_OBJECT / RAW_NULL_POINTER /
 * RAW_NOT_CALLED_BY_ISR / RAW_ERROR_OBJECT_TYPE on invalid use.
 */
RAW_U16 raw_queue_get_information(RAW_QUEUE *p_q, RAW_MSG_INFO *msg_information)
{
	RAW_U16 ret;

	RAW_SR_ALLOC();

#if (RAW_QUEUE_FUNCTION_CHECK > 0)

	if (p_q == 0) {
		return RAW_NULL_OBJECT;
	}

	if (msg_information == 0) {
		return RAW_NULL_POINTER;
	}

#endif

#if (CONFIG_RAW_ZERO_INTERRUPT > 0)

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (p_q->common_block_obj.object_type == RAW_QUEUE_OBJ_TYPE) {

		/* Copy all fields under the lock so the snapshot is consistent. */
		msg_information->msg_q.size            = p_q->msg_q.size;
		msg_information->msg_q.current_numbers = p_q->msg_q.current_numbers;
		msg_information->msg_q.peak_numbers    = p_q->msg_q.peak_numbers;
		msg_information->msg_q.queue_start     = p_q->msg_q.queue_start;
		msg_information->msg_q.queue_end       = p_q->msg_q.queue_end;
		msg_information->msg_q.read            = p_q->msg_q.read;
		msg_information->msg_q.write           = p_q->msg_q.write;

		/* First entry of the blocked-task list (the list head's next). */
		msg_information->suspend_entry = p_q->common_block_obj.block_list.next;

		ret = RAW_SUCCESS;
	}
	else {
		ret = RAW_ERROR_OBJECT_TYPE;
	}

	RAW_CRITICAL_EXIT();

	return ret;
}
/*
 * Round-robin time-slice accounting for the given priority level.
 * Called periodically (presumably from the tick handler — TODO confirm):
 * decrements the running task's remaining slice and, when it reaches zero,
 * rotates the task to the end of its priority's ready list and reloads the
 * slice from time_total.
 */
void calculate_time_slice(RAW_U8 task_prio)
{
	RAW_TASK_OBJ *task_ptr;
	LIST *head;

	RAW_SR_ALLOC();

	head = &raw_ready_queue.task_ready_list[task_prio];

	RAW_CRITICAL_ENTER();

	/* If the ready list is empty there is nothing to account for. */
	if (is_list_empty(head)) {
		RAW_CRITICAL_EXIT();
		return;
	}

	/* Always look at the first task on the ready list. */
	task_ptr = list_entry(head->next, RAW_TASK_OBJ, task_list);

	/* SCHED_FIFO tasks do not use time slices, just return. */
	if (task_ptr->sched_way == SCHED_FIFO) {
		RAW_CRITICAL_EXIT();
		return;
	}

	/* Only one task at this priority (the idle task always satisfies this),
	 * so no rotation is needed. */
	if (head->next->next == head) {
		RAW_CRITICAL_EXIT();
		return;
	}

	/* Consume one tick of the slice (guard against underflow at zero). */
	if (task_ptr->time_slice) {
		task_ptr->time_slice--;
	}

	/* If the current active task still has time_slice left, just return. */
	if (task_ptr->time_slice) {
		RAW_CRITICAL_EXIT();
		return;
	}

	/* Slice exhausted: move the task to the end of its priority's ready list. */
	move_to_ready_list_end(&raw_ready_queue, task_ptr);

	/* Restore the task's time slice for its next turn. */
	task_ptr->time_slice = task_ptr->time_total;

	RAW_CRITICAL_EXIT();
}
/*
 * Force a semaphore's count to sem_count.
 *
 * The count may only be changed when it is non-zero, or when it is zero and
 * no task is blocked on the semaphore; otherwise RAW_SEMAPHORE_TASK_WAITING
 * is returned and the count is left untouched.
 *
 * Fix: the block-list pointer was previously derived from semaphore_ptr
 * BEFORE the NULL check, which both defeats the check and is undefined
 * behavior when semaphore_ptr is NULL. It is now computed after validation.
 *
 * Returns RAW_SUCCESS, RAW_NULL_OBJECT, RAW_NOT_CALLED_BY_ISR,
 * RAW_ERROR_OBJECT_TYPE or RAW_SEMAPHORE_TASK_WAITING.
 */
RAW_OS_ERROR raw_semaphore_set(RAW_SEMAPHORE *semaphore_ptr, RAW_U32 sem_count)
{
	LIST *block_list_head;

	RAW_SR_ALLOC();

#if (RAW_SEMA_FUNCTION_CHECK > 0)

	if (semaphore_ptr == 0) {
		return RAW_NULL_OBJECT;
	}

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	/* Safe now: semaphore_ptr has been validated. */
	block_list_head = &semaphore_ptr->common_block_obj.block_list;

	RAW_CRITICAL_ENTER();

	if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	if (semaphore_ptr->count) {
		/* Count non-zero: no task can be blocked, safe to overwrite. */
		semaphore_ptr->count = sem_count;
	}
	else {
		if (is_list_empty(block_list_head)) {
			semaphore_ptr->count = sem_count;
		}
		else {
			/* Tasks are blocked waiting; refuse to change the count. */
			RAW_CRITICAL_EXIT();
			return RAW_SEMAPHORE_TASK_WAITING;
		}
	}

	RAW_CRITICAL_EXIT();

	return RAW_SUCCESS;
}
/*
 * Report a size-queue's free space, peak usage and current usage through the
 * three out-parameters.
 *
 * Fix: queue_peak_msg_size is written through below but was never
 * NULL-checked, unlike the other two out-pointers; the missing check is
 * added for consistency and to avoid a NULL dereference.
 *
 * Returns RAW_SUCCESS, RAW_NULL_OBJECT, RAW_NULL_POINTER,
 * RAW_NOT_CALLED_BY_ISR or RAW_ERROR_OBJECT_TYPE.
 */
RAW_OS_ERROR raw_queue_size_get_information(RAW_QUEUE_SIZE *p_q, MSG_SIZE_TYPE *queue_free_msg_size, MSG_SIZE_TYPE *queue_peak_msg_size, MSG_SIZE_TYPE *queue_current_msg)
{
	RAW_SR_ALLOC();

#if (RAW_QUEUE_SIZE_FUNCTION_CHECK > 0)

	if (p_q == 0) {
		return RAW_NULL_OBJECT;
	}

	if (queue_free_msg_size == 0) {
		return RAW_NULL_POINTER;
	}

	/* Previously missing: this pointer is dereferenced below. */
	if (queue_peak_msg_size == 0) {
		return RAW_NULL_POINTER;
	}

	if (queue_current_msg == 0) {
		return RAW_NULL_POINTER;
	}

#endif

#if (CONFIG_RAW_ZERO_INTERRUPT > 0)

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (p_q->common_block_obj.object_type != RAW_QUEUE_SIZE_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	/* All three values are read under one lock for a consistent snapshot. */
	*queue_free_msg_size = p_q->queue_msg_size - p_q->queue_current_msg;
	*queue_current_msg   = p_q->queue_current_msg;
	*queue_peak_msg_size = p_q->peak_numbers;

	RAW_CRITICAL_EXIT();

	return RAW_SUCCESS;
}
/*
 * Report a queue buffer's free byte count and total byte size through the
 * two out-parameters.
 *
 * Fix (consistency): the NULL checks on the out-pointers previously
 * returned RAW_NULL_OBJECT; every sibling information function in this file
 * (raw_queue_get_information, raw_queue_size_get_information) returns
 * RAW_NULL_POINTER for NULL out-pointers, so these now do too.
 * RAW_NULL_OBJECT is still returned for a NULL queue-buffer object.
 *
 * Returns RAW_SUCCESS, RAW_NULL_OBJECT, RAW_NULL_POINTER,
 * RAW_NOT_CALLED_BY_ISR or RAW_ERROR_OBJECT_TYPE.
 */
RAW_U16 raw_queue_buffer_get_information(RAW_QUEUE_BUFFER *q_b, RAW_U32 *queue_buffer_free_size, RAW_U32 *queue_buffer_size)
{
	RAW_SR_ALLOC();

#if (RAW_QUEUE_BUFFER_FUNCTION_CHECK > 0)

	if (q_b == 0) {
		return RAW_NULL_OBJECT;
	}

	if (queue_buffer_free_size == 0) {
		return RAW_NULL_POINTER;
	}

	if (queue_buffer_size == 0) {
		return RAW_NULL_POINTER;
	}

#endif

#if (CONFIG_RAW_ZERO_INTERRUPT > 0)

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (q_b->common_block_obj.object_type != RAW_QUEUE_BUFFER_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	/* Snapshot both values under one lock. */
	*queue_buffer_free_size = q_b->frbufsz;
	*queue_buffer_size      = q_b->bufsz;

	RAW_CRITICAL_EXIT();

	return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Check whether a queue-size object is full
*
* Description: Tests whether the queue-size object has reached its message capacity.
*
* Arguments  : p_q is the address of the queue-size object.
*
* Returns    : 1                     queue-size object is full
*              0                     queue-size object is not full
*              RAW_NULL_OBJECT       p_q is NULL (when function checking is enabled)
*              RAW_NOT_CALLED_BY_ISR called from an ISR (when zero-interrupt config is enabled)
*              RAW_ERROR_OBJECT_TYPE p_q is not an initialized queue-size object
*
* Note(s)    : Error codes share the RAW_U16 return value with the 1/0 result.
************************************************************************************************************************
*/
RAW_U16 raw_queue_size_full_check(RAW_QUEUE_SIZE *p_q)
{
	RAW_U16 is_full;

	RAW_SR_ALLOC();

#if (RAW_QUEUE_FUNCTION_CHECK > 0)

	if (p_q == 0) {
		return RAW_NULL_OBJECT;
	}

#endif

#if (CONFIG_RAW_ZERO_INTERRUPT > 0)

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (p_q->common_block_obj.object_type != RAW_QUEUE_SIZE_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	/* Full when the current message count has reached capacity. */
	is_full = (p_q->queue_current_msg >= p_q->queue_msg_size) ? 1u : 0u;

	RAW_CRITICAL_EXIT();

	return is_full;
}
/*
 * Discard every message in a size-queue, returning all in-flight message
 * descriptors to the free list in one splice.
 *
 * Returns RAW_SUCCESS, RAW_NULL_OBJECT, RAW_NOT_CALLED_BY_ISR or
 * RAW_ERROR_OBJECT_TYPE.
 */
RAW_OS_ERROR raw_queue_size_flush(RAW_QUEUE_SIZE *p_q)
{
	RAW_MSG_SIZE *p_msg;

	RAW_SR_ALLOC();

#if (RAW_QUEUE_SIZE_FUNCTION_CHECK > 0)

	if (p_q == 0) {
		return RAW_NULL_OBJECT;
	}

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (p_q->common_block_obj.object_type != RAW_QUEUE_SIZE_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	if (p_q->queue_current_msg) {

		/* Splice the pending chain [read .. write] onto the free list:
		 * link the tail (write) to the current free list, then make the
		 * head (read) the new free-list head. */
		p_msg = p_q->write;
		p_msg->next = p_q->free_msg;

		/* free msg reset to queue read */
		p_q->free_msg = p_q->read;

		/* Queue is now empty. */
		p_q->queue_current_msg = 0;
		p_q->read = 0;
		p_q->write = 0;
	}

	RAW_CRITICAL_EXIT();

	TRACE_QUEUE_SIZE_FLUSH(raw_task_active, p_q);

	return RAW_SUCCESS;
}
/*
 * Delete an event object. Every task blocked on it is woken (each presumably
 * returns RAW_BLOCK_DEL from its pend — confirm against delete_pend_obj),
 * the object type is invalidated so further use fails the type check, and
 * the flags are cleared. A reschedule runs afterwards because woken tasks
 * may now be the highest-priority ready tasks.
 *
 * Returns RAW_SUCCESS, RAW_NULL_OBJECT, RAW_NOT_CALLED_BY_ISR or
 * RAW_ERROR_OBJECT_TYPE.
 */
RAW_U16 raw_event_delete(RAW_EVENT *event_ptr)
{
	LIST *block_list_head;

	RAW_SR_ALLOC();

#if (RAW_EVENT_FUNCTION_CHECK > 0)

	if (event_ptr == 0) {
		return RAW_NULL_OBJECT;
	}

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (event_ptr->common_block_obj.object_type != RAW_EVENT_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &event_ptr->common_block_obj.block_list;

	/* Invalidate the type first so the object cannot be re-pended on. */
	event_ptr->common_block_obj.object_type = 0u;

	/* All tasks blocked on this event are woken up until the list is empty. */
	while (!is_list_empty(block_list_head)) {
		delete_pend_obj(list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
	}

	event_ptr->flags = 0u;

	RAW_CRITICAL_EXIT();

	TRACE_EVENT_DELETE(raw_task_active, event_ptr);

	/* A woken task may outrank the caller; reschedule. */
	raw_sched();

	return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Set system time
*
* Description: This function is called to set the system tick counter.
*
* Arguments  : time is the new value for raw_tick_count.
*
* Returns    : RAW_SUCCESS           on success
*              RAW_NOT_CALLED_BY_ISR if called from interrupt context
************************************************************************************************************************
*/
RAW_OS_ERROR raw_system_time_set(RAW_TICK_TYPE time)
{
	RAW_SR_ALLOC();

	/* Not allowed from an ISR. */
	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

	/* The tick counter is updated atomically with respect to interrupts. */
	RAW_CRITICAL_ENTER();
	raw_tick_count = time;
	RAW_CRITICAL_EXIT();

	return RAW_SUCCESS;
}
/*
 * Request a poll of process p. Only processes that are currently running or
 * being called are marked; the needspoll flag and the global request counter
 * are updated inside a critical section.
 */
void process_poll(struct process *p)
{
	RAW_SR_ALLOC();

	if (p == 0) {
		return;
	}

	/* Only a live process (running or in a call) can be polled. */
	if ((p->state != PROCESS_STATE_RUNNING) && (p->state != PROCESS_STATE_CALLED)) {
		return;
	}

	RAW_CRITICAL_ENTER();
	p->needspoll = 1;
	poll_requested++;
	RAW_CRITICAL_EXIT();
}
/*
 * Delete a queue object. Every task blocked on it is woken (each presumably
 * returns RAW_BLOCK_DEL from its pend — confirm against delete_pend_obj) and
 * the object type is invalidated so further use fails the type check.
 * A reschedule runs afterwards because a woken task may outrank the caller.
 *
 * Returns RAW_SUCCESS, RAW_NULL_OBJECT, RAW_NOT_CALLED_BY_ISR or
 * RAW_ERROR_OBJECT_TYPE.
 */
RAW_U16 raw_queue_delete(RAW_QUEUE *p_q)
{
	LIST *block_list_head;

	RAW_SR_ALLOC();

#if (RAW_QUEUE_FUNCTION_CHECK > 0)

	if (p_q == 0) {
		return RAW_NULL_OBJECT;
	}

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &p_q->common_block_obj.block_list;

	/* Invalidate the type first so the object cannot be re-pended on. */
	p_q->common_block_obj.object_type = 0u;

	/* All tasks blocked on this queue are woken up. */
	while (!is_list_empty(block_list_head)) {
		delete_pend_obj(list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
	}

	RAW_CRITICAL_EXIT();

	TRACE_QUEUE_DELETE(raw_task_active, p_q);

	/* A woken task may outrank the caller; reschedule. */
	raw_sched();

	return RAW_SUCCESS;
}
/*
 * Service poll requests: walk the process list and deliver
 * PROCESS_EVENT_POLL to every process with needspoll set.
 *
 * NOTE(review): poll_requested is decremented exactly once per call even
 * though ALL needspoll processes below are serviced; presumably the caller
 * invokes do_poll once per outstanding request — verify against the caller.
 */
static void do_poll(void)
{
	struct process *p;

	RAW_SR_ALLOC();

	/* Consume one poll request under the lock. */
	RAW_CRITICAL_ENTER();
	poll_requested--;
	RAW_CRITICAL_EXIT();

	/* Call the processes that need to be polled. */
	for (p = process_list; p != 0; p = p->next) {
		if (p->needspoll) {
			p->state = PROCESS_STATE_RUNNING;
			/* Clear the flag before the callback so a poll requested
			 * from within call_process is not lost. */
			p->needspoll = 0;
			call_process(p, PROCESS_EVENT_POLL, 0);
		}
	}
}
/*
 * Delete a semaphore object. Every task blocked on it is woken (each
 * presumably returns RAW_BLOCK_DEL from its pend — confirm against
 * delete_pend_obj) and the object type is reset to RAW_OBJ_TYPE_NONE so
 * further use fails the type check. A reschedule runs afterwards because a
 * woken task may outrank the caller.
 *
 * Returns RAW_SUCCESS, RAW_NULL_OBJECT, RAW_NOT_CALLED_BY_ISR or
 * RAW_ERROR_OBJECT_TYPE.
 */
RAW_OS_ERROR raw_semaphore_delete(RAW_SEMAPHORE *semaphore_ptr)
{
	LIST *block_list_head;

	RAW_SR_ALLOC();

#if (RAW_SEMA_FUNCTION_CHECK > 0)

	if (semaphore_ptr == 0) {
		return RAW_NULL_OBJECT;
	}

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &semaphore_ptr->common_block_obj.block_list;

	/* Invalidate the type first so the object cannot be re-pended on. */
	semaphore_ptr->common_block_obj.object_type = RAW_OBJ_TYPE_NONE;

	/* All tasks blocked on this semaphore are woken up. */
	while (!is_list_empty(block_list_head)) {
		delete_pend_obj(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
	}

	RAW_CRITICAL_EXIT();

	TRACE_SEMAPHORE_DELETE(raw_task_active, semaphore_ptr);

	/* A woken task may outrank the caller; reschedule. */
	raw_sched();

	return RAW_SUCCESS;
}
/*
 * Delete a mutex object. If a task currently owns the mutex it is released
 * first (restoring any boosted priority via release_mutex), then every task
 * blocked on it is woken (each presumably returns RAW_BLOCK_DEL from its
 * pend — confirm against delete_pend_obj) and the object type is reset to
 * RAW_OBJ_TYPE_NONE. A reschedule runs afterwards.
 *
 * NOTE(review): unlike the other delete functions in this file, there is no
 * raw_int_nesting guard here even though raw_sched() is called — confirm
 * whether ISR callers are intended to be allowed.
 *
 * Returns RAW_SUCCESS, RAW_NULL_OBJECT or RAW_ERROR_OBJECT_TYPE.
 */
RAW_OS_ERROR raw_mutex_delete(RAW_MUTEX *mutex_ptr)
{
	LIST *block_list_head;

	RAW_SR_ALLOC();

#if (RAW_MUTEX_FUNCTION_CHECK > 0)

	if (mutex_ptr == 0) {
		return RAW_NULL_OBJECT;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (mutex_ptr->common_block_obj.object_type != RAW_MUTEX_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &mutex_ptr->common_block_obj.block_list;

	/* Invalidate the type first so the object cannot be re-pended on. */
	mutex_ptr->common_block_obj.object_type = RAW_OBJ_TYPE_NONE;

	/* If a task holds the mutex, release it on the owner's behalf. */
	if (mutex_ptr->mtxtsk) {
		release_mutex(mutex_ptr->mtxtsk, mutex_ptr);
	}

	/* All tasks blocked on this mutex are woken up. */
	while (!is_list_empty(block_list_head)) {
		delete_pend_obj(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
	}

	RAW_CRITICAL_EXIT();

	TRACE_MUTEX_DELETE(raw_task_active, mutex_ptr);

	/* A woken task may outrank the caller; reschedule. */
	raw_sched();

	return RAW_SUCCESS;
}
/*
 * Mark a thread as detached. If the underlying task has already been
 * deleted this is a no-op; otherwise the detach state is set under a
 * critical section and the join semaphore is deleted, waking any joiner.
 *
 * Fixes:
 *  - RAW_SR_ALLOC() was missing even though RAW_CRITICAL_ENTER/EXIT are
 *    used — every other function in this file declares it first (the
 *    critical-section macros reference the saved-status local it declares);
 *  - the empty `if { } else { ... }` branch is inverted into a plain guard.
 *
 * Always returns 0, matching the original interface.
 */
int pthread_detach(pthread_t thread)
{
	RAW_TASK_OBJ *task;

	RAW_SR_ALLOC();

	task = &thread->task_obj;

	/* Already-deleted task: nothing to detach. */
	if (task->task_state != RAW_DELETED) {

		RAW_CRITICAL_ENTER();
		thread->attr.detachstate = PTHREAD_CREATE_DETACHED;
		RAW_CRITICAL_EXIT();

		/* Wake any task joined on this thread; its pend ends with the
		 * semaphore's deletion. */
		raw_semaphore_delete(&thread->task_sem);
	}

	return 0;
}
/*
************************************************************************************************************************
* Check whether a queue-size object is full
*
* Description: Tests whether the queue-size object has reached its message capacity.
*
* Arguments  : p_q is the address of the queue-size object.
*
* Returns    : RAW_QUEUE_SIZE_CHECK_FULL      queue-size object is full
*              RAW_QUEUE_SIZE_CHECK_NOT_FULL  queue-size object is not full
*              RAW_NULL_OBJECT                p_q is NULL (when function checking is enabled)
*              RAW_ERROR_OBJECT_TYPE          p_q is not an initialized queue-size object
************************************************************************************************************************
*/
RAW_OS_ERROR raw_queue_size_full_check(RAW_QUEUE_SIZE *p_q)
{
	RAW_OS_ERROR check_result;

	RAW_SR_ALLOC();

#if (RAW_QUEUE_FUNCTION_CHECK > 0)

	if (p_q == 0) {
		return RAW_NULL_OBJECT;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (p_q->common_block_obj.object_type != RAW_QUEUE_SIZE_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	/* Full when the current message count has reached capacity. */
	check_result = (p_q->queue_current_msg >= p_q->queue_msg_size)
	                   ? RAW_QUEUE_SIZE_CHECK_FULL
	                   : RAW_QUEUE_SIZE_CHECK_NOT_FULL;

	RAW_CRITICAL_EXIT();

	return check_result;
}
/*
 * Discard all messages in a queue: both ring-buffer pointers are rewound to
 * the start of storage and the message count is zeroed. Tasks blocked on
 * the queue are not affected.
 *
 * Returns RAW_SUCCESS, RAW_NULL_OBJECT, RAW_NOT_CALLED_BY_ISR or
 * RAW_ERROR_OBJECT_TYPE.
 */
RAW_U16 raw_queue_flush(RAW_QUEUE *p_q)
{
	RAW_SR_ALLOC();

#if (RAW_QUEUE_FUNCTION_CHECK > 0)

	if (p_q == 0) {
		return RAW_NULL_OBJECT;
	}

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	/* Rewind both ring pointers to the initial position and drop the count. */
	p_q->msg_q.read            = p_q->msg_q.queue_start;
	p_q->msg_q.write           = p_q->msg_q.queue_start;
	p_q->msg_q.current_numbers = 0u;

	RAW_CRITICAL_EXIT();

	TRACE_QUEUE_FLUSH(raw_task_active, p_q);

	return RAW_SUCCESS;
}
/*
 * Discard all data in a queue buffer: head and tail are rewound to zero and
 * the whole buffer becomes free space again.
 *
 * Returns RAW_SUCCESS, RAW_NOT_CALLED_BY_ISR, RAW_NULL_OBJECT or
 * RAW_ERROR_OBJECT_TYPE.
 */
RAW_U16 raw_queue_buffer_flush(RAW_QUEUE_BUFFER *q_b)
{
	RAW_SR_ALLOC();

#if (RAW_QUEUE_BUFFER_FUNCTION_CHECK > 0)

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

	if (q_b == 0) {
		return RAW_NULL_OBJECT;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (q_b->common_block_obj.object_type != RAW_QUEUE_BUFFER_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	/* Reset to the empty state: everything free, indices at the origin. */
	q_b->head   = 0;
	q_b->tail   = 0;
	q_b->frbufsz = q_b->bufsz;

	RAW_CRITICAL_EXIT();

	return RAW_SUCCESS;
}
/*
 * Internal semaphore release. If no task is waiting, the count is
 * incremented (with overflow guard) and the optional notify callback fires.
 * Otherwise waiter(s) are woken: all of them when opt_wake_all is non-zero,
 * or only the list head (the highest-priority waiter) when it is zero.
 *
 * Returns RAW_SUCCESS, RAW_ERROR_OBJECT_TYPE or RAW_SEMAPHORE_OVERFLOW.
 */
RAW_OS_ERROR semaphore_put(RAW_SEMAPHORE *semaphore_ptr, RAW_U8 opt_wake_all)
{
	LIST *block_list_head;

	RAW_SR_ALLOC();

	RAW_CRITICAL_ENTER();

	if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &semaphore_ptr->common_block_obj.block_list;

	/* If no task is blocked on this list, just bump the count and return. */
	if (is_list_empty(block_list_head)) {

		/* (RAW_PROCESSOR_UINT)-1 is the maximum representable count:
		 * refuse to wrap around. */
		if (semaphore_ptr->count == (RAW_PROCESSOR_UINT) - 1) {
			RAW_CRITICAL_EXIT();
			TRACE_SEMAPHORE_OVERFLOW(raw_task_active, semaphore_ptr);
			return RAW_SEMAPHORE_OVERFLOW;
		}

		/* Increase the resource count. */
		semaphore_ptr->count++;

		/* Track the high-water mark for debugging. */
		if (semaphore_ptr->count > semaphore_ptr->peak_count) {
			semaphore_ptr->peak_count = semaphore_ptr->count;
		}

		RAW_CRITICAL_EXIT();

		/* If the semaphore is registered with a notify function, call it. */
		if (semaphore_ptr->semphore_send_notify) {
			semaphore_ptr->semphore_send_notify(semaphore_ptr);
		}

		TRACE_SEMAPHORE_COUNT_INCREASE(raw_task_active, semaphore_ptr);

		return RAW_SUCCESS;
	}

	/* Wake all the tasks blocked on this semaphore.
	 * NOTE(review): the TRACE call reads block_list_head->next AFTER
	 * raw_wake_object has removed the woken task, so it appears to trace
	 * the NEXT waiter (or the empty list head), not the woken task —
	 * verify the intended TRACE argument. */
	if (opt_wake_all) {
		while (!is_list_empty(block_list_head)) {
			raw_wake_object(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
			TRACE_SEM_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), opt_wake_all);
		}
	}
	else {
		/* Wake up only the highest-priority task blocked on the semaphore. */
		raw_wake_object(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
		TRACE_SEM_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), opt_wake_all);
	}

	RAW_CRITICAL_EXIT();

	/* A woken task may outrank the caller; reschedule. */
	raw_sched();

	return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Receive a msg from a queue buffer
*
* Description: This function is called to receive a msg from a queue buffer.
*
* Arguments  : q_b          is the address of the queue buffer object.
*              wait_option  is how the service behaves if the buffer is empty:
*                             RAW_NO_WAIT      (0x00000000)
*                             RAW_WAIT_FOREVER (0xFFFFFFFF)
*                             timeout value    (0x00000001 through 0xFFFFFFFE)
*              msg          is the address the received data is copied into. If the extension memcpy is used,
*                           make sure this address is 4-byte aligned.
*              receive_size receives the size of the msg that was read.
*
* Returns    : RAW_SUCCESS       : raw os return success.
*              RAW_NO_PEND_WAIT  : buffer empty and wait_option is RAW_NO_WAIT.
*              RAW_BLOCK_DEL     : this queue buffer was deleted while waiting.
*              RAW_BLOCK_TIMEOUT : still empty when the wait time expired.
*              RAW_BLOCK_ABORT   : wait was aborted.
*              RAW_STATE_UNKNOWN : possibly a system error.
*
* Note(s)    : Must not be called from an ISR.
************************************************************************************************************************
*/
RAW_U16 raw_queue_buffer_receive(RAW_QUEUE_BUFFER *q_b, RAW_TICK_TYPE wait_option, RAW_VOID *msg, MSG_SIZE_TYPE *receive_size)
{
	RAW_U16 result;

	RAW_SR_ALLOC();

#if (RAW_QUEUE_BUFFER_FUNCTION_CHECK > 0)

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

	if (q_b == 0) {
		return RAW_NULL_OBJECT;
	}

	if (msg == 0) {
		return RAW_NULL_POINTER;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (q_b->common_block_obj.object_type != RAW_QUEUE_BUFFER_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	/* Fast path: data is available, copy it out and return. */
	if (!is_buffer_empty(q_b)) {
		*receive_size = buffer_to_msg(q_b, msg);
		RAW_CRITICAL_EXIT();
		return RAW_SUCCESS;
	}

	/* Buffer empty; return immediately if wait_option is RAW_NO_WAIT. */
	if (wait_option == RAW_NO_WAIT) {
		RAW_CRITICAL_EXIT();
		return RAW_NO_PEND_WAIT;
	}

	/* If the scheduler is locked, blocking is not allowed (macro returns an error). */
	SYSTEM_LOCK_PROCESS();

	/* Remember the destination buffer; the sender copies into it on wakeup. */
	raw_task_active->msg = msg;
	raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)q_b, raw_task_active, wait_option);

	RAW_CRITICAL_EXIT();

	raw_sched();

	/* Woken up; translate the wakeup reason into a return code. */
	result = block_state_post_process(raw_task_active, 0);

	/* If the msg was received successfully, report its size. */
	if (result == RAW_SUCCESS) {
		*receive_size = raw_task_active->qb_msg_size;
	}

	return result;
}
/*
************************************************************************************************************************
* Get a mutex
*
* Description: This function is called to get (lock) a mutex.
*
* Arguments  : mutex_ptr   is the address of the mutex object.
*              wait_option is how long to wait if the mutex is unavailable:
*                            RAW_NO_WAIT      (0x00000000)
*                            RAW_WAIT_FOREVER (0xFFFFFFFF)
*                            timeout value    (0x00000001 through 0xFFFFFFFE)
*
* Returns    : RAW_SUCCESS            : got the mutex.
*              RAW_MUTEX_OWNER_NESTED : caller already owns the mutex (nesting count bumped).
*              RAW_BLOCK_ABORT        : wait was aborted by another task or ISR.
*              RAW_NO_PEND_WAIT       : mutex unavailable and wait_option is RAW_NO_WAIT.
*              RAW_SCHED_DISABLE      : scheduler is locked so the task is not allowed to block.
*              RAW_BLOCK_DEL          : this mutex was deleted while waiting.
*
* Note(s)    : Supports ceiling-priority and priority-inheritance policies.
************************************************************************************************************************
*/
RAW_OS_ERROR raw_mutex_get(RAW_MUTEX *mutex_ptr, RAW_TICK_TYPE wait_option)
{
	RAW_OS_ERROR error_status;
	RAW_TASK_OBJ *mtxtsk;

	RAW_SR_ALLOC();

#if (RAW_MUTEX_FUNCTION_CHECK > 0)

	if (mutex_ptr == 0) {
		return RAW_NULL_OBJECT;
	}

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (mutex_ptr->common_block_obj.object_type != RAW_MUTEX_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	/* Recursive lock: the owner takes the same mutex again, so just bump
	 * the nesting count. */
	if (raw_task_active == mutex_ptr->mtxtsk) {

		/* (RAW_MUTEX_NESTED_TYPE)-1 is the maximum nesting count. */
		if (mutex_ptr->owner_nested == (RAW_MUTEX_NESTED_TYPE) - 1) {
			/* Likely a design error; the system must be stopped here! */
			port_system_error_process(RAW_MUTEX_NESTED_OVERFLOW, 0, 0, 0, 0, 0, 0);
		}
		else {
			mutex_ptr->owner_nested++;
		}

		RAW_CRITICAL_EXIT();
		return RAW_MUTEX_OWNER_NESTED;
	}

	if (mutex_ptr->policy == RAW_MUTEX_CEILING_POLICY) {

		/* A task whose base priority is above the ceiling must not take
		 * this mutex: violation of the highest-priority limit. */
		if (raw_task_active->bpriority < mutex_ptr->ceiling_prio) {
			RAW_CRITICAL_EXIT();
			TRACE_MUTEX_EX_CE_PRI(raw_task_active, mutex_ptr, wait_option);
			return RAW_EXCEED_CEILING_PRIORITY;
		}
	}

	mtxtsk = mutex_ptr->mtxtsk;

	/* Mutex is free: take the lock and link it into the caller's
	 * held-mutex list. */
	if (mtxtsk == 0) {

		mutex_ptr->mtxtsk = raw_task_active;
		mutex_ptr->mtxlist = raw_task_active->mtxlist;
		raw_task_active->mtxlist = mutex_ptr;
		mutex_ptr->owner_nested = 1u;

		if (mutex_ptr->policy == RAW_MUTEX_CEILING_POLICY) {

			/* Raise the caller to the ceiling priority if it is lower. */
			if (raw_task_active->priority > mutex_ptr->ceiling_prio) {
				change_internal_task_priority(raw_task_active, mutex_ptr->ceiling_prio);
			}
		}

		RAW_CRITICAL_EXIT();
		TRACE_MUTEX_GET(raw_task_active, mutex_ptr, wait_option);
		return RAW_SUCCESS;
	}

	/* Can't get the mutex; return immediately if wait_option is RAW_NO_WAIT. */
	if (wait_option == RAW_NO_WAIT) {
		RAW_CRITICAL_EXIT();
		return RAW_NO_PEND_WAIT;
	}

	/* If the scheduler is locked, blocking is not allowed (macro returns an error). */
	SYSTEM_LOCK_PROCESS();

	/* A higher-priority task is about to block on a lower-priority owner:
	 * priority inversion. Under the inheritance policy, boost the owner. */
	if (mutex_ptr->policy == RAW_MUTEX_INHERIT_POLICY) {

		if (raw_task_active->priority < mtxtsk->priority) {
			TRACE_TASK_PRI_INV(raw_task_active, mtxtsk);
			change_internal_task_priority(mtxtsk, raw_task_active->priority);
		}
	}

	/* Block the current task on the mutex. */
	raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)mutex_ptr, raw_task_active, wait_option);

	RAW_CRITICAL_EXIT();

	TRACE_MUTEX_GET_BLOCK(raw_task_active, mutex_ptr, wait_option);

	/* Find the next highest-priority task ready to run. */
	raw_sched();

	/* Woken up; translate the wakeup reason into a return code. */
	error_status = block_state_post_process(raw_task_active, 0);

	return error_status;
}
/*
 * Internal queue post. Inserts p_void into the queue at the end
 * (SEND_TO_END) or the front (otherwise), or — if tasks are blocked
 * receiving — hands the msg directly to them: all waiters when
 * opt_wake_all is non-zero, otherwise only the list head (the
 * highest-priority waiter).
 *
 * Returns RAW_SUCCESS, RAW_ERROR_OBJECT_TYPE or RAW_MSG_MAX (queue full).
 */
RAW_U16 msg_post(RAW_QUEUE *p_q, RAW_VOID *p_void, RAW_U8 opt_send_method, RAW_U8 opt_wake_all)
{
	LIST *block_list_head;

	RAW_SR_ALLOC();

	RAW_CRITICAL_ENTER();

	if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &p_q->common_block_obj.block_list;

	/* Queue full: reject the msg. */
	if (p_q->msg_q.current_numbers >= p_q->msg_q.size) {
		RAW_CRITICAL_EXIT();
		TRACE_QUEUE_MSG_MAX(raw_task_active, p_q, p_void, opt_send_method);
		return RAW_MSG_MAX;
	}

	/* Queue is not full here; if there is no blocked receive task, store
	 * the msg in the ring buffer. */
	if (is_list_empty(block_list_head)) {

		p_q->msg_q.current_numbers++;

		/* Update peak_numbers for debug. */
		if (p_q->msg_q.current_numbers > p_q->msg_q.peak_numbers) {
			p_q->msg_q.peak_numbers = p_q->msg_q.current_numbers;
		}

		if (opt_send_method == SEND_TO_END) {

			/* Append at the write pointer, wrapping at the end. */
			*p_q->msg_q.write++ = p_void;

			if (p_q->msg_q.write == p_q->msg_q.queue_end) {
				p_q->msg_q.write = p_q->msg_q.queue_start;
			}
		}
		else {

			/* Wrap the read pointer to the end if we are at the 1st queue entry. */
			if (p_q->msg_q.read == p_q->msg_q.queue_start) {
				p_q->msg_q.read = p_q->msg_q.queue_end;
			}

			p_q->msg_q.read--;

			/* Insert the message at the front of the queue. */
			*p_q->msg_q.read = p_void;
		}

		RAW_CRITICAL_EXIT();

		/* If the queue is registered with a notify function, call it. */
		if (p_q->queue_send_notify) {
			p_q->queue_send_notify(p_q);
		}

		TRACE_QUEUE_MSG_POST(raw_task_active, p_q, p_void, opt_send_method);

		return RAW_SUCCESS;
	}

	/* Wake all the tasks blocked on this queue, handing each the msg. */
	if (opt_wake_all) {

		while (!is_list_empty(block_list_head)) {
			wake_send_msg(list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void);
			TRACE_QUEUE_WAKE_TASK(raw_task_active, list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, opt_wake_all);
		}
	}
	/* Wake the highest-priority task blocked on this queue and send the msg to it. */
	else {

		wake_send_msg(list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void);
		TRACE_QUEUE_WAKE_TASK(raw_task_active, list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, opt_wake_all);
	}

	RAW_CRITICAL_EXIT();

	/* A woken task may outrank the caller; reschedule. */
	raw_sched();

	return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Release a mutex
*
* Description: This function is called to release (unlock) a mutex. Only the owner may release it, and a
*              recursively-taken mutex is only truly released when its nesting count reaches zero. If tasks are
*              blocked on the mutex, ownership is transferred directly to the highest-priority waiter.
*
* Arguments  : mutex_ptr is the address of the mutex object to be released.
*
* Returns    : RAW_SUCCESS                      : mutex released (or transferred to a waiter).
*              RAW_NULL_OBJECT                  : mutex_ptr is NULL (when function checking is enabled).
*              RAW_NOT_CALLED_BY_ISR            : called from an ISR (when function checking is enabled).
*              RAW_ERROR_OBJECT_TYPE            : not an initialized mutex object.
*              RAW_MUTEX_NOT_RELEASE_BY_OCCYPY  : caller is not the owner.
*              RAW_MUTEX_OWNER_NESTED           : nesting count decremented but still held.
************************************************************************************************************************
*/
RAW_OS_ERROR raw_mutex_put(RAW_MUTEX *mutex_ptr)
{
	LIST *block_list_head;
	RAW_TASK_OBJ *tcb;

	RAW_SR_ALLOC();

#if (RAW_MUTEX_FUNCTION_CHECK > 0)

	if (mutex_ptr == 0) {
		return RAW_NULL_OBJECT;
	}

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (mutex_ptr->common_block_obj.object_type != RAW_MUTEX_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	/* The mutex must be released by its owner. */
	if (raw_task_active != mutex_ptr->mtxtsk) {
		RAW_CRITICAL_EXIT();
		return RAW_MUTEX_NOT_RELEASE_BY_OCCYPY;
	}

	/* Unwind one level of recursive locking; still held if non-zero. */
	mutex_ptr->owner_nested--;

	if (mutex_ptr->owner_nested) {
		RAW_CRITICAL_EXIT();
		return RAW_MUTEX_OWNER_NESTED;
	}

	/* Unlink from the owner's held-mutex list and undo any priority boost. */
	release_mutex(raw_task_active, mutex_ptr);

	block_list_head = &mutex_ptr->common_block_obj.block_list;

	/* If no task is blocked on this list, the mutex simply becomes free. */
	if (is_list_empty(block_list_head)) {
		mutex_ptr->mtxtsk = 0;
		RAW_CRITICAL_EXIT();
		TRACE_MUTEX_RELEASE_SUCCESS(raw_task_active, mutex_ptr);
		return RAW_SUCCESS;
	}

	/* There must be a task blocked on this mutex; the list head is the
	 * highest-priority waiter. */
	tcb = raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);

	/* Wake the new owner. */
	raw_wake_object(tcb);

	/* Transfer ownership to the woken task. */
	mutex_ptr->mtxtsk = tcb;
	mutex_ptr->mtxlist = tcb->mtxlist;
	tcb->mtxlist = mutex_ptr;
	mutex_ptr->owner_nested = 1u;

	if (mutex_ptr->policy == RAW_MUTEX_CEILING_POLICY) {

		/* Raise the new owner to the ceiling priority if it is lower. */
		if (tcb->priority > mutex_ptr->ceiling_prio) {
			change_internal_task_priority(tcb, mutex_ptr->ceiling_prio);
		}
	}

	TRACE_MUTEX_WAKE_TASK(raw_task_active, tcb);

	RAW_CRITICAL_EXIT();

	/* The new owner may outrank the caller; reschedule. */
	raw_sched();

	return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Receive a msg
*
* Description: This function is called to receive a msg from a queue.
*
* Arguments  : p_q         is the address of the queue object.
*              wait_option is how the service behaves if the queue is empty:
*                            RAW_NO_WAIT      (0x00000000)
*                            RAW_WAIT_FOREVER (0xFFFFFFFF)
*                            timeout value    (0x00000001 through 0xFFFFFFFE)
*              msg         is the address of a pointer; on success it is filled with the received msg pointer,
*                          otherwise with 0.
*
* Returns    : RAW_SUCCESS       : raw os return success.
*              RAW_NO_PEND_WAIT  : queue empty and wait_option is RAW_NO_WAIT.
*              RAW_BLOCK_DEL     : this queue was deleted while waiting.
*              RAW_BLOCK_TIMEOUT : queue still empty when the wait time expired.
*              RAW_BLOCK_ABORT   : wait was aborted.
*              RAW_STATE_UNKNOWN : possibly a system error.
*
* Note(s)    : If no msg is received, *msg is set to the null pointer (0). An ISR may call this function only with
*              wait_option equal to RAW_NO_WAIT.
************************************************************************************************************************
*/
RAW_U16 raw_queue_receive(RAW_QUEUE *p_q, RAW_TICK_TYPE wait_option, RAW_VOID **msg)
{
	RAW_VOID *pmsg;
	RAW_U16 result;

	RAW_SR_ALLOC();

#if (RAW_QUEUE_FUNCTION_CHECK > 0)

	/* An ISR may only poll (RAW_NO_WAIT); it must never block. */
	if (raw_int_nesting && (wait_option != RAW_NO_WAIT)) {
		return RAW_NOT_CALLED_BY_ISR;
	}

	if (p_q == 0) {
		return RAW_NULL_OBJECT;
	}

	if (msg == 0) {
		return RAW_NULL_POINTER;
	}

#endif

#if (CONFIG_RAW_ZERO_INTERRUPT > 0)

	if (raw_int_nesting) {
		return RAW_NOT_CALLED_BY_ISR;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	/* Fast path: the queue has msgs, take one from the read pointer. */
	if (p_q->msg_q.current_numbers) {

		pmsg = *p_q->msg_q.read++;

		/* Wrap around to the start of the ring buffer. */
		if (p_q->msg_q.read == p_q->msg_q.queue_end) {
			p_q->msg_q.read = p_q->msg_q.queue_start;
		}

		*msg = pmsg;
		p_q->msg_q.current_numbers--;

		RAW_CRITICAL_EXIT();

		TRACE_QUEUE_GET_MSG(raw_task_active, p_q, wait_option, *msg);

		return RAW_SUCCESS;
	}

	/* Queue empty; return immediately if wait_option is RAW_NO_WAIT. */
	if (wait_option == RAW_NO_WAIT) {
		*msg = (RAW_VOID *)0;
		RAW_CRITICAL_EXIT();
		return RAW_NO_PEND_WAIT;
	}

	/* If the scheduler is locked, blocking is not allowed (macro returns an error). */
	SYSTEM_LOCK_PROCESS_QUEUE();

	/* Block the current task on the queue. */
	raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)p_q, raw_task_active, wait_option);

	RAW_CRITICAL_EXIT();

	TRACE_QUEUE_GET_BLOCK(raw_task_active, p_q, wait_option);

	raw_sched();

	/* Default to no msg; block_state_post_process fills *msg on success. */
	*msg = (RAW_VOID *)0;

	result = block_state_post_process(raw_task_active, msg);

	return result;
}
/*
************************************************************************************************************************
* Get an event
*
* Description: This service retrieves event flags from the specified event flags group.
*              Each event flags group contains 32 event flags; each flag is represented
*              by a single bit. This service can retrieve a variety of event flag
*              combinations, as selected by the input parameters.
*
* Arguments  : event_ptr        is the address of the event object.
*              -----
*              requested_flags  is the 32-bit unsigned variable that represents the requested event flags.
*              -----
*              get_option       specifies whether all or any of the requested event flags are required.
*                               The following are valid selections:
*                               RAW_AND
*                               RAW_AND_CLEAR
*                               RAW_OR
*                               RAW_OR_CLEAR
*                               Selecting RAW_AND or RAW_AND_CLEAR specifies that all event flags must be
*                               present in the group. Selecting RAW_OR or RAW_OR_CLEAR specifies that any
*                               event flag is satisfactory. Event flags that satisfy the request are
*                               cleared (set to zero) if RAW_AND_CLEAR or RAW_OR_CLEAR is specified.
*              -----
*              wait_option      is how the service behaves if the selected event flags are not set.
*                               The wait options are defined as follows:
*                               RAW_NO_WAIT      (0x00000000)
*                               RAW_WAIT_FOREVER (0xFFFFFFFF)
*                               timeout value    (0x00000001 through 0xFFFFFFFE)
*              -----
*              actual_flags_ptr will be filled with the actual flags when the function returns.
*
* Returns      RAW_SUCCESS:       Get event success.
*              RAW_BLOCK_ABORT:   event wait was aborted by another task or ISR.
*              RAW_NO_PEND_WAIT:  event is not got and option is RAW_NO_WAIT.
*              RAW_SCHED_DISABLE: system is locked and the task is not allowed to block.
*              RAW_BLOCK_DEL:     this event was deleted while the task was waiting.
*
* Note(s)    : RAW_STATE_UNKNOWN means wrong task state, probably system error.
*
************************************************************************************************************************
*/
RAW_U16 raw_event_get(RAW_EVENT *event_ptr, RAW_U32 requested_flags, RAW_U8 get_option, RAW_U32 *actual_flags_ptr, RAW_TICK_TYPE wait_option)
{
	RAW_U16 error_status;
	RAW_U8 status;

	RAW_SR_ALLOC();

#if (RAW_EVENT_FUNCTION_CHECK > 0)

	if (event_ptr == 0) {

		return RAW_NULL_OBJECT;
	}

	/* This service may block, so it can never be called from an ISR. */
	if (raw_int_nesting) {

		return RAW_NOT_CALLED_BY_ISR;
	}

	if ((get_option != RAW_AND) && (get_option != RAW_OR) && (get_option != RAW_AND_CLEAR) && (get_option != RAW_OR_CLEAR)) {

		return RAW_NO_THIS_OPTION;
	}

#endif

	RAW_CRITICAL_ENTER();

	if (event_ptr->common_block_obj.object_type != RAW_EVENT_OBJ_TYPE) {

		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	/*if option is and flag: every requested bit must currently be set*/
	if (get_option & RAW_FLAGS_AND_MASK) {

		if ((event_ptr->flags & requested_flags) == requested_flags) {
			status = RAW_TRUE;
		}

		else {
			status = RAW_FALSE;
		}
	}

	/*if option is or flag: any single requested bit is enough*/
	else {

		if (event_ptr->flags & requested_flags) {
			status = RAW_TRUE;
		}

		else {
			status = RAW_FALSE;
		}
	}

	if (status == RAW_TRUE) {

		/* Condition already satisfied: report the current flags to the caller. */
		*actual_flags_ptr = event_ptr->flags;

		/*does it need to clear the flags*/
		if (get_option & RAW_FLAGS_CLEAR_MASK) {

			event_ptr->flags &= ~requested_flags;
		}

		RAW_CRITICAL_EXIT();

		TRACE_EVENT_GET(raw_task_active, event_ptr);

		return RAW_SUCCESS;
	}

	/*Can't get event, and return immediately if wait_option is RAW_NO_WAIT*/
	if (wait_option == RAW_NO_WAIT) {

		RAW_CRITICAL_EXIT();
		return RAW_NO_PEND_WAIT;
	}

	/*system is locked so task can not be blocked, just return immediately*/
	SYSTEM_LOCK_PROCESS();

	/*Remember the passed information: event_set() reads these fields to decide
	  whether to wake this task and where to store the actual flags.*/
	raw_task_active->raw_suspend_option = get_option;
	raw_task_active->raw_suspend_flags = requested_flags;
	raw_task_active->raw_additional_suspend_info = actual_flags_ptr;

	raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)event_ptr, raw_task_active, wait_option);

	RAW_CRITICAL_EXIT();

	TRACE_EVENT_GET_BLOCK(raw_task_active, event_ptr, wait_option);

	raw_sched();

	/*So the task is waked up, need to know which reason caused the wake up.*/
	error_status = block_state_post_process(raw_task_active, 0);

	/*if it is not successful then we do not need to clear the flags,
	  just return the error status*/
	if (error_status != RAW_SUCCESS) {

		return error_status;
	}

	RAW_CRITICAL_ENTER();

	/*does it need to clear the flags (the waker delivered the flags but did
	  not clear them; the clearing is done here by the woken task)*/
	if (get_option & RAW_FLAGS_CLEAR_MASK) {

		event_ptr->flags &= ~requested_flags;
	}

	RAW_CRITICAL_EXIT();

	return RAW_SUCCESS;
}
/*
 * Internal worker that updates an event flags group and wakes waiting tasks.
 *
 * event_ptr    : address of the event object.
 * flags_to_set : bit mask applied to the group's flags.
 * set_option   : with RAW_FLAGS_AND_MASK the mask is ANDed in (a clear
 *                operation); otherwise it is ORed in (a set operation).
 *
 * Returns RAW_SUCCESS, or RAW_ERROR_OBJECT_TYPE if event_ptr is not an
 * initialized event object.
 *
 * On a set operation every pending task whose wait condition (stored in its
 * raw_suspend_option / raw_suspend_flags fields by raw_event_get) is now
 * satisfied receives the current flag value and is woken, then the
 * scheduler runs.
 */
RAW_U16 event_set(RAW_EVENT *event_ptr, RAW_U32 flags_to_set, RAW_U8 set_option)
{
	LIST *block_head;
	LIST *node;
	LIST *next_node;
	RAW_TASK_OBJ *waiting_task;
	RAW_U8 wake_it;

	RAW_SR_ALLOC();

	wake_it = RAW_FALSE;

	RAW_CRITICAL_ENTER();

	if (event_ptr->common_block_obj.object_type != RAW_EVENT_OBJ_TYPE) {

		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_head = &event_ptr->common_block_obj.block_list;

	/* AND option is a clear: mask the flags and return at once -- clearing
	   bits can never satisfy a waiter, so no wake-up scan is needed. */
	if (set_option & RAW_FLAGS_AND_MASK) {

		event_ptr->flags &= flags_to_set;
		RAW_CRITICAL_EXIT();
		return RAW_SUCCESS;
	}

	/* OR option: merge the new flags in, then scan the pend list. */
	event_ptr->flags |= flags_to_set;

	for (node = block_head->next; node != block_head; node = next_node) {

		/* raw_wake_object() may unlink this node, so capture the
		   successor before possibly waking the task. */
		next_node = node->next;
		waiting_task = list_entry(node, RAW_TASK_OBJ, task_list);

		if (waiting_task->raw_suspend_option & RAW_FLAGS_AND_MASK) {
			/* AND wait: every requested bit must be present. */
			wake_it = ((event_ptr->flags & waiting_task->raw_suspend_flags)
			            == waiting_task->raw_suspend_flags) ? RAW_TRUE : RAW_FALSE;
		}

		else {
			/* OR wait: any one requested bit suffices. */
			wake_it = (event_ptr->flags & waiting_task->raw_suspend_flags)
			           ? RAW_TRUE : RAW_FALSE;
		}

		if (wake_it == RAW_TRUE) {

			/* Deliver the current flags through the pointer the waiter
			   stashed in raw_additional_suspend_info, then wake it. */
			(*(RAW_U32 *)(waiting_task->raw_additional_suspend_info)) = event_ptr->flags;

			raw_wake_object(waiting_task);

			TRACE_EVENT_WAKE(raw_task_active, waiting_task);
		}
	}

	RAW_CRITICAL_EXIT();

	raw_sched();

	return RAW_SUCCESS;
}