/*
************************************************************************************************************************
* Post an event to an active object (FIFO order).
*
* Description: Posts the event to the tail of the active object's private queue.
*
* Arguments  : me    is the address of this active object
*              event is the address of the event being sent
*
* Returns    : None. Asserts if the underlying queue post fails (queue full is treated as fatal).
*
* Note(s)    : Pool-allocated events are reference counted; the count is bumped under CPU-disable so
*              the post is safe against concurrent posters / ISRs.
************************************************************************************************************************
*/
void active_event_post_end(ACTIVE_OBJECT_STRUCT *me, STATE_EVENT *event)
{
    RAW_OS_ERROR err;

    RAW_SR_ALLOC();

    RAW_CPU_DISABLE();

    /* which_pool != 0 means the event came from an event pool and is reference counted;
       one more queue now holds a reference to it. */
    if (event->which_pool) {
        event->ref_count++;
    }

    RAW_CPU_ENABLE();

    err = raw_queue_end_post(&me->active_queue, (void *)event);

    /* A failed post would leak the reference taken above, so treat it as fatal. */
    if (err != RAW_SUCCESS) {
        RAW_ASSERT(0);
    }
}
/*
************************************************************************************************************************
* Post an event to an active object (LIFO order).
*
* Description: Posts the event to the front of the active object's private queue so it is
*              dispatched before anything already queued.
*
* Arguments  : me    is the address of this active object
*              event is the address of the event being sent
*
* Returns    : None. Asserts if the underlying queue post fails.
*
* Note(s)    : Pool-allocated events are reference counted; the count is bumped under CPU-disable.
************************************************************************************************************************
*/
void active_event_post_front(ACTIVE_OBJECT_STRUCT *me, STATE_EVENT *event)
{
    /* Fix: use RAW_OS_ERROR (as active_event_post_end does) instead of RAW_U16 —
       raw_queue_front_post returns an OS error code, not a plain integer. */
    RAW_OS_ERROR ret;

    RAW_SR_ALLOC();

    RAW_CPU_DISABLE();

    /* One more queue now holds a reference to this pool-allocated event. */
    if (event->which_pool) {
        event->ref_count++;
    }

    RAW_CPU_ENABLE();

    ret = raw_queue_front_post(&me->active_queue, (void *)event);

    /* A failed post would leak the reference taken above, so treat it as fatal. */
    if (ret != RAW_SUCCESS) {
        RAW_ASSERT(0);
    }
}
/*
************************************************************************************************************************
* Recall an event from a deferred queue.
*
* Description: Takes one event (if any) out of the deferred queue q and re-posts it LIFO to the
*              active object so it is processed before other queued events.
*
* Arguments  : me is the active object to post the recalled event to
*              q  is the address of the deferred queue
*
* Returns    : 1 if an event was recalled, 0 if the deferred queue was empty.
*
* Note(s)    : The deferred queue held one reference on a pool-allocated event; the event is
*              leaving that queue, so the reference count is DECREMENTED here.
*              active_event_post_front() then increments it again for the active queue, so the
*              net count is unchanged (the original code incremented here as well, leaking one
*              reference per recall).
************************************************************************************************************************
*/
RAW_U16 active_event_recall(ACTIVE_OBJECT_STRUCT *me, RAW_QUEUE *q)
{
    STATE_EVENT *event;
    RAW_U16 recalled;
    RAW_U16 err;

    RAW_SR_ALLOC();

    err = raw_queue_receive(q, RAW_NO_WAIT, (RAW_VOID **)&event);

    if (err == RAW_SUCCESS) {

        RAW_CPU_DISABLE();

        /* The event leaves the deferred queue: drop that queue's reference.
           post_front below re-takes one for the active queue. */
        if (event->which_pool) {
            event->ref_count--;
        }

        RAW_CPU_ENABLE();

        active_event_post_front(me, event);
        recalled = 1;
    }

    else {
        recalled = 0;
    }

    return recalled;
}
/*
************************************************************************************************************************
* Register a notify function on a semaphore.
*
* Description: Stores notify_function in the semaphore object; it is called whenever the
*              semaphore is put. Normally used to implement pend-on-multiple-objects.
*
* Arguments  : semaphore_ptr   is the address of the semaphore object
*              notify_function is the function called whenever the semaphore is put
*
* Returns    : RAW_SUCCESS           on success
*              RAW_NULL_OBJECT       if semaphore_ptr is NULL (when function checking is on)
*              RAW_NOT_CALLED_BY_ISR if called from interrupt context (when function checking is on)
*              RAW_ERROR_OBJECT_TYPE if the object is not an initialized semaphore
************************************************************************************************************************
*/
RAW_OS_ERROR raw_semphore_send_notify(RAW_SEMAPHORE *semaphore_ptr, SEMPHORE_SEND_NOTIFY notify_function)
{
    RAW_SR_ALLOC();

#if (RAW_SEMA_FUNCTION_CHECK > 0)

    if (semaphore_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

#endif

    RAW_CRITICAL_ENTER();

    /* Type check under the critical section so a concurrent delete cannot race us. */
    if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    semaphore_ptr->semphore_send_notify = notify_function;

    RAW_CRITICAL_EXIT();

    return RAW_SUCCESS;
}
/*
* Post an asynchronous event to a process. This function can be used in interrupt.
*
* Fixes vs. original:
*  - snum was "static": since this function is ISR-callable, a nested post occurring after
*    the critical section could overwrite snum before the first caller used it, corrupting
*    the event ring. It is now an ordinary automatic variable.
*  - The full-queue check and the slot writes were outside the critical section, so a
*    concurrent poster could invalidate the check or a consumer could see the slot before
*    it was filled. All ring-buffer state is now updated atomically.
*/
int process_post(struct process *p, process_event_t ev, process_data_t data)
{
  process_num_events_t snum;

  RAW_SR_ALLOC();

  RAW_CRITICAL_ENTER();

  /* Queue full is treated as fatal, matching the original behavior. */
  if(nevents == PROCESS_CONF_NUMEVENTS) {
    RAW_CRITICAL_EXIT();
    RAW_ASSERT(0);
  }

  /* Reserve the next free slot; PROCESS_CONF_NUMEVENTS must be a power of two
     for the mask to wrap correctly. */
  snum = (process_num_events_t)(fevent + nevents) & (PROCESS_CONF_NUMEVENTS - 1);
  ++nevents;

  /* Fill the slot before leaving the critical section so a consumer never
     observes a reserved-but-empty entry. */
  events[snum].ev = ev;
  events[snum].data = data;
  events[snum].p = p;

  /* High-water mark, for diagnostics. */
  if(nevents > process_maxevents) {
    process_maxevents = nevents;
  }

  RAW_CRITICAL_EXIT();

  return PROCESS_ERR_OK;
}
/*
* Core scheduler entry: switch to the highest-priority ready task if it differs from the
* currently running task. Silently returns when called from an ISR or while the scheduler
* is locked — in those cases the switch happens later (at interrupt exit / unlock).
*/
void raw_sched(void)
{
    RAW_SR_ALLOC();

    /* If it is in interrupt or system is locked, just return. */
    if (raw_int_nesting || raw_sched_lock) {
        return;
    }

    USER_CPU_INT_DISABLE();

    /* Pick the highest-priority ready task into high_ready_obj. */
    get_ready_task(&raw_ready_queue);

    /* If the highest task is the current task, no switch is needed. */
    if (high_ready_obj == raw_task_active) {
        USER_CPU_INT_ENABLE();
        return;
    }

    TRACE_TASK_SWITCH(raw_task_active, high_ready_obj);

    /* Port-specific context switch; interrupts stay disabled across it. */
    CONTEXT_SWITCH();

    USER_CPU_INT_ENABLE();
}
/*
* Worker task body for a work queue: blocks on the queue forever, runs each received
* message's handler, then returns the message node to the global free list.
*
* pa is the WORK_QUEUE_STRUCT this worker serves (passed at task creation).
*/
static void work_queue_task(void *pa)
{
    RAW_OS_ERROR ret;
    OBJECT_WORK_QUEUE_MSG *msg_recv;
    WORK_QUEUE_STRUCT *wq;

    RAW_SR_ALLOC();

    wq = pa;

    while (1) {

        /* Block until work arrives; any receive failure is fatal. */
        ret = raw_queue_receive (&wq->queue, RAW_WAIT_FOREVER, (void **)(&msg_recv));

        if (ret != RAW_SUCCESS) {
            RAW_ASSERT(0);
        }

        msg_recv->handler(msg_recv->arg, msg_recv->msg);

        /* Push the node back on the shared free list; CPU-disable because
           sche_work_queue() may pop from this list in ISR context. */
        RAW_CPU_DISABLE();
        msg_recv->next = free_work_queue_msg;
        free_work_queue_msg = msg_recv;
        RAW_CPU_ENABLE();
    }
}
/*
************************************************************************************************************************
* Register a notify function on a queue.
*
* Description: Stores notify_function in the queue object; it is called whenever data is sent
*              to the queue. Normally used to implement pend-on-multiple-objects.
*
* Arguments  : p_q             is the address of the queue object
*              notify_function is the function called whenever data is sent to the queue
*
* Returns    : RAW_SUCCESS           on success
*              RAW_NULL_OBJECT       if p_q is NULL (when function checking is on)
*              RAW_NOT_CALLED_BY_ISR if called from interrupt context (when function checking is on)
*              RAW_ERROR_OBJECT_TYPE if the object is not an initialized queue
*
* Note(s)    : NOTE(review): return type is RAW_U16 while the semaphore counterpart returns
*              RAW_OS_ERROR — presumably the same underlying type; confirm before unifying.
************************************************************************************************************************
*/
RAW_U16 raw_queue_send_notify(RAW_QUEUE *p_q, QUEUE_SEND_NOTIFY notify_function)
{
    RAW_SR_ALLOC();

#if (RAW_QUEUE_FUNCTION_CHECK > 0)

    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

#endif

    RAW_CRITICAL_ENTER();

    /* Type check under the critical section so a concurrent delete cannot race us. */
    if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    p_q->queue_send_notify = notify_function;

    RAW_CRITICAL_EXIT();

    return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Get a semaphore.
*
* Description: Takes the semaphore if its count is non-zero; otherwise blocks according to
*              wait_option.
*
* Arguments  : semaphore_ptr is the address of the semaphore object
*              wait_option   : RAW_NO_WAIT      (0x00000000) return immediately
*                              RAW_WAIT_FOREVER (0xFFFFFFFF) block until available
*                              1 .. 0xFFFFFFFE               timeout in ticks
*
* Returns    : RAW_SUCCESS       : got the semaphore
*              RAW_BLOCK_ABORT   : the pend was aborted by another task or ISR
*              RAW_NO_PEND_WAIT  : not available and wait_option is RAW_NO_WAIT
*              RAW_SCHED_DISABLE : scheduler locked, task not allowed to block
*              RAW_BLOCK_DEL     : the object was deleted while pending
************************************************************************************************************************
*/
RAW_OS_ERROR raw_semaphore_get(RAW_SEMAPHORE *semaphore_ptr, RAW_TICK_TYPE wait_option)
{
    RAW_OS_ERROR error_status;

    RAW_SR_ALLOC();

#if (RAW_SEMA_FUNCTION_CHECK > 0)

    if (semaphore_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

#endif

    RAW_CRITICAL_ENTER();

    if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* Fast path: count available, take it and return. */
    if (semaphore_ptr->count) {
        semaphore_ptr->count--;
        RAW_CRITICAL_EXIT();
        TRACE_SEMAPHORE_GET_SUCCESS(raw_task_active, semaphore_ptr);
        return RAW_SUCCESS;
    }

    /* Cannot get the semaphore; return immediately if wait_option is RAW_NO_WAIT. */
    if (wait_option == RAW_NO_WAIT) {
        RAW_CRITICAL_EXIT();
        return RAW_NO_PEND_WAIT;
    }

    /* Blocking is not allowed while the scheduler is locked (macro returns RAW_SCHED_DISABLE). */
    SYSTEM_LOCK_PROCESS();

    /* Enqueue the current task on the semaphore's block list with the given timeout. */
    raw_pend_object((RAW_COMMON_BLOCK_OBJECT *)semaphore_ptr, raw_task_active, wait_option);

    RAW_CRITICAL_EXIT();

    TRACE_SEMAPHORE_GET_BLOCK(raw_task_active, semaphore_ptr, wait_option);

    /* Actually give up the CPU; we resume here after wakeup/timeout/abort/delete. */
    raw_sched();

    /* Translate the task's block status into the return code above. */
    error_status = block_state_post_process(raw_task_active, 0);

    return error_status;
}
/*
* Snapshot a queue's message-ring state and first blocked task into msg_information.
* The copy is made under the critical section so all fields are mutually consistent.
*
* Returns RAW_SUCCESS, RAW_NULL_OBJECT / RAW_NULL_POINTER on bad arguments (when function
* checking is on), RAW_NOT_CALLED_BY_ISR in interrupt context, or RAW_ERROR_OBJECT_TYPE.
*/
RAW_U16 raw_queue_get_information(RAW_QUEUE *p_q, RAW_MSG_INFO *msg_information)
{
    LIST *block_list_head;

    RAW_SR_ALLOC();

#if (RAW_QUEUE_FUNCTION_CHECK > 0)

    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }

    if (msg_information == 0) {
        return RAW_NULL_POINTER;
    }

#endif

#if (CONFIG_RAW_ZERO_INTERRUPT > 0)

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

#endif

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    block_list_head = &p_q->common_block_obj.block_list;

    /* Field-by-field copy of the ring-buffer bookkeeping. */
    msg_information->msg_q.peak_numbers = p_q->msg_q.peak_numbers;
    msg_information->msg_q.current_numbers = p_q->msg_q.current_numbers;
    msg_information->msg_q.queue_start = p_q->msg_q.queue_start;
    msg_information->msg_q.queue_end = p_q->msg_q.queue_end;
    msg_information->msg_q.read = p_q->msg_q.read;
    msg_information->msg_q.write = p_q->msg_q.write;
    msg_information->msg_q.size = p_q->msg_q.size;

    /* First entry of the block list (the head itself if no task is blocked). */
    msg_information->suspend_entry = block_list_head->next;

    RAW_CRITICAL_EXIT();

    return RAW_SUCCESS;
}
/*
* Tick-time round-robin accounting for priority level task_prio: decrement the running
* task's remaining time slice and, when it hits zero, rotate it to the end of its ready
* list and reload the slice. SCHED_FIFO tasks and sole occupants of a list are exempt.
*/
void calculate_time_slice(RAW_U8 task_prio)
{
    RAW_TASK_OBJ *task_ptr;
    LIST *head;

    RAW_SR_ALLOC();

    head = &raw_ready_queue.task_ready_list[task_prio];

    RAW_CRITICAL_ENTER();

    /* If ready list is empty then just return because nothing is to be calculated. */
    if (is_list_empty(head)) {
        RAW_CRITICAL_EXIT();
        return;
    }

    /* Always look at the first task on the ready list (the one that is running). */
    task_ptr = list_entry(head->next, RAW_TASK_OBJ, task_list);

    /* SCHED_FIFO does not have a timeslice, just return. */
    if (task_ptr->sched_way == SCHED_FIFO) {
        RAW_CRITICAL_EXIT();
        return;
    }

    /* Only one task on this ready list, so no time slice to calculate
       (the idle task always satisfies this condition). */
    if (head->next->next == head) {
        RAW_CRITICAL_EXIT();
        return;
    }

    if (task_ptr->time_slice) {
        task_ptr->time_slice--;
    }

    /* If the current active task still has time_slice left, just return. */
    if (task_ptr->time_slice) {
        RAW_CRITICAL_EXIT();
        return;
    }

    /* Slice exhausted: move the task to the end of the ready list for its priority. */
    move_to_ready_list_end(&raw_ready_queue, task_ptr);

    /* Restore the task's time slice for its next turn. */
    task_ptr->time_slice = task_ptr->time_total;

    RAW_CRITICAL_EXIT();
}
/*
* Set a semaphore's count to sem_count.
*
* Allowed when the count is already non-zero, or when it is zero and no task is blocked
* on the semaphore; otherwise returns RAW_SEMAPHORE_TASK_WAITING (setting the count while
* tasks wait would bypass their wakeup).
*
* Fix vs. original: block_list_head was computed from semaphore_ptr BEFORE the null-pointer
* check, which is undefined behavior when semaphore_ptr is NULL (CERT EXP34-C). The member
* address is now taken only after the checks pass.
*
* Returns: RAW_SUCCESS, RAW_NULL_OBJECT, RAW_NOT_CALLED_BY_ISR, RAW_ERROR_OBJECT_TYPE,
*          or RAW_SEMAPHORE_TASK_WAITING.
*/
RAW_OS_ERROR raw_semaphore_set(RAW_SEMAPHORE *semaphore_ptr, RAW_U32 sem_count)
{
    LIST *block_list_head;

    RAW_SR_ALLOC();

#if (RAW_SEMA_FUNCTION_CHECK > 0)

    if (semaphore_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

#endif

    /* Safe only after the null check above. */
    block_list_head = &semaphore_ptr->common_block_obj.block_list;

    RAW_CRITICAL_ENTER();

    if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    if (semaphore_ptr->count) {
        semaphore_ptr->count = sem_count;
    }

    else {
        /* count == 0: only allowed if nobody is pending on the semaphore. */
        if (is_list_empty(block_list_head)) {
            semaphore_ptr->count = sem_count;
        }

        else {
            RAW_CRITICAL_EXIT();
            return RAW_SEMAPHORE_TASK_WAITING;
        }
    }

    RAW_CRITICAL_EXIT();

    return RAW_SUCCESS;
}
/*
* Report a size-queue's free capacity, peak usage and current message count through the
* three out-pointers, consistently snapshotted under the critical section.
*
* Fix vs. original: queue_peak_msg_size was written through but never null-checked, unlike
* the other two out-parameters; the missing check is added.
*
* Returns: RAW_SUCCESS, RAW_NULL_OBJECT / RAW_NULL_POINTER on bad arguments (when function
* checking is on), RAW_NOT_CALLED_BY_ISR in interrupt context, or RAW_ERROR_OBJECT_TYPE.
*/
RAW_OS_ERROR raw_queue_size_get_information(RAW_QUEUE_SIZE *p_q, MSG_SIZE_TYPE *queue_free_msg_size, MSG_SIZE_TYPE *queue_peak_msg_size, MSG_SIZE_TYPE *queue_current_msg)
{
    RAW_SR_ALLOC();

#if (RAW_QUEUE_SIZE_FUNCTION_CHECK > 0)

    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }

    if (queue_free_msg_size == 0) {
        return RAW_NULL_POINTER;
    }

    /* Fix: this out-pointer was dereferenced without a check. */
    if (queue_peak_msg_size == 0) {
        return RAW_NULL_POINTER;
    }

    if (queue_current_msg == 0) {
        return RAW_NULL_POINTER;
    }

#endif

#if (CONFIG_RAW_ZERO_INTERRUPT > 0)

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

#endif

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_SIZE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    *queue_free_msg_size = p_q->queue_msg_size - p_q->queue_current_msg;
    *queue_current_msg = p_q->queue_current_msg;
    *queue_peak_msg_size = p_q->peak_numbers;

    RAW_CRITICAL_EXIT();

    return RAW_SUCCESS;
}
/*
* Report a buffer-queue's free size and total size through the two out-pointers,
* snapshotted under the critical section.
*
* Fix vs. original: null OUT-pointers returned RAW_NULL_OBJECT; the sibling information
* APIs (raw_queue_get_information, raw_queue_size_get_information) return RAW_NULL_POINTER
* for null out-parameters, so the same code is used here. RAW_NULL_OBJECT remains reserved
* for a null queue object.
*
* Returns: RAW_SUCCESS, RAW_NULL_OBJECT, RAW_NULL_POINTER, RAW_NOT_CALLED_BY_ISR,
*          or RAW_ERROR_OBJECT_TYPE.
*/
RAW_U16 raw_queue_buffer_get_information(RAW_QUEUE_BUFFER *q_b, RAW_U32 *queue_buffer_free_size, RAW_U32 *queue_buffer_size)
{
    RAW_SR_ALLOC();

#if (RAW_QUEUE_BUFFER_FUNCTION_CHECK > 0)

    if (q_b == 0) {
        return RAW_NULL_OBJECT;
    }

    if (queue_buffer_free_size == 0) {
        return RAW_NULL_POINTER;
    }

    if (queue_buffer_size == 0) {
        return RAW_NULL_POINTER;
    }

#endif

#if (CONFIG_RAW_ZERO_INTERRUPT > 0)

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

#endif

    RAW_CRITICAL_ENTER();

    if (q_b->common_block_obj.object_type != RAW_QUEUE_BUFFER_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    *queue_buffer_free_size = q_b->frbufsz;
    *queue_buffer_size = q_b->bufsz;

    RAW_CRITICAL_EXIT();

    return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Release byte memory back to a pool.
*
* Description: Returns memory_ptr (previously handed out by the byte-pool allocator) to
*              pool_ptr by marking its hidden header block free.
*
* Arguments  : pool_ptr   is the byte pool the memory came from
*              memory_ptr is the address being returned to the pool
*
* Returns    : RAW_SUCCESS, RAW_NULL_POINTER, or RAW_ERROR_OBJECT_TYPE.
*
* Note(s)    : Each allocated block is preceded by a header of sizeof(RAW_U8 *) + sizeof(RAW_U32)
*              bytes: a pointer to the next block followed by an "allocated" marker word.
*              NOTE(review): behavior is undefined if memory_ptr did not come from this pool —
*              the header math assumes the allocator's layout; confirm callers honor this.
************************************************************************************************************************
*/
RAW_OS_ERROR raw_byte_release(RAW_BYTE_POOL_STRUCT *pool_ptr, void *memory_ptr)
{
    RAW_U8 *work_ptr;          /* Working block pointer */

    RAW_SR_ALLOC();

#if (RAW_BYTE_FUNCTION_CHECK > 0)

    if (pool_ptr == 0) {
        return RAW_NULL_POINTER;
    }

    if (memory_ptr == 0) {
        return RAW_NULL_POINTER;
    }

#endif

    if (pool_ptr ->common_block_obj.object_type != RAW_BYTE_OBJ_TYPE) {
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* Back off the memory pointer to pick up its header. */
    work_ptr = (RAW_U8 *)memory_ptr - sizeof(RAW_U8 *) - sizeof(RAW_U32);

    /* Disable interrupts: the pool state below is shared with other contexts. */
    RAW_CPU_DISABLE();

    /* Indicate that this thread is the current owner. */
    pool_ptr->raw_byte_pool_owner = raw_task_active;

    /* Release the memory: overwrite the marker word with the free tag. */
    *((RAW_U32 *)(work_ptr + sizeof(RAW_U8 *))) = RAW_BYTE_BLOCK_FREE;

    /* Update the number of available bytes in the pool
       (header's next-block pointer minus block start = this block's span). */
    pool_ptr->raw_byte_pool_available = pool_ptr->raw_byte_pool_available + (*((RAW_U8 * *)(work_ptr)) - work_ptr);

    /* Restart the next search at the freed block. */
    pool_ptr->raw_byte_pool_search = work_ptr;

    RAW_CPU_ENABLE();

    return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Check whether a queue-size object is full.
*
* Description: Compares the current message count against the queue capacity under the
*              critical section.
*
* Arguments  : p_q is the address of the queue-size object
*
* Returns    : 1 : full
*              0 : not full
*              RAW_NULL_OBJECT / RAW_NOT_CALLED_BY_ISR / RAW_ERROR_OBJECT_TYPE on error
*              (NOTE: error codes share the RAW_U16 range with the 0/1 answer; callers must
*              validate arguments or compare against 1u/0u explicitly).
*
* Fix vs. original: the declaration of full_check_ret followed RAW_SR_ALLOC(), unlike every
* other function in this file (and invalid C89 if RAW_SR_ALLOC expands to a statement);
* declarations now come first.
************************************************************************************************************************
*/
RAW_U16 raw_queue_size_full_check(RAW_QUEUE_SIZE *p_q)
{
    RAW_U16 full_check_ret;

    RAW_SR_ALLOC();

#if (RAW_QUEUE_FUNCTION_CHECK > 0)

    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }

#endif

#if (CONFIG_RAW_ZERO_INTERRUPT > 0)

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

#endif

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_SIZE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    if (p_q->queue_current_msg >= p_q->queue_msg_size) {
        full_check_ret = 1u;
    }

    else {
        full_check_ret = 0u;
    }

    RAW_CRITICAL_EXIT();

    return full_check_ret;
}
/*
* Discard all pending messages in a size-queue: the in-use message chain is spliced back
* onto the free list in one step and the ring state is reset.
*
* Returns: RAW_SUCCESS, RAW_NULL_OBJECT, RAW_NOT_CALLED_BY_ISR, or RAW_ERROR_OBJECT_TYPE.
*
* NOTE(review): the splice links write->next onto the old free list and makes read the new
* free-list head — this presumes the in-use nodes from read to write form a linked chain;
* confirm against the queue-size post/receive implementation.
*/
RAW_OS_ERROR raw_queue_size_flush(RAW_QUEUE_SIZE *p_q)
{
    RAW_MSG_SIZE *p_msg;

    RAW_SR_ALLOC();

#if (RAW_QUEUE_SIZE_FUNCTION_CHECK > 0)

    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

#endif

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_SIZE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* Nothing to do when the queue is already empty. */
    if (p_q->queue_current_msg) {

        /* Append the old free list after the last in-use node ... */
        p_msg = p_q->write;
        p_msg->next = p_q->free_msg;

        /* ... and make the first in-use node the new free-list head. */
        p_q->free_msg = p_q->read;

        p_q->queue_current_msg = 0;
        p_q->read = 0;
        p_q->write = 0;
    }

    RAW_CRITICAL_EXIT();

    TRACE_QUEUE_SIZE_FLUSH(raw_task_active, p_q);

    return RAW_SUCCESS;
}
/*
* Register a callback invoked when the size-queue becomes full.
*
* Fix vs. original: p_q was stored through without any null check, although every sibling
* queue-size API validates it under RAW_QUEUE_SIZE_FUNCTION_CHECK; the same guarded check
* is added here.
*
* Returns: RAW_SUCCESS, RAW_NULL_OBJECT (when function checking is on), or
*          RAW_NOT_CALLED_BY_ISR in interrupt context.
*/
RAW_OS_ERROR raw_queue_size_full_register(RAW_QUEUE_SIZE *p_q, QUEUE_SIZE_FULL_CALLBACK callback_full)
{
    RAW_SR_ALLOC();

#if (RAW_QUEUE_SIZE_FUNCTION_CHECK > 0)

    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }

#endif

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

    /* Store under CPU-disable so the pointer update is atomic w.r.t. the queue path. */
    RAW_CPU_DISABLE();
    p_q->queue_size_full_callback = callback_full;
    RAW_CPU_ENABLE();

    return RAW_SUCCESS;
}
/*
* Delete an event-flag object: invalidate its type tag, wake every task blocked on it
* (they return with a "deleted" status), clear the flags, then reschedule.
*
* Returns: RAW_SUCCESS, RAW_NULL_OBJECT, RAW_NOT_CALLED_BY_ISR, or RAW_ERROR_OBJECT_TYPE.
*/
RAW_U16 raw_event_delete(RAW_EVENT *event_ptr)
{
    LIST *block_list_head;

    RAW_SR_ALLOC();

#if (RAW_EVENT_FUNCTION_CHECK > 0)

    if (event_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

#endif

    RAW_CRITICAL_ENTER();

    if (event_ptr->common_block_obj.object_type != RAW_EVENT_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    block_list_head = &event_ptr->common_block_obj.block_list;

    /* Invalidate the object first so no new pender can slip in. */
    event_ptr->common_block_obj.object_type = 0u;

    /* All tasks blocked on this event are woken up until the list is empty. */
    while (!is_list_empty(block_list_head)) {
        delete_pend_obj(list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
    }

    event_ptr->flags = 0u;

    RAW_CRITICAL_EXIT();

    TRACE_EVENT_DELETE(raw_task_active, event_ptr);

    /* Woken tasks may outrank the caller: give them the CPU now. */
    raw_sched();

    return RAW_SUCCESS;
}
/*
* Request a poll of process p: mark it as needing a poll and bump the global
* poll_requested counter (both under the critical section, since this may be
* called from interrupt context). Ignored for NULL or non-running processes.
*/
void process_poll(struct process *p)
{
    RAW_SR_ALLOC();

    if (p == 0) {
        return;
    }

    /* Only running/called processes can be polled. */
    if ((p->state != PROCESS_STATE_RUNNING) && (p->state != PROCESS_STATE_CALLED)) {
        return;
    }

    RAW_CRITICAL_ENTER();
    p->needspoll = 1;
    poll_requested++;
    RAW_CRITICAL_EXIT();
}
/*
************************************************************************************************************************
* Set system time.
*
* Description: This function is called to set the system tick counter.
*
* Arguments  : time is the new value for raw_tick_count
*
* Returns    : RAW_SUCCESS, or RAW_NOT_CALLED_BY_ISR if called from interrupt context.
*
* Note(s)    : The write is done under the critical section because raw_tick_count is also
*              updated by the tick interrupt.
************************************************************************************************************************
*/
RAW_OS_ERROR raw_system_time_set(RAW_TICK_TYPE time)
{
    RAW_SR_ALLOC();

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

    RAW_CRITICAL_ENTER();
    raw_tick_count = time;
    RAW_CRITICAL_EXIT();

    return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Finish interrupt.
*
* Description: Called when exiting an interrupt. Decrements the nesting counter and, when
*              this was the outermost interrupt and the scheduler is unlocked, switches to
*              the highest-priority ready task if it differs from the interrupted task.
*
* Arguments  : NONE
*
* Returns    : None.
*
* Note(s)    : Must be paired with the ISR-entry bookkeeping that incremented raw_int_nesting.
************************************************************************************************************************
*/
void raw_finish_int(void)
{
    RAW_SR_ALLOC();

#if (CONFIG_RAW_ISR_STACK_CHECK > 0)

    /* If you have no idea how to implement this function just write a blank port function. */
    port_isr_stack_check();

#endif

    /* It should not be zero here: every finish must match an entry. */
    RAW_ASSERT(raw_int_nesting != 0);

    USER_CPU_INT_DISABLE();

    raw_int_nesting--;

    /* If still interrupt nested just return. */
    if (raw_int_nesting) {
        USER_CPU_INT_ENABLE();
        return;
    }

    /* If the scheduler is locked then just return; the switch happens at unlock. */
    if (raw_sched_lock) {
        USER_CPU_INT_ENABLE();
        return;
    }

    /* Get the highest-priority ready task. */
    get_ready_task(&raw_ready_queue);

    /* If the current task is still the highest task then no interrupt switch is needed. */
    if (high_ready_obj == raw_task_active) {
        USER_CPU_INT_ENABLE();
        return;
    }

    TRACE_INT_TASK_SWITCH(raw_task_active, high_ready_obj);

    /* Switch to the highest task; this function is cpu related, thus needs to be ported. */
    raw_int_switch();

    USER_CPU_INT_ENABLE();
}
/*
* Delete a queue object: invalidate its type tag, wake every task blocked on it (they
* return with a "deleted" status), then reschedule.
*
* Returns: RAW_SUCCESS, RAW_NULL_OBJECT, RAW_NOT_CALLED_BY_ISR, or RAW_ERROR_OBJECT_TYPE.
*/
RAW_U16 raw_queue_delete(RAW_QUEUE *p_q)
{
    LIST *block_list_head;

    RAW_SR_ALLOC();

#if (RAW_QUEUE_FUNCTION_CHECK > 0)

    if (p_q == 0) {
        return RAW_NULL_OBJECT;
    }

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

#endif

    RAW_CRITICAL_ENTER();

    if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    block_list_head = &p_q->common_block_obj.block_list;

    /* Invalidate the object first so no new pender can slip in. */
    p_q->common_block_obj.object_type = 0u;

    /* All tasks blocked on this queue are woken up. */
    while (!is_list_empty(block_list_head)) {
        delete_pend_obj(list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
    }

    RAW_CRITICAL_EXIT();

    TRACE_QUEUE_DELETE(raw_task_active, p_q);

    /* Woken tasks may outrank the caller: give them the CPU now. */
    raw_sched();

    return RAW_SUCCESS;
}
/*
* Post a deferred object operation from ISR context: take a node off the interrupt-message
* free list, fill it with the operation parameters, and hand it to task 0 for later
* servicing. On task-0 post failure the node is pushed back onto the free list.
*
* Arguments: type      operation/message type dispatched by msg_event_handler
*            p_obj     target kernel object
*            p_void    message payload pointer
*            msg_size  payload size
*            flags     event flags for event-type operations
*            opt       option bits for the operation
*
* Returns: RAW_SUCCESS, RAW_INT_MSG_EXHAUSTED when the free list is empty, or the error
*          from raw_task_0_post.
*/
RAW_OS_ERROR int_msg_post(RAW_U8 type, void *p_obj, void *p_void, MSG_SIZE_TYPE msg_size, RAW_U32 flags, RAW_U8 opt)
{
    OBJECT_INT_MSG *msg_data;
    RAW_OS_ERROR task_0_post_ret;

    RAW_SR_ALLOC();

    RAW_CPU_DISABLE();

    /* Free list exhausted: count it and fail fast. */
    if (free_object_int_msg == 0) {
        int_msg_full++;
        RAW_CPU_ENABLE();
        TRACE_INT_MSG_EXHAUSTED();
        return RAW_INT_MSG_EXHAUSTED;
    }

    /* Pop the head node and fill it while still protected. */
    msg_data = free_object_int_msg;
    free_object_int_msg->type = type;
    free_object_int_msg->object = p_obj;
    free_object_int_msg->msg = p_void;
    free_object_int_msg->msg_size = msg_size;
    free_object_int_msg->event_flags = flags;
    free_object_int_msg->opt = opt;
    free_object_int_msg = free_object_int_msg->next;

    RAW_CPU_ENABLE();

    /* raw_task_0_post may fail here due to full task0 events. */
    task_0_post_ret = raw_task_0_post(&msg_event_handler, type, msg_data);

    if (task_0_post_ret == RAW_SUCCESS) {
        TRACE_INT_MSG_POST(type, p_obj, p_void, msg_size, flags, opt);
    }

    else {
        /* If raw_task_0_post fails, the node is taken back onto the free list. */
        RAW_CPU_DISABLE();
        msg_data->next = free_object_int_msg;
        free_object_int_msg = msg_data;
        RAW_CPU_ENABLE();
    }

    return task_0_post_ret;
}
/*
* Service poll requests: walk the process list and deliver PROCESS_EVENT_POLL to every
* process whose needspoll flag is set.
*
* NOTE(review): poll_requested is decremented by exactly one per call although the loop
* clears ALL pending needspoll flags — presumably the caller loops while poll_requested
* is non-zero, so leftover counts just cause harmless extra scans; confirm against the
* scheduler loop before changing.
*/
static void do_poll(void)
{
    struct process *p;

    RAW_SR_ALLOC();

    /* Counter shared with process_poll(), which may run in ISR context. */
    RAW_CRITICAL_ENTER();
    poll_requested--;
    RAW_CRITICAL_EXIT();

    /* Call the processes that need to be polled. */
    for(p = process_list; p != 0; p = p->next) {
        if(p->needspoll) {
            p->state = PROCESS_STATE_RUNNING;
            p->needspoll = 0;
            call_process(p, PROCESS_EVENT_POLL, 0);
        }
    }
}
/*
* Delete a semaphore object: invalidate its type tag, wake every task blocked on it
* (they return with a "deleted" status), then reschedule.
*
* Returns: RAW_SUCCESS, RAW_NULL_OBJECT, RAW_NOT_CALLED_BY_ISR, or RAW_ERROR_OBJECT_TYPE.
*/
RAW_OS_ERROR raw_semaphore_delete(RAW_SEMAPHORE *semaphore_ptr)
{
    LIST *block_list_head;

    RAW_SR_ALLOC();

#if (RAW_SEMA_FUNCTION_CHECK > 0)

    if (semaphore_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

#endif

    RAW_CRITICAL_ENTER();

    if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    block_list_head = &semaphore_ptr->common_block_obj.block_list;

    /* Invalidate the object first so no new pender can slip in. */
    semaphore_ptr->common_block_obj.object_type = RAW_OBJ_TYPE_NONE;

    /* All tasks blocked on this semaphore are woken up. */
    while (!is_list_empty(block_list_head)) {
        delete_pend_obj(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
    }

    RAW_CRITICAL_EXIT();

    TRACE_SEMAPHORE_DELETE(raw_task_active, semaphore_ptr);

    /* Woken tasks may outrank the caller: give them the CPU now. */
    raw_sched();

    return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Release a fixed-size block back to a memory pool.
*
* Description: Pushes block_ptr onto the head of the pool's free list. Fixed-size pools
*              never fragment, since every block has the same size.
*
* Arguments  : pool_ptr  is the memory pool the block came from
*              block_ptr is the address being returned to the pool
*
* Returns    : RAW_SUCCESS, RAW_NULL_OBJECT, or RAW_ERROR_OBJECT_TYPE.
************************************************************************************************************************
*/
RAW_U16 raw_block_release(MEM_POOL *pool_ptr, RAW_VOID *block_ptr)
{
    RAW_U8 *released_block;

    RAW_SR_ALLOC();

#if (RAW_BLOCK_FUNCTION_CHECK > 0)

    if (block_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    if (pool_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

#endif

    if (pool_ptr->common_block_obj.object_type != RAW_BLOCK_OBJ_TYPE) {
        return RAW_ERROR_OBJECT_TYPE;
    }

    released_block = (RAW_U8 *)block_ptr;

    /* The free list is shared with allocators in other contexts, so the push
       must be atomic with respect to interrupts. */
    RAW_CPU_DISABLE();

    /* Head insertion: the first bytes of a free block store the next-pointer. */
    *((RAW_U8 **)released_block) = pool_ptr->raw_block_pool_available_list;
    pool_ptr->raw_block_pool_available_list = released_block;
    pool_ptr->raw_block_pool_available++;

    RAW_CPU_ENABLE();

    /* Return completion status. */
    return RAW_SUCCESS;
}
/*
* Delete a mutex object: invalidate its type tag, release it from its current owner
* (restoring the owner's priority), wake every blocked task (they return with a
* "deleted" status), then reschedule.
*
* Fix vs. original: the raw_int_nesting check present in every sibling delete API
* (queue/event/semaphore delete) was missing — this function calls release_mutex() and
* raw_sched(), neither of which is ISR-safe.
*
* Returns: RAW_SUCCESS, RAW_NULL_OBJECT, RAW_NOT_CALLED_BY_ISR, or RAW_ERROR_OBJECT_TYPE.
*/
RAW_OS_ERROR raw_mutex_delete(RAW_MUTEX *mutex_ptr)
{
    LIST *block_list_head;

    RAW_SR_ALLOC();

#if (RAW_MUTEX_FUNCTION_CHECK > 0)

    if (mutex_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    /* Fix: deleting a mutex reschedules and adjusts owner priority — not ISR-safe. */
    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }

#endif

    RAW_CRITICAL_ENTER();

    if (mutex_ptr->common_block_obj.object_type != RAW_MUTEX_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    block_list_head = &mutex_ptr->common_block_obj.block_list;

    /* Invalidate the object first so no new pender can slip in. */
    mutex_ptr->common_block_obj.object_type = RAW_OBJ_TYPE_NONE;

    /* If someone owns the mutex, release it (undoing any priority inheritance). */
    if (mutex_ptr->mtxtsk) {
        release_mutex(mutex_ptr->mtxtsk, mutex_ptr);
    }

    /* All tasks blocked on this mutex are woken up. */
    while (!is_list_empty(block_list_head)) {
        delete_pend_obj(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
    }

    RAW_CRITICAL_EXIT();

    TRACE_MUTEX_DELETE(raw_task_active, mutex_ptr);

    /* Woken tasks may outrank the caller: give them the CPU now. */
    raw_sched();

    return RAW_SUCCESS;
}
/*
************************************************************************************************************************
* Schedule work on a specific work queue.
*
* Description: Takes a message node off the global free list, fills it with the handler and
*              its arguments, and posts it to the work queue. On post failure the node is
*              returned to the free list.
*
* Arguments  : wq      is the address of the work queue object
*              arg     is the integer argument passed to the handler
*              msg     is the message pointer passed to the handler
*              handler is the function the worker task will run
*
* Returns    : RAW_SUCCESS
*              RAW_WORK_QUEUE_MSG_MAX : no free message nodes (need more work_queue_internal_msg)
*              other                  : error from raw_queue_end_post (e.g. queue full)
*
* Note(s)    : This API can be called by interrupt or task.
*
* Fix vs. original: removed the empty `if (ret == RAW_SUCCESS) { }` success branch; the
* rollback now sits in a single inverted condition.
************************************************************************************************************************
*/
RAW_OS_ERROR sche_work_queue(WORK_QUEUE_STRUCT *wq, RAW_U32 arg, void *msg, WORK_QUEUE_HANDLER handler)
{
    OBJECT_WORK_QUEUE_MSG *msg_data;
    RAW_OS_ERROR ret;

    RAW_SR_ALLOC();

    RAW_CPU_DISABLE();

    /* Free list exhausted. */
    if (free_work_queue_msg == 0) {
        RAW_CPU_ENABLE();
        return RAW_WORK_QUEUE_MSG_MAX;
    }

    /* Pop the head node and fill it while still protected. */
    msg_data = free_work_queue_msg;
    free_work_queue_msg->arg = arg;
    free_work_queue_msg->msg = msg;
    free_work_queue_msg->handler = handler;
    free_work_queue_msg = free_work_queue_msg->next;

    RAW_CPU_ENABLE();

    ret = raw_queue_end_post(&wq->queue, msg_data);

    /* Post failed (e.g. queue full): push the node back onto the free list. */
    if (ret != RAW_SUCCESS) {
        RAW_CPU_DISABLE();
        msg_data->next = free_work_queue_msg;
        free_work_queue_msg = msg_data;
        RAW_CPU_ENABLE();
    }

    return ret;
}
/*
* Idle task body: runs whenever nothing else is ready. Increments raw_idle_count
* (with interrupts disabled, since the counter may be read elsewhere) and gives the
* optional user hook a chance to run.
*/
RAW_VOID raw_idle_task(void *p_arg)
{
    RAW_SR_ALLOC();

    (void)p_arg;                         /* Make compiler happy ^_^ */

    for (;;) {

        USER_CPU_INT_DISABLE();
        raw_idle_count++;
        USER_CPU_INT_ENABLE();

#if (CONFIG_RAW_USER_HOOK > 0)
        raw_idle_coroutine_hook();
#endif
    }
}