Example No. 1
void idle_tick_isr(void) 
{

	ACTIVE_EVENT_STRUCT *a;
	
	LIST *head;
	LIST *iter;
	LIST *iter_temp;

	head = &raw_idle_tick_head;
	iter = head->next;

	/*if list is not empty*/
 	while (iter != head) {

		a =  raw_list_entry(iter, ACTIVE_EVENT_STRUCT, idle_tick_list);
		iter_temp =  iter->next;

		if (a->tick_ctr) {
			--a->tick_ctr;
			
			if (a->tick_ctr == 0) {
				list_delete(iter);
				idle_event_end_post(a, STM_TIMEOUT_SIG, 0);
			}
		}
		
		iter = iter_temp;
 	}

	
}
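The raw_list_entry() calls used throughout these examples follow the standard intrusive-list (container_of) pattern: each object embeds a LIST node, and the macro recovers the enclosing object from a pointer to that node. Below is a minimal, self-contained sketch of that idea in plain C; the names LIST_SKETCH, LIST_ENTRY_SKETCH and ACTIVE_EVENT_SKETCH are illustrative only, and the real RAW OS definitions may differ.

/* Minimal sketch of the intrusive-list / container_of pattern behind raw_list_entry(). */
#include <stddef.h>
#include <stdio.h>

typedef struct list_node {
	struct list_node *next;
	struct list_node *prev;
} LIST_SKETCH;

/* Recover the enclosing object from a pointer to its embedded list node. */
#define LIST_ENTRY_SKETCH(node, type, member) \
	((type *)((char *)(node) - offsetof(type, member)))

typedef struct {
	int tick_ctr;
	LIST_SKETCH idle_tick_list;	/* embedded node, like the one in ACTIVE_EVENT_STRUCT */
} ACTIVE_EVENT_SKETCH;

int main(void)
{
	ACTIVE_EVENT_SKETCH a = { 3, { 0, 0 } };
	ACTIVE_EVENT_SKETCH *p = LIST_ENTRY_SKETCH(&a.idle_tick_list,
	                                           ACTIVE_EVENT_SKETCH,
	                                           idle_tick_list);

	printf("%d\n", p->tick_ctr);	/* prints 3: p points back to 'a' */
	return 0;
}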
Example No. 2
static void timer_list_priority_insert(LIST   *head, RAW_TIMER *timer_ptr)
{
	RAW_TICK_TYPE val;
	
	LIST *q, *list_start, *list_end;
	
	RAW_TIMER  *task_iter_temp;

	list_start = list_end = head;
	val = timer_ptr->remain;
	
	for (q = list_start->next; q != list_end; q = q->next) {

		task_iter_temp = raw_list_entry(q, RAW_TIMER, timer_list);

		/*the list is sorted by remaining time*/
		
		if ((task_iter_temp->match - raw_timer_count) > val) {
			break;
		}
	}

	list_insert(q, &timer_ptr->timer_list);

}
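The comparison (task_iter_temp->match - raw_timer_count) > val stays correct across counter wraparound, assuming RAW_TICK_TYPE is an unsigned integer type: unsigned subtraction is modular, so the remaining time is computed correctly even after the match value has wrapped past zero. A small standalone demonstration, modelling the tick type as uint32_t (an assumption):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t now   = 0xFFFFFFF0u;	/* tick counter close to wrapping */
	uint32_t match = 0x00000010u;	/* timer expires 32 ticks from now, after the wrap */

	/* Unsigned subtraction is modulo 2^32, so the remaining time is still
	   computed correctly instead of producing a huge bogus value. */
	printf("remaining ticks: %u\n", (unsigned)(match - now));	/* 32 */
	return 0;
}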
Example No. 3
void calculate_time_slice(RAW_U8 task_prio)
{
	RAW_TASK_OBJ   *task_ptr;
	LIST *head;

	RAW_SR_ALLOC();

	head = &raw_ready_queue.task_ready_list[task_prio];
	 
	RAW_CRITICAL_ENTER();
	
	/*if the ready list is empty, just return because there is nothing to calculate*/
	if (is_list_empty(head)) {

		RAW_CRITICAL_EXIT();
		return;
	}

	/*Always look at the first task on the ready list*/
	task_ptr = raw_list_entry(head->next, RAW_TASK_OBJ, task_list);

	/*SCHED_FIFO does not have a time slice, just return*/
	if (task_ptr->sched_way == SCHED_FIFO) {
		
		RAW_CRITICAL_EXIT();
		return;
	}

	/*there is only one task on this ready list, so there is no need to calculate a time slice*/
	/*the idle task always satisfies this condition*/
	if (head->next->next == head)  {
		
		RAW_CRITICAL_EXIT();
		return;
		
	}

	if (task_ptr->time_slice) {
		task_ptr->time_slice--;
	}

	/*if the current active task still has time slice left, just return*/
	if (task_ptr->time_slice) {               
		RAW_CRITICAL_EXIT();
		return;
	}

	/*Move current active task to the end of ready list for the same priority*/
	move_to_ready_list_end(&raw_ready_queue, task_ptr);

	/*restore the task time slice*/ 
	task_ptr->time_slice = task_ptr->time_total;  
	
	RAW_CRITICAL_EXIT();
}
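A standalone model of the rotation performed above, hypothetical and not RAW OS code: the head task's slice is decremented once per tick, and only when it reaches zero is the task moved to the back of its priority's ready list and its slice refilled from its total.

#include <stdio.h>

int main(void)
{
	int slice = 2;
	int total = 2;
	const char *order[3] = { "A", "B", "C" };	/* ready list of one priority, A is running */
	int tick;

	for (tick = 1; tick <= 2; tick++) {
		if (slice) {
			slice--;
		}
		if (slice == 0) {
			/* rotate the running task to the end: B C A */
			const char *t = order[0];
			order[0] = order[1];
			order[1] = order[2];
			order[2] = t;
			slice = total;	/* refill the slice for A's next turn */
			printf("tick %d: rotated, head is now %s\n", tick, order[0]);
		}
	}
	return 0;
}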
Example No. 4
/*
 * Release the lock, delete it from the task's mutex list, and then adjust the
 * priority of the task.
 * Set the priority to the highest of the following:
 *	(A) The highest priority among all mutexes that the 'tcb' task still holds.
 *	(B) The base priority of the 'tcb' task.
 */
static void release_mutex(RAW_TASK_OBJ *tcb, RAW_MUTEX *relmtxcb)
{
	RAW_MUTEX	*mtxcb, **prev;
	RAW_U8	newpri, pri;
	RAW_TASK_OBJ *first_block_task;
	LIST *block_list_head;
	
	/* (B) The base priority of task */
	newpri = tcb->bpriority;

	/* (A) The highest priority among the mutexes still held */
	pri = newpri;
	prev = &tcb->mtxlist;
	while ((mtxcb = *prev) != 0) {
		if (mtxcb == relmtxcb) {
			/* Delete this mutex from the list; the previous link now points to the next one */
			*prev = mtxcb->mtxlist;
			continue;
		}

		switch (mtxcb->policy) {
		  case RAW_MUTEX_CEILING_POLICY:
			pri = mtxcb->ceiling_prio;
			break;
			
		  case RAW_MUTEX_INHERIT_POLICY:
		  	
		  	block_list_head = &mtxcb->common_block_obj.block_list;
			
			if (!is_list_empty(block_list_head)) {
				first_block_task = raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list); 
				pri = first_block_task->priority;
			}
			
			break;
			
		  default:
			break;
		}
		if (newpri > pri) {
			newpri = pri;
		}

		prev = &mtxcb->mtxlist;
	}

	if ( newpri != tcb->priority ) {
		/* Change the priority of the task holding the locks */
		change_internal_task_priority(tcb, newpri);

		TRACE_MUTEX_RELEASE(raw_task_active, tcb, newpri);
	}
	
}
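A worked example of the recomputation above, using hypothetical numbers (in RAW OS a numerically smaller value is the higher priority, which is why the loop keeps the smallest candidate): base priority 10, one remaining ceiling mutex with ceiling_prio 5, and one inherit mutex whose highest waiter has priority 7 leave the task at priority 5.

#include <stdio.h>

int main(void)
{
	unsigned newpri = 10u;	/* (B) base priority of the task */
	unsigned held[2] = { 5u, 7u };	/* (A) per-mutex demands: ceiling 5, highest waiter 7 */
	unsigned i;

	for (i = 0u; i < 2u; i++) {
		if (newpri > held[i]) {	/* keep the numerically smallest, i.e. the highest priority */
			newpri = held[i];
		}
	}

	printf("new priority: %u\n", newpri);	/* 5 */
	return 0;
}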
Example No. 5
RAW_U16 raw_event_delete(RAW_EVENT *event_ptr)
{
	LIST *block_list_head;

	RAW_SR_ALLOC();

	#if (RAW_EVENT_FUNCTION_CHECK > 0)

	if (event_ptr == 0) {
		return RAW_NULL_OBJECT;
	}

	if (raw_int_nesting) {
		
		return RAW_NOT_CALLED_BY_ISR;
		
	}

	#endif

	RAW_CRITICAL_ENTER();

	if (event_ptr->common_block_obj.object_type != RAW_EVENT_OBJ_TYPE) {

		RAW_CRITICAL_EXIT();
		
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &event_ptr->common_block_obj.block_list;
	
	event_ptr->common_block_obj.object_type = 0u;
	/*All tasks blocked on this event are woken up until the list is empty*/
	while (!is_list_empty(block_list_head)) {
		
		delete_pend_obj(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));	
	}    

	event_ptr->flags = 0u;

	RAW_CRITICAL_EXIT();

	TRACE_EVENT_DELETE(raw_task_active, event_ptr);

	raw_sched();  

	return RAW_SUCCESS;
}
Example No. 6
RAW_OS_ERROR raw_queue_delete(RAW_QUEUE *p_q)
{
	LIST  *block_list_head;
	
	RAW_SR_ALLOC();

	#if (RAW_QUEUE_FUNCTION_CHECK > 0)

	if (p_q == 0) {
		
		return RAW_NULL_OBJECT;
	}

	if (raw_int_nesting) {
		
		return RAW_NOT_CALLED_BY_ISR;
		
	}
	
	#endif
	
	RAW_CRITICAL_ENTER();

	if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
		
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &p_q->common_block_obj.block_list;
	
	p_q->common_block_obj.object_type = 0u;
	
	/*All tasks blocked on this queue are woken up*/
	while (!is_list_empty(block_list_head))  {
		delete_pend_obj(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));	
	}                             
	
	RAW_CRITICAL_EXIT();

	TRACE_QUEUE_DELETE(raw_task_active, p_q);

	raw_sched(); 
	
	return RAW_SUCCESS;
	
}
Example No. 7
RAW_OS_ERROR raw_mutex_delete(RAW_MUTEX *mutex_ptr)
{
	LIST *block_list_head;
	
	RAW_SR_ALLOC();

	#if (RAW_MUTEX_FUNCTION_CHECK > 0)

	if (mutex_ptr == 0) {
		return RAW_NULL_OBJECT;
	}
	
	#endif
	
	RAW_CRITICAL_ENTER();

	if (mutex_ptr->common_block_obj.object_type != RAW_MUTEX_OBJ_TYPE) {
		
		RAW_CRITICAL_EXIT();  
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &mutex_ptr->common_block_obj.block_list;
	
	mutex_ptr->common_block_obj.object_type = RAW_OBJ_TYPE_NONE;

	if (mutex_ptr->mtxtsk) {
		release_mutex(mutex_ptr->mtxtsk, mutex_ptr);
	}
	
	/*All tasks blocked on this mutex are woken up*/
	while (!is_list_empty(block_list_head)) {
		delete_pend_obj(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));	
	}              

	RAW_CRITICAL_EXIT();

	TRACE_MUTEX_DELETE(raw_task_active, mutex_ptr);
		
	raw_sched(); 
	
	return RAW_SUCCESS;
}
Example No. 8
void raw_task_free_mutex(RAW_TASK_OBJ *tcb)
{
	RAW_MUTEX	*mtxcb, *next_mtxcb;
	RAW_TASK_OBJ	*next_tcb;
	LIST 				*block_list_head;
	
	next_mtxcb = tcb->mtxlist;
	while ((mtxcb = next_mtxcb) != 0) {
		next_mtxcb = mtxcb->mtxlist;

		block_list_head = &mtxcb->common_block_obj.block_list;
		
		if (!is_list_empty(block_list_head)) {
			
			next_tcb = raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);

			/* Wake the waiting task */
			raw_wake_object(next_tcb);

			/* Change the mutex owner task */
			mtxcb->mtxtsk = next_tcb;
			mtxcb->mtxlist = next_tcb->mtxlist;
			next_tcb->mtxlist = mtxcb;

			if (mtxcb->policy == RAW_MUTEX_CEILING_POLICY) {
				if (next_tcb->priority > mtxcb->ceiling_prio) {
				/* Raise the priority of the task that
				   got the lock up to the ceiling priority */
					change_internal_task_priority(next_tcb, mtxcb->ceiling_prio);
				}
			}
		} 

		else {
			/* No wait task */
			mtxcb->mtxtsk = 0;
		}
		
	}
	
}
Example No. 9
/*
 * Limit a task priority change according to the mutexes the task is involved with:
 *    1. If the 'tcb' task holds mutexes, the priority cannot be set lower than
 *	the highest priority demanded by those mutexes. In that case, return
 *	that highest priority instead of the requested one.
 *    2. If a mutex with the ceiling policy is held, or is being waited on, the
 *	priority cannot be set higher than the lowest of those ceiling
 *	priorities. In that case, set *error to RAW_EXCEED_CEILING_PRIORITY
 *	and return the ceiling priority.
 *    3. Otherwise, return 'priority' unchanged.
 */
RAW_U8 chg_pri_mutex(RAW_TASK_OBJ *tcb, RAW_U8 priority, RAW_OS_ERROR *error)
{
	RAW_MUTEX	*mtxcb;
	RAW_U8	hi_pri, low_pri, pri;
	RAW_TASK_OBJ *first_block_task;
	LIST *block_list_head;
	
	hi_pri  = priority;
	
	/*0 is the highest priority in the system*/
	low_pri = 0u;
	
	mtxcb = (RAW_MUTEX	*)(tcb->block_obj);
	
	if (mtxcb) {

		/*if the task is blocked on a mutex*/
		if (mtxcb->common_block_obj.object_type == RAW_MUTEX_OBJ_TYPE) {
			
			if (mtxcb->policy == RAW_MUTEX_CEILING_POLICY) {
				pri = mtxcb->ceiling_prio;
				
				if (pri > low_pri) {
					low_pri = pri;
				}
			}
		}
	}

	/* Mutexes held by this task */
	pri = hi_pri;
	for (mtxcb = tcb->mtxlist; mtxcb != 0; mtxcb = mtxcb->mtxlist) {
		switch (mtxcb->policy) {
			
		  case RAW_MUTEX_CEILING_POLICY:
			pri = mtxcb->ceiling_prio;
			if (pri > low_pri) {
				low_pri = pri;
			}
			break;
			
		  case RAW_MUTEX_INHERIT_POLICY:
		  	
			block_list_head = &mtxcb->common_block_obj.block_list;
			
			if (!is_list_empty(block_list_head)) {
				first_block_task = raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list); 
				pri = first_block_task->priority;
			}
			
			break;
			
		  default:
			/* nothing to do */
			break;
		}

		/*cannot set a priority lower than the highest priority demanded by the held mutexes*/
		if (pri < hi_pri) {
			hi_pri = pri;
		}
	}

	if (priority < low_pri) {
		
		*error = RAW_EXCEED_CEILING_PRIORITY;
		return low_pri;
	}

	*error = RAW_SUCCESS;
	return hi_pri;
}
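A tiny numeric illustration of rule 2 above, with hypothetical values: the task is involved with a ceiling-policy mutex whose ceiling_prio is 5, and the caller requests priority 3, which is numerically below (i.e. higher than) the ceiling allows.

#include <stdio.h>

int main(void)
{
	unsigned ceiling = 5u;	/* ceiling_prio of the involved ceiling-policy mutex */
	unsigned requested = 3u;	/* numerically smaller, i.e. higher than the ceiling permits */

	if (requested < ceiling) {
		printf("RAW_EXCEED_CEILING_PRIORITY, limited to %u\n", ceiling);
	}
	return 0;
}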
Example No. 10
/*
************************************************************************************************************************
*                                       Release a mutex
*
* Description: This function is called to release a mutex.
*
* Arguments  : mutex_ptr is the address of the mutex object to be released
*
*
*
* Returns    : RAW_SUCCESS: raw os return success
* Note(s)    : The highest priority task pending on this mutex, if any, is woken up and becomes the new owner.
*
*
************************************************************************************************************************
*/
RAW_OS_ERROR raw_mutex_put(RAW_MUTEX *mutex_ptr)
{

	LIST 				*block_list_head;
	RAW_TASK_OBJ   		*tcb;
	
	RAW_SR_ALLOC();

	#if (RAW_MUTEX_FUNCTION_CHECK > 0)

	if (mutex_ptr == 0) {
		return RAW_NULL_OBJECT;
	}

	if (raw_int_nesting) {

		return RAW_NOT_CALLED_BY_ISR;
	}
	
	#endif

	RAW_CRITICAL_ENTER();

	if (mutex_ptr->common_block_obj.object_type != RAW_MUTEX_OBJ_TYPE) {
		
		RAW_CRITICAL_EXIT();  
		return RAW_ERROR_OBJECT_TYPE;
	}


	/*The mutex must be released by its owner*/
	if (raw_task_active != mutex_ptr->mtxtsk) {           
		RAW_CRITICAL_EXIT();
		return RAW_MUTEX_NOT_RELEASE_BY_OCCYPY;
	}

	mutex_ptr->owner_nested--;

	if (mutex_ptr->owner_nested) {

		RAW_CRITICAL_EXIT();
		return RAW_MUTEX_OWNER_NESTED;

	}

	release_mutex(raw_task_active, mutex_ptr);


	block_list_head = &mutex_ptr->common_block_obj.block_list;

	/*if no task is blocked on this list, just return*/
	if (is_list_empty(block_list_head)) {        
		/* No wait task */
		mutex_ptr->mtxtsk = 0;                                    
		RAW_CRITICAL_EXIT();

		TRACE_MUTEX_RELEASE_SUCCESS(raw_task_active, mutex_ptr);
		
		return RAW_SUCCESS;
	}

	
	/* there is at least one task blocked on this mutex object */
	tcb = raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);

	/*Wake up the new owner task, which is the highest priority task on the list*/
	raw_wake_object(tcb);

	/* Change the mutex owner task */
	mutex_ptr->mtxtsk = tcb;
	mutex_ptr->mtxlist = tcb->mtxlist;
	tcb->mtxlist = mutex_ptr;
	mutex_ptr->owner_nested = 1u;
	
	if (mutex_ptr->policy == RAW_MUTEX_CEILING_POLICY) {
		
		if (tcb->priority > mutex_ptr->ceiling_prio) {
		/* Raise the priority of the task that
		   got the lock up to the ceiling priority */
			change_internal_task_priority(tcb, mutex_ptr->ceiling_prio);
			
		}
		
	}

	TRACE_MUTEX_WAKE_TASK(raw_task_active, tcb);
	
	RAW_CRITICAL_EXIT();

	raw_sched();                                       

	return RAW_SUCCESS;
	
}
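A minimal usage sketch built only from calls that appear in these examples (raw_mutex_get, raw_mutex_put, RAW_WAIT_FOREVER, RAW_SUCCESS); creation and initialization of the mutex and of the surrounding task are assumed to happen elsewhere and are not shown in this listing.

static RAW_MUTEX demo_mutex;	/* assumed to be created/initialized elsewhere */

static void demo_critical_section(void)
{
	/* Block until the mutex is available, then release it. raw_mutex_put()
	   only hands the mutex over once owner_nested drops back to zero, as
	   shown in the function above. */
	if (raw_mutex_get(&demo_mutex, RAW_WAIT_FOREVER) == RAW_SUCCESS) {

		/* ... access the shared resource ... */

		raw_mutex_put(&demo_mutex);
	}
}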
Example No. 11
RAW_U16 event_set(RAW_EVENT *event_ptr, RAW_U32 flags_to_set, RAW_U8 set_option)
{

	LIST *iter;
	LIST *event_head_ptr;
	LIST *iter_temp;
	RAW_TASK_OBJ *task_ptr;
	
	RAW_U8 status;
	RAW_U32 current_event_flags;
	
	RAW_SR_ALLOC();

	status = RAW_FALSE;
	
	RAW_CRITICAL_ENTER();

	if (event_ptr->common_block_obj.object_type != RAW_EVENT_OBJ_TYPE) {

		RAW_CRITICAL_EXIT();
		
		return RAW_ERROR_OBJECT_TYPE;
	}

	event_head_ptr = &event_ptr->common_block_obj.block_list;

	/*if the set_option is AND_MASK, just clear the flags and return immediately*/
	if (set_option & RAW_FLAGS_AND_MASK)  {

		event_ptr->flags &= flags_to_set;

		RAW_CRITICAL_EXIT();
		return RAW_SUCCESS;
	}
	
	/*if it is the OR mask, set the flags and continue*/
	else  {

		event_ptr->flags |= flags_to_set;    
	}

	current_event_flags = event_ptr->flags;
	iter = event_head_ptr->next;

	/*if list is not empty*/
 	while (iter != event_head_ptr) {

		task_ptr =  raw_list_entry(iter, RAW_TASK_OBJ, task_list);
		iter_temp =  iter->next;
		
		if (task_ptr->raw_suspend_option & RAW_FLAGS_AND_MASK)  {

			if ((current_event_flags  & task_ptr ->raw_suspend_flags) == task_ptr ->raw_suspend_flags) {
				status =  RAW_TRUE;
			}
			
			else {
				status =   RAW_FALSE;
			}
		}

		
		else {

			if (current_event_flags  &  task_ptr ->raw_suspend_flags) {
				
				status =  RAW_TRUE;
			}
			
			else {
				status =  RAW_FALSE;
			}
		}

		
		if (status == RAW_TRUE) {

			(*(RAW_U32 *)(task_ptr->raw_additional_suspend_info)) = current_event_flags;
			
			/*the task's wait condition is met, so wake this task*/
			raw_wake_object(task_ptr);

			/*clear the consumed flags if the task requested it*/
			if (task_ptr->raw_suspend_option & RAW_FLAGS_CLEAR_MASK) {

				event_ptr->flags &= ~(task_ptr ->raw_suspend_flags);
			}

			TRACE_EVENT_WAKE(raw_task_active, task_ptr);

		}

		iter = iter_temp;

 	}

	RAW_CRITICAL_EXIT();

	raw_sched();
	
	return RAW_SUCCESS;

}
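A standalone illustration of the flag semantics above in plain C, not the RAW OS API: an OR-style set adds bits, the AND_MASK option is used to clear bits, and a waiter's condition is either "all requested bits present" (AND wait) or "any requested bit present" (OR wait).

#include <stdio.h>

int main(void)
{
	unsigned flags = 0u;	/* plays the role of event_ptr->flags */
	unsigned want = 0x06u;	/* a waiter's raw_suspend_flags: bits 1 and 2 */

	flags |= 0x05u;	/* OR-style set: bits 0 and 2 become pending */
	flags &= 0x04u;	/* AND_MASK-style call: keeps only bit 2, clearing bit 0 */

	printf("AND wait satisfied: %d\n", (flags & want) == want);	/* 0: needs both bits */
	printf("OR  wait satisfied: %d\n", (flags & want) != 0u);	/* 1: any bit is enough */
	return 0;
}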
Example No. 12
RAW_OS_ERROR msg_size_post(RAW_QUEUE_SIZE *p_q, RAW_MSG_SIZE *p_void,  MSG_SIZE_TYPE size,  RAW_U8 opt_send_method, RAW_U8 opt_wake_all)             
{
	
	LIST *block_list_head;
	
	RAW_MSG_SIZE *msg_temp;
	RAW_MSG_SIZE *p_msg_in; 
	
 	RAW_SR_ALLOC();
	
	RAW_CRITICAL_ENTER();

	if (p_q->common_block_obj.object_type != RAW_QUEUE_SIZE_OBJ_TYPE) {

		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &p_q->common_block_obj.block_list;

	/*queue is full condition!*/
	if (p_q->queue_current_msg >= p_q->queue_msg_size) {  
	
		RAW_CRITICAL_EXIT();

		TRACE_QUEUE_SIZE_MSG_MAX(raw_task_active, p_q, p_void, size, opt_send_method);

		if (p_q->queue_size_full_callback) {

			p_q->queue_size_full_callback(p_q, p_void, size);
		}
		
		return RAW_MSG_MAX;
		
	}

	/*Queue is not full here; if there is no blocked receiving task*/
	if (is_list_empty(block_list_head)) {        

		/*delete msg from free msg list*/
		msg_temp             = p_q->free_msg;                  
		p_q->free_msg 	= 	msg_temp->next;
		
		 /* If it is the first message placed in the queue*/
		if (p_q->queue_current_msg == 0) {            
			p_q->write         = msg_temp;                    
			p_q->read        = msg_temp;
			
		} 
		
		else {


			if (opt_send_method == SEND_TO_END)  {

				p_msg_in           = p_q->write;           
				p_msg_in->next  = msg_temp;
				msg_temp->next = 0;
				p_q->write     = msg_temp;

			}

			else {

				msg_temp->next = p_q->read;          
				p_q->read = msg_temp;                 

			}
			
			
		}

		p_q->queue_current_msg++;

		if (p_q->queue_current_msg > p_q->peak_numbers) {

			p_q->peak_numbers = p_q->queue_current_msg;
		}
		
		/*Assign value to msg*/
		msg_temp->msg_ptr = p_void;                               
		msg_temp->msg_size = size;
		
		RAW_CRITICAL_EXIT();

		TRACE_QUEUE_SIZE_MSG_POST(raw_task_active, p_q, p_void, size, opt_send_method);
		
		return RAW_SUCCESS;
	}

	/*wake all the tasks blocked on this queue*/
	if (opt_wake_all) {

		while (!is_list_empty(block_list_head)) {
			wake_send_msg_size(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list),  p_void, size);

			TRACE_QUEUE_SIZE_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, size, opt_wake_all);
		}
	}
	
	/*wake the highest priority task blocked on this queue and send the msg to it*/
	else {
		
		wake_send_msg_size(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list),  p_void, size);

		TRACE_QUEUE_SIZE_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, size, opt_wake_all);
	}
	
	RAW_CRITICAL_EXIT();

	raw_sched();    
	return RAW_SUCCESS;
}
Example No. 13
RAW_OS_ERROR msg_post(RAW_QUEUE *p_q, void *p_void, RAW_U8 opt_send_method, RAW_U8 opt_wake_all)             
{
	LIST *block_list_head;

 	RAW_SR_ALLOC();

	RAW_CRITICAL_ENTER();

	if (p_q->common_block_obj.object_type != RAW_QUEUE_OBJ_TYPE) {
		
		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &p_q->common_block_obj.block_list;
	
	if (p_q->msg_q.current_numbers >= p_q->msg_q.size) {  

		RAW_CRITICAL_EXIT();
		
		TRACE_QUEUE_MSG_MAX(raw_task_active, p_q, p_void, opt_send_method); 
		
		return RAW_MSG_MAX;
	}


	/*Queue is not full here; if there is no blocked receiving task*/
	if (is_list_empty(block_list_head)) {        

		p_q->msg_q.current_numbers++;                                

		/*update peak_numbers for debug*/
		if (p_q->msg_q.current_numbers > p_q->msg_q.peak_numbers) {

			p_q->msg_q.peak_numbers = p_q->msg_q.current_numbers;
		}
		
		if (opt_send_method == SEND_TO_END)  {

			*p_q->msg_q.write++ = p_void;                              

			if (p_q->msg_q.write == p_q->msg_q.queue_end) {   
				
				p_q->msg_q.write = p_q->msg_q.queue_start;
				
			}   

		}

		else {

			 /* Wrap read pointer to end if we are at the 1st queue entry */
			if (p_q->msg_q.read == p_q->msg_q.queue_start) {                
	        	p_q->msg_q.read = p_q->msg_q.queue_end;
	    	}
			
			p_q->msg_q.read--;
			*p_q->msg_q.read = p_void;                               /* Insert message into queue                     */
			
		}
		
		RAW_CRITICAL_EXIT();

		/*if the queue is registered with a notify function, just call it*/
		if (p_q->queue_send_notify) {

			p_q->queue_send_notify(p_q);	
		}

		TRACE_QUEUE_MSG_POST(raw_task_active, p_q, p_void, opt_send_method); 
		
		return RAW_SUCCESS;
	}

	/*wake all the tasks blocked on this queue*/
	if (opt_wake_all) {

		while (!is_list_empty(block_list_head)) {
			
			wake_send_msg(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list),  p_void);	
			
			TRACE_QUEUE_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, opt_wake_all);
			
		}
		
	}
	
	/*wake the highest priority task blocked on this queue and send the msg to it*/
	else {
		
		wake_send_msg(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list),  p_void);
		
		TRACE_QUEUE_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), p_void, opt_wake_all);

	}
	
	RAW_CRITICAL_EXIT();

	raw_sched();    
	return RAW_SUCCESS;
}
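A standalone sketch of the ring-buffer pointer handling above, not the RAW OS API: posting to the end stores at 'write' and advances it with wraparound, while posting to the front steps 'read' backwards with wraparound so that the receiver sees that message first.

#include <stdio.h>

#define QSIZE 4

int main(void)
{
	void *slots[QSIZE];
	void **queue_start = slots;
	void **queue_end = slots + QSIZE;	/* one past the last slot */
	void **write = slots;
	void **read = slots;
	int a = 1;
	int b = 2;

	/* SEND_TO_END: store at 'write', advance, wrap when the end is reached */
	*write++ = &a;
	if (write == queue_end) {
		write = queue_start;
	}

	/* SEND_TO_FRONT: wrap 'read' back to the end first if it sits at the
	   first slot, then step back and store, so this message is read next */
	if (read == queue_start) {
		read = queue_end;
	}
	--read;
	*read = &b;

	printf("next message to be received: %d\n", *(int *)*read);	/* 2 */
	return 0;
}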
Example No. 14
/*
************************************************************************************************************************
*                                    Timer task
*
* Description: This function is the entry function of the timer task.
*
* Arguments  : pa is the parameter passed to the task.
*
*
*
* Returns    : None
*
* Note(s)    : This function is called internally; users should not touch this function.
*
*
************************************************************************************************************************
*/
void timer_task(void *pa) 
{
	LIST 								*timer_head_ptr;
	LIST 								*iter;
	LIST 								*iter_temp;
	RAW_TIMER							*timer_ptr;
	RAW_OS_ERROR                         mutex_ret;
	RAW_U16                              callback_ret;
	
	/*reset the timer_sem count since it may not be 0 at this point, make it start here*/
	raw_semaphore_set(&timer_sem, 0);
	pa = pa;
	
	while (1) {
		
		/*the timer task blocks in this call until the semaphore is signalled*/
		raw_semaphore_get(&timer_sem, RAW_WAIT_FOREVER);

		mutex_ret = raw_mutex_get(&timer_mutex, RAW_WAIT_FOREVER);
		RAW_ASSERT(mutex_ret == RAW_SUCCESS);

		/*advance the software timer tick count*/
		raw_timer_count++;                                          
	
		timer_head_ptr  = &timer_head;

		iter = timer_head_ptr->next;
		
		while (RAW_TRUE) {

			/*if there are still timers on the list*/
			if (iter != timer_head_ptr) {

				/*Must use iter_temp because iter may be removed later.*/
				iter_temp = iter->next;
				timer_ptr =  raw_list_entry(iter, RAW_TIMER, timer_list);

				/*if timeout*/
				if (raw_timer_count == timer_ptr->match) {  

					/*remove from timer list*/
					timer_list_remove(timer_ptr);

					/*if timer is reschedulable*/			
					if (timer_ptr->reschedule_ticks) {

						/*Sort by remain time*/
						timer_ptr->remain = timer_ptr->reschedule_ticks;

						timer_ptr->match  = raw_timer_count + timer_ptr->remain;
						timer_ptr->to_head = &timer_head;
						timer_list_priority_insert(&timer_head, timer_ptr);
					          
					} 

					else {

						timer_ptr->timer_state = TIMER_DEACTIVE;

					}

					/*Either way the registered timer function needs to be called*/
					/*the registered timer callback must not touch any timer related API, otherwise the system will crash*/
					if (timer_ptr->raw_timeout_function) {

						callback_ret = timer_ptr->raw_timeout_function(timer_ptr->raw_timeout_param);
						if ((callback_ret == TIMER_CALLBACK_STOP) && (timer_ptr->timer_state != TIMER_DEACTIVE)) {
							/*remove from timer list*/
							timer_list_remove(timer_ptr);
							timer_ptr->timer_state = TIMER_DEACTIVE;
						}
					         
					}

					iter  = iter_temp; 

				} 

				else { 

					break;	
				}

			}

			/*exit because there are no more timers on the list*/
			else {

				break;
			}

		}

		raw_mutex_put(&timer_mutex);

	}


}
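A sketch of a timer callback consistent with the check in timer_task above: returning TIMER_CALLBACK_STOP removes and deactivates the timer even if it is periodic, and any other return value lets it keep running. The callback signature (void pointer argument, RAW_U16 return) is an assumption inferred from how callback_ret is declared above, and the timer creation/activation APIs are not shown in this listing.

static RAW_U32 demo_fire_count;

static RAW_U16 demo_timeout_function(void *param)
{
	param = param;	/* unused here, same idiom as 'pa = pa' above */

	demo_fire_count++;

	if (demo_fire_count >= 10u) {
		return TIMER_CALLBACK_STOP;	/* timer_task removes and deactivates the timer */
	}

	/* Any value other than TIMER_CALLBACK_STOP lets a periodic timer keep
	   running; the dedicated "continue" constant is not shown in this
	   listing, so a distinct value is returned here purely for illustration. */
	return (RAW_U16)(TIMER_CALLBACK_STOP + 1u);
}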
Example No. 15
void hybrid_int_process(void)
{
	TASK_0_EVENT_TYPE hybrid_ev;
	RAW_U8 *hybrid_data_temp;
	EVENT_HANLDER *hybrid_receiver;
	LIST *hybrid_node;
	RAW_U8 hybrid_highest_pri;

	RAW_SR_ALLOC();
	
	register RAW_U8 hybrid_task_may_switch = 0u;

	while (1) {
		
		USER_CPU_INT_DISABLE(); 
		
		if (task_0_events) {

			/*current running task can never be task 0*/
			if (raw_int_nesting) {

				raw_sched_lock = 0;
				USER_CPU_INT_ENABLE();
				return;
			}

			else {

				--task_0_events;
				/* There are events that we should deliver. */
				hybrid_ev = task_0_events_queue[task_0_event_end].ev;
				hybrid_data_temp = task_0_events_queue[task_0_event_end].event_data;
				hybrid_receiver = task_0_events_queue[task_0_event_end].p;

				task_0_event_end++;

				if (task_0_event_end == MAX_TASK_EVENT) {                  
					task_0_event_end = 0;
				}
				
				USER_CPU_INT_ENABLE();

				/*execute the event handler*/
				hybrid_receiver->handle_event(hybrid_ev, hybrid_data_temp);
				hybrid_task_may_switch = 1;
			}
			
		}

		else {
			
			raw_sched_lock = 0;

			if (hybrid_task_may_switch) {
				
				hybrid_highest_pri = raw_ready_queue.highest_priority;
				/*Highest priority task must be the first element on the list*/
				hybrid_node = raw_ready_queue.task_ready_list[hybrid_highest_pri].next;

				/*Get the highest priority task object*/
				high_ready_obj = raw_list_entry(hybrid_node, RAW_TASK_OBJ, task_list);

				/*if the highest priority task is the current task, no switch is needed; just return*/
				if (high_ready_obj == raw_task_active) { 
					
					USER_CPU_INT_ENABLE();                                     
					return;

				}

				CONTEXT_SWITCH();
			}
			
			USER_CPU_INT_ENABLE();
			return;

		}

	}
	
		
}
Example No. 16
RAW_OS_ERROR semaphore_put(RAW_SEMAPHORE *semaphore_ptr, RAW_U8 opt_wake_all)
{
	LIST *block_list_head;
	
	RAW_SR_ALLOC();
	
	RAW_CRITICAL_ENTER();

	if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {

		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &semaphore_ptr->common_block_obj.block_list;
	
	/*if no task is blocked on this list, just return*/
	if (is_list_empty(block_list_head)) {        
	    
		if (semaphore_ptr->count == (RAW_PROCESSOR_UINT)-1) {

			RAW_CRITICAL_EXIT();
			TRACE_SEMAPHORE_OVERFLOW(raw_task_active, semaphore_ptr);
			return RAW_SEMAPHORE_OVERFLOW;

		}
		/*increase resource*/
		semaphore_ptr->count++;

		if (semaphore_ptr->count > semaphore_ptr->peak_count) {

			semaphore_ptr->peak_count = semaphore_ptr->count;
		}	
	    
		RAW_CRITICAL_EXIT();
		
		/*if the semaphore is registered with a notify function, just call it*/
		if (semaphore_ptr->semphore_send_notify) {

			semaphore_ptr->semphore_send_notify(semaphore_ptr);	
		}

		TRACE_SEMAPHORE_COUNT_INCREASE(raw_task_active, semaphore_ptr);
		return RAW_SUCCESS;
	}

	/*wake all the tasks blocked on this semaphore*/
	if (opt_wake_all) {

		while (!is_list_empty(block_list_head)) {
			
			raw_wake_object(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));

			
			TRACE_SEM_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), opt_wake_all);
			
		}

	}

	else {
		
		/*Wake up the highest priority task blocked on the semaphore*/
		raw_wake_object(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));

		TRACE_SEM_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), opt_wake_all);
		
	}
	
	RAW_CRITICAL_EXIT();

	raw_sched();    

	return RAW_SUCCESS;



}
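A minimal consumer sketch built on the calls visible in these examples: raw_semaphore_get() blocks the task until the internal semaphore_put() shown above (normally reached through a public wrapper that this listing does not show) wakes either one task or all of them, depending on opt_wake_all. Creation of the semaphore and of the task is assumed to happen elsewhere.

static RAW_SEMAPHORE demo_sem;	/* assumed to be created/initialized elsewhere */

static void demo_consumer_task(void *param)
{
	param = param;	/* unused, same idiom as in the timer task above */

	while (1) {

		/* Block until a producer signals work (return value ignored here,
		   as in the timer task above). */
		raw_semaphore_get(&demo_sem, RAW_WAIT_FOREVER);

		/* ... process one unit of work ... */
	}
}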
Example No. 17
/*
************************************************************************************************************************
*                                    Update system tick time
*
* Description: This function is called to update the system tick time.
*
* Arguments  : None
*
*
*
* Returns    : None
*
* Note(s)    : This function is called internally; users should not touch this function.
*
*
************************************************************************************************************************
*/
void tick_list_update(void)
{
	
	LIST            *tick_head_ptr;
	RAW_TASK_OBJ    *p_tcb;
	LIST            *iter;
	LIST            *iter_temp;

	RAW_SR_ALLOC();

	RAW_CRITICAL_ENTER();
	
	raw_tick_count++;                                                     
	tick_head_ptr  = &tick_head;
	iter    = tick_head_ptr->next;
	
	while (RAW_TRUE) {

		/*walk the whole tick list if necessary*/
		if (iter != tick_head_ptr) {

			iter_temp =  iter->next;
			p_tcb =  raw_list_entry(iter, RAW_TASK_OBJ, tick_list);

			/*Since the tick list is sorted by remaining time, just compare the absolute match time*/
			if (raw_tick_count == p_tcb->tick_match) {
			
				switch (p_tcb->task_state) {
					case RAW_DLY:
						
						p_tcb->block_status = RAW_B_OK; 
						p_tcb->task_state = RAW_RDY;  
						tick_list_remove(p_tcb);
						add_ready_list(&raw_ready_queue, p_tcb);
						break; 

					case RAW_PEND_TIMEOUT:
						
						tick_list_remove(p_tcb);
						/*remove the task from the block list because it timed out*/
						list_delete(&p_tcb->task_list); 
						add_ready_list(&raw_ready_queue, p_tcb);
						p_tcb->block_status = RAW_B_TIMEOUT; 
						p_tcb->task_state = RAW_RDY; 
						
						#if (CONFIG_RAW_MUTEX > 0)
						mutex_state_change(p_tcb);
						#endif

						p_tcb->block_obj = 0;
						break;
						
					case RAW_PEND_TIMEOUT_SUSPENDED:

						tick_list_remove(p_tcb);
						/*remove the task from the block list because it timed out*/
						list_delete(&p_tcb->task_list); 
						p_tcb->block_status = RAW_B_TIMEOUT; 
						p_tcb->task_state = RAW_SUSPENDED;  
						
						#if (CONFIG_RAW_MUTEX > 0)
						mutex_state_change(p_tcb);
						#endif
					
						p_tcb->block_obj = 0;
						break;
					 
					case RAW_DLY_SUSPENDED:
										      
						p_tcb->task_state  =  RAW_SUSPENDED;
						p_tcb->block_status = RAW_B_OK; 
						tick_list_remove(p_tcb);                   
						break;

					default:
				
						port_system_error_process(RAW_SYSTEM_CRITICAL_ERROR, 0, 0, 0, 0, 0, 0);
						break;
											
				}

				iter  = iter_temp;
			}

		/*if the current task's timeout time does not equal the current system time, just break because the tick list is sorted*/
			else {
			
				break;

			}

		}

		
		/*finished searching the whole tick list*/
		
		else {
			
			break;
		}
		
	}

	RAW_CRITICAL_EXIT();
}