Example #1
void raw_task_free_mutex(RAW_TASK_OBJ *tcb)
{
	RAW_MUTEX	*mtxcb, *next_mtxcb;
	RAW_TASK_OBJ	*next_tcb;
	LIST 				*block_list_head;
	
	next_mtxcb = tcb->mtxlist;
	while ((mtxcb = next_mtxcb) != 0) {
		next_mtxcb = mtxcb->mtxlist;

		block_list_head = &mtxcb->common_block_obj.block_list;
		
		if (!is_list_empty(block_list_head)) {
			
			next_tcb = raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);

			/* Wake the waiting task */
			raw_wake_object(next_tcb);

			/* Transfer mutex ownership to the woken task */
			mtxcb->mtxtsk = next_tcb;
			mtxcb->mtxlist = next_tcb->mtxlist;
			next_tcb->mtxlist = mtxcb;

			if (mtxcb->policy == RAW_MUTEX_CEILING_POLICY) {
				if (next_tcb->priority > mtxcb->ceiling_prio) {
					/* Raise the priority of the task that got
					   the lock to the ceiling priority limit */
					change_internal_task_priority(next_tcb, mtxcb->ceiling_prio);
				}
			}
		} 

		else {
			/* No waiting task; the mutex is now free */
			mtxcb->mtxtsk = 0;
		}
		
	}
	
}
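The per-task mutex chain walked above is a singly linked list threaded through the mutexes themselves: each task keeps the head in tcb->mtxlist, and each mutex stores the next mutex held by the same task in mtxcb->mtxlist. Below is a minimal standalone sketch of that same walk pattern, using hypothetical stripped-down structs rather than the real RAW_TASK_OBJ and RAW_MUTEX definitions.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical, stripped-down models of the mutex and task objects;
   only the linkage fields used by the walk above are kept. */
struct mutex {
	const char   *name;
	struct mutex *mtxlist;   /* next mutex held by the same task */
};

struct task {
	struct mutex *mtxlist;   /* head of the chain of mutexes this task holds */
};

int main(void)
{
	struct mutex m1 = { "m1", NULL };
	struct mutex m2 = { "m2", &m1 };   /* m2 links to m1 */
	struct task  t  = { &m2 };         /* the task holds m2, then m1 */

	/* Same iteration pattern as raw_task_free_mutex: remember the next
	   node before the current one is handed to another owner. */
	struct mutex *next = t.mtxlist;
	struct mutex *cur;

	while ((cur = next) != NULL) {
		next = cur->mtxlist;
		printf("releasing %s\n", cur->name);
	}

	return 0;
}

Remembering next before processing cur is what lets the loop keep going even though raw_task_free_mutex relinks the current mutex onto another task's chain in the middle of the iteration.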
Example #2
RAW_OS_ERROR semaphore_put(RAW_SEMAPHORE *semaphore_ptr, RAW_U8 opt_wake_all)
{
	LIST *block_list_head;
	
	RAW_SR_ALLOC();
	
	RAW_CRITICAL_ENTER();

	if (semaphore_ptr->common_block_obj.object_type != RAW_SEM_OBJ_TYPE) {

		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &semaphore_ptr->common_block_obj.block_list;
	
	/* If no task is blocked on this list, just increase the count and return */
	if (is_list_empty(block_list_head)) {

		if (semaphore_ptr->count == (RAW_PROCESSOR_UINT)-1) {

			RAW_CRITICAL_EXIT();
			TRACE_SEMAPHORE_OVERFLOW(raw_task_active, semaphore_ptr);
			return RAW_SEMAPHORE_OVERFLOW;

		}
		/*increase resource*/
		semaphore_ptr->count++;

		if (semaphore_ptr->count > semaphore_ptr->peak_count) {

			semaphore_ptr->peak_count = semaphore_ptr->count;
		}	
	    
		RAW_CRITICAL_EXIT();
		
		/* If the semaphore is registered with a notify function, just call it */
		if (semaphore_ptr->semphore_send_notify) {

			semaphore_ptr->semphore_send_notify(semaphore_ptr);	
		}

		TRACE_SEMAPHORE_COUNT_INCREASE(raw_task_active, semaphore_ptr);
		return RAW_SUCCESS;
	}

	/* Wake all the tasks blocked on this semaphore */
	if (opt_wake_all) {

		while (!is_list_empty(block_list_head)) {

			/* Trace before waking: raw_wake_object removes the task from the block list */
			TRACE_SEM_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), opt_wake_all);

			raw_wake_object(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
		}

	}

	else {

		/* Wake up the highest priority task blocked on the semaphore */
		TRACE_SEM_WAKE_TASK(raw_task_active, raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list), opt_wake_all);

		raw_wake_object(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
	}
	
	RAW_CRITICAL_EXIT();

	raw_sched();    

	return RAW_SUCCESS;
}
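The overflow guard above relies on the fact that casting -1 to an unsigned integer type yields that type's maximum value, so semaphore_put refuses a post that would wrap the counter back to zero. A minimal standalone illustration of that check, assuming a 32-bit unsigned counter stands in for the port-defined RAW_PROCESSOR_UINT:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for RAW_PROCESSOR_UINT; the real type is defined by the port. */
typedef uint32_t counter_t;

int main(void)
{
	counter_t count = (counter_t)-1;   /* maximum value of the type */

	/* Same guard as semaphore_put: refuse to increment at the ceiling,
	   otherwise the count would silently wrap to zero. */
	if (count == (counter_t)-1) {
		printf("overflow would occur, put rejected\n");
	} else {
		count++;
	}

	return 0;
}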
Example #3
RAW_U16 event_set(RAW_EVENT *event_ptr, RAW_U32 flags_to_set, RAW_U8 set_option)
{

	LIST *iter;
	LIST *event_head_ptr;
	LIST *iter_temp;
	RAW_TASK_OBJ *task_ptr;
	
	RAW_U8 status;
	
	RAW_SR_ALLOC();

	status = RAW_FALSE;
	
	RAW_CRITICAL_ENTER();

	if (event_ptr->common_block_obj.object_type != RAW_EVENT_OBJ_TYPE) {

		RAW_CRITICAL_EXIT();
		
		return RAW_ERROR_OBJECT_TYPE;
	}

	event_head_ptr = &event_ptr->common_block_obj.block_list;

	/* If set_option is AND_MASK, just clear the flags and return immediately */
	if (set_option & RAW_FLAGS_AND_MASK) {

		event_ptr->flags &= flags_to_set;

		RAW_CRITICAL_EXIT();
		return RAW_SUCCESS;
	}
	
	/* If it is OR mask, set the flags and continue */
	else {

		event_ptr->flags |= flags_to_set;    
	}

	iter = event_head_ptr->next;

	/* Walk the block list if it is not empty */
	while (iter != event_head_ptr) {

		task_ptr = list_entry(iter, RAW_TASK_OBJ, task_list);
		iter_temp = iter->next;
		
		if (task_ptr->raw_suspend_option & RAW_FLAGS_AND_MASK) {

			if ((event_ptr->flags & task_ptr->raw_suspend_flags) == task_ptr->raw_suspend_flags) {
				status = RAW_TRUE;
			}

			else {
				status = RAW_FALSE;
			}
		}

		else {

			if (event_ptr->flags & task_ptr->raw_suspend_flags) {

				status = RAW_TRUE;
			}

			else {
				status = RAW_FALSE;
			}
		}

		
		if (status == RAW_TRUE) {

			(*(RAW_U32 *)(task_ptr->raw_additional_suspend_info)) = event_ptr->flags;
			
			/* The task's wait condition is met, so wake this task */
			raw_wake_object(task_ptr);

			TRACE_EVENT_WAKE(raw_task_active, task_ptr);

		}

		iter = iter_temp;

 	}

	RAW_CRITICAL_EXIT();

	raw_sched();
	
	return RAW_SUCCESS;

}
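The two tests inside the loop implement the AND and OR wait semantics: with RAW_FLAGS_AND_MASK the waiter is released only when every requested bit is set in event_ptr->flags, otherwise any single requested bit is enough. A standalone sketch of the two conditions with plain integers (the values are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int event_flags = 0x05u;   /* bits 0 and 2 currently set */
	unsigned int wanted      = 0x06u;   /* the waiter asks for bits 1 and 2 */

	/* AND semantics: all requested bits must be present. */
	int and_met = ((event_flags & wanted) == wanted);

	/* OR semantics: any one requested bit is enough. */
	int or_met  = ((event_flags & wanted) != 0u);

	printf("AND condition met: %d\n", and_met);   /* 0: bit 1 is missing */
	printf("OR  condition met: %d\n", or_met);    /* 1: bit 2 is set */

	return 0;
}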
Example #4
/*
************************************************************************************************************************
*                                       Release a mutex
*
* Description: This function is called to release a mutex.
*
* Arguments  : mutex_ptr is the address of the mutex object to be released
*
* Returns    : RAW_SUCCESS: the mutex was released successfully
*
* Note(s)    : The highest priority task pending on this mutex, if any, will be woken up and become the new owner.
*
************************************************************************************************************************
*/
RAW_OS_ERROR raw_mutex_put(RAW_MUTEX *mutex_ptr)
{

	LIST 				*block_list_head;
	RAW_TASK_OBJ   		*tcb;
	
	RAW_SR_ALLOC();

	#if (RAW_MUTEX_FUNCTION_CHECK > 0)

	if (mutex_ptr == 0) {
		return RAW_NULL_OBJECT;
	}

	/* This function must not be called from an ISR */
	if (raw_int_nesting) {

		return RAW_NOT_CALLED_BY_ISR;
	}
	
	#endif

	RAW_CRITICAL_ENTER();

	if (mutex_ptr->common_block_obj.object_type != RAW_MUTEX_OBJ_TYPE) {
		
		RAW_CRITICAL_EXIT();  
		return RAW_ERROR_OBJECT_TYPE;
	}


	/* The mutex must be released by the task that owns it */
	if (raw_task_active != mutex_ptr->mtxtsk) {           
		RAW_CRITICAL_EXIT();
		return RAW_MUTEX_NOT_RELEASE_BY_OCCYPY;
	}

	mutex_ptr->owner_nested--;

	/* Still nested: the owner has not fully released the mutex yet */
	if (mutex_ptr->owner_nested) {

		RAW_CRITICAL_EXIT();
		return RAW_MUTEX_OWNER_NESTED;

	}

	release_mutex(raw_task_active, mutex_ptr);


	block_list_head = &mutex_ptr->common_block_obj.block_list;

	/* If no task is blocked on this list, just return */
	if (is_list_empty(block_list_head)) {
		/* No waiting task; the mutex is now free */
		mutex_ptr->mtxtsk = 0;
		RAW_CRITICAL_EXIT();

		TRACE_MUTEX_RELEASE_SUCCESS(raw_task_active, mutex_ptr);
		
		return RAW_SUCCESS;
	}

	
	/* There must be a task blocked on this mutex object */
	tcb = raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);

	/* Wake up the task that will now own the mutex, which is the highest priority task on the list */
	raw_wake_object(tcb);

	/* Transfer mutex ownership to the woken task */
	mutex_ptr->mtxtsk = tcb;
	mutex_ptr->mtxlist = tcb->mtxlist;
	tcb->mtxlist = mutex_ptr;
	mutex_ptr->owner_nested = 1u;
	
	if (mutex_ptr->policy == RAW_MUTEX_CEILING_POLICY) {
		
		if (tcb->priority > mutex_ptr->ceiling_prio) {
			/* Raise the priority of the task that got
			   the lock to the ceiling priority limit */
			change_internal_task_priority(tcb, mutex_ptr->ceiling_prio);
			
		}
		
	}

	TRACE_MUTEX_WAKE_TASK(raw_task_active, tcb);
	
	RAW_CRITICAL_EXIT();

	raw_sched();                                       

	return RAW_SUCCESS;
	
}
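The owner_nested counter above makes the mutex recursive for its owner: every matching put only decrements the counter, and ownership is actually handed to the next waiter only when the counter reaches zero. The following is a minimal standalone model of just that bookkeeping; the model_get/model_put names are hypothetical and not part of the Raw OS API.

#include <stdio.h>

/* Hypothetical model: only the nesting counter of a recursive mutex. */
static unsigned int owner_nested;

static void model_get(void)
{
	/* Each successful get by the owner bumps the nesting level. */
	owner_nested++;
}

static int model_put(void)
{
	owner_nested--;

	if (owner_nested) {
		return 0;   /* still owned, mirrors the RAW_MUTEX_OWNER_NESTED path */
	}

	return 1;       /* last put: ownership is really released */
}

int main(void)
{
	model_get();
	model_get();   /* nested acquisition by the same owner */

	printf("first put releases: %d\n", model_put());    /* prints 0 */
	printf("second put releases: %d\n", model_put());   /* prints 1 */

	return 0;
}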
Example #5
RAW_U16 queue_buffer_post(RAW_QUEUE_BUFFER *q_b, RAW_VOID *p_void, MSG_SIZE_TYPE msg_size, RAW_U8 opt_send_method)
{

	LIST *block_list_head;
	RAW_TASK_OBJ *task_ptr;

 	RAW_SR_ALLOC();
	
	RAW_CRITICAL_ENTER();

	if (q_b->common_block_obj.object_type != RAW_QUEUE_BUFFER_OBJ_TYPE) {

		RAW_CRITICAL_EXIT();
		return RAW_ERROR_OBJECT_TYPE;
	}

	block_list_head = &q_b->common_block_obj.block_list;
	
	if (!is_queue_buffer_free(q_b,  msg_size)) {

		RAW_CRITICAL_EXIT();

		TRACE_QUEUE_BUFFER_MAX(raw_task_active, q_b, p_void, msg_size, opt_send_method); 
		
		return RAW_QUEUE_BUFFER_FULL;
	}


	/* The queue buffer is not full here; if there is no blocked receiving task, copy the message into the buffer */
	if (is_list_empty(block_list_head)) {        

		if (opt_send_method == SEND_TO_END) {
			msg_to_end_buffer(q_b, p_void, msg_size);
		}

		else {
			/* Other send methods are not handled in this path */
		}
		
		RAW_CRITICAL_EXIT();

		TRACE_QUEUE_BUFFER_POST(raw_task_active, q_b, p_void, msg_size, opt_send_method);
		
		return RAW_SUCCESS;
	}
	
	task_ptr = list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);
	
	raw_memcpy(task_ptr->msg, p_void, msg_size);
	task_ptr->qb_msg_size = msg_size;
	
	raw_wake_object(task_ptr);
		
	RAW_CRITICAL_EXIT();

	/* task_ptr is the task that was just woken and removed from the block list */
	TRACE_QUEUE_BUFFER_WAKE_TASK(raw_task_active, task_ptr, p_void, msg_size, opt_send_method);

	raw_sched();    
	return RAW_SUCCESS;
}
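When a receiver is already blocked, queue_buffer_post above bypasses the ring buffer entirely: the message is copied straight into the waiter's msg buffer, its qb_msg_size is set, and the task is woken. A standalone sketch of that direct hand-off pattern, using a hypothetical simplified receiver struct rather than the Raw OS types:

#include <stdio.h>
#include <string.h>

/* Hypothetical model of the direct hand-off path in queue_buffer_post:
   if a receiver is already waiting, copy the message straight into its
   buffer instead of going through the ring buffer. */
struct receiver {
	char   msg[32];
	size_t msg_size;
	int    waiting;
};

static void post(struct receiver *rcv, const void *data, size_t size)
{
	if (rcv->waiting) {
		/* Blocked receiver: hand the message over directly and wake it. */
		memcpy(rcv->msg, data, size);
		rcv->msg_size = size;
		rcv->waiting  = 0;
	}
	/* Otherwise the real code would append to the ring buffer instead. */
}

int main(void)
{
	struct receiver rcv = { "", 0, 1 };   /* the receiver is blocked */
	const char text[] = "hello";

	post(&rcv, text, sizeof(text));
	printf("received %zu bytes: %s\n", rcv.msg_size, rcv.msg);

	return 0;
}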