Example No. 1
/*
************************************************************************************************************************
*                                    Post an event to an active object
*
* Description: This function is called to post an event to an active object (FIFO order).
*
* Arguments  : me is the address of this active object
*              ---------
*              event is the address of the event to send
*
* Returns    : None
*
* Note(s)    :
*
************************************************************************************************************************
*/
void active_event_post_end(ACTIVE_OBJECT_STRUCT *me, STATE_EVENT *event)
{
	RAW_OS_ERROR err;

	RAW_SR_ALLOC();
	

	RAW_CPU_DISABLE();

	if (event->which_pool) {          
		event->ref_count++;          
	}

	RAW_CPU_ENABLE();

	err = raw_queue_end_post(&me->active_queue, (void *)event);

	if (err != RAW_SUCCESS) {

		RAW_ASSERT(0);

	}
}
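A minimal usage sketch, assuming an active object ui_active_object that has already been created and started elsewhere; the object names and signal value are illustrative only. Because the event is statically allocated, which_pool is 0 and no reference counting takes place:

/* Usage sketch (illustrative names): post a statically allocated event to an active object. */
extern ACTIVE_OBJECT_STRUCT ui_active_object;   /* assumed to be created and started elsewhere */

static STATE_EVENT button_event;                /* static event: which_pool == 0, so no ref counting */

void button_pressed_notify(void)
{
	button_event.sig = 1u;                      /* illustrative signal value        */
	button_event.which_pool = 0u;               /* not allocated from an event pool */

	/* Append the event to the tail of the active object's queue (FIFO). */
	active_event_post_end(&ui_active_object, &button_event);
}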
Example No. 2
static void work_queue_task(void *pa)
{
	RAW_OS_ERROR  ret;
	OBJECT_WORK_QUEUE_MSG *msg_recv;
	WORK_QUEUE_STRUCT *wq;

	RAW_SR_ALLOC();
	
	wq = pa;

	while (1) {

		/* Block until a work item is posted to this work queue. */
		ret = raw_queue_receive(&wq->queue, RAW_WAIT_FOREVER, (void **)(&msg_recv));

		if (ret != RAW_SUCCESS) {

			RAW_ASSERT(0);

		}

		/* Run the deferred work in task context. */
		msg_recv->handler(msg_recv->arg, msg_recv->msg);

		/* Return the message node to the free list. */
		RAW_CPU_DISABLE();

		msg_recv->next = free_work_queue_msg;
		free_work_queue_msg = msg_recv;

		RAW_CPU_ENABLE();


	}

}
Example No. 3
/*
************************************************************************************************************************
*                                   Recall an event from a deferred queue
*
* Description: This function is called to recall an event from a deferred queue and post it to the front of the
*              active object's queue.
*
* Arguments  : me is the address of the active object to post to
*              ---------
*              q is the address of the deferred queue
*
* Returns    : 1 if an event was recalled, 0 otherwise
*
* Note(s)    :
*
************************************************************************************************************************
*/
RAW_U16 active_event_recall(ACTIVE_OBJECT_STRUCT *me, RAW_QUEUE *q) 
{
	STATE_EVENT *event;
	RAW_U16 recalled;
	RAW_U16 err;
	RAW_SR_ALLOC();

	err = raw_queue_receive (q, RAW_NO_WAIT, (RAW_VOID  **)&event);

	if (err == RAW_SUCCESS) {
		
		RAW_CPU_DISABLE();

		if (event->which_pool) {  
			
			event->ref_count++;
		}

		RAW_CPU_ENABLE();

		active_event_post_front(me, event);

		recalled = 1;
	}

	else {

		recalled = 0;
	}


	return recalled;
}
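A sketch of the defer/recall pattern this function supports, assuming a separate RAW_QUEUE named defer_queue created elsewhere as the deferral queue; all object names are illustrative:

/* Usage sketch (illustrative names): defer an event now, recall it later.      */
extern RAW_QUEUE            defer_queue;        /* assumed to be created elsewhere */
extern ACTIVE_OBJECT_STRUCT ui_active_object;

void defer_event(STATE_EVENT *event)
{
	/* Park the event in the deferral queue for later processing. */
	if (raw_queue_end_post(&defer_queue, (void *)event) != RAW_SUCCESS) {
		RAW_ASSERT(0);
	}
}

void recall_one_deferred_event(void)
{
	/* Pull one deferred event back; it is re-posted to the front of   */
	/* the active object's queue. Returns 1 if an event was recalled.  */
	if (active_event_recall(&ui_active_object, &defer_queue) == 0) {
		/* nothing was deferred */
	}
}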
Example No. 4
/*
************************************************************************************************************************
*                                    Post an event to an active object
*
* Description: This function is called to post an event to an active object (LIFO order).
*
* Arguments  : me is the address of this active object
*              ---------
*              event is the address of the event to send
*
* Returns    : None
*
* Note(s)    :
*
************************************************************************************************************************
*/
void active_event_post_front(ACTIVE_OBJECT_STRUCT *me, STATE_EVENT *event)
{

	RAW_U16 ret;

	RAW_SR_ALLOC();

	RAW_CPU_DISABLE();

	if (event->which_pool) {          
		event->ref_count++;          
	}

	RAW_CPU_ENABLE();

	ret = raw_queue_front_post(&me->active_queue, (void *)event);

	if (ret != RAW_SUCCESS) {

		RAW_ASSERT(0);

	}
}
Example No. 5
RAW_OS_ERROR int_msg_post(RAW_U8 type, void *p_obj, void *p_void, MSG_SIZE_TYPE msg_size, RAW_U32 flags, RAW_U8 opt)
{
	OBJECT_INT_MSG *msg_data;
	RAW_OS_ERROR task_0_post_ret;
	
	RAW_SR_ALLOC();

	RAW_CPU_DISABLE();
	
	if (free_object_int_msg == 0) {
		
		int_msg_full++;
		
		RAW_CPU_ENABLE();
		
		TRACE_INT_MSG_EXHAUSTED();
		
		return RAW_INT_MSG_EXHAUSTED;
	}

	msg_data = free_object_int_msg;
	
	free_object_int_msg->type = type;
	free_object_int_msg->object = p_obj;
	free_object_int_msg->msg = p_void;
	free_object_int_msg->msg_size = msg_size;
	free_object_int_msg->event_flags = flags;
	free_object_int_msg->opt = opt;
	free_object_int_msg = free_object_int_msg->next;

	RAW_CPU_ENABLE();
	/* raw_task_0_post may fail here if task 0's event queue is full. */
	task_0_post_ret = raw_task_0_post(&msg_event_handler, type, msg_data);
	if (task_0_post_ret == RAW_SUCCESS) {
		
		TRACE_INT_MSG_POST(type, p_obj, p_void, msg_size, flags, opt);
	}
	else {
		/* If raw_task_0_post fails, the message node is returned to the free list. */
		RAW_CPU_DISABLE();
		msg_data->next = free_object_int_msg;
		free_object_int_msg = msg_data;
		RAW_CPU_ENABLE();
	}

	return task_0_post_ret;
}
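A hedged sketch of posting from an ISR through the task-0 message path to release a semaphore; adc_done_sem and adc_isr are illustrative, and for RAW_TYPE_SEM only the type and object arguments are used by the handler (see int_msg_handler later in this section), so the rest are passed as zero:

/* ISR-side sketch (illustrative names): release a semaphore through the task-0 message path. */
extern RAW_SEMAPHORE adc_done_sem;              /* assumed to be created elsewhere */

void adc_isr(void)
{
	RAW_OS_ERROR err;

	/* Only the type and object are meaningful for RAW_TYPE_SEM, so   */
	/* the remaining arguments are passed as zero.                    */
	err = int_msg_post(RAW_TYPE_SEM, &adc_done_sem, 0, 0, 0u, 0u);

	if (err != RAW_SUCCESS) {
		/* RAW_INT_MSG_EXHAUSTED or a task 0 post failure: count or drop it. */
	}
}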
Example No. 6
/*
************************************************************************************************************************
*                                       Schedule work onto a specific work queue
*
* Description: This function is called to schedule a handler onto the specified work queue.
*
* Arguments  : wq is the address of the work queue object
*              -----
*              arg is the argument passed to the handler
*              -----
*              msg is the message passed to the handler
*              -----
*              handler is the function executed later by the work queue task
*
* Returns    : RAW_SUCCESS
*              RAW_WORK_QUEUE_MSG_MAX: more work queue internal messages are needed.
*              RAW_MSG_MAX: the queue is full.
*
* Note(s)    : This API can be called from interrupt or task context.
*
************************************************************************************************************************
*/
RAW_OS_ERROR sche_work_queue(WORK_QUEUE_STRUCT *wq, RAW_U32 arg, void *msg, WORK_QUEUE_HANDLER handler)
{
	OBJECT_WORK_QUEUE_MSG *msg_data;
	RAW_OS_ERROR ret;

	RAW_SR_ALLOC();

	RAW_CPU_DISABLE();
	
	if (free_work_queue_msg == 0) {
		
		RAW_CPU_ENABLE();
		
		return RAW_WORK_QUEUE_MSG_MAX;
	}

	msg_data = free_work_queue_msg;

	free_work_queue_msg->arg = arg;
	free_work_queue_msg->msg = msg;
	free_work_queue_msg->handler = handler;
	
	free_work_queue_msg = free_work_queue_msg->next;

	RAW_CPU_ENABLE();
	
	ret = raw_queue_end_post(&wq->queue, msg_data);

	/* If the post failed, return the message node to the free list. */
	if (ret != RAW_SUCCESS) {

		RAW_CPU_DISABLE();
		msg_data->next = free_work_queue_msg;
		free_work_queue_msg = msg_data;
		RAW_CPU_ENABLE();
	}
		
	return ret;
}
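A minimal sketch of deferring work from an ISR onto a work queue; uart_wq and the handler are illustrative, the work queue is assumed to be created elsewhere, and the handler signature (arg first, then msg, returning void) is inferred from the work_queue_task loop shown earlier:

/* Usage sketch (illustrative names): defer work from an ISR onto a work queue. */
extern WORK_QUEUE_STRUCT uart_wq;               /* assumed to be created elsewhere */

static void uart_rx_work(RAW_U32 arg, void *msg)
{
	/* Runs later in the work queue task, not in the ISR. */
	(void)arg;
	(void)msg;
}

void uart_isr(void)
{
	RAW_U32 rx_byte = 0x55u;                    /* illustrative payload */

	if (sche_work_queue(&uart_wq, rx_byte, 0, uart_rx_work) != RAW_SUCCESS) {
		/* RAW_WORK_QUEUE_MSG_MAX or RAW_MSG_MAX: the work item was not queued. */
	}
}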
Example No. 7
/*
************************************************************************************************************************
*                                    Enter interrupt
*
* Description: This function is called when entering an interrupt.
*
* Arguments  : None
*
* Returns    : RAW_SUCCESS or RAW_EXCEED_INT_NESTED_LEVEL
*
* Note(s)    : raw_int_nesting may only be accessed directly by interrupt code with CPU interrupts disabled.
*              You must invoke raw_enter_interrupt before calling the interrupt handler and raw_finish_int after it,
*              and the two calls must always be made in pairs.
*
************************************************************************************************************************
*/
RAW_OS_ERROR raw_enter_interrupt(void)
{
	RAW_SR_ALLOC();

	RAW_CPU_DISABLE();
	
	if (raw_int_nesting >= INT_NESTED_LEVEL) {  

		RAW_CPU_ENABLE();
		
		port_system_error_process(RAW_EXCEED_INT_NESTED_LEVEL, 0, 0, 0, 0, 0, 0);
		
		return RAW_EXCEED_INT_NESTED_LEVEL;                                                                                      
	}

	raw_int_nesting++; 
	RAW_CPU_ENABLE();

	return RAW_SUCCESS;
}
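A sketch of the pairing rule in the note above; user_interrupt_handler is illustrative, and raw_finish_int is named by the note but not shown in this excerpt, so its prototype is assumed to come from the RAW OS headers:

/* Sketch only: how a port-level interrupt entry might apply the pairing rule. */
extern void user_interrupt_handler(void);       /* illustrative device handler */

void port_interrupt_entry(void)
{
	if (raw_enter_interrupt() != RAW_SUCCESS) {
		/* Nesting level exceeded; the system error hook has already run. */
		return;
	}

	user_interrupt_handler();

	raw_finish_int();   /* must always be paired with raw_enter_interrupt */
}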
Example No. 8
/*
************************************************************************************************************************
*                                       Release byte memory back to a pool
*
* Description: This function is called to release memory back to a byte pool.
*
* Arguments  : pool_ptr is the address of the pool
*                   ---------------------
*              memory_ptr is the address of the memory to return to the pool
*                   ---------------------
*
* Returns    : RAW_SUCCESS: raw os returns success
*
* Note(s)    : This method will not cause fragmentation.
*
************************************************************************************************************************
*/
RAW_OS_ERROR raw_byte_release(RAW_BYTE_POOL_STRUCT *pool_ptr, void *memory_ptr)
{
	RAW_U8  *work_ptr;           /* Working block pointer      */

	RAW_SR_ALLOC();


	#if (RAW_BYTE_FUNCTION_CHECK > 0)

	if (pool_ptr == 0) {

		return RAW_NULL_POINTER;
	}

	if (memory_ptr == 0) {

		return RAW_NULL_POINTER;
	}
		
	#endif

	if (pool_ptr ->common_block_obj.object_type != RAW_BYTE_OBJ_TYPE) {

		return RAW_ERROR_OBJECT_TYPE;
	}
	
	/* Back off the memory pointer to pickup its header.  */
	work_ptr = (RAW_U8 *)memory_ptr - sizeof(RAW_U8 *) - sizeof(RAW_U32);

	/* Disable interrupts.  */
	RAW_CPU_DISABLE();

	/* Indicate that this thread is the current owner.  */
	pool_ptr->raw_byte_pool_owner = raw_task_active;

	/* Release the memory.*/
	*((RAW_U32 *)(work_ptr + sizeof(RAW_U8 *))) = RAW_BYTE_BLOCK_FREE;

	/* Update the number of available bytes in the pool. */
	pool_ptr->raw_byte_pool_available =  
	pool_ptr->raw_byte_pool_available + (*((RAW_U8 * *)(work_ptr)) - work_ptr);

	/* Set the pool search value appropriately. */
	pool_ptr->raw_byte_pool_search = work_ptr;

	RAW_CPU_ENABLE();

	return RAW_SUCCESS;
		
}
Example No. 9
RAW_OS_ERROR raw_queue_size_full_register(RAW_QUEUE_SIZE *p_q, QUEUE_SIZE_FULL_CALLBACK callback_full)
{
	RAW_SR_ALLOC();

	/* This function must not be called from an ISR. */
	if (raw_int_nesting) {

		return RAW_NOT_CALLED_BY_ISR;
	}
	
	RAW_CPU_DISABLE();
	p_q->queue_size_full_callback = callback_full;
	RAW_CPU_ENABLE();

	return RAW_SUCCESS;
}
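A hedged registration sketch; the callback's parameter list is defined by the QUEUE_SIZE_FULL_CALLBACK typedef in the OS headers and is not shown in this excerpt, so the hook is only declared with that type here, and all object names are illustrative:

/* Registration sketch (illustrative names). */
extern QUEUE_SIZE_FULL_CALLBACK log_queue_full_hook;   /* signature defined by the OS headers */
extern RAW_QUEUE_SIZE           log_queue;             /* assumed to be created elsewhere     */

void install_log_queue_full_hook(void)
{
	/* Must be called from task context; the API rejects ISR callers. */
	if (raw_queue_size_full_register(&log_queue, log_queue_full_hook) != RAW_SUCCESS) {
		RAW_ASSERT(0);
	}
}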
Example No. 10
/*
************************************************************************************************************************
*                                       Release block memory back to a pool
*
* Description: This function is called to release a block back to a memory pool.
*
* Arguments  : pool_ptr is the address of the pool
*                   ---------------------
*              block_ptr is the address of the block to return to the pool
*                   ---------------------
*
* Returns    : RAW_SUCCESS: raw os returns success
*
* Note(s)    : This method will not cause fragmentation.
*
*
************************************************************************************************************************
*/
RAW_U16 raw_block_release(MEM_POOL *pool_ptr, RAW_VOID *block_ptr)
{
    RAW_U8	*work_ptr;

    RAW_SR_ALLOC();

#if (RAW_BLOCK_FUNCTION_CHECK > 0)

    if (block_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    if (pool_ptr == 0) {

        return RAW_NULL_OBJECT;
    }

#endif

    if (pool_ptr->common_block_obj.object_type != RAW_BLOCK_OBJ_TYPE) {

        return RAW_ERROR_OBJECT_TYPE;
    }

    RAW_CPU_DISABLE();

    work_ptr = ((RAW_U8 *) block_ptr);

    /* Put the block back in the available list.  */
    *((RAW_U8 **)work_ptr) = pool_ptr->raw_block_pool_available_list;

    /* Adjust the head pointer.  */
    pool_ptr->raw_block_pool_available_list = work_ptr;

    /* Increment the count of available blocks.  */
    pool_ptr->raw_block_pool_available++;

    RAW_CPU_ENABLE();

    /* Return completion status.  */
    return RAW_SUCCESS;

}
Example No. 11
/*
************************************************************************************************************************
*                                       Allocate byte memory from a pool
*
* Description: This function is called to allocate memory from a byte pool.
*
* Arguments  : pool_ptr is the address of the pool
*                   ---------------------
*              memory_ptr is the address of a pointer, which will be filled with the address of the allocated memory
*                   ---------------------
*              memory_size is the size you want to allocate
*                   ---------------------
*
* Returns    : RAW_NO_MEMORY: No memory is available.
*              RAW_SUCCESS: raw os returns success
*
* Note(s)    : If *memory_ptr is 0, no memory is available now. This method will cause fragmentation.
*
************************************************************************************************************************
*/
RAW_OS_ERROR raw_byte_allocate(RAW_BYTE_POOL_STRUCT *pool_ptr, void **memory_ptr, RAW_U32 memory_size)
{

	RAW_OS_ERROR   status;                 /* Return status              */
	RAW_U8         *work_ptr;               /* Working byte pointer       */
	RAW_TASK_OBJ   *current_work_task;
	RAW_U8         byte_align_mask;

	RAW_SR_ALLOC();

	#if (RAW_BYTE_FUNCTION_CHECK > 0)

	if (pool_ptr == 0) {
			
		return RAW_NULL_POINTER;
	}

	if (memory_ptr == 0) {

		return RAW_NULL_POINTER;
	}
	 
	#endif

	if (pool_ptr->common_block_obj.object_type != RAW_BYTE_OBJ_TYPE) {

		return RAW_ERROR_OBJECT_TYPE;
	}

	byte_align_mask = sizeof(void *) - 1u;
	
	/* Round the requested size up to pointer alignment. */

	memory_size = ((memory_size & (~byte_align_mask)) + sizeof(void *));

	current_work_task = raw_task_active;

	/* Disable interrupts.  */
	RAW_CPU_DISABLE();

	/* Loop to handle cases where the owner of the pool changed.  */
	do {

		/* Indicate that this thread is the current owner.  */
		pool_ptr->raw_byte_pool_owner = current_work_task;

		/* Restore interrupts.  */
		RAW_CPU_ENABLE();

		/*Search for free memory*/
		work_ptr = raw_byte_pool_search(pool_ptr, memory_size);

		/* Disable interrupts.  */
		RAW_CPU_DISABLE();

	/* If raw_byte_pool_owner changed and we have not got memory yet, continue to search. */
	} while ((!work_ptr) && (pool_ptr->raw_byte_pool_owner != current_work_task));

	/* Determine if memory was found.  */
	if (work_ptr) {

		/* Copy the pointer into the return destination.  */
		*memory_ptr = (RAW_U8 *) work_ptr;

		/* Set the status to success.  */
		status = RAW_SUCCESS;

	}

	else {
		
		*memory_ptr = 0;

		/* Set the status to RAW_NO_MEMORY.  */
		status = RAW_NO_MEMORY; 

		TRACE_BYTE_NO_MEMORY(raw_task_active, pool_ptr);
	}

	RAW_CPU_ENABLE();

	return status;
}
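A hedged allocate/release sketch pairing raw_byte_allocate with raw_byte_release from the earlier example; msg_byte_pool is illustrative and is assumed to have been created elsewhere (pool creation is not part of this excerpt):

/* Usage sketch (illustrative names): allocate from and release back to a byte pool. */
extern RAW_BYTE_POOL_STRUCT msg_byte_pool;      /* assumed to be created elsewhere */

void build_and_send_message(void)
{
	void *buf;

	/* Request 100 bytes; the pool rounds the size up to pointer alignment. */
	if (raw_byte_allocate(&msg_byte_pool, &buf, 100u) != RAW_SUCCESS) {
		/* RAW_NO_MEMORY: buf is 0 and there is nothing to free. */
		return;
	}

	/* ... fill and use buf ... */

	/* Hand the memory back; its header is marked RAW_BYTE_BLOCK_FREE again. */
	if (raw_byte_release(&msg_byte_pool, buf) != RAW_SUCCESS) {
		RAW_ASSERT(0);
	}
}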
Example No. 12
static void *raw_byte_pool_search(RAW_BYTE_POOL_STRUCT *pool_ptr, RAW_U32 memory_size)
{

	RAW_U8 *current_ptr;                /* Current block pointer      */
	RAW_U8 *next_ptr;                   /* Next block pointer         */
	RAW_U32 available_bytes;            /* Calculate bytes available  */
	RAW_U32 examine_blocks;             /* Blocks to be examined      */

	RAW_SR_ALLOC();

	/* Disable interrupts.  */
	RAW_CPU_DISABLE();

	/* First, determine if there are enough bytes in the pool.  */
	if (memory_size >= pool_ptr->raw_byte_pool_available) {

		/* Restore interrupts.  */
		RAW_CPU_ENABLE();
		/* Not enough memory, return a NULL pointer.  */
		return 0;
	}

	/* Walk through the memory pool in search for a large enough block.  */
	current_ptr = pool_ptr->raw_byte_pool_search;
	examine_blocks = pool_ptr->raw_byte_pool_fragments + 1u;
	available_bytes = 0u;
		
	do {
		
		/* Check to see if this block is free.  */
		if (*((RAW_U32 *)(current_ptr + sizeof(RAW_U8  *))) == RAW_BYTE_BLOCK_FREE) {

			/* Block is free, see if it is large enough.  */

			/* Pickup the next block's pointer.  */
			next_ptr = *((RAW_U8 * *) current_ptr);

			/* Calculate the number of bytes available in this block.  */
			available_bytes = next_ptr - current_ptr - sizeof(RAW_U8  *) - sizeof(RAW_U32);

			/* If this is large enough, we are done because our first-fit algorithm
			has been satisfied!  */
			if (available_bytes >= memory_size) {

				/* Find the suitable position */
				break;
			}
						
			else {
		    
				/* Clear the available bytes variable.  */
				available_bytes = 0u;

				/* Not enough memory, check to see if the neighbor is 
				free and can be merged.  */
				if (*((RAW_U32 *)(next_ptr + sizeof(RAW_U8 *))) == RAW_BYTE_BLOCK_FREE) {

					/* Yes, the neighbor block can be merged. This is quickly accomplished
					   by updating the current block with the next block's pointer.  */
					*((RAW_U8 * *)current_ptr) = *((RAW_U8 * *)next_ptr);

					/* Reduce the fragment count; no need to increase available bytes
					   since they are already counted. */

					pool_ptr->raw_byte_pool_fragments--;
						
				}
				
				else {

					/* Neighbor is not free so get to the next search point*/
					current_ptr = *((RAW_U8 * *)next_ptr);

					/* Reduce the number of blocks to examine since we have skipped one. */
					if (examine_blocks) {
						examine_blocks--;
					}
				}
			}
		}
		
		else {

			/* Block is not free, move to next block.  */
			current_ptr = *((RAW_U8 * *)current_ptr);
		}

		/* finish one block search*/
		if (examine_blocks) {
			examine_blocks--;
		}

		/* Restore interrupts temporarily.  */
		RAW_CPU_ENABLE();

		/* Disable interrupts.  */
		RAW_CPU_DISABLE();

		/* Determine if anything has changed in terms of pool ownership.  */
		if (pool_ptr ->raw_byte_pool_owner != raw_task_active) {

			/* The pool changed ownership while interrupts were enabled, so reset the search. */

			current_ptr = pool_ptr ->raw_byte_pool_search;
			examine_blocks = pool_ptr ->raw_byte_pool_fragments + 1u;

			/* Setup our ownership again.  */
			pool_ptr ->raw_byte_pool_owner = raw_task_active;
		}

	} while (examine_blocks);

	/* Determine if a block was found.  If so, determine if it needs to be
	split.  */
	if (available_bytes) {

		/* Split this block if the remainder is big enough to form a new free block. */
		if ((available_bytes - memory_size) >= ((RAW_U32) RAW_BYTE_BLOCK_MIN)) {

			/* Split the block.  */
			next_ptr = current_ptr + memory_size + sizeof(RAW_U8 *) + sizeof(RAW_U32);

			/* Setup the new free block.  */
			*((RAW_U8 * *) next_ptr) = *((RAW_U8 * *) current_ptr);
			*((RAW_U32 *) (next_ptr + sizeof(RAW_U8 *))) = RAW_BYTE_BLOCK_FREE;

			/* Increase the total fragment counter.  */
			pool_ptr->raw_byte_pool_fragments++;

			/* Update the current pointer to point at the newly created block.  */
			*((RAW_U8 * *)current_ptr) = next_ptr;

			/* Set available equal to memory size for subsequent calculation.  */
			available_bytes = memory_size;
		}

		/* In any case, mark the current block as allocated.  */
		*((RAW_U32 *)(current_ptr + sizeof(RAW_U8 *))) = RAW_BYTE_BLOCK_ALLOC;

		/* Reduce the number of available bytes in the pool.  */
		pool_ptr->raw_byte_pool_available = pool_ptr->raw_byte_pool_available - available_bytes
		                   - sizeof(RAW_U8 *) - sizeof(RAW_U32);

		/* Adjust the pointer for the application.  */
		current_ptr = current_ptr + sizeof(RAW_U8 *) + sizeof(RAW_U32);

	}
		
	else {

		/* Set current pointer to NULL to indicate nothing was found. */
		current_ptr = 0;
	}


	/* Restore interrupts temporarily.  */
	RAW_CPU_ENABLE();

	/* Return the searched result*/
	return current_ptr;
		
}
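The offsets of sizeof(RAW_U8 *) and sizeof(RAW_U32) used above imply a small implicit header in front of every byte-pool block; the following struct is purely an illustration of that layout, the real code never declares it and works with raw pointer arithmetic instead:

/* Illustration only: the implicit per-block header that raw_byte_pool_search     */
/* and raw_byte_release walk via pointer arithmetic.                              */
typedef struct byte_block_header_sketch {
	RAW_U8  *next_block;   /* *((RAW_U8 **)current_ptr): start of the following block */
	RAW_U32  state;        /* RAW_BYTE_BLOCK_FREE or RAW_BYTE_BLOCK_ALLOC             */
	/* User memory starts at current_ptr + sizeof(RAW_U8 *) + sizeof(RAW_U32),        */
	/* and available_bytes = next_block - current_ptr - that header size.             */
} BYTE_BLOCK_HEADER_SKETCH;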
Example No. 13
static RAW_OS_ERROR task_0_post(EVENT_HANLDER *p, TASK_0_EVENT_TYPE ev, void *event_data, RAW_U8 opt_send_method)
{
	RAW_U16 task_0_event_position;
	RAW_SR_ALLOC();

	/* This function must not be called from task context (interrupt only). */
	if (raw_int_nesting == 0) {

		return RAW_NOT_CALLED_BY_TASK;
	}

	/*fastest way to make task 0 ready*/
	RAW_CPU_DISABLE();

	/* If the message count is at its maximum, interrupts are probably arriving too fast; check your interrupt source. */
	if(task_0_events == MAX_TASK_EVENT) {
		RAW_CPU_ENABLE();
		TRACE_TASK_0_OVERFLOW(p, ev, event_data);
		return RAW_TASK_0_EVENT_EXHAUSTED;
	}

	++task_0_events;

	/*Update the debug information*/
	if (task_0_events > peak_events) {
		peak_events = task_0_events;
	}
	
	if (opt_send_method == SEND_TO_END) {

		task_0_event_position = task_0_event_head;
		
		task_0_event_head++;

		if (task_0_event_head == MAX_TASK_EVENT) {   
			
			task_0_event_head = 0;
			
		}
	}

	else {

		if (task_0_event_end == 0) { 			   
			task_0_event_end = MAX_TASK_EVENT;
		}

		task_0_event_end--;

		task_0_event_position = task_0_event_end;

	}

	/*Store the message*/
	task_0_events_queue[task_0_event_position].ev = ev;
	task_0_events_queue[task_0_event_position].event_data = event_data;
	task_0_events_queue[task_0_event_position].p = p;

	RAW_CPU_ENABLE();

	return RAW_SUCCESS;
 
}
Example No. 14
static void int_msg_handler(TASK_0_EVENT_TYPE ev, void *msg_data)
{
	OBJECT_INT_MSG *int_msg;
	RAW_OS_ERROR   int_msg_ret;
	
	RAW_SR_ALLOC();
	
	int_msg = msg_data;
	int_msg_ret = RAW_SYSTEM_ERROR;
	
	switch (ev) {

		#if (CONFIG_RAW_TASK_SUSPEND > 0)
		
		case RAW_TYPE_SUSPEND:
			int_msg_ret = task_suspend((RAW_TASK_OBJ *)(int_msg->object));
			break;

			
		#endif
		
		#if (CONFIG_RAW_TASK_RESUME > 0)
		
		case RAW_TYPE_RESUME:
			int_msg_ret = task_resume((RAW_TASK_OBJ *)(int_msg->object));
			break;

			
		#endif

		#if (CONFIG_RAW_SEMAPHORE > 0)
		
		case RAW_TYPE_SEM:
			int_msg_ret = semaphore_put((RAW_SEMAPHORE *)(int_msg->object), WAKE_ONE_SEM);
			break;

		case RAW_TYPE_SEM_ALL:
			int_msg_ret = semaphore_put((RAW_SEMAPHORE *)(int_msg->object), WAKE_ALL_SEM);
			break;
			
		#endif
		
		
		#if (CONFIG_RAW_QUEUE > 0)
		
		case RAW_TYPE_Q_FRONT:
			int_msg_ret = msg_post((RAW_QUEUE *)(int_msg->object), int_msg->msg, SEND_TO_FRONT, WAKE_ONE_QUEUE);
			break;

		case RAW_TYPE_Q_END:
			int_msg_ret = msg_post((RAW_QUEUE *)(int_msg->object), int_msg->msg, SEND_TO_END, WAKE_ONE_QUEUE);
			break;

		case RAW_TYPE_Q_ALL:
			int_msg_ret = msg_post((RAW_QUEUE *)(int_msg->object), int_msg->msg, int_msg->opt, WAKE_ALL_QUEUE);
			break;

		#endif


		#if (CONFIG_RAW_QUEUE_SIZE > 0)
		
		case RAW_TYPE_Q_SIZE_FRONT:
			int_msg_ret = msg_size_post((RAW_QUEUE_SIZE *)(int_msg->object), int_msg->msg, int_msg->msg_size, SEND_TO_FRONT, WAKE_ONE_QUEUE);
			break;

		case RAW_TYPE_Q_SIZE_END:
			int_msg_ret = msg_size_post((RAW_QUEUE_SIZE *)(int_msg->object), int_msg->msg, int_msg->msg_size, SEND_TO_END, WAKE_ONE_QUEUE);
			break;

		case RAW_TYPE_Q_SIZE_ALL:
			int_msg_ret = msg_size_post((RAW_QUEUE_SIZE *)(int_msg->object), int_msg->msg, int_msg->msg_size, int_msg->opt, WAKE_ALL_QUEUE);
			break;

		#endif

		#if (CONFIG_RAW_EVENT > 0)
		
		case RAW_TYPE_EVENT:
			int_msg_ret = event_set((RAW_EVENT *)(int_msg->object), int_msg->event_flags, int_msg->opt);
			break;
			
		#endif

		#if (CONFIG_RAW_IDLE_EVENT > 0)
		
		case RAW_TYPE_IDLE_END_EVENT_POST:
			int_msg_ret = event_post((ACTIVE_EVENT_STRUCT *)(int_msg->object), int_msg->msg_size, int_msg->msg, SEND_TO_END);
			break;

		case RAW_TYPE_IDLE_FRONT_EVENT_POST:
			int_msg_ret = event_post((ACTIVE_EVENT_STRUCT *)(int_msg->object), int_msg->msg_size, int_msg->msg, SEND_TO_FRONT);
			break;
			
		#endif
		
		default:
			RAW_ASSERT(0);
			break;

	}

	if (int_msg_ret != RAW_SUCCESS) {

		/* Trace the error here; there is no way to inform the user in this condition. */
		TRACE_INT_MSG_HANDLE_ERROR(ev, int_msg->object, int_msg_ret);
	}

	RAW_CPU_DISABLE();

	int_msg->next = free_object_int_msg;
	free_object_int_msg = int_msg;
	
	RAW_CPU_ENABLE();

}
Example No. 15
void idle_run(void) 
{
    ACTIVE_EVENT_STRUCT *a;
	STATE_EVENT temp;
	
	ACTIVE_EVENT_STRUCT_CB *acb;
	RAW_U8 x;
	RAW_U8 y;
	RAW_U8 idle_high_priority;
	
	RAW_SR_ALLOC();

	while (1) { 
		
		RAW_CRITICAL_ENTER();

		/* If there are pending events, process one. */
		if (raw_idle_rdy_grp) {

			y = raw_idle_map_table[raw_idle_rdy_grp];
			x = y >> 3;
			idle_high_priority = (y + raw_idle_map_table[raw_rdy_tbl[x]]);
           
			acb = &active_idle_task[idle_high_priority];
			a = active_idle_task[idle_high_priority].act;

			--a->nUsed;
			
			if (a->nUsed == 0) {         
		
				raw_rdy_tbl[a->priority_y] &= (RAW_U8)~a->priority_bit_x;
				
				if (raw_rdy_tbl[a->priority_y] == 0) {                      /* Clear the event group bit if this was the only event pending */
					raw_idle_rdy_grp &= (RAW_U8)~a->priority_bit_y;
				}
			}
			
			temp.sig = acb->queue[a->tail].sig;

			temp.which_pool = acb->queue[a->tail].para;

			a->tail++;

			if (a->tail == acb->end) {                  
			    a->tail = 0;
			}

			RAW_CRITICAL_EXIT();

			#if (RAW_FSM_ACTIVE > 0)
			
			fsm_exceute(&a->super, &temp);                    
			
			#else
			
			hsm_exceute(&a->super, &temp);                   
			
			#endif
			
		}

		else {

			RAW_CRITICAL_EXIT();

			RAW_CPU_DISABLE();

			if (raw_idle_rdy_grp == 0) {
				idle_event_user();
			}
		
			RAW_CPU_ENABLE();
			
		}
		
    }
}
Example No. 16
/*
************************************************************************************************************************
*                                       Allocate block memory from a pool
*
* Description: This function is called to allocate a block from a memory pool.
*
* Arguments  : pool_ptr is the address of the pool
*                   ---------------------
*              block_ptr is the address of a pointer, which will be filled with the address of the allocated block
*                   ---------------------
*
* Returns    : RAW_NO_MEMORY: No block is available.
*              RAW_SUCCESS: raw os returns success
*
* Note(s)    : If *block_ptr is 0, no memory is available now. This method will not cause fragmentation.
*
*
************************************************************************************************************************
*/
RAW_U16 raw_block_allocate(MEM_POOL *pool_ptr, RAW_VOID **block_ptr)
{

    RAW_U16				status;

    RAW_U8		*work_ptr;

    RAW_SR_ALLOC();

#if (RAW_BLOCK_FUNCTION_CHECK > 0)

    if (pool_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    if (block_ptr == 0) {

        return RAW_NULL_POINTER;
    }

#endif

    if (pool_ptr->common_block_obj.object_type != RAW_BLOCK_OBJ_TYPE) {

        return RAW_ERROR_OBJECT_TYPE;
    }

    RAW_CPU_DISABLE();

    /* Determine if there is an available block.  */
    if (pool_ptr->raw_block_pool_available) {

        /* Yes, a block is available.  Decrement the available count.  */
        pool_ptr->raw_block_pool_available--;

        /* Pickup the current block pointer.  */
        work_ptr = pool_ptr->raw_block_pool_available_list;

        /* Return the first available block to the caller.  */
        *((RAW_U8 **)block_ptr) = work_ptr;

        /* Modify the available list to point at the next block in the pool. */
        pool_ptr->raw_block_pool_available_list = *((RAW_U8 **)work_ptr);

        /* Set status to success.  */
        status =  RAW_SUCCESS;
    }

    /*if no block memory is available then just return*/
    else {

        *((RAW_U8 **)block_ptr) = 0;
        status = RAW_NO_MEMORY;
        TRACE_BLOCK_NO_MEMORY(raw_task_active, pool_ptr);
    }

    RAW_CPU_ENABLE();

    return status;

}
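A hedged fixed-size block sketch pairing raw_block_allocate with raw_block_release from the earlier example; PACKET and packet_pool are illustrative, and the pool is assumed to have been created elsewhere with blocks of at least sizeof(PACKET):

/* Usage sketch (illustrative names): allocate a fixed-size block, use it, release it. */
typedef struct packet {
	RAW_U32 id;
	RAW_U8  payload[32];
} PACKET;

extern MEM_POOL packet_pool;                    /* assumed to be created elsewhere */

void handle_one_packet(void)
{
	RAW_VOID *block;
	PACKET   *pkt;

	if (raw_block_allocate(&packet_pool, &block) != RAW_SUCCESS) {
		/* RAW_NO_MEMORY: block is 0. */
		return;
	}

	pkt = (PACKET *)block;
	pkt->id = 1u;
	/* ... fill payload and process the packet ... */

	/* Constant-time return of the block to the pool's free list. */
	if (raw_block_release(&packet_pool, block) != RAW_SUCCESS) {
		RAW_ASSERT(0);
	}
}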