/*!
 * \private
 *
 * \brief Writes one entry (seven data words plus a timestamp) to the
 * lightweight log.
 *
 * Internal worker behind _lwlog_write; performs no interrupt lockout itself,
 * so the caller is presumably responsible for serializing access — TODO
 * confirm against _lwlog_write.
 *
 * \param[in] log_number Log number of a previously created lightweight log.
 * \param[in] p1 Data to be written into the log entry.
 * \param[in] p2 Data to be written into the log entry.
 * \param[in] p3 Data to be written into the log entry.
 * \param[in] p4 Data to be written into the log entry.
 * \param[in] p5 Data to be written into the log entry.
 * \param[in] p6 Data to be written into the log entry.
 * \param[in] p7 Data to be written into the log entry.
 *
 * \return MQX_OK
 * \return LOG_FULL (Log is full and LOG_OVERWRITE is not set.)
 * \return LOG_DISABLED (Log is disabled.)
 * \return LOG_DOES_NOT_EXIST (Log_number was not created.)
 * \return MQX_INVALID_COMPONENT_HANDLE (Log component data is not valid.)
 *
 * \see _lwlog_write
 */
_mqx_uint _lwlog_write_internal
   (
      _mqx_uint     log_number,
      _mqx_max_type p1,
      _mqx_max_type p2,
      _mqx_max_type p3,
      _mqx_max_type p4,
      _mqx_max_type p5,
      _mqx_max_type p6,
      _mqx_max_type p7
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR     kernel_data;
   LWLOG_COMPONENT_STRUCT_PTR log_component_ptr;
   LWLOG_HEADER_STRUCT_PTR    log_header_ptr;
   _mqx_max_type             *data_ptr;
   LWLOG_ENTRY_STRUCT_PTR     log_ptr;
#if MQX_LWLOG_TIME_STAMP_IN_TICKS == 0
   TIME_STRUCT                time;
   MQX_TICK_STRUCT            ticks;
#endif

   _GET_KERNEL_DATA(kernel_data);

   log_component_ptr = (LWLOG_COMPONENT_STRUCT_PTR) kernel_data->KERNEL_COMPONENTS[KERNEL_LWLOG];
   log_header_ptr    = log_component_ptr->LOGS[log_number];

#if MQX_CHECK_VALIDITY
   /* Guard against a destroyed or corrupted log component. */
   if (log_component_ptr->VALID != LWLOG_VALID) {
      return (MQX_INVALID_COMPONENT_HANDLE);
   } /* Endif */
#endif
#if MQX_CHECK_ERRORS
   if (log_header_ptr == NULL) {
      return (LOG_DOES_NOT_EXIST);
   } /* Endif */
#endif

   if (!(log_header_ptr->FLAGS & LWLOG_ENABLED)) {
      return (LOG_DISABLED);
   } /* Endif */

   /* Candidate slot is the one after the last written entry (entries form a
    * circular list linked through NEXT_PTR). */
   log_ptr = log_header_ptr->WRITE_PTR->NEXT_PTR;

   if (log_header_ptr->CURRENT_ENTRIES >= log_header_ptr->MAX_ENTRIES) {
      if (log_header_ptr->FLAGS & LOG_OVERWRITE) {
         /* Log is full but may overwrite: the slot being reused is the
          * oldest entry, so advance the reader's cursor (if it points at the
          * reused slot) and the oldest-entry pointer past it. */
         if (log_ptr == log_header_ptr->READ_PTR) {
            log_header_ptr->READ_PTR = log_ptr->NEXT_PTR;
         } /* Endif */
         log_header_ptr->OLDEST_PTR = log_ptr->NEXT_PTR;
      } else {
         return (LOG_FULL);
      } /* Endif */
   } else {
      log_header_ptr->CURRENT_ENTRIES++;
   } /* Endif */

#if MQX_LWLOG_TIME_STAMP_IN_TICKS == 0
   /* Timestamp stored as seconds / milliseconds / microseconds, derived
    * from absolute time (kernel time plus offset). */
   log_ptr->MICROSECONDS = (uint32_t)_time_get_microseconds();
   PSP_ADD_TICKS(&kernel_data->TIME, &kernel_data->TIME_OFFSET, &ticks);
   PSP_TICKS_TO_TIME(&ticks, &time);
   log_ptr->SECONDS      = time.SECONDS;
   log_ptr->MILLISECONDS = time.MILLISECONDS;
#else
   /* Timestamp stored in raw tick form; HW_TICKS refreshed from hardware,
    * then the time offset folded in in place. */
   log_ptr->TIMESTAMP          = kernel_data->TIME;
   log_ptr->TIMESTAMP.HW_TICKS = _time_get_hwticks();
   PSP_ADD_TICKS(&log_ptr->TIMESTAMP, &kernel_data->TIME_OFFSET, &log_ptr->TIMESTAMP);
#endif

   log_ptr->SEQUENCE_NUMBER = log_header_ptr->NUMBER++;

   /* Copy the seven caller-supplied data words into the entry. */
   data_ptr    = &log_ptr->DATA[0];
   *data_ptr++ = p1;
   *data_ptr++ = p2;
   *data_ptr++ = p3;
   *data_ptr++ = p4;
   *data_ptr++ = p5;
   *data_ptr++ = p6;
   *data_ptr   = p7;

   /* Commit: the new entry becomes the last-written entry. */
   log_header_ptr->WRITE_PTR = log_ptr;

   return (MQX_OK);

} /* Endbody */
/*!
 * \brief Gets the number of free messages in the message pool.
 *
 * The function fails if either:
 * \li Message component is not created.
 * \li Pool_id is for a private message pool, but does not represent a valid one.
 *
 * \param[in] pool One of the following:
 * \li Private message pool for which to get the number of free messages.
 * \li MSGPOOL_NULL_POOL_ID (for system message pools).
 *
 * \return The number of free messages in the private message pool (success).
 * \return The number of free messages in all system message pools (success).
 * \return 0 (Success: No free messages.)
 * \return 0 (Failure: see Description.)
 *
 * \warning If pool_id does not represent a valid private message pool, calls
 * _task_set_error() to set the task error code to MSGPOOL_INVALID_POOL_ID
 *
 * \see _msgpool_create
 * \see _msgpool_destroy
 * \see _msg_free
 * \see _msg_alloc_system
 * \see _task_set_error
 * \see _msg_create_component
 */
_mqx_uint _msg_available
   (
      _pool_id pool
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR      kernel_data;
   MSG_COMPONENT_STRUCT_PTR    msg_component_ptr;
   register MSGPOOL_STRUCT_PTR msgpool_ptr;
   _mqx_uint                   i;
   _mqx_uint                   count;

   _GET_KERNEL_DATA(kernel_data);

   msg_component_ptr = _GET_MSG_COMPONENT_STRUCT_PTR(kernel_data);
#if MQX_CHECK_ERRORS
   /* Message component never created: report "no messages". */
   if (msg_component_ptr == NULL) {
      return(0);
   } /* Endif */
#endif
   msgpool_ptr = msg_component_ptr->MSGPOOLS_PTR;
#if MQX_CHECK_ERRORS
   if (msgpool_ptr == NULL) {
      return(0);
   }/* Endif */
#endif

   if (pool == MSGPOOL_NULL_POOL_ID) {
      /* Sum the free counts of every valid SYSTEM pool.  Interrupts are
       * locked so the pool array cannot change mid-walk. */
      count = 0;
      _INT_DISABLE();
      /* +1 then pre-decrement: iterates MAX_MSGPOOLS_EVER times. */
      i = msg_component_ptr->MAX_MSGPOOLS_EVER + 1;
      while ( --i ) {
         if (
#if MQX_CHECK_VALIDITY
            (msgpool_ptr->VALID == MSG_VALID) &&
#endif
            (msgpool_ptr->MSGPOOL_TYPE == SYSTEM_MSG_POOL))
         {
            count += msgpool_ptr->SIZE;
         } /* Endif */
         ++msgpool_ptr;
      } /* Endwhile */
      _INT_ENABLE();
      return count;
   } else {
      /* Private pool: a _pool_id is a pointer to its MSGPOOL_STRUCT. */
      msgpool_ptr = (MSGPOOL_STRUCT_PTR)pool;
      if (
#if MQX_CHECK_VALIDITY
         (msgpool_ptr->VALID != MSG_VALID) ||
#endif
         (msgpool_ptr->MSGPOOL_TYPE != MSG_POOL) )
      {
         _task_set_error(MSGPOOL_INVALID_POOL_ID);
         return (0);
      } /* Endif */
      /* SIZE is the current free-message count of this pool. */
      return (_mqx_uint)msgpool_ptr->SIZE;
   } /* Endif */

} /* Endbody */
/*!
 * \brief Allocates a message from a system message pool.
 *
 * The size of the message is determined by the message size that a task
 * specified when it called _msgpool_create_system().
 * \n The message is a resource of the task until the task either frees it
 * (_msg_free()) or puts it on a message queue (_msgq_send family of functions.)
 *
 * \param[in] message_size Maximum size (in single-addressable units) of the
 * message.
 *
 * \return Pointer to a message of at least message_size single-addressable
 * units (success).
 * \return NULL (Failure: message component is not created.)
 *
 * \warning On failure, calls _task_set_error() to set one of the following task
 * error codes:
 * \li MQX_COMPONENT_DOES_NOT_EXIST (Message component is not created.)
 * \li Task error codes from _mem_alloc_system() (If MQX needs to grow the pool.)
 *
 * \see _msg_alloc
 * \see _msg_free
 * \see _msgpool_create_system
 * \see _msgq_send
 * \see _task_set_error
 * \see MESSAGE_HEADER_STRUCT
 */
pointer _msg_alloc_system
   (
      _msg_size message_size
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR               kernel_data;
   MSG_COMPONENT_STRUCT_PTR             msg_component_ptr;
   register INTERNAL_MESSAGE_STRUCT_PTR imsg_ptr;
   register MESSAGE_HEADER_STRUCT_PTR   message_ptr;
   register MSGPOOL_STRUCT_PTR          msgpool_ptr;
   uint_16                              grow_number;

   _GET_KERNEL_DATA(kernel_data);
   _KLOGE2(KLOG_msg_alloc_system, message_size );

   msg_component_ptr = _GET_MSG_COMPONENT_STRUCT_PTR(kernel_data);
#if MQX_CHECK_ERRORS
   if (msg_component_ptr == NULL) {
      _task_set_error(MQX_COMPONENT_DOES_NOT_EXIST);
      _KLOGX2( KLOG_msg_alloc_system, NULL );
      return(NULL);
   }/* Endif */
#endif

   message_ptr = NULL;
   _INT_DISABLE();
   /* System pools are kept in a list ordered smallest message size first
    * (see _msgpool_create_internal): take the first pool big enough. */
   msgpool_ptr = msg_component_ptr->SMALLEST_MSGPOOL_PTR;
   while (msgpool_ptr != NULL) {
      if (msgpool_ptr->MESSAGE_SIZE >= message_size) {
         imsg_ptr = msgpool_ptr->MSG_FREE_LIST_PTR;
         if ( (imsg_ptr == NULL) &&
              (msgpool_ptr->GROW_NUMBER) &&
              (msgpool_ptr->MAX < msgpool_ptr->GROW_LIMIT) )
         {
            /* Attempt to add elements to the pool */
            grow_number = msgpool_ptr->GROW_NUMBER;
            /* Clamp the growth so the pool never exceeds its limit. */
            if ( ((uint_16)(msgpool_ptr->MAX + grow_number) > msgpool_ptr->GROW_LIMIT)) {
               grow_number = msgpool_ptr->GROW_LIMIT - msgpool_ptr->MAX;
            } /* Endif */
            _msgpool_add_internal(msgpool_ptr, grow_number);
            imsg_ptr = msgpool_ptr->MSG_FREE_LIST_PTR;
         } /* Endif */
         if ( imsg_ptr != NULL ) {
            /* Pop the message off the free list while interrupts are still
             * disabled, then re-enable before initializing the header. */
            msgpool_ptr->MSG_FREE_LIST_PTR = imsg_ptr->NEXT;
            --msgpool_ptr->SIZE;
            _INT_ENABLE();
            imsg_ptr->FREE   = FALSE;
            imsg_ptr->QUEUED = FALSE;
            if (kernel_data->IN_ISR) {
               /* Allocated from interrupt context: no owning task. */
               imsg_ptr->TD_PTR = NULL;
            } else {
               imsg_ptr->TD_PTR = kernel_data->ACTIVE_PTR;
            } /* Endif */
            message_ptr = (MESSAGE_HEADER_STRUCT_PTR)&imsg_ptr->MESSAGE;
            message_ptr->TARGET_QID = MSGQ_NULL_QUEUE_ID;
            message_ptr->SOURCE_QID = MSGQ_NULL_QUEUE_ID;
            message_ptr->SIZE       = message_size;
            message_ptr->CONTROL    = MSG_HDR_ENDIAN | MSG_DATA_ENDIAN;
            _KLOGX2(KLOG_msg_alloc_system, message_ptr);
            return (pointer)message_ptr;
         } /* Endif */
      } /* Endif */
      msgpool_ptr = msgpool_ptr->NEXT_MSGPOOL_PTR;
   } /* Endwhile */
   /* No pool could satisfy the request; message_ptr is still NULL. */
   _int_enable();

   _KLOGX2(KLOG_msg_alloc_system, message_ptr);
   return (pointer)message_ptr;

} /* Endbody */
/*! * \brief Tests all the periodic queues and their lightweight timers for * validity and consistency. * * \param[out] period_error_ptr Pointer to the first periodic queue that has * an error (NULL if no error is found). * \param[out] timer_error_ptr Pointer to the first timer that has an error * (NULL if no error is found). * * \return MQX_OK (No periodic queues have been created or no errors found * in any periodic queues or timers.) * \return MQX_LWTIMER_INVALID (Period_ptr points to an invalid periodic queue.) * \return Error from _queue_test() (A periodic queue or its queue was in error.) * * \see _lwtimer_add_timer_to_queue * \see _lwtimer_cancel_period * \see _lwtimer_cancel_timer * \see _lwtimer_create_periodic_queue */ _mqx_uint _lwtimer_test ( void **period_error_ptr, void **timer_error_ptr ) { /* Body */ KERNEL_DATA_STRUCT_PTR kernel_data; LWTIMER_STRUCT_PTR timer_ptr; LWTIMER_PERIOD_STRUCT_PTR period_ptr; _mqx_uint result; _GET_KERNEL_DATA(kernel_data); _KLOGE3(KLOG_lwtimer_test, period_error_ptr, timer_error_ptr); *period_error_ptr = NULL; *timer_error_ptr = NULL; /* * It is not considered an error if the lwtimer component has not been * created yet */ if (kernel_data->LWTIMERS.NEXT == NULL) { return (MQX_OK); } /* Endif */ result = _queue_test(&kernel_data->LWTIMERS, period_error_ptr); if (result != MQX_OK) { _KLOGX3(KLOG_lwtimer_test, result, *period_error_ptr); return (result); } /* Endif */ _int_disable(); period_ptr = (void *) kernel_data->LWTIMERS.NEXT; while ((void *) period_ptr != (void *) &kernel_data->LWTIMERS) { if (period_ptr->VALID != LWTIMER_VALID) { _int_enable(); *period_error_ptr = period_ptr; _KLOGX3(KLOG_lwtimer_test, MQX_LWTIMER_INVALID, period_ptr); return (MQX_LWTIMER_INVALID); } /* Endif */ result = _queue_test(&period_ptr->TIMERS, timer_error_ptr); if (result != MQX_OK) { _int_enable(); *period_error_ptr = period_ptr; _KLOGX4(KLOG_lwtimer_test, result, *period_error_ptr, *timer_error_ptr); return (result); } /* Endif */ 
timer_ptr = (void *) period_ptr->TIMERS.NEXT; while (timer_ptr != (void *) &period_ptr->TIMERS) { if (timer_ptr->VALID != LWTIMER_VALID) { *period_error_ptr = period_ptr; *timer_error_ptr = timer_ptr; _KLOGX4(KLOG_lwtimer_test, MQX_LWTIMER_INVALID, period_ptr, timer_ptr); return (MQX_LWTIMER_INVALID); } /* Endif */ timer_ptr = (void *) timer_ptr->LINK.NEXT; } /* Endwhile */ period_ptr = (void *) period_ptr->LINK.NEXT; } /* Endwhile */ _int_enable(); _KLOGX2(KLOG_lwtimer_test, MQX_OK); return (MQX_OK); } /* Endbody */
/*!
 * \brief Changes a task's permanent (home) priority and, when appropriate,
 * repositions the task in the scheduler and in any priority-ordered task
 * queue it is blocked on.
 *
 * \param[in]  task_id      Task whose priority is to be changed (resolved
 *                          via _task_get_td()).
 * \param[in]  new_priority New priority; must not exceed the configured
 *                          lowest task priority (larger number = lower
 *                          priority).
 * \param[out] priority_ptr Where the previous home priority is stored.
 *
 * \return MQX_OK
 * \return MQX_INVALID_PARAMETER (New_priority is out of range.)
 * \return MQX_INVALID_TASK_ID (Task_id does not name a valid task.)
 */
_mqx_uint _task_set_priority
   (
      /* [IN] the task id to use */
      _task_id task_id,

      /* [IN] the new task priority */
      _mqx_uint new_priority,

      /* [OUT] the location where the old task priority is to be placed */
      _mqx_uint_ptr priority_ptr
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR kernel_data;
   READY_Q_STRUCT_PTR     ready_q_ptr;
   TD_STRUCT_PTR          td_ptr;
   TASK_QUEUE_STRUCT_PTR  task_queue_ptr;

   _GET_KERNEL_DATA(kernel_data);
   _KLOGE3(KLOG_task_set_priority, task_id, new_priority);

#if MQX_CHECK_ERRORS
   /* Larger number means lower priority; reject anything beyond the
    * configured lowest level. */
   if (new_priority > kernel_data->LOWEST_TASK_PRIORITY) {
      _KLOGX2(KLOG_task_set_priority, MQX_INVALID_PARAMETER);
      return(MQX_INVALID_PARAMETER);
   }/* Endif */
#endif

   td_ptr = (TD_STRUCT_PTR)_task_get_td(task_id);
   if (td_ptr == NULL) {
      _KLOGX2(KLOG_task_set_priority, MQX_INVALID_TASK_ID);
      return(MQX_INVALID_TASK_ID);
   } /* Endif */

   _int_disable();

   /* Return old priority */
   *priority_ptr = td_ptr->HOME_QUEUE->PRIORITY;

   /* Make the change permanent */
   ready_q_ptr = kernel_data->READY_Q_LIST;
   /* Ready queues are allocated contiguously with the priority-0 queue at
    * READY_Q_LIST and priority p located p entries below it (see
    * _psp_init_readyqs), hence the pointer subtraction. */
   td_ptr->HOME_QUEUE = ready_q_ptr - new_priority;
   if (td_ptr->BOOSTED) {
      /* Can only change priority to a higher (lower value) */
      if (new_priority < td_ptr->MY_QUEUE->PRIORITY) {
         /* Move the task to the correct priority level */
         _sched_set_priority_internal(td_ptr, new_priority);
      } /* Endif */
   } else {
      /* Move the task to the correct priority level */
      _sched_set_priority_internal(td_ptr, new_priority);
   } /* Endif */

   if (td_ptr->STATE == TASK_QUEUE_BLOCKED) {
      /* Recover the owning task queue from the td's INFO field, which
       * points at the queue's embedded TD_QUEUE member. */
      task_queue_ptr = (TASK_QUEUE_STRUCT_PTR)
         ((uchar_ptr)td_ptr->INFO - FIELD_OFFSET(TASK_QUEUE_STRUCT, TD_QUEUE));
      if (task_queue_ptr->POLICY & MQX_TASK_QUEUE_BY_PRIORITY) {
         /* Requeue the td by priority */
         _QUEUE_REMOVE(&task_queue_ptr->TD_QUEUE, td_ptr);
         _sched_insert_priorityq_internal(&task_queue_ptr->TD_QUEUE, td_ptr);
      }/* Endif */
   }/* Endif */

   /* Allow higher priority tasks to run */
   _CHECK_RUN_SCHEDULER();

   _int_enable();

   _KLOGX2(KLOG_task_set_priority, MQX_OK);
   return MQX_OK;

} /* Endbody */
/*!
 * \brief Used by a task to set the specified event bits in an event.
 *
 * Any tasks waiting on the event whose wait condition becomes satisfied are
 * made ready; bits that are auto-clearing are consumed by the first task
 * they wake.
 *
 * \param[in] event_ptr Pointer to the lightweight event to set bits in.
 * \param[in] bit_mask Bit mask. Each bit represents an event bit to be set.
 *
 * \return MQX_OK
 * \return MQX_LWEVENT_INVALID (Lightweight event was invalid.)
 *
 * \see _lwevent_create
 * \see _lwevent_destroy
 * \see _lwevent_set_auto_clear
 * \see _lwevent_clear
 * \see _lwevent_test
 * \see _lwevent_wait_for
 * \see _lwevent_wait_ticks
 * \see _lwevent_wait_until
 * \see _lwevent_get_signalled
 * \see LWEVENT_STRUCT
 */
_mqx_uint _lwevent_set
   (
      LWEVENT_STRUCT_PTR event_ptr,
      _mqx_uint          bit_mask
   )
{
   KERNEL_DATA_STRUCT_PTR   kernel_data;
   QUEUE_ELEMENT_STRUCT_PTR q_ptr;
   QUEUE_ELEMENT_STRUCT_PTR next_q_ptr;
   TD_STRUCT_PTR            td_ptr;
   _mqx_uint                set_bits;

#if MQX_ENABLE_USER_MODE && MQX_ENABLE_USER_STDAPI
   /* In user mode, route through the syscall wrapper. */
   if (MQX_RUN_IN_USER_MODE) {
      return _usr_lwevent_set(event_ptr, bit_mask);
   }
#endif

   _GET_KERNEL_DATA(kernel_data);
   _KLOGE3(KLOG_lwevent_set, event_ptr, bit_mask);
   _INT_DISABLE();

#if MQX_CHECK_VALIDITY
   if (event_ptr->VALID != LWEVENT_VALID) {
      _int_enable();
      _KLOGX2(KLOG_lwevent_set, MQX_LWEVENT_INVALID);
      return (MQX_LWEVENT_INVALID);
   } /* Endif */
#endif

   /* Working copy: current event value plus the newly requested bits. */
   set_bits = event_ptr->VALUE | bit_mask;

   if (_QUEUE_GET_SIZE(&event_ptr->WAITING_TASKS)) {
      /* Schedule waiting task(s) to run if bits ok */
      q_ptr = event_ptr->WAITING_TASKS.NEXT;
      while (q_ptr != (QUEUE_ELEMENT_STRUCT_PTR) ((void *) &event_ptr->WAITING_TASKS)) {
         /* Waiting tasks are linked through their AUX_QUEUE member; back up
          * from the queue element to the enclosing TD_STRUCT. */
         td_ptr = (void *) q_ptr;
         _BACKUP_POINTER(td_ptr, TD_STRUCT, AUX_QUEUE);
         /* Save the successor now: _QUEUE_REMOVE below unlinks q_ptr. */
         next_q_ptr = q_ptr->NEXT;
         /* Wake the task if it wanted ALL its bits and they are all set, or
          * it wanted ANY of its bits and at least one is set. */
         if (((td_ptr->FLAGS & TASK_LWEVENT_ALL_BITS_WANTED) &&
              ((td_ptr->LWEVENT_BITS & set_bits) == td_ptr->LWEVENT_BITS)) ||
             ((!(td_ptr->FLAGS & TASK_LWEVENT_ALL_BITS_WANTED)) &&
              (td_ptr->LWEVENT_BITS & set_bits)))
         {
            _QUEUE_REMOVE(&event_ptr->WAITING_TASKS, q_ptr);
            _TIME_DEQUEUE(td_ptr, kernel_data);
            td_ptr->INFO = 0;
            _TASK_READY(td_ptr, kernel_data);
            /* store information about which bits caused task to be unblocked */
            td_ptr->LWEVENT_BITS &= set_bits;
            /* Auto-clearing bits consumed by this task are removed so later
             * waiters (and the stored VALUE) do not see them. */
            set_bits &= ~(event_ptr->AUTO & td_ptr->LWEVENT_BITS);
         } /* Endif */
         q_ptr = next_q_ptr;
      } /* Endwhile */
   } /* Endif */

   event_ptr->VALUE = set_bits;
   _INT_ENABLE();

   /* May need to let higher priority task run */
   _CHECK_RUN_SCHEDULER();
   _KLOGX2(KLOG_lwevent_set, MQX_OK);
   return (MQX_OK);
}
/*!
 * \private
 *
 * \brief Creates a message pool (private or system) and, for system pools,
 * links it into the size-ordered list of system pools.
 *
 * Creates the message component on demand if it does not exist yet.
 *
 * \param[in] message_size Size of the messages being created; must be at
 *                         least sizeof(MESSAGE_HEADER_STRUCT).
 * \param[in] num_messages Initial number of messages in this pool.
 * \param[in] grow_number  Number of messages to grow the pool by when empty
 *                         (0 = fixed-size pool).
 * \param[in] grow_limit   Maximum number of messages allowed in the pool
 *                         (0 = effectively unlimited, clamped to 0xFFFF).
 * \param[in] pool_type    SYSTEM_MSG_POOL or a regular (private) pool.
 *
 * \return Pool id (pointer to the pool structure) on success.
 * \return 0 on failure; the task error code is set to one of
 *         MSGPOOL_MESSAGE_SIZE_TOO_SMALL, MSGPOOL_OUT_OF_POOLS or
 *         MQX_OUT_OF_MEMORY.
 */
_pool_id _msgpool_create_internal
   (
      /* [IN] size of the messages being created */
      uint_16 message_size,

      /* [IN] no. of messages in this pool */
      uint_16 num_messages,

      /* [IN] no. of messages to grow pool by if empty */
      uint_16 grow_number,

      /* [IN] maximum number of messages allowed in pool */
      uint_16 grow_limit,

      /* [IN] whether this is a system pool or a regular pool */
      _mqx_uint pool_type
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR      kernel_data;
   MSG_COMPONENT_STRUCT_PTR    msg_component_ptr;
   register MSGPOOL_STRUCT_PTR msgpool_ptr;
   register MSGPOOL_STRUCT_PTR temp_msgpool_ptr;
   register MSGPOOL_STRUCT_PTR prev_msgpool_ptr;
   register _mqx_uint          i;
   _mqx_uint                   result;

   _GET_KERNEL_DATA(kernel_data);

   msg_component_ptr = _GET_MSG_COMPONENT_STRUCT_PTR(kernel_data);

#if MQX_CHECK_ERRORS
   /* Every message must at least hold its own header. */
   if (message_size < sizeof(MESSAGE_HEADER_STRUCT)) {
      _task_set_error(MSGPOOL_MESSAGE_SIZE_TOO_SMALL);
      return ((_pool_id) 0);
   } /* Endif */
#endif

   /*
   ** Try to find an available slot in the array of msgpools for a new pool
   ** if MAX_MSGPOOLS_EVER has not yet reached MAX_MSGPOOLS then
   ** simply use MAX_MSGPOOLS_EVER as an index value and then increment it
   ** but if MAX_MSGPOOLS_EVER has reached MAX_MSGPOOLS then
   ** go back and search through the previously assigned headers to see
   ** if one has been deallocated and is available for use
   */
   if (msg_component_ptr == NULL) {
      /* Message component not yet created: create it on demand. */
      result = _msg_create_component();
      msg_component_ptr = _GET_MSG_COMPONENT_STRUCT_PTR(kernel_data);
#if MQX_CHECK_MEMORY_ALLOCATION_ERRORS
      if (msg_component_ptr == NULL) {
         _task_set_error(result);
         return ((_pool_id)0);
      } /* Endif */
#endif
   } /* Endif */

   _int_disable();
   if (msg_component_ptr->MAX_MSGPOOLS_EVER >= msg_component_ptr->MAX_MSGPOOLS) {
      /* Array exhausted: scan for a slot freed by _msgpool_destroy(). */
      msgpool_ptr = &msg_component_ptr->MSGPOOLS_PTR[0];
      for ( i=0; i < msg_component_ptr->MAX_MSGPOOLS; i++ ) {
         if (msgpool_ptr->VALID != MSG_VALID) {
            break;
         } /* Endif */
         msgpool_ptr++;
      } /* Endfor */
      if (i == msg_component_ptr->MAX_MSGPOOLS) {
         _int_enable();
         _task_set_error(MSGPOOL_OUT_OF_POOLS);
         return ((_pool_id)0);
      } /* Endif */
   } else {
      msgpool_ptr = &msg_component_ptr->MSGPOOLS_PTR[
         msg_component_ptr->MAX_MSGPOOLS_EVER++];
   } /* Endif */

   /* Claim the slot (VALID) while interrupts are still disabled; the
    * remaining fields can be initialized with interrupts enabled because
    * GROW_NUMBER = 0 keeps the pool inert until fully set up. */
   msgpool_ptr->VALID        = MSG_VALID;
   msgpool_ptr->MESSAGE_SIZE = message_size;
   msgpool_ptr->GROW_NUMBER  = 0;
   _int_enable();

   msgpool_ptr->MSGPOOL_BLOCK_PTR = NULL;
   msgpool_ptr->MSG_FREE_LIST_PTR = NULL;
   msgpool_ptr->NEXT_MSGPOOL_PTR  = NULL;
   msgpool_ptr->SIZE              = 0;
   msgpool_ptr->MAX               = 0;
   if ( grow_number == 0 ) {
      /* Non-growing pool: the limit is simply the initial count. */
      msgpool_ptr->GROW_LIMIT = num_messages;
   } else if (grow_limit == 0) {
      /* Growing pool with no explicit limit: cap at the uint_16 maximum. */
      msgpool_ptr->GROW_LIMIT = 0xFFFF;
   } else {
      msgpool_ptr->GROW_LIMIT = grow_limit;
   } /* Endif */
   msgpool_ptr->MSGPOOL_TYPE = pool_type;

   if (num_messages) {
      _msgpool_add_internal(msgpool_ptr, num_messages);
      /* no messages could be created, so abort pool creation */
      if (msgpool_ptr->SIZE == 0) {
         msgpool_ptr->VALID = 0;
         _task_set_error(MQX_OUT_OF_MEMORY);
         return ((_pool_id)0);
      } /* Endif */
   } /* Endif */
   /* Enable growing only now that the pool is fully constructed. */
   msgpool_ptr->GROW_NUMBER = grow_number;

   if ( pool_type == SYSTEM_MSG_POOL ) {
      /* We must insert the pool into the link list of system message pools,
      ** by order of size, smallest first.
      */
      _int_disable();
      prev_msgpool_ptr = msg_component_ptr->SMALLEST_MSGPOOL_PTR;
      if (prev_msgpool_ptr == NULL) {
         /* first entry in list */
         msg_component_ptr->SMALLEST_MSGPOOL_PTR = msgpool_ptr;
         msg_component_ptr->LARGEST_MSGPOOL_PTR  = msgpool_ptr;
      } else if (prev_msgpool_ptr->MESSAGE_SIZE >= msgpool_ptr->MESSAGE_SIZE){
         /* The new pool is smaller than that at head of list */
         msgpool_ptr->NEXT_MSGPOOL_PTR           = prev_msgpool_ptr;
         msg_component_ptr->SMALLEST_MSGPOOL_PTR = msgpool_ptr;
      } else {
         temp_msgpool_ptr = prev_msgpool_ptr->NEXT_MSGPOOL_PTR;
         while (temp_msgpool_ptr != NULL) {
            /* check the relative message sizes */
            if (temp_msgpool_ptr->MESSAGE_SIZE < msgpool_ptr->MESSAGE_SIZE){
               /* continue to walk down linked list */
               prev_msgpool_ptr = temp_msgpool_ptr;
               temp_msgpool_ptr = prev_msgpool_ptr->NEXT_MSGPOOL_PTR;
            } else {
               /* this entry belongs between prev_ptr and temp_msgpool_ptr */
               prev_msgpool_ptr->NEXT_MSGPOOL_PTR = msgpool_ptr;
               msgpool_ptr->NEXT_MSGPOOL_PTR      = temp_msgpool_ptr;
               break;
            } /* Endif */
         } /* Endwhile */
         if (temp_msgpool_ptr == NULL) {
            /* searched the list and this entry belongs at the bottom */
            prev_msgpool_ptr->NEXT_MSGPOOL_PTR     = msgpool_ptr;
            msg_component_ptr->LARGEST_MSGPOOL_PTR = msgpool_ptr;
         }/* Endif */
      } /* Endif */
      _int_enable();
   } /* Endif */

   return ((_pool_id)msgpool_ptr);

} /* Endbody */
/*!
 * \private
 *
 * \brief Adds an additional region of memory to an existing memory pool.
 *
 * Carves the region into three store blocks: an in-use sentinel at each end
 * and one large free block in between, then pushes the free block onto the
 * pool's free list and records the extension on the pool's extension list.
 *
 * \param[in] start_of_pool Address of the start of the memory pool addition.
 * \param[in] size          Size of the memory pool addition; must be at
 *                          least 3 * MQX_MIN_MEMORY_STORAGE_SIZE.
 * \param[in] mem_pool_ptr  The memory pool to extend.
 *
 * \return MQX_OK
 * \return MQX_INVALID_SIZE (Size too small for the three required blocks.)
 * \return MQX_INVALID_COMPONENT_HANDLE (Mem_pool_ptr is not a valid pool.)
 */
_mqx_uint _mem_extend_pool_internal
   (
      /* [IN] the address of the start of the memory pool addition */
      pointer start_of_pool,

      /* [IN] the size of the memory pool addition */
      _mem_size size,

      /* [IN] the memory pool to extend */
      MEMPOOL_STRUCT_PTR mem_pool_ptr
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR       kernel_data;
   STOREBLOCK_STRUCT_PTR        block_ptr;
   STOREBLOCK_STRUCT_PTR        end_ptr;
   STOREBLOCK_STRUCT_PTR        free_ptr;
   STOREBLOCK_STRUCT_PTR        tmp_ptr;
   MEMPOOL_EXTENSION_STRUCT_PTR ext_ptr;
   uchar_ptr                    real_start_ptr;
   uchar_ptr                    end_of_pool;
   _mem_size                    block_size;
   _mem_size                    real_size;
   _mem_size                    free_block_size;

   _GET_KERNEL_DATA(kernel_data);

#if MQX_CHECK_ERRORS
   if (size < (_mem_size)(3*MQX_MIN_MEMORY_STORAGE_SIZE)) {
      /* Pool must be big enough to hold at least 3 memory blocks */
      return(MQX_INVALID_SIZE);
   }/* Endif */
#endif
#if MQX_CHECK_VALIDITY
   if (mem_pool_ptr->VALID != MEMPOOL_VALID) {
      return(MQX_INVALID_COMPONENT_HANDLE);
   }/* Endif */
#endif

   /* The extension bookkeeping header occupies the (aligned) front of the
    * region; the usable pool area starts after it, also aligned. */
   ext_ptr = (MEMPOOL_EXTENSION_STRUCT_PTR) _ALIGN_ADDR_TO_HIGHER_MEM(start_of_pool);

   real_start_ptr = (uchar_ptr)ext_ptr + sizeof(MEMPOOL_EXTENSION_STRUCT);
   real_start_ptr = (uchar_ptr)_ALIGN_ADDR_TO_HIGHER_MEM(real_start_ptr);

   end_of_pool = (uchar_ptr)start_of_pool + size;
   end_of_pool = (uchar_ptr)_ALIGN_ADDR_TO_LOWER_MEM(end_of_pool);

   real_size = (_mem_size)(end_of_pool - real_start_ptr);

   ext_ptr->START      = start_of_pool;
   ext_ptr->SIZE       = size;
   ext_ptr->REAL_START = real_start_ptr;

   block_ptr  = (STOREBLOCK_STRUCT_PTR)real_start_ptr;
   block_size = MQX_MIN_MEMORY_STORAGE_SIZE;

   free_ptr = (STOREBLOCK_STRUCT_PTR)((uchar_ptr)block_ptr + block_size);
   /* The free block gets everything between the two sentinel blocks. */
   free_block_size = real_size - (_mem_size)(2 * MQX_MIN_MEMORY_STORAGE_SIZE);

   end_ptr = (STOREBLOCK_STRUCT_PTR)((uchar_ptr)free_ptr + free_block_size);

   /*
   ** Make a small minimal sized memory block to be as
   ** the first block in the pool. This will be an in-use block
   ** and will thus avoid problems with memory co-allescing during
   ** memory frees
   */
   block_ptr->BLOCKSIZE = block_size;
   block_ptr->MEM_TYPE  = 0;
   block_ptr->USER_AREA = 0;
   block_ptr->PREVBLOCK = (struct storeblock_struct _PTR_)NULL;
   block_ptr->NEXTBLOCK = free_ptr;
   MARK_BLOCK_AS_USED(block_ptr, SYSTEM_TASK_ID(kernel_data));
   CALC_CHECKSUM(block_ptr);

   /*
   ** Let the next block be the actual free block that will be added
   ** to the free list
   */
   free_ptr->BLOCKSIZE = free_block_size;
   free_ptr->MEM_TYPE  = 0;
   free_ptr->USER_AREA = 0;
   free_ptr->PREVBLOCK = block_ptr;
   free_ptr->NEXTBLOCK = end_ptr;
   MARK_BLOCK_AS_FREE(free_ptr);
   CALC_CHECKSUM(free_ptr);

   /*
   ** Set up a minimal sized block at the end of the pool, and also
   ** mark it as being allocated. Again this is to comply with the
   ** _mem_free algorithm
   */
   end_ptr->BLOCKSIZE = block_size;
   end_ptr->MEM_TYPE  = 0;
   end_ptr->USER_AREA = 0;
   end_ptr->PREVBLOCK = free_ptr;
   end_ptr->NEXTBLOCK = NULL;
   MARK_BLOCK_AS_USED(end_ptr, SYSTEM_TASK_ID(kernel_data));
   CALC_CHECKSUM(end_ptr);

   _int_disable();

   /* Add the block to the free list (pushed at the head). */
   tmp_ptr = mem_pool_ptr->POOL_FREE_LIST_PTR;
   mem_pool_ptr->POOL_FREE_LIST_PTR = free_ptr;
   if (tmp_ptr != NULL) {
      PREV_FREE(tmp_ptr) = free_ptr;
   } /* Endif */
   PREV_FREE(free_ptr) = NULL;
   NEXT_FREE(free_ptr) = tmp_ptr;

   /* Reset the free list queue walker for some other task */
   mem_pool_ptr->POOL_FREE_CURRENT_BLOCK = mem_pool_ptr->POOL_FREE_LIST_PTR;

   /* Link in the extension */
   _QUEUE_ENQUEUE(&mem_pool_ptr->EXT_LIST, &ext_ptr->LINK);

   _int_enable();

   return(MQX_OK);

} /* Endbody */
/*!
 * \private
 *
 * \brief Allocates and initializes the scheduler's ready queues, one per
 * task priority level, and links them into kernel data.
 *
 * The queues are allocated as one contiguous, zeroed array and linked so
 * that READY_Q_LIST ends up pointing at the priority-0 queue, with the
 * queue for priority p located p entries below it in memory.
 *
 * \return MQX_OK
 * \return MQX_OUT_OF_MEMORY (Ready queue array could not be allocated.)
 */
uint_32 _psp_init_readyqs
   (
      void
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR kernel_data;
   READY_Q_STRUCT_PTR     q_ptr;
   uint_32                priority_levels;
   uint_32                n;

   _GET_KERNEL_DATA(kernel_data);
   kernel_data->READY_Q_LIST = (READY_Q_STRUCT_PTR) NULL;

   /* +2 beyond the lowest priority number; presumably one extra level for
    * kernel-internal use -- TODO confirm against scheduler docs. */
   priority_levels = kernel_data->LOWEST_TASK_PRIORITY + 2;

   q_ptr = (READY_Q_STRUCT_PTR)_mem_alloc_zero(sizeof(READY_Q_STRUCT) * priority_levels);
#if MQX_CHECK_MEMORY_ALLOCATION_ERRORS
   if ( q_ptr == NULL ) {
      return (MQX_OUT_OF_MEMORY);
   } /* Endif */
#endif
   _mem_set_type(q_ptr, MEM_TYPE_READYQ);

   /* Walk the array front to back while counting priorities down, so the
    * highest priority number lands at the lowest address and priority 0 at
    * the highest; each queue starts empty (head/tail point at itself). */
   n = priority_levels;
   while (n--) {
      q_ptr->HEAD_READY_Q = (TD_STRUCT_PTR)q_ptr;
      q_ptr->TAIL_READY_Q = (TD_STRUCT_PTR)q_ptr;
      q_ptr->PRIORITY = (uint_16)n;
      /* ENABLE_SR is the Cortex interrupt-mask value used while a task of
       * this priority runs: task priority offset by the hardware interrupt
       * ceiling, clamped to the largest implementable value. */
      if (n + kernel_data->INIT.MQX_HARDWARE_INTERRUPT_LEVEL_MAX < ((1 << CORTEX_PRIOR_IMPL) - 1))
         q_ptr->ENABLE_SR = CORTEX_PRIOR(n + kernel_data->INIT.MQX_HARDWARE_INTERRUPT_LEVEL_MAX);
      else
         q_ptr->ENABLE_SR = CORTEX_PRIOR((1 << CORTEX_PRIOR_IMPL) - 2);
      q_ptr->NEXT_Q = kernel_data->READY_Q_LIST;
      kernel_data->READY_Q_LIST = q_ptr++;
   }

   /*
   ** Set the current ready q (where the ready queue searches start) to
   ** the head of the list of ready queues.
   */
   kernel_data->CURRENT_READY_Q = kernel_data->READY_Q_LIST;

#if 0
   /* Initialize the ENABLE_SR fields in the ready queues */
   sr = 0;
   n = priority_levels;
   q_ptr = kernel_data->READY_Q_LIST;
   while (n--) {
      q_ptr->ENABLE_SR = CORTEX_PRIOR(sr);
      if (sr < kernel_data->INIT.MQX_HARDWARE_INTERRUPT_LEVEL_MAX) {
         sr++;
      }
      q_ptr = q_ptr->NEXT_Q;
   }
#endif

   return MQX_OK;

} /* Endbody */
/*!
 * \brief Destroys the private message pool.
 *
 * Any task can destroy the private message pool as long as all its messages have
 * been freed.
 *
 * \param[in] pool_id Pool to destroy.
 *
 * \return MQX_OK
 * \return MQX_COMPONENT_DOES_NOT_EXIST (Message component is not created.)
 * \return MSGPOOL_INVALID_POOL_ID (Pool_id does not represent a message pool
 * that was created by _msgpool_create().)
 * \return MSGPOOL_ALL_MESSAGES_NOT_FREE (All messages in the message pool have
 * not been freed.)
 *
 * \warning Calls _mem_free(), which on error sets the task error code.
 *
 * \see _msgpool_create
 * \see _msg_free
 * \see _msg_alloc
 * \see _mem_free
 */
_mqx_uint _msgpool_destroy
   (
      _pool_id pool_id
   )
{ /* Body */
#if MQX_KERNEL_LOGGING || MQX_CHECK_ERRORS
   KERNEL_DATA_STRUCT_PTR   kernel_data;
#endif
#if MQX_CHECK_ERRORS
   MSG_COMPONENT_STRUCT_PTR msg_component_ptr;
#endif
   MSGPOOL_STRUCT_PTR       msgpool_ptr;
   MSGPOOL_BLOCK_STRUCT_PTR msgpool_block_ptr;
   MSGPOOL_BLOCK_STRUCT_PTR next_block_ptr;

#if MQX_KERNEL_LOGGING || MQX_CHECK_ERRORS
   _GET_KERNEL_DATA(kernel_data);
#endif

   _KLOGE2(KLOG_msgpool_destroy, pool_id);

#if MQX_CHECK_ERRORS
   msg_component_ptr = _GET_MSG_COMPONENT_STRUCT_PTR(kernel_data);
   if (msg_component_ptr == NULL) {
      _KLOGX2(KLOG_msgpool_destroy, MQX_COMPONENT_DOES_NOT_EXIST);
      return MQX_COMPONENT_DOES_NOT_EXIST;
   } /* Endif */
#endif

   /* A _pool_id is a pointer to the pool's MSGPOOL_STRUCT. */
   msgpool_ptr = (MSGPOOL_STRUCT_PTR)pool_id;
#if MQX_CHECK_VALIDITY
   if ( msgpool_ptr->VALID != MSG_VALID ) {
      _KLOGX2(KLOG_msgpool_destroy, MSGPOOL_INVALID_POOL_ID);
      return MSGPOOL_INVALID_POOL_ID;
   } /* Endif */
#endif

   _int_disable();
   /* SIZE == MAX means every message is back on the free list. */
   if (msgpool_ptr->SIZE == msgpool_ptr->MAX) {
      /* All messages currently returned, lets delete them */
      /* Zeroing SIZE and GROW_NUMBER under interrupt lockout prevents any
       * further allocation/growth from this pool before freeing. */
      msgpool_ptr->SIZE        = 0;
      msgpool_ptr->GROW_NUMBER = 0;
      _int_enable();
      /* Free every memory block backing the pool's messages. */
      msgpool_block_ptr = msgpool_ptr->MSGPOOL_BLOCK_PTR;
      while (msgpool_block_ptr != NULL) {
         next_block_ptr = msgpool_block_ptr->NEXT_BLOCK_PTR;
         _mem_free((void *)msgpool_block_ptr);
         msgpool_block_ptr = next_block_ptr;
      } /* Endwhile */
      /* Mark the pool slot invalid so it can be reused by
       * _msgpool_create_internal(). */
      msgpool_ptr->MSGPOOL_BLOCK_PTR = NULL;
      msgpool_ptr->VALID             = 0;
      msgpool_ptr->MSGPOOL_TYPE      = 0;
      _KLOGX2(KLOG_msgpool_destroy, MQX_OK);
      return MQX_OK;
   } else {
      _int_enable();
      _KLOGX2(KLOG_msgpool_destroy, MSGPOOL_ALL_MESSAGES_NOT_FREE);
      return MSGPOOL_ALL_MESSAGES_NOT_FREE;
   } /* Endif */

} /* Endbody */
/*!
 * \brief Tests all the message pools in the system for consistency and validity.
 *
 * The function checks the validity of each message in each private and system
 * message pool. It reports the first error that it finds.
 *
 * \param[out] pool_error_ptr (Initialized only if an error is found.) If the
 * message in a message pool has an error; one of the following:
 * \li A pointer to a pool ID if the message is from a private message pool.
 * \li A pointer to a system message pool if the message is from a system
 * message pool.
 * \param[out] msg_error_ptr Pointer to the message that has an error
 * (initialized only if an error is found).
 *
 * \return MQX_OK (all messages in all message pools passed)
 * \return MQX_COMPONENT_DOES_NOT_EXIST (Message component is not created.)
 * \return MSGQ_INVALID_MESSAGE (At least one message in at least one message
 * pool failed.)
 *
 * \warning Disables and enables interrupts.
 *
 * \see _msgpool_create
 * \see _msgpool_create_system
 */
_mqx_uint _msgpool_test
   (
      void **pool_error_ptr,
      void **msg_error_ptr
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR      kernel_data;
   MSG_COMPONENT_STRUCT_PTR    msg_component_ptr;
   MSGPOOL_STRUCT_PTR          msgpool_ptr;
   MSGPOOL_BLOCK_STRUCT_PTR    msgpool_block_ptr;
   INTERNAL_MESSAGE_STRUCT_PTR imsg_ptr;
   _mqx_uint                   i,j,raw_message_size;

   _GET_KERNEL_DATA(kernel_data);
   _KLOGE3(KLOG_msgpool_test, pool_error_ptr, msg_error_ptr);

   msg_component_ptr = _GET_MSG_COMPONENT_STRUCT_PTR(kernel_data);
#if MQX_CHECK_ERRORS
   if (msg_component_ptr == NULL) {
      _KLOGX2(KLOG_msgpool_test, MQX_COMPONENT_DOES_NOT_EXIST);
      return(MQX_COMPONENT_DOES_NOT_EXIST);
   } /* Endif */
#endif

   /* Check all the message pools */
   msgpool_ptr = msg_component_ptr->MSGPOOLS_PTR;
   /* +1 then pre-decrement: iterates MAX_MSGPOOLS times.  Interrupts are
    * disabled per pool (not for the whole scan) to bound latency. */
   i = msg_component_ptr->MAX_MSGPOOLS + 1;
   while (--i) {
      _int_disable();
      if (msgpool_ptr->VALID == MSG_VALID) {
         /* The pool has been created */
         /* Search through all of the message pool blocks for this pool */
         msgpool_block_ptr = msgpool_ptr->MSGPOOL_BLOCK_PTR;
         while (msgpool_block_ptr != NULL) {
            /* Messages within a block are laid out contiguously at
             * RAW_MESSAGE_SIZE intervals, starting at FIRST_IMSG_PTR. */
            raw_message_size = msgpool_block_ptr->RAW_MESSAGE_SIZE;
            imsg_ptr = (INTERNAL_MESSAGE_STRUCT_PTR) msgpool_block_ptr->FIRST_IMSG_PTR;
            j = msgpool_block_ptr->NUM_MESSAGES + 1;
            while (--j) {
               /* Each message must carry the valid stamp and point back at
                * its owning pool. */
               if ((imsg_ptr->VALID != MSG_VALID) ||
                   (imsg_ptr->MSGPOOL_PTR != msgpool_ptr))
               {
                  _int_enable();
                  *pool_error_ptr = msgpool_ptr;
                  *msg_error_ptr  = imsg_ptr;
                  _KLOGX4(KLOG_msgpool_test, MSGQ_INVALID_MESSAGE, msgpool_ptr, imsg_ptr);
                  return(MSGQ_INVALID_MESSAGE);
               } /* Endif */
               imsg_ptr =(INTERNAL_MESSAGE_STRUCT_PTR) ((unsigned char *)imsg_ptr + raw_message_size);
            } /* Endwhile */
            msgpool_block_ptr = msgpool_block_ptr->NEXT_BLOCK_PTR;
         } /* Endwhile */
      } /* Endif */
      _int_enable();
      msgpool_ptr++;
   } /* Endwhile */

   _KLOGX2(KLOG_msgpool_test, MQX_OK);
   return(MQX_OK);

} /* Endbody */
/*!
 * \brief To provide support for exception handlers, applications can use this ISR
 * to replace the default ISR. The ISR is specific to the PSP.
 *
 * An application calls _int_install_exception_isr() to install _int_exception_isr().
 * \n The function _int_exception_isr() does the following:
 * \n - If an exception occurs when a task is running and a task exception ISR
 * exists, MQX runs the ISR; if a task exception ISR does not exist, MQX aborts
 * the task by calling _task_abort().
 * \n - If an exception occurs when an ISR is running and an ISR exception ISR
 * exists, MQX aborts the running ISR and runs the ISR's exception ISR.
 * \n - The function walks the interrupt stack looking for information about the
 * ISR or task that was running before the exception occurred. If the function
 * determines that the interrupt stack contains incorrect information, it calls
 * _mqx_fatal_error() with error code MQX_CORRUPT_INTERRUPT_STACK.
 *
 * \param[in] parameter Parameter passed to the default ISR (the vector number).
 *
 * \warning See description.
 *
 * \see _int_install_exception_isr
 * \see _mqx_fatal_error
 * \see _task_abort
 */
void _int_exception_isr
   (
      pointer parameter
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR     kernel_data;
   TD_STRUCT_PTR              td_ptr;
   PSP_INT_CONTEXT_STRUCT_PTR exception_frame_ptr;
   PSP_INT_CONTEXT_STRUCT_PTR isr_frame_ptr;
   INTERRUPT_TABLE_STRUCT_PTR table_ptr;
   INT_EXCEPTION_FPTR         exception_handler;
   uint_32                    isr_vector;
   /* uint_32                 exception_vector; */

   _GET_KERNEL_DATA(kernel_data);
   td_ptr = kernel_data->ACTIVE_PTR;

   /* Stop all interrupts */
   _PSP_SET_DISABLE_SR(kernel_data->DISABLE_SR);
   /*_int_disable(); */

   /* IN_ISR > 1 means this exception preempted another ISR (the exception
    * itself counts as one nesting level). */
   if ( kernel_data->IN_ISR > 1 ) {
      /* We have entered this function from an exception that happened
       * while an isr was running.
       */
      /* Get our current exception frame */
      exception_frame_ptr = kernel_data->INTERRUPT_CONTEXT_PTR;
      /* the current context contains a pointer to the next one */
      isr_frame_ptr = (PSP_INT_CONTEXT_STRUCT_PTR)exception_frame_ptr->PREV_CONTEXT;
      if (isr_frame_ptr == NULL) {
         /* This is not allowable */
         _mqx_fatal_error(MQX_CORRUPT_INTERRUPT_STACK);
      }
      isr_vector = isr_frame_ptr->EXCEPTION_NUMBER;
      /* Call the isr exception handler for the ISR that WAS running */
      table_ptr = kernel_data->INTERRUPT_TABLE_PTR;
#if MQX_CHECK_ERRORS
      /* The interrupted ISR's vector must fall in the user ISR range. */
      if ((table_ptr != NULL) &&
          (isr_vector >= kernel_data->FIRST_USER_ISR_VECTOR) &&
          (isr_vector <= kernel_data->LAST_USER_ISR_VECTOR))
      {
#endif
         /* Call the exception handler for the isr on isr_vector,
          * passing the isr_vector, the exception_vector, the isr_data and
          * the basic frame pointer for the exception
          */
         exception_handler = _int_get_exception_handler(isr_vector);
         if (exception_handler) {
            (*exception_handler)(isr_vector, (_mqx_uint)parameter, _int_get_isr_data(isr_vector)/*table_ptr->APP_ISR_DATA*/, exception_frame_ptr);
         }
#if MQX_CHECK_ERRORS
      } else {
         /* In this case, the exception occured in this handler */
         _mqx_fatal_error(MQX_INVALID_VECTORED_INTERRUPT);
      }
#endif
      /* Indicate we have popped 1 interrupt stack frame (the exception frame) */
      --kernel_data->IN_ISR;
      /* Reset the stack to point to the interrupt frame */
      /* And off we go. Will never return */
      _psp_exception_return( (pointer)isr_frame_ptr );
   } else {
      /* We have entered this function from an exception that happened
       * while a task was running.
       */
      if (td_ptr->EXCEPTION_HANDLER_PTR != NULL ) {
         /* Run the task's own exception handler with its stack pointer. */
         (*td_ptr->EXCEPTION_HANDLER_PTR)((_mqx_uint)parameter, td_ptr->STACK_PTR);
      } else {
         /* Abort the current task */
         _task_abort(MQX_NULL_TASK_ID);
      }
   }
}
/*!
 * \brief Shrinks an allocated memory block in place, returning the trailing
 * part of the block to the pool via _mem_free().
 *
 * The block must be owned by the calling task. If the space recovered is too
 * small to be worth splitting (< 2 * MQX_MIN_MEMORY_STORAGE_SIZE), the block
 * is left unchanged and MQX_OK is returned.
 *
 * \param[in] mem_ptr        Address of the memory block whose size is to change.
 * \param[in] requested_size The new (smaller) size for the block.
 *
 * \return MQX_OK
 * \return MQX_INVALID_POINTER (mem_ptr NULL, misaligned, or not an in-use block.)
 * \return MQX_INVALID_CHECKSUM (Block header checksum is bad.)
 * \return MQX_NOT_RESOURCE_OWNER (Block is not on the calling task's resource list.)
 * \return MQX_INVALID_SIZE (requested_size larger than the current block.)
 */
_mqx_uint _mem_free_part
   (
      /* [IN] the address of the memory block whose size is to change */
      pointer    mem_ptr,

      /* [IN] the new size for the block */
      _mem_size  requested_size
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR  kernel_data;
   STOREBLOCK_STRUCT_PTR   block_ptr;
   STOREBLOCK_STRUCT_PTR   prev_block_ptr;
   STOREBLOCK_STRUCT_PTR   next_block_ptr;
   STOREBLOCK_STRUCT_PTR   new_block_ptr;
   _mem_size               size;
   _mem_size               block_size;
   _mem_size               new_block_size;
   _mqx_uint               result_code;

   _GET_KERNEL_DATA(kernel_data);
   _KLOGE3(KLOG_mem_free_part, mem_ptr, requested_size);

#if MQX_CHECK_ERRORS
   /* Make sure a correct pointer was passed in. */
   if (mem_ptr == NULL) {
      _task_set_error(MQX_INVALID_POINTER);
      _KLOGX2(KLOG_mem_free_part, MQX_INVALID_POINTER);
      return(MQX_INVALID_POINTER);
   } /* Endif */
#endif

   /* Verify the block size */
   block_ptr = GET_MEMBLOCK_PTR(mem_ptr);
#if MQX_CHECK_ERRORS
   if (! _MEMORY_ALIGNED(block_ptr)) {
      _task_set_error(MQX_INVALID_POINTER);
      _KLOGX2(KLOG_mem_free_part, MQX_INVALID_POINTER);
      return(MQX_INVALID_POINTER);
   } /* Endif */
   /* A block smaller than the minimum header or already marked free cannot
    * be a valid in-use block; record it for post-mortem debugging. */
   if ( (block_ptr->BLOCKSIZE < MQX_MIN_MEMORY_STORAGE_SIZE) ||
        BLOCK_IS_FREE(block_ptr) )
   {
      _task_set_error(MQX_INVALID_POINTER);
      kernel_data->KD_POOL.POOL_BLOCK_IN_ERROR = block_ptr;
      _KLOGX3(KLOG_mem_free_part, MQX_INVALID_POINTER, block_ptr);
      return(MQX_INVALID_POINTER);
   } /* Endif */
#endif
#if MQX_CHECK_VALIDITY
   /* Checksum must be read atomically with respect to other pool updates. */
   _int_disable();
   if ( ! VALID_CHECKSUM(block_ptr) ) {
      _int_enable();
      _task_set_error(MQX_INVALID_CHECKSUM);
      kernel_data->KD_POOL.POOL_BLOCK_IN_ERROR = block_ptr;
      _KLOGX3(KLOG_mem_free_part, MQX_INVALID_CHECKSUM, block_ptr);
      return(MQX_INVALID_CHECKSUM);
   } /* Endif */
   _int_enable();
#endif

   /* Walk through the memory resources of the task descriptor.
    * Two pointers are maintained, one to the current block
    * and one to the previous block.
    */
   next_block_ptr = (STOREBLOCK_STRUCT_PTR)
      kernel_data->ACTIVE_PTR->MEMORY_RESOURCE_LIST;
   prev_block_ptr = GET_MEMBLOCK_PTR(&kernel_data->ACTIVE_PTR->MEMORY_RESOURCE_LIST);

   /* Scan the task's memory resource list searching for the block to
    * free, Stop when the current pointer is equal to the block to free
    * or the end of the list is reached.
    */
   while ( next_block_ptr && ((pointer)next_block_ptr != mem_ptr) ) {
      /* The block is not found, and the end of the list has not been
       * reached, so move down the list.
       */
      prev_block_ptr = GET_MEMBLOCK_PTR(next_block_ptr);
      next_block_ptr = (STOREBLOCK_STRUCT_PTR)prev_block_ptr->NEXTBLOCK;
   } /* Endwhile */

#if MQX_CHECK_ERRORS
   if ( next_block_ptr == NULL ) {
      /* The specified block does not belong to the calling task. */
      _task_set_error(MQX_NOT_RESOURCE_OWNER);
      _KLOGX2(KLOG_mem_free_part, MQX_NOT_RESOURCE_OWNER);
      return(MQX_NOT_RESOURCE_OWNER);
   } /* Endif */
#endif

   /* determine the size of the block. */
   block_size = block_ptr->BLOCKSIZE;

   /* Adjust the requested size to allow for the header overhead, clamp to the
    * minimum storage size, and force alignment upward. */
   size = requested_size + (_mem_size)FIELD_OFFSET(STOREBLOCK_STRUCT,USER_AREA);
   if (size < MQX_MIN_MEMORY_STORAGE_SIZE) {
      size = MQX_MIN_MEMORY_STORAGE_SIZE;
   } /* Endif */
   _MEMORY_ALIGN_VAL_LARGER(size);

#if MQX_CHECK_ERRORS
   /* Verify that the size parameter is within range of the block size. */
   if (size <= block_size) {
#endif

      /* Adjust the size to allow for the overhead and force alignment */

      /* Compute the size of the new_ block that would be created. */
      new_block_size = block_size - size;

      /* Decide if it worthwile to split the block. If the amount of space
       * returned is not at least twice the size of the block overhead,
       * then dont bother.
       */
      if (new_block_size >= (2*MQX_MIN_MEMORY_STORAGE_SIZE) ) {
         /* Create an 'inuse' block out of the tail; it is built completely
          * (including its checksum) BEFORE interrupts are disabled, so the
          * critical section below stays short. */
         new_block_ptr = (STOREBLOCK_STRUCT_PTR)((char _PTR_)block_ptr + size);
         new_block_ptr->BLOCKSIZE = new_block_size;
         PREV_PHYS(new_block_ptr) = block_ptr;
         new_block_ptr->TASK_NUMBER = block_ptr->TASK_NUMBER;
         new_block_ptr->MEM_POOL_PTR = block_ptr->MEM_POOL_PTR;
         CALC_CHECKSUM(new_block_ptr);

         _int_disable();

         /* Split the block */
         block_ptr->BLOCKSIZE = size;
         CALC_CHECKSUM(block_ptr);

         /* make sure right physical neighbour knows about it */
         block_ptr = NEXT_PHYS(new_block_ptr);
         PREV_PHYS(block_ptr) = new_block_ptr;
         CALC_CHECKSUM(block_ptr);

         /* Link the new block onto the requestor's task descriptor. */
         new_block_ptr->NEXTBLOCK = kernel_data->ACTIVE_PTR->MEMORY_RESOURCE_LIST;
         kernel_data->ACTIVE_PTR->MEMORY_RESOURCE_LIST =
            (char _PTR_)(&new_block_ptr->USER_AREA);

         _int_enable();

         /* Hand the tail block back to the pool through the normal path. */
         result_code = _mem_free((pointer)&new_block_ptr->USER_AREA);
      } else {
         /* Not worth splitting: leave the block as-is. */
         result_code = MQX_OK;
      } /* Endif */

#if MQX_CHECK_ERRORS
   } else {
      result_code = MQX_INVALID_SIZE;
   } /* Endif */
#endif

#if MQX_CHECK_ERRORS
   if ( result_code != MQX_OK ) {
      _task_set_error(result_code);
   } /* Endif */
#endif

   _KLOGX2(KLOG_mem_free_part, result_code);
   return (result_code);
} /* Endbody */
/*!
 * \brief Tests the timer component queues for validity and consistency.
 *
 * Both the elapsed-time and kernel-time timer entry queues are structurally
 * tested with _queue_test() and every entry's VALID field is checked
 * (when MQX_CHECK_VALIDITY is enabled).
 *
 * \param[out] timer_error_ptr The timer element in error (NULL if none).
 *
 * \return MQX_OK (also when the timer component was never created)
 * \return MQX_INVALID_COMPONENT_HANDLE (A timer entry is invalid.)
 * \return code from _queue_test() (A timer queue is inconsistent.)
 */
_mqx_uint _timer_test
   (
      /* [OUT] the timer element in error */
      pointer _PTR_ timer_error_ptr
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR     kernel_data;
   TIMER_COMPONENT_STRUCT_PTR timer_component_ptr;
   QUEUE_STRUCT_PTR           queue_ptr;
   TIMER_ENTRY_STRUCT_PTR     element_ptr;
   _mqx_uint                  result;

   _GET_KERNEL_DATA(kernel_data);

   _KLOGE2(KLOG_timer_test, timer_error_ptr);

   *timer_error_ptr = NULL;
   timer_component_ptr = kernel_data->KERNEL_COMPONENTS[KERNEL_TIMER];
   if (timer_component_ptr == NULL) {
      /* Component never created: not an error. */
      _KLOGX2(KLOG_timer_test, MQX_OK);
      return(MQX_OK);
   } /* Endif */

   /* Gain exclusive access to the timer queues */
   _lwsem_wait(&timer_component_ptr->TIMER_ENTRIES_LWSEM);

   result = _queue_test(&timer_component_ptr->ELAPSED_TIMER_ENTRIES, timer_error_ptr);
   if (result != MQX_OK) {
      /* Release the semaphore on every exit path. */
      _lwsem_post(&timer_component_ptr->TIMER_ENTRIES_LWSEM);
      _KLOGX3(KLOG_timer_test, result, *timer_error_ptr);
      return(result);
   } /* Endif */
   result = _queue_test(&timer_component_ptr->KERNEL_TIMER_ENTRIES, timer_error_ptr);
   if (result != MQX_OK) {
      _lwsem_post(&timer_component_ptr->TIMER_ENTRIES_LWSEM);
      _KLOGX3(KLOG_timer_test, result, *timer_error_ptr);
      return(result);
   } /* Endif */

   /* Walk the elapsed-time entries; the queue head itself terminates
    * the circular list. */
   queue_ptr = (pointer)&timer_component_ptr->ELAPSED_TIMER_ENTRIES;
   element_ptr = (pointer)queue_ptr->NEXT;
   while (element_ptr != (pointer)queue_ptr) {
#if MQX_CHECK_VALIDITY
      if (element_ptr->VALID != TIMER_VALID) {
         *timer_error_ptr = element_ptr;
         _lwsem_post(&timer_component_ptr->TIMER_ENTRIES_LWSEM);
         _KLOGX3(KLOG_timer_test, MQX_INVALID_COMPONENT_HANDLE, *timer_error_ptr);
         return(MQX_INVALID_COMPONENT_HANDLE);
      } /* Endif */
#endif
      element_ptr = (pointer)element_ptr->QUEUE_ELEMENT.NEXT;
   } /* Endwhile */

   /* Walk the kernel-time entries the same way. */
   queue_ptr = (pointer)&timer_component_ptr->KERNEL_TIMER_ENTRIES;
   element_ptr = (pointer)queue_ptr->NEXT;
   while (element_ptr != (pointer)queue_ptr) {
#if MQX_CHECK_VALIDITY
      if (element_ptr->VALID != TIMER_VALID) {
         *timer_error_ptr = element_ptr;
         _lwsem_post(&timer_component_ptr->TIMER_ENTRIES_LWSEM);
         _KLOGX3(KLOG_timer_test, MQX_INVALID_COMPONENT_HANDLE, *timer_error_ptr);
         return(MQX_INVALID_COMPONENT_HANDLE);
      } /* Endif */
#endif
      element_ptr = (pointer)element_ptr->QUEUE_ELEMENT.NEXT;
   } /* Endwhile */

   _lwsem_post(&timer_component_ptr->TIMER_ENTRIES_LWSEM);

   _KLOGX2(KLOG_timer_test, MQX_OK);
   return(MQX_OK);
} /* Endbody */
/*!
 * \private
 *
 * \brief Used by a task to destroy an instance of a lightweight event.
 *
 * Invalidates the event, readies every task still waiting on it, and removes
 * the event from the kernel (or user-mode) lwevent queue.
 *
 * \param[in] event_ptr Pointer to the lightweight event to be deinitialized.
 * \param[in] user      User mode
 *
 * \return MQX_OK
 * \return MQX_LWEVENT_INVALID (Lightweight event was not valid.)
 * \return MQX_CANNOT_CALL_FUNCTION_FROM_ISR (Function cannot be called from an ISR.)
 *
 * \see _lwevent_destroy
 * \see LWEVENT_STRUCT
 */
_mqx_uint _lwevent_destroy_internal
(
    LWEVENT_STRUCT_PTR  event_ptr,
    bool                user
)
{
    KERNEL_DATA_STRUCT_PTR  kernel_data;
#if MQX_COMPONENT_DESTRUCTION
    TD_STRUCT_PTR           td_ptr;
#endif

#if MQX_ENABLE_USER_MODE
    /* In user mode the caller must have at least read access to the event. */
    if (user && !_psp_mem_check_access_mask((uint32_t)event_ptr,
        sizeof(LWEVENT_STRUCT), MPU_UM_R, MPU_UM_RW))
    {
        return MQX_LWEVENT_INVALID;
    }
#endif

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE2(KLOG_lwevent_destroy, event_ptr);

#if MQX_COMPONENT_DESTRUCTION
#if MQX_CHECK_ERRORS
    if (kernel_data->IN_ISR)
    {
        _KLOGX2(KLOG_lwevent_destroy, MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
        return (MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
    } /* Endif */
#endif

    _int_disable();
#if MQX_CHECK_VALIDITY
    if (event_ptr->VALID != LWEVENT_VALID)
    {
        _int_enable();
        _KLOGX2(KLOG_lwevent_destroy, MQX_LWEVENT_INVALID);
        return (MQX_LWEVENT_INVALID);
    } /* Endif */
#endif

    /* Effectively stop all access to the event */
    event_ptr->VALID = 0;

    /* Wake every waiter: remove it from any timeout queue and make it ready.
     * _BACKUP_POINTER recovers the TD from its AUX_QUEUE link. */
    while (_QUEUE_GET_SIZE(&event_ptr->WAITING_TASKS))
    {
        _QUEUE_DEQUEUE(&event_ptr->WAITING_TASKS, td_ptr);
        _BACKUP_POINTER(td_ptr, TD_STRUCT, AUX_QUEUE);
        _TIME_DEQUEUE(td_ptr, kernel_data);
        _TASK_READY(td_ptr, kernel_data);
    } /* Endwhile */

    /* remove event from kernel LWEVENTS queue */
#if MQX_ENABLE_USER_MODE
    if (user)
    {
        _QUEUE_REMOVE(&kernel_data->USR_LWEVENTS, event_ptr);
    }
    else
#endif
    {
        _QUEUE_REMOVE(&kernel_data->LWEVENTS, event_ptr);
    }

    _int_enable();

    /* May need to let higher priority task run */
    _CHECK_RUN_SCHEDULER();
#endif

    _KLOGX2(KLOG_lwevent_destroy, MQX_OK);
    return (MQX_OK);
}
/*!
 * \brief Opens an installed I/O device by name and returns a file handle.
 *
 * Searches the kernel's installed-device list for the first device whose
 * identifier is a prefix of \a open_type_ptr, allocates a zeroed MQX_FILE
 * for it and, if the device provides an open hook, invokes it.
 *
 * \param[in] open_type_ptr The name of the device to open.
 * \param[in] open_mode_ptr I/O initialization parameter to pass to the device
 *                          initialization.
 *
 * \return Pointer to the open file, or NULL on failure (no matching device,
 *         allocation failure, or device open error — the task error code is
 *         set in the last case).
 */
MQX_FILE_PTR _io_fopen
   (
      /* [IN] the name of the device to open */
      const char _PTR_ open_type_ptr,

      /* [IN] I/O initialization parameter to pass to the device initialization */
      const char _PTR_ open_mode_ptr
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR kernel_data;
   MQX_FILE_PTR           fd;
   IO_DEVICE_STRUCT_PTR   device;
   char _PTR_             name;
   char _PTR_             want;
   _mqx_int               open_result;

   _GET_KERNEL_DATA(kernel_data);

   /* Serialize access to the device list while searching it. */
   _lwsem_wait((LWSEM_STRUCT_PTR)&kernel_data->IO_LWSEM);

   device = (IO_DEVICE_STRUCT_PTR)((pointer)kernel_data->IO_DEVICES.NEXT);
   while (device != (pointer)&kernel_data->IO_DEVICES.NEXT) {
      name = device->IDENTIFIER;
      want = (char _PTR_)open_type_ptr;

      /* Advance both cursors while the characters agree. */
      while (*want && *name && (*want == *name)) {
         ++want;
         ++name;
      } /* Endwhile */

      /* The identifier was exhausted first (or together with the request),
       * i.e. it is a prefix of the requested name: match found. */
      if (*name == '\0') {
         break;
      } /* Endif */

      device = (IO_DEVICE_STRUCT_PTR)((pointer)device->QUEUE_ELEMENT.NEXT);
   } /* Endwhile */

   _lwsem_post((LWSEM_STRUCT_PTR)&kernel_data->IO_LWSEM);

   if (device == (pointer)&kernel_data->IO_DEVICES.NEXT) {
      /* Walked off the end of the list: no installed device matched. */
      return(NULL);
   } /* Endif */

   fd = (MQX_FILE_PTR)_mem_alloc_system_zero((_mem_size)sizeof(MQX_FILE));
#if MQX_CHECK_MEMORY_ALLOCATION_ERRORS
   if (fd == NULL) {
      return(NULL);
   } /* Endif */
#endif
   _mem_set_type(fd, MEM_TYPE_FILE_PTR);

   fd->DEV_PTR = device;

   /* The open hook is optional; absence means the handle is ready as-is. */
   if (device->IO_OPEN != NULL) {
      open_result = (*device->IO_OPEN)(fd,
         (char _PTR_)open_type_ptr, (char _PTR_)open_mode_ptr);
      if (open_result != MQX_OK) {
         /* Device refused the open: record the error and release the handle. */
         _task_set_error(open_result);
         _mem_free(fd);
         return(NULL);
      } /* Endif */
   } /* Endif */

   return(fd);
} /* Endbody */
/*!
 * \private
 *
 * \brief Used by a task to create an instance of a lightweight event.
 *
 * \param[in] event_ptr Pointer representing location of the event.
 * \param[in] flags     Flags for the light weight event.
 * \param[in] user      User mode
 *
 * \return MQX_OK
 * \return MQX_EINVAL (lwevent is already initialized.)
 * \return MQX_LWEVENT_INVALID (In case of user mode, MQX tries to access
 * a lwevent with inappropriate access rights.)
 *
 * \see _lwevent_create
 * \see LWEVENT_STRUCT
 */
_mqx_uint _lwevent_create_internal
(
    LWEVENT_STRUCT_PTR  event_ptr,
    _mqx_uint           flags,
    bool                user
)
{
    KERNEL_DATA_STRUCT_PTR  kernel_data;
    LWEVENT_STRUCT_PTR      event_chk_ptr;

#if MQX_ENABLE_USER_MODE
    /* In user mode the caller must have at least read access to the event. */
    if (user && !_psp_mem_check_access_mask((uint32_t)event_ptr,
        sizeof(LWEVENT_STRUCT), MPU_UM_R, MPU_UM_RW) )
    {
        return MQX_LWEVENT_INVALID;
    }
#endif

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE2(KLOG_lwevent_create, event_ptr);

    /* Initialize the event body before it becomes reachable from the
     * kernel queue. */
    _QUEUE_INIT(&event_ptr->WAITING_TASKS, 0);
    event_ptr->VALUE = 0;
    event_ptr->FLAGS = flags;

    /* AUTO is a per-bit auto-clear mask: all bits or none. */
    if (flags & LWEVENT_AUTO_CLEAR)
        event_ptr->AUTO = ~0;
    else
        event_ptr->AUTO = 0;

    _int_disable();

#if MQX_ENABLE_USER_MODE
    if (user)
    {
        if (kernel_data->USR_LWEVENTS.NEXT == NULL)
        {
            /* Initialize the light weight event queue */
            _QUEUE_INIT(&kernel_data->USR_LWEVENTS, 0);
        }
    }
    else
#endif
    {
        if (kernel_data->LWEVENTS.NEXT == NULL)
        {
            /* Initialize the light weight event queue */
            _QUEUE_INIT(&kernel_data->LWEVENTS, 0);
        }
    }

    event_ptr->VALID = LWEVENT_VALID;

#if MQX_CHECK_ERRORS
    /* Check if lwevent is already initialized: walk the registration queue
     * and refuse a duplicate pointer (still inside the _int_disable window). */
#if MQX_ENABLE_USER_MODE
    if (user)
    {
        event_chk_ptr = (LWEVENT_STRUCT_PTR)((void *)kernel_data->USR_LWEVENTS.NEXT);
        while (event_chk_ptr != (LWEVENT_STRUCT_PTR)((void *)&kernel_data->USR_LWEVENTS))
        {
            if (event_chk_ptr == event_ptr)
            {
                _int_enable();
                _KLOGX2(KLOG_lwevent_create, MQX_EINVAL);
                return(MQX_EINVAL);
            }
            event_chk_ptr = (LWEVENT_STRUCT_PTR)((void *)event_chk_ptr->LINK.NEXT);
        }
    }
    else
#endif
    {
        event_chk_ptr = (LWEVENT_STRUCT_PTR) ((void *) kernel_data->LWEVENTS.NEXT);
        while (event_chk_ptr != (LWEVENT_STRUCT_PTR) ((void *) &kernel_data->LWEVENTS))
        {
            if (event_chk_ptr == event_ptr)
            {
                _int_enable();
                _KLOGX2(KLOG_lwevent_create, MQX_EINVAL);
                return (MQX_EINVAL);
            }
            event_chk_ptr = (LWEVENT_STRUCT_PTR) ((void *) event_chk_ptr->LINK.NEXT);
        }
    }
#endif

    /* Register the event so _lwevent_test()/_lwevent_destroy() can find it. */
#if MQX_ENABLE_USER_MODE
    if (user)
    {
        _QUEUE_ENQUEUE(&kernel_data->USR_LWEVENTS, &event_ptr->LINK);
    }
    else
#endif
    {
        _QUEUE_ENQUEUE(&kernel_data->LWEVENTS, &event_ptr->LINK);
    }

    _int_enable();

    _KLOGX2(KLOG_lwevent_create, MQX_OK);
    return (MQX_OK);
}
/*!
 * \brief Internal worker that sends a message to a message queue.
 *
 * Handles local and (when MQX_IS_MULTI_PROCESSOR) remote delivery. For a
 * task-attached queue whose owner is blocked receiving, the message is
 * "short-cut" directly into the receiver's TD; otherwise it is enqueued,
 * possibly after swapping with a lower-priority message already parked in
 * the TD (CR 2191/2193 logic).
 *
 * \param[in] msg_ptr    Pointer to the message being sent by application.
 * \param[in] blocking   Is the calling task to be blocked after the call.
 * \param[in] target_qid The queue to put the message onto.
 *
 * \return TRUE on success; FALSE on error (the task error code is set and,
 *         except for validation failures on msg_ptr itself, the message is
 *         freed).
 */
boolean _msgq_send_internal
   (
      /* [IN] pointer to the message being sent by application */
      MESSAGE_HEADER_STRUCT_PTR msg_ptr,

      /* [IN] is the calling task to be blocked after the call */
      boolean                   blocking,

      /* [IN] the queue to put the message onto */
      _queue_id                 target_qid
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR                 kernel_data;
   MSG_COMPONENT_STRUCT_PTR               msg_component_ptr;
   register INTERNAL_MESSAGE_STRUCT_PTR   imsg_ptr;
   register MSGQ_STRUCT_PTR               msgq_ptr;
   register TD_STRUCT_PTR                 td_ptr;
   MESSAGE_HEADER_STRUCT_PTR              tmp_msg_ptr;
   register _mqx_uint                     state;
   register _queue_number                 queue;
   register _processor_number             pnum;
   /* Start CR 2191 */
   boolean                                swapped_msg;
   /* End CR 2191 */

   _GET_KERNEL_DATA(kernel_data);

   msg_component_ptr = _GET_MSG_COMPONENT_STRUCT_PTR(kernel_data);
#if MQX_CHECK_ERRORS
   if (msg_component_ptr == NULL){
      _task_set_error(MQX_COMPONENT_DOES_NOT_EXIST);
      return(FALSE);
   } /* Endif */
   if (msg_ptr == NULL) {
      _task_set_error(MSGQ_INVALID_MESSAGE);
      return(FALSE);
   } /* Endif */
#endif

   imsg_ptr = GET_INTERNAL_MESSAGE_PTR(msg_ptr);
#if MQX_CHECK_ERRORS
   if (imsg_ptr->VALID != MSG_VALID){
      /* An invalid message was input by the application. */
      _task_set_error(MSGQ_INVALID_MESSAGE);
      return FALSE;
   } /* Endif */
#endif
#if MQX_CHECK_ERRORS
   if (imsg_ptr->FREE || imsg_ptr->QUEUED){
      /* Trying to send a free message, or one on a message queue. */
      _task_set_error(MSGQ_INVALID_MESSAGE);
      return FALSE;
   } /* Endif */
#endif

   pnum = PROC_NUMBER_FROM_QID(target_qid);

   /* If processor number is zero then the message is for this processor */
   if (pnum == 0) {
      /* Fix up the target QID in the message header */
      msg_ptr->TARGET_QID = BUILD_QID(kernel_data->INIT.PROCESSOR_NUMBER,
         msg_ptr->TARGET_QID);
   } else if (pnum != kernel_data->INIT.PROCESSOR_NUMBER) {
      /* Destined for another processor: hand off to the IPC router
       * (or fail if routing is unavailable). */
#if MQX_IS_MULTI_PROCESSOR
      IPC_MSG_ROUTING_COMPONENT_STRUCT_PTR ipc_msg_comp_ptr;
      ipc_msg_comp_ptr = (IPC_MSG_ROUTING_COMPONENT_STRUCT_PTR)
         kernel_data->KERNEL_COMPONENTS[KERNEL_IPC_MSG_ROUTING];
      if (ipc_msg_comp_ptr && ipc_msg_comp_ptr->MSG_ROUTER) {
         return( (*ipc_msg_comp_ptr->MSG_ROUTER)(pnum, msg_ptr, blocking));
      } else {
#endif
         _msg_free(msg_ptr);
         _task_set_error(MSGQ_INVALID_QUEUE_ID);
         return FALSE;
#if MQX_IS_MULTI_PROCESSOR
      }/* Endif */
#endif
   } /* Endif */

   queue = QUEUE_FROM_QID(target_qid);
#if MQX_CHECK_ERRORS
   if ( ! VALID_QUEUE(queue)) {
      _msg_free(msg_ptr);
      _task_set_error(MSGQ_INVALID_QUEUE_ID);
      return FALSE;
   } /* Endif */
#endif

   msgq_ptr = &msg_component_ptr->MSGQS_PTR[queue];
   if (msgq_ptr->QUEUE != queue) {
      /* The slot exists but the queue was never opened (or was closed). */
      msgq_ptr = NULL;
   } /* Endif */
#if MQX_CHECK_ERRORS
   if (msgq_ptr == NULL) {
      _msg_free(msg_ptr);
      _task_set_error(MSGQ_QUEUE_IS_NOT_OPEN);
      return FALSE;
   } /* Endif */
#endif

   _INT_DISABLE();
   if ((msgq_ptr->MAX_ENTRIES == 0) ||
       (msgq_ptr->NO_OF_ENTRIES < msgq_ptr->MAX_ENTRIES))
   {
      /* End CR 2265 */
      /* There is room on the queue, so add the msg. We need to check for
       * room here even if the msg ends up being short-cutted to the receiver
       * (via td_ptr->MESSAGE) in case msg_receive needs to enqueue the msg.
       */
      if (msgq_ptr->TYPE == MSG_QUEUE) {
         /* THIS MESSAGE QUEUE IS ATTACHED TO A TASK */

         /* check for pending receive
         ** if a receive is pending then satisfy the request
         ** and add the receiving task onto the ready-to-run queue
         */
         td_ptr = msgq_ptr->TD_PTR;
         state  = td_ptr->STATE & STATE_MASK;

         if ( (state == RCV_ANY_BLOCKED) ||
            ((state == RCV_SPECIFIC_BLOCKED) && (td_ptr->INFO == queue)))
         {
            /* The task is blocked, waiting for a message: short-cut delivery
             * straight into its TD and make it ready. */
            td_ptr->MESSAGE = &imsg_ptr->MESSAGE;
            imsg_ptr->TD_PTR = td_ptr;
            _TIME_DEQUEUE(td_ptr,kernel_data);
            _TASK_READY(td_ptr,kernel_data);

            /* Now run the notification function */
            if (msgq_ptr->NOTIFICATION_FUNCTION != NULL) {
               (*msgq_ptr->NOTIFICATION_FUNCTION)(msgq_ptr->NOTIFICATION_FUNCTION_PARAMETER);
            } /* Endif */

            if (blocking) {
               if ( ! kernel_data->IN_ISR) {
                  td_ptr = kernel_data->ACTIVE_PTR;
                  td_ptr->STATE = SEND_BLOCKED;
                  _task_block();
               } /* Endif */
            } else {
               /*
               ** if the highest priority ready task is not the
               ** same priority as the sending task, then a higher
               ** priority task was made ready and it has to be allowed
               ** to run.
               */
               _CHECK_RUN_SCHEDULER(); /* Let a higher priority task run */
            } /* Endif */
         } else {
            /* The task is ready to run and pre-empted OR blocked and
            ** on a different queue.
            */
            /* Start CR 2191 */
            swapped_msg = FALSE;
            /* End CR 2191 */
            if ((msg_ptr->CONTROL & MSG_PRIORITY_MASK) &&
               (td_ptr->MESSAGE != NULL))
            {
               /* Check the message in the TD: a higher-priority/urgent new
                * message displaces the one parked there; the displaced one
                * is queued below. */
               tmp_msg_ptr = (MESSAGE_HEADER_STRUCT_PTR)td_ptr->MESSAGE;
               if ( (msg_ptr->CONTROL & MSG_HDR_URGENT) ||
                  /* Urgent messages first */
                  ( (! (tmp_msg_ptr->CONTROL & MSG_HDR_URGENT)) &&
                  /* Start CR 621 */
                  ( (_mqx_uint)(tmp_msg_ptr->CONTROL & MSG_HDR_PRIORITY_MASK) <
                    (_mqx_uint)(msg_ptr->CONTROL & MSG_HDR_PRIORITY_MASK))
                  /* End CR 621 */
                  ) )
                  /* Higher priority messages first */
               {
                  /* Put new message into TD */
                  td_ptr->MESSAGE = msg_ptr;
                  /* Start CR 2193 */
                  /* Set the new message's ownership to the receiving queue's TD */
                  imsg_ptr = GET_INTERNAL_MESSAGE_PTR(msg_ptr);
                  imsg_ptr->TD_PTR = td_ptr;
                  /* Old message which we pulled from TD, need to add to queue, below */
                  /* End CR 2193 */
                  msg_ptr = tmp_msg_ptr;
                  imsg_ptr = GET_INTERNAL_MESSAGE_PTR(msg_ptr);
                  /* Don't know the sender's TD for the swapped out msg, so set it to NULL; */
                  imsg_ptr->TD_PTR = NULL;
                  /* Start CR 2191 */
                  /* Indicate that a swap occurred */
                  swapped_msg = TRUE;
                  /* Set the queue to the swapped msg's queue. */
                  if (target_qid != msg_ptr->TARGET_QID) {
                     queue = QUEUE_FROM_QID(msg_ptr->TARGET_QID);
                     msgq_ptr = &msg_component_ptr->MSGQS_PTR[queue];
                     /* This msg's queue was not full when it was short-cut, so
                        we should not get here. Check anyway. */
                     if ((msgq_ptr->MAX_ENTRIES != 0) &&
                        (msgq_ptr->NO_OF_ENTRIES >= msgq_ptr->MAX_ENTRIES))
                     {
                        /* Queue full, error */
                        _INT_ENABLE();
                        _msg_free(msg_ptr);
                        _task_set_error(MSGQ_QUEUE_FULL);
                        return FALSE;
                     } /* Endif */
                  } /* Endif */
               } /* Endif */
            } /* Endif */

            /* add the message */
            _msgq_insert_message_internal(msgq_ptr, imsg_ptr, swapped_msg);

            if (msgq_ptr->TD_PTR){
               ++(msgq_ptr->TD_PTR->MESSAGES_AVAILABLE);
            } /* Endif */

            /* Now run the notification function */
            if (msgq_ptr->NOTIFICATION_FUNCTION != NULL) {
               (*msgq_ptr->NOTIFICATION_FUNCTION)(msgq_ptr->NOTIFICATION_FUNCTION_PARAMETER);
            } /* Endif */

            if (blocking && ! kernel_data->IN_ISR ) {
               td_ptr = kernel_data->ACTIVE_PTR;
               td_ptr->STATE = SEND_BLOCKED;
               _task_block();
            } /* Endif */
         } /* Endif */
      } else {
         /* THIS IS A SYSTEM QUEUE NOT ATTACHED TO A TASK */

         /* add the message to the queue */
         _msgq_insert_message_internal(msgq_ptr, imsg_ptr, FALSE);

         /* Run the notification function. */
         if ( msgq_ptr->NOTIFICATION_FUNCTION != NULL ) {
            (*msgq_ptr->NOTIFICATION_FUNCTION)(msgq_ptr->NOTIFICATION_FUNCTION_PARAMETER);
         } /* Endif */
      } /* Endif */
   } else {
      /* Queue full, error */
      _INT_ENABLE();
      _task_set_error(MSGQ_QUEUE_FULL);
      _msg_free(&imsg_ptr->MESSAGE);
      return FALSE;
   } /* Endif */
   _INT_ENABLE();
   return TRUE; /* Message sent MQX_OK */
} /* Endbody */
/*! * \brief Tests the event component for validity and consistency. * * \param[out] event_error_ptr Pointer to the lightweight event that has an * error if MQX found an error in the lightweight event component (NULL if no error * is found). * \param[out] td_error_ptr TD on the lightweight event in error (NULL if no * error is found). * * \return MQX_OK * \return MQX_CANNOT_CALL_FUNCTION_FROM_ISR (Function cannot be called from an ISR.) * \return MQX_LWEVENT_INVALID (A lightweight event was invalid.) * \return code from _queue_test() (Waiting queue for a lightweight event has an error.) * * \warning Cannot be called from an ISR. * * \see _lwevent_create * \see _lwevent_destroy */ _mqx_uint _lwevent_test ( void **event_error_ptr, void **td_error_ptr ) { KERNEL_DATA_STRUCT_PTR kernel_data; LWEVENT_STRUCT_PTR event_ptr; _mqx_uint result; _mqx_uint queue_size; _GET_KERNEL_DATA(kernel_data); _KLOGE2(KLOG_lwevent_test, event_error_ptr); *td_error_ptr = NULL; *event_error_ptr = NULL; #if MQX_CHECK_ERRORS if (kernel_data->IN_ISR) { _KLOGX2(KLOG_lwevent_test, MQX_CANNOT_CALL_FUNCTION_FROM_ISR); return (MQX_CANNOT_CALL_FUNCTION_FROM_ISR); }/* Endif */ #endif /* * It is not considered an error if the lwevent component has not been * created yet */ if (kernel_data->LWEVENTS.NEXT == NULL) { return (MQX_OK); } /* Endif */ result = _queue_test((QUEUE_STRUCT_PTR) &kernel_data->LWEVENTS, event_error_ptr); if (result != MQX_OK) { _KLOGX3(KLOG_lwevent_test, result, *event_error_ptr); return (result); } /* Endif */ event_ptr = (LWEVENT_STRUCT_PTR) ((void *) kernel_data->LWEVENTS.NEXT); queue_size = _QUEUE_GET_SIZE(&kernel_data->LWEVENTS); while (queue_size--) { if (event_ptr->VALID != LWEVENT_VALID) { result = MQX_LWEVENT_INVALID; break; } /* Endif */ result = _queue_test(&event_ptr->WAITING_TASKS, td_error_ptr); if (result != MQX_OK) { break; } /* Endif */ event_ptr = (LWEVENT_STRUCT_PTR) (void *) event_ptr->LINK.NEXT; } /* Endwhile */ _int_enable(); if (result != MQX_OK) { 
*event_error_ptr = (void *) event_ptr; } /* Endif */ _KLOGX4(KLOG_lwevent_test, result, *event_error_ptr, *td_error_ptr); return (result); }
/** Pre initialization - initializing requested modules for basic run of MQX.
 *
 * Installs the exit handler, sets up the interrupt vector table, the periodic
 * (PIT) system timer, UART interrupt levels, the optional SRAM pool, caches
 * and the MMU/ACR regions. Returns 0 on success or a non-zero MQX error code
 * from the failing initialization step.
 */
int _bsp_pre_init(void)
{
    KERNEL_DATA_STRUCT_PTR kernel_data;
    uint32_t               result;
    _mqx_int               i;   /* NOTE(review): appears unused in this function — confirm before removing */

    /* Set the CPU type */
    _mqx_set_cpu_type(MQX_CPU);

    /* Set the bsp exit handler, called by _mqx_exit */
    _mqx_set_exit_handler(_bsp_exit_handler);

    /* Initialize the MCF548x support functions */
    /* NOTE(review): comment says MCF548x but the call targets MCF5441x — verify */
    _mcf5441_initialize_support(0);

    /*
    ** Initialize the interrupt handling
    */
    _int_set_vector_table(BSP_RAM_INTERRUPT_VECTOR_TABLE);
    result = _psp_int_init(BSP_FIRST_INTERRUPT_VECTOR_USED, BSP_LAST_INTERRUPT_VECTOR_USED);
    if (result != MQX_OK) {
        return result;
    } /* Endif */

    /* Initialize the timer interrupt */
    _time_set_timer_vector(BSP_TIMER_INTERRUPT_VECTOR);
    if (_int_install_isr(BSP_TIMER_INTERRUPT_VECTOR, _bsp_timer_isr, NULL) == NULL) {
        return MQX_TIMER_ISR_INSTALL_FAIL;
    } /* Endif */

#if BSPCFG_HAS_SRAM_POOL
    /* When kernel data is placed outside of the SRAM memory create new
       _BSP_sram_pool in the SRAM, otherwise if kernel data points to SRAM,
       the _BSP_sram_pool points to system pool.
    */
    if (
        (((uint32_t)__INTERNAL_SRAM_BASE) < (uint32_t)BSP_DEFAULT_START_OF_KERNEL_MEMORY) &&
        (((uint32_t)BSP_DEFAULT_START_OF_KERNEL_MEMORY) <
            ((uint32_t)__INTERNAL_SRAM_BASE + (uint32_t)__INTERNAL_SRAM_SIZE)))
    {
        /* Kernel memory starts inside SRAM: reuse the system pool. */
        _BSP_sram_pool = _mem_get_system_pool_id();
    } else {
        /* Kernel memory is elsewhere: carve a dedicated pool out of SRAM. */
        _BSP_sram_pool = _mem_create_pool(__SRAM_POOL,
            (uint32_t)__INTERNAL_SRAM_BASE + (uint32_t)__INTERNAL_SRAM_SIZE - (uint32_t)__SRAM_POOL);
    }
#endif

    _GET_KERNEL_DATA(kernel_data);

    // Initialize the slice timer to interrupt the specified
    // number of times per second
    kernel_data->TIMER_HW_REFERENCE = _pit_init_freq(BSP_TIMER, BSP_ALARM_FREQUENCY, BSP_SYSTEM_CLOCK/2, FALSE);
    _time_set_hwtick_function(_pit_get_hwticks, (void *)BSP_TIMER);
    _time_set_hwticks_per_tick(kernel_data->TIMER_HW_REFERENCE);
    _time_set_ticks_per_sec(BSP_ALARM_FREQUENCY);
    _pit_unmask_int(BSP_TIMER);

    // Initialize and enable the serial UART interrupts
    _mcf5441_int_init(BSP_UART0_INT_VECTOR, BSP_UART0_INT_LEVEL, TRUE);
    _mcf5441_int_init(BSP_UART2_INT_VECTOR, BSP_UART2_INT_LEVEL, TRUE);
    _mcf5441_int_init(BSP_UART4_INT_VECTOR, BSP_UART4_INT_LEVEL, TRUE);
    _mcf5441_int_init(BSP_UART6_INT_VECTOR, BSP_UART6_INT_LEVEL, TRUE);

    // Install and mask the DMA interrupt handler
    /*
    _int_install_isr(BSP_ENET_DMA_INTERRUPT, _mcf5445_dma_isr, (void *)0);
    _mcf5445_int_init(BSP_ENET_DMA_INTERRUPT, BSP_ENET_DMA_INT_LEVEL, BSP_ENET_DMA_INT_PRIORITY, FALSE);
    */

    // Initialize and disable the security engine interrupt
    // _mcf54xx_int_init(MCF548x_INT_SEC, BSP_SEC_INT_LEVEL, /*BSP_SEC_INT_PRIORITY, */FALSE);

#if BSP_TRAP_EXCEPTIONS
    _int_install_unexpected_isr();
#endif

    // Always invalidate the caches even if not enabled. This allows
    // us to flush the cache always. If we flush before invalidating
    // very bad things happen.
    _ICACHE_INVALIDATE();
    _DCACHE_INVALIDATE();

    if (_mqx_monitor_type == MQX_MONITOR_TYPE_NONE) {
        static const PSP_MMU_INIT_STRUCT mmu_init = {
            /* We define the default cacheability of non-ACR mapped regions */
            /* as non-cacheable and unbuffered */
            MCF54XX_CACR_DDCM(MCF54XX_CACHE_NONCACHEABLE_UNBUFFERED)
        };

        /* Initialize Cache Control Register CACR */
        _mmu_init((void *)&mmu_init);

        /* Set up 1 instruction and 1 data ACR in two separate SDRAM areas */
        /* Caution: Consider memory map in linker command file before changing regions */
        /* Note: Second arg to _mmu_add_region is used in mask value in ACR */
        result = _mmu_add_region(__CACHED_CODE_START,
            __CACHED_CODE_END - __CACHED_CODE_START, PSP_MMU_EXEC_ALLOWED);
        if (result != MQX_OK)
            return result;
        result = _mmu_add_region(__CACHED_DATA_START,
            __CACHED_DATA_END - __CACHED_DATA_START, PSP_MMU_WRITE_THROUGH);
        if (result != MQX_OK)
            return result;

        /* Copy ACR table into ACR registers */
        _MMU_ENABLE();

        /* Enable instruction cache and branch history cache in CACR */
        _ICACHE_ENABLE(MCF54XX_CACR_IEC | MCF54XX_CACR_BEC);
        /* Enable data cache bit in CACR */
        _DCACHE_ENABLE(0);
    } /* Endif */

#if BSPCFG_ENABLE_CPP
    /* initialize C++ constructors */
    __cpp_init();
#endif

    return 0;
}
/*!
 * \brief Waits on a lightweight semaphore for the specified number of ticks.
 *
 * \param[in] sem_ptr       The semaphore address.
 * \param[in] time_in_ticks The number of ticks to delay, if 0, delay forever.
 *
 * \return MQX_OK
 * \return MQX_CANNOT_CALL_FUNCTION_FROM_ISR (Function cannot be called from an ISR.)
 * \return MQX_INVALID_LWSEM (Semaphore invalid, or destroyed while waiting.)
 * \return code from _lwsem_wait_timed_internal() (Timed wait result.)
 */
_mqx_uint _lwsem_wait_ticks
   (
      /* [IN] the semaphore address */
      LWSEM_STRUCT_PTR sem_ptr,

      /* [IN] the number of ticks to delay, if 0, delay forever */
      _mqx_uint        time_in_ticks
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR kernel_data;
   TD_STRUCT_PTR          td_ptr;
   _mqx_uint              result;

#if MQX_ENABLE_USER_MODE && MQX_ENABLE_USER_STDAPI
   /* In user mode, delegate to the user-mode API wrapper. */
   if (MQX_RUN_IN_USER_MODE) {
      return _usr_lwsem_wait_ticks(sem_ptr, time_in_ticks);
   }
#endif

   _GET_KERNEL_DATA(kernel_data);
   _KLOGE3(KLOG_lwsem_wait_ticks, sem_ptr, time_in_ticks);

#if MQX_CHECK_ERRORS
   if (kernel_data->IN_ISR) {
      _KLOGX2(KLOG_lwsem_wait_ticks, MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
      return(MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
   } /* Endif */
#endif
#if MQX_CHECK_VALIDITY
   if (sem_ptr->VALID != LWSEM_VALID) {
      _KLOGX2(KLOG_lwsem_wait_ticks, MQX_INVALID_LWSEM);
      return(MQX_INVALID_LWSEM);
   } /* Endif */
#endif

   _INT_DISABLE();
   if (sem_ptr->VALUE <= 0) {
      /* Semaphore unavailable: the caller must block. */
      td_ptr = kernel_data->ACTIVE_PTR;
      if (time_in_ticks == 0) {
         /* Wait forever: park the TD on the semaphore's queue. INFO records
          * which queue we are on so a destroy/timeout can unlink us. */
         td_ptr->STATE = LWSEM_BLOCKED;
         td_ptr->INFO  = (_mqx_uint)&sem_ptr->TD_QUEUE;
         _QUEUE_UNLINK(td_ptr);
         _QUEUE_ENQUEUE(&sem_ptr->TD_QUEUE, &td_ptr->AUX_QUEUE);
         _sched_execute_scheduler_internal();  /* Let the other tasks run */
         /* Another task has posted a semaphore, and it has been tranfered to this
         ** task.
         */
         result = MQX_OK;
      } else {
         /* Timed wait: compute the absolute timeout, then block. */
         PSP_ADD_TICKS_TO_TICK_STRUCT(&kernel_data->TIME, time_in_ticks,
            &td_ptr->TIMEOUT);
         result = _lwsem_wait_timed_internal(sem_ptr, td_ptr);
      } /* Endif */
   } else {
      /* Semaphore available: take it immediately. */
      --sem_ptr->VALUE;
      /* Start CR 788 */
      result = MQX_OK;
      /* End CR 788 */
   } /* Endif */

   //#if MQX_COMPONENT_DESTRUCTION
   /* We must check for component destruction: the semaphore may have been
    * destroyed while this task was blocked on it. */
   if (sem_ptr->VALID != LWSEM_VALID) {
      _int_enable();
      /* The semaphore has been deleted */
      _KLOGX2(KLOG_lwsem_wait_ticks, MQX_INVALID_LWSEM);
      return(MQX_INVALID_LWSEM);
   } /* Endif */
   //#endif

   _INT_ENABLE();

   _KLOGX2(KLOG_lwsem_wait_ticks, result);
   return(result);
}
/*!
 * \brief Internal worker that receives a message from a message queue.
 *
 * With queue_id == MSGQ_ANY_QUEUE the caller receives from any of its open
 * queues; otherwise from the specific queue (which it must own, unless it is
 * a system queue). If no message is available the caller blocks according to
 * \a mode; a message may then be "short-cut" delivered into the TD by the
 * sender. A swapped short-cut message addressed to a different queue is
 * re-enqueued and the proper top entry dequeued instead.
 *
 * \param[in]  queue_id         Id of the queue from which a message is to be received.
 * \param[in]  timeout_tick_ptr Number of ticks which can expire before this
 *                              request times out.
 * \param[in]  mode             Relative or absolute time specified in tick_ptr.
 * \param[out] error_ptr        Where the error code is to be stored.
 *
 * \return Pointer to the received message, or NULL on timeout/error
 *         (*error_ptr and the task error code are set).
 */
MESSAGE_HEADER_STRUCT_PTR _msgq_receive_internal
   (
      /* [IN] id of the queue from which a message is to be received */
      _queue_id           queue_id,

      /* [IN] indication of the number of ticks which can expire before
      ** this request times out
      */
      MQX_TICK_STRUCT_PTR timeout_tick_ptr,

      /* [IN] relative or absolute time specified in tick_ptr */
      _mqx_uint           mode,

      /* [OUT] where the error code is to be stored */
      _mqx_uint_ptr       error_ptr
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR                 kernel_data;
   MSG_COMPONENT_STRUCT_PTR               msg_component_ptr;
   register TD_STRUCT_PTR                 td_ptr;
   MESSAGE_HEADER_STRUCT_PTR              message_ptr;
   register INTERNAL_MESSAGE_STRUCT_PTR   imsg_ptr;
   register MSGQ_STRUCT_PTR               msgq_ptr;
   _queue_number                          queue;

   *error_ptr = MQX_OK;
   _GET_KERNEL_DATA(kernel_data);

#if MQX_CHECK_ERRORS
   if (kernel_data->IN_ISR) {
      _task_set_error(MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
      *error_ptr = MQX_CANNOT_CALL_FUNCTION_FROM_ISR;
      return(NULL);
   }/* Endif */
#endif

   msg_component_ptr = _GET_MSG_COMPONENT_STRUCT_PTR(kernel_data);
#if MQX_CHECK_ERRORS
   if (msg_component_ptr == NULL){
      _task_set_error(MQX_COMPONENT_DOES_NOT_EXIST);
      *error_ptr = MQX_COMPONENT_DOES_NOT_EXIST;
      return(NULL);
   } /* Endif */
#endif

   message_ptr = NULL;
   td_ptr = kernel_data->ACTIVE_PTR;

   if (queue_id == MSGQ_ANY_QUEUE){
      /* if queue_id is 0 than a receive from any queue is performed */

      /* Does the task own a queue */
      if (td_ptr->MSG_QUEUE_HEAD == NULL){
         /* Does NOT */
         _task_set_error(MSGQ_QUEUE_IS_NOT_OPEN);
         *error_ptr = MSGQ_QUEUE_IS_NOT_OPEN;
         return NULL;
      } /* Endif */

      _INT_DISABLE();
      if (td_ptr->MESSAGES_AVAILABLE == 0){
         /* Nothing queued on any of the task's queues: block until a sender
          * deposits a message or the timeout expires. */
         td_ptr->STATE   = RCV_ANY_BLOCKED;
         td_ptr->INFO    = queue_id;
         td_ptr->MESSAGE = NULL;
         if (mode == MSG_TIMEOUT_NONE) {
            _task_block();
         } else if (mode == MSG_TIMEOUT_RELATIVE) {
            _time_delay_for(timeout_tick_ptr);
         } else {
            _time_delay_until(timeout_tick_ptr);
         } /* Endif */
         /*
         ** SHORT CUT...
         ** The message send routine does not queue up a message in this case.
         ** the message is deposited directly into the task descriptor
         */
         message_ptr = (MESSAGE_HEADER_STRUCT_PTR)td_ptr->MESSAGE;
         if (message_ptr == NULL){
            /* A timeout has occurred */
#if MQXCFG_ENABLE_MSG_TIMEOUT_ERROR
            _task_set_error(MSGQ_MESSAGE_NOT_AVAILABLE);
#endif
         } /* Endif */
         td_ptr->MESSAGE = NULL;
      } else {
         /* Check all queues for an available entry .. There must be at least
         ** one entry available
         */
         msgq_ptr = (MSGQ_STRUCT_PTR)td_ptr->MSG_QUEUE_HEAD;
         while (msgq_ptr != NULL){
            if (msgq_ptr->NO_OF_ENTRIES){
               /* dequeue the top entry */
               DEQUEUE_TOP_MSG_ENTRY(msgq_ptr, imsg_ptr, message_ptr, td_ptr);
               break;
            } /* Endif */
            msgq_ptr = msgq_ptr->NEXT_MSGQ_PTR;
         } /* Endwhile */
      } /* Endif */
   } else {
      /* RECEIVE from a specific qid */
      queue = QUEUE_FROM_QID(queue_id);
#if MQX_CHECK_ERRORS
      if ( (PROC_NUMBER_FROM_QID(queue_id) != kernel_data->INIT.PROCESSOR_NUMBER) ||
         (! VALID_QUEUE(queue)) )
      {
         _task_set_error(MSGQ_INVALID_QUEUE_ID);
         *error_ptr = MSGQ_INVALID_QUEUE_ID;
         return (pointer)message_ptr;
      } /* Endif */
#endif
      msgq_ptr = &msg_component_ptr->MSGQS_PTR[queue];
#if MQX_CHECK_ERRORS
      if ( msgq_ptr->QUEUE != queue ) {
         _task_set_error(MSGQ_QUEUE_IS_NOT_OPEN);
         *error_ptr = MSGQ_QUEUE_IS_NOT_OPEN;
         return message_ptr;
      } /* Endif */
      /* Only the owning task may receive from a task-attached queue. */
      if ( (msgq_ptr->TD_PTR != NULL) && (msgq_ptr->TD_PTR != td_ptr) ) {
         _task_set_error(MSGQ_NOT_QUEUE_OWNER);
         *error_ptr = MSGQ_NOT_QUEUE_OWNER;
         return message_ptr;
      } /* Endif */
#endif
      /*
      ** check the specified queue for an entry
      ** if not entry, then block until an entry is received or
      ** timeout occurs
      */
      _INT_DISABLE();
      if (msgq_ptr->NO_OF_ENTRIES == 0) {
         if (msgq_ptr->TD_PTR == NULL) {
            /* A system message queue, indicate none available */
            message_ptr = NULL;
         } else {
            td_ptr->STATE   = RCV_SPECIFIC_BLOCKED;
            td_ptr->INFO    = queue;
            td_ptr->MESSAGE = NULL;
            if (mode == MSG_TIMEOUT_NONE) {
               _task_block();
            } else if (mode == MSG_TIMEOUT_RELATIVE) {
               _time_delay_for(timeout_tick_ptr);
            } else {
               _time_delay_until(timeout_tick_ptr);
            } /* Endif */
            message_ptr = (MESSAGE_HEADER_STRUCT_PTR)td_ptr->MESSAGE;
            if ( message_ptr == NULL ) {
               /* A timeout has occurred */
#if MQXCFG_ENABLE_MSG_TIMEOUT_ERROR
               _task_set_error(MSGQ_MESSAGE_NOT_AVAILABLE);
#endif
            } else if ((message_ptr->TARGET_QID != queue_id) &&
               (msgq_ptr->NO_OF_ENTRIES > 0))
            {
               /* The original msg was swapped out in msgq_sendi() for a higher
                  priority msg with a different target_qid. Enqueue this msg,
                  and then dequeue the msg we need. */
               register MSGQ_STRUCT_PTR tmp_msgq_ptr;
               register _queue_number   tmp_queue;

               /* Get the msg's queue */
               tmp_queue = QUEUE_FROM_QID(message_ptr->TARGET_QID);
               tmp_msgq_ptr = &msg_component_ptr->MSGQS_PTR[tmp_queue];
               if ((tmp_msgq_ptr->MAX_ENTRIES == 0) ||
                  (tmp_msgq_ptr->NO_OF_ENTRIES < tmp_msgq_ptr->MAX_ENTRIES))
               {
                  /* the msg's queue has room */
                  imsg_ptr = GET_INTERNAL_MESSAGE_PTR(message_ptr);
#if MQX_CHECK_ERRORS
                  if (imsg_ptr->VALID != MSG_VALID){
                     /* An invalid message was input by the application. */
                     _task_set_error(MSGQ_INVALID_MESSAGE);
                     message_ptr = NULL;
                  } else
#endif
                  {
                     /* enqueue the msg */
                     _msgq_insert_message_internal(tmp_msgq_ptr, imsg_ptr, TRUE);
                     if (tmp_msgq_ptr->TD_PTR) {
                        ++(tmp_msgq_ptr->TD_PTR->MESSAGES_AVAILABLE);
                     } /* Endif */
                     /* now dequeue our queue's top entry */
                     DEQUEUE_TOP_MSG_ENTRY(msgq_ptr, imsg_ptr, message_ptr, td_ptr);
                  }
               } else {
                  /* Queue full, error - this should not happen since
                     msgq_sendi() checks for room on the queue for all msgs,
                     including short-cut msgs. */
                  _task_set_error(MSGQ_QUEUE_FULL);
                  message_ptr = NULL;
               }
            } /* Endif */
            td_ptr->MESSAGE = NULL;
         } /* Endif */
      } else {
         /* dequeue the top entry */
         DEQUEUE_TOP_MSG_ENTRY(msgq_ptr, imsg_ptr, message_ptr, td_ptr);
      } /* Endif */
   } /* Endif */
   _INT_ENABLE();
   return message_ptr;
} /* Endbody */
/*!
 * \brief Kernel tick service routine: advances kernel time, wakes expired
 *        timeouts and enforces time slicing.
 *
 * Called on every periodic timer tick (from the timer ISR path). Steps:
 * 1. Increment the kernel tick counter and, if a hardware-tick callout is
 *    installed, resample the sub-tick hardware counter.
 * 2. Ready every task on the time-ordered TIMEOUT_QUEUE whose timeout has
 *    been reached.
 * 3. If the active task is a time-slice task whose slice expired (and
 *    preemption is enabled), requeue it at the end of its ready queue.
 * 4. Service the timer / lwtimer component ISR callouts, if installed.
 */
void _time_notify_kernel
   (
      void
   )
{ /* Body */
   register KERNEL_DATA_STRUCT_PTR kernel_data;
   register TD_STRUCT_PTR          td_ptr;
   register TD_STRUCT_PTR          next_td_ptr;
   register _mqx_uint              count;
   register _mqx_int               result;

   _GET_KERNEL_DATA(kernel_data);

   /*
   ** Update the current time.
   */
   PSP_INC_TICKS(&kernel_data->TIME);

   _INT_DISABLE();
   if (kernel_data->GET_HWTICKS) {
      // The hardware clock may have counted passed it's reference
      // and have an interrupt pending.  Thus, HW_TICKS may exceed
      // kernel_data->HW_TICKS_PER_TICK and this tick_ptr may need
      // normalizing.  This is done in a moment.
      kernel_data->TIME.HW_TICKS = (*kernel_data->GET_HWTICKS)
         (kernel_data->GET_HWTICKS_PARAM);
   } /* Endif */

   // The tick_ptr->HW_TICKS value might exceed the
   // kernel_data->HW_TICKS_PER_TICK and need to be
   // normalized for the PSP.
   PSP_NORMALIZE_TICKS(&kernel_data->TIME);

   /*
   ** Check for tasks on the timeout queue, and wake the appropriate
   ** ones up.  The timeout queue is a time-priority queue.
   */
   count = _QUEUE_GET_SIZE(&kernel_data->TIMEOUT_QUEUE);
   if (count) {
      td_ptr = (TD_STRUCT_PTR)((pointer)kernel_data->TIMEOUT_QUEUE.NEXT);
      ++count;
      while ( --count ) {
         /* Save the successor first: _QUEUE_UNLINK below destroys the link. */
         next_td_ptr = td_ptr->TD_NEXT;
         result = PSP_CMP_TICKS(&kernel_data->TIME, &td_ptr->TIMEOUT);
         if (result >= 0) {
            /* Timeout reached: take the task off the timeout queue. */
            --kernel_data->TIMEOUT_QUEUE.SIZE;
            _QUEUE_UNLINK(td_ptr);
            td_ptr->STATE &= ~IS_ON_TIMEOUT_Q;
            if (td_ptr->STATE & TD_IS_ON_AUX_QUEUE) {
               /* Task was also waiting on an object queue (e.g. lwevent /
               ** lwsem); remove it from there as well before readying it. */
               td_ptr->STATE &= ~TD_IS_ON_AUX_QUEUE;
               _QUEUE_REMOVE(td_ptr->INFO, &td_ptr->AUX_QUEUE);
            } /* Endif */
            _TASK_READY(td_ptr, kernel_data);
         } else {
            /* Queue is time-ordered: first unexpired entry ends the scan. */
            break; /* No more to do */
         } /* Endif */
         td_ptr = next_td_ptr;
      } /* Endwhile */
   } /* Endif */

#if MQX_HAS_TIME_SLICE
   /*
   ** Check if the currently running task is a time slice task
   ** and if its time has expired, put it at the end of its queue
   */
   td_ptr = kernel_data->ACTIVE_PTR;
   if ( td_ptr->FLAGS & MQX_TIME_SLICE_TASK ) {
      PSP_INC_TICKS(&td_ptr->CURRENT_TIME_SLICE);
      if (! (td_ptr->FLAGS & TASK_PREEMPTION_DISABLED) ) {
         result = PSP_CMP_TICKS(&td_ptr->CURRENT_TIME_SLICE, &td_ptr->TIME_SLICE);
         if ( result >= 0 ) {
            /* Slice used up: rotate the task to the end of its ready queue. */
            _QUEUE_UNLINK(td_ptr);
            _TASK_READY(td_ptr,kernel_data);
         } /* Endif */
      } /* Endif */
   } /* Endif */
#endif

   _INT_ENABLE();

#if MQX_USE_TIMER
   /* If the timer component needs servicing, call its ISR function */
   if (kernel_data->TIMER_COMPONENT_ISR != NULL) {
      (*kernel_data->TIMER_COMPONENT_ISR)();
   }/* Endif */
#endif

#if MQX_USE_LWTIMER
   /* If the lwtimer needs servicing, call its ISR function */
   if (kernel_data->LWTIMER_ISR != NULL) {
      (*kernel_data->LWTIMER_ISR)();
   }/* Endif */
#endif

} /* Endbody */
/*!
 * \brief Sets the specified event bits in the event group and readies any
 *        waiting tasks whose wait condition is now satisfied.
 *
 * Walks the event's WAITING_TASKS connection list (with interrupts disabled)
 * and readies each EVENT_BLOCKED task whose mask is satisfied by the new bit
 * set. For an auto-clear event, the satisfied bits are cleared again and at
 * most one task is readied per call.
 *
 * \param[in] users_event_ptr An event handle returned from _event_open or
 *            _event_open_fast.
 * \param[in] bit_mask        Bit mask; each set bit represents an event.
 *
 * \return MQX_OK
 * \return EVENT_INVALID_EVENT_HANDLE (Handle is not a valid connection.)
 * \return MQX_COMPONENT_DOES_NOT_EXIST (Event component not created.)
 * \return MQX_INVALID_COMPONENT_BASE (Event component data is not valid.)
 * \return EVENT_INVALID_EVENT (Event group itself is no longer valid.)
 * \return EVENT_NOT_FOUND (Remote CPU requested but no IPC is installed.)
 */
_mqx_uint _event_set
   (
      /* [IN] - An event handle returned from _event_open or _event_open_fast */
      pointer users_event_ptr,

      /* [IN] - bit mask, each bit of which represents an event. */
      _mqx_uint bit_mask
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR      kernel_data;
   EVENT_STRUCT_PTR            event_ptr;
   EVENT_COMPONENT_STRUCT_PTR  event_component_ptr;
   EVENT_CONNECTION_STRUCT_PTR conn_ptr;
   EVENT_CONNECTION_STRUCT_PTR next_conn_ptr;
   EVENT_CONNECTION_STRUCT_PTR event_connection_ptr;
   TD_STRUCT_PTR               new_td_ptr;
   _mqx_uint                   set_bits;

   _GET_KERNEL_DATA(kernel_data);
   _KLOGE3(KLOG_event_set, users_event_ptr, bit_mask);

   event_connection_ptr = (EVENT_CONNECTION_STRUCT_PTR)users_event_ptr;
#if MQX_CHECK_VALIDITY
   if (event_connection_ptr->VALID != EVENT_VALID){
      _KLOGX2(KLOG_event_set, EVENT_INVALID_EVENT_HANDLE);
      return(EVENT_INVALID_EVENT_HANDLE);
   } /* Endif */
#endif

   event_component_ptr = (EVENT_COMPONENT_STRUCT_PTR)
      kernel_data->KERNEL_COMPONENTS[KERNEL_EVENTS];
#if MQX_CHECK_ERRORS
   if (event_component_ptr == NULL){
      _KLOGX2(KLOG_event_set, MQX_COMPONENT_DOES_NOT_EXIST);
      return(MQX_COMPONENT_DOES_NOT_EXIST);
   } /* Endif */
#endif
#if MQX_CHECK_VALIDITY
   if (event_component_ptr->VALID != EVENT_VALID){
      _KLOGX2(KLOG_event_set, MQX_INVALID_COMPONENT_BASE);
      return(MQX_INVALID_COMPONENT_BASE);
   } /* Endif */
#endif

#if MQX_IS_MULTI_PROCESSOR
   if (event_connection_ptr->REMOTE_CPU) {
      if (kernel_data->IPC) {
         /* The event group lives on another processor: forward the set
         ** request over IPC and report that call's outcome. */
         (*kernel_data->IPC)(TRUE, event_connection_ptr->REMOTE_CPU,
            KERNEL_EVENTS, IPC_EVENT_SET, 2, event_connection_ptr->EVENT_PTR,
            bit_mask);
         _KLOGX2(KLOG_event_set, kernel_data->ACTIVE_PTR->TASK_ERROR_CODE);
         return(kernel_data->ACTIVE_PTR->TASK_ERROR_CODE);
      } else {
         _KLOGX2(KLOG_event_set, EVENT_NOT_FOUND);
         return(EVENT_NOT_FOUND);
      }/* Endif */
   }/* Endif */
#endif

   _INT_DISABLE();

   event_ptr = event_connection_ptr->EVENT_PTR;
#if MQX_CHECK_VALIDITY
   if (event_ptr->VALID != EVENT_VALID) {
      _INT_ENABLE();
      _KLOGX2(KLOG_event_set, EVENT_INVALID_EVENT);
      return(EVENT_INVALID_EVENT);
   } /* Endif */
#endif

   /* Compute the prospective bit set; it is written back to the event only
   ** after waiters are processed (auto-clear may strip bits again). */
   set_bits = event_ptr->EVENT | bit_mask;

   if (_QUEUE_GET_SIZE(&event_ptr->WAITING_TASKS)) {
      /* Schedule waiting task(s) to run if bits ok */
      conn_ptr = (EVENT_CONNECTION_STRUCT_PTR)
         ((pointer)event_ptr->WAITING_TASKS.NEXT);
      while (conn_ptr != (EVENT_CONNECTION_STRUCT_PTR)
         ((pointer)&event_ptr->WAITING_TASKS))
      {
         /* Save successor: _TASK_READY may unlink this connection. */
         next_conn_ptr = (EVENT_CONNECTION_STRUCT_PTR)conn_ptr->NEXT;
         /* Condition: either the waiter wants ALL its mask bits and they are
         ** all present, or it wants ANY bit and at least one is present. */
         if (((conn_ptr->FLAGS & EVENT_WANTS_ALL) &&
            ((conn_ptr->MASK & set_bits) == conn_ptr->MASK)) ||
            ((!(conn_ptr->FLAGS & EVENT_WANTS_ALL)) &&
            (conn_ptr->MASK & set_bits)))
         {
            new_td_ptr = conn_ptr->TD_PTR;
            if ((new_td_ptr->STATE & STATE_MASK) == EVENT_BLOCKED) {
               /* He may have timed out */
               conn_ptr->FLAGS |= EVENT_OCCURRED;
               _TIME_DEQUEUE(new_td_ptr, kernel_data);
               _TASK_READY(new_td_ptr, kernel_data);
               /* Only ready one task if event is an auto clear event */
               if (event_ptr->AUTO_CLEAR) {
                  set_bits &= ~conn_ptr->MASK;
                  break;
               } /* Endif */
            } /* Endif */
         } /* Endif */
         conn_ptr = next_conn_ptr;
      } /* Endwhile */
   } /* Endif */

   event_ptr->EVENT = set_bits;

   _INT_ENABLE();

   /* May need to let higher priority task run */
   _CHECK_RUN_SCHEDULER();

   _KLOGX2(KLOG_event_set, MQX_OK);
   return(MQX_OK);

} /* Endbody */
/*!
 * \brief Creates the message component.
 *
 * The function uses fields in the MQX initialization structure to create the
 * number of message pools (MAX_MSGPOOLS) and message queues (MAX_MSGQS). MQX
 * creates the message component if it is not created when an application calls
 * one of:
 * \li _msgpool_create()
 * \li _msgpool_create_system()
 * \li _msgq_open()
 * \li _msgq_open_system()
 *
 * \return MQX_OK
 * \return MQX_OUT_OF_MEMORY (MQX is out of memory.)
 * \return MSGPOOL_POOL_NOT_CREATED (MQX cannot allocate the data structures for
 * message pools.)
 * \return MSGQ_TOO_MANY_QUEUES (MQX cannot allocate the data structures for
 * message queues.)
 * \return MQX_CANNOT_CALL_FUNCTION_FROM_ISR (Function cannot be called from an ISR.)
 * \return MQX_INVALID_LWSEM (Sem_ptr is for a lightweight semaphore that is not
 * longer valid.)
 * \return MQX_LWSEM_WAIT_TIMEOUT (Timeout expired before the task could get the
 * lightweight semaphore.)
 */
_mqx_uint _msg_create_component(void)
{ /* Body */
    KERNEL_DATA_STRUCT_PTR kernel_data;
    register MSG_COMPONENT_STRUCT_PTR msg_component_ptr;
    pointer pools_ptr;
    pointer msgqs_ptr;
    _mqx_uint error;

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE1(KLOG_msg_create_component);

    /* Serialize component creation against other tasks creating components. */
    error = _lwsem_wait((LWSEM_STRUCT_PTR)&kernel_data->COMPONENT_CREATE_LWSEM);
#if MQX_CHECK_ERRORS
    if (error != MQX_OK) {
        _KLOGX2(KLOG_msg_create_component, error);
        return(error);
    } /* Endif */
#endif

    /* Another task may have created the component while we waited: done. */
    if (kernel_data->KERNEL_COMPONENTS[KERNEL_MESSAGES] != NULL) {
        _lwsem_post((LWSEM_STRUCT_PTR)&kernel_data->COMPONENT_CREATE_LWSEM);
        _KLOGX2(KLOG_msg_create_component, MQX_OK);
        return(MQX_OK);
    } /* Endif */

    msg_component_ptr = (MSG_COMPONENT_STRUCT_PTR)
        _mem_alloc_system_zero((_mem_size)sizeof(MSG_COMPONENT_STRUCT));
#if MQX_CHECK_MEMORY_ALLOCATION_ERRORS
    if (msg_component_ptr == NULL) {
        _lwsem_post((LWSEM_STRUCT_PTR)&kernel_data->COMPONENT_CREATE_LWSEM);
        _KLOGX2(KLOG_msg_create_component, MQX_OUT_OF_MEMORY);
        return(MQX_OUT_OF_MEMORY);
    } /* Endif */
#endif
    _mem_set_type(msg_component_ptr, MEM_TYPE_MESSAGE_COMPONENT);

    /* At least one message pool must exist. */
    if (kernel_data->INIT.MAX_MSGPOOLS == 0) {
        kernel_data->INIT.MAX_MSGPOOLS = 1;
    } /* Endif */

    pools_ptr = _mem_alloc_system_zero((_mem_size)(kernel_data->INIT.MAX_MSGPOOLS *
        sizeof(MSGPOOL_STRUCT)));
#if MQX_CHECK_MEMORY_ALLOCATION_ERRORS
    if (pools_ptr == NULL) {
        _lwsem_post((LWSEM_STRUCT_PTR)&kernel_data->COMPONENT_CREATE_LWSEM);
        _KLOGX2(KLOG_msg_create_component, MSGPOOL_POOL_NOT_CREATED);
        return MSGPOOL_POOL_NOT_CREATED;
    }/* Endif */
#endif
    _mem_set_type(pools_ptr, MEM_TYPE_MESSAGE_POOLS);

    /* Clamp the queue count to [1, MAX_UINT_16 - 1] so queue numbers fit a
     * 16-bit field. */
    if (kernel_data->INIT.MAX_MSGQS >= MAX_UINT_16) {
        kernel_data->INIT.MAX_MSGQS = MAX_UINT_16 - 1;
    } else if (kernel_data->INIT.MAX_MSGQS < 1) {
        kernel_data->INIT.MAX_MSGQS = 1;
    } /* Endif */

    /* One extra MSGQ_STRUCT is allocated; queue numbering here is 1-based
     * (index 0 is unused) -- TODO confirm against QUEUE_FROM_QID usage. */
    msgqs_ptr = _mem_alloc_system_zero(
        (_mem_size)((kernel_data->INIT.MAX_MSGQS + 1) * sizeof(MSGQ_STRUCT)));
#if MQX_CHECK_MEMORY_ALLOCATION_ERRORS
    if (msgqs_ptr == NULL) {
        _lwsem_post((LWSEM_STRUCT_PTR)&kernel_data->COMPONENT_CREATE_LWSEM);
        /* Roll back the pool array allocated above. */
        _mem_free(pools_ptr);
        _KLOGX2(KLOG_msg_create_component, MSGQ_TOO_MANY_QUEUES);
        return MSGQ_TOO_MANY_QUEUES;
    } /* Endif */
#endif
    _mem_set_type(msgqs_ptr, MEM_TYPE_MESSAGE_QUEUES);

    /* Transfer ownership of the arrays into the (zero-initialized) component.
     * NOTE(review): for a freshly zeroed component MSGPOOLS_PTR is always
     * NULL, so this branch always runs; the trailing _mem_free calls are a
     * defensive no-op in that case. */
    if (msg_component_ptr->MSGPOOLS_PTR == NULL) {
        msg_component_ptr->MAX_MSGPOOLS_EVER = 0;
        msg_component_ptr->SMALLEST_MSGPOOL_PTR = NULL;
        msg_component_ptr->LARGEST_MSGPOOL_PTR = NULL;
        msg_component_ptr->MAX_MSGPOOLS = kernel_data->INIT.MAX_MSGPOOLS;
        msg_component_ptr->MAX_MSGQS = kernel_data->INIT.MAX_MSGQS;
        msg_component_ptr->MSGPOOLS_PTR = (MSGPOOL_STRUCT_PTR)pools_ptr;
        pools_ptr = NULL;
        msg_component_ptr->MSGQS_PTR = (MSGQ_STRUCT_PTR)msgqs_ptr;
        msgqs_ptr = NULL;
    }/* Endif */

    msg_component_ptr->VALID = MESSAGE_VALID;

    /* Publish the component; from here on other tasks may use messaging. */
    kernel_data->KERNEL_COMPONENTS[KERNEL_MESSAGES] = msg_component_ptr;

#if MQX_COMPONENT_DESTRUCTION
    kernel_data->COMPONENT_CLEANUP[KERNEL_MESSAGES] = _msg_cleanup;
#endif

    _lwsem_post((LWSEM_STRUCT_PTR)&kernel_data->COMPONENT_CREATE_LWSEM);

    /* Free any array whose ownership was not transferred above. */
    if (pools_ptr) {
        _mem_free(pools_ptr);
    }/* Endif */
    if (msgqs_ptr) {
        _mem_free(msgqs_ptr);
    }/* Endif */

    _KLOGX2(KLOG_msg_create_component, MQX_OK);
    return MQX_OK;

} /* Endbody */
/*!
 * \brief BSP bring-up: initializes the processor, clocks, timer, MPU and
 *        (optionally) the I/O subsystem and device drivers.
 *
 * Called once during MQX startup. Order matters: interrupts first, then the
 * system tick timer, then clock configuration, then kernel time parameters,
 * then MPU/user-mode regions, low-power support and finally the I/O drivers
 * selected by the BSPCFG_* configuration macros.
 *
 * \return MQX_OK on success.
 * \return MQX_TIMER_ISR_INSTALL_FAIL (Timer ISR install or clock setup failed.)
 * \return Any error code from _psp_int_init() or _io_init().
 */
uint_32 _bsp_enable_card
   (
      void
   )
{
   KERNEL_DATA_STRUCT_PTR kernel_data;
   uint_32                result;

   _GET_KERNEL_DATA(kernel_data);

   _mqx_set_cpu_type(MQX_CPU);

#if MQX_EXIT_ENABLED
   /* Set the bsp exit handler, called by _mqx_exit */
   _mqx_set_exit_handler(_bsp_exit_handler);
#endif

   /* Memory splitter - prevent accessing both ram banks in one instruction */
   _mem_alloc_at(0, (void*)0x20000000);

   /* === Debugging is not allowed from here === */
   /* Initialize the interrupt handling */
   result = _psp_int_init(BSP_FIRST_INTERRUPT_VECTOR_USED, BSP_LAST_INTERRUPT_VECTOR_USED);
   /* === Debugging may now resume === */
   if (result != MQX_OK) {
      return result;
   }

   /* set possible new interrupt vector table - if MQX_ROM_VECTORS = 0 switch to
      ram interrupt table which was initialized in _psp_int_init) */
   (void)_int_set_vector_table(BSP_INTERRUPT_VECTOR_TABLE);

   /* Store timer interrupt vector for debugger */
   _time_set_timer_vector(BSP_TIMER_INTERRUPT_VECTOR);

   /* Install Timer ISR. */
   if (_int_install_isr(BSP_TIMER_INTERRUPT_VECTOR, (void (_CODE_PTR_)(pointer))_bsp_systick, NULL) == NULL) {
      return MQX_TIMER_ISR_INSTALL_FAIL;
   }

   /** bsp low level internal initialization. ***/
   _bsp_low_level_init();

   /* System timer initialization */
   systick_init();

   /* MCG initialization and internal oscillators trimming */
   if (CM_ERR_OK != _bsp_set_clock_configuration(BSP_CLOCK_CONFIGURATION_AUTOTRIM)) {
      return MQX_TIMER_ISR_INSTALL_FAIL;
   }
   if (CM_ERR_OK != _bsp_osc_autotrim()) {
      return MQX_TIMER_ISR_INSTALL_FAIL;
   }
   /* Switch to startup clock configuration */
   if (CM_ERR_OK != _bsp_set_clock_configuration(BSP_CLOCK_CONFIGURATION_STARTUP)) {
      return MQX_TIMER_ISR_INSTALL_FAIL;
   }

   /* Initialize the system ticks */
   _GET_KERNEL_DATA(kernel_data);
   kernel_data->TIMER_HW_REFERENCE = (BSP_SYSTEM_CLOCK / BSP_ALARM_FREQUENCY);
   _time_set_ticks_per_sec(BSP_ALARM_FREQUENCY);
   _time_set_hwticks_per_tick(kernel_data->TIMER_HW_REFERENCE);
   _time_set_hwtick_function(_bsp_get_hwticks, (pointer)NULL);

#if MQX_ENABLE_USER_MODE
   _kinetis_mpu_init();

   // supervisor full access, user no access for whole memory
   _kinetis_mpu_add_region(0, ((uchar_ptr)kernel_data->INIT.START_OF_USER_NO_MEMORY) - 1, \
      MPU_WORD_M3SM(MPU_SM_RWX) | MPU_WORD_M3UM(MPU_UM_R | MPU_UM_X) | \
      MPU_WORD_M2SM(MPU_SM_RWX) | MPU_WORD_M2UM(MPU_UM_R | MPU_UM_X) | \
      MPU_WORD_M1SM(MPU_SM_RWX) | MPU_WORD_M1UM(MPU_UM_R | MPU_UM_X) | \
      MPU_WORD_M0SM(MPU_SM_RWX) | MPU_WORD_M0UM(MPU_UM_R | MPU_UM_X));

   _kinetis_mpu_add_region(((uchar_ptr)kernel_data->INIT.END_OF_USER_NO_MEMORY), (uchar_ptr)0xffffffff, \
      MPU_WORD_M3SM(MPU_SM_RWX) | MPU_WORD_M3UM(MPU_UM_R | MPU_UM_X) | \
      MPU_WORD_M2SM(MPU_SM_RWX) | MPU_WORD_M2UM(MPU_UM_R | MPU_UM_X) | \
      MPU_WORD_M1SM(MPU_SM_RWX) | MPU_WORD_M1UM(MPU_UM_R | MPU_UM_X) | \
      MPU_WORD_M0SM(MPU_SM_RWX) | MPU_WORD_M0UM(MPU_UM_R | MPU_UM_X));

   // set access for user memory area
#if MQX_DEFAULT_USER_ACCESS_RW
   // user .data RW
   _kinetis_mpu_add_region(kernel_data->INIT.START_OF_USER_DEFAULT_MEMORY, ((uchar_ptr)kernel_data->INIT.END_OF_USER_DEFAULT_MEMORY) - 1, \
      MPU_WORD_M1SM(MPU_SM_RWX) | MPU_WORD_M1UM(MPU_UM_R | MPU_UM_W) | \
      MPU_WORD_M0SM(MPU_SM_RWX) | MPU_WORD_M0UM(MPU_UM_R | MPU_UM_W));
#else
   // user RO - this is by default
   // user .data RO
   /*_kinetis_mpu_add_region(kernel_data->INIT.START_OF_KERNEL_AREA, kernel_data->INIT.END_OF_KERNEL_AREA, \
      MPU_WORD_M1SM(MPU_SM_RWX) | MPU_WORD_M1UM(MPU_UM_R) | \
      MPU_WORD_M0SM(MPU_SM_RWX) | MPU_WORD_M0UM(MPU_UM_R)); */
#endif // MQX_DEFAULT_USER_ACCESS_RW

   // set access for user memory area
   if (0 == kernel_data->INIT.END_OF_USER_HEAP) {
      // create user heap automaticaly, we have specified only size of heap
      // (end of heap is zero, start of heap mean size)
      LWMEM_POOL_STRUCT_PTR lwmem_pool_ptr;
      uchar_ptr start;

      //start = _lwmem_alloc((char*)kernel_data->INIT.END_OF_USER_HEAP - (char*)kernel_data->INIT.START_OF_USER_HEAP + sizeof(LWMEM_POOL_STRUCT));
      /* NOTE(review): _lwmem_alloc result is used without a NULL check;
         an allocation failure here would fault — verify this is acceptable
         during startup. */
      start = _lwmem_alloc((uint_32)kernel_data->INIT.START_OF_USER_HEAP + sizeof(LWMEM_POOL_STRUCT));
      lwmem_pool_ptr = (LWMEM_POOL_STRUCT_PTR)start;
      start = (pointer)((uchar_ptr)start + sizeof(LWMEM_POOL_STRUCT));
      _lwmem_create_pool(lwmem_pool_ptr, start, (uint_32)kernel_data->INIT.START_OF_USER_HEAP);
      _mem_set_pool_access(lwmem_pool_ptr, POOL_USER_RW_ACCESS);
      kernel_data->KD_USER_POOL = lwmem_pool_ptr;
   }
   else {
      // manual user heap definition
      _kinetis_mpu_add_region(kernel_data->INIT.START_OF_USER_HEAP, ((uchar_ptr)kernel_data->INIT.END_OF_USER_HEAP) - 1, \
         MPU_WORD_M1SM(MPU_SM_RWX) | MPU_WORD_M1UM(MPU_UM_R | MPU_UM_W) | \
         MPU_WORD_M0SM(MPU_SM_RWX) | MPU_WORD_M0UM(MPU_UM_R | MPU_UM_W));
   }

   // set access for user read-write memory area
   if (kernel_data->INIT.START_OF_USER_RW_MEMORY < kernel_data->INIT.END_OF_USER_RW_MEMORY) {
      _kinetis_mpu_add_region(kernel_data->INIT.START_OF_USER_RW_MEMORY, ((uchar_ptr)kernel_data->INIT.END_OF_USER_RW_MEMORY) - 1, \
         MPU_WORD_M1SM(MPU_SM_RWX) | MPU_WORD_M1UM(MPU_UM_R | MPU_UM_W) | \
         MPU_WORD_M0SM(MPU_SM_RWX) | MPU_WORD_M0UM(MPU_UM_R | MPU_UM_W));
   }

   // set access for user read-only memory area
   if (kernel_data->INIT.START_OF_USER_RO_MEMORY < kernel_data->INIT.END_OF_USER_RO_MEMORY) {
      _kinetis_mpu_add_region(kernel_data->INIT.START_OF_USER_RO_MEMORY, ((uchar_ptr)kernel_data->INIT.END_OF_USER_RO_MEMORY) - 1, \
         MPU_WORD_M1SM(MPU_SM_RWX) | MPU_WORD_M1UM(MPU_UM_R) | \
         MPU_WORD_M0SM(MPU_SM_RWX) | MPU_WORD_M0UM(MPU_UM_R));
   }

   // set access for user no access memory area
   if (kernel_data->INIT.START_OF_USER_NO_MEMORY < kernel_data->INIT.END_OF_USER_NO_MEMORY) {
      _kinetis_mpu_add_region(kernel_data->INIT.START_OF_USER_NO_MEMORY, ((uchar_ptr)kernel_data->INIT.END_OF_USER_NO_MEMORY) - 1, \
         MPU_WORD_M1SM(MPU_SM_RWX) | MPU_WORD_M1UM(0) | \
         MPU_WORD_M0SM(MPU_SM_RWX) | MPU_WORD_M0UM(0));
   }

   _kinetis_mpu_enable();
#else
   _kinetis_mpu_disable();
#endif /* MQX_ENABLE_USER_MODE */

   /* Install low power support */
#if MQX_ENABLE_LOW_POWER
   MC_PMPROT = MC_PMPROT_AVLP_MASK | MC_PMPROT_ALLS_MASK;   // allow VLPx, LLS, disallow VLLSx
   _lpm_install (LPM_CPU_OPERATION_MODES, LPM_OPERATION_MODE_RUN);
#endif /* MQX_ENABLE_LOW_POWER */

#if BSPCFG_ENABLE_IO_SUBSYSTEM
   /*------------------------------------------------------------------------*/
   /*
   ** Initialize the I/O Sub-system
   */
   result = _io_init();
   if (result != MQX_OK) {
      return result;
   } /* Endif */

   /* Initialize RTC and MQX time */
#if BSPCFG_ENABLE_RTCDEV
   if (MQX_OK == _bsp_rtc_io_init()) {
      _rtc_init (RTC_INIT_FLAG_ENABLE);
      _rtc_sync_with_mqx (TRUE);
   }
#endif

   /* Install device drivers (each guarded by its BSPCFG_* switch) */
#if BSPCFG_ENABLE_TTYA
   _kuart_polled_install("ttya:", &_bsp_sci0_init, _bsp_sci0_init.QUEUE_SIZE);
#endif
#if BSPCFG_ENABLE_ITTYA
   _kuart_int_install("ittya:", &_bsp_sci0_init, _bsp_sci0_init.QUEUE_SIZE);
#endif
#if BSPCFG_ENABLE_TTYB
   _kuart_polled_install("ttyb:", &_bsp_sci1_init, _bsp_sci1_init.QUEUE_SIZE);
#endif
#if BSPCFG_ENABLE_ITTYB
   _kuart_int_install("ittyb:", &_bsp_sci1_init, _bsp_sci1_init.QUEUE_SIZE);
#endif
#if BSPCFG_ENABLE_TTYC
   _kuart_polled_install("ttyc:", &_bsp_sci2_init, _bsp_sci2_init.QUEUE_SIZE);
#endif
#if BSPCFG_ENABLE_ITTYC
   _kuart_int_install("ittyc:", &_bsp_sci2_init, _bsp_sci2_init.QUEUE_SIZE);
#endif
#if BSPCFG_ENABLE_TTYD
   _kuart_polled_install("ttyd:", &_bsp_sci3_init, _bsp_sci3_init.QUEUE_SIZE);
#endif
#if BSPCFG_ENABLE_ITTYD
   _kuart_int_install("ittyd:", &_bsp_sci3_init, _bsp_sci3_init.QUEUE_SIZE);
#endif
#if BSPCFG_ENABLE_TTYE
   _kuart_polled_install("ttye:", &_bsp_sci4_init, _bsp_sci4_init.QUEUE_SIZE);
#endif
#if BSPCFG_ENABLE_ITTYE
   _kuart_int_install("ittye:", &_bsp_sci4_init, _bsp_sci4_init.QUEUE_SIZE);
#endif
#if BSPCFG_ENABLE_TTYF
   _kuart_polled_install("ttyf:", &_bsp_sci5_init, _bsp_sci5_init.QUEUE_SIZE);
#endif
#if BSPCFG_ENABLE_ITTYF
   _kuart_int_install("ittyf:", &_bsp_sci5_init, _bsp_sci5_init.QUEUE_SIZE);
#endif

#if BSPCFG_ENABLE_I2C0
   _ki2c_polled_install("i2c0:", &_bsp_i2c0_init);
#endif
#if BSPCFG_ENABLE_I2C1
   _ki2c_polled_install("i2c1:", &_bsp_i2c1_init);
#endif
#if BSPCFG_ENABLE_II2C0
   _ki2c_int_install("ii2c0:", &_bsp_i2c0_init);
#endif
#if BSPCFG_ENABLE_II2C1
   _ki2c_int_install("ii2c1:", &_bsp_i2c1_init);
#endif

#if BSPCFG_ENABLE_SPI0
   _dspi_polled_install("spi0:", &_bsp_dspi0_init);
#endif
#if BSPCFG_ENABLE_ISPI0
   _dspi_dma_install("ispi0:", &_bsp_dspi0_init);
#endif
#if BSPCFG_ENABLE_SPI1
   _dspi_polled_install("spi1:", &_bsp_dspi1_init);
#endif
#if BSPCFG_ENABLE_ISPI1
   _dspi_dma_install("ispi1:", &_bsp_dspi1_init);
#endif
#if BSPCFG_ENABLE_SPI2
   _dspi_polled_install("spi2:", &_bsp_dspi2_init);
#endif
#if BSPCFG_ENABLE_ISPI2
   _dspi_dma_install("ispi2:", &_bsp_dspi2_init);
#endif

   /* Install the GPIO driver */
#if BSPCFG_ENABLE_GPIODEV
   _io_gpio_install("gpio:");
#endif

#if BSPCFG_ENABLE_ADC0
   _io_adc_install("adc0:", (pointer) &_bsp_adc0_init);
#endif
#if BSPCFG_ENABLE_ADC1
   _io_adc_install("adc1:", (pointer) &_bsp_adc1_init);
#endif

#if BSPCFG_ENABLE_ESDHC
   _esdhc_install ("esdhc:", &_bsp_esdhc0_init);
#endif

   /* Install the PCCard Flash drivers */
#if BSPCFG_ENABLE_PCFLASH
   _io_pccardflexbus_install("pccarda:", (PCCARDFLEXBUS_INIT_STRUCT _PTR_) &_bsp_cfcard_init);
   _io_apcflash_install("pcflasha:");
#endif

#if BSPCFG_ENABLE_FLASHX
   _io_flashx_install("flashx:", &_bsp_flashx_init);
#endif

#if BSPCFG_ENABLE_IODEBUG
   _io_debug_install("iodebug:", &_bsp_iodebug_init);
#endif

#if BSPCFG_ENABLE_II2S0
   _ki2s_int_install("ii2s0:", &_bsp_i2s0_init);
#endif

   /* Initialize the default serial I/O */
   _io_serial_default_init();

#endif // BSPCFG_ENABLE_IO_SUBSYSTEM

   return MQX_OK;
}
/*!
 * \brief This function is called when a task is being destroyed.
 *
 * Closes all open message queues and then free all messages owned by this task.
 *
 * Two passes: first every queue on the task's MSG_QUEUE_HEAD list is drained
 * and marked closed, then every pool block is scanned for allocated,
 * unqueued messages whose owner is the dying task.
 *
 * \param[in] td_ptr The task descriptor of the task that is being destroyed.
 */
void _msg_cleanup
    (
        TD_STRUCT_PTR td_ptr
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR kernel_data;
    MSG_COMPONENT_STRUCT_PTR msg_component_ptr;
    MSGQ_STRUCT_PTR msgq_ptr;
    MSGQ_STRUCT_PTR qprev_ptr;
    MSGPOOL_STRUCT_PTR msgpool_ptr;
    MSGPOOL_BLOCK_STRUCT_PTR msgpool_block_ptr;
    INTERNAL_MESSAGE_STRUCT_PTR imsg_ptr;
    INTERNAL_MESSAGE_STRUCT_PTR tmp_imsg_ptr;
    _mqx_uint i,j,raw_message_size;

    _GET_KERNEL_DATA(kernel_data);

    msg_component_ptr = _GET_MSG_COMPONENT_STRUCT_PTR(kernel_data);
#if MQX_CHECK_ERRORS
    /* Messaging was never created: nothing to clean up. */
    if (msg_component_ptr == NULL) {
        return;
    } /* Endif */
#endif

    /* Delete the message queues owned by the task, and free the messages
     * on the queues */
    msgq_ptr = (MSGQ_STRUCT_PTR)td_ptr->MSG_QUEUE_HEAD;
    while (msgq_ptr != NULL) {
        if (msgq_ptr->TD_PTR != NULL) {
            /* Account for the entries about to be removed. */
            msgq_ptr->TD_PTR->MESSAGES_AVAILABLE -= msgq_ptr->NO_OF_ENTRIES;
        } /* Endif */

        /* Free every message still sitting on this queue. QUEUED is cleared
         * first so _msg_free accepts the message. */
        imsg_ptr = msgq_ptr->FIRST_MSG_PTR;
        while ( imsg_ptr != NULL ) {
            tmp_imsg_ptr = imsg_ptr->NEXT;
            imsg_ptr->QUEUED = FALSE;
            _msg_free((pointer)&imsg_ptr->MESSAGE);
            imsg_ptr = tmp_imsg_ptr;
        } /* Endwhile */

        /* Advance before wiping the node: NEXT_MSGQ_PTR is cleared below. */
        qprev_ptr = msgq_ptr;
        msgq_ptr = msgq_ptr->NEXT_MSGQ_PTR;
        /* Mark the queue closed (QUEUE == 0 means "not open"). */
        qprev_ptr->FIRST_MSG_PTR = NULL;
        qprev_ptr->NEXT_MSGQ_PTR = NULL;
        qprev_ptr->QUEUE = 0;
    } /* Endwhile */
    td_ptr->MSG_QUEUE_HEAD = NULL;
    td_ptr->MSG_QUEUE_TAIL = NULL;

    /* Search through all of the message pools, looking for any messages
     * owned by this task. If found free them.
     */
    msgpool_ptr = msg_component_ptr->MSGPOOLS_PTR;
    i = msg_component_ptr->MAX_MSGPOOLS + 1;
    while (--i) {
        if (msgpool_ptr->VALID == MSG_VALID) {
            /* Search through all of the message pool blocks for this pool */
            msgpool_block_ptr = msgpool_ptr->MSGPOOL_BLOCK_PTR;
            while (msgpool_block_ptr != NULL) {
                raw_message_size = msgpool_block_ptr->RAW_MESSAGE_SIZE;
                /* Messages are laid out contiguously after the block header,
                 * raw_message_size bytes apart. */
                imsg_ptr = (INTERNAL_MESSAGE_STRUCT_PTR)
                    ((uchar _PTR_)msgpool_block_ptr + sizeof(MSGPOOL_BLOCK_STRUCT));

                /* if the message is not free, not queued and the
                 * owner is the task being destroyed, then Free the message */
                j = msgpool_block_ptr->NUM_MESSAGES + 1;
                while (--j) {
                    if ( (imsg_ptr->TD_PTR == td_ptr) &&
                         (imsg_ptr->FREE == FALSE) &&
                         (imsg_ptr->QUEUED == FALSE) )
                    {
                        _msg_free(&imsg_ptr->MESSAGE);
                    } /* Endif */
                    imsg_ptr =(INTERNAL_MESSAGE_STRUCT_PTR)
                        ((uchar _PTR_)imsg_ptr + raw_message_size);
                } /* Endwhile */

                msgpool_block_ptr = msgpool_block_ptr->NEXT_BLOCK_PTR;
            } /* Endwhile */
        } /* Endif */
        msgpool_ptr++;
    } /* Endwhile */

} /* Endbody */
/*!
 * \private
 *
 * \brief This is internal function used by a task to wait for a specified event.
 *
 * \param[in] event_ptr           Read only. Pointer to the lightweight event.
 * \param[in] bit_mask            Bit mask. Each set bit represents an event bit
 * to wait for.
 * \param[in] all                 TRUE (wait for all bits in bit_mask to be set),
 * FALSE (wait for any bit in bit_mask to be set).
 * \param[in] tick_ptr            Pointer to the maximum number of ticks to wait
 * for the events to be set. If the value is NULL, then the timeout will be infinite.
 * \param[in] ticks_are_absolute  TRUE (ticks represents absolute time), FALSE
 * (ticks represents relative time).
 *
 * \return MQX_OK
 * \return LWEVENT_WAIT_TIMEOUT (The time elapsed before an event signalled.)
 * \return MQX_LWEVENT_INVALID (Lightweight event is no longer valid or was never valid.)
 * \return MQX_CANNOT_CALL_FUNCTION_FROM_ISR (Function cannot be called from an ISR.)
 *
 * \see _lwevent_wait_for
 * \see _usr_lwevent_wait_for
 * \see _lwevent_wait_until
 * \see _usr_lwevent_wait_until
 * \see _lwevent_wait_ticks
 * \see _usr_lwevent_wait_ticks
 * \see LWEVENT_STRUCT
 * \see MQX_TICK_STRUCT
 */
_mqx_uint _lwevent_wait_internal
    (
        LWEVENT_STRUCT_PTR  event_ptr,
        _mqx_uint           bit_mask,
        bool                all,
        MQX_TICK_STRUCT_PTR tick_ptr,
        bool                ticks_are_absolute
    )
{
    KERNEL_DATA_STRUCT_PTR kernel_data;
    TD_STRUCT_PTR          td_ptr;
    _mqx_uint              result;

    _GET_KERNEL_DATA(kernel_data);

#if MQX_CHECK_ERRORS
    /* Blocking is impossible from interrupt context. */
    if (kernel_data->IN_ISR) {
        _KLOGX2(KLOG_lwevent_wait_internal, MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
        return (MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
    } /* Endif */
#endif

    result = MQX_OK;
    td_ptr = kernel_data->ACTIVE_PTR;
    _INT_DISABLE();

#if MQX_CHECK_VALIDITY
    if (event_ptr->VALID != LWEVENT_VALID) {
        _int_enable();
        return (MQX_LWEVENT_INVALID);
    } /* Endif */
#endif

    /* Fast path: the wait condition is already satisfied. */
    if ( (all && (event_ptr->VALUE & bit_mask) == bit_mask)
         || (!all && (event_ptr->VALUE & bit_mask)))
    {
        /* store information about which bits caused task to be unblocked */
        td_ptr->LWEVENT_BITS = event_ptr->VALUE & bit_mask;
        /* clear used automatic events */
        event_ptr->VALUE &= ~(event_ptr->AUTO & bit_mask);

        _INT_ENABLE();
        return (result);
    } /* Endif */

    /* Must wait for a event to become available */

    /* While blocked, LWEVENT_BITS holds the requested mask; the signaller
     * overwrites it with the satisfying bits when it readies this task. */
    td_ptr->LWEVENT_BITS = bit_mask;
    if (all) {
        td_ptr->FLAGS |= TASK_LWEVENT_ALL_BITS_WANTED;
    }
    else {
        td_ptr->FLAGS &= ~TASK_LWEVENT_ALL_BITS_WANTED;
    } /* Endif */

    /* Enqueue at end */
    _QUEUE_ENQUEUE(&event_ptr->WAITING_TASKS, &td_ptr->AUX_QUEUE);

    /* Now put the task to sleep */
    td_ptr->STATE = LWEVENT_BLOCKED;
    /* INFO marks which waiting queue we are on; the signaller clears it,
     * so a non-zero INFO after waking means the wait timed out. */
    td_ptr->INFO = (_mqx_uint) &event_ptr->WAITING_TASKS;
    if (tick_ptr) {
        if (ticks_are_absolute) {
            _time_delay_until(tick_ptr);
        }
        else {
            _time_delay_for(tick_ptr);
        } /* Endif */
        if (td_ptr->INFO) {
            /* Must have timed out */
            /*_QUEUE_REMOVE(&event_ptr->WAITING_TASKS, &td_ptr->AUX_QUEUE);*/
            result = LWEVENT_WAIT_TIMEOUT;
        } /* Endif */
    }
    else {
        _task_block();
    } /* Endif */

#if MQX_COMPONENT_DESTRUCTION
    if (event_ptr->VALID == 0) {
        /* We've been deleted */
        result = MQX_LWEVENT_INVALID;
    } /* Endif */
#endif

    _INT_ENABLE();
    return (result);
}
/*!
 * \brief Allocates a message from the private message pool.
 *
 * The size of the message is determined by the message size that a task
 * specified when it called _msgpool_create(). The message is a resource of the
 * task until the task either frees it (_msg_free()) or puts it on a message
 * queue (_msgq_send() family of functions.)
 *
 * \param[in] pool A pool ID from _msgpool_create().
 *
 * \return Pointer to a message (Success.)
 * \return NULL (Failure.)
 *
 * \warning On failure, calls _task_set_error() to set one the following task
 * error codes:
 * \li MQX_COMPONENT_DOES_NOT_EXIST (Message component is not created.)
 * \li MSGPOOL_INVALID_POOL_ID (Pool_id is not valid.)
 * \li MSGPOOL_OUT_OF_MESSAGES (All the messages in the pool are allocated.)
 * \li Task error codes from _mem_alloc_system() (If MQX needs to grow the pool.)
 *
 * \see _msg_alloc_system
 * \see _msg_free
 * \see _msgpool_create
 * \see _msgpool_destroy
 * \see _task_set_error
 * \see _mem_alloc
 * \see _mem_alloc_from
 * \see _mem_alloc_system
 * \see _mem_alloc_system_from
 * \see _mem_alloc_system_zero
 * \see _mem_alloc_system_zero_from
 * \see _mem_alloc_zero
 * \see _mem_alloc_zero_from
 * \see _mem_alloc_align
 * \see _mem_alloc_align_from
 * \see _mem_alloc_at
 * \see MESSAGE_HEADER_STRUCT
 */
pointer _msg_alloc
   (
      _pool_id pool
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR                 kernel_data;
#if MQX_CHECK_ERRORS
   MSG_COMPONENT_STRUCT_PTR               msg_component_ptr;
#endif
   register INTERNAL_MESSAGE_STRUCT_PTR   imsg_ptr;
   register MESSAGE_HEADER_STRUCT_PTR     message_ptr;
   register MSGPOOL_STRUCT_PTR            msgpool_ptr;
   uint_16                                grow_number;

   _GET_KERNEL_DATA(kernel_data);
   _KLOGE2(KLOG_msg_alloc, pool);

#if MQX_CHECK_ERRORS
   msg_component_ptr = _GET_MSG_COMPONENT_STRUCT_PTR(kernel_data);
   if (msg_component_ptr == NULL) {
      _task_set_error(MQX_COMPONENT_DOES_NOT_EXIST);
      _KLOGX3(KLOG_msg_alloc, NULL, MQX_COMPONENT_DOES_NOT_EXIST);
      return(NULL);
   } /* Endif */
#endif

   message_ptr = NULL;
   /* The pool id is the pool structure's address. */
   msgpool_ptr = (MSGPOOL_STRUCT_PTR)pool;
#if MQX_CHECK_VALIDITY || MQX_CHECK_ERRORS
   if (
#if MQX_CHECK_VALIDITY
      (msgpool_ptr->VALID != MSG_VALID)
#if MQX_CHECK_ERRORS
      ||
#endif
#endif
#if MQX_CHECK_ERRORS
      (msgpool_ptr->MSGPOOL_TYPE != MSG_POOL)
#endif
      )
   {
      _task_set_error(MSGPOOL_INVALID_POOL_ID);
      _KLOGX3(KLOG_msg_alloc, message_ptr, MSGPOOL_INVALID_POOL_ID);
      return (message_ptr);
   } /* Endif */
#endif

   /* Pool empty but growable (GROW_LIMIT == 0 means unlimited growth):
    * try to add GROW_NUMBER messages, clamped to the remaining headroom. */
   if ( (msgpool_ptr->SIZE == 0) && (msgpool_ptr->GROW_NUMBER) &&
      ( (msgpool_ptr->MAX < msgpool_ptr->GROW_LIMIT) ||
      (msgpool_ptr->GROW_LIMIT == 0) ) )
   {
      /* Attempt to add elements to the pool */
      grow_number = msgpool_ptr->GROW_NUMBER;
      if (grow_number > (uint_16)(msgpool_ptr->GROW_LIMIT - msgpool_ptr->MAX)) {
         grow_number = msgpool_ptr->GROW_LIMIT - msgpool_ptr->MAX;
      } /* Endif */
      _msgpool_add_internal(msgpool_ptr, grow_number);
   } /* Endif */

   /* Pop the free list head with interrupts disabled. */
   _INT_DISABLE();
   imsg_ptr = msgpool_ptr->MSG_FREE_LIST_PTR;
   if (imsg_ptr == NULL) {
      _int_enable();
      _task_set_error(MSGPOOL_OUT_OF_MESSAGES);
      _KLOGX3(KLOG_msg_alloc, message_ptr, MSGPOOL_OUT_OF_MESSAGES);
      return((pointer)message_ptr);
   } /* Endif */
   msgpool_ptr->MSG_FREE_LIST_PTR = imsg_ptr->NEXT;
   --msgpool_ptr->SIZE;
   _INT_ENABLE();

   imsg_ptr->FREE = FALSE;
   imsg_ptr->QUEUED = FALSE;
   /* An ISR-allocated message has no owning task. */
   if (kernel_data->IN_ISR) {
      imsg_ptr->TD_PTR = NULL;
   } else {
      imsg_ptr->TD_PTR = kernel_data->ACTIVE_PTR;
   } /* Endif */

   /* Initialize the user-visible message header. */
   message_ptr = (MESSAGE_HEADER_STRUCT_PTR)&imsg_ptr->MESSAGE;
   message_ptr->TARGET_QID = MSGQ_NULL_QUEUE_ID;
   message_ptr->SOURCE_QID = MSGQ_NULL_QUEUE_ID;
   message_ptr->SIZE = msgpool_ptr->MESSAGE_SIZE;
   message_ptr->CONTROL = MSG_HDR_ENDIAN | MSG_DATA_ENDIAN;

   _KLOGX3(KLOG_msg_alloc, message_ptr, MQX_OK);
   return (pointer)message_ptr;

} /* Endbody */
/*!
 * \brief Tests the lightweight log component for consistency.
 *
 * For every created log it verifies that the READ/WRITE/OLDEST pointers and
 * the NEXT_PTR chain all point inside the log's entry array.
 *
 * \param[out] log_error_ptr Pointer to the lightweight log if error is found (NULL
 * if no error is found).
 *
 * \return MQX_OK Lightweight log component data is valid (Log_error_ptr is NULL.).
 * \return LOG_INVALID Information for a specific lightweight log is not valid
 * (Log_error_ptr contains log number of the first invalid lightweight log.).
 * \return MQX_INVALID_POINTER Log_error_ptr is NULL.
 * \return MQX_INVALID_COMPONENT_BASE Lightweight log component data is not valid
 * (Log_error_ptr is NULL.).
 *
 * \see _lwlog_create_component
 * \see _lwlog_create
 * \see _lwlog_create_at
 */
_mqx_uint _lwlog_test
(
    _mqx_uint *log_error_ptr
)
{ /* Body */
    KERNEL_DATA_STRUCT_PTR     kernel_data;
    LWLOG_COMPONENT_STRUCT_PTR log_component_ptr;
    LWLOG_HEADER_STRUCT_PTR    log_header_ptr;
    LWLOG_ENTRY_STRUCT_PTR     entry_ptr;
    unsigned char             *min_ptr;
    unsigned char             *max_ptr;
    _mqx_uint                  i, j;

    _GET_KERNEL_DATA(kernel_data);

#if MQX_CHECK_ERRORS
    if ((void *) log_error_ptr == NULL) {
        return MQX_INVALID_POINTER;
    } /* Endif */
#endif
    *log_error_ptr = 0;

    log_component_ptr = (LWLOG_COMPONENT_STRUCT_PTR)
        kernel_data->KERNEL_COMPONENTS[KERNEL_LWLOG];
    if (log_component_ptr == NULL) {
        /* Component never created: nothing to test. */
        return (MQX_OK);
    } /* Endif */

    _int_disable();
#if MQX_CHECK_VALIDITY
    if (log_component_ptr->VALID != LWLOG_VALID) {
        _int_enable();
        return (MQX_INVALID_COMPONENT_BASE);
    } /* Endif */
#endif

    for (i = 0; i < LOG_MAXIMUM_NUMBER; i++) {
        log_header_ptr = log_component_ptr->LOGS[i];
        if (log_header_ptr != NULL) {
            /* Verify the log pointers.
             *
             * Valid entries occupy [&FIRST_ENTRY,
             * &FIRST_ENTRY + MAX_ENTRIES * sizeof(LWLOG_ENTRY_STRUCT)), as
             * shown by the entry scan below starting at FIRST_ENTRY.
             *
             * FIX: the original computed
             *     min_ptr = (unsigned char *)log_header_ptr
             *               + sizeof(LWLOG_HEADER_STRUCT_PTR);
             * taking the size of a POINTER (a few bytes), so min_ptr landed
             * inside the header itself and corrupt READ/WRITE/OLDEST
             * pointers aiming at header fields passed validation. */
            min_ptr = (unsigned char *) &log_header_ptr->FIRST_ENTRY;
            max_ptr = min_ptr
                + sizeof(LWLOG_ENTRY_STRUCT) * log_header_ptr->MAX_ENTRIES;
            if (((unsigned char *) log_header_ptr->READ_PTR < min_ptr) ||
                ((unsigned char *) log_header_ptr->READ_PTR >= max_ptr) ||
                ((unsigned char *) log_header_ptr->WRITE_PTR < min_ptr) ||
                ((unsigned char *) log_header_ptr->WRITE_PTR >= max_ptr) ||
                ((unsigned char *) log_header_ptr->OLDEST_PTR < min_ptr) ||
                ((unsigned char *) log_header_ptr->OLDEST_PTR >= max_ptr))
            {
                _int_enable();
                *log_error_ptr = i;
                return (LOG_INVALID);
            } /* Endif */

            /* Check each entry in the log; the j counter bounds the walk so
             * a corrupted circular chain cannot loop forever. */
            entry_ptr = &log_header_ptr->FIRST_ENTRY;
            j = log_header_ptr->MAX_ENTRIES;
            while (entry_ptr->NEXT_PTR && j) {
                entry_ptr = entry_ptr->NEXT_PTR;
                --j;
                if (((unsigned char *) entry_ptr < min_ptr) ||
                    ((unsigned char *) entry_ptr >= max_ptr))
                {
                    _int_enable();
                    *log_error_ptr = i;
                    return (LOG_INVALID);
                } /* Endif */
            } /* Endwhile */
        } /* Endif */
    } /* Endfor */

    _int_enable();
    return (MQX_OK);

} /* Endbody */