void *_io_set_handle
    (
        /* [IN] which I/O handle to modify */
        _mqx_uint stdio_type,

        /* [IN] the new I/O handle */
        void     *new_file_ptr
    )
{ /* Body */
    register KERNEL_DATA_STRUCT_PTR kernel_data;
    register TD_STRUCT_PTR          active_ptr;
    register void                  *old_file_ptr;

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE3(KLOG_io_set_handle, stdio_type, new_file_ptr);

    switch ( (uint16_t)stdio_type ) {
        case IO_PROC_STDIN:
            old_file_ptr = kernel_data->PROCESSOR_STDIN;
            kernel_data->PROCESSOR_STDIN = new_file_ptr;
            break;
        case IO_PROC_STDOUT:
            old_file_ptr = kernel_data->PROCESSOR_STDOUT;
            kernel_data->PROCESSOR_STDOUT = new_file_ptr;
            break;
        case IO_PROC_STDERR:
            old_file_ptr = kernel_data->PROCESSOR_STDERR;
            kernel_data->PROCESSOR_STDERR = new_file_ptr;
            break;
        case IO_STDIN:
            active_ptr = kernel_data->ACTIVE_PTR;
            old_file_ptr = active_ptr->STDIN_STREAM;
            active_ptr->STDIN_STREAM = new_file_ptr;
            break;
        case IO_STDOUT:
            active_ptr = kernel_data->ACTIVE_PTR;
            old_file_ptr = active_ptr->STDOUT_STREAM;
            active_ptr->STDOUT_STREAM = new_file_ptr;
            break;
        case IO_STDERR:
            active_ptr = kernel_data->ACTIVE_PTR;
            old_file_ptr = active_ptr->STDERR_STREAM;
            active_ptr->STDERR_STREAM = new_file_ptr;
            break;
        default:
            old_file_ptr = NULL;
    } /* Endswitch */

    _KLOGX2(KLOG_io_set_handle, old_file_ptr);

    return (old_file_ptr);

} /* Endbody */
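/*
 * Usage sketch (illustrative, not part of the original source): redirect the
 * calling task's standard output to another serial device and restore it
 * afterwards.  Assumes <mqx.h> and <fio.h> are available; the device name
 * "ittyb:" and the function name are hypothetical examples.
 */
#include <mqx.h>
#include <fio.h>

static void redirect_stdout_example(void)
{
    MQX_FILE_PTR new_out;
    MQX_FILE_PTR old_out;

    new_out = fopen("ittyb:", NULL);               /* open the alternate device */
    if (new_out == NULL) {
        return;                                    /* device not available      */
    }

    old_out = _io_set_handle(IO_STDOUT, new_out);  /* swap the task's stdout    */
    printf("printed on ittyb:\n");

    _io_set_handle(IO_STDOUT, old_out);            /* restore the old handle    */
    fclose(new_out);
}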
_mqx_uint _taskq_resume
    (
        /* [IN] the task queue handle */
        pointer users_task_queue_ptr,

        /* [IN] TRUE if all tasks on the queue are to be resumed */
        boolean all_tasks
    )
{ /* Body */
    register KERNEL_DATA_STRUCT_PTR kernel_data;
    register TD_STRUCT_PTR          td_ptr;
    register TASK_QUEUE_STRUCT_PTR  task_queue_ptr =
        (TASK_QUEUE_STRUCT_PTR)users_task_queue_ptr;

    _GET_KERNEL_DATA(kernel_data);

    _KLOGE3(KLOG_taskq_resume, users_task_queue_ptr, all_tasks);

#if MQX_CHECK_ERRORS
    if (task_queue_ptr == NULL) {
        _KLOGX2(KLOG_taskq_resume, MQX_INVALID_TASK_QUEUE);
        return(MQX_INVALID_TASK_QUEUE);
    } /* Endif */
#endif

    _INT_DISABLE();

#if MQX_CHECK_VALIDITY
    if (task_queue_ptr->VALID != TASK_QUEUE_VALID) {
        _int_enable();
        _KLOGX2(KLOG_taskq_resume, MQX_INVALID_TASK_QUEUE);
        return(MQX_INVALID_TASK_QUEUE);
    } /* Endif */
#endif

    if (_QUEUE_GET_SIZE(&task_queue_ptr->TD_QUEUE) == 0) {
        /* Task queue is empty */
        _int_enable();
        _KLOGX2(KLOG_taskq_resume, MQX_TASK_QUEUE_EMPTY);
        return(MQX_TASK_QUEUE_EMPTY);
    } /* Endif */

    if (all_tasks) {
        while (_QUEUE_GET_SIZE(&task_queue_ptr->TD_QUEUE)) {
            _QUEUE_DEQUEUE(&task_queue_ptr->TD_QUEUE, td_ptr);
            _TASK_READY(td_ptr, kernel_data);
        } /* Endwhile */
    } else {
        _QUEUE_DEQUEUE(&task_queue_ptr->TD_QUEUE, td_ptr);
        _TASK_READY(td_ptr, kernel_data);
    } /* Endif */

    _INT_ENABLE();
    _CHECK_RUN_SCHEDULER(); /* Let higher priority task run */

    _KLOGX2(KLOG_taskq_resume, MQX_OK);

    return( MQX_OK );

} /* Endbody */
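/*
 * Usage sketch (illustrative, not from the original source): a controller task
 * creates an explicit task queue, worker tasks park themselves on it, and the
 * controller later releases every waiting task in one call.  Assumes <mqx.h>
 * is included and that the queue handle is shared between the tasks; the task
 * names and the 10 ms delay are arbitrary.
 */
#include <mqx.h>

static pointer demo_task_queue;                 /* shared task queue handle     */

static void worker_task_body(uint_32 initial_data)
{
    /* Block the calling task on the queue until someone resumes it. */
    _taskq_suspend(demo_task_queue);
    /* ... execution continues here after _taskq_resume() ... */
}

static void controller_task_body(uint_32 initial_data)
{
    demo_task_queue = _taskq_create(MQX_TASK_QUEUE_FIFO);
    /* ... create the worker tasks here and let them block on the queue ... */
    _time_delay(10);                            /* give workers time to block   */
    _taskq_resume(demo_task_queue, TRUE);       /* TRUE: wake every waiting task*/
}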
/*!
 * \brief Gets the number that is associated with the name in the names database.
 *
 * \param[in]  name       Pointer to the name for which to get the associated number.
 * \param[out] number_ptr Pointer to the number.
 *
 * \return MQX_OK
 * \return MQX_COMPONENT_DOES_NOT_EXIST (Name component is not created.)
 * \return NAME_TOO_SHORT (Name is 0 length string.)
 * \return NAME_TOO_LONG (Name is longer than NAME_MAX_NAME_SIZE.)
 * \return MQX_INVALID_COMPONENT_BASE (Name component data are not valid.)
 * \return NAME_NOT_FOUND (Name is not in the names database.)
 *
 * \see _name_add
 * \see _name_create_component
 * \see _name_delete
 */
_mqx_uint _name_find
    (
        char              *name,
        _mqx_max_type_ptr  number_ptr
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR kernel_data;
    void                  *handle;
    _mqx_uint              result;

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE3(KLOG_name_find, name, number_ptr);

    handle = kernel_data->KERNEL_COMPONENTS[KERNEL_NAME_MANAGEMENT];

#if MQX_CHECK_ERRORS
    if (handle == NULL) {
        _KLOGX2(KLOG_name_find, MQX_COMPONENT_DOES_NOT_EXIST);
        return (MQX_COMPONENT_DOES_NOT_EXIST);
    } /* Endif */
#endif /* MQX_CHECK_ERRORS */

    result = _name_find_internal(handle, name, number_ptr);

    _KLOGX3(KLOG_name_find, result, *number_ptr);

    return (result);

} /* Endbody */
/*!
 * \brief Gets the name that is associated with the number in the names database.
 *
 * The function finds the first entry in the database that matches the number
 * and returns its name.
 *
 * \param[in]  number   Number for which to get the associated name.
 * \param[out] name_ptr Pointer to the name.
 *
 * \return MQX_OK
 * \return MQX_COMPONENT_DOES_NOT_EXIST (Name component is not created.)
 * \return MQX_INVALID_COMPONENT_BASE (Name component data are not valid.)
 * \return NAME_NOT_FOUND (Name is not in the names database.)
 *
 * \see _name_add
 * \see _name_create_component
 * \see _name_delete
 */
_mqx_uint _name_find_by_number
    (
        _mqx_max_type  number,
        char          *name_ptr
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR kernel_data;
    void                  *handle;
    _mqx_uint              result;

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE3(KLOG_name_find_name, number, name_ptr);

    handle = kernel_data->KERNEL_COMPONENTS[KERNEL_NAME_MANAGEMENT];

    if (handle == NULL) {
        _KLOGX2(KLOG_name_find_name, MQX_COMPONENT_DOES_NOT_EXIST);
        return (MQX_COMPONENT_DOES_NOT_EXIST);
    } /* Endif */

    result = _name_find_name_internal(handle, number, name_ptr);

    _KLOGX2(KLOG_name_find_name, result);

    return (result);

} /* Endbody */
/*!
 * \brief Used by a task to clear the specified event bits in the lightweight event.
 *
 * \param[in] event_ptr Pointer to the event.
 * \param[in] bit_mask  Bit mask. Each bit represents an event bit to clear.
 *
 * \return MQX_OK
 * \return MQX_LWEVENT_INVALID (Lightweight event is not valid.)
 *
 * \see _lwevent_create
 * \see _lwevent_destroy
 * \see _lwevent_set
 * \see _lwevent_set_auto_clear
 * \see _lwevent_test
 * \see _lwevent_wait_for
 * \see _lwevent_wait_ticks
 * \see _lwevent_wait_until
 * \see _lwevent_get_signalled
 * \see LWEVENT_STRUCT
 */
_mqx_uint _lwevent_clear
    (
        LWEVENT_STRUCT_PTR event_ptr,
        _mqx_uint          bit_mask
    )
{
    _KLOGM(KERNEL_DATA_STRUCT_PTR kernel_data);

#if MQX_ENABLE_USER_MODE && MQX_ENABLE_USER_STDAPI
    if (MQX_RUN_IN_USER_MODE) {
        return _usr_lwevent_clear(event_ptr, bit_mask);
    }
#endif

    _KLOGM(_GET_KERNEL_DATA(kernel_data));
    _KLOGE3(KLOG_lwevent_clear, event_ptr, bit_mask);

    _INT_DISABLE();

#if MQX_CHECK_VALIDITY
    if (event_ptr->VALID != LWEVENT_VALID) {
        _int_enable();
        _KLOGX2(KLOG_lwevent_clear, MQX_LWEVENT_INVALID);
        return (MQX_LWEVENT_INVALID);
    } /* Endif */
#endif

    event_ptr->VALUE &= ~bit_mask;

    _INT_ENABLE();

    _KLOGX2(KLOG_lwevent_clear, MQX_OK);

    return (MQX_OK);
}
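/*
 * Usage sketch (illustrative, not from the original source): a consumer task
 * waits for event bit 0 and then clears it explicitly because the lightweight
 * event was created without the auto-clear option.  Assumes <mqx.h> and
 * <lwevent.h> are included; the event object and bit assignment are
 * hypothetical.
 */
#include <mqx.h>
#include <lwevent.h>

#define DEMO_EVENT_BIT  0x01

static LWEVENT_STRUCT demo_lwevent;

static void lwevent_consumer_example(void)
{
    _lwevent_create(&demo_lwevent, 0);                   /* manual-clear event */

    /* Block until bit 0 is set (timeout of 0 ticks = wait forever). */
    if (_lwevent_wait_ticks(&demo_lwevent, DEMO_EVENT_BIT, TRUE, 0) == MQX_OK) {
        /* ... handle the event ... */
        _lwevent_clear(&demo_lwevent, DEMO_EVENT_BIT);   /* re-arm the bit     */
    }
}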
_mqx_uint _lwsem_wait_for
    (
        /* [IN] the semaphore address */
        LWSEM_STRUCT_PTR    sem_ptr,

        /* [IN] the number of ticks to delay */
        MQX_TICK_STRUCT_PTR ticks
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR kernel_data;
    TD_STRUCT_PTR          td_ptr;
    _mqx_uint              result;

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE3(KLOG_lwsem_wait_for, sem_ptr, ticks);

#if MQX_CHECK_ERRORS
    if (kernel_data->IN_ISR) {
        _KLOGX2(KLOG_lwsem_wait_for, MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
        return(MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
    } /* Endif */
#endif
#if MQX_CHECK_VALIDITY
    if (sem_ptr->VALID != LWSEM_VALID) {
        _KLOGX2(KLOG_lwsem_wait_for, MQX_INVALID_LWSEM);
        return(MQX_INVALID_LWSEM);
    } /* Endif */
#endif

    _INT_DISABLE();
    if (sem_ptr->VALUE <= 0) {
        td_ptr = kernel_data->ACTIVE_PTR;
        /* Calculate time to wake up the task */
        PSP_ADD_TICKS(ticks, &kernel_data->TIME, &td_ptr->TIMEOUT);
        result = _lwsem_wait_timed_internal(sem_ptr, td_ptr);
    } else {
        --sem_ptr->VALUE;
        /* Start CR 788 */
        result = MQX_OK;
        /* End CR 788 */
    } /* Endif */

//#if MQX_COMPONENT_DESTRUCTION
    /* We must check for component destruction */
    if (sem_ptr->VALID != LWSEM_VALID) {
        _int_enable();
        /* The semaphore has been deleted */
        _KLOGX2(KLOG_lwsem_wait_for, MQX_INVALID_LWSEM);
        return(MQX_INVALID_LWSEM);
    } /* Endif */
//#endif

    _INT_ENABLE();

    _KLOGX2(KLOG_lwsem_wait_for, result);

    return(result);

} /* Endbody */
_mqx_uint _name_test
    (
        /* [OUT] the address of the base name component in error */
        pointer _PTR_ name_error_ptr,

        /* [OUT] the address of the name component extension in error */
        pointer _PTR_ name_extension_error_ptr
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR    kernel_data;
    NAME_COMPONENT_STRUCT_PTR name_component_ptr;
    _mqx_uint                 result;

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE3(KLOG_name_test, name_error_ptr, name_extension_error_ptr);

    *name_error_ptr           = NULL;
    *name_extension_error_ptr = NULL;

    name_component_ptr = (NAME_COMPONENT_STRUCT_PTR)
        kernel_data->KERNEL_COMPONENTS[KERNEL_NAME_MANAGEMENT];
    if (name_component_ptr == NULL) {
        _KLOGX2(KLOG_name_test, MQX_OK);
        return(MQX_OK);
    } /* Endif */

    result = _name_test_internal(name_component_ptr, name_error_ptr,
        name_extension_error_ptr);

    _KLOGX2(KLOG_name_test, result);

    return(result);

} /* Endbody */
pointer _lwmem_alloc_from
    (
        /* [IN] the pool to allocate from */
        _lwmem_pool_id pool_id,

        /* [IN] the size of the memory block */
        _mem_size      requested_size
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR kernel_data;
    pointer                result;

#if MQX_ENABLE_USER_MODE && MQX_ENABLE_USER_STDAPI
    if (MQX_RUN_IN_USER_MODE) {
        return _usr_lwmem_alloc_from(pool_id, requested_size);
    }
#endif

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE3(KLOG_lwmem_alloc_from, pool_id, requested_size);

    result = _lwmem_alloc_internal(requested_size, kernel_data->ACTIVE_PTR,
        pool_id, FALSE);

    _KLOGX2(KLOG_lwmem_alloc_from, result);

    return(result);
}
/*!
 * \brief Installs the kernel ISR handler. The kernel ISR depends on the PSP.
 *
 * Some real-time applications need special event handling to occur outside the
 * scope of MQX. The need might arise that the latency in servicing an interrupt
 * be less than the MQX interrupt latency. If this is the case, an application can
 * use _int_install_kernel_isr() to bypass MQX and let the interrupt be serviced
 * immediately.
 * \n Because the function returns the previous kernel ISR, applications can
 * temporarily install an ISR or chain ISRs so that each new one calls the one
 * installed before it.
 * \n A kernel ISR must save the registers that it needs and must service the
 * hardware interrupt. When the kernel ISR is finished, it must restore the
 * registers and perform a return-from-interrupt instruction.
 * \n A kernel ISR cannot call MQX functions. However, it can put data in global
 * data, which a task can access.
 *
 * \note The function is not available for all PSPs.
 *
 * \param[in] vector  Vector where the ISR is to be installed.
 * \param[in] isr_ptr Pointer to the ISR to install into the vector table.
 *
 * \return Pointer to the previous kernel ISR for the vector (Success.)
 * \return NULL
 *
 * \see _int_kernel_isr
 * \see _int_get_kernel_isr
 */
INT_KERNEL_ISR_FPTR _int_install_kernel_isr
    (
        uint_32             vector,
        INT_KERNEL_ISR_FPTR isr_ptr
    )
{
#if !MQX_ROM_VECTORS

#if MQX_KERNEL_LOGGING
    KERNEL_DATA_STRUCT_PTR kernel_data;
#endif
    INT_KERNEL_ISR_FPTR    old_isr_ptr;
    uint_32                result_code;
    uint_32_ptr            loc_ptr;

#if MQX_KERNEL_LOGGING
    _GET_KERNEL_DATA(kernel_data);
#endif

    _KLOGE3(KLOG_int_install_kernel_isr, vector, isr_ptr);

#if MQX_CHECK_ERRORS
    result_code = MQX_OK;
    old_isr_ptr = NULL;

    if ( vector >= PSP_MAXIMUM_INTERRUPT_VECTORS ) {
        result_code = MQX_INVALID_VECTORED_INTERRUPT;
    } else {
#endif

        loc_ptr = (uint_32_ptr)_int_get_vector_table();
        old_isr_ptr = (INT_KERNEL_ISR_FPTR)loc_ptr[vector];
        loc_ptr[vector] = (uint_32)isr_ptr;

#if MQX_CHECK_ERRORS
    } /* Endif */

    /* Set result code and return result. */
    _task_set_error(result_code);
#endif

    _KLOGX3(KLOG_int_install_kernel_isr, old_isr_ptr, result_code);

    return (old_isr_ptr);

#else /* MQX_ROM_VECTORS */

#if MQX_CHECK_ERRORS
    /* Set result code and return result. */
    _task_set_error(MQX_INVALID_CONFIGURATION);
#endif
    return NULL;

#endif
}
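/*
 * Usage sketch (illustrative, not from the original source): install a
 * low-latency handler directly into the vector table and keep the previous
 * kernel ISR so it can be chained or restored later.  The handler itself must
 * be a bare interrupt routine (typically assembly or a compiler-specific
 * interrupt function) that saves and restores its own registers and returns
 * with a return-from-interrupt; it must not call MQX services.  The vector
 * number and handler symbol below are assumptions, not taken from the source.
 */
#include <mqx.h>

extern void my_fast_isr(void);        /* bare handler, e.g. written in assembly */

#define DEMO_FAST_VECTOR  (64)        /* hypothetical device vector number      */

static INT_KERNEL_ISR_FPTR previous_kernel_isr;

static void install_fast_handler(void)
{
    previous_kernel_isr = _int_install_kernel_isr(DEMO_FAST_VECTOR, my_fast_isr);
    if (previous_kernel_isr == NULL) {
        /* Vector out of range or vector table in ROM; the task error code
         * (see _task_get_error()) holds the reason. */
    }
}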
/*!
 * \brief Set the time slice in milliseconds.
 *
 * \param[in] task_id One of the following:
 * \n - Task ID for a task on this processor for which to set the time slice.
 * \n - MQX_DEFAULT_TASK_ID (Set the time slice for the processor.)
 * \n - MQX_NULL_TASK_ID (Set the time slice for the calling task.)
 * \param[in] rr_interval New time slice (in milliseconds).
 *
 * \return old_rr_interval Previous time slice (Success.)
 * \return MAX_UINT_32
 *
 * \warning On failure, calls _task_set_error() to set the task error code to
 * MQX_SCHED_INVALID_TASK_ID.
 *
 * \see _sched_set_rr_interval_ticks
 * \see _sched_get_rr_interval
 * \see _sched_get_rr_interval_ticks
 * \see _task_set_error
 */
uint32_t _sched_set_rr_interval
    (
        _task_id task_id,
        uint32_t rr_interval
    )
{ /* Body */
    _KLOGM(KERNEL_DATA_STRUCT_PTR kernel_data);
    uint32_t        old_rr_interval;
    MQX_TICK_STRUCT ticks;
    MQX_TICK_STRUCT old_ticks;
    _mqx_uint       result;

    _KLOGM(_GET_KERNEL_DATA(kernel_data));

    _KLOGE3(KLOG_sched_set_rr_interval, (_mqx_uint)task_id, rr_interval);

#if MQX_CHECK_ERRORS
    /* Validate parameters */
    if (0 == rr_interval) {
        _KLOGX2(KLOG_sched_set_rr_interval, MAX_UINT_32);
        _task_set_error( MQX_SCHED_INVALID_PARAMETER_PTR );
        return (MAX_UINT_32);
    } /* Endif */
#endif

    /* Compute the number of tick events required to cover at least the
     * requested time [ms]:
     *   tick_events = (required_time[ms] + (time_per_tick[ms] - 1)) / time_per_tick[ms]
     *               = ((required_time[ms] - 1) / time_per_tick[ms]) + 1
     */
    rr_interval--;
    /* Convert milliseconds to ticks, truncated */
    PSP_MILLISECONDS_TO_TICKS_QUICK(rr_interval, &ticks);
    /* Resolve truncation by adding one tick. */
    PSP_ADD_TICKS_TO_TICK_STRUCT(&ticks, 1, &ticks);

    result = _sched_set_rr_interval_internal(task_id, &ticks, &old_ticks);
    if (result != MQX_OK) {
        _task_set_error(result);
        _KLOGX2(KLOG_sched_set_rr_interval, MAX_UINT_32);
        return(MAX_UINT_32);
    } /* Endif */

    old_rr_interval = PSP_TICKS_TO_MILLISECONDS(&old_ticks, &result);

    _KLOGX2(KLOG_sched_set_rr_interval, old_rr_interval);

    return(old_rr_interval);

} /* Endbody */
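/*
 * Usage sketch (illustrative, not from the original source): give the calling
 * task a 10 ms time slice and remember the previous value so it can be put
 * back later.  Assumes <mqx.h> is included and that the kernel was built with
 * MQX_HAS_TIME_SLICE enabled; the 10 ms value is arbitrary.
 */
#include <mqx.h>

static void set_time_slice_example(void)
{
    uint32_t old_slice_ms;

    old_slice_ms = _sched_set_rr_interval(MQX_NULL_TASK_ID, 10);
    if (old_slice_ms == MAX_UINT_32) {
        return;                     /* failed; _task_get_error() has the reason */
    }

    /* ... run with the 10 ms slice (only takes effect under MQX_SCHED_RR) ... */

    _sched_set_rr_interval(MQX_NULL_TASK_ID, old_slice_ms);   /* restore        */
}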
_mqx_uint _event_clear
    (
        /* [IN] - An event handle returned from a call to _event_open() or
        ** _event_open_fast()
        */
        pointer   users_event_ptr,

        /* [IN] - bit mask, each bit of which represents an event. */
        _mqx_uint bit_mask
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR               kernel_data;
    register EVENT_CONNECTION_STRUCT_PTR event_connection_ptr;
    register EVENT_STRUCT_PTR            event_ptr;

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE3(KLOG_event_clear, users_event_ptr, bit_mask);

    event_connection_ptr = (EVENT_CONNECTION_STRUCT_PTR)users_event_ptr;

#if MQX_CHECK_VALIDITY
    if (event_connection_ptr->VALID != EVENT_VALID) {
        _KLOGX2(KLOG_event_clear, EVENT_INVALID_EVENT_HANDLE);
        return(EVENT_INVALID_EVENT_HANDLE);
    } /* Endif */
#endif

    _INT_DISABLE();

    event_ptr = event_connection_ptr->EVENT_PTR;

#if MQX_CHECK_VALIDITY
    if (event_ptr->VALID != EVENT_VALID) {
        _int_enable();
        _KLOGX2(KLOG_event_clear, EVENT_INVALID_EVENT);
        return(EVENT_INVALID_EVENT);
    } /* Endif */
#endif

    event_ptr->EVENT &= ~bit_mask;

    _INT_ENABLE();

    _KLOGX2(KLOG_event_clear, MQX_OK);

    return(MQX_OK);

} /* Endbody */
/*!
 * \brief Adds the name and its associated number to the names database.
 *
 * \param[in] name   The name to add.
 * \param[in] number The number to be associated with the name.
 *
 * \return MQX_OK
 * \return MQX_CANNOT_CALL_FUNCTION_FROM_ISR (Function cannot be called from an ISR.)
 * \return MQX_INVALID_COMPONENT_BASE (Name component data is not valid.)
 * \return MQX_OUT_OF_MEMORY (MQX cannot allocate memory for the name component.)
 * \return NAME_EXISTS (Name is already in the names database.)
 * \return NAME_TABLE_FULL (Names database is full.)
 * \return NAME_TOO_LONG (Name is longer than NAME_MAX_NAME_SIZE.)
 * \return NAME_TOO_SHORT (Name is 0 length string.)
 *
 * \warning Creates the name component with default values if it was not previously
 * created.
 * \warning Cannot be called from an ISR.
 *
 * \see _name_create_component
 * \see _name_delete
 * \see _name_find
 */
_mqx_uint _name_add
    (
        char          *name,
        _mqx_max_type  number
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR kernel_data;
    void                  *handle;
    _mqx_uint              result;

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE3(KLOG_name_add, name, number);

#if MQX_CHECK_ERRORS
    if (kernel_data->IN_ISR) {
        _KLOGX2(KLOG_name_add, MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
        return (MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
    } /* Endif */
#endif /* MQX_CHECK_ERRORS */

    handle = kernel_data->KERNEL_COMPONENTS[KERNEL_NAME_MANAGEMENT];
    if (handle == NULL) {
        result = _name_create_component(NAME_DEFAULT_INITIAL_NUMBER,
            NAME_DEFAULT_GROW_NUMBER, NAME_DEFAULT_MAXIMUM_NUMBER);
        handle = kernel_data->KERNEL_COMPONENTS[KERNEL_NAME_MANAGEMENT];
#if MQX_CHECK_MEMORY_ALLOCATION_ERRORS
        if (handle == NULL) {
            _KLOGX2(KLOG_name_add, result);
            return (result);
        } /* Endif */
#endif /* MQX_CHECK_MEMORY_ALLOCATION_ERRORS */
    } /* Endif */

    result = _name_add_internal(handle, name, number);

    _KLOGX2(KLOG_name_add, result);

    return (result);

} /* Endbody */
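/*
 * Usage sketch (illustrative, not from the original source): publish a queue
 * id in the names database and look it up from another task.  Assumes
 * <mqx.h>, <message.h>, and <name.h> are included; the name string and the
 * helper functions are hypothetical.
 */
#include <mqx.h>
#include <message.h>
#include <name.h>

static void publish_queue_id(_queue_id qid)
{
    /* Creates the name component with defaults on first use. */
    if (_name_add("demo_server_queue", (_mqx_max_type)qid) != MQX_OK) {
        /* name already present, table full, ... */
    }
}

static _queue_id lookup_queue_id(void)
{
    _mqx_max_type value = 0;

    if (_name_find("demo_server_queue", &value) != MQX_OK) {
        return MSGQ_NULL_QUEUE_ID;
    }
    return (_queue_id)value;
}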
pointer _msgq_receive_for
    (
        /* [IN] id of the queue from which a message is to be received */
        _queue_id           queue_id,

        /*
        ** [IN] the number of ticks which can expire before
        ** this request times out
        */
        MQX_TICK_STRUCT_PTR tick_ptr
    )
{ /* Body */
#if MQX_KERNEL_LOGGING
    KERNEL_DATA_STRUCT_PTR    kernel_data;
#endif
    MESSAGE_HEADER_STRUCT_PTR message_ptr;
    _mqx_uint                 error;

#if MQX_KERNEL_LOGGING
    _GET_KERNEL_DATA(kernel_data);
    _KLOGE3(KLOG_msgq_receive_for, queue_id, tick_ptr);
#endif

    message_ptr = _msgq_receive_internal(queue_id, tick_ptr,
        MSG_TIMEOUT_RELATIVE, &error);

#if MQX_KERNEL_LOGGING
    if ( (error == MQX_OK) && (message_ptr == NULL) ) {
        _KLOGX3(KLOG_msgq_receive_for, message_ptr, MSGQ_MESSAGE_NOT_AVAILABLE);
    } else if (error == MQX_OK) {
        _KLOGX5(KLOG_msgq_receive_for, message_ptr, message_ptr->TARGET_QID,
            message_ptr->SOURCE_QID,
            *(_mqx_uint_ptr)((uchar_ptr)message_ptr +
                sizeof(MESSAGE_HEADER_STRUCT)));
    } else {
        _KLOGX3(KLOG_msgq_receive_for, message_ptr, error);
    } /* Endif */
#endif

    return (pointer)message_ptr;

} /* Endbody */
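/*
 * Usage sketch (illustrative, not from the original source): wait up to 100 ms
 * for a message on a private queue, expressing the relative timeout as a tick
 * structure.  Assumes <mqx.h> and <message.h> are included; the queue number
 * and the 100 ms timeout are arbitrary examples.
 */
#include <mqx.h>
#include <message.h>

#define DEMO_QUEUE_NUMBER  (8)

static void receive_with_timeout_example(void)
{
    _queue_id                 qid;
    MQX_TICK_STRUCT           timeout;
    MESSAGE_HEADER_STRUCT_PTR msg_ptr;

    qid = _msgq_open(DEMO_QUEUE_NUMBER, 0);
    if (qid == MSGQ_NULL_QUEUE_ID) {
        return;
    }

    _time_init_ticks(&timeout, 0);              /* zero the tick structure      */
    _time_add_msec_to_ticks(&timeout, 100);     /* relative timeout of 100 ms   */

    msg_ptr = (MESSAGE_HEADER_STRUCT_PTR)_msgq_receive_for(qid, &timeout);
    if (msg_ptr != NULL) {
        /* ... process the message ... */
        _msg_free(msg_ptr);
    }
}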
_mqx_uint _watchdog_test
    (
        /* [OUT] the watchdog component base if an error occurs */
        pointer _PTR_ watchdog_error_ptr,

        /* [OUT] the watchdog table pointer if an error occurs */
        pointer _PTR_ watchdog_table_error_ptr
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR        kernel_data;
    WATCHDOG_COMPONENT_STRUCT_PTR watchdog_component_ptr;

    _GET_KERNEL_DATA(kernel_data);

    _KLOGE3(KLOG_watchdog_test, watchdog_error_ptr, watchdog_table_error_ptr);

    *watchdog_error_ptr       = NULL;
    *watchdog_table_error_ptr = NULL;

    watchdog_component_ptr = (WATCHDOG_COMPONENT_STRUCT_PTR)
        kernel_data->KERNEL_COMPONENTS[KERNEL_WATCHDOG];
    if (watchdog_component_ptr == NULL) {
        _KLOGX2(KLOG_watchdog_test, MQX_OK);
        return(MQX_OK);
    } /* Endif */

    *watchdog_error_ptr = watchdog_component_ptr;
    if (watchdog_component_ptr->VALID != WATCHDOG_VALID) {
        _KLOGX2(KLOG_watchdog_test, MQX_INVALID_COMPONENT_BASE);
        return(MQX_INVALID_COMPONENT_BASE);
    } /* Endif */

    _KLOGX2(KLOG_watchdog_test, MQX_OK);

    return(MQX_OK);

} /* Endbody */
_mqx_uint _task_set_priority
    (
        /* [IN] the task id to use */
        _task_id      task_id,

        /* [IN] the new task priority */
        _mqx_uint     new_priority,

        /* [OUT] the location where the old task priority is to be placed */
        _mqx_uint_ptr priority_ptr
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR kernel_data;
    READY_Q_STRUCT_PTR     ready_q_ptr;
    TD_STRUCT_PTR          td_ptr;
    TASK_QUEUE_STRUCT_PTR  task_queue_ptr;

    _GET_KERNEL_DATA(kernel_data);

    _KLOGE3(KLOG_task_set_priority, task_id, new_priority);

#if MQX_CHECK_ERRORS
    if (new_priority > kernel_data->LOWEST_TASK_PRIORITY) {
        _KLOGX2(KLOG_task_set_priority, MQX_INVALID_PARAMETER);
        return(MQX_INVALID_PARAMETER);
    } /* Endif */
#endif

    td_ptr = (TD_STRUCT_PTR)_task_get_td(task_id);
    if (td_ptr == NULL) {
        _KLOGX2(KLOG_task_set_priority, MQX_INVALID_TASK_ID);
        return(MQX_INVALID_TASK_ID);
    } /* Endif */

    _int_disable();

    /* Return old priority */
    *priority_ptr = td_ptr->HOME_QUEUE->PRIORITY;

    /* Make the change permanent */
    ready_q_ptr = kernel_data->READY_Q_LIST;
    td_ptr->HOME_QUEUE = ready_q_ptr - new_priority;

    if (td_ptr->BOOSTED) {
        /* Can only change priority to a higher (lower value) */
        if (new_priority < td_ptr->MY_QUEUE->PRIORITY) {
            /* Move the task to the correct priority level */
            _sched_set_priority_internal(td_ptr, new_priority);
        } /* Endif */
    } else {
        /* Move the task to the correct priority level */
        _sched_set_priority_internal(td_ptr, new_priority);
    } /* Endif */

    if (td_ptr->STATE == TASK_QUEUE_BLOCKED) {
        task_queue_ptr = (TASK_QUEUE_STRUCT_PTR)
            ((uchar_ptr)td_ptr->INFO -
                FIELD_OFFSET(TASK_QUEUE_STRUCT, TD_QUEUE));
        if (task_queue_ptr->POLICY & MQX_TASK_QUEUE_BY_PRIORITY) {
            /* Requeue the td by priority */
            _QUEUE_REMOVE(&task_queue_ptr->TD_QUEUE, td_ptr);
            _sched_insert_priorityq_internal(&task_queue_ptr->TD_QUEUE, td_ptr);
        } /* Endif */
    } /* Endif */

    /* Allow higher priority tasks to run */
    _CHECK_RUN_SCHEDULER();

    _int_enable();

    _KLOGX2(KLOG_task_set_priority, MQX_OK);

    return MQX_OK;

} /* Endbody */
/*!
 * \brief Tests all the periodic queues and their lightweight timers for
 * validity and consistency.
 *
 * \param[out] period_error_ptr Pointer to the first periodic queue that has
 * an error (NULL if no error is found).
 * \param[out] timer_error_ptr  Pointer to the first timer that has an error
 * (NULL if no error is found).
 *
 * \return MQX_OK (No periodic queues have been created or no errors found
 * in any periodic queues or timers.)
 * \return MQX_LWTIMER_INVALID (Period_ptr points to an invalid periodic queue.)
 * \return Error from _queue_test() (A periodic queue or its queue was in error.)
 *
 * \see _lwtimer_add_timer_to_queue
 * \see _lwtimer_cancel_period
 * \see _lwtimer_cancel_timer
 * \see _lwtimer_create_periodic_queue
 */
_mqx_uint _lwtimer_test
    (
        void **period_error_ptr,
        void **timer_error_ptr
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR    kernel_data;
    LWTIMER_STRUCT_PTR        timer_ptr;
    LWTIMER_PERIOD_STRUCT_PTR period_ptr;
    _mqx_uint                 result;

    _GET_KERNEL_DATA(kernel_data);

    _KLOGE3(KLOG_lwtimer_test, period_error_ptr, timer_error_ptr);

    *period_error_ptr = NULL;
    *timer_error_ptr  = NULL;

    /*
     * It is not considered an error if the lwtimer component has not been
     * created yet
     */
    if (kernel_data->LWTIMERS.NEXT == NULL) {
        return (MQX_OK);
    } /* Endif */

    result = _queue_test(&kernel_data->LWTIMERS, period_error_ptr);
    if (result != MQX_OK) {
        _KLOGX3(KLOG_lwtimer_test, result, *period_error_ptr);
        return (result);
    } /* Endif */

    _int_disable();

    period_ptr = (void *) kernel_data->LWTIMERS.NEXT;
    while ((void *) period_ptr != (void *) &kernel_data->LWTIMERS) {
        if (period_ptr->VALID != LWTIMER_VALID) {
            _int_enable();
            *period_error_ptr = period_ptr;
            _KLOGX3(KLOG_lwtimer_test, MQX_LWTIMER_INVALID, period_ptr);
            return (MQX_LWTIMER_INVALID);
        } /* Endif */
        result = _queue_test(&period_ptr->TIMERS, timer_error_ptr);
        if (result != MQX_OK) {
            _int_enable();
            *period_error_ptr = period_ptr;
            _KLOGX4(KLOG_lwtimer_test, result, *period_error_ptr,
                *timer_error_ptr);
            return (result);
        } /* Endif */
        timer_ptr = (void *) period_ptr->TIMERS.NEXT;
        while (timer_ptr != (void *) &period_ptr->TIMERS) {
            if (timer_ptr->VALID != LWTIMER_VALID) {
                _int_enable();   /* re-enable interrupts before the early return */
                *period_error_ptr = period_ptr;
                *timer_error_ptr  = timer_ptr;
                _KLOGX4(KLOG_lwtimer_test, MQX_LWTIMER_INVALID, period_ptr,
                    timer_ptr);
                return (MQX_LWTIMER_INVALID);
            } /* Endif */
            timer_ptr = (void *) timer_ptr->LINK.NEXT;
        } /* Endwhile */
        period_ptr = (void *) period_ptr->LINK.NEXT;
    } /* Endwhile */

    _int_enable();

    _KLOGX2(KLOG_lwtimer_test, MQX_OK);

    return (MQX_OK);

} /* Endbody */
_mqx_uint _lwsem_wait_ticks
    (
        /* [IN] the semaphore address */
        LWSEM_STRUCT_PTR sem_ptr,

        /* [IN] the number of ticks to delay, if 0, delay forever */
        _mqx_uint        time_in_ticks
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR kernel_data;
    TD_STRUCT_PTR          td_ptr;
    _mqx_uint              result;

#if MQX_ENABLE_USER_MODE && MQX_ENABLE_USER_STDAPI
    if (MQX_RUN_IN_USER_MODE) {
        return _usr_lwsem_wait_ticks(sem_ptr, time_in_ticks);
    }
#endif

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE3(KLOG_lwsem_wait_ticks, sem_ptr, time_in_ticks);

#if MQX_CHECK_ERRORS
    if (kernel_data->IN_ISR) {
        _KLOGX2(KLOG_lwsem_wait_ticks, MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
        return(MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
    } /* Endif */
#endif
#if MQX_CHECK_VALIDITY
    if (sem_ptr->VALID != LWSEM_VALID) {
        _KLOGX2(KLOG_lwsem_wait_ticks, MQX_INVALID_LWSEM);
        return(MQX_INVALID_LWSEM);
    } /* Endif */
#endif

    _INT_DISABLE();
    if (sem_ptr->VALUE <= 0) {
        td_ptr = kernel_data->ACTIVE_PTR;
        if (time_in_ticks == 0) {
            td_ptr->STATE = LWSEM_BLOCKED;
            td_ptr->INFO  = (_mqx_uint)&sem_ptr->TD_QUEUE;
            _QUEUE_UNLINK(td_ptr);
            _QUEUE_ENQUEUE(&sem_ptr->TD_QUEUE, &td_ptr->AUX_QUEUE);
            _sched_execute_scheduler_internal(); /* Let the other tasks run */
            /* Another task has posted a semaphore, and it has been transferred
            ** to this task.
            */
            result = MQX_OK;
        } else {
            PSP_ADD_TICKS_TO_TICK_STRUCT(&kernel_data->TIME, time_in_ticks,
                &td_ptr->TIMEOUT);
            result = _lwsem_wait_timed_internal(sem_ptr, td_ptr);
        } /* Endif */
    } else {
        --sem_ptr->VALUE;
        /* Start CR 788 */
        result = MQX_OK;
        /* End CR 788 */
    } /* Endif */

//#if MQX_COMPONENT_DESTRUCTION
    /* We must check for component destruction */
    if (sem_ptr->VALID != LWSEM_VALID) {
        _int_enable();
        /* The semaphore has been deleted */
        _KLOGX2(KLOG_lwsem_wait_ticks, MQX_INVALID_LWSEM);
        return(MQX_INVALID_LWSEM);
    } /* Endif */
//#endif

    _INT_ENABLE();

    _KLOGX2(KLOG_lwsem_wait_ticks, result);

    return(result);
}
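/*
 * Usage sketch (illustrative, not from the original source): a simple
 * producer/consumer handshake with a lightweight semaphore and a bounded
 * wait.  Assumes <mqx.h> is included; the semaphore object and the 200-tick
 * timeout are arbitrary examples.
 */
#include <mqx.h>

static LWSEM_STRUCT demo_lwsem;

static void lwsem_consumer_example(void)
{
    _lwsem_create(&demo_lwsem, 0);                /* initial count of 0         */

    /* Wait at most 200 ticks for a post. */
    if (_lwsem_wait_ticks(&demo_lwsem, 200) == MQX_OK) {
        /* ... resource available ... */
    } else {
        /* timed out or semaphore destroyed */
    }
}

static void lwsem_producer_example(void)
{
    _lwsem_post(&demo_lwsem);                     /* release one waiter         */
}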
/*!
 * \brief Used by a task to set the specified event bits in an event.
 *
 * \param[in] event_ptr Pointer to the lightweight event to set bits in.
 * \param[in] bit_mask  Bit mask. Each bit represents an event bit to be set.
 *
 * \return MQX_OK
 * \return MQX_LWEVENT_INVALID (Lightweight event was invalid.)
 *
 * \see _lwevent_create
 * \see _lwevent_destroy
 * \see _lwevent_set_auto_clear
 * \see _lwevent_clear
 * \see _lwevent_test
 * \see _lwevent_wait_for
 * \see _lwevent_wait_ticks
 * \see _lwevent_wait_until
 * \see _lwevent_get_signalled
 * \see LWEVENT_STRUCT
 */
_mqx_uint _lwevent_set
    (
        LWEVENT_STRUCT_PTR event_ptr,
        _mqx_uint          bit_mask
    )
{
    KERNEL_DATA_STRUCT_PTR   kernel_data;
    QUEUE_ELEMENT_STRUCT_PTR q_ptr;
    QUEUE_ELEMENT_STRUCT_PTR next_q_ptr;
    TD_STRUCT_PTR            td_ptr;
    _mqx_uint                set_bits;

#if MQX_ENABLE_USER_MODE && MQX_ENABLE_USER_STDAPI
    if (MQX_RUN_IN_USER_MODE) {
        return _usr_lwevent_set(event_ptr, bit_mask);
    }
#endif

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE3(KLOG_lwevent_set, event_ptr, bit_mask);

    _INT_DISABLE();

#if MQX_CHECK_VALIDITY
    if (event_ptr->VALID != LWEVENT_VALID) {
        _int_enable();
        _KLOGX2(KLOG_lwevent_set, MQX_LWEVENT_INVALID);
        return (MQX_LWEVENT_INVALID);
    } /* Endif */
#endif

    set_bits = event_ptr->VALUE | bit_mask;

    if (_QUEUE_GET_SIZE(&event_ptr->WAITING_TASKS)) {
        /* Schedule waiting task(s) to run if bits ok */

        q_ptr = event_ptr->WAITING_TASKS.NEXT;
        while (q_ptr != (QUEUE_ELEMENT_STRUCT_PTR)
            ((void *) &event_ptr->WAITING_TASKS))
        {
            td_ptr = (void *) q_ptr;
            _BACKUP_POINTER(td_ptr, TD_STRUCT, AUX_QUEUE);
            next_q_ptr = q_ptr->NEXT;
            if (((td_ptr->FLAGS & TASK_LWEVENT_ALL_BITS_WANTED) &&
                    ((td_ptr->LWEVENT_BITS & set_bits) == td_ptr->LWEVENT_BITS)) ||
                ((!(td_ptr->FLAGS & TASK_LWEVENT_ALL_BITS_WANTED)) &&
                    (td_ptr->LWEVENT_BITS & set_bits)))
            {
                _QUEUE_REMOVE(&event_ptr->WAITING_TASKS, q_ptr);
                _TIME_DEQUEUE(td_ptr, kernel_data);
                td_ptr->INFO = 0;
                _TASK_READY(td_ptr, kernel_data);

                /* store information about which bits caused task to be unblocked */
                td_ptr->LWEVENT_BITS &= set_bits;
                set_bits &= ~(event_ptr->AUTO & td_ptr->LWEVENT_BITS);
            } /* Endif */
            q_ptr = next_q_ptr;
        } /* Endwhile */
    } /* Endif */

    event_ptr->VALUE = set_bits;
    _INT_ENABLE();

    /* May need to let higher priority task run */
    _CHECK_RUN_SCHEDULER();

    _KLOGX2(KLOG_lwevent_set, MQX_OK);

    return (MQX_OK);
}
_mqx_uint _mutex_init
    (
        /* [IN] - the address where the mutex is to be initialized */
        register MUTEX_STRUCT_PTR      mutex_ptr,

        /* [IN] - Initialization parameters for the mutex */
        register MUTEX_ATTR_STRUCT_PTR attr_ptr
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR     kernel_data;
    MUTEX_COMPONENT_STRUCT_PTR mutex_component_ptr;
    MUTEX_ATTR_STRUCT          default_attr;
#if MQX_CHECK_ERRORS
    MUTEX_STRUCT_PTR           mutex_chk_ptr;
#endif
    _mqx_uint                  result;

    _GET_KERNEL_DATA(kernel_data);

    if (attr_ptr == NULL) {
        attr_ptr = &default_attr;
        _mutatr_init(attr_ptr);
        _KLOGE3(KLOG_mutex_init, mutex_ptr, NULL);
    } else {
        _KLOGE3(KLOG_mutex_init, mutex_ptr, attr_ptr);
    } /* Endif */

#if MQX_CHECK_ERRORS
    if (mutex_ptr == NULL) {
        _KLOGX2(KLOG_mutex_init, MQX_EINVAL);
        return(MQX_EINVAL);
    } /* Endif */
#endif
#if MQX_CHECK_VALIDITY
    if (attr_ptr->VALID != MUTEX_VALID) {
        _KLOGX2(KLOG_mutex_init, MQX_EINVAL);
        return(MQX_EINVAL);
    } /* Endif */
#endif

    mutex_component_ptr = (MUTEX_COMPONENT_STRUCT_PTR)
        kernel_data->KERNEL_COMPONENTS[KERNEL_MUTEXES];
    if (mutex_component_ptr == NULL) {
        result = _mutex_create_component();
        mutex_component_ptr = (MUTEX_COMPONENT_STRUCT_PTR)
            kernel_data->KERNEL_COMPONENTS[KERNEL_MUTEXES];
#if MQX_CHECK_MEMORY_ALLOCATION_ERRORS
        if (mutex_component_ptr == NULL) {
            _KLOGX2(KLOG_mutex_init, result);
            return(result);
        } /* Endif */
#endif
    } /* Endif */

#if MQX_CHECK_VALIDITY
    if (mutex_component_ptr->VALID != MUTEX_VALID) {
        _KLOGX2(KLOG_mutex_init, MQX_INVALID_COMPONENT_BASE);
        return(MQX_INVALID_COMPONENT_BASE);
    } /* Endif */
#endif

    _int_disable();

#if MQX_CHECK_ERRORS
    /* Check if mutex is already initialized */
    mutex_chk_ptr = (MUTEX_STRUCT_PTR)
        ((pointer)mutex_component_ptr->MUTEXES.NEXT);
    while (mutex_chk_ptr != (MUTEX_STRUCT_PTR)
        ((pointer)&mutex_component_ptr->MUTEXES))
    {
        if (mutex_chk_ptr == mutex_ptr) {
            _int_enable();
            _KLOGX2(KLOG_mutex_init, MQX_EINVAL);
            return(MQX_EINVAL);
        } /* Endif */
        mutex_chk_ptr = (MUTEX_STRUCT_PTR)((pointer)mutex_chk_ptr->LINK.NEXT);
    } /* Endwhile */
#endif

    mutex_ptr->PROTOCOLS        = attr_ptr->SCHED_PROTOCOL | attr_ptr->WAIT_PROTOCOL;
    mutex_ptr->VALID            = MUTEX_VALID;
    mutex_ptr->COUNT            = attr_ptr->COUNT;
    mutex_ptr->PRIORITY_CEILING = attr_ptr->PRIORITY_CEILING;
    mutex_ptr->LOCK             = 0;
    mutex_ptr->BOOSTED          = 0;
    mutex_ptr->OWNER_TD         = NULL;
    _QUEUE_INIT(&mutex_ptr->WAITING_TASKS, 0);

    _QUEUE_ENQUEUE(&mutex_component_ptr->MUTEXES, mutex_ptr);

    _int_enable();

    _KLOGX2(KLOG_mutex_init, MQX_EOK);

    return(MQX_EOK);

} /* Endbody */
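/*
 * Usage sketch (illustrative, not from the original source): initialize a
 * mutex with priority inheritance and priority-ordered queueing, then use it
 * to guard a shared region.  Assumes <mqx.h> and <mutex.h> are included; the
 * mutex object is a hypothetical global.
 */
#include <mqx.h>
#include <mutex.h>

static MUTEX_STRUCT demo_mutex;

static void mutex_example(void)
{
    MUTEX_ATTR_STRUCT attr;

    _mutatr_init(&attr);
    _mutatr_set_sched_protocol(&attr, MUTEX_PRIO_INHERIT);
    _mutatr_set_wait_protocol(&attr, MUTEX_PRIORITY_QUEUEING);

    if (_mutex_init(&demo_mutex, &attr) != MQX_EOK) {
        return;
    }

    _mutex_lock(&demo_mutex);
    /* ... critical section ... */
    _mutex_unlock(&demo_mutex);
}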
_mqx_uint _taskq_suspend_task
    (
        /* [IN] the task to suspend */
        _task_id task_id,

        /* [IN] the task queue handle */
        pointer  users_task_queue_ptr
    )
{ /* Body */
    register KERNEL_DATA_STRUCT_PTR kernel_data;
    register TASK_QUEUE_STRUCT_PTR  task_queue_ptr =
        (TASK_QUEUE_STRUCT_PTR)users_task_queue_ptr;
    register TD_STRUCT_PTR          td_ptr;
    boolean                         me;

    _GET_KERNEL_DATA(kernel_data);

    _KLOGE3(KLOG_taskq_suspend_task, task_id, users_task_queue_ptr);

    td_ptr = (TD_STRUCT_PTR)_task_get_td(task_id);
    me = (td_ptr == kernel_data->ACTIVE_PTR);

#if MQX_CHECK_ERRORS
    if (td_ptr == NULL) {
        _KLOGX2(KLOG_taskq_suspend_task, MQX_INVALID_TASK_ID);
        return(MQX_INVALID_TASK_ID);
    } /* Endif */
    if (task_queue_ptr == NULL) {
        _KLOGX2(KLOG_taskq_suspend_task, MQX_INVALID_PARAMETER);
        return(MQX_INVALID_PARAMETER);
    } /* Endif */
    if (me && kernel_data->IN_ISR) {
        _KLOGX2(KLOG_taskq_suspend_task, MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
        return(MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
    } /* Endif */
#endif

    _INT_DISABLE();

#if MQX_CHECK_VALIDITY
    if (task_queue_ptr->VALID != TASK_QUEUE_VALID) {
        _int_enable();
        _KLOGX2(KLOG_taskq_suspend_task, MQX_INVALID_TASK_QUEUE);
        return(MQX_INVALID_TASK_QUEUE);
    } /* Endif */
#endif

    if (td_ptr->STATE != READY) {
        _INT_ENABLE();
        _KLOGX2(KLOG_taskq_suspend_task, MQX_INVALID_TASK_STATE);
        return(MQX_INVALID_TASK_STATE);
    } /* Endif */

    td_ptr->STATE = TASK_QUEUE_BLOCKED;

    _QUEUE_UNLINK(td_ptr); /* Remove task from ready to run queue */
    td_ptr->INFO = (_mqx_uint)&task_queue_ptr->TD_QUEUE;
    if (task_queue_ptr->POLICY & MQX_TASK_QUEUE_BY_PRIORITY) {
        _sched_insert_priorityq_internal(&task_queue_ptr->TD_QUEUE, td_ptr);
    } else {
        _QUEUE_ENQUEUE(&task_queue_ptr->TD_QUEUE, td_ptr);
    } /* Endif */

    if (me && (kernel_data->IN_ISR == 0)) {
        _sched_execute_scheduler_internal(); /* Let the other tasks run */
    } /* Endif */

    _INT_ENABLE();

    _KLOGX2(KLOG_taskq_suspend_task, MQX_OK);

    return( MQX_OK );

} /* Endbody */
/*!
 * \brief Tests all the message pools in the system for consistency and validity.
 *
 * The function checks the validity of each message in each private and system
 * message pool. It reports the first error that it finds.
 *
 * \param[out] pool_error_ptr (Initialized only if an error is found.) If a
 * message in a message pool has an error, one of the following:
 * \li A pointer to a pool ID if the message is from a private message pool.
 * \li A pointer to a system message pool if the message is from a system
 * message pool.
 * \param[out] msg_error_ptr Pointer to the message that has an error
 * (initialized only if an error is found).
 *
 * \return MQX_OK (All messages in all message pools passed.)
 * \return MQX_COMPONENT_DOES_NOT_EXIST (Message component is not created.)
 * \return MSGQ_INVALID_MESSAGE (At least one message in at least one message
 * pool failed.)
 *
 * \warning Disables and enables interrupts.
 *
 * \see _msgpool_create
 * \see _msgpool_create_system
 */
_mqx_uint _msgpool_test
    (
        void **pool_error_ptr,
        void **msg_error_ptr
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR      kernel_data;
    MSG_COMPONENT_STRUCT_PTR    msg_component_ptr;
    MSGPOOL_STRUCT_PTR          msgpool_ptr;
    MSGPOOL_BLOCK_STRUCT_PTR    msgpool_block_ptr;
    INTERNAL_MESSAGE_STRUCT_PTR imsg_ptr;
    _mqx_uint                   i, j, raw_message_size;

    _GET_KERNEL_DATA(kernel_data);

    _KLOGE3(KLOG_msgpool_test, pool_error_ptr, msg_error_ptr);

    msg_component_ptr = _GET_MSG_COMPONENT_STRUCT_PTR(kernel_data);
#if MQX_CHECK_ERRORS
    if (msg_component_ptr == NULL) {
        _KLOGX2(KLOG_msgpool_test, MQX_COMPONENT_DOES_NOT_EXIST);
        return(MQX_COMPONENT_DOES_NOT_EXIST);
    } /* Endif */
#endif

    /* Check all the message pools */
    msgpool_ptr = msg_component_ptr->MSGPOOLS_PTR;
    i = msg_component_ptr->MAX_MSGPOOLS + 1;
    while (--i) {
        _int_disable();
        if (msgpool_ptr->VALID == MSG_VALID) {
            /* The pool has been created */

            /* Search through all of the message pool blocks for this pool */
            msgpool_block_ptr = msgpool_ptr->MSGPOOL_BLOCK_PTR;
            while (msgpool_block_ptr != NULL) {
                raw_message_size = msgpool_block_ptr->RAW_MESSAGE_SIZE;
                imsg_ptr = (INTERNAL_MESSAGE_STRUCT_PTR)
                    msgpool_block_ptr->FIRST_IMSG_PTR;
                j = msgpool_block_ptr->NUM_MESSAGES + 1;
                while (--j) {
                    if ((imsg_ptr->VALID != MSG_VALID) ||
                        (imsg_ptr->MSGPOOL_PTR != msgpool_ptr))
                    {
                        _int_enable();
                        *pool_error_ptr = msgpool_ptr;
                        *msg_error_ptr  = imsg_ptr;
                        _KLOGX4(KLOG_msgpool_test, MSGQ_INVALID_MESSAGE,
                            msgpool_ptr, imsg_ptr);
                        return(MSGQ_INVALID_MESSAGE);
                    } /* Endif */
                    imsg_ptr = (INTERNAL_MESSAGE_STRUCT_PTR)
                        ((unsigned char *)imsg_ptr + raw_message_size);
                } /* Endwhile */
                msgpool_block_ptr = msgpool_block_ptr->NEXT_BLOCK_PTR;
            } /* Endwhile */
        } /* Endif */
        _int_enable();
        msgpool_ptr++;
    } /* Endwhile */

    _KLOGX2(KLOG_msgpool_test, MQX_OK);

    return(MQX_OK);

} /* Endbody */
_mqx_uint _lwsem_test
    (
        /* [OUT] the lightweight semaphore in error */
        pointer _PTR_ lwsem_error_ptr,

        /* [OUT] the td on a lightweight semaphore in error */
        pointer _PTR_ td_error_ptr
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR kernel_data;
    LWSEM_STRUCT_PTR       sem_ptr;
    _mqx_uint              queue_size;
    _mqx_uint              result;

    _GET_KERNEL_DATA(kernel_data);

    _KLOGE3(KLOG_lwsem_test, lwsem_error_ptr, td_error_ptr);

    *td_error_ptr    = NULL;
    *lwsem_error_ptr = NULL;

#if MQX_CHECK_ERRORS
    if (kernel_data->IN_ISR) {
        _KLOGX2(KLOG_lwsem_test, MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
        return(MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
    } /* Endif */
#endif

    _int_disable();
    result = _queue_test((QUEUE_STRUCT_PTR)&kernel_data->LWSEM, lwsem_error_ptr);
    if (result != MQX_OK) {
        _int_enable();   /* re-enable interrupts before the early return */
        _KLOGX3(KLOG_lwsem_test, result, *lwsem_error_ptr);
        return(result);
    } /* Endif */

    sem_ptr    = (LWSEM_STRUCT_PTR)((pointer)kernel_data->LWSEM.NEXT);
    queue_size = _QUEUE_GET_SIZE(&kernel_data->LWSEM);
    while (queue_size--) {
        if (sem_ptr->VALID != LWSEM_VALID) {
            result = MQX_INVALID_LWSEM;
            break;
        } /* Endif */
        result = _queue_test(&sem_ptr->TD_QUEUE, td_error_ptr);
        if (result != MQX_OK) {
            break;
        } /* Endif */
        sem_ptr = sem_ptr->NEXT;
    } /* Endwhile */
    _int_enable();

    if (result != MQX_OK) {
        *lwsem_error_ptr = (pointer)sem_ptr;
    } /* Endif */
    _KLOGX4(KLOG_lwsem_test, result, *lwsem_error_ptr, *td_error_ptr);

    return(result);
}
/*!
 * \brief Sets the scheduling policy for a task or the system.
 *
 * \param[in] task_id One of the following:
 * \n - Task on this processor for which to set the policy.
 * \n - MQX_DEFAULT_TASK_ID (Set the policy for the processor.)
 * \n - MQX_NULL_TASK_ID (Set the policy for the calling task.)
 * \param[in] policy New scheduling policy; one of the following:
 * \n - MQX_SCHED_FIFO
 * \n - MQX_SCHED_RR
 *
 * \return Previous scheduling policy MQX_SCHED_FIFO or MQX_SCHED_RR (Success.)
 * \return MAX_MQX_UINT (Failure.)
 *
 * \warning On failure, _task_set_error() is called to set the following task
 * error codes:
 * \n - MQX_SCHED_INVALID_POLICY (Policy is not one of the allowed policies.)
 * \n - MQX_SCHED_INVALID_TASK_ID (Task_id is not a valid task on this processor.)
 *
 * \see _sched_get_policy
 * \see _task_set_error
 */
_mqx_uint _sched_set_policy
    (
        _task_id  task_id,
        _mqx_uint policy
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR kernel_data = NULL;
    (void)                 kernel_data; /* suppress 'unused variable' warning */
    TD_STRUCT_PTR          td_ptr = NULL;
    (void)                 td_ptr; /* suppress 'unused variable' warning */
    _mqx_uint              old_policy = MQX_SCHED_FIFO;

    _GET_KERNEL_DATA(kernel_data);

    _KLOGE3(KLOG_sched_set_policy, (_mqx_uint)task_id, policy);

#if MQX_HAS_TIME_SLICE
#if MQX_CHECK_ERRORS
    if (! ((policy == MQX_SCHED_FIFO) || (policy == MQX_SCHED_RR))) {
        _task_set_error(MQX_SCHED_INVALID_POLICY);
        _KLOGX3(KLOG_sched_set_policy, MAX_MQX_UINT, MQX_SCHED_INVALID_POLICY);
        return(MAX_MQX_UINT);
    } /* Endif */
#endif

    /* Handle default case */
    if (task_id == MQX_DEFAULT_TASK_ID) {
        old_policy = kernel_data->SCHED_POLICY;
        kernel_data->SCHED_POLICY = policy;
    } else {
        td_ptr = (TD_STRUCT_PTR)_task_get_td(task_id);
        if (td_ptr == NULL) {
            _task_set_error(MQX_SCHED_INVALID_TASK_ID);
            _KLOGX3(KLOG_sched_set_policy, MAX_MQX_UINT,
                MQX_SCHED_INVALID_TASK_ID);
            return(MAX_MQX_UINT);
        } /* Endif */
        if (td_ptr->FLAGS & MQX_TIME_SLICE_TASK) {
            old_policy = MQX_SCHED_RR;
        } else {
            old_policy = MQX_SCHED_FIFO;
        } /* Endif */
        _int_disable();
        if (policy == MQX_SCHED_RR) {
            td_ptr->FLAGS |= MQX_TIME_SLICE_TASK;
        } else {
            td_ptr->FLAGS &= ~MQX_TIME_SLICE_TASK;
        } /* Endif */
        _int_enable();
    } /* Endif */
#else
#if MQX_CHECK_ERRORS
    if (policy != MQX_SCHED_FIFO) {
        _task_set_error(MQX_SCHED_INVALID_POLICY);
        _KLOGX3(KLOG_sched_set_policy, MAX_MQX_UINT, MQX_SCHED_INVALID_POLICY);
        return (MAX_MQX_UINT);
    } /* Endif */
    old_policy = MQX_SCHED_FIFO;
#endif
#endif

    _KLOGX3(KLOG_sched_set_policy, old_policy, 0L);

    return (old_policy);

} /* Endbody */
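/*
 * Usage sketch (illustrative, not from the original source): switch the
 * calling task to round-robin scheduling and restore the previous policy
 * afterwards.  Assumes <mqx.h> is included and MQX_HAS_TIME_SLICE is enabled
 * in the kernel configuration.
 */
#include <mqx.h>

static void round_robin_example(void)
{
    _mqx_uint old_policy;

    old_policy = _sched_set_policy(MQX_NULL_TASK_ID, MQX_SCHED_RR);
    if (old_policy == MAX_MQX_UINT) {
        return;                              /* invalid policy or task id      */
    }

    /* ... work that should share the CPU with equal-priority tasks ... */

    _sched_set_policy(MQX_NULL_TASK_ID, old_policy);
}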
_mqx_uint _event_set
    (
        /* [IN] - An event handle returned from _event_open or _event_open_fast */
        pointer   users_event_ptr,

        /* [IN] - bit mask, each bit of which represents an event. */
        _mqx_uint bit_mask
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR      kernel_data;
    EVENT_STRUCT_PTR            event_ptr;
    EVENT_COMPONENT_STRUCT_PTR  event_component_ptr;
    EVENT_CONNECTION_STRUCT_PTR conn_ptr;
    EVENT_CONNECTION_STRUCT_PTR next_conn_ptr;
    EVENT_CONNECTION_STRUCT_PTR event_connection_ptr;
    TD_STRUCT_PTR               new_td_ptr;
    _mqx_uint                   set_bits;

    _GET_KERNEL_DATA(kernel_data);

    _KLOGE3(KLOG_event_set, users_event_ptr, bit_mask);

    event_connection_ptr = (EVENT_CONNECTION_STRUCT_PTR)users_event_ptr;
#if MQX_CHECK_VALIDITY
    if (event_connection_ptr->VALID != EVENT_VALID) {
        _KLOGX2(KLOG_event_set, EVENT_INVALID_EVENT_HANDLE);
        return(EVENT_INVALID_EVENT_HANDLE);
    } /* Endif */
#endif

    event_component_ptr = (EVENT_COMPONENT_STRUCT_PTR)
        kernel_data->KERNEL_COMPONENTS[KERNEL_EVENTS];
#if MQX_CHECK_ERRORS
    if (event_component_ptr == NULL) {
        _KLOGX2(KLOG_event_set, MQX_COMPONENT_DOES_NOT_EXIST);
        return(MQX_COMPONENT_DOES_NOT_EXIST);
    } /* Endif */
#endif
#if MQX_CHECK_VALIDITY
    if (event_component_ptr->VALID != EVENT_VALID) {
        _KLOGX2(KLOG_event_set, MQX_INVALID_COMPONENT_BASE);
        return(MQX_INVALID_COMPONENT_BASE);
    } /* Endif */
#endif

#if MQX_IS_MULTI_PROCESSOR
    if (event_connection_ptr->REMOTE_CPU) {
        if (kernel_data->IPC) {
            /* This open is for a remote processor */
            (*kernel_data->IPC)(TRUE, event_connection_ptr->REMOTE_CPU,
                KERNEL_EVENTS, IPC_EVENT_SET, 2,
                event_connection_ptr->EVENT_PTR, bit_mask);
            _KLOGX2(KLOG_event_set, kernel_data->ACTIVE_PTR->TASK_ERROR_CODE);
            return(kernel_data->ACTIVE_PTR->TASK_ERROR_CODE);
        } else {
            _KLOGX2(KLOG_event_set, EVENT_NOT_FOUND);
            return(EVENT_NOT_FOUND);
        } /* Endif */
    } /* Endif */
#endif

    _INT_DISABLE();

    event_ptr = event_connection_ptr->EVENT_PTR;
#if MQX_CHECK_VALIDITY
    if (event_ptr->VALID != EVENT_VALID) {
        _INT_ENABLE();
        _KLOGX2(KLOG_event_set, EVENT_INVALID_EVENT);
        return(EVENT_INVALID_EVENT);
    } /* Endif */
#endif

    set_bits = event_ptr->EVENT | bit_mask;

    if (_QUEUE_GET_SIZE(&event_ptr->WAITING_TASKS)) {
        /* Schedule waiting task(s) to run if bits ok */

        conn_ptr = (EVENT_CONNECTION_STRUCT_PTR)
            ((pointer)event_ptr->WAITING_TASKS.NEXT);
        while (conn_ptr != (EVENT_CONNECTION_STRUCT_PTR)
            ((pointer)&event_ptr->WAITING_TASKS))
        {
            next_conn_ptr = (EVENT_CONNECTION_STRUCT_PTR)conn_ptr->NEXT;
            if (((conn_ptr->FLAGS & EVENT_WANTS_ALL) &&
                    ((conn_ptr->MASK & set_bits) == conn_ptr->MASK)) ||
                ((!(conn_ptr->FLAGS & EVENT_WANTS_ALL)) &&
                    (conn_ptr->MASK & set_bits)))
            {
                new_td_ptr = conn_ptr->TD_PTR;
                if ((new_td_ptr->STATE & STATE_MASK) == EVENT_BLOCKED) {
                    /* The waiting task may have timed out */
                    conn_ptr->FLAGS |= EVENT_OCCURRED;
                    _TIME_DEQUEUE(new_td_ptr, kernel_data);
                    _TASK_READY(new_td_ptr, kernel_data);
                    /* Only ready one task if event is an auto clear event */
                    if (event_ptr->AUTO_CLEAR) {
                        set_bits &= ~conn_ptr->MASK;
                        break;
                    } /* Endif */
                } /* Endif */
            } /* Endif */
            conn_ptr = next_conn_ptr;
        } /* Endwhile */
    } /* Endif */

    event_ptr->EVENT = set_bits;

    _INT_ENABLE();

    /* May need to let higher priority task run */
    _CHECK_RUN_SCHEDULER();

    _KLOGX2(KLOG_event_set, MQX_OK);

    return(MQX_OK);

} /* Endbody */
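/*
 * Usage sketch (illustrative, not from the original source): one task creates
 * a named event group and signals bit 0; another task opens the same name and
 * waits for it.  Assumes <mqx.h> and <event.h> are included; the event name
 * and bit assignment are arbitrary examples.
 */
#include <mqx.h>
#include <event.h>

#define DEMO_DONE_BIT  0x01

static void event_signaller_example(void)
{
    pointer event_handle;

    if (_event_create("event.demo") != MQX_OK) {
        /* the group may already exist; acceptable for this sketch */
    }
    if (_event_open("event.demo", &event_handle) == MQX_OK) {
        _event_set(event_handle, DEMO_DONE_BIT);
        _event_close(event_handle);
    }
}

static void event_waiter_example(void)
{
    pointer event_handle;

    if (_event_open("event.demo", &event_handle) == MQX_OK) {
        _event_wait_all(event_handle, DEMO_DONE_BIT, 0);   /* 0 = wait forever */
        _event_close(event_handle);
    }
}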
_mqx_uint _mem_free_part
    (
        /* [IN] the address of the memory block whose size is to change */
        pointer   mem_ptr,

        /* [IN] the new size for the block */
        _mem_size requested_size
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR kernel_data;
    STOREBLOCK_STRUCT_PTR  block_ptr;
    STOREBLOCK_STRUCT_PTR  prev_block_ptr;
    STOREBLOCK_STRUCT_PTR  next_block_ptr;
    STOREBLOCK_STRUCT_PTR  new_block_ptr;
    _mem_size              size;
    _mem_size              block_size;
    _mem_size              new_block_size;
    _mqx_uint              result_code;

    _GET_KERNEL_DATA(kernel_data);

    _KLOGE3(KLOG_mem_free_part, mem_ptr, requested_size);

#if MQX_CHECK_ERRORS
    /* Make sure a correct pointer was passed in. */
    if (mem_ptr == NULL) {
        _task_set_error(MQX_INVALID_POINTER);
        _KLOGX2(KLOG_mem_free_part, MQX_INVALID_POINTER);
        return(MQX_INVALID_POINTER);
    } /* Endif */
#endif

    /* Verify the block size */
    block_ptr = GET_MEMBLOCK_PTR(mem_ptr);
#if MQX_CHECK_ERRORS
    if (! _MEMORY_ALIGNED(block_ptr)) {
        _task_set_error(MQX_INVALID_POINTER);
        _KLOGX2(KLOG_mem_free_part, MQX_INVALID_POINTER);
        return(MQX_INVALID_POINTER);
    } /* Endif */
    if ( (block_ptr->BLOCKSIZE < MQX_MIN_MEMORY_STORAGE_SIZE) ||
        BLOCK_IS_FREE(block_ptr) )
    {
        _task_set_error(MQX_INVALID_POINTER);
        kernel_data->KD_POOL.POOL_BLOCK_IN_ERROR = block_ptr;
        _KLOGX3(KLOG_mem_free_part, MQX_INVALID_POINTER, block_ptr);
        return(MQX_INVALID_POINTER);
    } /* Endif */
#endif
#if MQX_CHECK_VALIDITY
    _int_disable();
    if ( ! VALID_CHECKSUM(block_ptr) ) {
        _int_enable();
        _task_set_error(MQX_INVALID_CHECKSUM);
        kernel_data->KD_POOL.POOL_BLOCK_IN_ERROR = block_ptr;
        _KLOGX3(KLOG_mem_free_part, MQX_INVALID_CHECKSUM, block_ptr);
        return(MQX_INVALID_CHECKSUM);
    } /* Endif */
    _int_enable();
#endif

    /* Walk through the memory resources of the task descriptor.
     * Two pointers are maintained, one to the current block
     * and one to the previous block.
     */
    next_block_ptr = (STOREBLOCK_STRUCT_PTR)
        kernel_data->ACTIVE_PTR->MEMORY_RESOURCE_LIST;
    prev_block_ptr = GET_MEMBLOCK_PTR(
        &kernel_data->ACTIVE_PTR->MEMORY_RESOURCE_LIST);

    /* Scan the task's memory resource list searching for the block to
     * free. Stop when the current pointer is equal to the block to free
     * or the end of the list is reached.
     */
    while ( next_block_ptr && ((pointer)next_block_ptr != mem_ptr) ) {
        /* The block is not found, and the end of the list has not been
         * reached, so move down the list.
         */
        prev_block_ptr = GET_MEMBLOCK_PTR(next_block_ptr);
        next_block_ptr = (STOREBLOCK_STRUCT_PTR)prev_block_ptr->NEXTBLOCK;
    } /* Endwhile */

#if MQX_CHECK_ERRORS
    if ( next_block_ptr == NULL ) {
        /* The specified block does not belong to the calling task. */
        _task_set_error(MQX_NOT_RESOURCE_OWNER);
        _KLOGX2(KLOG_mem_free_part, MQX_NOT_RESOURCE_OWNER);
        return(MQX_NOT_RESOURCE_OWNER);
    } /* Endif */
#endif

    /* Determine the size of the block. */
    block_size = block_ptr->BLOCKSIZE;

    /* Adjust the size to allow for the overhead and force alignment */
    size = requested_size + (_mem_size)FIELD_OFFSET(STOREBLOCK_STRUCT, USER_AREA);
    if (size < MQX_MIN_MEMORY_STORAGE_SIZE) {
        size = MQX_MIN_MEMORY_STORAGE_SIZE;
    } /* Endif */
    _MEMORY_ALIGN_VAL_LARGER(size);

#if MQX_CHECK_ERRORS
    /* Verify that the size parameter is within range of the block size. */
    if (size <= block_size) {
#endif

        /* Compute the size of the new block that would be created. */
        new_block_size = block_size - size;

        /* Decide if it is worthwhile to split the block. If the amount of
         * space returned is not at least twice the size of the block
         * overhead, then don't bother.
         */
        if (new_block_size >= (2 * MQX_MIN_MEMORY_STORAGE_SIZE) ) {

            /* Create an 'inuse' block */
            new_block_ptr = (STOREBLOCK_STRUCT_PTR)((char _PTR_)block_ptr + size);
            new_block_ptr->BLOCKSIZE    = new_block_size;
            PREV_PHYS(new_block_ptr)    = block_ptr;
            new_block_ptr->TASK_NUMBER  = block_ptr->TASK_NUMBER;
            new_block_ptr->MEM_POOL_PTR = block_ptr->MEM_POOL_PTR;
            CALC_CHECKSUM(new_block_ptr);

            _int_disable();

            /* Split the block */
            block_ptr->BLOCKSIZE = size;
            CALC_CHECKSUM(block_ptr);

            /* make sure right physical neighbour knows about it */
            block_ptr = NEXT_PHYS(new_block_ptr);
            PREV_PHYS(block_ptr) = new_block_ptr;
            CALC_CHECKSUM(block_ptr);

            /* Link the new block onto the requestor's task descriptor. */
            new_block_ptr->NEXTBLOCK =
                kernel_data->ACTIVE_PTR->MEMORY_RESOURCE_LIST;
            kernel_data->ACTIVE_PTR->MEMORY_RESOURCE_LIST =
                (char _PTR_)(&new_block_ptr->USER_AREA);

            _int_enable();

            result_code = _mem_free((pointer)&new_block_ptr->USER_AREA);
        } else {
            result_code = MQX_OK;
        } /* Endif */

#if MQX_CHECK_ERRORS
    } else {
        result_code = MQX_INVALID_SIZE;
    } /* Endif */
#endif

#if MQX_CHECK_ERRORS
    if ( result_code != MQX_OK ) {
        _task_set_error(result_code);
    } /* Endif */
#endif

    _KLOGX2(KLOG_mem_free_part, result_code);

    return (result_code);

} /* Endbody */
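/*
 * Usage sketch (illustrative, not from the original source): allocate a
 * generous buffer, then give back the unused tail once the final size is
 * known.  Assumes <mqx.h> is included; the sizes are arbitrary.
 */
#include <mqx.h>

static void trim_allocation_example(void)
{
    char     *buffer;
    _mem_size used;

    buffer = (char *)_mem_alloc(1024);          /* worst-case size              */
    if (buffer == NULL) {
        return;
    }

    /* ... fill the buffer ... */
    used = 64;                                  /* bytes actually needed        */

    /* Shrink the block; the freed tail is returned to the pool only if it is
     * large enough to form a separate block. */
    if (_mem_free_part(buffer, used) != MQX_OK) {
        /* block kept at its original size; task error code has been set */
    }
}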
_mqx_uint _watchdog_create_component
    (
        /* [IN] the vector upon which the timer interrupts */
        _mqx_uint timer_interrupt_vector,

        /* [IN] the function to call when a watchdog timer expires */
        void (_CODE_PTR_ error_function)(pointer td_ptr)
    )
{ /* Body */
    KERNEL_DATA_STRUCT_PTR        kernel_data;
    WATCHDOG_COMPONENT_STRUCT_PTR watchdog_component_ptr;
    pointer                       interrupt_data;

    _GET_KERNEL_DATA(kernel_data);

    _KLOGE3(KLOG_watchdog_create_component, timer_interrupt_vector,
        error_function);

#if MQX_CHECK_ERRORS
    if (kernel_data->IN_ISR) {
        _KLOGX2(KLOG_watchdog_create_component,
            MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
        return(MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
    } /* Endif */
#endif

    _lwsem_wait((LWSEM_STRUCT_PTR)(&kernel_data->COMPONENT_CREATE_LWSEM));

    if (kernel_data->KERNEL_COMPONENTS[KERNEL_WATCHDOG] != NULL) {
        _lwsem_post((LWSEM_STRUCT_PTR)(&kernel_data->COMPONENT_CREATE_LWSEM));
        _KLOGX2(KLOG_watchdog_create_component, MQX_OK);
        return(MQX_OK);
    } /* Endif */

#if MQX_CHECK_ERRORS
    if (!error_function) {
        _lwsem_post((LWSEM_STRUCT_PTR)(&kernel_data->COMPONENT_CREATE_LWSEM));
        _KLOGX2(KLOG_watchdog_create_component, WATCHDOG_INVALID_ERROR_FUNCTION);
        return(WATCHDOG_INVALID_ERROR_FUNCTION);
    } /* Endif */
    if ((timer_interrupt_vector < kernel_data->FIRST_USER_ISR_VECTOR) ||
        (timer_interrupt_vector > kernel_data->LAST_USER_ISR_VECTOR))
    {
        _lwsem_post((LWSEM_STRUCT_PTR)(&kernel_data->COMPONENT_CREATE_LWSEM));
        _KLOGX2(KLOG_watchdog_create_component,
            WATCHDOG_INVALID_INTERRUPT_VECTOR);
        return(WATCHDOG_INVALID_INTERRUPT_VECTOR);
    } /* Endif */
#endif

    /* Get the watchdog component data structure */
    watchdog_component_ptr = (WATCHDOG_COMPONENT_STRUCT_PTR)
        _mem_alloc_system_zero((_mem_size)sizeof(WATCHDOG_COMPONENT_STRUCT));
#if MQX_CHECK_MEMORY_ALLOCATION_ERRORS
    if (watchdog_component_ptr == NULL) {
        _lwsem_post((LWSEM_STRUCT_PTR)(&kernel_data->COMPONENT_CREATE_LWSEM));
        _KLOGX2(KLOG_watchdog_create_component, MQX_OUT_OF_MEMORY);
        return(MQX_OUT_OF_MEMORY);
    } /* Endif */
#endif
    _mem_set_type(watchdog_component_ptr, MEM_TYPE_WATCHDOG_COMPONENT);

    watchdog_component_ptr->ERROR_FUNCTION =
        (void (_CODE_PTR_)(TD_STRUCT_PTR))error_function;
    watchdog_component_ptr->VALID            = WATCHDOG_VALID;
    watchdog_component_ptr->INTERRUPT_VECTOR = timer_interrupt_vector;

    interrupt_data = _int_get_isr_data(timer_interrupt_vector);

    _INT_DISABLE();

    watchdog_component_ptr->TIMER_INTERRUPT_HANDLER = _int_install_isr(
        timer_interrupt_vector, _watchdog_isr, interrupt_data);

#if MQX_CHECK_ERRORS
    if (!watchdog_component_ptr->TIMER_INTERRUPT_HANDLER) {
        _int_enable();
        _lwsem_post((LWSEM_STRUCT_PTR)(&kernel_data->COMPONENT_CREATE_LWSEM));
        _mem_free(watchdog_component_ptr);
        _KLOGX2(KLOG_watchdog_create_component,
            WATCHDOG_INVALID_INTERRUPT_VECTOR);
        return(WATCHDOG_INVALID_INTERRUPT_VECTOR);
    } /* Endif */
#endif

    kernel_data->KERNEL_COMPONENTS[KERNEL_WATCHDOG] = watchdog_component_ptr;

#if MQX_TASK_DESTRUCTION
    kernel_data->COMPONENT_CLEANUP[KERNEL_WATCHDOG] = _watchdog_cleanup;
#endif

    _INT_ENABLE();

    _lwsem_post((LWSEM_STRUCT_PTR)(&kernel_data->COMPONENT_CREATE_LWSEM));

    _KLOGX2(KLOG_watchdog_create_component, MQX_OK);

    return(MQX_OK);

} /* Endbody */
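/*
 * Usage sketch (illustrative, not from the original source): create the
 * watchdog component on the BSP timer interrupt and guard a processing loop so
 * a stall is reported.  Assumes <mqx.h>, <bsp.h>, and <watchdog.h> are
 * included; BSP_TIMER_INTERRUPT_VECTOR, the handler behaviour, and the 2000 ms
 * budget are assumptions, not taken from the source.
 */
#include <mqx.h>
#include <bsp.h>
#include <watchdog.h>

static void demo_watchdog_expired(pointer td_ptr)
{
    /* Called from the timer ISR when a task's software watchdog expires.
     * A real handler might log the task descriptor or restart the task. */
}

static void watchdog_example(void)
{
    if (_watchdog_create_component(BSP_TIMER_INTERRUPT_VECTOR,
                                   demo_watchdog_expired) != MQX_OK)
    {
        return;
    }

    while (TRUE) {
        _watchdog_start(2000);    /* must complete one pass within 2000 ms     */
        /* ... one iteration of work ... */
        _watchdog_stop();
    }
}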