/*!
 * \brief Resumes one task, or all tasks, suspended on the given task queue.
 *
 * \param[in] users_task_queue_ptr The task queue handle.
 * \param[in] all_tasks            TRUE to resume every suspended task,
 *                                 FALSE to resume only the head of the queue.
 *
 * \return MQX_OK
 * \return MQX_INVALID_TASK_QUEUE (handle is NULL or queue fails validity check)
 * \return MQX_TASK_QUEUE_EMPTY   (no tasks are suspended on the queue)
 *
 * \warning Disables and enables interrupts; may trigger a reschedule via
 *          _CHECK_RUN_SCHEDULER().
 */
_mqx_uint _taskq_resume
   (
      /* [IN] the task queue handle */
      pointer users_task_queue_ptr,

      /* [IN] TRUE if all tasks on the queue to be resumed */
      boolean all_tasks
   )
{ /* Body */
   register KERNEL_DATA_STRUCT_PTR kernel_data;
   register TD_STRUCT_PTR          td_ptr;
   register TASK_QUEUE_STRUCT_PTR  task_queue_ptr =
      (TASK_QUEUE_STRUCT_PTR)users_task_queue_ptr;

   _GET_KERNEL_DATA(kernel_data);
   _KLOGE3(KLOG_taskq_resume, users_task_queue_ptr, all_tasks);

#if MQX_CHECK_ERRORS
   if (task_queue_ptr == NULL){
      _KLOGX2(KLOG_taskq_resume, MQX_INVALID_TASK_QUEUE);
      return(MQX_INVALID_TASK_QUEUE);
   } /* Endif */
#endif

   _INT_DISABLE();

#if MQX_CHECK_VALIDITY
   if (task_queue_ptr->VALID != TASK_QUEUE_VALID) {
      _int_enable();
      _KLOGX2(KLOG_taskq_resume, MQX_INVALID_TASK_QUEUE);
      return(MQX_INVALID_TASK_QUEUE);
   } /* Endif */
#endif

   if (_QUEUE_GET_SIZE(&task_queue_ptr->TD_QUEUE) == 0) {
      /* Task queue is empty */
      _int_enable();
      _KLOGX2(KLOG_taskq_resume, MQX_TASK_QUEUE_EMPTY);
      return(MQX_TASK_QUEUE_EMPTY);
   } /* Endif */

   if (all_tasks) {
      /* Drain the queue, making every suspended task ready to run. */
      while (_QUEUE_GET_SIZE(&task_queue_ptr->TD_QUEUE)) {
         _QUEUE_DEQUEUE(&task_queue_ptr->TD_QUEUE, td_ptr);
         _TASK_READY(td_ptr, kernel_data);
      } /* Endwhile */
   } else {
      /* Resume only the task at the head of the queue. */
      _QUEUE_DEQUEUE(&task_queue_ptr->TD_QUEUE, td_ptr);
      _TASK_READY(td_ptr, kernel_data);
   } /* Endif */

   _INT_ENABLE();
   _CHECK_RUN_SCHEDULER();/* Let higher priority task run */

   _KLOGX2(KLOG_taskq_resume, MQX_OK);
   return( MQX_OK );

} /* Endbody */
/*!
 * \brief Moves the specified task from the ready queue onto the kernel
 * timeout queue (a time-priority queue) and invokes the scheduler.
 *
 * The insertion point is found by an insertion sort on the task's TIMEOUT
 * tick value, so the timeout queue stays ordered by expiry time.
 *
 * \param[in] td_ptr The task descriptor of the task to delay.
 *
 * NOTE(review): this routine manipulates kernel queues without disabling
 * interrupts itself — presumably callers invoke it with interrupts already
 * disabled; confirm against the call sites.
 */
void _time_delay_internal
   (
      /* [IN] the task to delay */
      register TD_STRUCT_PTR td_ptr
   )
{ /* Body */
   register KERNEL_DATA_STRUCT_PTR kernel_data;
   register TD_STRUCT_PTR          td2_ptr;
   register TD_STRUCT_PTR          tdprev_ptr;
   register _mqx_uint              count;
   register _mqx_int               result;

   _GET_KERNEL_DATA(kernel_data);

   /* Remove task from ready to run queue */
   /* tdprev_ptr starts as the queue head so an empty queue (or an insert at
    * the front) links the task directly after the head. */
   tdprev_ptr = (TD_STRUCT_PTR)((pointer)&kernel_data->TIMEOUT_QUEUE);
   if ( _QUEUE_GET_SIZE(&kernel_data->TIMEOUT_QUEUE) ) {

      /* Perform insertion sort by time */
      td2_ptr = (TD_STRUCT_PTR)((pointer)kernel_data->TIMEOUT_QUEUE.NEXT);

      /* SPR P171-0023-01 use pre-decrement on while loop */
      count = _QUEUE_GET_SIZE(&kernel_data->TIMEOUT_QUEUE) + 1;
      while ( --count ) {
      /* END SPR */
         result = PSP_CMP_TICKS(&td2_ptr->TIMEOUT, &td_ptr->TIMEOUT);
         if (MQX_DELAY_ENQUEUE_POLICY(result)) { /* CR171 */
            /* Enqueue before td2_ptr */
            break;
         } /* Endif */
         tdprev_ptr = td2_ptr;
         td2_ptr    = td2_ptr->TD_NEXT;
      } /* Endwhile */

   } /* Endif */

   /* Remove from ready queue */
   _QUEUE_UNLINK(td_ptr);

   /* Insert into timeout queue */
   _QUEUE_INSERT(&kernel_data->TIMEOUT_QUEUE,tdprev_ptr,td_ptr);
   td_ptr->STATE |= IS_ON_TIMEOUT_Q;

   _sched_execute_scheduler_internal();

} /* Endbody */
/*!
 * \brief Checks whether the given pointer refers to a valid user-mode
 * lightweight semaphore registered on the kernel's USR_LWSEM list.
 *
 * \param[in] tst_sem_ptr The lwsem pointer to validate.
 *
 * \return MQX_OK             (pointer found on the list and valid)
 * \return MQX_INVALID_LWSEM  (not found, or a corrupt element was hit first)
 */
_mqx_uint _lwsem_usr_check
   (
      /* [IN] the location of the lwsem */
      LWSEM_STRUCT_PTR tst_sem_ptr
   )
{
   KERNEL_DATA_STRUCT_PTR kernel_data;
   LWSEM_STRUCT_PTR       candidate_ptr;
   _mqx_uint              remaining;
   _mqx_uint              status = MQX_INVALID_LWSEM;

   _GET_KERNEL_DATA(kernel_data);

   /* Walk at most SIZE elements of the user lwsem list. */
   candidate_ptr = (LWSEM_STRUCT_PTR)((pointer)kernel_data->USR_LWSEM.NEXT);
   for (remaining = _QUEUE_GET_SIZE(&kernel_data->USR_LWSEM);
        remaining != 0;
        remaining--)
   {
      /* A corrupt element ends the search — the pointer cannot be trusted. */
      if (candidate_ptr->VALID != LWSEM_VALID) {
         break;
      }
      if (candidate_ptr == tst_sem_ptr) {
         status = MQX_OK;
         break;
      }
      candidate_ptr = (LWSEM_STRUCT_PTR)(pointer)candidate_ptr->NEXT;
   }

   return status;
}
/*!
 * \brief Fetches the next character to transmit from the serial device's
 * output queue.
 *
 * \param[in] int_io_dev_ptr The interrupt I/O context information.
 *
 * \return The next character (as _mqx_int), or -1 when output is stopped,
 *         disabled, or the output queue is empty.
 */
static _mqx_int _io_serial_mix_nextc
   (
      /* [IN] the interrupt I/O context information */
      IO_SERIAL_INT_DEVICE_STRUCT_PTR int_io_dev_ptr
   )
{ /* Body */
   unsigned char next_char;

   /* Nothing may be transmitted while output is stopped or disabled. */
   if (int_io_dev_ptr->HAVE_STOPPED_OUTPUT) {
      return(-1);
   }
   if (! int_io_dev_ptr->OUTPUT_ENABLED) {
      return(-1);
   }

   if (! _CHARQ_EMPTY(int_io_dev_ptr->OUT_QUEUE)) {
      /* Hand the next queued character to the transmitter. */
      _CHARQ_DEQUEUE(int_io_dev_ptr->OUT_QUEUE, next_char);
      return((_mqx_int)next_char);
   }

   /* Output queue drained: disable output and wake any tasks blocked
    * waiting on the output-waiting task queue. */
   int_io_dev_ptr->OUTPUT_ENABLED = FALSE;
   if (_QUEUE_GET_SIZE(int_io_dev_ptr->OUT_WAITING_TASKS)) {
      _taskq_resume(int_io_dev_ptr->OUT_WAITING_TASKS, TRUE);
   }
   return(-1);

} /* Endbody */
/* Start CR 1124: C runtime thread local storage */
/*!
 * \brief Reserves space at the top of every task's stack (e.g. for C runtime
 * thread-local storage) and returns the aligned offset of the reservation.
 *
 * Must be called before any task is created; otherwise a fatal error is
 * raised (when MQX_CHECK_ERRORS is enabled).
 *
 * \param[in] size Amount of space to reserve, in bytes.
 *
 * \return Offset of the reserved region within the per-task reserved area.
 */
_mqx_uint _task_reserve_space
   (
      /* [IN} Amount of space to reserve */
      _mqx_uint size
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR kernel_data;
   _mqx_uint              offset;
   _mqx_uint              align_mask;

   _GET_KERNEL_DATA(kernel_data);

#if MQX_CHECK_ERRORS
   /* Can only be called prior to creating any tasks! */
   if (_QUEUE_GET_SIZE(&kernel_data->TD_LIST) != 0) {
      _mqx_fatal_error(MQX_EINVAL);
   } /* Endif */
#endif

   /* Choose a natural alignment mask from the request size:
    * 8-byte alignment for size > 7, 4-byte for > 3, 2-byte for > 1, else none. */
   if (size > 7) {
      align_mask = 7;
   } else if (size > 3) {
      align_mask = 3;
   } else if (size > 1) {
      align_mask = 1;
   } else {
      align_mask = 0;
   }

   /* Remember the strictest alignment any reservation has required. */
   if (align_mask > kernel_data->TOS_RESERVED_ALIGN_MASK) {
      kernel_data->TOS_RESERVED_ALIGN_MASK = align_mask;
   } /* Endif */

   /* Round the running total up to the alignment, then claim 'size' bytes. */
   offset = (kernel_data->TOS_RESERVED_SIZE + align_mask) & ~align_mask;
   kernel_data->TOS_RESERVED_SIZE = offset + size;
   return offset;

} /* Endbody */
/*!
 * \brief Checks whether the specified pointer corresponds with a valid
 * lwevent registered on the kernel's USR_LWEVENTS list.
 *
 * \param[in] tst_event_ptr Pointer to the lwevent.
 *
 * \return MQX_OK (Valid lwevent.)
 * \return MQX_LWEVENT_INVALID (Specified lwevent is not valid.)
 *
 * \see LWEVENT_STRUCT
 */
_mqx_uint _lwevent_usr_check
   (
      LWEVENT_STRUCT_PTR tst_event_ptr
   )
{
   KERNEL_DATA_STRUCT_PTR kernel_data;
   LWEVENT_STRUCT_PTR     candidate_ptr;
   _mqx_uint              remaining;
   _mqx_uint              status = MQX_LWEVENT_INVALID;

   _GET_KERNEL_DATA(kernel_data);

   /* Walk at most SIZE elements of the user lwevent list. */
   candidate_ptr = (LWEVENT_STRUCT_PTR)((void *)kernel_data->USR_LWEVENTS.NEXT);
   for (remaining = _QUEUE_GET_SIZE(&kernel_data->USR_LWEVENTS);
        remaining != 0;
        remaining--)
   {
      /* Stop on the first corrupt element — links past it are untrustworthy. */
      if (candidate_ptr->VALID != LWEVENT_VALID) {
         break;
      }
      if (candidate_ptr == tst_event_ptr) {
         status = MQX_OK;
         break;
      }
      candidate_ptr = (LWEVENT_STRUCT_PTR)(void *)candidate_ptr->LINK.NEXT;
   }

   return status;
}
/*!
 * \brief Adds the lightweight timer to the periodic queue.
 *
 * The function inserts the timer in the queue in order of increasing offset
 * from the queue's start time.
 *
 * \param[in] period_ptr Pointer to the periodic queue.
 * \param[in] timer_ptr  Pointer to the lightweight timer to add to the queue,
 * must be smaller than queue.
 * \param[in] ticks      Tick offset from the timers period to expire at.
 * \param[in] func       Function to call when the timer expires.
 * \param[in] parameter  Parameter to pass to the function.
 *
 * \return MQX_OK
 * \return MQX_LWTIMER_INVALID (Period_ptr points to an invalid periodic queue.)
 * \return MQX_INVALID_PARAMETER (Ticks is greater than or equal to the
 * periodic queue's period.)
 *
 * \warning Disables and enables interrupts.
 *
 * \see _lwtimer_cancel_period
 * \see _lwtimer_cancel_timer
 * \see _lwtimer_create_periodic_queue
 * \see LWTIMER_PERIOD_STRUCT
 * \see LWTIMER_STRUCT
 */
_mqx_uint _lwtimer_add_timer_to_queue
(
    LWTIMER_PERIOD_STRUCT_PTR period_ptr,
    LWTIMER_STRUCT_PTR        timer_ptr,
    _mqx_uint                 ticks,
    LWTIMER_ISR_FPTR          func,
    pointer                   parameter
)
{ /* Body */
    KERNEL_DATA_STRUCT_PTR kernel_data;
    LWTIMER_STRUCT_PTR     qe_ptr;
    _mqx_uint              i;

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE4(KLOG_lwtimer_add_timer_to_queue, period_ptr, timer_ptr, ticks);

#if MQX_CHECK_ERRORS
    if (period_ptr->VALID != LWTIMER_VALID)
    {
        _KLOGX2(KLOG_lwtimer_add_timer_to_queue, MQX_LWTIMER_INVALID);
        return (MQX_LWTIMER_INVALID);
    } /* Endif */
    if (ticks >= period_ptr->PERIOD)
    {
        _KLOGX2(KLOG_lwtimer_add_timer_to_queue, MQX_INVALID_PARAMETER);
        return (MQX_INVALID_PARAMETER);
    } /* Endif */
#endif

    /* Fill in the timer before it becomes reachable from the queue. */
    timer_ptr->TIMER_FUNCTION = func;
    timer_ptr->PARAMETER      = parameter;
    timer_ptr->PERIOD_PTR     = period_ptr;
    timer_ptr->RELATIVE_TICKS = ticks;

    _int_disable();

    /* Insert into queue in order of increasing offset from start time */
    /* NOTE(review): qe_ptr initially aliases the queue head's NEXT field so
     * that qe_ptr->LINK.NEXT reads the first element — this presumably relies
     * on LINK being the first member of LWTIMER_STRUCT; confirm against the
     * struct layout. */
    qe_ptr = (pointer) &period_ptr->TIMERS.NEXT;
    i = _QUEUE_GET_SIZE(&period_ptr->TIMERS) + 1;
    while (--i)
    {
        qe_ptr = (pointer) qe_ptr->LINK.NEXT;
        if (qe_ptr->RELATIVE_TICKS >= ticks)
        {
            /* Back up one element: the new timer is inserted before qe_ptr. */
            qe_ptr = (pointer) qe_ptr->LINK.PREV;
            break;
        } /* Endif */
    } /* Endwhile */
    timer_ptr->VALID = LWTIMER_VALID;
    _QUEUE_INSERT(&period_ptr->TIMERS, qe_ptr, &timer_ptr->LINK);

    _int_enable();
    _KLOGX2(KLOG_lwtimer_add_timer_to_queue, MQX_OK);
    return (MQX_OK);

} /* Endbody */
/*!
 * \brief Verifies the structural integrity of a doubly-linked ring queue.
 *
 * Walks SIZE elements forward from the head, checking that each element's
 * PREV link points back to its predecessor, and finally that the ring closes
 * back onto the queue head in both directions.
 *
 * \param[in]  q_ptr                The queue to test.
 * \param[out] element_in_error_ptr Set to the element where the error was
 *                                  detected on failure.
 *
 * \return MQX_OK
 * \return MQX_CORRUPT_QUEUE (SIZE disagrees with the links, or a link is bad)
 *
 * \warning Disables and enables interrupts for the duration of the walk.
 */
_mqx_uint _queue_test
   (
      /* [IN] the queue to test */
      QUEUE_STRUCT_PTR q_ptr,

      /* [OUT] the element where the error was detected */
      pointer _PTR_ element_in_error_ptr
   )
{ /* Body */
   QUEUE_ELEMENT_STRUCT_PTR element_ptr;
   QUEUE_ELEMENT_STRUCT_PTR prev_ptr;
   _mqx_uint                size;

   _int_disable();
   /* +1 so the pre-decrement loop below executes exactly SIZE iterations. */
   size = _QUEUE_GET_SIZE(q_ptr) + 1;
   element_ptr = q_ptr->NEXT;
   prev_ptr = (QUEUE_ELEMENT_STRUCT_PTR)((pointer)q_ptr);
   while (--size) {
      if (element_ptr == (pointer)q_ptr) {
         _int_enable();
         /* Size too big for # elements on queue */
         *element_in_error_ptr = element_ptr;
         return(MQX_CORRUPT_QUEUE);
      } /* Endif */
      if (element_ptr->PREV != prev_ptr) {
         _int_enable();
         *element_in_error_ptr = element_ptr;
         return(MQX_CORRUPT_QUEUE);
      } /* Endif */
      prev_ptr    = element_ptr;
      element_ptr = element_ptr->NEXT;
   } /* Endwhile */

   /* Does the last element in the ring point back to the queue head */
   if ((pointer)element_ptr != (pointer)q_ptr) {
      _int_enable();
      *element_in_error_ptr = element_ptr;
      return(MQX_CORRUPT_QUEUE);
   } /* Endif */

   /* Is the last element in ring pointed to by queues PREV field */
   if (q_ptr->PREV != prev_ptr) {
      _int_enable();
      *element_in_error_ptr = element_ptr;
      return(MQX_CORRUPT_QUEUE);
   } /* Endif */

   _int_enable();
   return(MQX_OK);

} /* Endbody */
/*FUNCTION*-------------------------------------------------------------------
*
* Function Name    : _kuart_period_isr
* Returned Value   : void
* Comments         :
*   Periodic interrupt for mix uart.
*   Computes how many characters the RX DMA channel has delivered and, once
*   the accumulated count reaches the configured wakeup threshold, resumes
*   any tasks blocked waiting for input.
*
*END*----------------------------------------------------------------------*/
static void _kuart_period_isr(void * data_ptr)
{
    IO_SERIAL_INT_DEVICE_STRUCT_PTR int_io_dev_ptr = data_ptr;
    KUART_INFO_STRUCT_PTR sci_info_ptr = int_io_dev_ptr->DEV_INFO_PTR;
    uint32_t dmaUC, dmaC;

    _int_disable();

    /* dmaUC = remaining (uncompleted) transfers; dmaC = completed so far. */
    dma_channel_status(sci_info_ptr->RX_DCH, &sci_info_ptr->RX_DMA_SEQ, &dmaUC);
    dmaC = int_io_dev_ptr->IQUEUE_SIZE - dmaUC;

    if (_QUEUE_GET_SIZE(int_io_dev_ptr->IN_WAITING_TASKS)) {
        /* Wake readers when unread characters (received minus consumed)
         * reach the wakeup threshold.  NOTE(review): the int64_t cast
         * presumably guards the signedness of the comparison against
         * rxwakeupnum — confirm the field's type. */
        if((int64_t)(dmaC + sci_info_ptr->dmaC - sci_info_ptr->readC) >= sci_info_ptr->rxwakeupnum){
            _taskq_resume(int_io_dev_ptr->IN_WAITING_TASKS, TRUE);
        }
    }

    _int_enable();
}
/*!
 * \brief Destroys the virtual MMU context of the specified task, returning
 * all of its virtual pages to the free list.
 *
 * \param[in] task_id The task whose virtual context is to be removed.
 *
 * \return MQX_OK
 * \return MQX_INVALID_TASK_ID (no task descriptor for task_id)
 * \return MQX_MMU_CONTEXT_DOES_NOT_EXIST (task has no MMU context)
 *
 * \warning Disables/enables interrupts and takes the VPAGE_FREELIST_LWSEM.
 */
_mqx_uint _mmu_destroy_vcontext
   (
      /* [IN] the task for which a virtual context is to be removed */
      _task_id task_id
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR         kernel_data;
   TD_STRUCT_PTR                  td_ptr;
   PSP_VIRTUAL_CONTEXT_STRUCT_PTR context_ptr;
   PSP_PAGE_INFO_STRUCT_PTR       mem_ptr;
   PSP_SUPPORT_STRUCT_PTR         psp_support_ptr;

   _GET_KERNEL_DATA(kernel_data);
   psp_support_ptr = kernel_data->PSP_SUPPORT_PTR;

   td_ptr = _task_get_td(task_id);
   if (td_ptr == NULL) {
      return(MQX_INVALID_TASK_ID);
   }/* Endif */

   _int_disable();
   if ((td_ptr->FLAGS & TASK_MMU_CONTEXT_EXISTS) == 0) {
      _int_enable();
      return(MQX_MMU_CONTEXT_DOES_NOT_EXIST);
   } /* Endif */

   if (td_ptr == kernel_data->ACTIVE_PTR) {
      /* Remove task MMU pages from the MMU table */
      _mmu_reset_vcontext_internal();
   }/* Endif */

   /* Detach the context from the TD while interrupts are still disabled,
    * so no one can observe a half-destroyed context. */
   td_ptr->FLAGS &= ~TASK_MMU_CONTEXT_EXISTS;
   context_ptr = td_ptr->MMU_VIRTUAL_CONTEXT_PTR;
   td_ptr->MMU_VIRTUAL_CONTEXT_PTR = NULL;

   /* NOTE(review): the semaphore is acquired before interrupts are
    * re-enabled — presumably intentional ordering to serialize with other
    * free-list users; confirm _lwsem_wait is safe to call here. */
   _lwsem_wait(&psp_support_ptr->VPAGE_FREELIST_LWSEM);
   _int_enable();

   /* Return every page of the context to the virtual-page free list. */
   while (_QUEUE_GET_SIZE(&context_ptr->PAGE_INFO)) {
      _QUEUE_DEQUEUE(&context_ptr->PAGE_INFO, mem_ptr);
      _QUEUE_ENQUEUE(&psp_support_ptr->VPAGE_FREELIST, &mem_ptr->ELEMENT);
   } /* Endwhile */
   _lwsem_post(&psp_support_ptr->VPAGE_FREELIST_LWSEM);

   _mem_free(context_ptr);
   return(MQX_OK);

} /* Endbody */
/*!
 * \brief Fetches the next character to transmit, giving priority to pending
 * XON/XOFF software flow-control characters.
 *
 * \param[in] int_io_dev_ptr The interrupt I/O context information.
 *
 * \return CNTL_S / CNTL_Q when a flow-control character must be sent,
 *         the next queued character otherwise, or -1 when output is
 *         stopped, disabled, or the output queue is empty.
 */
_mqx_int _io_serial_int_nextc
   (
      /* [IN] the interrupt I/O context information */
      IO_SERIAL_INT_DEVICE_STRUCT_PTR int_io_dev_ptr
   )
{ /* Body */
   uchar next_char;

   /* A pending XON/XOFF request is transmitted before any queued data. */
   if (int_io_dev_ptr->FLAGS & IO_SERIAL_XON_XOFF) {
      if (int_io_dev_ptr->MUST_STOP_INPUT) {
         int_io_dev_ptr->MUST_STOP_INPUT    = FALSE;
         int_io_dev_ptr->HAVE_STOPPED_INPUT = TRUE;
         return((int_32)CNTL_S);
      }
      if (int_io_dev_ptr->MUST_START_INPUT) {
         int_io_dev_ptr->MUST_START_INPUT   = FALSE;
         int_io_dev_ptr->HAVE_STOPPED_INPUT = FALSE;
         return((int_32)CNTL_Q);
      }
   }

   /* Nothing may be transmitted while output is stopped or disabled. */
   if (int_io_dev_ptr->HAVE_STOPPED_OUTPUT) {
      return(-1);
   }
   if (! int_io_dev_ptr->OUTPUT_ENABLED) {
      return(-1);
   }

   if (_CHARQ_EMPTY(int_io_dev_ptr->OUT_QUEUE)) {
      /* Output queue drained: disable output and wake any tasks blocked
       * waiting on the output-waiting task queue. */
      int_io_dev_ptr->OUTPUT_ENABLED = FALSE;
      if (_QUEUE_GET_SIZE(int_io_dev_ptr->OUT_WAITING_TASKS)) {
         _taskq_resume(int_io_dev_ptr->OUT_WAITING_TASKS, TRUE);
      }
      return(-1);
   }

   _CHARQ_DEQUEUE(int_io_dev_ptr->OUT_QUEUE, next_char);
   return((_mqx_int)next_char);

} /* Endbody */
/*!
 * \brief Prints stacks usage.
 *
 * Walks the kernel TD list and reports, for each task, its stack base,
 * stack limit, high-water mark and overflow status, then reports the same
 * for the kernel interrupt stack.  The high-water mark is found by scanning
 * upward from the stack limit past the 0x7374616B (ASCII "stak") fill
 * pattern in still-unused stack words; if no intact fill word remains, the
 * stack is reported as 100% used / overflowed.
 */
void _tad_stack_usage (void)
{
#if MQX_MONITOR_STACK && MQX_TD_HAS_STACK_LIMIT
    _mqx_uint_ptr   stack_used, stack_limit;
    _mqx_int        percent, size;
    TD_STRUCT_PTR   td_ptr;
    TASK_TEMPLATE_STRUCT_PTR template_ptr;
    KERNEL_DATA_STRUCT_PTR kernel_data_ptr = _mqx_get_kernel_data ();
    char           *task_name;

    printf ("\nStack Usage:\n");
    printf ("Task Stack Base Stack Limit Stack Used %% Used Overflow?\n");

    size = _QUEUE_GET_SIZE(&kernel_data_ptr->TD_LIST);
    td_ptr = (TD_STRUCT_PTR)((unsigned char *)kernel_data_ptr->TD_LIST.NEXT
                             - FIELD_OFFSET(TD_STRUCT,TD_LIST_INFO));
    while ((0 != size) && (NULL != td_ptr)) {
#if MQX_TD_HAS_TASK_TEMPLATE_PTR
        template_ptr = td_ptr->TASK_TEMPLATE_PTR;
        task_name = template_ptr->TASK_NAME;
#else
        char name[12];
        sprintf(name, "0x%x", td_ptr->TASK_ID);
        task_name = name;
#endif
        /* Scan upward from just above the limit past the fill pattern. */
        stack_used = td_ptr->STACK_LIMIT;
        stack_used++;
        while (*stack_used == 0x7374616B) {
            stack_used++;
        }
        /* No intact fill word above the limit => stack fully used/overflowed. */
        if ((stack_used == td_ptr->STACK_LIMIT)
            || (stack_used == (_mqx_uint_ptr)td_ptr->STACK_LIMIT + 1)) {
            percent = 100;
        }
        else {
            percent = ((_mqx_uint_ptr)td_ptr->STACK_BASE - stack_used) * 100
                      / ((_mqx_uint_ptr)td_ptr->STACK_BASE - (_mqx_uint_ptr)td_ptr->STACK_LIMIT);
        }
        printf ("%s 0x%lx 0x%lx 0x%lx %ld %% %s\n",
                task_name, td_ptr->STACK_BASE, td_ptr->STACK_LIMIT,
                stack_used, percent, percent >= 100 ? "Yes" : "No");
        size--;
        td_ptr = (TD_STRUCT_PTR)((unsigned char *)(td_ptr->TD_LIST_INFO.NEXT)
                                 - FIELD_OFFSET(TD_STRUCT,TD_LIST_INFO));
    }

    /* Interrupt stack: its limit is the lowest address of the region. */
    stack_used = stack_limit =
        (_mqx_uint_ptr)(((_mqx_uint)(kernel_data_ptr->INTERRUPT_STACK_PTR))
                        - kernel_data_ptr->INIT.INTERRUPT_STACK_SIZE);
    stack_used++;
    while (*stack_used == 0x7374616B) {
        stack_used++;
    }
    /* BUGFIX: this overflow test previously compared stack_used against
     * td_ptr->STACK_LIMIT, but td_ptr has already walked off the end of the
     * TD list here; the interrupt stack must be checked against its own
     * stack_limit. */
    if ((stack_used == stack_limit) || (stack_used == stack_limit + 1)) {
        printf ("Interrupt 0x%lx 0x%lx 0x%lx 100 %% Yes",
                kernel_data_ptr->INTERRUPT_STACK_PTR, stack_limit, stack_used);
    }
    else {
        percent = ((_mqx_uint_ptr)kernel_data_ptr->INTERRUPT_STACK_PTR - stack_used) * 100
                  / ((_mqx_uint_ptr)kernel_data_ptr->INTERRUPT_STACK_PTR - stack_limit);
        printf ("Interrupt 0x%lx 0x%lx 0x%lx %ld %% No",
                kernel_data_ptr->INTERRUPT_STACK_PTR, stack_limit, stack_used, percent);
    }
    printf ("\n");
#endif
}
/*!
 * \brief Kernel tick notification: advances kernel time, wakes tasks whose
 * timeouts have expired, handles time slicing, and services the timer and
 * lwtimer components.
 *
 * Called on every system tick (from the tick ISR path).
 */
void _time_notify_kernel
   (
      void
   )
{ /* Body */
   register KERNEL_DATA_STRUCT_PTR kernel_data;
   register TD_STRUCT_PTR          td_ptr;
   register TD_STRUCT_PTR          next_td_ptr;
   register _mqx_uint              count;
   register _mqx_int               result;

   _GET_KERNEL_DATA(kernel_data);

   /*
   ** Update the current time.
   */
   PSP_INC_TICKS(&kernel_data->TIME);

   _INT_DISABLE();
   if (kernel_data->GET_HWTICKS) {
      // The hardware clock may have counted passed it's reference
      // and have an interrupt pending.  Thus, HW_TICKS may exceed
      // kernel_data->HW_TICKS_PER_TICK and this tick_ptr may need
      // normalizing.  This is done in a moment.
      kernel_data->TIME.HW_TICKS = (*kernel_data->GET_HWTICKS)
         (kernel_data->GET_HWTICKS_PARAM);
   } /* Endif */

   // The tick_ptr->HW_TICKS value might exceed the
   // kernel_data->HW_TICKS_PER_TICK and need to be
   // normalized for the PSP.
   PSP_NORMALIZE_TICKS(&kernel_data->TIME);

   /*
   ** Check for tasks on the timeout queue, and wake the appropriate
   ** ones up.  The timeout queue is a time-priority queue.
   */
   count = _QUEUE_GET_SIZE(&kernel_data->TIMEOUT_QUEUE);
   if (count) {
      td_ptr = (TD_STRUCT_PTR)((pointer)kernel_data->TIMEOUT_QUEUE.NEXT);
      ++count;
      while ( --count ) {
         /* Save the link before unlinking td_ptr from the queue. */
         next_td_ptr = td_ptr->TD_NEXT;
         result = PSP_CMP_TICKS(&kernel_data->TIME, &td_ptr->TIMEOUT);
         if (result >= 0) {
            /* Timeout expired: take the task off the timeout queue
             * (SIZE is maintained manually alongside _QUEUE_UNLINK). */
            --kernel_data->TIMEOUT_QUEUE.SIZE;
            _QUEUE_UNLINK(td_ptr);
            td_ptr->STATE &= ~IS_ON_TIMEOUT_Q;
            if (td_ptr->STATE & TD_IS_ON_AUX_QUEUE) {
               td_ptr->STATE &= ~TD_IS_ON_AUX_QUEUE;
               _QUEUE_REMOVE(td_ptr->INFO, &td_ptr->AUX_QUEUE);
            } /* Endif */
            _TASK_READY(td_ptr, kernel_data);
         } else {
            /* Queue is ordered by expiry time, so the rest are later. */
            break;  /* No more to do */
         } /* Endif */
         td_ptr = next_td_ptr;
      } /* Endwhile */
   } /* Endif */

#if MQX_HAS_TIME_SLICE
   /*
   ** Check if the currently running task is a time slice task
   ** and if its time has expired, put it at the end of its queue
   */
   td_ptr = kernel_data->ACTIVE_PTR;
   if ( td_ptr->FLAGS & MQX_TIME_SLICE_TASK ) {
      PSP_INC_TICKS(&td_ptr->CURRENT_TIME_SLICE);
      if (! (td_ptr->FLAGS & TASK_PREEMPTION_DISABLED) ) {
         result = PSP_CMP_TICKS(&td_ptr->CURRENT_TIME_SLICE, &td_ptr->TIME_SLICE);
         if ( result >= 0 ) {
            /* Re-queue the task at the end of its ready queue. */
            _QUEUE_UNLINK(td_ptr);
            _TASK_READY(td_ptr,kernel_data);
         } /* Endif */
      } /* Endif */
   } /* Endif */
#endif

   _INT_ENABLE();

#if MQX_USE_TIMER
   /* If the timer component needs servicing, call its ISR function */
   if (kernel_data->TIMER_COMPONENT_ISR != NULL) {
      (*kernel_data->TIMER_COMPONENT_ISR)();
   }/* Endif */
#endif

#if MQX_USE_LWTIMER
   /* If the lwtimer needs servicing, call its ISR function */
   if (kernel_data->LWTIMER_ISR != NULL) {
      (*kernel_data->LWTIMER_ISR)();
   }/* Endif */
#endif

} /* Endbody */
/*! * \brief Tests the event component for validity and consistency. * * \param[out] event_error_ptr Pointer to the lightweight event that has an * error if MQX found an error in the lightweight event component (NULL if no error * is found). * \param[out] td_error_ptr TD on the lightweight event in error (NULL if no * error is found). * * \return MQX_OK * \return MQX_CANNOT_CALL_FUNCTION_FROM_ISR (Function cannot be called from an ISR.) * \return MQX_LWEVENT_INVALID (A lightweight event was invalid.) * \return code from _queue_test() (Waiting queue for a lightweight event has an error.) * * \warning Cannot be called from an ISR. * * \see _lwevent_create * \see _lwevent_destroy */ _mqx_uint _lwevent_test ( void **event_error_ptr, void **td_error_ptr ) { KERNEL_DATA_STRUCT_PTR kernel_data; LWEVENT_STRUCT_PTR event_ptr; _mqx_uint result; _mqx_uint queue_size; _GET_KERNEL_DATA(kernel_data); _KLOGE2(KLOG_lwevent_test, event_error_ptr); *td_error_ptr = NULL; *event_error_ptr = NULL; #if MQX_CHECK_ERRORS if (kernel_data->IN_ISR) { _KLOGX2(KLOG_lwevent_test, MQX_CANNOT_CALL_FUNCTION_FROM_ISR); return (MQX_CANNOT_CALL_FUNCTION_FROM_ISR); }/* Endif */ #endif /* * It is not considered an error if the lwevent component has not been * created yet */ if (kernel_data->LWEVENTS.NEXT == NULL) { return (MQX_OK); } /* Endif */ result = _queue_test((QUEUE_STRUCT_PTR) &kernel_data->LWEVENTS, event_error_ptr); if (result != MQX_OK) { _KLOGX3(KLOG_lwevent_test, result, *event_error_ptr); return (result); } /* Endif */ event_ptr = (LWEVENT_STRUCT_PTR) ((void *) kernel_data->LWEVENTS.NEXT); queue_size = _QUEUE_GET_SIZE(&kernel_data->LWEVENTS); while (queue_size--) { if (event_ptr->VALID != LWEVENT_VALID) { result = MQX_LWEVENT_INVALID; break; } /* Endif */ result = _queue_test(&event_ptr->WAITING_TASKS, td_error_ptr); if (result != MQX_OK) { break; } /* Endif */ event_ptr = (LWEVENT_STRUCT_PTR) (void *) event_ptr->LINK.NEXT; } /* Endwhile */ _int_enable(); if (result != MQX_OK) { 
*event_error_ptr = (void *) event_ptr; } /* Endif */ _KLOGX4(KLOG_lwevent_test, result, *event_error_ptr, *td_error_ptr); return (result); }
/*!
 * \brief Used by a task to set the specified event bits in an event.
 *
 * \param[in] event_ptr Pointer to the lightweight event to set bits in.
 * \param[in] bit_mask  Bit mask. Each bit represents an event bit to be set.
 *
 * \return MQX_OK
 * \return MQX_LWEVENT_INVALID (Lightweight event was invalid.)
 *
 * \see _lwevent_create
 * \see _lwevent_destroy
 * \see _lwevent_set_auto_clear
 * \see _lwevent_clear
 * \see _lwevent_test
 * \see _lwevent_wait_for
 * \see _lwevent_wait_ticks
 * \see _lwevent_wait_until
 * \see _lwevent_get_signalled
 * \see LWEVENT_STRUCT
 */
_mqx_uint _lwevent_set
    (
        LWEVENT_STRUCT_PTR  event_ptr,
        _mqx_uint           bit_mask
    )
{
    KERNEL_DATA_STRUCT_PTR   kernel_data;
    QUEUE_ELEMENT_STRUCT_PTR q_ptr;
    QUEUE_ELEMENT_STRUCT_PTR next_q_ptr;
    TD_STRUCT_PTR            td_ptr;
    _mqx_uint                set_bits;

#if MQX_ENABLE_USER_MODE && MQX_ENABLE_USER_STDAPI
    /* In user mode, delegate to the user-mode API wrapper. */
    if (MQX_RUN_IN_USER_MODE)
    {
        return _usr_lwevent_set(event_ptr, bit_mask);
    }
#endif

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE3(KLOG_lwevent_set, event_ptr, bit_mask);
    _INT_DISABLE();

#if MQX_CHECK_VALIDITY
    if (event_ptr->VALID != LWEVENT_VALID)
    {
        _int_enable();
        _KLOGX2(KLOG_lwevent_set, MQX_LWEVENT_INVALID);
        return (MQX_LWEVENT_INVALID);
    } /* Endif */
#endif

    set_bits = event_ptr->VALUE | bit_mask;

    if (_QUEUE_GET_SIZE(&event_ptr->WAITING_TASKS))
    {
        /* Schedule waiting task(s) to run if bits ok */
        q_ptr = event_ptr->WAITING_TASKS.NEXT;
        while (q_ptr != (QUEUE_ELEMENT_STRUCT_PTR) ((void *) &event_ptr->WAITING_TASKS))
        {
            /* Waiting tasks are linked through their AUX_QUEUE element;
             * recover the enclosing TD from the queue element. */
            td_ptr = (void *) q_ptr;
            _BACKUP_POINTER(td_ptr, TD_STRUCT, AUX_QUEUE);
            /* Save the link before q_ptr may be removed from the queue. */
            next_q_ptr = q_ptr->NEXT;
            /* Wake the task if it wanted ALL its bits and got them, or
             * wanted ANY bit and at least one is now set. */
            if (((td_ptr->FLAGS & TASK_LWEVENT_ALL_BITS_WANTED) &&
                ((td_ptr->LWEVENT_BITS & set_bits) == td_ptr->LWEVENT_BITS)) ||
                ((!(td_ptr->FLAGS & TASK_LWEVENT_ALL_BITS_WANTED)) &&
                (td_ptr->LWEVENT_BITS & set_bits)))
            {
                _QUEUE_REMOVE(&event_ptr->WAITING_TASKS, q_ptr);
                _TIME_DEQUEUE(td_ptr, kernel_data);
                td_ptr->INFO = 0;
                _TASK_READY(td_ptr, kernel_data);

                /* store information about which bits caused task to be unblocked */
                td_ptr->LWEVENT_BITS &= set_bits;
                /* Auto-clear bits consumed by this task are removed so they
                 * do not wake further waiters or remain set. */
                set_bits &= ~(event_ptr->AUTO & td_ptr->LWEVENT_BITS);

            } /* Endif */
            q_ptr = next_q_ptr;
        } /* Endwhile */
    } /* Endif */

    event_ptr->VALUE = set_bits;
    _INT_ENABLE();

    /* May need to let higher priority task run */
    _CHECK_RUN_SCHEDULER();

    _KLOGX2(KLOG_lwevent_set, MQX_OK);
    return (MQX_OK);
}
/*!
 * \private
 *
 * \brief Used by a task to destroy an instance of a lightweight event.
 *
 * \param[in] event_ptr Pointer to the lightweight event to be deinitialized.
 * \param[in] user      User mode
 *
 * \return MQX_OK
 * \return MQX_LWEVENT_INVALID (Lightweight event was not valid.)
 * \return MQX_CANNOT_CALL_FUNCTION_FROM_ISR (Function cannot be called from an ISR.)
 *
 * \see _lwevent_destroy
 * \see LWEVENT_STRUCT
 */
_mqx_uint _lwevent_destroy_internal
    (
        LWEVENT_STRUCT_PTR  event_ptr,
        bool                user
    )
{
    KERNEL_DATA_STRUCT_PTR kernel_data;
#if MQX_COMPONENT_DESTRUCTION
    TD_STRUCT_PTR          td_ptr;
#endif

#if MQX_ENABLE_USER_MODE
    /* User-supplied pointer must be accessible from user mode. */
    if (user && !_psp_mem_check_access_mask((uint32_t)event_ptr,
        sizeof(LWEVENT_STRUCT),
        MPU_UM_R, MPU_UM_RW))
    {
        return MQX_LWEVENT_INVALID;
    }
#endif

    _GET_KERNEL_DATA(kernel_data);
    _KLOGE2(KLOG_lwevent_destroy, event_ptr);

#if MQX_COMPONENT_DESTRUCTION
    /* NOTE(review): when MQX_COMPONENT_DESTRUCTION is 0 this function
     * performs no work and simply returns MQX_OK. */
#if MQX_CHECK_ERRORS
    if (kernel_data->IN_ISR)
    {
        _KLOGX2(KLOG_lwevent_destroy, MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
        return (MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
    } /* Endif */
#endif

    _int_disable();
#if MQX_CHECK_VALIDITY
    if (event_ptr->VALID != LWEVENT_VALID)
    {
        _int_enable();
        _KLOGX2(KLOG_lwevent_destroy, MQX_LWEVENT_INVALID);
        return (MQX_LWEVENT_INVALID);
    } /* Endif */
#endif

    /* Effectively stop all access to the event */
    event_ptr->VALID = 0;

    /* Ready every task still blocked on the event. */
    while (_QUEUE_GET_SIZE(&event_ptr->WAITING_TASKS))
    {
        _QUEUE_DEQUEUE(&event_ptr->WAITING_TASKS, td_ptr);
        /* Tasks are linked through their AUX_QUEUE element. */
        _BACKUP_POINTER(td_ptr, TD_STRUCT, AUX_QUEUE);
        _TIME_DEQUEUE(td_ptr, kernel_data);
        _TASK_READY(td_ptr, kernel_data);
    } /* Endwhile */

    /* remove event from kernel LWEVENTS queue */
#if MQX_ENABLE_USER_MODE
    if (user)
    {
        _QUEUE_REMOVE(&kernel_data->USR_LWEVENTS, event_ptr);
    }
    else
#endif
    {
        _QUEUE_REMOVE(&kernel_data->LWEVENTS, event_ptr);
    }

    _int_enable();

    /* May need to let higher priority task run */
    _CHECK_RUN_SCHEDULER();
#endif

    _KLOGX2(KLOG_lwevent_destroy, MQX_OK);
    return (MQX_OK);
}
/*!
 * \brief Tests every lightweight semaphore on the kernel's LWSEM list for
 * validity and queue consistency.
 *
 * \param[out] lwsem_error_ptr Set to the lwsem in error (NULL if none).
 * \param[out] td_error_ptr    Set to the TD in error on an lwsem's waiting
 *                             queue (NULL if none).
 *
 * \return MQX_OK
 * \return MQX_CANNOT_CALL_FUNCTION_FROM_ISR (called from an ISR)
 * \return MQX_INVALID_LWSEM (an lwsem failed its validity check)
 * \return code from _queue_test() (a queue is corrupt)
 *
 * \warning Cannot be called from an ISR.  Disables and enables interrupts.
 */
_mqx_uint _lwsem_test
   (
      /* [OUT] the light weight semapohre in error */
      pointer _PTR_ lwsem_error_ptr,

      /* [OUT] the td on a light weight semaphore in error */
      pointer _PTR_ td_error_ptr
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR kernel_data;
   LWSEM_STRUCT_PTR       sem_ptr;
   _mqx_uint              queue_size;
   _mqx_uint              result;

   _GET_KERNEL_DATA(kernel_data);

   _KLOGE3(KLOG_lwsem_test, lwsem_error_ptr, td_error_ptr);

   *td_error_ptr = NULL;
   *lwsem_error_ptr = NULL;

#if MQX_CHECK_ERRORS
   if (kernel_data->IN_ISR) {
      _KLOGX2(KLOG_lwsem_test, MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
      return(MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
   }/* Endif */
#endif

   _int_disable();

   result = _queue_test((QUEUE_STRUCT_PTR)&kernel_data->LWSEM, lwsem_error_ptr);
   if (result != MQX_OK) {
      /* BUGFIX: the early-return path previously returned with interrupts
       * still disabled, leaving the _int_disable above unbalanced. */
      _int_enable();
      _KLOGX3(KLOG_lwsem_test, result, *lwsem_error_ptr);
      return(result);
   } /* Endif */

   /* Validate each lwsem and the consistency of its waiting-task queue. */
   sem_ptr = (LWSEM_STRUCT_PTR)((pointer)kernel_data->LWSEM.NEXT);
   queue_size = _QUEUE_GET_SIZE(&kernel_data->LWSEM);
   while (queue_size--) {
      if (sem_ptr->VALID != LWSEM_VALID) {
         result = MQX_INVALID_LWSEM;
         break;
      } /* Endif */
      result = _queue_test(&sem_ptr->TD_QUEUE, td_error_ptr);
      if (result != MQX_OK) {
         break;
      } /* Endif */
      sem_ptr = sem_ptr->NEXT;
   } /* Endwhile */

   _int_enable();

   if (result != MQX_OK) {
      *lwsem_error_ptr = (pointer)sem_ptr;
   } /* Endif */
   _KLOGX4(KLOG_lwsem_test, result, *lwsem_error_ptr, *td_error_ptr);
   return(result);

}
/*!
 * \brief Posts (releases) a semaphore.
 *
 * If a task is waiting, it is made ready and handed the semaphore;
 * otherwise the semaphore count is incremented.  For strict semaphores the
 * connection's POST_STATE is decremented and priority boosting is undone
 * when the task no longer holds the semaphore.
 *
 * \param[in] users_sem_ptr The semaphore handle returned by _sem_open.
 *
 * \return MQX_OK
 * \return SEM_INVALID_SEMAPHORE_HANDLE / SEM_INVALID_SEMAPHORE
 * \return MQX_CANNOT_CALL_FUNCTION_FROM_ISR (strict semaphore from an ISR)
 * \return SEM_CANNOT_POST (strict semaphore not held by this connection)
 * \return SEM_INVALID_SEMAPHORE_COUNT (count corruption detected)
 *
 * \warning Disables and enables interrupts; may trigger a reschedule and,
 *          with component destruction enabled, may free the semaphore.
 */
_mqx_uint _sem_post
   (
      /* [IN] -  The semaphore handle returned by _sem_open. */
      pointer users_sem_ptr
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR    kernel_data;
   SEM_STRUCT_PTR            sem_ptr;
#if MQX_COMPONENT_DESTRUCTION
   SEM_COMPONENT_STRUCT_PTR  sem_component_ptr;
#endif
   SEM_CONNECTION_STRUCT_PTR new_sem_connection_ptr;
   SEM_CONNECTION_STRUCT_PTR sem_connection_ptr;
   TD_STRUCT_PTR             new_td_ptr;
   boolean                   task_added = FALSE;
   boolean                   destroying_semaphore = FALSE;

   _GET_KERNEL_DATA(kernel_data);

   _KLOGE2(KLOG_sem_post, users_sem_ptr);

   sem_connection_ptr = (SEM_CONNECTION_STRUCT_PTR)users_sem_ptr;

#if MQX_CHECK_VALIDITY
   if (sem_connection_ptr->VALID != SEM_VALID) {
      _KLOGX2(KLOG_sem_post, SEM_INVALID_SEMAPHORE_HANDLE);
      return(SEM_INVALID_SEMAPHORE_HANDLE);
   } /* Endif */
#endif

   sem_ptr = sem_connection_ptr->SEM_PTR;

#if MQX_CHECK_ERRORS
   if (sem_ptr->POLICY & SEM_STRICT) {
      if (kernel_data->IN_ISR) {
         _KLOGX2(KLOG_sem_post, MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
         return(MQX_CANNOT_CALL_FUNCTION_FROM_ISR);
      } /* Endif */

      if (sem_connection_ptr->TD_PTR != kernel_data->ACTIVE_PTR) {
         /* ONLY OPENING task can use the semaphore */
         _KLOGX2(KLOG_sem_post, SEM_INVALID_SEMAPHORE_HANDLE);
         return(SEM_INVALID_SEMAPHORE_HANDLE);
      } /* Endif */

      if (sem_connection_ptr->POST_STATE == 0) {
         _KLOGX2(KLOG_sem_post, SEM_CANNOT_POST);
         return(SEM_CANNOT_POST);
      } /* Endif */
   } /* Endif */
#endif

   _INT_DISABLE();

#if MQX_CHECK_VALIDITY
   if (sem_ptr->VALID != SEM_VALID) {
      _int_enable();
      _KLOGX2(KLOG_sem_post, SEM_INVALID_SEMAPHORE);
      return(SEM_INVALID_SEMAPHORE);
   } /* Endif */
#endif

   if (sem_ptr->POLICY & SEM_STRICT) {
#if MQX_CHECK_ERRORS
      if (sem_ptr->COUNT > sem_ptr->MAX_COUNT) {
         /* Corruption somewhere */
         _int_enable();
         _KLOGX2(KLOG_sem_post, SEM_INVALID_SEMAPHORE_COUNT);
         return(SEM_INVALID_SEMAPHORE_COUNT);
      } /* Endif */
#endif

      /* One fewer outstanding wait held by this connection. */
      --sem_connection_ptr->POST_STATE;
      if (sem_connection_ptr->POST_STATE == 0) {
         /* Connection no longer owns the semaphore. */
         _QUEUE_REMOVE(&sem_ptr->OWNING_TASKS, sem_connection_ptr);
      } /* Endif */
   } /* Endif */

   if (_QUEUE_GET_SIZE(&sem_ptr->WAITING_TASKS)) {
      /* Schedule a waiting task to run */
      new_sem_connection_ptr = (SEM_CONNECTION_STRUCT_PTR)
         ((pointer)sem_ptr->WAITING_TASKS.NEXT);
      /* Find the first waiter still blocked on the semaphore (a waiter
       * may have timed out and be in a different state). */
      while (new_sem_connection_ptr != (pointer)&sem_ptr->WAITING_TASKS) {
         new_td_ptr = new_sem_connection_ptr->TD_PTR;
         if ((new_td_ptr->STATE & STATE_MASK) == SEM_BLOCKED) {
            _TIME_DEQUEUE(new_td_ptr, kernel_data);
            _TASK_READY(new_td_ptr, kernel_data);
            new_td_ptr->INFO = SEM_AVAILABLE;
            task_added = TRUE;
            break;
         } /* Endif */
         new_sem_connection_ptr = (SEM_CONNECTION_STRUCT_PTR)
            new_sem_connection_ptr->NEXT;
      } /* Endwhile */
   } /* Endif */

   if (!task_added) {
      /* Nobody was handed the semaphore; make it available again. */
      ++sem_ptr->COUNT;

#if MQX_COMPONENT_DESTRUCTION
      if ( sem_ptr->DELAYED_DESTROY ) {
         /* A pending destroy completes once the semaphore is fully released. */
         if ( ( (sem_ptr->POLICY & SEM_STRICT) &&
                (sem_ptr->COUNT == sem_ptr->MAX_COUNT) ) ||
              ( !(sem_ptr->POLICY & SEM_STRICT) ) )
         {
            /* Destroy the semaphore name */
            sem_ptr->VALID = 0;
            destroying_semaphore = TRUE;
         } /* Endif */
      } /* Endif */
#endif
   } /* Endif */

   if (sem_connection_ptr->BOOSTED &&
       (sem_connection_ptr->POST_STATE == 0))
   {
      /* This task was boosted by a waiting task */
      _sched_unboost_priority_internal(kernel_data->ACTIVE_PTR,
         sem_connection_ptr->BOOSTED);
      sem_connection_ptr->BOOSTED = 0;
   } /* Endif */

   _INT_ENABLE();

   /* Let higher priority task run */
   _CHECK_RUN_SCHEDULER();

#if MQX_COMPONENT_DESTRUCTION
   if (destroying_semaphore) {
      /* Complete the deferred destruction outside the interrupt window. */
      sem_component_ptr = (SEM_COMPONENT_STRUCT_PTR)
         kernel_data->KERNEL_COMPONENTS[KERNEL_SEMAPHORES];
      if (sem_component_ptr != NULL) {
         _name_delete_internal(sem_component_ptr->NAME_TABLE_HANDLE,
            sem_ptr->NAME);
      } /* Endif */
      _mem_free(sem_ptr);
   } /* Endif */
#endif

   _KLOGX2(KLOG_sem_post, MQX_OK);
   return(MQX_OK);

} /* Endbody */
/*!
 * \brief Adds a received character to the serial device's input queue,
 * applying XON/XOFF and hardware flow control.
 *
 * XON/XOFF control characters received from the peer are consumed here
 * (stopping/starting our output) and never enqueued.  Crossing the input
 * high/low water marks requests the peer to stop/start sending.
 *
 * \param[in] int_io_dev_ptr The interrupt I/O context information.
 * \param[in] c              The character to add to the input queue.
 *
 * \return TRUE if the character was handled, FALSE if it was tossed
 *         because the input queue was full.
 */
boolean _io_serial_int_addc
   (
      /* [IN] the interrupt I/O context information */
      IO_SERIAL_INT_DEVICE_STRUCT_PTR int_io_dev_ptr,

      /* [IN] the character to add to the input queue */
      char c
   )
{ /* Body */
   CHARQ_STRUCT_PTR in_queue;
   _mqx_uint        ioctl_val;

   if (int_io_dev_ptr->FLAGS & IO_SERIAL_XON_XOFF) {
      /* Peer flow-control characters toggle our output and are consumed. */
      if (int_io_dev_ptr->HAVE_STOPPED_OUTPUT) {
         if (c == CNTL_Q) {
            int_io_dev_ptr->HAVE_STOPPED_OUTPUT = FALSE;
            return TRUE;
         } /* Endif */
      } else {
         if (c == CNTL_S) {
            int_io_dev_ptr->HAVE_STOPPED_OUTPUT = TRUE;
            return TRUE;
         } /* Endif */
      } /* Endif */
   } /* Endif */

   in_queue = int_io_dev_ptr->IN_QUEUE;
   if (_CHARQ_NOT_FULL(in_queue)) {
      _CHARQ_ENQUEUE(in_queue,c);
      if (int_io_dev_ptr->FLAGS & (IO_SERIAL_XON_XOFF | IO_SERIAL_HW_FLOW_CONTROL)) {
         if (_CHARQ_SIZE(in_queue) > int_io_dev_ptr->INPUT_HIGH_WATER_MARK) {
            /* Above the high water mark: ask the peer to stop sending,
             * either via a queued XOFF or by dropping RTS. */
            if (!int_io_dev_ptr->HAVE_STOPPED_INPUT) {
               if (int_io_dev_ptr->FLAGS & IO_SERIAL_XON_XOFF) {
                  int_io_dev_ptr->MUST_STOP_INPUT = TRUE;
               } else {
                  if (int_io_dev_ptr->DEV_IOCTL != NULL) {
                     ioctl_val = IO_SERIAL_RTS;
                     (*int_io_dev_ptr->DEV_IOCTL)(int_io_dev_ptr->DEV_INFO_PTR,
                        IO_IOCTL_SERIAL_CLEAR_HW_SIGNAL, &ioctl_val);
                  }
                  int_io_dev_ptr->HAVE_STOPPED_INPUT = TRUE;
               } /* Endif */
            } /* Endif */
         } else if (_CHARQ_SIZE(in_queue) < int_io_dev_ptr->INPUT_LOW_WATER_MARK) {
            /* Back below the low water mark: allow the peer to resume,
             * either via a queued XON or by raising RTS. */
            if (int_io_dev_ptr->HAVE_STOPPED_INPUT) {
               if (int_io_dev_ptr->FLAGS & IO_SERIAL_XON_XOFF) {
                  int_io_dev_ptr->MUST_START_INPUT = TRUE;
               } else {
                  if (int_io_dev_ptr->DEV_IOCTL != NULL) {
                     ioctl_val = IO_SERIAL_RTS;
                     (*int_io_dev_ptr->DEV_IOCTL)(int_io_dev_ptr->DEV_INFO_PTR,
                        IO_IOCTL_SERIAL_SET_HW_SIGNAL, &ioctl_val);
                  }
                  int_io_dev_ptr->HAVE_STOPPED_INPUT = FALSE;
               } /* Endif */
            } /* Endif */
         } /* Endif */
      } /* Endif */
   } else {
      /* indicate that tossed the character */
      return FALSE;
   } /* Endif */

   /* Wake any tasks blocked waiting for input. */
   if (_QUEUE_GET_SIZE(int_io_dev_ptr->IN_WAITING_TASKS)) {
      _taskq_resume(int_io_dev_ptr->IN_WAITING_TASKS, TRUE);
   } /* Endif */

   return TRUE;

} /* Endbody */
/*!
 * \brief Sets the specified event bits in a named event, waking any tasks
 * whose wait conditions are now satisfied.
 *
 * \param[in] users_event_ptr An event handle returned from _event_open or
 *                            _event_open_fast.
 * \param[in] bit_mask        Bit mask, each bit of which represents an event.
 *
 * \return MQX_OK
 * \return EVENT_INVALID_EVENT_HANDLE / EVENT_INVALID_EVENT
 * \return MQX_COMPONENT_DOES_NOT_EXIST / MQX_INVALID_COMPONENT_BASE
 * \return EVENT_NOT_FOUND (multiprocessor: no IPC to reach the remote CPU)
 *
 * \warning Disables and enables interrupts; may trigger a reschedule.
 */
_mqx_uint _event_set
   (
      /* [IN] - An event handle returned from _event_open or _event_open_fast */
      pointer users_event_ptr,

      /* [IN] - bit mask, each bit of which represents an event. */
      _mqx_uint bit_mask
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR      kernel_data;
   EVENT_STRUCT_PTR            event_ptr;
   EVENT_COMPONENT_STRUCT_PTR  event_component_ptr;
   EVENT_CONNECTION_STRUCT_PTR conn_ptr;
   EVENT_CONNECTION_STRUCT_PTR next_conn_ptr;
   EVENT_CONNECTION_STRUCT_PTR event_connection_ptr;
   TD_STRUCT_PTR               new_td_ptr;
   _mqx_uint                   set_bits;

   _GET_KERNEL_DATA(kernel_data);

   _KLOGE3(KLOG_event_set, users_event_ptr, bit_mask);

   event_connection_ptr = (EVENT_CONNECTION_STRUCT_PTR)users_event_ptr;

#if MQX_CHECK_VALIDITY
   if (event_connection_ptr->VALID != EVENT_VALID){
      _KLOGX2(KLOG_event_set, EVENT_INVALID_EVENT_HANDLE);
      return(EVENT_INVALID_EVENT_HANDLE);
   } /* Endif */
#endif

   event_component_ptr = (EVENT_COMPONENT_STRUCT_PTR)
      kernel_data->KERNEL_COMPONENTS[KERNEL_EVENTS];

#if MQX_CHECK_ERRORS
   if (event_component_ptr == NULL){
      _KLOGX2(KLOG_event_set, MQX_COMPONENT_DOES_NOT_EXIST);
      return(MQX_COMPONENT_DOES_NOT_EXIST);
   } /* Endif */
#endif
#if MQX_CHECK_VALIDITY
   if (event_component_ptr->VALID != EVENT_VALID){
      _KLOGX2(KLOG_event_set, MQX_INVALID_COMPONENT_BASE);
      return(MQX_INVALID_COMPONENT_BASE);
   } /* Endif */
#endif

#if MQX_IS_MULTI_PROCESSOR
   if (event_connection_ptr->REMOTE_CPU) {
      if (kernel_data->IPC) {
         /* This open is for a remote processor: forward the set via IPC. */
         (*kernel_data->IPC)(TRUE, event_connection_ptr->REMOTE_CPU,
            KERNEL_EVENTS, IPC_EVENT_SET, 2, event_connection_ptr->EVENT_PTR,
            bit_mask);
         _KLOGX2(KLOG_event_set, kernel_data->ACTIVE_PTR->TASK_ERROR_CODE);
         return(kernel_data->ACTIVE_PTR->TASK_ERROR_CODE);
      } else {
         _KLOGX2(KLOG_event_set, EVENT_NOT_FOUND);
         return(EVENT_NOT_FOUND);
      }/* Endif */
   }/* Endif */
#endif

   _INT_DISABLE();

   event_ptr = event_connection_ptr->EVENT_PTR;
#if MQX_CHECK_VALIDITY
   if (event_ptr->VALID != EVENT_VALID) {
      _INT_ENABLE();
      _KLOGX2(KLOG_event_set, EVENT_INVALID_EVENT);
      return(EVENT_INVALID_EVENT);
   } /* Endif */
#endif

   set_bits = event_ptr->EVENT | bit_mask;

   if (_QUEUE_GET_SIZE(&event_ptr->WAITING_TASKS)) {
      /* Schedule waiting task(s) to run if bits ok */
      conn_ptr = (EVENT_CONNECTION_STRUCT_PTR)
         ((pointer)event_ptr->WAITING_TASKS.NEXT);
      while (conn_ptr != (EVENT_CONNECTION_STRUCT_PTR)
         ((pointer)&event_ptr->WAITING_TASKS))
      {
         /* Save the link before the connection's task may be readied. */
         next_conn_ptr = (EVENT_CONNECTION_STRUCT_PTR)conn_ptr->NEXT;
         /* Wake a waiter that wanted ALL its bits and got them, or
          * wanted ANY bit and at least one is now set. */
         if (((conn_ptr->FLAGS & EVENT_WANTS_ALL) &&
              ((conn_ptr->MASK & set_bits) == conn_ptr->MASK)) ||
             ((!(conn_ptr->FLAGS & EVENT_WANTS_ALL)) &&
              (conn_ptr->MASK & set_bits)))
         {
            new_td_ptr = conn_ptr->TD_PTR;
            if ((new_td_ptr->STATE & STATE_MASK) == EVENT_BLOCKED) {
               /* He may have timed out */
               conn_ptr->FLAGS |= EVENT_OCCURRED;
               _TIME_DEQUEUE(new_td_ptr, kernel_data);
               _TASK_READY(new_td_ptr, kernel_data);
               /* Only ready one task if event is an auto clear event */
               if (event_ptr->AUTO_CLEAR) {
                  set_bits &= ~conn_ptr->MASK;
                  break;
               } /* Endif */
            } /* Endif */
         } /* Endif */
         conn_ptr = next_conn_ptr;
      } /* Endwhile */
   } /* Endif */

   event_ptr->EVENT = set_bits;

   _INT_ENABLE();

   /* May need to let higher priority task run */
   _CHECK_RUN_SCHEDULER();

   _KLOGX2(KLOG_event_set, MQX_OK);
   return(MQX_OK);

} /* Endbody */
/*!
 * \brief Adds a range of virtual memory to a task's virtual MMU context,
 * allocating physical pages from the virtual-page free list.
 *
 * \param[in] task_id    The task to which a virtual context is to be added.
 * \param[in] vaddr      The virtual address to use.
 * \param[in] input_size The size of the region in bytes.
 * \param[in] flags      The MMU flags to use.
 *
 * \return MQX_OK
 * \return MQX_INVALID_TASK_ID (no task descriptor for task_id)
 * \return MQX_MMU_CONTEXT_DOES_NOT_EXIST (task has no MMU context)
 * \return MQX_OUT_OF_MEMORY (not enough free pages, or mapping failed)
 * \return MQX_INVALID_PARAMETER (part of the range is already mapped)
 *
 * \warning Takes the VPAGE_FREELIST_LWSEM for the duration of the call.
 */
_mqx_uint _mmu_add_vcontext
   (
      /* [IN] the task to which a virtual context is to be added */
      _task_id task_id,

      /* [IN] the virtual address to use */
      pointer  vaddr,

      /* [IN] the size */
      _mem_size input_size,

      /* [IN] the MMU flags to use */
      _mqx_uint flags
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR         kernel_data;
   TD_STRUCT_PTR                  td_ptr;
   PSP_VIRTUAL_CONTEXT_STRUCT_PTR context_ptr;
   PSP_PAGE_INFO_STRUCT_PTR       mem_ptr;
   PSP_SUPPORT_STRUCT_PTR         psp_support_ptr;
   uint_32                        page_size;
   int_32                         test_size;
   int_32                         size = (int_32)input_size;
   uint_32                        result;
   uchar_ptr                      taddr;
   pointer                        tmp;

   _GET_KERNEL_DATA(kernel_data);

   td_ptr = _task_get_td(task_id);
   if (td_ptr == NULL) {
      return(MQX_INVALID_TASK_ID);
   }/* Endif */
   context_ptr = td_ptr->MMU_VIRTUAL_CONTEXT_PTR;

   psp_support_ptr = kernel_data->PSP_SUPPORT_PTR;
   page_size = psp_support_ptr->PAGE_SIZE;

   _lwsem_wait(&psp_support_ptr->VPAGE_FREELIST_LWSEM);

   if ((td_ptr->FLAGS & TASK_MMU_CONTEXT_EXISTS) == 0) {
      _lwsem_post(&psp_support_ptr->VPAGE_FREELIST_LWSEM);
      return(MQX_MMU_CONTEXT_DOES_NOT_EXIST);
   } /* Endif */

   /* Fail early if the free list cannot cover the whole request. */
   if (size > (_QUEUE_GET_SIZE(&psp_support_ptr->VPAGE_FREELIST) * page_size)) {
      _lwsem_post(&psp_support_ptr->VPAGE_FREELIST_LWSEM);
      return(MQX_OUT_OF_MEMORY);
   }/* Endif */

   /* Verify virtual memory not in use */
   test_size = size;
   taddr = vaddr;
   while (test_size > 0) {
      /* Use Vtop !! */
      /* A successful translation means the page is already mapped. */
      if (_mmu_vtop(taddr,&tmp) == MQX_OK) {
         _lwsem_post(&psp_support_ptr->VPAGE_FREELIST_LWSEM);
         return(MQX_INVALID_PARAMETER);
      }/* Endif */
      taddr     += page_size;
      test_size -= page_size;
   } /* Endwhile */

   /* Map the range one page at a time, returning the page to the free
    * list if any mapping step fails. */
   while (size > 0) {
      _QUEUE_DEQUEUE(&psp_support_ptr->VPAGE_FREELIST,mem_ptr);
      mem_ptr->VADDR = vaddr;
      result = _mmu_set_vmem_loc_internal(mem_ptr, flags);
      if (result != MQX_OK) {
         _QUEUE_ENQUEUE(&psp_support_ptr->VPAGE_FREELIST,mem_ptr);
         _lwsem_post(&psp_support_ptr->VPAGE_FREELIST_LWSEM);
         return(MQX_OUT_OF_MEMORY);
      } /* Endif */
      if (td_ptr == kernel_data->ACTIVE_PTR) {
         /* The context is live: install the mapping in the MMU now. */
         result = _mmu_add_vregion(mem_ptr->ADDR, mem_ptr->VADDR, page_size,
            flags);
         if (result != MQX_OK) {
            _QUEUE_ENQUEUE(&psp_support_ptr->VPAGE_FREELIST,mem_ptr);
            _lwsem_post(&psp_support_ptr->VPAGE_FREELIST_LWSEM);
            return(MQX_OUT_OF_MEMORY);
         } /* Endif */
      } /* Endif */
      _QUEUE_ENQUEUE(&context_ptr->PAGE_INFO,mem_ptr);
      size -= page_size;
      vaddr = (pointer)((uint_32)vaddr + page_size);
   } /* Endwhile */

   _lwsem_post(&psp_support_ptr->VPAGE_FREELIST_LWSEM);

   return(MQX_OK);

} /* Endbody */