/*
 *  Benchmark task: measures FP context-switch times by hand-crafting two
 *  raw context switches to Floating_point_task_2.  The statement order IS
 *  the measurement -- do not reorder.
 */
rtems_task Floating_point_task_1(
  rtems_task_argument argument
)
{
  Chain_Control  *ready_queues;
  Thread_Control *executing;
  FP_DECLARE;

  /* Stop the timer started by the previous switch into this task. */
  context_switch_restore_1st_fp_time = benchmark_timer_read();

  executing = _Thread_Executing;

  /* Manually pick the FP2 task off the scheduler's ready queue as heir. */
  ready_queues      = (Chain_Control *) _Scheduler.information;
  _Thread_Executing =
    (Thread_Control *) _Chain_First(&ready_queues[FP2_PRIORITY]);

  /* do not force context switch */
  _Thread_Dispatch_necessary = false;

  _Thread_Disable_dispatch();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
  /* Time includes the FP save/restore when the port has FP support. */
  _Context_Save_fp( &executing->fp_context );
  _Context_Restore_fp( &_Thread_Executing->fp_context );
#endif
  _Context_Switch( &executing->Registers, &_Thread_Executing->Registers );
  /* switch to Floating_point_task_2 */

  /* Second measurement: this task's FP state is now "initted" via FP_LOAD. */
  context_switch_save_idle_restore_initted_time = benchmark_timer_read();

  FP_LOAD( 1.0 );

  executing = _Thread_Executing;

  ready_queues      = (Chain_Control *) _Scheduler.information;
  _Thread_Executing =
    (Thread_Control *) _Chain_First(&ready_queues[FP2_PRIORITY]);

  /* do not force context switch */
  _Thread_Dispatch_necessary = false;

  _Thread_Disable_dispatch();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
  _Context_Save_fp( &executing->fp_context );
  _Context_Restore_fp( &_Thread_Executing->fp_context );
#endif
  _Context_Switch( &executing->Registers, &_Thread_Executing->Registers );
  /* switch to Floating_point_task_2 */
}
/**
 * @brief Sets or clears life-cycle protection for the executing thread.
 *
 * While protected, a pending restart/delete of the thread is deferred;
 * when protection is dropped and a life change is pending, the change is
 * started here.
 *
 * @param[in] protect true to set THREAD_LIFE_PROTECTED, false to clear it.
 *
 * @return The previous life protection state of the executing thread.
 */
bool _Thread_Set_life_protection( bool protect )
{
  bool               previous_life_protection;
  ISR_Level          level;
  Per_CPU_Control   *cpu;
  Thread_Control    *executing;
  Thread_Life_state  previous_life_state;

  /* Update the life state under the per-CPU action lock with ISRs disabled. */
  cpu = _Thread_Action_ISR_disable_and_acquire_for_executing( &level );
  executing = cpu->executing;
  previous_life_state = executing->Life.state;
  previous_life_protection = _Thread_Is_life_protected( previous_life_state );

  if ( protect ) {
    executing->Life.state = previous_life_state | THREAD_LIFE_PROTECTED;
  } else {
    executing->Life.state = previous_life_state & ~THREAD_LIFE_PROTECTED;
  }

  _Thread_Action_release_and_ISR_enable( cpu, level );

#if defined(RTEMS_SMP)
  /*
   * On SMP configurations it is possible that a life change of an executing
   * thread is requested, but this thread didn't notice it yet. The life
   * change is first marked in the life state field and then all scheduling and
   * other thread state updates are performed. The last step is to issue an
   * inter-processor interrupt if necessary. Since this takes some time we
   * have to synchronize here.
   */
  if ( !_Thread_Is_life_protected( previous_life_state )
      && _Thread_Is_life_changing( previous_life_state ) ) {
    _Thread_Disable_dispatch();
    _Thread_Enable_dispatch();
  }
#endif

  /* A deferred life change becomes actionable once protection is dropped. */
  if ( !protect && _Thread_Is_life_changing( previous_life_state ) ) {
    _Thread_Disable_dispatch();
    _Thread_Start_life_change_for_executing( executing );
    _Thread_Enable_dispatch();
  }

  return previous_life_protection;
}
/**
 * @brief Initializes a core mutex.
 *
 * When @a initially_locked is true the mutex is created owned by
 * @a executing with a nest count of one; for priority-ceiling or
 * priority-inheritance mutexes the ceiling is applied immediately and a
 * ceiling violation by the creator is rejected.
 *
 * @param[in,out] the_mutex            The mutex to initialize.
 * @param[in]     executing            The creating (and possibly owning) thread.
 * @param[in]     the_mutex_attributes Discipline/ceiling attributes (copied).
 * @param[in]     initially_locked     true to create the mutex locked.
 *
 * @retval CORE_MUTEX_STATUS_SUCCESSFUL on success.
 * @retval CORE_MUTEX_STATUS_CEILING_VIOLATED if the creator's priority is
 *         above (numerically less than) the ceiling.
 */
CORE_mutex_Status _CORE_mutex_Initialize(
  CORE_mutex_Control           *the_mutex,
  Thread_Control               *executing,
  const CORE_mutex_Attributes  *the_mutex_attributes,
  bool                          initially_locked
)
{
/* Add this to the RTEMS environment later ?????????
  rtems_assert( initial_lock == CORE_MUTEX_LOCKED ||
                initial_lock == CORE_MUTEX_UNLOCKED );
 */

  the_mutex->Attributes = *the_mutex_attributes;

  if ( initially_locked ) {
    the_mutex->nest_count = 1;
    the_mutex->holder     = executing;

    if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes )
        || _CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ) {
      Priority_Control ceiling = the_mutex->Attributes.priority_ceiling;

      /*
       * The mutex initialization is only protected by the allocator lock in
       * general. Disable thread dispatching before the priority check to
       * prevent interference with priority inheritance.
       */
      _Thread_Disable_dispatch();

      /* Lower numeric value means higher priority in RTEMS. */
      if ( executing->current_priority < ceiling ) {
        _Thread_Enable_dispatch();
        return CORE_MUTEX_STATUS_CEILING_VIOLATED;
      }

#ifdef __RTEMS_STRICT_ORDER_MUTEX__
      _Chain_Prepend_unprotected(
        &executing->lock_mutex,
        &the_mutex->queue.lock_queue
      );
      the_mutex->queue.priority_before = executing->current_priority;
#endif

      executing->resource_count++;
      /* Raise the owner to the ceiling priority immediately. */
      _Thread_Change_priority( executing, ceiling, false );
      _Thread_Enable_dispatch();
    }
  } else {
    the_mutex->nest_count = 0;
    the_mutex->holder     = NULL;
  }

  _Thread_queue_Initialize(
    &the_mutex->Wait_queue,
    _CORE_mutex_Is_fifo( the_mutex_attributes ) ?
      THREAD_QUEUE_DISCIPLINE_FIFO : THREAD_QUEUE_DISCIPLINE_PRIORITY,
    STATES_WAITING_FOR_MUTEX,
    CORE_MUTEX_TIMEOUT
  );

  return CORE_MUTEX_STATUS_SUCCESSFUL;
}
/*
 *  sem_open
 *
 *  Opens a named semaphore.  Used in conjunction with the sem_close
 *  and sem_unlink commands.
 *
 *  11.2.3 Initialize/Open a Named Semaphore, P1003.1b-1993, p.221
 *
 *  NOTE: When oflag is O_CREAT, then optional third and fourth
 *        parameters must be present.
 *
 *  NOTE(review): this excerpt shows only the start of the function;
 *  the remainder (name lookup, creation, dispatch enable) continues
 *  past the visible chunk.
 */

sem_t *sem_open(
  const char *name,
  int         oflag,
  ...
  /* mode_t mode, */
  /* unsigned int value */
)
{
  /*
   * mode is set but never used. GCC gives a warning for this
   * and we need to tell GCC not to complain. But we have to
   * have it because we have to work through the variable
   * arguments to get to attr.
   */
  mode_t                     mode RTEMS_COMPILER_UNUSED_ATTRIBUTE;
  va_list                    arg;
  unsigned int               value = 0;
  int                        status;
  Objects_Id                 the_semaphore_id;
  POSIX_Semaphore_Control   *the_semaphore;
  Objects_Locations          location;
  size_t                     name_len;

  /* Dispatch stays disabled across the lookup/create sequence below. */
  _Thread_Disable_dispatch();

  /* O_CREAT promises the optional mode and value varargs are present. */
  if ( oflag & O_CREAT ) {
    va_start(arg, oflag);
    mode = va_arg( arg, mode_t );
    value = va_arg( arg, unsigned int );
    va_end(arg);
  }
/**
 * @brief Sets the time of day from a timestamp.
 *
 * Adjusts the seconds watchdog chain for the jump (backward or forward),
 * then installs the new time under the TOD lock.  Caller-visible side
 * effects: _TOD.now, seconds_trigger and is_set are updated.
 *
 * @param[in] tod_as_timestamp The new time of day.
 */
void _TOD_Set_with_timestamp(
  const Timestamp_Control *tod_as_timestamp
)
{
  TOD_Control *tod = &_TOD;
  uint32_t nanoseconds = _Timestamp_Get_nanoseconds( tod_as_timestamp );
  Watchdog_Interval seconds_next = _Timestamp_Get_seconds( tod_as_timestamp );
  Watchdog_Interval seconds_now;
  ISR_lock_Context lock_context;

  _Thread_Disable_dispatch();

  seconds_now = _TOD_Seconds_since_epoch();

  /* Re-aim pending seconds-based watchdogs at the new epoch offset. */
  if ( seconds_next < seconds_now )
    _Watchdog_Adjust_seconds( WATCHDOG_BACKWARD, seconds_now - seconds_next );
  else
    _Watchdog_Adjust_seconds( WATCHDOG_FORWARD, seconds_next - seconds_now );

  /* Only the timestamp itself is guarded by the TOD ISR lock. */
  _TOD_Acquire( tod, &lock_context );
  tod->now = *tod_as_timestamp;
  _TOD_Release( tod, &lock_context );

  tod->seconds_trigger = nanoseconds;
  tod->is_set = true;

  _Thread_Enable_dispatch();
}
/*
 *  sem_open - open a named POSIX semaphore (11.2.3, P1003.1b-1993).
 *
 *  NOTE(review): this excerpt shows only the start of the function;
 *  the remainder continues past the visible chunk.
 */
sem_t *sem_open(
  const char *name,
  int         oflag,
  ...
  /* mode_t mode, */
  /* unsigned int value */
)
{
  va_list                    arg;
  mode_t                     mode;
  unsigned int               value = 0;
  int                        status;
  Objects_Id                 the_semaphore_id;
  POSIX_Semaphore_Control   *the_semaphore;
  Objects_Locations          location;
  size_t                     name_len;

  /* Dispatch stays disabled across the lookup/create sequence below. */
  _Thread_Disable_dispatch();

  /* O_CREAT promises the optional mode and value varargs are present. */
  if ( oflag & O_CREAT ) {
    va_start(arg, oflag);
    mode = va_arg( arg, mode_t );
    value = va_arg( arg, unsigned int );
    va_end(arg);
  }
/**
 * @brief Sets the time of day from a timestamp (deactivate/activate variant).
 *
 * Deactivates the TOD tick processing, re-aims the seconds watchdog chain
 * for the jump, installs the new time, then reactivates.
 *
 * @param[in] tod The new time of day.
 */
void _TOD_Set_with_timestamp(
  const Timestamp_Control *tod
)
{
  uint32_t nanoseconds = _Timestamp_Get_nanoseconds( tod );
  Watchdog_Interval seconds_next = _Timestamp_Get_seconds( tod );
  Watchdog_Interval seconds_now;

  _Thread_Disable_dispatch();
  _TOD_Deactivate();

  seconds_now = _TOD_Seconds_since_epoch();

  /* Re-aim pending seconds-based watchdogs at the new epoch offset. */
  if ( seconds_next < seconds_now )
    _Watchdog_Adjust_seconds( WATCHDOG_BACKWARD, seconds_now - seconds_next );
  else
    _Watchdog_Adjust_seconds( WATCHDOG_FORWARD, seconds_next - seconds_now );

  _TOD.now = *tod;
  _TOD.seconds_trigger = nanoseconds;
  _TOD.is_set = true;

  _TOD_Activate();
  _Thread_Enable_dispatch();
}
/*
 *  Benchmark task: partner of Floating_point_task_1.  Records the FP
 *  save/restore switch time, performs one raw switch back, then records
 *  the final measurement and finishes the test.  Statement order IS the
 *  measurement -- do not reorder.
 */
rtems_task Floating_point_task_2(
  rtems_task_argument argument
)
{
  Thread_Control *executing;
  FP_DECLARE;

  /* Stop the timer started by Floating_point_task_1's switch to us. */
  context_switch_save_restore_idle_time = benchmark_timer_read();

  executing = _Thread_Executing;

  /* Manually select the FP1 task from the ready chain as the heir. */
  _Thread_Executing =
     (Thread_Control *) _Thread_Ready_chain[FP1_PRIORITY].first;

  /* Dirty the FP unit so the switch back must save real FP state. */
  FP_LOAD( 1.0 );

  /* do not force context switch */
  _Context_Switch_necessary = false;

  _Thread_Disable_dispatch();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
  _Context_Save_fp( &executing->fp_context );
  _Context_Restore_fp( &_Thread_Executing->fp_context );
#endif
  _Context_Switch( &executing->Registers, &_Thread_Executing->Registers );
  /* switch to Floating_point_task_1 */

  context_switch_save_restore_initted_time = benchmark_timer_read();

  complete_test();
}
/*
 *  Benchmark task: records the no-FP dispatch time, then performs two raw
 *  context switches (to the low priority task, then to Low_tcb) with the
 *  timer armed just before each.  Statement order IS the measurement.
 */
rtems_task Middle_task(
  rtems_task_argument argument
)
{
  thread_dispatch_no_fp_time = benchmark_timer_read();

  _Thread_Set_state( _Thread_Executing, STATES_SUSPENDED );

  Middle_tcb   = _Thread_Executing;

  /* Manually select the low priority task as heir. */
  _Thread_Executing =
        (Thread_Control *) _Thread_Ready_chain[LOW_PRIORITY].first;

  /* do not force context switch */
  _Context_Switch_necessary = false;

  _Thread_Disable_dispatch();

  benchmark_timer_initialize();
  _Context_Switch( &Middle_tcb->Registers, &_Thread_Executing->Registers );

  benchmark_timer_initialize();
  _Context_Switch(&Middle_tcb->Registers, &Low_tcb->Registers);
}
/*
 *  Benchmark task: times the basic critical-section primitives
 *  (interrupt disable/flash/enable, dispatch disable/enable, set state)
 *  and finally dispatches to Middle_task.  Each benchmark_timer_read()
 *  closes the measurement opened by the preceding initialize.
 */
rtems_task High_task(
  rtems_task_argument argument
)
{
  rtems_interrupt_level level;

  benchmark_timer_initialize();
  rtems_interrupt_disable( level );
  isr_disable_time = benchmark_timer_read();

  benchmark_timer_initialize();
  rtems_interrupt_flash( level );
  isr_flash_time = benchmark_timer_read();

  benchmark_timer_initialize();
  rtems_interrupt_enable( level );
  isr_enable_time = benchmark_timer_read();

  benchmark_timer_initialize();
  _Thread_Disable_dispatch();
  thread_disable_dispatch_time = benchmark_timer_read();

  benchmark_timer_initialize();
  _Thread_Enable_dispatch();
  thread_enable_dispatch_time = benchmark_timer_read();

  benchmark_timer_initialize();
  _Thread_Set_state( _Thread_Executing, STATES_SUSPENDED );
  thread_set_state_time = benchmark_timer_read();

  _Context_Switch_necessary = true;

  benchmark_timer_initialize();
  _Thread_Dispatch();           /* dispatches Middle_task */
}
/*
 *  rot_rdq - Rotate Tasks on the Ready Queue (ITRON)
 *
 *  Rotates the ready queue at the given ITRON priority level.  Rotating
 *  the caller's own level is implemented as a simple processor yield.
 *
 *  Returns E_PAR for a priority outside the valid range, E_OK otherwise.
 */
ER rot_rdq(
  PRI tskpri
)
{
  PRI self_priority;

  /* Validate the requested ITRON priority level first. */
  if ( ( tskpri <= 0 ) || ( tskpri >= PRIORITY_MAXIMUM - 1 ) )
    return E_PAR;

  _Thread_Disable_dispatch();

  self_priority =
    _ITRON_Task_Core_to_Priority( _Thread_Executing->current_priority );

  if ( self_priority == tskpri ) {
    /*
     * Yield of processor will rotate the queue for this processor.
     */
    _Thread_Yield_processor();
  } else {
    _Thread_Rotate_Ready_Queue( _ITRON_Task_Core_to_Priority( tskpri ) );
  }

  _Thread_Enable_dispatch();

  return E_OK;
}
int sem_unlink( const char *name ) { int status; register POSIX_Semaphore_Control *the_semaphore; Objects_Id the_semaphore_id; size_t name_len; _Thread_Disable_dispatch(); status = _POSIX_Semaphore_Name_to_id( name, &the_semaphore_id, &name_len ); if ( status != 0 ) { _Thread_Enable_dispatch(); rtems_set_errno_and_return_minus_one( status ); } the_semaphore = (POSIX_Semaphore_Control *) _Objects_Get_local_object( &_POSIX_Semaphore_Information, _Objects_Get_index( the_semaphore_id ) ); the_semaphore->linked = false; _POSIX_Semaphore_Namespace_remove( the_semaphore ); _POSIX_Semaphore_Delete( the_semaphore ); _Thread_Enable_dispatch(); return 0; }
/*
 *  sched_yield - voluntarily relinquish the processor
 *  (13.3.5, P1003.1b-1993).  Always succeeds.
 */
int sched_yield( void )
{
  /* The yield must happen inside a dispatch-disabled critical section. */
  _Thread_Disable_dispatch();
    _Thread_Yield_processor();
  _Thread_Enable_dispatch();

  return 0;
}
/**
 * @brief Brings a secondary processor into multitasking.
 *
 * Synchronizes with the boot processor via the per-CPU state machine,
 * then hand-installs the scheduler-selected heir as the executing thread
 * and performs the first context switch on this core.  Never returns.
 */
void rtems_smp_secondary_cpu_initialize( void )
{
  Per_CPU_Control *self_cpu = _Per_CPU_Get();
  Thread_Control  *heir;

#if defined(RTEMS_DEBUG)
  printk( "Made it to %d -- ", _Per_CPU_Get_index( self_cpu ) );
#endif

  /* Signal readiness, then wait for the boot CPU to release us. */
  _Per_CPU_Change_state( self_cpu, PER_CPU_STATE_READY_TO_BEGIN_MULTITASKING );

  _Per_CPU_Wait_for_state( self_cpu, PER_CPU_STATE_BEGIN_MULTITASKING );

  _Per_CPU_Change_state( self_cpu, PER_CPU_STATE_UP );

  /*
   *  The Scheduler will have selected the heir thread for each CPU core.
   *  Now we have been requested to perform the first context switch.  So
   *  force a switch to the designated heir and make it executing on
   *  THIS core.
   */
  heir = self_cpu->heir;
  heir->is_executing = true;
  self_cpu->executing->is_executing = false;
  self_cpu->executing = heir;
  self_cpu->dispatch_necessary = false;

  /*
   *  Threads begin execution in the _Thread_Handler() function.   This
   *  function will call _Thread_Enable_dispatch().
   */
  _Thread_Disable_dispatch();
  _CPU_Context_switch_to_first_task_smp( &heir->Registers );
}
/**
 * @brief Prints a report on every watchdog on the given delta chain.
 *
 * Walks the chain with dispatching and interrupts disabled so the chain
 * cannot change underneath the traversal.
 *
 * @param[in] name   Label printed with the report.
 * @param[in] header The watchdog chain to dump.
 */
void _Watchdog_Report_chain(
  const char        *name,
  Chain_Control     *header
)
{
  ISR_Level   level;
  Chain_Node *current;
  Chain_Node *tail;

  _Thread_Disable_dispatch();
  _ISR_Disable( level );

  printk( "Watchdog Chain: %s %p\n", name, header );

  if ( _Chain_Is_empty( header ) ) {
    printk( "Chain is empty\n" );
  } else {
    tail    = _Chain_Tail( header );
    current = _Chain_First( header );

    while ( current != tail ) {
      _Watchdog_Report( NULL, (Watchdog_Control *) current );
      current = current->next;
    }

    printk( "== end of %s \n", name );
  }

  _ISR_Enable( level );
  _Thread_Enable_dispatch();
}
/**
 * @brief Acquires the given API mutex.
 *
 * Enables thread life protection before seizing the mutex so the holder
 * cannot be deleted while it owns the lock; the previous protection
 * state is stashed in the mutex on the outermost acquisition so
 * _API_Mutex_Unlock() can restore it.
 *
 * @param[in,out] the_mutex The API mutex to lock.
 */
void _API_Mutex_Lock( API_Mutex_Control *the_mutex )
{
  bool      previous_thread_life_protection;
  ISR_Level level;

  previous_thread_life_protection = _Thread_Set_life_protection( true );

#if defined(RTEMS_SMP)
  _Thread_Disable_dispatch();
#endif

  _ISR_Disable( level );

  /* _CORE_mutex_Seize() releases the ISR level internally. */
  _CORE_mutex_Seize(
    &the_mutex->Mutex,
    _Thread_Executing,
    the_mutex->Object.id,
    true,
    0,
    level
  );

  /* Only the outermost (non-nested) acquire records the previous state. */
  if ( the_mutex->Mutex.nest_count == 1 ) {
    the_mutex->previous_thread_life_protection =
      previous_thread_life_protection;
  }

#if defined(RTEMS_SMP)
  _Thread_Enable_dispatch();
#endif
}
/*
 *  Benchmark task: records the no-FP dispatch time, then performs two raw
 *  context switches (to the low priority task, then to Low_tcb) with the
 *  timer armed just before each.  Statement order IS the measurement.
 */
rtems_task Middle_task(
  rtems_task_argument argument
)
{
  Chain_Control *ready_queues;

  thread_dispatch_no_fp_time = benchmark_timer_read();

  _Thread_Set_state( _Thread_Executing, STATES_SUSPENDED );

  Middle_tcb   = _Thread_Executing;

  /* Manually pick the low priority task off the scheduler's ready queue. */
  ready_queues      = (Chain_Control *) _Scheduler.information;
  _Thread_Executing =
    (Thread_Control *) _Chain_First(&ready_queues[LOW_PRIORITY]);

  /* do not force context switch */
  _Thread_Dispatch_necessary = false;

  _Thread_Disable_dispatch();

  benchmark_timer_initialize();
  _Context_Switch( &Middle_tcb->Registers, &_Thread_Executing->Registers );

  benchmark_timer_initialize();
  _Context_Switch(&Middle_tcb->Registers, &Low_tcb->Registers);
}
/*
 *  rtems_rate_monotonic_reset_all_statistics
 *
 *  Resets the statistics of every rate monotonic period object.  Ids
 *  that do not correspond to an active period simply return an error
 *  from the per-period reset, which is ignored.
 */
void rtems_rate_monotonic_reset_all_statistics( void )
{
  Objects_Id        id;
  rtems_status_code status;

  /*
   * Prevent allocation or deallocation of any of the periods while
   * we are cycling.  Also this is an optimization which ensures that
   * we only disable/enable once.  The call to
   * rtems_rate_monotonic_reset_statistics will be in a nested dispatch
   * disabled critical section.
   */
  _Thread_Disable_dispatch();

  /*
   * Try every possible id; inactive periods just report an error,
   * which is harmless here.
   */
  id = _Rate_monotonic_Information.minimum_id;
  while ( id <= _Rate_monotonic_Information.maximum_id ) {
    status = rtems_rate_monotonic_reset_statistics( id );
    id++;
  }

  /*
   * Done so exit thread dispatching disabled critical section.
   */
  _Thread_Enable_dispatch();
}
/**
 * @brief Creates a timer object.
 *
 * Validates the arguments, allocates a timer, marks it dormant and
 * registers it in the object table.
 *
 * @param[in]  name A valid object name for the timer.
 * @param[out] id   Receives the id of the created timer.
 *
 * @retval RTEMS_SUCCESSFUL       Timer created; *id is set.
 * @retval RTEMS_INVALID_NAME     The name is not valid.
 * @retval RTEMS_INVALID_ADDRESS  id is NULL.
 * @retval RTEMS_TOO_MANY         No timer objects are available.
 */
rtems_status_code rtems_timer_create(
  rtems_name  name,
  rtems_id   *id
)
{
  Timer_Control *new_timer;

  /* Name validity takes precedence over the id pointer check. */
  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

  _Thread_Disable_dispatch();         /* to prevent deletion */

  new_timer = _Timer_Allocate();
  if ( !new_timer ) {
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }

  /* New timers start dormant with an idle watchdog. */
  new_timer->the_class = TIMER_DORMANT;
  _Watchdog_Initialize( &new_timer->Ticker, NULL, 0, NULL );

  _Objects_Open(
    &_Timer_Information,
    &new_timer->Object,
    (Objects_Name) name
  );

  *id = new_timer->Object.id;

  _Thread_Enable_dispatch();
  return RTEMS_SUCCESSFUL;
}
/*
 *  sched_yield - voluntarily relinquish the processor
 *  (13.3.5, P1003.1b-1993).  Delegates to the scheduler and always
 *  succeeds.
 */
int sched_yield( void )
{
  /* The yield must happen inside a dispatch-disabled critical section. */
  _Thread_Disable_dispatch();
    _Scheduler_Yield( _Thread_Executing );
  _Thread_Enable_dispatch();

  return 0;
}
/*
 *  Timer Service Routine
 *
 *  If we are in an ISR, then this is a normal clock tick.
 *  If we are not, then it is the test case.
 */
epos_timer_service_routine test_unblock_task(
  epos_id  timer,
  void    *arg
)
{
  bool              in_isr;
  epos_status_code  status;

  in_isr = epos_interrupt_is_in_progress();
  status = epos_task_is_suspended( blocked_task_id );

  /* From a real clock tick: just re-arm and wait for the test case. */
  if ( in_isr ) {
    status = epos_timer_fire_after( timer, 1, test_unblock_task, NULL );
    directive_failed( status, "timer_fire_after failed" );
    return;
  }

  /* Task not suspended yet: re-arm and try again on the next tick. */
  if ( (status != RTEMS_ALREADY_SUSPENDED) ) {
    status = epos_timer_fire_after( timer, 1, test_unblock_task, NULL );
    directive_failed( status, "timer_fire_after failed" );
    return;
  }

  /* Test case proper: resume the blocked task with dispatch disabled. */
  blocked_task_status = 2;
  _Thread_Disable_dispatch();
  status = epos_task_resume( blocked_task_id );
  /* Unnest (not enable) so no dispatch happens inside this routine. */
  _Thread_Unnest_dispatch();
  directive_failed( status, "epos_task_resume" );
}
void pthread_testcancel( void ) { POSIX_API_Control *thread_support; Thread_Control *executing; bool cancel = false; /* * Don't even think about deleting a resource from an ISR. * Besides this request is supposed to be for _Thread_Executing * and the ISR context is not a thread. */ if ( _ISR_Is_in_progress() ) return; _Thread_Disable_dispatch(); executing = _Thread_Executing; thread_support = executing->API_Extensions[ THREAD_API_POSIX ]; if ( thread_support->cancelability_state == PTHREAD_CANCEL_ENABLE && thread_support->cancelation_requested ) cancel = true; _Thread_Enable_dispatch(); if ( cancel ) _POSIX_Thread_Exit( executing, PTHREAD_CANCELED ); }
/*
 *  Timer Service Routine
 *
 *  If we are in an ISR, then this is a normal clock tick.
 *  If we are not, then it is the test case.
 */
rtems_timer_service_routine test_unblock_task(
  rtems_id  timer,
  void     *arg
)
{
  bool              in_isr;
  rtems_status_code status;

  in_isr = rtems_interrupt_is_in_progress();
  status = rtems_task_is_suspended( blocked_task_id );

  /* From a real clock tick: just re-arm and wait for the test case. */
  if ( in_isr ) {
    status = rtems_timer_fire_after( timer, 1, test_unblock_task, NULL );
    directive_failed( status, "timer_fire_after failed" );
    return;
  }

  /* Task not suspended yet: re-arm and try again on the next tick. */
  if ( (status != RTEMS_ALREADY_SUSPENDED) ) {
    status = rtems_timer_fire_after( timer, 1, test_unblock_task, NULL );
    directive_failed( status, "timer_fire_after failed" );
    return;
  }

  /* Test case proper: resume the blocked task with dispatch disabled. */
  blocked_task_status = 2;
  _Thread_Disable_dispatch();
  status = rtems_task_resume( blocked_task_id );
  /* Unnest (not enable) so no dispatch happens inside this routine. */
  _Thread_Unnest_dispatch();
#if defined( RTEMS_SMP )
  /* On SMP the resume happens at thread dispatch level one. */
  directive_failed_with_level( status, "rtems_task_resume", 1 );
#else
  directive_failed( status, "rtems_task_resume" );
#endif
}
/**
 * @brief Creates a user extension set.
 *
 * Validates the arguments, allocates an extension object, installs the
 * caller's extension table and registers the object.
 *
 * @param[in]  name            A valid object name for the extension set.
 * @param[in]  extension_table The user extension callouts.
 * @param[out] id              Receives the id of the created extension set.
 *
 * @retval RTEMS_SUCCESSFUL       Extension set created; *id is set.
 * @retval RTEMS_INVALID_ADDRESS  id is NULL.
 * @retval RTEMS_INVALID_NAME     The name is not valid.
 * @retval RTEMS_TOO_MANY         No extension objects are available.
 */
rtems_status_code rtems_extension_create(
  rtems_name                    name,
  const rtems_extensions_table *extension_table,
  rtems_id                     *id
)
{
  Extension_Control *new_extension;

  /* The id pointer check takes precedence over name validity here. */
  if ( !id )
    return RTEMS_INVALID_ADDRESS;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  _Thread_Disable_dispatch();         /* to prevent deletion */

  new_extension = _Extension_Allocate();
  if ( !new_extension ) {
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }

  _User_extensions_Add_set_with_table(
    &new_extension->Extension,
    extension_table
  );

  _Objects_Open(
    &_Extension_Information,
    &new_extension->Object,
    (Objects_Name) name
  );

  *id = new_extension->Object.id;

  _Thread_Enable_dispatch();
  return RTEMS_SUCCESSFUL;
}
/*
 *  rtems_monitor_mpci_next - iterator step for the monitor's MPCI "table".
 *
 *  There is at most one MPCI configuration, so only index 0 succeeds and
 *  only when a multiprocessing table is configured.
 *
 *  NOTE(review): on success this returns with thread dispatching left
 *  disabled -- presumably the monitor's canonicalize/next driver performs
 *  the matching enable; confirm against the caller.
 */
const void *
rtems_monitor_mpci_next(
    void                  *object_info,
    rtems_monitor_mpci_t  *canonical_mpci,
    rtems_id              *next_id
)
{
    const rtems_configuration_table *c = &Configuration;
    int n = rtems_object_id_get_index(*next_id);

    if ((n < 1) && c->User_multiprocessing_table)
    {
        _Thread_Disable_dispatch();
        *next_id += 1;
        return (void *) c;
    }

    /* Past the single entry, or no MP configuration: end the iteration. */
    *next_id = RTEMS_OBJECT_ID_FINAL;
    return 0;
}
/*
 *  Exercises the SMP scheduler unblock operation: starts a helper task,
 *  then, with dispatching disabled, drives test_case_unblock_op() through
 *  every state in states[] and finally restores the executing thread's
 *  priority before cleaning up.
 */
static void test_unblock_op(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  Thread_Control *other;
  size_t i;

  task_id = start_task(3);

  /* Scheduler internals may only be poked with dispatching disabled. */
  _Thread_Disable_dispatch();

  executing = _Thread_Executing;
  executing_node = _Scheduler_SMP_Thread_get_node(executing);
  other = get_thread_by_id(task_id);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    test_case_unblock_op(
      executing,
      executing_node,
      other,
      states[i]
    );
  }

  /* Restore priority 1; the executing thread must end up scheduled. */
  _Thread_Change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Enable_dispatch();

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
void pthread_cleanup_push( void (*routine)( void * ), void *arg ) { POSIX_Cancel_Handler_control *handler; Chain_Control *handler_stack; POSIX_API_Control *thread_support; /* * The POSIX standard does not address what to do when the routine * is NULL. It also does not address what happens when we cannot * allocate memory or anything else bad happens. */ if ( !routine ) return; _Thread_Disable_dispatch(); handler = _Workspace_Allocate( sizeof( POSIX_Cancel_Handler_control ) ); if ( handler ) { thread_support = _Thread_Executing->API_Extensions[ THREAD_API_POSIX ]; handler_stack = &thread_support->Cancellation_Handlers; handler->routine = routine; handler->arg = arg; _Chain_Append( handler_stack, &handler->Node ); } _Thread_Enable_dispatch(); }
/*
 *  clock_settime - set the specified clock (14.2.1, P1003.1b-1993).
 *
 *  Only CLOCK_REALTIME is settable; CPU-time clocks report ENOSYS and
 *  anything else EINVAL.  Times before 1988 are rejected (RTEMS epoch).
 */
int clock_settime(
  clockid_t              clock_id,
  const struct timespec *tp
)
{
  if ( !tp )
    rtems_set_errno_and_return_minus_one( EINVAL );

  if ( clock_id == CLOCK_REALTIME ) {
    /* The RTEMS TOD epoch starts in 1988; earlier times are invalid. */
    if ( tp->tv_sec < TOD_SECONDS_1970_THROUGH_1988 )
      rtems_set_errno_and_return_minus_one( EINVAL );

    _Thread_Disable_dispatch();
      _TOD_Set( tp );
    _Thread_Enable_dispatch();

    return 0;
  }

#ifdef _POSIX_CPUTIME
  if ( clock_id == CLOCK_PROCESS_CPUTIME_ID )
    rtems_set_errno_and_return_minus_one( ENOSYS );
#endif
#ifdef _POSIX_THREAD_CPUTIME
  if ( clock_id == CLOCK_THREAD_CPUTIME_ID )
    rtems_set_errno_and_return_minus_one( ENOSYS );
#endif

  rtems_set_errno_and_return_minus_one( EINVAL );
}
/*
 *  Rendezvous with the other processors at the giant barrier, then take
 *  the Giant lock via a dispatch-disable.
 */
static void wait_for_giant(void)
{
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

  /* All CPU_COUNT processors must arrive before anyone proceeds. */
  _SMP_barrier_Wait(&giant_barrier, &barrier_state, CPU_COUNT);

  _Thread_Disable_dispatch();
}
/**
 * @brief Maps an object id to its local control block.
 *
 * On success returns the object with thread dispatching DISABLED (the
 * caller must re-enable it) and *location set to OBJECTS_LOCAL.  On
 * failure returns NULL (or a remote object on multiprocessing
 * configurations) with *location set accordingly.
 *
 * @param[in]  information The object class information block.
 * @param[in]  id          The id to look up.
 * @param[out] location    OBJECTS_LOCAL, OBJECTS_ERROR, or (MP) remote.
 */
Objects_Control *_Objects_Get(
  Objects_Information *information,
  Objects_Id           id,
  Objects_Locations   *location
)
{
  Objects_Control *the_object;
  uint32_t         index;

  /*
   * Extract the index portion of an Id in a way that produces a valid
   * index for objects within this class and an invalid value for objects
   * outside this class.
   *
   * If the Id matches the api, class, and node but index portion is 0,
   * then the subtraction will underflow and the addition of 1 will
   * result in a 0 index.  The zeroth element in the local_table is
   * always NULL.
   *
   * If the Id is valid but the object has not been created yet, then
   * the local_table entry will be NULL.
   */
  index = id - information->minimum_id + 1;

  /*
   * If the index is less than maximum, then it is OK to use it to
   * index into the local_table array.
   */
  if ( index <= information->maximum ) {
    /* Dispatching stays disabled on the successful return path. */
    _Thread_Disable_dispatch();
    if ( (the_object = information->local_table[ index ]) != NULL ) {
      *location = OBJECTS_LOCAL;
      return the_object;
    }

    /*
     * Valid Id for this API, Class and Node but the object has not
     * been allocated yet.
     */
    _Thread_Enable_dispatch();
    *location = OBJECTS_ERROR;
    return NULL;
  }

  /*
   * Object Id is not within this API and Class on this node.  So
   * it may be global in a multiprocessing system.  But it is clearly
   * invalid on a single processor system.
   */
  *location = OBJECTS_ERROR;

#if defined(RTEMS_MULTIPROCESSING)
  _Objects_MP_Is_remote( information, id, location, &the_object );
  return the_object;
#else
  return NULL;
#endif
}