rtems_status_code rtems_timer_create(
  rtems_name  name,
  rtems_id   *id
)
{
  Timer_Control *the_timer;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

  the_timer = _Timer_Allocate();

  if ( !the_timer ) {
    _Objects_Allocator_unlock();
    return RTEMS_TOO_MANY;
  }

  the_timer->the_class = TIMER_DORMANT;
  _Watchdog_Preinitialize( &the_timer->Ticker, _Per_CPU_Get_snapshot() );

  _Objects_Open(
    &_Timer_Information,
    &the_timer->Object,
    (Objects_Name) name
  );

  *id = the_timer->Object.id;
  _Objects_Allocator_unlock();
  return RTEMS_SUCCESSFUL;
}
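/*
 * Illustrative caller-side sketch, not taken from the source above: create a
 * Classic API timer and arm it as a one-shot.  The service routine, the name
 * 'TMR1' and the 100-tick interval are assumptions made for the example.
 */
#include <rtems.h>

/* Timer service routines run from the clock tick context. */
static rtems_timer_service_routine example_tsr( rtems_id timer_id, void *arg )
{
  (void) timer_id;
  (void) arg;
  /* work to perform when the timer fires */
}

static void create_and_fire_timer( void )
{
  rtems_id          timer_id;
  rtems_status_code sc;

  sc = rtems_timer_create( rtems_build_name( 'T', 'M', 'R', '1' ), &timer_id );
  if ( sc != RTEMS_SUCCESSFUL ) {
    /* RTEMS_INVALID_NAME, RTEMS_INVALID_ADDRESS or RTEMS_TOO_MANY */
    return;
  }

  /* Fire once, 100 clock ticks from now. */
  (void) rtems_timer_fire_after( timer_id, 100, example_tsr, NULL );
}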
rtems_status_code rtems_extension_create(
  rtems_name                    name,
  const rtems_extensions_table *extension_table,
  rtems_id                     *id
)
{
  Extension_Control *the_extension;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  _Thread_Disable_dispatch();         /* to prevent deletion */

  the_extension = _Extension_Allocate();

  if ( !the_extension ) {
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }

  _User_extensions_Add_set_with_table(
    &the_extension->Extension,
    extension_table
  );

  _Objects_Open(
    &_Extension_Information,
    &the_extension->Object,
    (Objects_Name) name
  );

  *id = the_extension->Object.id;
  _Thread_Enable_dispatch();
  return RTEMS_SUCCESSFUL;
}
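/*
 * Illustrative sketch, not taken from the source above: install a user
 * extension set whose only member is a task switch handler.  The handler
 * name and the name 'TRCE' are assumptions; unset table entries stay NULL,
 * which means "no handler" for that extension point.
 */
#include <rtems.h>

static void trace_switch_extension( rtems_tcb *executing, rtems_tcb *heir )
{
  (void) executing;
  (void) heir;
  /* called on every context switch */
}

static const rtems_extensions_table trace_extensions = {
  .thread_switch = trace_switch_extension
};

static void install_trace_extension( void )
{
  rtems_id          ext_id;
  rtems_status_code sc;

  sc = rtems_extension_create(
    rtems_build_name( 'T', 'R', 'C', 'E' ),
    &trace_extensions,
    &ext_id
  );
  (void) sc;
}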
rtems_status_code rtems_region_create(
  rtems_name          name,
  void               *starting_address,
  uintptr_t           length,
  uintptr_t           page_size,
  rtems_attribute     attribute_set,
  rtems_id           *id
)
{
  rtems_status_code  return_status;
  Region_Control    *the_region;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !starting_address )
    return RTEMS_INVALID_ADDRESS;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

  the_region = _Region_Allocate();

  if ( !the_region )
    return_status = RTEMS_TOO_MANY;
  else {
    _Thread_queue_Initialize( &the_region->Wait_queue );

    if ( _Attributes_Is_priority( attribute_set ) ) {
      the_region->wait_operations = &_Thread_queue_Operations_priority;
    } else {
      the_region->wait_operations = &_Thread_queue_Operations_FIFO;
    }

    the_region->maximum_segment_size = _Heap_Initialize(
      &the_region->Memory,
      starting_address,
      length,
      page_size
    );

    if ( !the_region->maximum_segment_size ) {
      _Region_Free( the_region );
      return_status = RTEMS_INVALID_SIZE;
    } else {
      the_region->attribute_set = attribute_set;

      _Objects_Open(
        &_Region_Information,
        &the_region->Object,
        (Objects_Name) name
      );

      *id = the_region->Object.id;
      return_status = RTEMS_SUCCESSFUL;
    }
  }

  _Objects_Allocator_unlock();
  return return_status;
}
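/*
 * Illustrative sketch, not taken from the source above: create a region over
 * a static memory area and carve a segment out of it.  The area size, page
 * size and segment size are assumptions made for the example.
 */
#include <rtems.h>

#define AREA_SIZE ( 16 * 1024 )

static char region_area[ AREA_SIZE ];

static void region_example( void )
{
  rtems_id          region_id;
  void             *segment;
  rtems_status_code sc;

  sc = rtems_region_create(
    rtems_build_name( 'R', 'G', 'N', '1' ),
    region_area,
    AREA_SIZE,
    16,                          /* page size */
    RTEMS_DEFAULT_ATTRIBUTES,    /* FIFO wait queue */
    &region_id
  );
  if ( sc != RTEMS_SUCCESSFUL )
    return;

  /* Obtain a 256-byte segment, blocking until one is available. */
  sc = rtems_region_get_segment(
    region_id,
    256,
    RTEMS_WAIT,
    RTEMS_NO_TIMEOUT,
    &segment
  );
  if ( sc == RTEMS_SUCCESSFUL )
    (void) rtems_region_return_segment( region_id, segment );
}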
rtems_status_code rtems_timer_create(
  rtems_name  name,
  rtems_id   *id
)
{
  Timer_Control *the_timer;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

  _Thread_Disable_dispatch();         /* to prevent deletion */

  the_timer = _Timer_Allocate();

  if ( !the_timer ) {
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }

  the_timer->the_class = TIMER_DORMANT;
  _Watchdog_Initialize( &the_timer->Ticker, NULL, 0, NULL );

  _Objects_Open(
    &_Timer_Information,
    &the_timer->Object,
    (Objects_Name) name
  );

  *id = the_timer->Object.id;
  _Thread_Enable_dispatch();
  return RTEMS_SUCCESSFUL;
}
rtems_status_code rtems_barrier_create(
  rtems_name           name,
  rtems_attribute      attribute_set,
  uint32_t             maximum_waiters,
  rtems_id            *id
)
{
  Barrier_Control         *the_barrier;
  CORE_barrier_Attributes  the_attributes;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

  /* Initialize core barrier attributes */
  if ( _Attributes_Is_barrier_automatic( attribute_set ) ) {
    the_attributes.discipline = CORE_BARRIER_AUTOMATIC_RELEASE;
    if ( maximum_waiters == 0 )
      return RTEMS_INVALID_NUMBER;
  } else
    the_attributes.discipline = CORE_BARRIER_MANUAL_RELEASE;

  the_attributes.maximum_count = maximum_waiters;

  _Thread_Disable_dispatch();             /* prevents deletion */

  the_barrier = _Barrier_Allocate();

  if ( !the_barrier ) {
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }

  the_barrier->attribute_set = attribute_set;

  _CORE_barrier_Initialize( &the_barrier->Barrier, &the_attributes );

  _Objects_Open(
    &_Barrier_Information,
    &the_barrier->Object,
    (Objects_Name) name
  );

  *id = the_barrier->Object.id;
  _Thread_Enable_dispatch();
  return RTEMS_SUCCESSFUL;
}
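/*
 * Illustrative sketch, not taken from the source above: an automatic-release
 * barrier that releases all waiters once the fourth task arrives.  The name
 * 'BAR1' and the waiter count are assumptions made for the example.
 */
#include <rtems.h>

static rtems_id sync_barrier_id;

static void create_sync_barrier( void )
{
  rtems_status_code sc;

  sc = rtems_barrier_create(
    rtems_build_name( 'B', 'A', 'R', '1' ),
    RTEMS_BARRIER_AUTOMATIC_RELEASE,
    4,                      /* maximum_waiters must be non-zero here */
    &sync_barrier_id
  );
  (void) sc;
}

/* Each participating task blocks here until the fourth one arrives. */
static void wait_at_barrier( void )
{
  (void) rtems_barrier_wait( sync_barrier_id, RTEMS_NO_TIMEOUT );
}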
rtems_status_code rtems_port_create(
  rtems_name  name,
  void       *internal_start,
  void       *external_start,
  uint32_t    length,
  rtems_id   *id
)
{
  register Dual_ported_memory_Control *the_port;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

  if ( !_Addresses_Is_aligned( internal_start ) ||
       !_Addresses_Is_aligned( external_start ) )
    return RTEMS_INVALID_ADDRESS;

  _Thread_Disable_dispatch();             /* to prevent deletion */

  the_port = _Dual_ported_memory_Allocate();

  if ( !the_port ) {
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }

  the_port->internal_base = internal_start;
  the_port->external_base = external_start;
  the_port->length        = length - 1;

  _Objects_Open(
    &_Dual_ported_memory_Information,
    &the_port->Object,
    (Objects_Name) name
  );

  *id = the_port->Object.id;
  _Thread_Enable_dispatch();
  return RTEMS_SUCCESSFUL;
}
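/*
 * Illustrative sketch, not taken from the source above: describe a
 * dual-ported RAM window and translate an external address to the local
 * view.  The base addresses and length are placeholders; real values come
 * from the board support package.
 */
#include <rtems.h>

#define DPRAM_INTERNAL_BASE ( (void *) 0x20000000 )
#define DPRAM_EXTERNAL_BASE ( (void *) 0xA0000000 )
#define DPRAM_LENGTH        0x1000

static void port_example( void )
{
  rtems_id          port_id;
  void             *internal;
  rtems_status_code sc;

  sc = rtems_port_create(
    rtems_build_name( 'D', 'P', 'R', '1' ),
    DPRAM_INTERNAL_BASE,
    DPRAM_EXTERNAL_BASE,
    DPRAM_LENGTH,
    &port_id
  );
  if ( sc != RTEMS_SUCCESSFUL )
    return;

  /* Convert an address as seen by the other bus master into a local one. */
  sc = rtems_port_external_to_internal(
    port_id,
    (char *) DPRAM_EXTERNAL_BASE + 0x100,
    &internal
  );
}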
rtems_status_code rtems_rate_monotonic_create(
  rtems_name  name,
  rtems_id   *id
)
{
  Rate_monotonic_Control *the_period;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

  the_period = _Rate_monotonic_Allocate();

  if ( !the_period ) {
    _Objects_Allocator_unlock();
    return RTEMS_TOO_MANY;
  }

  _ISR_lock_Initialize( &the_period->Lock, "Rate Monotonic Period" );
  _Priority_Node_initialize( &the_period->Priority, 0 );
  _Priority_Node_set_inactive( &the_period->Priority );

  the_period->owner = _Thread_Get_executing();
  the_period->state = RATE_MONOTONIC_INACTIVE;

  _Watchdog_Preinitialize( &the_period->Timer, _Per_CPU_Get_by_index( 0 ) );
  _Watchdog_Initialize( &the_period->Timer, _Rate_monotonic_Timeout );

  _Rate_monotonic_Reset_statistics( the_period );

  _Objects_Open(
    &_Rate_monotonic_Information,
    &the_period->Object,
    (Objects_Name) name
  );

  *id = the_period->Object.id;
  _Objects_Allocator_unlock();
  return RTEMS_SUCCESSFUL;
}
rtems_status_code rtems_rate_monotonic_create(
  rtems_name  name,
  rtems_id   *id
)
{
  Rate_monotonic_Control *the_period;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

  _Thread_Disable_dispatch();            /* to prevent deletion */

  the_period = _Rate_monotonic_Allocate();

  if ( !the_period ) {
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }

  the_period->owner = _Thread_Executing;
  the_period->state = RATE_MONOTONIC_INACTIVE;

  _Watchdog_Initialize( &the_period->Timer, NULL, 0, NULL );

  _Rate_monotonic_Reset_statistics( the_period );

  _Objects_Open(
    &_Rate_monotonic_Information,
    &the_period->Object,
    (Objects_Name) name
  );

  *id = the_period->Object.id;
  _Thread_Enable_dispatch();
  return RTEMS_SUCCESSFUL;
}
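/*
 * Illustrative sketch, not taken from the source above: the classic periodic
 * task skeleton built on a rate monotonic period.  The 50-tick period and
 * the name 'PER1' are assumptions made for the example.
 */
#include <rtems.h>

static rtems_task periodic_task( rtems_task_argument arg )
{
  rtems_id          period_id;
  rtems_status_code sc;

  (void) arg;

  sc = rtems_rate_monotonic_create(
    rtems_build_name( 'P', 'E', 'R', '1' ),
    &period_id
  );
  if ( sc != RTEMS_SUCCESSFUL )
    (void) rtems_task_delete( RTEMS_SELF );

  while ( 1 ) {
    /* Blocks until the start of the next 50-tick period. */
    sc = rtems_rate_monotonic_period( period_id, 50 );
    if ( sc == RTEMS_TIMEOUT ) {
      /* the previous deadline was missed */
    }
    /* periodic work goes here */
  }
}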
bool _Thread_Initialize(
  Thread_Information                   *information,
  Thread_Control                       *the_thread,
  const Scheduler_Control              *scheduler,
  void                                 *stack_area,
  size_t                                stack_size,
  bool                                  is_fp,
  Priority_Control                      priority,
  bool                                  is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  uint32_t                              isr_level,
  Objects_Name                          name
)
{
  uintptr_t                tls_size = _TLS_Get_size();
  size_t                   actual_stack_size = 0;
  void                    *stack = NULL;
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  void                    *fp_area = NULL;
#endif
  bool                     extension_status;
  size_t                   i;
  bool                     scheduler_node_initialized = false;
  Per_CPU_Control         *cpu = _Per_CPU_Get_by_index( 0 );

#if defined( RTEMS_SMP )
  if ( rtems_configuration_is_smp_enabled() && !is_preemptible ) {
    return false;
  }
#endif

  memset(
    &the_thread->current_state,
    0,
    information->Objects.size - offsetof( Thread_Control, current_state )
  );

  for ( i = 0 ; i < _Thread_Control_add_on_count ; ++i ) {
    const Thread_Control_add_on *add_on = &_Thread_Control_add_ons[ i ];

    *(void **) ( (char *) the_thread + add_on->destination_offset ) =
      (char *) the_thread + add_on->source_offset;
  }

  /*
   *  Allocate and Initialize the stack for this thread.
   */
#if !defined(RTEMS_SCORE_THREAD_ENABLE_USER_PROVIDED_STACK_VIA_API)
  actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
  if ( !actual_stack_size || actual_stack_size < stack_size )
    return false;                     /* stack allocation failed */

  stack = the_thread->Start.stack;
#else
  if ( !stack_area ) {
    actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
    if ( !actual_stack_size || actual_stack_size < stack_size )
      return false;                     /* stack allocation failed */

    stack = the_thread->Start.stack;
    the_thread->Start.core_allocated_stack = true;
  } else {
    stack = stack_area;
    actual_stack_size = stack_size;
    the_thread->Start.core_allocated_stack = false;
  }
#endif

  _Stack_Initialize(
    &the_thread->Start.Initial_stack,
    stack,
    actual_stack_size
  );

  /* Thread-local storage (TLS) area allocation */
  if ( tls_size > 0 ) {
    uintptr_t tls_align = _TLS_Heap_align_up( (uintptr_t) _TLS_Alignment );
    uintptr_t tls_alloc = _TLS_Get_allocation_size( tls_size, tls_align );

    the_thread->Start.tls_area =
      _Workspace_Allocate_aligned( tls_alloc, tls_align );

    if ( the_thread->Start.tls_area == NULL ) {
      goto failed;
    }
  }

  /*
   *  Allocate the floating point area for this thread
   */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( is_fp ) {
    fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
    if ( !fp_area )
      goto failed;
    fp_area = _Context_Fp_start( fp_area, 0 );
  }
  the_thread->fp_context       = fp_area;
  the_thread->Start.fp_context = fp_area;
#endif

  /*
   *  Get thread queue heads
   */
  the_thread->Wait.spare_heads = _Freechain_Get(
    &information->Free_thread_queue_heads,
    _Workspace_Allocate,
    _Objects_Extend_size( &information->Objects ),
    THREAD_QUEUE_HEADS_SIZE( _Scheduler_Count )
  );
  if ( the_thread->Wait.spare_heads == NULL ) {
    goto failed;
  }
  _Thread_queue_Heads_initialize( the_thread->Wait.spare_heads );

  /*
   *  General initialization
   */
  the_thread->is_fp                  = is_fp;
  the_thread->Start.isr_level        = isr_level;
  the_thread->Start.is_preemptible   = is_preemptible;
  the_thread->Start.budget_algorithm = budget_algorithm;
  the_thread->Start.budget_callout   = budget_callout;

  _Thread_Timer_initialize( &the_thread->Timer, cpu );

  switch ( budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
      break;
#if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
    case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
      the_thread->cpu_time_budget =
        rtems_configuration_get_ticks_per_timeslice();
      break;
#endif
#if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
    case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
      break;
#endif
  }

#if defined(RTEMS_SMP)
  RTEMS_STATIC_ASSERT( THREAD_SCHEDULER_BLOCKED == 0, Scheduler_state );
  the_thread->Scheduler.own_control = scheduler;
  the_thread->Scheduler.control = scheduler;
  the_thread->Scheduler.own_node = the_thread->Scheduler.node;
  _Resource_Node_initialize( &the_thread->Resource_node );
  the_thread->Lock.current = &the_thread->Lock.Default;
  _SMP_ticket_lock_Initialize( &the_thread->Lock.Default );
  _SMP_lock_Stats_initialize( &the_thread->Lock.Stats, "Thread Lock" );
  _SMP_lock_Stats_initialize( &the_thread->Potpourri_stats, "Thread Potpourri" );
#endif

  _Thread_Debug_set_real_processor( the_thread, cpu );

  /* Initialize the CPU for the non-SMP schedulers */
  _Thread_Set_CPU( the_thread, cpu );

  _Thread_queue_Initialize( &the_thread->Join_queue );

  the_thread->current_state          = STATES_DORMANT;
  the_thread->Wait.operations        = &_Thread_queue_Operations_default;
  the_thread->current_priority       = priority;
  the_thread->real_priority          = priority;
  the_thread->Start.initial_priority = priority;

  RTEMS_STATIC_ASSERT( THREAD_WAIT_FLAGS_INITIAL == 0, Wait_flags );

  _Scheduler_Node_initialize( scheduler, the_thread );
  scheduler_node_initialized = true;

  _Scheduler_Update_priority( the_thread, priority );

  /* POSIX Keys */
  _RBTree_Initialize_empty( &the_thread->Keys.Key_value_pairs );
  _ISR_lock_Initialize( &the_thread->Keys.Lock, "POSIX Key Value Pairs" );

  _Thread_Action_control_initialize( &the_thread->Post_switch_actions );

  RTEMS_STATIC_ASSERT( THREAD_LIFE_NORMAL == 0, Life_state );

  /*
   *  Open the object
   */
  _Objects_Open( &information->Objects, &the_thread->Object, name );

  /*
   *  We assume the Allocator Mutex is locked and dispatching is
   *  enabled when we get here.  We want to be able to run the
   *  user extensions with dispatching enabled.  The Allocator
   *  Mutex provides sufficient protection to let the user extensions
   *  run safely.
   */
  extension_status = _User_extensions_Thread_create( the_thread );
  if ( extension_status )
    return true;

failed:

  if ( scheduler_node_initialized ) {
    _Scheduler_Node_destroy( scheduler, the_thread );
  }

  _Workspace_Free( the_thread->Start.tls_area );

  _Freechain_Put(
    &information->Free_thread_queue_heads,
    the_thread->Wait.spare_heads
  );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  _Workspace_Free( fp_area );
#endif

  _Thread_Stack_Free( the_thread );
  return false;
}
int _POSIX_Message_queue_Create_support(
  const char                    *name,
  int                            pshared,
  unsigned int                   oflag,
  struct mq_attr                *attr_ptr,
  POSIX_Message_queue_Control  **message_queue
)
{
  POSIX_Message_queue_Control   *the_mq;
  CORE_message_queue_Attributes *the_mq_attr;
  struct mq_attr                 attr;

  _Thread_Disable_dispatch();

  /*
   *  There is no real basis for the default values.  They will work
   *  but were not compared against any existing implementation for
   *  compatibility.  See README.mqueue for an example program we
   *  think will print out the defaults.  Report anything you find with it.
   */
  if ( attr_ptr == NULL ) {
    attr.mq_maxmsg  = 10;
    attr.mq_msgsize = 16;
  } else {
    if ( attr_ptr->mq_maxmsg < 0 ) {
      _Thread_Enable_dispatch();
      set_errno_and_return_minus_one( EINVAL );
    }

    if ( attr_ptr->mq_msgsize < 0 ) {
      _Thread_Enable_dispatch();
      set_errno_and_return_minus_one( EINVAL );
    }

    attr = *attr_ptr;
  }

#if 0 && defined(RTEMS_MULTIPROCESSING)
  if ( pshared == PTHREAD_PROCESS_SHARED &&
       !( _Objects_MP_Allocate_and_open( &_POSIX_Message_queue_Information, 0,
                            the_mq->Object.id, FALSE ) ) ) {
    _POSIX_Message_queue_Free( the_mq );
    _Thread_Enable_dispatch();
    set_errno_and_return_minus_one( ENFILE );
  }
#endif

  the_mq = _POSIX_Message_queue_Allocate();
  if ( !the_mq ) {
    _Thread_Enable_dispatch();
    set_errno_and_return_minus_one( ENFILE );
  }

  the_mq->process_shared = pshared;
  the_mq->oflag          = oflag;
  the_mq->named          = TRUE;
  the_mq->open_count     = 1;
  the_mq->linked         = TRUE;

  /* XXX
   *
   *  Note that thread blocking discipline should be based on the
   *  current scheduling policy.
   */
  the_mq_attr = &the_mq->Message_queue.Attributes;
  the_mq_attr->discipline = CORE_MESSAGE_QUEUE_DISCIPLINES_FIFO;

  if ( ! _CORE_message_queue_Initialize(
           &the_mq->Message_queue,
           OBJECTS_POSIX_MESSAGE_QUEUES,
           the_mq_attr,
           attr.mq_maxmsg,
           attr.mq_msgsize,
#if 0 && defined(RTEMS_MULTIPROCESSING)
           _POSIX_Message_queue_MP_Send_extract_proxy
#else
           NULL
#endif
       ) ) {
#if 0 && defined(RTEMS_MULTIPROCESSING)
    if ( pshared == PTHREAD_PROCESS_SHARED )
      _Objects_MP_Close( &_POSIX_Message_queue_Information, the_mq->Object.id );
#endif
    _POSIX_Message_queue_Free( the_mq );
    _Thread_Enable_dispatch();
    set_errno_and_return_minus_one( ENOSPC );
  }

  _Objects_Open(
    &_POSIX_Message_queue_Information,
    &the_mq->Object,
    (char *) name
  );

  *message_queue = the_mq;

#if 0 && defined(RTEMS_MULTIPROCESSING)
  if ( pshared == PTHREAD_PROCESS_SHARED )
    _POSIX_Message_queue_MP_Send_process_packet(
      POSIX_MESSAGE_QUEUE_MP_ANNOUNCE_CREATE,
      the_mq->Object.id,
      (char *) name,
      0                          /* Not used */
    );
#endif

  _Thread_Enable_dispatch();
  return 0;
}
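/*
 * Illustrative caller-side POSIX sketch, not taken from the source above:
 * the support routine backs mq_open() with O_CREAT.  The queue name "/demo"
 * is an assumption; the attribute values match the defaults used above.
 */
#include <mqueue.h>
#include <fcntl.h>

static void posix_mq_example( void )
{
  struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 16 };
  char           msg[ 16 ] = "hello";
  mqd_t          mq;

  mq = mq_open( "/demo", O_CREAT | O_RDWR, 0644, &attr );
  if ( mq == (mqd_t) -1 )
    return;

  (void) mq_send( mq, msg, sizeof( msg ), 1 );
  (void) mq_close( mq );
}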
rtems_status_code rtems_partition_create(
  rtems_name       name,
  void            *starting_address,
  uint32_t         length,
  uint32_t         buffer_size,
  rtems_attribute  attribute_set,
  rtems_id        *id
)
{
  register Partition_Control *the_partition;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !starting_address )
    return RTEMS_INVALID_ADDRESS;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

  if ( length == 0 || buffer_size == 0 || length < buffer_size ||
         !_Partition_Is_buffer_size_aligned( buffer_size ) )
    return RTEMS_INVALID_SIZE;

  if ( !_Addresses_Is_aligned( starting_address ) )
    return RTEMS_INVALID_ADDRESS;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) &&
       !_System_state_Is_multiprocessing )
    return RTEMS_MP_NOT_CONFIGURED;
#endif

  _Thread_Disable_dispatch();               /* prevents deletion */

  the_partition = _Partition_Allocate();

  if ( !the_partition ) {
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) &&
       !( _Objects_MP_Allocate_and_open( &_Partition_Information, name,
                            the_partition->Object.id, false ) ) ) {
    _Partition_Free( the_partition );
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }
#endif

  the_partition->starting_address      = starting_address;
  the_partition->length                = length;
  the_partition->buffer_size           = buffer_size;
  the_partition->attribute_set         = attribute_set;
  the_partition->number_of_used_blocks = 0;

  _Chain_Initialize(
    &the_partition->Memory,
    starting_address,
    length / buffer_size,
    buffer_size
  );

  _Objects_Open(
    &_Partition_Information,
    &the_partition->Object,
    (Objects_Name) name
  );

  *id = the_partition->Object.id;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) )
    _Partition_MP_Send_process_packet(
      PARTITION_MP_ANNOUNCE_CREATE,
      the_partition->Object.id,
      name,
      0                          /* Not used */
    );
#endif

  _Thread_Enable_dispatch();
  return RTEMS_SUCCESSFUL;
}
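/*
 * Illustrative sketch, not taken from the source above: a partition carved
 * out of a static pool of fixed-size buffers.  The buffer size, count and
 * name 'PAR1' are assumptions made for the example.
 */
#include <rtems.h>

#define BUFFER_SIZE  64
#define BUFFER_COUNT 16

/* The pool start must satisfy the architecture alignment requirement. */
static char pool[ BUFFER_COUNT * BUFFER_SIZE ] __attribute__(( aligned( 8 ) ));

static void partition_example( void )
{
  rtems_id          part_id;
  void             *buffer;
  rtems_status_code sc;

  sc = rtems_partition_create(
    rtems_build_name( 'P', 'A', 'R', '1' ),
    pool,
    sizeof( pool ),
    BUFFER_SIZE,
    RTEMS_DEFAULT_ATTRIBUTES,
    &part_id
  );
  if ( sc != RTEMS_SUCCESSFUL )
    return;

  /* Fixed-size allocation is O(1): take a buffer and give it back. */
  if ( rtems_partition_get_buffer( part_id, &buffer ) == RTEMS_SUCCESSFUL )
    (void) rtems_partition_return_buffer( part_id, buffer );
}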
int timer_create(
  clockid_t        clock_id,
  struct sigevent *evp,
  timer_t         *timerid
)
{
  POSIX_Timer_Control *ptimer;

  if ( clock_id != CLOCK_REALTIME )
    rtems_set_errno_and_return_minus_one( EINVAL );

  /*
   *  The members of the evp structure are checked for consistency.
   */
  if ( evp != NULL ) {
    /* The structure has data */
    if ( ( evp->sigev_notify != SIGEV_NONE ) &&
         ( evp->sigev_notify != SIGEV_SIGNAL ) ) {
      /* The value of the field sigev_notify is not valid */
      rtems_set_errno_and_return_minus_one( EINVAL );
    }

    if ( !evp->sigev_signo )
      rtems_set_errno_and_return_minus_one( EINVAL );

    if ( !is_valid_signo( evp->sigev_signo ) )
      rtems_set_errno_and_return_minus_one( EINVAL );
  }

  _Thread_Disable_dispatch();         /* to prevent deletion */

  /*
   *  Allocate a timer
   */
  ptimer = _POSIX_Timer_Allocate();
  if ( !ptimer ) {
    _Thread_Enable_dispatch();
    rtems_set_errno_and_return_minus_one( EAGAIN );
  }

  /* The data of the created timer is stored for later use */
  ptimer->state     = POSIX_TIMER_STATE_CREATE_NEW;
  ptimer->thread_id = _Thread_Executing->Object.id;

  if ( evp != NULL ) {
    ptimer->inf.sigev_notify = evp->sigev_notify;
    ptimer->inf.sigev_signo  = evp->sigev_signo;
    ptimer->inf.sigev_value  = evp->sigev_value;
  }

  ptimer->overrun = 0;
  ptimer->timer_data.it_value.tv_sec     = 0;
  ptimer->timer_data.it_value.tv_nsec    = 0;
  ptimer->timer_data.it_interval.tv_sec  = 0;
  ptimer->timer_data.it_interval.tv_nsec = 0;

  _Watchdog_Initialize( &ptimer->Timer, NULL, 0, NULL );
  _Objects_Open( &_POSIX_Timer_Information, &ptimer->Object, (Objects_Name) 0 );

  *timerid = ptimer->Object.id;
  _Thread_Enable_dispatch();
  return 0;
}
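/*
 * Illustrative caller-side POSIX sketch, not taken from the source above:
 * create a CLOCK_REALTIME timer that delivers SIGUSR1 and arm it.  The 1 s
 * initial delay and 500 ms interval are assumptions made for the example.
 */
#include <signal.h>
#include <time.h>

static void posix_timer_example( void )
{
  struct sigevent   sev;
  struct itimerspec its;
  timer_t           tid;

  sev.sigev_notify          = SIGEV_SIGNAL;
  sev.sigev_signo           = SIGUSR1;
  sev.sigev_value.sival_int = 0;

  if ( timer_create( CLOCK_REALTIME, &sev, &tid ) != 0 )
    return;

  its.it_value.tv_sec     = 1;          /* first expiration after 1 s */
  its.it_value.tv_nsec    = 0;
  its.it_interval.tv_sec  = 0;          /* then every 500 ms */
  its.it_interval.tv_nsec = 500000000L;

  (void) timer_settime( tid, 0, &its, NULL );
}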
epos_status_code epos_message_queue_create(
  epos_name        name,
  uint32_t         count,
  size_t           max_message_size,
  epos_attribute   attribute_set,
  epos_id         *id
)
{
  register Message_queue_Control *the_message_queue;
  CORE_message_queue_Attributes   the_msgq_attributes;
#if defined(RTEMS_MULTIPROCESSING)
  bool                            is_global;
#endif

  if ( !epos_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

#if defined(RTEMS_MULTIPROCESSING)
  if ( (is_global = _Attributes_Is_global( attribute_set ) ) &&
       !_System_state_Is_multiprocessing )
    return RTEMS_MP_NOT_CONFIGURED;
#endif

  if ( count == 0 )
    return RTEMS_INVALID_NUMBER;

  if ( max_message_size == 0 )
    return RTEMS_INVALID_SIZE;

#if defined(RTEMS_MULTIPROCESSING)
#if 1
  /*
   *  I am not 100% sure this should be an error.
   *  It seems reasonable to create a queue with a large maximum size,
   *  and then just send smaller messages from remote (or all) nodes.
   */
  if ( is_global && (_MPCI_table->maximum_packet_size < max_message_size) )
    return RTEMS_INVALID_SIZE;
#endif
#endif

  _Thread_Disable_dispatch();              /* protects object pointer */

  the_message_queue = _Message_queue_Allocate();

  if ( !the_message_queue ) {
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }

#if defined(RTEMS_MULTIPROCESSING)
  if ( is_global &&
       !( _Objects_MP_Allocate_and_open( &_Message_queue_Information,
                            name, the_message_queue->Object.id, false ) ) ) {
    _Message_queue_Free( the_message_queue );
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }
#endif

  the_message_queue->attribute_set = attribute_set;

  if ( _Attributes_Is_priority( attribute_set ) )
    the_msgq_attributes.discipline = CORE_MESSAGE_QUEUE_DISCIPLINES_PRIORITY;
  else
    the_msgq_attributes.discipline = CORE_MESSAGE_QUEUE_DISCIPLINES_FIFO;

  if ( ! _CORE_message_queue_Initialize(
           &the_message_queue->message_queue,
           &the_msgq_attributes,
           count,
           max_message_size
         ) ) {
#if defined(RTEMS_MULTIPROCESSING)
    if ( is_global )
      _Objects_MP_Close(
        &_Message_queue_Information, the_message_queue->Object.id );
#endif

    _Message_queue_Free( the_message_queue );
    _Thread_Enable_dispatch();
    return RTEMS_UNSATISFIED;
  }

  _Objects_Open(
    &_Message_queue_Information,
    &the_message_queue->Object,
    (Objects_Name) name
  );

  *id = the_message_queue->Object.id;

#if defined(RTEMS_MULTIPROCESSING)
  if ( is_global )
    _Message_queue_MP_Send_process_packet(
      MESSAGE_QUEUE_MP_ANNOUNCE_CREATE,
      the_message_queue->Object.id,
      name,
      0
    );
#endif

  _Thread_Enable_dispatch();
  return RTEMS_SUCCESSFUL;
}
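/*
 * Illustrative sketch, not taken from the source above.  It assumes the
 * epos_ prefixed Classic API mirrors the standard RTEMS one, so
 * epos_message_queue_send( id, buffer, size ) and the RTEMS_ attribute
 * constants are assumptions; the name is built by hand to avoid relying on
 * a name-building macro.
 */
#include <stdint.h>

#define MSG1_NAME ( (epos_name) 0x4D534731 )   /* 'MSG1' */

static void message_queue_example( void )
{
  epos_id          queue_id;
  epos_status_code sc;
  uint32_t         payload = 42;

  /* Up to 8 pending messages of at most sizeof(uint32_t) bytes each. */
  sc = epos_message_queue_create(
    MSG1_NAME,
    8,
    sizeof( payload ),
    RTEMS_DEFAULT_ATTRIBUTES,   /* FIFO blocking discipline */
    &queue_id
  );
  if ( sc != RTEMS_SUCCESSFUL )
    return;

  sc = epos_message_queue_send( queue_id, &payload, sizeof( payload ) );
}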
rtems_status_code rtems_region_create(
  rtems_name          name,
  void               *starting_address,
  uintptr_t           length,
  uintptr_t           page_size,
  rtems_attribute     attribute_set,
  rtems_id           *id
)
{
  rtems_status_code  return_status;
  Region_Control    *the_region;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !starting_address )
    return RTEMS_INVALID_ADDRESS;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

  _RTEMS_Lock_allocator();                 /* to prevent deletion */

  the_region = _Region_Allocate();

  if ( !the_region )
    return_status = RTEMS_TOO_MANY;
  else {
    the_region->maximum_segment_size = _Heap_Initialize(
      &the_region->Memory,
      starting_address,
      length,
      page_size
    );

    if ( !the_region->maximum_segment_size ) {
      _Region_Free( the_region );
      return_status = RTEMS_INVALID_SIZE;
    } else {
      the_region->starting_address      = starting_address;
      the_region->length                = length;
      the_region->page_size             = page_size;
      the_region->attribute_set         = attribute_set;
      the_region->number_of_used_blocks = 0;

      _Thread_queue_Initialize(
        &the_region->Wait_queue,
        _Attributes_Is_priority( attribute_set ) ?
          THREAD_QUEUE_DISCIPLINE_PRIORITY : THREAD_QUEUE_DISCIPLINE_FIFO,
        STATES_WAITING_FOR_SEGMENT,
        RTEMS_TIMEOUT
      );

      _Objects_Open(
        &_Region_Information,
        &the_region->Object,
        (Objects_Name) name
      );

      *id = the_region->Object.id;
      return_status = RTEMS_SUCCESSFUL;
    }
  }

  _RTEMS_Unlock_allocator();
  return return_status;
}
bool _Thread_Initialize(
  Objects_Information                 *information,
  Thread_Control                      *the_thread,
  const Scheduler_Control             *scheduler,
  void                                *stack_area,
  size_t                               stack_size,
  bool                                 is_fp,
  Priority_Control                     priority,
  bool                                 is_preemptible,
  Thread_CPU_budget_algorithms         budget_algorithm,
  Thread_CPU_budget_algorithm_callout  budget_callout,
  uint32_t                             isr_level,
  Objects_Name                         name
)
{
  uintptr_t                tls_size = _TLS_Get_size();
  size_t                   actual_stack_size = 0;
  void                    *stack = NULL;
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  void                    *fp_area = NULL;
#endif
  bool                     extension_status;
  size_t                   i;
  bool                     scheduler_node_initialized = false;
  Per_CPU_Control         *cpu = _Per_CPU_Get_by_index( 0 );

#if defined( RTEMS_SMP )
  if ( rtems_configuration_is_smp_enabled() && !is_preemptible ) {
    return false;
  }
#endif

  for ( i = 0 ; i < _Thread_Control_add_on_count ; ++i ) {
    const Thread_Control_add_on *add_on = &_Thread_Control_add_ons[ i ];

    *(void **) ( (char *) the_thread + add_on->destination_offset ) =
      (char *) the_thread + add_on->source_offset;
  }

  /*
   *  Initialize the Ada self pointer
   */
#if __RTEMS_ADA__
  the_thread->rtems_ada_self = NULL;
#endif

  the_thread->Start.tls_area = NULL;

  /*
   *  Allocate and Initialize the stack for this thread.
   */
#if !defined(RTEMS_SCORE_THREAD_ENABLE_USER_PROVIDED_STACK_VIA_API)
  actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
  if ( !actual_stack_size || actual_stack_size < stack_size )
    return false;                     /* stack allocation failed */

  stack = the_thread->Start.stack;
#else
  if ( !stack_area ) {
    actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
    if ( !actual_stack_size || actual_stack_size < stack_size )
      return false;                     /* stack allocation failed */

    stack = the_thread->Start.stack;
    the_thread->Start.core_allocated_stack = true;
  } else {
    stack = stack_area;
    actual_stack_size = stack_size;
    the_thread->Start.core_allocated_stack = false;
  }
#endif

  _Stack_Initialize(
    &the_thread->Start.Initial_stack,
    stack,
    actual_stack_size
  );

  /* Thread-local storage (TLS) area allocation */
  if ( tls_size > 0 ) {
    uintptr_t tls_align = _TLS_Heap_align_up( (uintptr_t) _TLS_Alignment );
    uintptr_t tls_alloc = _TLS_Get_allocation_size( tls_size, tls_align );

    the_thread->Start.tls_area =
      _Workspace_Allocate_aligned( tls_alloc, tls_align );

    if ( the_thread->Start.tls_area == NULL ) {
      goto failed;
    }
  }

  /*
   *  Allocate the floating point area for this thread
   */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( is_fp ) {
    fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
    if ( !fp_area )
      goto failed;
    fp_area = _Context_Fp_start( fp_area, 0 );
  }
  the_thread->fp_context       = fp_area;
  the_thread->Start.fp_context = fp_area;
#endif

  /*
   *  Initialize the thread timer
   */
  _Watchdog_Initialize( &the_thread->Timer, NULL, 0, NULL );

#ifdef __RTEMS_STRICT_ORDER_MUTEX__
  /* Initialize the head of chain of held mutexes */
  _Chain_Initialize_empty( &the_thread->lock_mutex );
#endif

  /*
   *  Clear the extensions area so extension users can determine
   *  if they are linked to the thread.  An extension user may
   *  create the extension long after tasks have been created
   *  so they cannot rely on the thread create user extension
   *  call.  The object index starts with one, so the first extension
   *  context is unused.
   */
  for ( i = 1 ; i <= rtems_configuration_get_maximum_extensions() ; ++i )
    the_thread->extensions[ i ] = NULL;

  /*
   *  General initialization
   */
  the_thread->Start.isr_level        = isr_level;
  the_thread->Start.is_preemptible   = is_preemptible;
  the_thread->Start.budget_algorithm = budget_algorithm;
  the_thread->Start.budget_callout   = budget_callout;

  switch ( budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
      break;
#if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
    case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
      the_thread->cpu_time_budget =
        rtems_configuration_get_ticks_per_timeslice();
      break;
#endif
#if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
    case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
      break;
#endif
  }

#if defined(RTEMS_SMP)
  the_thread->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
  the_thread->Scheduler.own_control = scheduler;
  the_thread->Scheduler.control = scheduler;
  the_thread->Scheduler.own_node = the_thread->Scheduler.node;
  _Resource_Node_initialize( &the_thread->Resource_node );
  _CPU_Context_Set_is_executing( &the_thread->Registers, false );
#endif

  _Thread_Debug_set_real_processor( the_thread, cpu );

  /* Initialize the CPU for the non-SMP schedulers */
  _Thread_Set_CPU( the_thread, cpu );

  the_thread->current_state          = STATES_DORMANT;
  the_thread->Wait.queue             = NULL;
  the_thread->resource_count         = 0;
  the_thread->real_priority          = priority;
  the_thread->Start.initial_priority = priority;

  _Scheduler_Node_initialize( scheduler, the_thread );
  scheduler_node_initialized = true;

  _Thread_Set_priority( the_thread, priority );

  /*
   *  Initialize the CPU usage statistics
   */
#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
  _Timestamp_Set_to_zero( &the_thread->cpu_time_used );
#else
  the_thread->cpu_time_used = 0;
#endif

  /*
   *  Initialize the thread's key value node chain
   */
  _Chain_Initialize_empty( &the_thread->Key_Chain );

  _Thread_Action_control_initialize( &the_thread->Post_switch_actions );

  _Thread_Action_initialize(
    &the_thread->Life.Action,
    _Thread_Life_action_handler
  );
  the_thread->Life.state = THREAD_LIFE_NORMAL;
  the_thread->Life.terminator = NULL;

  /*
   *  Open the object
   */
  _Objects_Open( information, &the_thread->Object, name );

  /*
   *  We assume the Allocator Mutex is locked and dispatching is
   *  enabled when we get here.  We want to be able to run the
   *  user extensions with dispatching enabled.  The Allocator
   *  Mutex provides sufficient protection to let the user extensions
   *  run safely.
   */
  extension_status = _User_extensions_Thread_create( the_thread );
  if ( extension_status )
    return true;

failed:

  if ( scheduler_node_initialized ) {
    _Scheduler_Node_destroy( scheduler, the_thread );
  }

  _Workspace_Free( the_thread->Start.tls_area );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  _Workspace_Free( fp_area );
#endif

  _Thread_Stack_Free( the_thread );
  return false;
}
rtems_status_code rtems_semaphore_create(
  rtems_name           name,
  uint32_t             count,
  rtems_attribute      attribute_set,
  rtems_task_priority  priority_ceiling,
  rtems_id            *id
)
{
  register Semaphore_Control *the_semaphore;
  CORE_mutex_Attributes       the_mutex_attr;
  CORE_semaphore_Attributes   the_semaphore_attr;
  CORE_mutex_Status           mutex_status;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) ) {

    if ( !_System_state_Is_multiprocessing )
      return RTEMS_MP_NOT_CONFIGURED;

    if ( _Attributes_Is_inherit_priority( attribute_set ) ||
         _Attributes_Is_priority_ceiling( attribute_set ) )
      return RTEMS_NOT_DEFINED;

  } else
#endif

  if ( _Attributes_Is_inherit_priority( attribute_set ) ||
       _Attributes_Is_priority_ceiling( attribute_set ) ) {

    if ( ! ( _Attributes_Is_binary_semaphore( attribute_set ) &&
             _Attributes_Is_priority( attribute_set ) ) )
      return RTEMS_NOT_DEFINED;

  }

  if ( _Attributes_Is_inherit_priority( attribute_set ) &&
       _Attributes_Is_priority_ceiling( attribute_set ) )
    return RTEMS_NOT_DEFINED;

  if ( !_Attributes_Is_counting_semaphore( attribute_set ) && ( count > 1 ) )
    return RTEMS_INVALID_NUMBER;

  _Thread_Disable_dispatch();             /* prevents deletion */

  the_semaphore = _Semaphore_Allocate();

  if ( !the_semaphore ) {
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) &&
       ! ( _Objects_MP_Allocate_and_open( &_Semaphore_Information, name,
                            the_semaphore->Object.id, false ) ) ) {
    _Semaphore_Free( the_semaphore );
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }
#endif

  the_semaphore->attribute_set = attribute_set;

  /*
   *  Initialize it as a counting semaphore.
   */
  if ( _Attributes_Is_counting_semaphore( attribute_set ) ) {
    /*
     *  This effectively disables limit checking.
     */
    the_semaphore_attr.maximum_count = 0xFFFFFFFF;

    if ( _Attributes_Is_priority( attribute_set ) )
      the_semaphore_attr.discipline = CORE_SEMAPHORE_DISCIPLINES_PRIORITY;
    else
      the_semaphore_attr.discipline = CORE_SEMAPHORE_DISCIPLINES_FIFO;

    /*
     *  The following are just to make Purify happy.
     */
    the_mutex_attr.lock_nesting_behavior = CORE_MUTEX_NESTING_ACQUIRES;
    the_mutex_attr.priority_ceiling = PRIORITY_MINIMUM;

    _CORE_semaphore_Initialize(
      &the_semaphore->Core_control.semaphore,
      &the_semaphore_attr,
      count
    );
  } else {
    /*
     *  It is either simple binary semaphore or a more powerful mutex
     *  style binary semaphore.  This is the mutex style.
     */
    if ( _Attributes_Is_priority( attribute_set ) )
      the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_PRIORITY;
    else
      the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_FIFO;

    if ( _Attributes_Is_binary_semaphore( attribute_set ) ) {
      the_mutex_attr.priority_ceiling      = priority_ceiling;
      the_mutex_attr.lock_nesting_behavior = CORE_MUTEX_NESTING_ACQUIRES;
      the_mutex_attr.only_owner_release    = false;

      if ( the_mutex_attr.discipline == CORE_MUTEX_DISCIPLINES_PRIORITY ) {
        if ( _Attributes_Is_inherit_priority( attribute_set ) ) {
          the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT;
          the_mutex_attr.only_owner_release = true;
        } else if ( _Attributes_Is_priority_ceiling( attribute_set ) ) {
          the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING;
          the_mutex_attr.only_owner_release = true;
        }
      }
    } else /* must be simple binary semaphore */ {
      the_mutex_attr.lock_nesting_behavior = CORE_MUTEX_NESTING_BLOCKS;
      the_mutex_attr.only_owner_release = false;
    }

    mutex_status = _CORE_mutex_Initialize(
      &the_semaphore->Core_control.mutex,
      &the_mutex_attr,
      (count == 1) ? CORE_MUTEX_UNLOCKED : CORE_MUTEX_LOCKED
    );

    if ( mutex_status == CORE_MUTEX_STATUS_CEILING_VIOLATED ) {
      _Semaphore_Free( the_semaphore );
      _Thread_Enable_dispatch();
      return RTEMS_INVALID_PRIORITY;
    }
  }

  /*
   *  Whether we initialized it as a mutex or counting semaphore, it is
   *  now ready to be "offered" for use as a Classic API Semaphore.
   */
  _Objects_Open(
    &_Semaphore_Information,
    &the_semaphore->Object,
    (Objects_Name) name
  );

  *id = the_semaphore->Object.id;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) )
    _Semaphore_MP_Send_process_packet(
      SEMAPHORE_MP_ANNOUNCE_CREATE,
      the_semaphore->Object.id,
      name,
      0                          /* Not used */
    );
#endif

  _Thread_Enable_dispatch();
  return RTEMS_SUCCESSFUL;
}
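/*
 * Illustrative sketch, not taken from the source above: a binary semaphore
 * used as a mutex with priority inheritance.  The name 'MTX1' and the helper
 * functions are assumptions made for the example.
 */
#include <rtems.h>

static rtems_id data_lock_id;

static void create_data_lock( void )
{
  rtems_status_code sc;

  sc = rtems_semaphore_create(
    rtems_build_name( 'M', 'T', 'X', '1' ),
    1,                                   /* created unlocked */
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
    0,                                   /* ceiling unused by this protocol */
    &data_lock_id
  );
  (void) sc;
}

static void with_data_lock( void )
{
  if ( rtems_semaphore_obtain( data_lock_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT )
         == RTEMS_SUCCESSFUL ) {
    /* critical section */
    (void) rtems_semaphore_release( data_lock_id );
  }
}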
bool _Thread_Initialize(
  Objects_Information                 *information,
  Thread_Control                      *the_thread,
  void                                *stack_area,
  size_t                               stack_size,
  bool                                 is_fp,
  Priority_Control                     priority,
  bool                                 is_preemptible,
  Thread_CPU_budget_algorithms         budget_algorithm,
  Thread_CPU_budget_algorithm_callout  budget_callout,
  uint32_t                             isr_level,
  Objects_Name                         name
)
{
  size_t               actual_stack_size = 0;
  void                *stack = NULL;
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  void                *fp_area;
#endif
  void                *sched = NULL;
  void                *extensions_area;
  bool                 extension_status;
  int                  i;

#if defined( RTEMS_SMP )
  if ( rtems_configuration_is_smp_enabled() && !is_preemptible ) {
    return false;
  }
#endif

  /*
   *  Initialize the Ada self pointer
   */
#if __RTEMS_ADA__
  the_thread->rtems_ada_self = NULL;
#endif

  /*
   *  Zero out all the allocated memory fields
   */
  for ( i=0 ; i <= THREAD_API_LAST ; i++ )
    the_thread->API_Extensions[i] = NULL;

  extensions_area = NULL;
  the_thread->libc_reent = NULL;

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  fp_area = NULL;
#endif

  /*
   *  Allocate and Initialize the stack for this thread.
   */
#if !defined(RTEMS_SCORE_THREAD_ENABLE_USER_PROVIDED_STACK_VIA_API)
  actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
  if ( !actual_stack_size || actual_stack_size < stack_size )
    return false;                     /* stack allocation failed */

  stack = the_thread->Start.stack;
#else
  if ( !stack_area ) {
    actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
    if ( !actual_stack_size || actual_stack_size < stack_size )
      return false;                     /* stack allocation failed */

    stack = the_thread->Start.stack;
    the_thread->Start.core_allocated_stack = true;
  } else {
    stack = stack_area;
    actual_stack_size = stack_size;
    the_thread->Start.core_allocated_stack = false;
  }
#endif

  _Stack_Initialize(
    &the_thread->Start.Initial_stack,
    stack,
    actual_stack_size
  );

  /*
   *  Allocate the floating point area for this thread
   */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( is_fp ) {
    fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
    if ( !fp_area )
      goto failed;
    fp_area = _Context_Fp_start( fp_area, 0 );
  }
  the_thread->fp_context       = fp_area;
  the_thread->Start.fp_context = fp_area;
#endif

  /*
   *  Initialize the thread timer
   */
  _Watchdog_Initialize( &the_thread->Timer, NULL, 0, NULL );

#ifdef __RTEMS_STRICT_ORDER_MUTEX__
  /* Initialize the head of chain of held mutexes */
  _Chain_Initialize_empty( &the_thread->lock_mutex );
#endif

  /*
   *  Allocate the extensions area for this thread
   */
  if ( _Thread_Maximum_extensions ) {
    extensions_area = _Workspace_Allocate(
      (_Thread_Maximum_extensions + 1) * sizeof( void * )
    );
    if ( !extensions_area )
      goto failed;
  }
  the_thread->extensions = (void **) extensions_area;

  /*
   *  Clear the extensions area so extension users can determine
   *  if they are linked to the thread.  An extension user may
   *  create the extension long after tasks have been created
   *  so they cannot rely on the thread create user extension
   *  call.
   */
  if ( the_thread->extensions ) {
    for ( i = 0; i <= _Thread_Maximum_extensions ; i++ )
      the_thread->extensions[i] = NULL;
  }

  /*
   *  General initialization
   */
  the_thread->Start.is_preemptible   = is_preemptible;
  the_thread->Start.budget_algorithm = budget_algorithm;
  the_thread->Start.budget_callout   = budget_callout;

  switch ( budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
      break;
#if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
    case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
      the_thread->cpu_time_budget = _Thread_Ticks_per_timeslice;
      break;
#endif
#if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
    case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
      break;
#endif
  }

  the_thread->Start.isr_level = isr_level;

#if defined(RTEMS_SMP)
  the_thread->is_scheduled = false;
  the_thread->is_executing = false;

  /* Initialize the cpu field for the non-SMP schedulers */
  the_thread->cpu = _Per_CPU_Get_by_index( 0 );
#endif

  the_thread->current_state          = STATES_DORMANT;
  the_thread->Wait.queue             = NULL;
  the_thread->resource_count         = 0;
  the_thread->real_priority          = priority;
  the_thread->Start.initial_priority = priority;

  sched = _Scheduler_Allocate( the_thread );
  if ( !sched )
    goto failed;

  _Thread_Set_priority( the_thread, priority );

  /*
   *  Initialize the CPU usage statistics
   */
#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
  _Timestamp_Set_to_zero( &the_thread->cpu_time_used );
#else
  the_thread->cpu_time_used = 0;
#endif

  /*
   *  Open the object
   */
  _Objects_Open( information, &the_thread->Object, name );

  /*
   *  We assume the Allocator Mutex is locked and dispatching is
   *  enabled when we get here.  We want to be able to run the
   *  user extensions with dispatching enabled.  The Allocator
   *  Mutex provides sufficient protection to let the user extensions
   *  run safely.
   */
  extension_status = _User_extensions_Thread_create( the_thread );
  if ( extension_status )
    return true;

failed:
  _Workspace_Free( the_thread->libc_reent );

  for ( i=0 ; i <= THREAD_API_LAST ; i++ )
    _Workspace_Free( the_thread->API_Extensions[i] );

  _Workspace_Free( extensions_area );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  _Workspace_Free( fp_area );
#endif

  _Workspace_Free( sched );

  _Thread_Stack_Free( the_thread );
  return false;
}
rtems_status_code rtems_semaphore_create(
  rtems_name           name,
  uint32_t             count,
  rtems_attribute      attribute_set,
  rtems_task_priority  priority_ceiling,
  rtems_id            *id
)
{
  Semaphore_Control          *the_semaphore;
  CORE_mutex_Attributes       the_mutex_attr;
  CORE_semaphore_Attributes   the_semaphore_attr;
  CORE_mutex_Status           mutex_status;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) ) {

    if ( !_System_state_Is_multiprocessing )
      return RTEMS_MP_NOT_CONFIGURED;

    if ( _Attributes_Is_inherit_priority( attribute_set ) ||
         _Attributes_Is_priority_ceiling( attribute_set ) ||
         _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) )
      return RTEMS_NOT_DEFINED;

  } else
#endif

  if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) &&
       !( _Attributes_Is_binary_semaphore( attribute_set ) &&
          !_Attributes_Is_priority( attribute_set ) ) ) {
    return RTEMS_NOT_DEFINED;
  }

  if ( _Attributes_Is_inherit_priority( attribute_set ) ||
       _Attributes_Is_priority_ceiling( attribute_set ) ) {

    if ( ! ( _Attributes_Is_binary_semaphore( attribute_set ) &&
             _Attributes_Is_priority( attribute_set ) ) )
      return RTEMS_NOT_DEFINED;

  }

  if ( !_Attributes_Has_at_most_one_protocol( attribute_set ) )
    return RTEMS_NOT_DEFINED;

  if ( !_Attributes_Is_counting_semaphore( attribute_set ) && ( count > 1 ) )
    return RTEMS_INVALID_NUMBER;

#if !defined(RTEMS_SMP)
  /*
   *  On uni-processor configurations the Multiprocessor Resource Sharing
   *  Protocol is equivalent to the Priority Ceiling Protocol.
   */
  if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
    attribute_set |= RTEMS_PRIORITY_CEILING | RTEMS_PRIORITY;
  }
#endif

  the_semaphore = _Semaphore_Allocate();

  if ( !the_semaphore ) {
    _Objects_Allocator_unlock();
    return RTEMS_TOO_MANY;
  }

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) &&
       ! ( _Objects_MP_Allocate_and_open( &_Semaphore_Information, name,
                            the_semaphore->Object.id, false ) ) ) {
    _Semaphore_Free( the_semaphore );
    _Objects_Allocator_unlock();
    return RTEMS_TOO_MANY;
  }
#endif

  the_semaphore->attribute_set = attribute_set;

  /*
   *  Initialize it as a counting semaphore.
   */
  if ( _Attributes_Is_counting_semaphore( attribute_set ) ) {
    /*
     *  This effectively disables limit checking.
     */
    the_semaphore_attr.maximum_count = 0xFFFFFFFF;

    if ( _Attributes_Is_priority( attribute_set ) )
      the_semaphore_attr.discipline = CORE_SEMAPHORE_DISCIPLINES_PRIORITY;
    else
      the_semaphore_attr.discipline = CORE_SEMAPHORE_DISCIPLINES_FIFO;

    /*
     *  The following are just to make Purify happy.
     */
    the_mutex_attr.lock_nesting_behavior = CORE_MUTEX_NESTING_ACQUIRES;
    the_mutex_attr.priority_ceiling = PRIORITY_MINIMUM;

    _CORE_semaphore_Initialize(
      &the_semaphore->Core_control.semaphore,
      &the_semaphore_attr,
      count
    );
#if defined(RTEMS_SMP)
  } else if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
    MRSP_Status mrsp_status = _MRSP_Initialize(
      &the_semaphore->Core_control.mrsp,
      priority_ceiling,
      _Thread_Get_executing(),
      count != 1
    );

    if ( mrsp_status != MRSP_SUCCESSFUL ) {
      _Semaphore_Free( the_semaphore );
      _Objects_Allocator_unlock();
      return _Semaphore_Translate_MRSP_status_code( mrsp_status );
    }
#endif
  } else {
    /*
     *  It is either simple binary semaphore or a more powerful mutex
     *  style binary semaphore.  This is the mutex style.
     */
    if ( _Attributes_Is_priority( attribute_set ) )
      the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_PRIORITY;
    else
      the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_FIFO;

    if ( _Attributes_Is_binary_semaphore( attribute_set ) ) {
      the_mutex_attr.priority_ceiling =
        _RTEMS_tasks_Priority_to_Core( priority_ceiling );
      the_mutex_attr.lock_nesting_behavior = CORE_MUTEX_NESTING_ACQUIRES;
      the_mutex_attr.only_owner_release    = false;

      if ( the_mutex_attr.discipline == CORE_MUTEX_DISCIPLINES_PRIORITY ) {
        if ( _Attributes_Is_inherit_priority( attribute_set ) ) {
          the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT;
          the_mutex_attr.only_owner_release = true;
        } else if ( _Attributes_Is_priority_ceiling( attribute_set ) ) {
          the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING;
          the_mutex_attr.only_owner_release = true;
        }
      }
    } else /* must be simple binary semaphore */ {
      the_mutex_attr.lock_nesting_behavior = CORE_MUTEX_NESTING_BLOCKS;
      the_mutex_attr.only_owner_release = false;
    }

    mutex_status = _CORE_mutex_Initialize(
      &the_semaphore->Core_control.mutex,
      _Thread_Get_executing(),
      &the_mutex_attr,
      count != 1
    );

    if ( mutex_status == CORE_MUTEX_STATUS_CEILING_VIOLATED ) {
      _Semaphore_Free( the_semaphore );
      _Objects_Allocator_unlock();
      return RTEMS_INVALID_PRIORITY;
    }
  }

  /*
   *  Whether we initialized it as a mutex or counting semaphore, it is
   *  now ready to be "offered" for use as a Classic API Semaphore.
   */
  _Objects_Open(
    &_Semaphore_Information,
    &the_semaphore->Object,
    (Objects_Name) name
  );

  *id = the_semaphore->Object.id;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) )
    _Semaphore_MP_Send_process_packet(
      SEMAPHORE_MP_ANNOUNCE_CREATE,
      the_semaphore->Object.id,
      name,
      0                          /* Not used */
    );
#endif

  _Objects_Allocator_unlock();
  return RTEMS_SUCCESSFUL;
}