/**
 * @brief Finalize a blocking operation.
 *
 * This method is used to finalize a blocking operation that was
 * satisfied.  It may be used with thread queues or any other
 * synchronization object that uses the blocking states and watchdog
 * timers for timeout.
 *
 * This method releases the thread queue lock and thus restores the
 * previous ISR disable level.  It is therefore an implicit
 * _ISR_Enable().
 *
 * @param[in] the_thread is the thread whose blocking is canceled
 * @param[in] lock_context is the lock context of the corresponding
 *            thread queue acquire
 */
static void _Thread_blocking_operation_Finalize(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /*
   *  The thread is not waiting on anything after this completes.
   */
  the_thread->Wait.queue = NULL;

  /*
   *  If the sync state is timed out, this is very likely not needed.
   *  But better safe than sorry when it comes to critical sections.
   */
  if ( _Watchdog_Is_active( &the_thread->Timer ) ) {
    _Watchdog_Deactivate( &the_thread->Timer );
    _Thread_queue_Release( lock_context );
    (void) _Watchdog_Remove( &the_thread->Timer );
  } else {
    _Thread_queue_Release( lock_context );
  }

  /*
   *  Global objects with thread queues should not be operated on from an
   *  ISR.  But the sync code still must allow short timeouts to be
   *  processed correctly.
   */
  _Thread_Unblock( the_thread );

#if defined(RTEMS_MULTIPROCESSING)
  if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    _Thread_MP_Free_proxy( the_thread );
#endif
}
void _CORE_RWLock_Obtain_for_writing(
  CORE_RWLock_Control                 *the_rwlock,
  Thread_Control                      *executing,
  Objects_Id                           id,
  bool                                 wait,
  Watchdog_Interval                    timeout,
  CORE_RWLock_API_mp_support_callout   api_rwlock_mp_support
)
{
  ISR_lock_Context lock_context;

  /*
   *  If unlocked, then OK to write.
   *  Otherwise (locked for reading or writing), we have to block.
   */
  _Thread_queue_Acquire( &the_rwlock->Wait_queue, &lock_context );

  switch ( the_rwlock->current_state ) {
    case CORE_RWLOCK_UNLOCKED:
      the_rwlock->current_state = CORE_RWLOCK_LOCKED_FOR_WRITING;
      _Thread_queue_Release( &the_rwlock->Wait_queue, &lock_context );
      executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL;
      return;

    case CORE_RWLOCK_LOCKED_FOR_READING:
    case CORE_RWLOCK_LOCKED_FOR_WRITING:
      break;
  }

  /*
   *  If the thread is not willing to wait, then return immediately.
   */
  if ( !wait ) {
    _Thread_queue_Release( &the_rwlock->Wait_queue, &lock_context );
    executing->Wait.return_code = CORE_RWLOCK_UNAVAILABLE;
    return;
  }

  /*
   *  We need to wait to enter this critical section
   */
  executing->Wait.id          = id;
  executing->Wait.option      = CORE_RWLOCK_THREAD_WAITING_FOR_WRITE;
  executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL;

  _Thread_queue_Enqueue_critical(
    &the_rwlock->Wait_queue,
    executing,
    STATES_WAITING_FOR_RWLOCK,
    timeout,
    CORE_RWLOCK_TIMEOUT,
    &lock_context
  );

  /* return to API level so it can dispatch and we block */
}
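/*
 *  Caller-side sketch (an assumption, not code from this source): the
 *  trailing comment above says the routine returns to the API level so the
 *  thread can block on dispatch.  A hypothetical API wrapper would disable
 *  dispatching around the call and translate Wait.return_code afterwards.
 *  The wrapper name, the pre-SMP _Thread_Disable_dispatch()/
 *  _Thread_Enable_dispatch() pair, and the status translation are all
 *  illustrative assumptions.
 */
rtems_status_code example_rwlock_obtain_write(
  CORE_RWLock_Control *the_rwlock,
  rtems_id             id,
  bool                 wait,
  Watchdog_Interval    timeout
)
{
  Thread_Control *executing;

  _Thread_Disable_dispatch();       /* assumed pre-SMP dispatch API */
  executing = _Thread_Executing;
  _CORE_RWLock_Obtain_for_writing(
    the_rwlock,
    executing,
    id,
    wait,
    timeout,
    NULL                            /* no MP support callout */
  );
  _Thread_Enable_dispatch();        /* the thread may block here */

  /* Hypothetical mapping of the core status to an API status. */
  switch ( executing->Wait.return_code ) {
    case CORE_RWLOCK_SUCCESSFUL:  return RTEMS_SUCCESSFUL;
    case CORE_RWLOCK_UNAVAILABLE: return RTEMS_UNSATISFIED;
    case CORE_RWLOCK_TIMEOUT:     return RTEMS_TIMEOUT;
    default:                      return RTEMS_INTERNAL_ERROR;
  }
}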
void _Thread_queue_Extract_with_return_code(
  Thread_queue_Control *the_thread_queue,
  Thread_Control       *the_thread,
  uint32_t              return_code
)
{
  ISR_lock_Context lock_context;

  _Thread_queue_Acquire( &lock_context );

  if ( !_States_Is_waiting_on_thread_queue( the_thread->current_state ) ) {
    _Thread_queue_Release( &lock_context );
    return;
  }

  if ( the_thread_queue->discipline == THREAD_QUEUE_DISCIPLINE_FIFO ) {
    _Chain_Extract_unprotected( &the_thread->Object.Node );
  } else { /* must be THREAD_QUEUE_DISCIPLINE_PRIORITY */
    _RBTree_Extract(
      &the_thread->Wait.queue->Queues.Priority,
      &the_thread->RBNode
    );
    _Thread_Priority_restore_default_change_handler( the_thread );
    _Thread_Lock_restore_default( the_thread );
  }

  the_thread->Wait.return_code = return_code;

  /*
   *  We found a thread to unblock.
   *
   *  NOTE: This is invoked with interrupts still disabled.
   */
  _Thread_blocking_operation_Finalize( the_thread, &lock_context );
}
Thread_Control *_Thread_queue_Do_dequeue(
  Thread_queue_Control          *the_thread_queue,
  const Thread_queue_Operations *operations
#if defined(RTEMS_MULTIPROCESSING)
  ,
  Thread_queue_MP_callout        mp_callout
#endif
)
{
  Thread_queue_Context  queue_context;
  Thread_Control       *the_thread;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_set_MP_callout( &queue_context, mp_callout );
  _Thread_queue_Acquire( the_thread_queue, &queue_context );

  the_thread = _Thread_queue_First_locked( the_thread_queue, operations );

  if ( the_thread != NULL ) {
    _Thread_queue_Extract_critical(
      &the_thread_queue->Queue,
      operations,
      the_thread,
      &queue_context
    );
  } else {
    _Thread_queue_Release( the_thread_queue, &queue_context );
  }

  return the_thread;
}
Thread_Control *_Thread_queue_Dequeue(
  Thread_queue_Control *the_thread_queue
)
{
  Thread_Control                   *the_thread;
  ISR_lock_Context                  lock_context;
  Thread_blocking_operation_States  sync_state;

  the_thread = NULL;
  _Thread_queue_Acquire( &lock_context );

  /*
   *  Invoke the discipline specific dequeue method.
   */
  if ( the_thread_queue->discipline == THREAD_QUEUE_DISCIPLINE_FIFO ) {
    if ( !_Chain_Is_empty( &the_thread_queue->Queues.Fifo ) ) {
      the_thread = (Thread_Control *)
        _Chain_Get_first_unprotected( &the_thread_queue->Queues.Fifo );
    }
  } else { /* must be THREAD_QUEUE_DISCIPLINE_PRIORITY */
    RBTree_Node *first;

    first = _RBTree_Get( &the_thread_queue->Queues.Priority, RBT_LEFT );
    if ( first ) {
      the_thread = THREAD_RBTREE_NODE_TO_THREAD( first );
      _Thread_Priority_restore_default_change_handler( the_thread );
      _Thread_Lock_restore_default( the_thread );
    }
  }

  if ( the_thread == NULL ) {
    /*
     *  We did not find a thread to unblock in the queue.  Maybe the
     *  executing thread is about to block on this thread queue.
     */
    sync_state = the_thread_queue->sync_state;
    if ( (sync_state == THREAD_BLOCKING_OPERATION_TIMEOUT) ||
         (sync_state == THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED) ) {
      the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_SATISFIED;
      the_thread = _Thread_Executing;
    } else {
      _Thread_queue_Release( &lock_context );
      return NULL;
    }
  }

  /*
   *  We found a thread to unblock.
   *
   *  NOTE: This is invoked with interrupts still disabled.
   */
  _Thread_blocking_operation_Finalize( the_thread, &lock_context );

  return the_thread;
}
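/*
 *  Hypothetical ISR-side caller sketch (not from this source): the dequeue
 *  either hands a waiting thread back to the caller or, via the sync-state
 *  check above, satisfies a blocking operation that is still in flight on
 *  the executing thread.  The semaphore structure members used here follow
 *  the classic CORE_semaphore_Control layout and are assumptions.
 */
void example_semaphore_release_from_isr( CORE_semaphore_Control *the_semaphore )
{
  Thread_Control *the_thread;

  the_thread = _Thread_queue_Dequeue( &the_semaphore->Wait_queue );
  if ( the_thread == NULL ) {
    /* Nobody was (or is about to be) waiting: bank the unit. */
    the_semaphore->count += 1;
  }
}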
rtems_status_code rtems_semaphore_flush( rtems_id id )
{
  Semaphore_Control    *the_semaphore;
  Thread_queue_Context  queue_context;

  the_semaphore = _Semaphore_Get( id, &queue_context );

  if ( the_semaphore == NULL ) {
#if defined(RTEMS_MULTIPROCESSING)
    if ( _Semaphore_MP_Is_remote( id ) ) {
      return RTEMS_ILLEGAL_ON_REMOTE_OBJECT;
    }
#endif

    return RTEMS_INVALID_ID;
  }

  _Thread_queue_Acquire_critical(
    &the_semaphore->Core_control.Wait_queue,
    &queue_context
  );
  _Thread_queue_Context_set_MP_callout(
    &queue_context,
    _Semaphore_MP_Send_object_was_deleted
  );

  switch ( the_semaphore->variant ) {
#if defined(RTEMS_SMP)
    case SEMAPHORE_VARIANT_MRSP:
      _Thread_queue_Release(
        &the_semaphore->Core_control.Wait_queue,
        &queue_context
      );
      return RTEMS_NOT_DEFINED;
#endif
    default:
      _Assert(
        the_semaphore->variant == SEMAPHORE_VARIANT_MUTEX_INHERIT_PRIORITY
          || the_semaphore->variant == SEMAPHORE_VARIANT_MUTEX_PRIORITY_CEILING
          || the_semaphore->variant == SEMAPHORE_VARIANT_MUTEX_NO_PROTOCOL
          || the_semaphore->variant == SEMAPHORE_VARIANT_SIMPLE_BINARY
          || the_semaphore->variant == SEMAPHORE_VARIANT_COUNTING
      );
      _Thread_queue_Flush_critical(
        &the_semaphore->Core_control.Wait_queue.Queue,
        _Semaphore_Get_operations( the_semaphore ),
        _Thread_queue_Flush_status_unavailable,
        &queue_context
      );
      break;
  }

  return RTEMS_SUCCESSFUL;
}
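/*
 *  Application-level usage sketch (task and semaphore identifiers are
 *  illustrative): the flush unblocks every waiter at once, and each waiter
 *  returns from rtems_semaphore_obtain() with RTEMS_UNSATISFIED rather
 *  than RTEMS_SUCCESSFUL.  The semaphore count itself is unchanged.
 */
static rtems_id example_sem_id; /* created elsewhere with a count of 0 */

static void example_waiter( rtems_task_argument arg )
{
  rtems_status_code sc;

  /* Blocks because the semaphore count is 0. */
  sc = rtems_semaphore_obtain( example_sem_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );

  if ( sc == RTEMS_UNSATISFIED ) {
    /* Woken by rtems_semaphore_flush(), not by a release. */
  }

  (void) arg;
  (void) rtems_task_delete( RTEMS_SELF );
}

static void example_flusher( void )
{
  /* Wake every waiter at once. */
  (void) rtems_semaphore_flush( example_sem_id );
}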
void _CORE_mutex_Seize_interrupt_blocking(
  CORE_mutex_Control *the_mutex,
  Thread_Control     *executing,
  Watchdog_Interval   timeout,
  ISR_lock_Context   *lock_context
)
{
#if !defined(RTEMS_SMP)
  /*
   *  We must disable thread dispatching here since we enable the
   *  interrupts for priority inheritance mutexes.
   */
  _Thread_Dispatch_disable();
#endif

  if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ) {
    Thread_Control *holder = the_mutex->holder;

#if !defined(RTEMS_SMP)
    /*
     *  Enabling interrupts here works only because exactly one executing
     *  thread exists and only threads are allowed to seize and surrender
     *  mutexes with the priority inheritance protocol.  On SMP
     *  configurations more than one executing thread may exist, so there
     *  we must not release the lock, since otherwise the current holder
     *  may no longer be the holder of the mutex once we released the lock.
     */
    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
#endif

    _Thread_Inherit_priority( holder, executing );

#if !defined(RTEMS_SMP)
    _Thread_queue_Acquire( &the_mutex->Wait_queue, lock_context );
#endif
  }

  _Thread_queue_Enqueue_critical(
    &the_mutex->Wait_queue.Queue,
    the_mutex->operations,
    executing,
    STATES_WAITING_FOR_MUTEX,
    timeout,
    CORE_MUTEX_TIMEOUT,
    lock_context
  );

#if !defined(RTEMS_SMP)
  _Thread_Dispatch_enable( _Per_CPU_Get() );
#endif
}
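/*
 *  Hypothetical caller sketch (an assumption based on this file, not
 *  verbatim RTEMS code): the seize fast path tries to take the mutex
 *  while holding the Wait_queue lock and falls through to the blocking
 *  path above only on contention.  The function name and the
 *  unsatisfied-nowait status handling are illustrative.
 */
void example_mutex_seize(
  CORE_mutex_Control *the_mutex,
  Thread_Control     *executing,
  bool                wait,
  Watchdog_Interval   timeout,
  ISR_lock_Context   *lock_context
)
{
  _Thread_queue_Acquire_critical( &the_mutex->Wait_queue, lock_context );

  if ( the_mutex->holder == NULL ) {
    /* Uncontended: grab ownership and leave the critical section. */
    the_mutex->holder           = executing;
    the_mutex->nest_count       = 1;
    executing->Wait.return_code = CORE_MUTEX_STATUS_SUCCESSFUL;
    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
    return;
  }

  if ( !wait ) {
    executing->Wait.return_code = CORE_MUTEX_STATUS_UNSATISFIED_NOWAIT;
    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
    return;
  }

  /* Contended: enqueue on the thread queue and block. */
  _CORE_mutex_Seize_interrupt_blocking(
    the_mutex,
    executing,
    timeout,
    lock_context
  );
}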
void _Thread_queue_Flush(
  Thread_queue_Control       *the_thread_queue,
#if defined(RTEMS_MULTIPROCESSING)
  Thread_queue_Flush_callout  remote_extract_callout,
#else
  Thread_queue_Flush_callout  remote_extract_callout RTEMS_UNUSED,
#endif
  uint32_t                    status
)
{
  ISR_lock_Context  lock_context;
  Thread_Control   *the_thread;

  _Thread_queue_Acquire( the_thread_queue, &lock_context );

  while ( (the_thread = _Thread_queue_First_locked( the_thread_queue )) ) {
#if defined(RTEMS_MULTIPROCESSING)
    if ( _Objects_Is_local_id( the_thread->Object.id ) )
#endif
      the_thread->Wait.return_code = status;

    _Thread_queue_Extract_critical(
      &the_thread_queue->Queue,
      the_thread_queue->operations,
      the_thread,
      &lock_context
    );

#if defined(RTEMS_MULTIPROCESSING)
    if ( !_Objects_Is_local_id( the_thread->Object.id ) )
      ( *remote_extract_callout )( the_thread );
#endif

    _Thread_queue_Acquire( the_thread_queue, &lock_context );
  }

  _Thread_queue_Release( the_thread_queue, &lock_context );
}
void _Thread_queue_Enqueue(
  Thread_queue_Control *the_thread_queue,
  Thread_Control       *the_thread,
  States_Control        state,
  Watchdog_Interval     timeout
)
{
  ISR_lock_Context                 lock_context;
  Thread_blocking_operation_States sync_state;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet )
    the_thread = _Thread_MP_Allocate_proxy( state );
  else
#endif
  /*
   *  Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, state );

  /*
   *  If the thread wants to timeout, then schedule its timer.
   */
  if ( timeout ) {
    _Watchdog_Initialize(
      &the_thread->Timer,
      _Thread_queue_Timeout,
      the_thread->Object.id,
      NULL
    );
    _Watchdog_Insert_ticks( &the_thread->Timer, timeout );
  }

  /*
   *  Now initiate the enqueue and check whether the blocking operation
   *  should be completed or the thread had its blocking condition
   *  satisfied before we got here.
   */
  _Thread_queue_Acquire( &lock_context );
  sync_state = the_thread_queue->sync_state;
  the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_SYNCHRONIZED;

  if ( sync_state == THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED ) {
    /*
     *  Invoke the discipline specific enqueue method.
     */
    if ( the_thread_queue->discipline == THREAD_QUEUE_DISCIPLINE_FIFO ) {
      _Chain_Append_unprotected(
        &the_thread_queue->Queues.Fifo,
        &the_thread->Object.Node
      );
    } else { /* must be THREAD_QUEUE_DISCIPLINE_PRIORITY */
      _Thread_Lock_set( the_thread, &_Thread_queue_Lock );
      _Thread_Priority_set_change_handler(
        the_thread,
        _Thread_queue_Requeue_priority,
        the_thread_queue
      );
      _RBTree_Insert(
        &the_thread_queue->Queues.Priority,
        &the_thread->RBNode,
        _Thread_queue_Compare_priority,
        false
      );
    }

    the_thread->Wait.queue = the_thread_queue;
    the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_SYNCHRONIZED;
    _Thread_queue_Release( &lock_context );
  } else { /* Cancel a blocking operation due to ISR */
    _Assert(
      sync_state == THREAD_BLOCKING_OPERATION_TIMEOUT ||
      sync_state == THREAD_BLOCKING_OPERATION_SATISFIED
    );
    _Thread_blocking_operation_Finalize( the_thread, &lock_context );
  }
}
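/*
 *  Sketch of the synchronization-state handshake this function relies on
 *  (inferred from the sync_state logic above; the caller shown here is an
 *  illustrative assumption, not verbatim code).  The blocking side marks
 *  the queue before testing its condition; an ISR that satisfies the
 *  condition in the meantime flips the state, and _Thread_queue_Enqueue()
 *  then finalizes the blocking operation instead of enqueuing.
 */
void example_blocking_seize(
  Thread_queue_Control *the_thread_queue,
  Thread_Control       *executing,
  Watchdog_Interval     timeout
)
{
  /* 1. Announce the intent to block. */
  the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED;

  /* 2. (Re)test the blocking condition with interrupts enabled; an ISR
   *    may run here and move the sync state to SATISFIED or TIMEOUT.
   */

  /* 3. Enqueue; the function above detects a changed sync state and
   *    cancels the blocking operation instead of blocking the thread.
   */
  _Thread_queue_Enqueue(
    the_thread_queue,
    executing,
    STATES_WAITING_FOR_SEMAPHORE,
    timeout
  );
}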
CORE_mutex_Status _CORE_mutex_Surrender(
  CORE_mutex_Control                *the_mutex,
#if defined(RTEMS_MULTIPROCESSING)
  Objects_Id                         id,
  CORE_mutex_API_mp_support_callout  api_mutex_mp_support,
#else
  Objects_Id                         id __attribute__((unused)),
  CORE_mutex_API_mp_support_callout  api_mutex_mp_support __attribute__((unused)),
#endif
  ISR_lock_Context                  *lock_context
)
{
  Thread_Control *the_thread;
  Thread_Control *holder;

  holder = the_mutex->holder;

  /*
   *  The following code allows a thread (or ISR) other than the thread
   *  which acquired the mutex to release that mutex.  This is only
   *  allowed when the mutex in question is FIFO or simple Priority
   *  discipline.  But Priority Ceiling or Priority Inheritance mutexes
   *  must be released by the thread which acquired them.
   */
  if ( the_mutex->Attributes.only_owner_release ) {
    if ( !_Thread_Is_executing( holder ) ) {
      _ISR_lock_ISR_enable( lock_context );
      return CORE_MUTEX_STATUS_NOT_OWNER_OF_RESOURCE;
    }
  }

  _Thread_queue_Acquire_critical( &the_mutex->Wait_queue, lock_context );

  /* XXX already unlocked -- not right status */
  if ( !the_mutex->nest_count ) {
    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
    return CORE_MUTEX_STATUS_SUCCESSFUL;
  }

  the_mutex->nest_count--;

  if ( the_mutex->nest_count != 0 ) {
    /*
     *  All error checking is on the locking side, so if the lock was
     *  allowed to be acquired multiple times, then we should just deal
     *  with that.  The RTEMS_DEBUG is just a validation.
     */
#if defined(RTEMS_DEBUG)
    switch ( the_mutex->Attributes.lock_nesting_behavior ) {
      case CORE_MUTEX_NESTING_ACQUIRES:
        _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
        return CORE_MUTEX_STATUS_SUCCESSFUL;
#if defined(RTEMS_POSIX_API)
      case CORE_MUTEX_NESTING_IS_ERROR:
        /* should never occur */
        _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
        return CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED;
#endif
      case CORE_MUTEX_NESTING_BLOCKS:
        /* Currently no API exercises this behavior. */
        break;
    }
#else
    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
    /* must be CORE_MUTEX_NESTING_ACQUIRES or we wouldn't be here */
    return CORE_MUTEX_STATUS_SUCCESSFUL;
#endif
  }

  /*
   *  Formally release the mutex before possibly transferring it to a
   *  blocked thread.
   */
  if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ||
       _CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ) {
    CORE_mutex_Status pop_status =
      _CORE_mutex_Pop_priority( the_mutex, holder );

    if ( pop_status != CORE_MUTEX_STATUS_SUCCESSFUL ) {
      _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
      return pop_status;
    }

    holder->resource_count--;
  }
  the_mutex->holder = NULL;

  /*
   *  Now we check if another thread was waiting for this mutex.  If so,
   *  transfer the mutex to that thread.
   */
  if ( ( the_thread = _Thread_queue_First_locked( &the_mutex->Wait_queue ) ) ) {
    /*
     *  We must extract the thread now since this will restore its default
     *  thread lock.  This is necessary to avoid a deadlock in the
     *  _Thread_Change_priority() below due to a recursive thread queue
     *  lock acquire.
     */
    _Thread_queue_Extract_locked( &the_mutex->Wait_queue, the_thread );

#if defined(RTEMS_MULTIPROCESSING)
    _Thread_Dispatch_disable();

    if ( _Objects_Is_local_id( the_thread->Object.id ) )
#endif
    {
      the_mutex->holder     = the_thread;
      the_mutex->nest_count = 1;

      switch ( the_mutex->Attributes.discipline ) {
        case CORE_MUTEX_DISCIPLINES_FIFO:
        case CORE_MUTEX_DISCIPLINES_PRIORITY:
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
          _CORE_mutex_Push_priority( the_mutex, the_thread );
          the_thread->resource_count++;
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
          _CORE_mutex_Push_priority( the_mutex, the_thread );
          the_thread->resource_count++;
          _Thread_Raise_priority(
            the_thread,
            the_mutex->Attributes.priority_ceiling
          );
          break;
      }
    }

    _Thread_queue_Unblock_critical(
      &the_mutex->Wait_queue,
      the_thread,
      lock_context
    );

#if defined(RTEMS_MULTIPROCESSING)
    if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {
      the_mutex->holder     = NULL;
      the_mutex->nest_count = 1;

      ( *api_mutex_mp_support)( the_thread, id );
    }

    _Thread_Dispatch_enable( _Per_CPU_Get() );
#endif
  } else {
    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
  }

  /*
   *  Whether or not someone is waiting for the mutex, an
   *  inherited priority must be lowered if this is the last
   *  mutex (i.e. resource) this task has.
   */
  if ( !_Thread_Owns_resources( holder ) ) {
    /*
     *  Ensure that the holder resource count is visible to all other
     *  processors and that we read the latest priority restore hint.
     */
    _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

    if ( holder->priority_restore_hint ) {
      Per_CPU_Control *cpu_self;

      cpu_self = _Thread_Dispatch_disable();
      _Thread_Restore_priority( holder );
      _Thread_Dispatch_enable( cpu_self );
    }
  }

  return CORE_MUTEX_STATUS_SUCCESSFUL;
}
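/*
 *  Application-level view of the surrender path (identifiers are
 *  illustrative): releasing a priority-inheritance mutex with
 *  rtems_semaphore_release() transfers ownership to the first waiter and,
 *  if this was the last resource the releasing task held, drops any
 *  priority it inherited while holding the mutex.
 */
static rtems_id example_mutex_id; /* created with RTEMS_INHERIT_PRIORITY */

static void example_critical_section( void )
{
  rtems_status_code sc;

  sc = rtems_semaphore_obtain( example_mutex_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
  if ( sc != RTEMS_SUCCESSFUL ) {
    return;
  }

  /* ... critical section; our priority may be boosted by waiters ... */

  /* Drives _CORE_mutex_Surrender() and, if we inherited a priority,
   * the _Thread_Restore_priority() path above.
   */
  (void) rtems_semaphore_release( example_mutex_id );
}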
static rtems_status_code _Semaphore_Set_priority(
  Semaphore_Control       *the_semaphore,
  const Scheduler_Control *scheduler,
  rtems_task_priority      new_priority,
  rtems_task_priority     *old_priority_p,
  Thread_queue_Context    *queue_context
)
{
  rtems_status_code sc;
  bool              valid;
  Priority_Control  core_priority;
  Priority_Control  old_priority;

  core_priority = _RTEMS_Priority_To_core( scheduler, new_priority, &valid );
  if ( new_priority != RTEMS_CURRENT_PRIORITY && !valid ) {
    return RTEMS_INVALID_PRIORITY;
  }

  _Thread_queue_Acquire_critical(
    &the_semaphore->Core_control.Wait_queue,
    &queue_context->Lock_context
  );

  switch ( the_semaphore->variant ) {
    case SEMAPHORE_VARIANT_MUTEX_PRIORITY_CEILING:
      sc = _Semaphore_Is_scheduler_valid(
        &the_semaphore->Core_control.Mutex,
        scheduler
      );

      old_priority = _CORE_ceiling_mutex_Get_priority(
        &the_semaphore->Core_control.Mutex
      );

      if ( sc == RTEMS_SUCCESSFUL && new_priority != RTEMS_CURRENT_PRIORITY ) {
        _CORE_ceiling_mutex_Set_priority(
          &the_semaphore->Core_control.Mutex,
          core_priority
        );
      }

      break;
#if defined(RTEMS_SMP)
    case SEMAPHORE_VARIANT_MRSP:
      old_priority = _MRSP_Get_priority(
        &the_semaphore->Core_control.MRSP,
        scheduler
      );

      if ( new_priority != RTEMS_CURRENT_PRIORITY ) {
        _MRSP_Set_priority(
          &the_semaphore->Core_control.MRSP,
          scheduler,
          core_priority
        );
      }

      sc = RTEMS_SUCCESSFUL;
      break;
#endif
    default:
      _Assert(
        the_semaphore->variant == SEMAPHORE_VARIANT_MUTEX_INHERIT_PRIORITY
          || the_semaphore->variant == SEMAPHORE_VARIANT_MUTEX_NO_PROTOCOL
          || the_semaphore->variant == SEMAPHORE_VARIANT_SIMPLE_BINARY
          || the_semaphore->variant == SEMAPHORE_VARIANT_COUNTING
      );
      old_priority = 0;
      sc = RTEMS_NOT_DEFINED;
      break;
  }

  _Thread_queue_Release(
    &the_semaphore->Core_control.Wait_queue,
    &queue_context->Lock_context
  );

  *old_priority_p = _RTEMS_Priority_From_core( scheduler, old_priority );
  return sc;
}
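/*
 *  Usage sketch for the directive that ends up in _Semaphore_Set_priority()
 *  (identifiers are illustrative).  Passing RTEMS_CURRENT_PRIORITY queries
 *  the current ceiling without changing it; variants other than ceiling and
 *  MrsP yield RTEMS_NOT_DEFINED, per the default branch above.
 */
static void example_adjust_ceiling( rtems_id ceiling_mutex_id )
{
  rtems_status_code   sc;
  rtems_id            scheduler_id;
  rtems_task_priority old_priority;

  sc = rtems_task_get_scheduler( RTEMS_SELF, &scheduler_id );
  if ( sc != RTEMS_SUCCESSFUL ) {
    return;
  }

  /* Raise the priority ceiling to 5 and remember the previous value. */
  sc = rtems_semaphore_set_priority(
    ceiling_mutex_id,
    scheduler_id,
    5,
    &old_priority
  );
  (void) sc;
}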
rtems_status_code rtems_semaphore_delete( rtems_id id )
{
  Semaphore_Control    *the_semaphore;
  Thread_queue_Context  queue_context;
  Status_Control        status;

  _Objects_Allocator_lock();
  the_semaphore = _Semaphore_Get( id, &queue_context );

  if ( the_semaphore == NULL ) {
    _Objects_Allocator_unlock();

#if defined(RTEMS_MULTIPROCESSING)
    if ( _Semaphore_MP_Is_remote( id ) ) {
      return RTEMS_ILLEGAL_ON_REMOTE_OBJECT;
    }
#endif

    return RTEMS_INVALID_ID;
  }

  _Thread_queue_Acquire_critical(
    &the_semaphore->Core_control.Wait_queue,
    &queue_context.Lock_context
  );

  switch ( the_semaphore->variant ) {
    case SEMAPHORE_VARIANT_MUTEX_INHERIT_PRIORITY:
    case SEMAPHORE_VARIANT_MUTEX_PRIORITY_CEILING:
    case SEMAPHORE_VARIANT_MUTEX_NO_PROTOCOL:
      if (
        _CORE_mutex_Is_locked(
          &the_semaphore->Core_control.Mutex.Recursive.Mutex
        )
      ) {
        status = STATUS_RESOURCE_IN_USE;
      } else {
        status = STATUS_SUCCESSFUL;
      }

      break;
#if defined(RTEMS_SMP)
    case SEMAPHORE_VARIANT_MRSP:
      status = _MRSP_Can_destroy( &the_semaphore->Core_control.MRSP );
      break;
#endif
    default:
      _Assert(
        the_semaphore->variant == SEMAPHORE_VARIANT_SIMPLE_BINARY
          || the_semaphore->variant == SEMAPHORE_VARIANT_COUNTING
      );
      status = STATUS_SUCCESSFUL;
      break;
  }

  if ( status != STATUS_SUCCESSFUL ) {
    _Thread_queue_Release(
      &the_semaphore->Core_control.Wait_queue,
      &queue_context.Lock_context
    );
    _Objects_Allocator_unlock();
    return _Status_Get( status );
  }

  _Objects_Close( &_Semaphore_Information, &the_semaphore->Object );

  switch ( the_semaphore->variant ) {
#if defined(RTEMS_SMP)
    case SEMAPHORE_VARIANT_MRSP:
      _MRSP_Destroy( &the_semaphore->Core_control.MRSP, &queue_context );
      break;
#endif
    default:
      _Assert(
        the_semaphore->variant == SEMAPHORE_VARIANT_MUTEX_INHERIT_PRIORITY
          || the_semaphore->variant == SEMAPHORE_VARIANT_MUTEX_PRIORITY_CEILING
          || the_semaphore->variant == SEMAPHORE_VARIANT_MUTEX_NO_PROTOCOL
          || the_semaphore->variant == SEMAPHORE_VARIANT_SIMPLE_BINARY
          || the_semaphore->variant == SEMAPHORE_VARIANT_COUNTING
      );
      _Thread_queue_Flush_critical(
        &the_semaphore->Core_control.Wait_queue.Queue,
        _Semaphore_Get_operations( the_semaphore ),
        _Thread_queue_Flush_status_object_was_deleted,
        &queue_context
      );
      _Thread_queue_Destroy( &the_semaphore->Core_control.Wait_queue );
      break;
  }

#if defined(RTEMS_MULTIPROCESSING)
  if ( the_semaphore->is_global ) {
    _Objects_MP_Close( &_Semaphore_Information, id );

    _Semaphore_MP_Send_process_packet(
      SEMAPHORE_MP_ANNOUNCE_DELETE,
      id,
      0,  /* Not used */
      0   /* Not used */
    );
  }
#endif

  _Semaphore_Free( the_semaphore );
  _Objects_Allocator_unlock();
  return RTEMS_SUCCESSFUL;
}
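/*
 *  Lifecycle sketch (identifiers are illustrative): deleting a locked
 *  mutex-style semaphore fails with RTEMS_RESOURCE_IN_USE, matching the
 *  STATUS_RESOURCE_IN_USE branch above; an unlocked one is deleted and
 *  any remaining waiters are flushed with an "object was deleted" status.
 */
static void example_semaphore_lifecycle( void )
{
  rtems_status_code sc;
  rtems_id          sem_id;

  sc = rtems_semaphore_create(
    rtems_build_name( 'M', 'U', 'T', 'X' ),
    1,                                        /* initially available */
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
    0,
    &sem_id
  );
  if ( sc != RTEMS_SUCCESSFUL ) {
    return;
  }

  sc = rtems_semaphore_obtain( sem_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );

  /* Held by us: the delete is refused with RTEMS_RESOURCE_IN_USE. */
  sc = rtems_semaphore_delete( sem_id );

  sc = rtems_semaphore_release( sem_id );

  /* Unlocked: the delete succeeds. */
  sc = rtems_semaphore_delete( sem_id );
  (void) sc;
}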