Example #1
//---------------------------------------------------------------------------
void Mutex_Release( Mutex_t *pstMutex_ )
{
    KERNEL_TRACE_1( STR_MUTEX_RELEASE_1, (K_USHORT)Thread_GetID( g_pstCurrent ) );

    K_BOOL bSchedule = 0;

    // Disable the scheduler while we deal with internal data structures.
    Scheduler_SetScheduler( false );

    // This thread had better be the one that owns the Mutex_t currently...
    KERNEL_ASSERT( (g_pstCurrent == pstMutex_->pstOwner) );

    // If the owner had claimed the lock multiple times, decrease the lock
    // count and return immediately.
    if (pstMutex_->ucRecurse)
    {
        pstMutex_->ucRecurse--;
        Scheduler_SetScheduler( true );
        return;
    }

    // Restore the thread's original priority
    if (Thread_GetCurPriority( g_pstCurrent ) != Thread_GetPriority( g_pstCurrent ))
    {
        Thread_SetPriority( g_pstCurrent, Thread_GetPriority(g_pstCurrent) );
        // In this case, we want to reschedule
        bSchedule = 1;
    }

    // No threads are waiting on this Mutex_t?
    if ( LinkList_GetHead( (LinkList_t*)pstMutex_ ) == NULL)
    {
        // Re-initialize the Mutex_t to its default values
        pstMutex_->bReady = 1;
        pstMutex_->ucMaxPri = 0;
        pstMutex_->pstOwner = NULL;
    }
    else
    {
        // Wake the highest priority Thread_t pending on the Mutex_t
        if( Mutex_WakeNext( pstMutex_ ) )
        {
            // Switch threads if it's higher or equal priority than the current thread
            bSchedule = 1;
        }
    }

    // Must enable the scheduler again in order to switch threads.
    Scheduler_SetScheduler( true );
    if(bSchedule)
    {
        // Switch threads if a higher-priority thread was woken
        Thread_Yield();
    }
}
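A minimal usage sketch of the claim/release pairing above, not taken from the examples: it assumes a build without KERNEL_USE_TIMEOUTS (so the claim call in Example #9 takes only the Mutex_t pointer), and the names below are illustrative only.

// Hedged sketch - stDriverLock and Driver_Write are illustrative names,
// and stDriverLock is assumed to be initialized elsewhere.
static Mutex_t stDriverLock;

static void Driver_Write( void )
{
    Mutex_Claim( &stDriverLock );     // first claim: bReady -> 0, pstOwner set
    Mutex_Claim( &stDriverLock );     // recursive claim: only ucRecurse increments

    // ... access the shared resource ...

    Mutex_Release( &stDriverLock );   // pops one recursion level and returns early
    Mutex_Release( &stDriverLock );   // real release: restores priority, wakes a waiter
}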
Example #2
//---------------------------------------------------------------------------
void Notify_Wait( Notify_t *pstNotify_, bool *pbFlag_ )
{
    CS_ENTER();
    // Block the current thread on the notification object's wait list
    BlockingObject_Block( (ThreadList_t*)pstNotify_, Scheduler_GetCurrentThread() );
    if (pbFlag_)
    {
        // Clear the caller's flag while the thread is still blocked
        *pbFlag_ = false;
    }
    CS_EXIT();

    // Give up the CPU; execution resumes here once Notify_Signal() unblocks us
    Thread_Yield();
    if (pbFlag_)
    {
        // The notification was received - report it through the flag
        *pbFlag_ = true;
    }
}
Example #3
//---------------------------------------------------------------------------
void TimedNotify_Callback( Thread_t *pstOwner_, void *pvData_ )
{
    Notify_t *pstNotify = (Notify_t*)(pvData_);

    // Indicate on the thread that the wait has timed out
    Thread_SetExpired( pstOwner_, true );

    // Wake up the thread that was blocked on this notification object.
    Notify_WakeMe( pstNotify, pstOwner_ );

    // Yield if the woken thread has priority >= the currently running thread
    if ( Thread_GetCurPriority( pstOwner_ ) >=
         Thread_GetCurPriority( Scheduler_GetCurrentThread() ) )
    {
        Thread_Yield();
    }
}
Example #4
/*!
 * \brief TimedMutex_Calback
 *
 * This function is called from the timer-expired context to trigger a timeout
 * on this Mutex_t.  It wakes the thread whose Mutex_t claim call did not
 * complete in time.
 *
 * \param pstOwner_ Pointer to the thread to wake
 * \param pvData_   Pointer to the Mutex_t object that the thread is blocked on
 */
void TimedMutex_Calback(Thread_t *pstOwner_, void *pvData_)
{
    Mutex_t *pstMutex = (Mutex_t*)(pvData_);

    // Indicate on the thread that the wait has timed out
    Thread_SetExpired( pstOwner_, true );

    // Wake up the thread that was blocked on this Mutex_t.
    Mutex_WakeMe( pstMutex, pstOwner_ );

    // Yield if the woken thread has priority >= the currently running thread
    if ( Thread_GetCurPriority( pstOwner_ ) >=
         Thread_GetCurPriority( Scheduler_GetCurrentThread() ) )
    {
        Thread_Yield();
    }
}
Example #5
bool Notify_Wait( Notify_t *pstNotify_, K_ULONG ulWaitTimeMS_, bool *pbFlag_ )
{
    bool bUseTimer = false;
    Timer_t stNotifyTimer;

    CS_ENTER();
    if (ulWaitTimeMS_)
    {
        // Arm a one-shot timer to wake this thread if the wait times out
        bUseTimer = true;
        Thread_SetExpired( Scheduler_GetCurrentThread(), false );

        Timer_Init( &stNotifyTimer );
        Timer_Start( &stNotifyTimer, 0, ulWaitTimeMS_, TimedNotify_Callback, (void*)pstNotify_);
    }

    // Block the current thread on the notification object's wait list
    BlockingObject_Block( (ThreadList_t*)pstNotify_, g_pstCurrent );

    if (pbFlag_)
    {
        *pbFlag_ = false;
    }
    CS_EXIT();

    Thread_Yield();

    if (pbFlag_)
    {
        *pbFlag_ = true;
    }

    if (bUseTimer)
    {
        // Cancel the timeout timer; report whether we were signalled in time
        Timer_Stop( &stNotifyTimer );
        return ( Thread_GetExpired( Scheduler_GetCurrentThread() ) == false );
    }

    return true;
}
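For clarity, a sketch of how a caller might use this timed wait: the return value is false only when TimedNotify_Callback marked the thread as expired. The 50 ms value, stRxNotify, and bDataReady are illustrative only.

// Hedged sketch - stRxNotify is assumed to be an initialized Notify_t.
bool bDataReady = false;

if ( Notify_Wait( &stRxNotify, 50, &bDataReady ) )
{
    // Signalled within 50 ms; bDataReady was set back to true on wakeup
}
else
{
    // Timed out; no notification arrived
}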
Example #6
//---------------------------------------------------------------------------
void Notify_Signal( Notify_t *pstNotify_ )
{
    bool bReschedule = false;

    CS_ENTER();
    // Unblock every thread waiting on this notification object
    Thread_t *pstCurrent = (Thread_t*)LinkList_GetHead( (LinkList_t*)pstNotify_ );
    while (pstCurrent != NULL)
    {
        BlockingObject_UnBlock( pstCurrent );
        // Reschedule if any woken thread has priority >= the running thread
        if ( !bReschedule &&
           ( Thread_GetCurPriority( pstCurrent ) >=
             Thread_GetCurPriority( Scheduler_GetCurrentThread() ) ) )
        {
            bReschedule = true;
        }
        pstCurrent = (Thread_t*)LinkList_GetHead( (LinkList_t*)pstNotify_ );
    }
    CS_EXIT();

    if (bReschedule)
    {
        Thread_Yield();
    }
}
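Putting Examples #2 and #6 together, a sketch of the intended pairing: one thread blocks in Notify_Wait() while another calls Notify_Signal() to wake every waiter. The object and function names below are assumptions, not part of the examples.

// Hedged sketch - g_stNotify and the two functions are illustrative only;
// g_stNotify is assumed to be initialized elsewhere.
static Notify_t g_stNotify;

static void Consumer_Main( void )
{
    bool bFlag;
    for (;;)
    {
        Notify_Wait( &g_stNotify, &bFlag );  // blocks until signalled; bFlag ends up true
        // ... handle the event ...
    }
}

static void Producer_Main( void )
{
    // Wakes every thread blocked on g_stNotify; yields if any woken thread
    // has priority >= the caller.
    Notify_Signal( &g_stNotify );
}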
Example #7
static void QueueAcquireWrite(
    _Inout_ ReadWriteLock* self
)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t oldState, state, swapState, preQueuedState;
    size_t waitFor, key, spinState, spinCount;

    for (;;)
    {
        oldState = ReadLockState(lock);
        state = oldState;

        /* If there is no queue, we are the first one to wait;
         * allow unfairness for the current timer tick. */
        if (state <= OWN_EXCLUSIVE)
            LockUnfair(state) = CurrentTick();

        /* Wait for the most recent thread to enter the queue. */
        waitFor = LockEntry(state);

        if (++LockEntry(state) == LockExit(state))
        {
            /* The queue arithmetic will wrap if we continue. */
            Thread_Yield();
            continue;
        }

        /* Make reader threads coming in wait for us. */
        LockWriter(state) = LockEntry(state);

        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            break;
    }

    /* This thread now has a place in the queue.
     * Threads behind us may be depending on us to wake them up. */

    preQueuedState = oldState;
    key = (size_t)lock ^ waitFor;
    spinState = LockSpin(oldState);

    for (;;)
    {
        /* Avoid write prefetching since we expect to wait. */
        oldState = *(ptrdiff_t*)lock;

        if (LockExit(oldState) != waitFor || LockOwners(oldState) != 0)
        {
            /* The thread ahead of us still hasn't acquired,
             * or some reader or writer owns the lock right now. */
            if (((CurrentTick() - LockUnfair(oldState)) & 14) == 0 &&
                LockEntry(oldState) - LockExit(oldState) >= 2 &&
                LockEntry(oldState) == LockEntry(preQueuedState) + 1 &&
                LockOwners(oldState) == 0)
            {
                /* Under certain conditions, we can acquire immediately if we
                 * are the last thread in line and undo joining the queue. */
                if (preQueuedState <= OWN_EXCLUSIVE)
                    state = OWN_EXCLUSIVE;
                else
                {
                    state = oldState + OWN_EXCLUSIVE;
                    LockEntry(state) = LockEntry(preQueuedState);
                    LockWriter(state) = LockWriter(preQueuedState);
                }

                /* Atomically de-queue and acquire unfairly. */
                swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
                if (swapState == oldState)
                    return;
                continue;
            }

            /* spinState being low means spinning usually works.
             * Use a high count if it has been working recently. */
            spinCount = (spinState & SPIN_SIGN) ?
                CONDLOCK_LOW_SPINCOUNT :
                CONDLOCK_HIGH_SPINCOUNT;

            /* Spin and/or block until something changes.
             * Adjust the spin field based on whether spinning worked. */
            if (CondLock_Wait(key, (ptrdiff_t*)lock, oldState, spinCount))
                spinState = (spinState > 2) ? (spinState - 2) : 0;
            else
                spinState = (spinState < SPIN_MAX) ? (spinState + 1) : spinState;
            continue;
        }

        state = oldState + OWN_EXCLUSIVE;

        /* Bump the exit ticket number. We're leaving the queue. */
        LockExit(state)++;

        /* Zero the top 4 fields if the queue is now empty. */
        if (LockExit(state) == LockEntry(state))
            state = LockOwners(state);
        else
        {
            /* Not empty, but we just acquired fairly.
             * Allow unfairness for a while. */
            LockUnfair(state) = CurrentTick();
            LockSpin(state) = spinState;
        }

        /* Ready to take exclusive ownership. */
        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            return;
    }
}
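QueueAcquireWrite() is the queued slow path: it always takes a ticket. A plausible caller-facing fast path, sketched under the assumption that a completely free lock has a zero state word, could CAS straight to exclusive ownership and fall back to the queue otherwise; the wrapper name is hypothetical.

/* Hedged sketch - not from the source.  Assumes a fully free lock has state 0;
 * ReadWriteLock_AcquireWrite is an illustrative wrapper name. */
void ReadWriteLock_AcquireWrite(
    _Inout_ ReadWriteLock* self
)
{
    volatile LockFields* lock = (LockFields*)self;

    /* Try to jump from "no owners, no queue" straight to exclusive ownership. */
    if (Atomic_CompareAndSwap(LockState(lock), 0, OWN_EXCLUSIVE) == 0)
        return;

    /* Contended: take a ticket and wait (or acquire unfairly inside). */
    QueueAcquireWrite(self);
}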
Example #8
static void QueueAcquireRead(
    _Inout_ ReadWriteLock* self
)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t oldState, state, swapState, preQueuedState;
    size_t waitFor, diff, key, spinState, spinCount;

    for (;;)
    {
        oldState = ReadLockState(lock);
        state = oldState;

        /* If there is no queue, we are the first one to wait;
         * allow unfairness for the current timer tick. */
        if (state <= OWN_EXCLUSIVE)
            LockUnfair(state) = CurrentTick();

        /* Insert a barrier every half revolution.
         * This stops writer arithmetic from wrapping. */
        if ((LockEntry(state) & ~FIELD_SIGN) == 0)
            LockWriter(state) = LockEntry(state);

        if (++LockEntry(state) == LockExit(state))
        {
            /* The queue arithmetic will wrap if we continue. */
            Thread_Yield();
            continue;
        }

        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            break;
    }

    /* This thread now has a place in the queue.
     * Threads behind us may be depending on us to wake them up. */

    /* Wait for the most recent writer to enter the queue. */
    waitFor = LockWriter(state);
    key = (size_t)lock ^ waitFor;
    preQueuedState = oldState;
    spinState = LockSpin(oldState);

    for (;;)
    {
        /* Avoid write prefetching since we expect to wait. */
        oldState = *(ptrdiff_t*)lock;

        diff = LockExit(oldState) - waitFor;
        if ((diff & FIELD_SIGN) == 0)
        {
            /* The writer ahead of us in line already acquired.
             * Someone could have beat us unfairly.
             * Just wait for the current owner. */
            waitFor = LockExit(oldState);
            key = (size_t)lock ^ waitFor;
        }

        if ((diff & FIELD_SIGN) != 0 || (LockOwners(oldState) == OWN_EXCLUSIVE))
        {
            /* The writer ahead of us still hasn't acquired,
             * or someone owns the lock exclusively right now. */
            if (((CurrentTick() - LockUnfair(oldState)) & 14) == 0 &&
                LockEntry(oldState) - LockExit(oldState) >= 2 &&
                LockEntry(oldState) == LockEntry(preQueuedState) + 1 &&
                (LockOwners(oldState) < OWN_MAXSHARED))
            {
                /* Under certain conditions, we can acquire immediately if we
                 * are the last thread in line and undo joining the queue. */
                if (preQueuedState <= OWN_EXCLUSIVE)
                    state = LockOwners(oldState) + 1;
                else
                {
                    state = oldState + 1;
                    LockEntry(state) = LockEntry(preQueuedState);
                    LockWriter(state) = LockWriter(preQueuedState);
                }

                /* Atomically de-queue and acquire unfairly. */
                swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
                if (swapState == oldState)
                    return;
                continue;
            }

            /* spinState being low means spinning usually works.
             * Use a high count if it has been working recently. */
            spinCount = (spinState & SPIN_SIGN) ?
                CONDLOCK_LOW_SPINCOUNT :
                CONDLOCK_HIGH_SPINCOUNT;

            /* Spin and/or block until something changes.
             * Adjust the spin field based on whether spinning worked. */
            if (CondLock_Wait(key, (ptrdiff_t*)lock, oldState, spinCount))
                spinState = (spinState > 2) ? (spinState - 2) : 0;
            else
                spinState = (spinState < SPIN_MAX) ? (spinState + 1) : spinState;
            continue;
        }
        
        if (LockOwners(oldState) == OWN_MAXSHARED)
        {
            /* The owner arithmetic will overflow if we continue. */
            Thread_Yield();
            continue;
        }

        state = oldState + 1;

        /* Bump the exit ticket number. We're leaving the queue. */
        LockExit(state)++;

        /* Zero the top 4 fields if the queue is now empty. */
        if (LockExit(state) == LockEntry(state))
            state = LockOwners(state);
        else
        {
            /* Not empty, but we just acquired fairly.
             * Allow unfairness for a while. */
            LockUnfair(state) = CurrentTick();
            LockSpin(state) = spinState;
        }

        /* Ready to take shared ownership. */
        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            break;
    }

    if ((LockExit(state) & ~FIELD_SIGN) == 0)
    {
        /* Wakes those waiting on the artificial barrier inserted each half
         * revolution (see above). */
        key = (size_t)lock ^ LockExit(state);
        CondLock_Broadcast(key);
    }
}
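The shared side would mirror that fast path. The sketch below additionally assumes that a state word below OWN_MAXSHARED means "only shared owners, no queue, room for one more reader"; the wrapper name is again hypothetical.

/* Hedged sketch - not from the source.  ReadWriteLock_AcquireRead is an
 * illustrative wrapper name; the OWN_MAXSHARED comparison is an assumption
 * about the field layout. */
void ReadWriteLock_AcquireRead(
    _Inout_ ReadWriteLock* self
)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t oldState = ReadLockState(lock);

    /* Uncontended: bump the shared-owner count by one. */
    if (oldState < OWN_MAXSHARED &&
        Atomic_CompareAndSwap(LockState(lock), oldState, oldState + 1) == oldState)
        return;

    /* Contended: take a ticket and wait (or acquire unfairly inside). */
    QueueAcquireRead(self);
}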
Example #9
//---------------------------------------------------------------------------
#if KERNEL_USE_TIMEOUTS
K_BOOL Mutex_Claim( Mutex_t *pstMutex_, K_ULONG ulWaitTimeMS_ )
#else
void Mutex_Claim( Mutex_t *pstMutex_ )
#endif
{
    KERNEL_TRACE_1( STR_MUTEX_CLAIM_1, (K_USHORT)Thread_GetID( g_pstCurrent ) );

#if KERNEL_USE_TIMEOUTS
    Timer_t stTimer;
    K_BOOL bUseTimer = false;
#endif

    // Disable the scheduler while claiming the Mutex_t - we're dealing with all
    // sorts of private thread data, can't have a thread switch while messing
    // with internal data structures.
    Scheduler_SetScheduler( false );

    // Check to see if the Mutex_t is claimed or not
    if (pstMutex_->bReady != 0)
    {
        // Mutex_t isn't claimed, claim it.
        pstMutex_->bReady = 0;
        pstMutex_->ucRecurse = 0;
        pstMutex_->ucMaxPri = Thread_GetPriority( g_pstCurrent );
        pstMutex_->pstOwner = g_pstCurrent;

        Scheduler_SetScheduler( true );

#if KERNEL_USE_TIMEOUTS
        return true;
#else
        return;
#endif
    }

    // If the Mutex_t is already claimed, check to see if this is the owner thread,
    // since we allow the Mutex_t to be claimed recursively.
    if (g_pstCurrent == pstMutex_->pstOwner)
    {
        // Ensure that we haven't exceeded the maximum recursive-lock count
        KERNEL_ASSERT( (pstMutex_->ucRecurse < 255) );
        pstMutex_->ucRecurse++;

        // Increment the lock count and bail
        Scheduler_SetScheduler( true );
#if KERNEL_USE_TIMEOUTS
        return true;
#else
        return;
#endif
    }

    // The Mutex_t is claimed already - we have to block now.  Move the
    // current thread to the list of threads waiting on the Mutex_t.
#if KERNEL_USE_TIMEOUTS
    if (ulWaitTimeMS_)
    {
        Thread_SetExpired( g_pstCurrent, false );

        Timer_Init( &stTimer );
        Timer_Start( &stTimer, false, ulWaitTimeMS_, (TimerCallback_t)TimedMutex_Calback, (void*)pstMutex_);
        bUseTimer = true;
    }
#endif
    BlockingObject_Block( (ThreadList_t*)pstMutex_, g_pstCurrent );

    // Check if priority inheritance is necessary.  We do this in order
    // to ensure that we don't end up with priority inversions in case
    // multiple threads are waiting on the same resource.
    if(pstMutex_->ucMaxPri <= Thread_GetPriority( g_pstCurrent ) )
    {
        pstMutex_->ucMaxPri = Thread_GetPriority( g_pstCurrent );

        Thread_t *pstTemp = (Thread_t*)(LinkList_GetHead( (LinkList_t*)pstMutex_ ));
        while(pstTemp)
        {
            Thread_InheritPriority( pstTemp, pstMutex_->ucMaxPri );
            if(pstTemp == (Thread_t*)(LinkList_GetTail( (LinkList_t*)pstMutex_ )) )
            {
                break;
            }
            pstTemp = (Thread_t*)LinkListNode_GetNext( (LinkListNode_t*)pstTemp );
        }
        Thread_InheritPriority( pstMutex_->pstOwner, pstMutex_->ucMaxPri );
    }

    // Done with thread data - re-enable the scheduler.
    Scheduler_SetScheduler( true );

    // Yield - this thread is now blocked, so another thread must run
    Thread_Yield();

#if KERNEL_USE_TIMEOUTS
    if (bUseTimer)
    {
        Timer_Stop( &stTimer );
        return ( Thread_GetExpired( g_pstCurrent ) == 0);
    }
    return true;
#endif
}
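With KERNEL_USE_TIMEOUTS enabled, the claim call takes a timeout in milliseconds and returns K_BOOL, so the caller can tell a successful claim from a timeout. A short sketch; the 100 ms value and stDeviceMutex are illustrative only.

// Hedged sketch - assumes KERNEL_USE_TIMEOUTS is enabled and stDeviceMutex
// has been initialized elsewhere.
if ( Mutex_Claim( &stDeviceMutex, 100 ) )
{
    // Claimed within 100 ms - do the work, then release.
    Mutex_Release( &stDeviceMutex );
}
else
{
    // Timed out: TimedMutex_Calback marked this thread as expired and woke it.
}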