Example #1
static int TryAcquireWrite(
    _Inout_ ReadWriteLock* self
)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t oldState, state, swapState;

    for(;;)
    {
        oldState = ReadLockState(lock);
        state = oldState | OWN_EXCLUSIVE;

        /* Skipped when the lock is empty. */
        if (oldState != 0)
        {
            /* Detect whether there are existing owners. */
            if (LockOwners(oldState) != 0)
                return 0;

            /* Someone must be waiting. Acquiring would jump the queue. */
            if (((CurrentTick() - LockUnfair(oldState)) & 14) != 0)
                return 0;
        }

        /* Ready to take exclusive ownership. */
        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            return 1;
    }
}
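
This example is the standard compare-and-swap retry loop: read the packed state, compute the desired new state, and publish it only if nothing changed in the meantime. As a point of comparison, here is a minimal standalone sketch of the same try-acquire idea written with C11 atomics instead of the PAL's Atomic_CompareAndSwap (which the examples above treat as returning the previously observed value); the single-bit state and the names are illustrative only, not part of the original lock.

#include <stdatomic.h>
#include <stdbool.h>

#define OWN_EXCLUSIVE_BIT 0x1u       /* illustrative stand-in for OWN_EXCLUSIVE */

static atomic_uint g_state;          /* 0 = free, bit 0 = exclusively owned */

static bool TryAcquireExclusiveSketch(void)
{
    unsigned expected = 0;           /* succeed only if nobody owns the lock */
    /* Publish the exclusive bit only if the state is still 0. */
    return atomic_compare_exchange_strong(&g_state, &expected, OWN_EXCLUSIVE_BIT);
}

static void ReleaseExclusiveSketch(void)
{
    atomic_store(&g_state, 0);       /* drop ownership */
}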
Example #2
static int TryAcquireRead(
    _Inout_ ReadWriteLock* self
)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t oldState, state, swapState;

    for(;;)
    {
        oldState = ReadLockState(lock);
        state = oldState + 1;

        /* Skipped when owners is the only active field
         * and we did not try to add too many shared owners. */
        if (state >= OWN_EXCLUSIVE)
        {
            /* Detect whether adding another shared owner is impossible. */
            if (LockOwners(oldState) >= OWN_MAXSHARED)
                return 0;

            /* If writer == exit, no writers are waiting. */
            if ((LockWriter(state) ^ LockExit(state)) != 0)
            {
                /* Writers are waiting. Acquiring would jump the queue. */
                if (((CurrentTick() - LockUnfair(oldState)) & 14) != 0)
                    return 0;
            }
        }

        /* Ready to take shared ownership. */
        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            return 1;
    }
}
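
TryAcquireRead adds a shared owner with the same CAS loop, but only while the owner count stays below OWN_MAXSHARED and no writer is queued. A reduced standalone sketch of that bounded shared-acquire idea, using an assumed writer flag and reader cap instead of the real packed LockFields layout:

#include <stdatomic.h>
#include <stdbool.h>

#define READER_MAX   1000u           /* assumed cap, playing the role of OWN_MAXSHARED */
#define WRITER_BIT   0x80000000u     /* assumed exclusive-owner flag */

static atomic_uint g_rw_state;       /* low bits: reader count */

static bool TryAcquireSharedSketch(void)
{
    unsigned old = atomic_load(&g_rw_state);
    for (;;)
    {
        /* Refuse if a writer owns the lock or the reader count is full. */
        if ((old & WRITER_BIT) != 0 || (old & ~WRITER_BIT) >= READER_MAX)
            return false;
        /* On failure, old is reloaded with the current value and we retry. */
        if (atomic_compare_exchange_weak(&g_rw_state, &old, old + 1))
            return true;
    }
}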
Example #3
/// <summary>
/// Try to begin a general method if no other method is currently running. General methods do not include StopConfiguration.
/// </summary>
MI_Result TryBeginLcmOperation(
    _In_z_ const MI_Char* methodName,
    _Outptr_result_maybenull_ MI_Instance **cimErrorDetails)
{
    MI_Char* originalMethodName;
    int waitResult = 0;

    *cimErrorDetails = NULL;
    originalMethodName = (MI_Char*)Atomic_CompareAndSwap(&g_activeOperationMethodName, (ptrdiff_t) NULL, (ptrdiff_t)methodName);

    if (originalMethodName != NULL)
    {
        // Silently let GetMetaConfiguration go through if the active operation is not SendMetaConfigurationApply
        if (Tcscasecmp(originalMethodName, MSFT_DSCLocalConfigManager_SendMetaConfigurationApply) == 0
            || Tcscasecmp(methodName, MSFT_DSCLocalConfigManager_GetMetaConfiguration) != 0)
        {
            return GetCimMIError3Params(MI_RESULT_FAILED, cimErrorDetails, ID_LCM_MULTIPLE_METHOD_REQUEST, methodName, originalMethodName, methodName);
        }
    }
    waitResult = Sem_TimedWait(&g_h_ConfigurationStoppedEvent, (int)0); //Ignore the result
    return MI_RESULT_OK;
}
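
Here the compare-and-swap claims a single "active operation" slot by exchanging a NULL pointer for the incoming method name; a non-NULL return means another method already owns the slot. A minimal standalone sketch of that pointer-claim pattern with hypothetical names (the release helper is illustrative and not part of the original code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static _Atomic(const char*) g_active_method = NULL;

static bool try_begin_operation(const char* method_name)
{
    const char* expected = NULL;
    /* Succeeds only if no operation is currently active. */
    return atomic_compare_exchange_strong(&g_active_method, &expected, method_name);
}

static void end_operation(void)
{
    atomic_store(&g_active_method, NULL);   /* slot becomes free again */
}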
Example #4
void CheckInjector()
{
    /* The injector might not know we are here.
     * Open up the injector refresh semaphore for this process.
     * Then signal it once and wait for the NitsFT to be patched. */    
    if (Atomic_CompareAndSwap(&NITS_PRESENCE_STUB, NitsPresenceUnknown, NitsStubbedOut) == NitsPresenceUnknown)    
    {                
#if defined(_MSC_VER) || defined(ENABLE_UNITTESTING)
        // On Linux, when not building for unit tests, this block is compiled
        // out, so the check is a no-op and all further calls bail out because
        // NITS_PRESENCE_STUB is NitsStubbedOut once we get here.
        LoadInjectorIfRequired();
        SignalInjector();
#endif
        /* Re-enter through the function table. This should be patched! */
    }
    else
    {
        //printf("\nsignalSent was already 1\n");
    }

    /* The function table should be unchanged. The caller will attempt to
     * replace itself with the simple stub, since the check is complete. */
}
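
The compare-and-swap here acts as a one-shot guard: only the thread that flips NITS_PRESENCE_STUB away from NitsPresenceUnknown performs the injector setup, and every later caller skips it. A standalone sketch of that guard pattern, with hypothetical names:

#include <stdatomic.h>

enum { PRESENCE_UNKNOWN = 0, PRESENCE_RESOLVED = 1 };

static atomic_int g_presence = PRESENCE_UNKNOWN;

static void check_once(void (*setup)(void))
{
    int expected = PRESENCE_UNKNOWN;
    /* Only the thread that wins the CAS runs the setup routine. */
    if (atomic_compare_exchange_strong(&g_presence, &expected, PRESENCE_RESOLVED))
        setup();
}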
Example #5
void CScriptThread::OnQueryJobDone()
{
	// This function is called from the DB thread
	if( !Atomic_CompareAndSwap( &m_nHasDbResult, m_nHasDbResult, 1 ) )
		PutSemaphore(&m_smThread);
}
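
The intent appears to be to post the semaphore only on the 0 to 1 transition of the "DB result ready" flag, so the script thread is woken at most once per result. A standalone sketch of that pattern with hypothetical names, assuming a POSIX semaphore initialized elsewhere with sem_init:

#include <stdatomic.h>
#include <semaphore.h>

static atomic_int g_has_db_result = 0;
static sem_t g_thread_sem;              /* assumed to be initialized elsewhere */

static void on_query_job_done(void)
{
    int expected = 0;
    /* Wake the worker only when the flag actually flips from 0 to 1. */
    if (atomic_compare_exchange_strong(&g_has_db_result, &expected, 1))
        sem_post(&g_thread_sem);
}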
Example #6
static void QueueAcquireWrite(
    _Inout_ ReadWriteLock* self
)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t oldState, state, swapState, preQueuedState;
    size_t waitFor, key, spinState, spinCount;

    for (;;)
    {
        oldState = ReadLockState(lock);
        state = oldState;

        /* If there is no queue, we are the first one to wait;
         * allow unfairness for the current timer tick. */
        if (state <= OWN_EXCLUSIVE)
            LockUnfair(state) = CurrentTick();

        /* Wait for the most recent thread to enter the queue. */
        waitFor = LockEntry(state);

        if (++LockEntry(state) == LockExit(state))
        {
            /* The queue arithmetic will wrap if we continue. */
            Thread_Yield();
            continue;
        }

        /* Make reader threads coming in wait for us. */
        LockWriter(state) = LockEntry(state);

        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            break;
    }

    /* This thread now has a place in the queue.
     * Threads behind us may be depending on us to wake them up. */

    preQueuedState = oldState;
    key = (size_t)lock ^ waitFor;
    spinState = LockSpin(oldState);

    for (;;)
    {
        /* Avoid write prefetching since we expect to wait. */
        oldState = *(ptrdiff_t*)lock;

        if (LockExit(oldState) != waitFor || LockOwners(oldState) != 0)
        {
            /* The thread ahead of us still hasn't acquired,
             * or some reader or writer owns the lock right now. */
            if (((CurrentTick() - LockUnfair(oldState)) & 14) == 0 &&
                LockEntry(oldState) - LockExit(oldState) >= 2 &&
                LockEntry(oldState) == LockEntry(preQueuedState) + 1 &&
                LockOwners(oldState) == 0)
            {
                /* Under certain conditions, we can acquire immediately if we
                 * are the last thread in line and undo joining the queue. */
                if (preQueuedState <= OWN_EXCLUSIVE)
                    state = OWN_EXCLUSIVE;
                else
                {
                    state = oldState + OWN_EXCLUSIVE;
                    LockEntry(state) = LockEntry(preQueuedState);
                    LockWriter(state) = LockWriter(preQueuedState);
                }

                /* Atomically de-queue and acquire unfairly. */
                swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
                if (swapState == oldState)
                    return;
                continue;
            }

            /* spinState being low means spinning usually works.
             * Use a high count if it has been working recently. */
            spinCount = (spinState & SPIN_SIGN) ?
                CONDLOCK_LOW_SPINCOUNT :
                CONDLOCK_HIGH_SPINCOUNT;

            /* Spin and/or block until something changes.
             * Adjust the spin field based on whether spinning worked. */
            if (CondLock_Wait(key, (ptrdiff_t*)lock, oldState, spinCount))
                spinState = (spinState > 2) ? (spinState - 2) : 0;
            else
                spinState = (spinState < SPIN_MAX) ? (spinState + 1) : spinState;
            continue;
        }

        state = oldState + OWN_EXCLUSIVE;

        /* Bump the exit ticket number. We're leaving the queue. */
        LockExit(state)++;

        /* Zero the top 4 fields if the queue is now empty. */
        if (LockExit(state) == LockEntry(state))
            state = LockOwners(state);
        else
        {
            /* Not empty, but we just acquired fairly.
             * Allow unfairness for a while. */
            LockUnfair(state) = CurrentTick();
            LockSpin(state) = spinState;
        }

        /* Ready to take exclusive ownership. */
        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            return;
    }
}
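
QueueAcquireWrite is built on an entry/exit ticket queue packed into a single word together with the owner, writer, unfair and spin fields. Stripped of that packing, the underlying ticket idea reduces to the plain ticket spinlock sketched below with C11 atomics; this illustrates the concept only, not the real LockFields layout:

#include <stdatomic.h>

typedef struct TicketLock {
    atomic_uint entry;   /* next ticket to hand out */
    atomic_uint exit;    /* ticket currently being served */
} TicketLock;

static void TicketLock_Acquire(TicketLock* t)
{
    unsigned my = atomic_fetch_add(&t->entry, 1);     /* join the queue */
    while (atomic_load(&t->exit) != my)               /* wait for our turn */
        ;                                             /* spin (real code spins, then blocks) */
}

static void TicketLock_Release(TicketLock* t)
{
    atomic_fetch_add(&t->exit, 1);                    /* serve the next ticket */
}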
Example #7
static void QueueAcquireRead(
    _Inout_ ReadWriteLock* self
)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t oldState, state, swapState, preQueuedState;
    size_t waitFor, diff, key, spinState, spinCount;

    for (;;)
    {
        oldState = ReadLockState(lock);
        state = oldState;

        /* If there is no queue, we are the first one to wait;
         * allow unfairness for the current timer tick. */
        if (state <= OWN_EXCLUSIVE)
            LockUnfair(state) = CurrentTick();

        /* Insert a barrier every half revolution.
         * This stops writer arithmetic from wrapping. */
        if ((LockEntry(state) & ~FIELD_SIGN) == 0)
            LockWriter(state) = LockEntry(state);

        if (++LockEntry(state) == LockExit(state))
        {
            /* The queue arithmetic will wrap if we continue. */
            Thread_Yield();
            continue;
        }

        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            break;
    }

    /* This thread now has a place in the queue.
     * Threads behind us may be depending on us to wake them up. */

    /* Wait for the most recent writer to enter the queue. */
    waitFor = LockWriter(state);
    key = (size_t)lock ^ waitFor;
    preQueuedState = oldState;
    spinState = LockSpin(oldState);

    for (;;)
    {
        /* Avoid write prefetching since we expect to wait. */
        oldState = *(ptrdiff_t*)lock;

        diff = LockExit(oldState) - waitFor;
        if ((diff & FIELD_SIGN) == 0)
        {
            /* The writer ahead of us in line already acquired.
             * Someone could have beat us unfairly.
             * Just wait for the current owner. */
            waitFor = LockExit(oldState);
            key = (size_t)lock ^ waitFor;
        }

        if ((diff & FIELD_SIGN) != 0 || (LockOwners(oldState) == OWN_EXCLUSIVE))
        {
            /* The writer ahead of us still hasn't acquired,
             * or someone owns the lock exclusively right now. */
            if (((CurrentTick() - LockUnfair(oldState)) & 14) == 0 &&
                LockEntry(oldState) - LockExit(oldState) >= 2 &&
                LockEntry(oldState) == LockEntry(preQueuedState) + 1 &&
                (LockOwners(oldState) < OWN_MAXSHARED))
            {
                /* Under certain conditions, we can acquire immediately if we
                 * are the last thread in line and undo joining the queue. */
                if (preQueuedState <= OWN_EXCLUSIVE)
                    state = LockOwners(oldState) + 1;
                else
                {
                    state = oldState + 1;
                    LockEntry(state) = LockEntry(preQueuedState);
                    LockWriter(state) = LockWriter(preQueuedState);
                }

                /* Atomically de-queue and acquire unfairly. */
                swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
                if (swapState == oldState)
                    return;
                continue;
            }

            /* spinState being low means spinning usually works.
             * Use a high count if it has been working recently. */
            spinCount = (spinState & SPIN_SIGN) ?
                CONDLOCK_LOW_SPINCOUNT :
                CONDLOCK_HIGH_SPINCOUNT;

            /* Spin and/or block until something changes.
             * Adjust the spin field based on whether spinning worked. */
            if (CondLock_Wait(key, (ptrdiff_t*)lock, oldState, spinCount))
                spinState = (spinState > 2) ? (spinState - 2) : 0;
            else
                spinState = (spinState < SPIN_MAX) ? (spinState + 1) : spinState;
            continue;
        }
        
        if (LockOwners(oldState) == OWN_MAXSHARED)
        {
            /* The owner arithmetic will overflow if we continue. */
            Thread_Yield();
            continue;
        }

        state = oldState + 1;

        /* Bump the exit ticket number. We're leaving the queue. */
        LockExit(state)++;

        /* Zero the top 4 fields if the queue is now empty. */
        if (LockExit(state) == LockEntry(state))
            state = LockOwners(state);
        else
        {
            /* Not empty, but we just acquired fairly.
             * Allow unfairness for a while. */
            LockUnfair(state) = CurrentTick();
            LockSpin(state) = spinState;
        }

        /* Ready to take shared ownership. */
        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            break;
    }

    if ((LockExit(state) & ~FIELD_SIGN) == 0)
    {
        /* Wakes those waiting on the artificial barrier inserted each half
         * revolution (see above). */
        key = (size_t)lock ^ LockExit(state);
        CondLock_Broadcast(key);
    }
}
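
Because the entry/exit tickets are narrow fields that wrap around, QueueAcquireRead compares them by testing the sign bit of their modular difference (the FIELD_SIGN check on diff) rather than comparing the raw values. A standalone sketch of that wrap-safe comparison, assuming a 16-bit ticket width purely for illustration:

#include <stdint.h>
#include <stdbool.h>

static bool TicketReached(uint16_t exit_ticket, uint16_t wait_for)
{
    /* Modular subtraction; the high bit plays the role of FIELD_SIGN. */
    uint16_t diff = (uint16_t)(exit_ticket - wait_for);
    return (diff & 0x8000u) == 0;   /* nonnegative: exit has passed wait_for */
}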
Example #8
void CachedLock_AcquireWrite(
    _Inout_ CachedLock* self
)
{
    ptrdiff_t oldState, state, swapState;
    ptrdiff_t oldMask, zeroMask, index;
    volatile ptrdiff_t* master = &self->master;

    /* The order of steps here is important.
     *  1) Stop shared readers from racing through the latches.
     *  2) Scan the latches to find inactive ones.
     *  3) Get exclusive access to the central lock.
     *  4) Wait for existing shared readers in the latches to leave.
     *
     * Doing (3) before (1) lets readers race through the latches if the
     *   central lock is still held by a reader from previous contention.
     * Doing (3) before (2) leads to deadlock if there are no active latches
     *   and another writer gets the central lock first.
     * Doing (3) after (4) lets readers race through the central lock. */

    for (;;)
    {
        oldState = PAL_PREFETCH(master);

        /* The first thread atomically sets s_masterMask. */
        if (oldState == 0)
            state = s_masterMask + MASTER_INCREMENT;
        else
            state = oldState + MASTER_INCREMENT;

        swapState = Atomic_CompareAndSwap(master, oldState, state);
        if (swapState == oldState)
            break;
    }

    /* Reader threads will now observe that master != 0. */

    if (oldState == 0)
    {
        /* This is the thread that set s_masterMask. */
        zeroMask = 0;
        for (index = 0; index < POOL_LINES; index++)
        {
            /* Determine if all shared threads are gone. */
            if (self->latches[LATCHES_PER_LINE * index] == 0)
                zeroMask |= ((ptrdiff_t)1 << index);
        }

        /* Determine if there are any CPUs with shared threads remaining.
         * Other exclusive threads could be waiting. */
        if ((Atomic_And(master, ~zeroMask) & MASTER_MASK) == 0)
            CondLock_Broadcast((ptrdiff_t)self);
    }

    ReadWriteLock_AcquireWrite(&self->lock);

    for (;;)
    {
        /* Wait for all the latches to empty. */
        oldMask = *master;
        if ((oldMask & MASTER_MASK) == 0)
            return;

        CondLock_Wait((ptrdiff_t)self, master, oldMask, CONDLOCK_DEFAULT_SPINCOUNT);
    }
}
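
CachedLock_AcquireWrite follows a "big reader" scheme: the writer first publishes itself through the master word so new readers stop racing through the per-CPU latches, and only then waits for the remaining latch counts to drain. A much-reduced standalone sketch of steps 1 and 4 of that ordering, spinning where the real code blocks in CondLock_Wait; the latch count and all names are assumptions:

#include <stdatomic.h>

#define LATCH_COUNT 8                          /* assumed number of per-CPU latches */

typedef struct CachedLockSketch {
    atomic_int writer_pending;                 /* nonzero: readers must take the central path */
    atomic_int latches[LATCH_COUNT];           /* per-CPU shared-reader counts */
} CachedLockSketch;

static void AcquireWriteSketch(CachedLockSketch* cl)
{
    atomic_fetch_add(&cl->writer_pending, 1);  /* step 1: stop new readers from latching */

    for (int i = 0; i < LATCH_COUNT; i++)      /* step 4: wait for existing readers to leave */
        while (atomic_load(&cl->latches[i]) != 0)
            ;                                  /* spin (the real code blocks instead) */
    /* Steps 2 and 3 (scanning for inactive latches, taking the central
     * ReadWriteLock) are omitted; see the ordering comment in the code above. */
}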