Example #1
0
//----------------------------------------------------------------------------
// SpinLock::SpinToAcquire   , non-inline function, called from inline Acquire
//  
//  Spin waiting for a spinlock to become free.
//
//  
void
SpinLock::SpinToAcquire ()
{
	ULONG				ulBackoffs = 0;
	ULONG				ulSpins = 0;

	while (true)
	{
		for (unsigned i = ulSpins+10000;
			 ulSpins < i;
			 ulSpins++)
		{
			// Note: Must cast through volatile to ensure the lock is
			// refetched from memory.
			//
			if (*((volatile LONG*)&m_lock) == 0)
			{
				break;
			}
			pause();			// indicate to the processor that we are spining 
		}

		// Try the inline atomic test again.
		//
		if (GetLockNoWait ())
		{
			break;
		}

        //backoff
        ulBackoffs++;

		if ((ulBackoffs % BACKOFF_LIMIT) == 0)
		{	
			Sleep (500);
		}
		else
        {
			__SwitchToThread (0);
        }
	}

#ifdef _DEBUG
		//profile info
	SpinLockProfiler::IncrementCollisions (m_LockType);
	SpinLockProfiler::IncrementSpins (m_LockType, ulSpins);
	SpinLockProfiler::IncrementBackoffs (ulBackoffs);
#endif

} // SpinLock::SpinToAcquire ()
Example #2
0
// Causes the calling thread to yield execution to another thread that is ready to run on the current processor.
// Parameters:
//  switchCount - number of times the YieldThread was called in a loop
//                (forwarded to __SwitchToThread; presumably lets the OS layer
//                escalate the yield as the caller keeps failing — TODO confirm)
void GCToOSInterface::YieldThread(uint32_t switchCount)
{
    LIMITED_METHOD_CONTRACT;
    // First argument is a sleep duration in milliseconds (cf. the Sleep
    // wrapper in this interface); 0 requests a pure yield with no sleep.
    __SwitchToThread(0, switchCount);
}
Example #3
0
// Causes the calling thread to sleep for the specified number of milliseconds
// Parameters:
//  sleepMSec   - time to sleep before switching to another thread
void GCToOSInterface::Sleep(uint32_t sleepMSec)
{
    LIMITED_METHOD_CONTRACT;
    // Second argument is the switch count used for yield escalation (cf. the
    // YieldThread wrapper); 0 here because this is a plain timed sleep.
    __SwitchToThread(sleepMSec, 0);
}
Example #4
0
// Main loop of the finalizer thread.  Repeatedly: waits on the finalizer
// event, optionally forces a blocking gen-2 GC for heap-dump requests
// (Linux + event-trace builds only), drains the finalization queue,
// performs AppDomain-unload finalization work, and signals any threads
// waiting for the queue to drain.  Exits when fQuitFinalizer becomes true.
VOID FinalizerThread::FinalizerThreadWorker(void *args)
{
    // TODO: The following line should be removed after contract violation is fixed.
    // See bug 27409
    SCAN_IGNORE_THROW;
    SCAN_IGNORE_TRIGGER;

    // This is used to stitch together the exception handling at the base of our thread with
    // any eventual transitions into different AppDomains for finalization.
    _ASSERTE(args != NULL);
    pThreadTurnAround = (ManagedThreadCallState *) args;

    // Tracks whether the thread-priority boost below has already succeeded,
    // so it is attempted at most until it succeeds once.
    BOOL bPriorityBoosted = FALSE;

    while (!fQuitFinalizer)
    {
        // Wait for work to do...

        _ASSERTE(GetFinalizerThread()->PreemptiveGCDisabled());
#ifdef _DEBUG
        if (g_pConfig->FastGCStressLevel())
        {
            GetFinalizerThread()->m_GCOnTransitionsOK = FALSE;
        }
#endif
        // Switch to preemptive mode before blocking on the event below.
        GetFinalizerThread()->EnablePreemptiveGC();
#ifdef _DEBUG
        if (g_pConfig->FastGCStressLevel())
        {
            GetFinalizerThread()->m_GCOnTransitionsOK = TRUE;
        }
#endif
#if 0
        // Setting the event here, instead of at the bottom of the loop, could
        // cause us to skip draining the Q, if the request is made as soon as
        // the app starts running.
        SignalFinalizationDone(TRUE);
#endif //0

        WaitForFinalizerEvent (hEventFinalizer);

#if defined(__linux__) && defined(FEATURE_EVENT_TRACE)
        // A heap dump was requested and the previous one is old enough:
        // force a blocking gen-2 collection before proceeding.
        if (g_TriggerHeapDump && (CLRGetTickCount64() > (LastHeapDumpTime + LINUX_HEAP_DUMP_TIME_OUT)))
        {
            s_forcedGCInProgress = true;
            GetFinalizerThread()->DisablePreemptiveGC();
            GCHeapUtilities::GetGCHeap()->GarbageCollect(2, FALSE, collection_blocking);
            GetFinalizerThread()->EnablePreemptiveGC();
            s_forcedGCInProgress = false;

            LastHeapDumpTime = CLRGetTickCount64();
            g_TriggerHeapDump = FALSE;
        }
#endif

        // One-time boost of the finalizer thread's priority.
        if (!bPriorityBoosted)
        {
            if (GetFinalizerThread()->SetThreadPriority(THREAD_PRIORITY_HIGHEST))
                bPriorityBoosted = TRUE;
        }

        GetFinalizerThread()->DisablePreemptiveGC();

        // TODO: The following call causes 12 more classes loaded.
        //if (!fNameSet) {
        //    fNameSet = TRUE;
        //    GetFinalizerThread()->SetName(L"FinalizerThread");
        //}

#ifdef _DEBUG
        // <TODO> workaround.  make finalization very lazy for gcstress 3 or 4.  
        // only do finalization if the system is quiescent</TODO>
        if (g_pConfig->GetGCStressLevel() > 1)
        {
            size_t last_gc_count;
            DWORD dwSwitchCount = 0;

            do
            {
                last_gc_count = GCHeapUtilities::GetGCHeap()->CollectionCount(0);
                GetFinalizerThread()->m_GCOnTransitionsOK = FALSE; 
                GetFinalizerThread()->EnablePreemptiveGC();
                __SwitchToThread (0, ++dwSwitchCount);
                GetFinalizerThread()->DisablePreemptiveGC();             
                // If no GCs happended, then we assume we are quiescent
                GetFinalizerThread()->m_GCOnTransitionsOK = TRUE; 
            } while (GCHeapUtilities::GetGCHeap()->CollectionCount(0) - last_gc_count > 0);
        }
#endif //_DEBUG

        // we might want to do some extra work on the finalizer thread
        // check and do it
        if (GetFinalizerThread()->HaveExtraWorkForFinalizer())
        {
            GetFinalizerThread()->DoExtraWorkForFinalizer();
        }
        LOG((LF_GC, LL_INFO100, "***** Calling Finalizers\n"));
        // We may mark the finalizer thread for abort.  If so the abort request is for previous finalizer method, not for next one.
        if (GetFinalizerThread()->IsAbortRequested())
        {
            GetFinalizerThread()->EEResetAbort(Thread::TAR_ALL);
        }
        // Publish that finalization is now in progress.
        FastInterlockExchange ((LONG*)&g_FinalizerIsRunning, TRUE);
        AppDomain::EnableADUnloadWorkerForFinalizer();

        // Drain the finalization queue; keep looping while there is
        // AppDomain-unload work, or while an unloading domain still has
        // objects left to finalize.
        do
        {
            FinalizeAllObjects(NULL, 0);
            _ASSERTE(GetFinalizerThread()->GetDomain()->IsDefaultDomain());

            if (AppDomain::HasWorkForFinalizerThread())
            {
                AppDomain::ProcessUnloadDomainEventOnFinalizeThread();                
            }
            else if (UnloadingAppDomain == NULL)
                break;
            else if (!GCHeapUtilities::GetGCHeap()->FinalizeAppDomain(UnloadingAppDomain, fRunFinalizersOnUnload))
            {
                break;
            }
            // Now schedule any objects from an unloading app domain for finalization 
            // on the next pass (even if they are reachable.)
            // Note that it may take several passes to complete the unload, if new objects are created during
            // finalization.
        }
        while(TRUE);

        if (UnloadingAppDomain != NULL)
        {
            SyncBlockCache::GetSyncBlockCache()->CleanupSyncBlocksInAppDomain(UnloadingAppDomain);
            {
                // Before we continue with AD unloading, mark the stage as
                // FINALIZED under the SystemDomain lock so that this portion
                // of unloading may be serialized with other parts of the CLR
                // that require the AD stage to be < FINALIZED, in particular
                // ETW's AD enumeration code used during its rundown events.
                SystemDomain::LockHolder lh;
                UnloadingAppDomain->SetFinalized(); // All finalizers have run except for FinalizableAndAgile objects
            }
            UnloadingAppDomain = NULL;
        }

        FastInterlockExchange ((LONG*)&g_FinalizerIsRunning, FALSE);
        // We may still have the finalizer thread for abort.  If so the abort request is for previous finalizer method, not for next one.
        if (GetFinalizerThread()->IsAbortRequested())
        {
            GetFinalizerThread()->EEResetAbort(Thread::TAR_ALL);
        }

        // Increment the loop count. This is currently used by the AddMemoryPressure heuristic to see
        // if finalizers have run since the last time it triggered GC.
        FastInterlockIncrement((LONG *)&g_FinalizerLoopCount);

        // Anyone waiting to drain the Q can now wake up.  Note that there is a
        // race in that another thread starting a drain, as we leave a drain, may
        // consider itself satisfied by the drain that just completed.  This is
        // acceptable.
        SignalFinalizationDone(TRUE);
    }
}
Example #5
0
//=====================================================================        
void SimpleRWLock::EnterRead()
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_CAN_TAKE_LOCK;

    // Custom contract is needed for PostEnter()'s unscoped GC_NoTrigger counter change
#ifdef ENABLE_CONTRACTS_IMPL
    CheckGCNoTrigger();
#endif //ENABLE_CONTRACTS_IMPL 

    GCX_MAYBE_PREEMP(m_gcMode == PREEMPTIVE);

#ifdef _DEBUG
    PreEnter();
#endif //_DEBUG

    DWORD dwSwitchCount = 0;

    while (TRUE)
    {
        // prevent writers from being starved. This assumes that writers are rare and 
        // dont hold the lock for a long time. 
        while (IsWriterWaiting())
        {
            int spinCount = m_spinCount;
            while (spinCount > 0) {
                spinCount--;
                YieldProcessor();
            }
            __SwitchToThread(0, ++dwSwitchCount);
        }

        if (TryEnterRead())
        {
            return;
        }

        DWORD i = g_SpinConstants.dwInitialDuration;
        do
        {
            if (TryEnterRead())
            {
                return;
            }

            if (g_SystemInfo.dwNumberOfProcessors <= 1)
            {
                break;
            }
            // Delay by approximately 2*i clock cycles (Pentium III).
            // This is brittle code - future processors may of course execute this
            // faster or slower, and future code generators may eliminate the loop altogether.
            // The precise value of the delay is not critical, however, and I can't think
            // of a better way that isn't machine-dependent.
            for (int delayCount = i; --delayCount; ) 
            {
                YieldProcessor();           // indicate to the processor that we are spining 
            }

            // exponential backoff: wait a factor longer in the next iteration
            i *= g_SpinConstants.dwBackoffFactor;
        }
        while (i < g_SpinConstants.dwMaximumDuration);

        __SwitchToThread(0, ++dwSwitchCount);
    }
}
Example #6
0
//=====================================================================        
void SimpleRWLock::EnterWrite()
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_CAN_TAKE_LOCK;

    // Custom contract is needed for PostEnter()'s unscoped GC_NoTrigger counter change
#ifdef ENABLE_CONTRACTS_IMPL
    CheckGCNoTrigger();
#endif //ENABLE_CONTRACTS_IMPL

    GCX_MAYBE_PREEMP(m_gcMode == PREEMPTIVE);

#ifdef _DEBUG
    PreEnter();
#endif //_DEBUG

    BOOL set = FALSE;

    DWORD dwSwitchCount = 0;

    while (TRUE)
    {
        if (TryEnterWrite())
        {
            return;
        }

        // set the writer waiting word, if not already set, to notify potential
        // readers to wait. Remember, if the word is set, so it can be reset later.
        if (!IsWriterWaiting())
        {
            SetWriterWaiting();
            set = TRUE;
        }

        DWORD i = g_SpinConstants.dwInitialDuration;
        do
        {
            if (TryEnterWrite())
            {
                return;
            }

            if (g_SystemInfo.dwNumberOfProcessors <= 1)
            {
                break;
            }
            // Delay by approximately 2*i clock cycles (Pentium III).
            // This is brittle code - future processors may of course execute this
            // faster or slower, and future code generators may eliminate the loop altogether.
            // The precise value of the delay is not critical, however, and I can't think
            // of a better way that isn't machine-dependent.
            for (int delayCount = i; --delayCount; ) 
            {
                YieldProcessor();           // indicate to the processor that we are spining 
            }

            // exponential backoff: wait a factor longer in the next iteration
            i *= g_SpinConstants.dwBackoffFactor;
        }
        while (i < g_SpinConstants.dwMaximumDuration);

        __SwitchToThread(0, ++dwSwitchCount);
    }
}