Example #1
HRESULT CLRTestHookManager::AddTestHook(ICLRTestHook* hook)
{
    WRAPPER_NO_CONTRACT;
    // Atomically claim the next slot; the returned value is the new (1-based) count.
    DWORD newidx=FastInterlockIncrement(&m_nHooks);
    if (newidx>=NumItems(m_pHooks))
    {
        // No free slot: undo the claim and report overflow.
        FastInterlockDecrement(&m_nHooks);
        return DISP_E_OVERFLOW;
    }
    // Publish the hook into the slot that was just claimed.
    m_pHooks[newidx-1].Set(hook);
    return S_OK;
}
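
The pattern above claims a slot by incrementing a shared counter and rolls the claim back with a matching decrement when the table is full. Below is a minimal, self-contained sketch of the same idea using std::atomic instead of the CLR's FastInterlock* wrappers; the names SlotTable and TryAdd are illustrative and do not come from the CLR sources.

#include <array>
#include <atomic>
#include <cstdio>

struct SlotTable
{
    std::array<void*, 8> slots{};   // fixed-size table, playing the role of m_pHooks
    std::atomic<long>    count{0};  // shared counter, playing the role of m_nHooks

    // Returns true if p was stored, false if the table is full.
    bool TryAdd(void* p)
    {
        long newidx = ++count;                          // claim a 1-based slot
        if (newidx > static_cast<long>(slots.size()))
        {
            --count;                                    // roll the claim back on overflow
            return false;
        }
        slots[newidx - 1] = p;                          // publish into the claimed slot
        return true;
    }
};

int main()
{
    SlotTable table;
    int dummy = 0;
    std::printf("added: %s\n", table.TryAdd(&dummy) ? "yes" : "no");
    return 0;
}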
Example #2
void CNameSpace::GcRuntimeStructuresValid (BOOL bValid)
{
    WRAPPER_NO_CONTRACT;
    if (!bValid)
    {
        // Structures are becoming invalid: bump the invalid-count; it must end up positive.
        LONG result;
        result = FastInterlockIncrement (&m_GcStructuresInvalidCnt);
        _ASSERTE (result > 0);
    }
    else
    {
        // Structures are valid again: the count must never drop below zero.
        LONG result;
        result = FastInterlockDecrement (&m_GcStructuresInvalidCnt);
        _ASSERTE (result >= 0);
    }
}
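
Here FastInterlockIncrement and FastInterlockDecrement keep a balanced "invalid" count, and the assertions check that every decrement is matched by an earlier increment. The same discipline is often enforced with an RAII guard; below is a minimal sketch using std::atomic and assert, with illustrative names only (GcStructuresGuard is not a CLR type).

#include <atomic>
#include <cassert>

std::atomic<long> g_invalidCount{0};   // counterpart of m_GcStructuresInvalidCnt

// Marks the structures invalid for the lifetime of the guard, so the
// increment and the matching decrement can never get out of balance.
class GcStructuresGuard
{
public:
    GcStructuresGuard()
    {
        long result = ++g_invalidCount;   // mark invalid
        assert(result > 0);
        (void)result;
    }
    ~GcStructuresGuard()
    {
        long result = --g_invalidCount;   // mark valid again
        assert(result >= 0);
        (void)result;
    }
};

void RebuildStructures()
{
    GcStructuresGuard guard;   // structures count as invalid while this scope is active
    // ... mutate the structures here ...
}

int main()
{
    RebuildStructures();
    return 0;
}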
Example #3
VOID FinalizerThread::FinalizerThreadWorker(void *args)
{
    // TODO: The following line should be removed after contract violation is fixed.
    // See bug 27409
    SCAN_IGNORE_THROW;
    SCAN_IGNORE_TRIGGER;

    // This is used to stitch together the exception handling at the base of our thread with
    // any eventual transitions into different AppDomains for finalization.
    _ASSERTE(args != NULL);
    pThreadTurnAround = (ManagedThreadCallState *) args;

    BOOL bPriorityBoosted = FALSE;

    while (!fQuitFinalizer)
    {
        // Wait for work to do...

        _ASSERTE(GetFinalizerThread()->PreemptiveGCDisabled());
#ifdef _DEBUG
        if (g_pConfig->FastGCStressLevel())
        {
            GetFinalizerThread()->m_GCOnTransitionsOK = FALSE;
        }
#endif
        GetFinalizerThread()->EnablePreemptiveGC();
#ifdef _DEBUG
        if (g_pConfig->FastGCStressLevel())
        {
            GetFinalizerThread()->m_GCOnTransitionsOK = TRUE;
        }
#endif
#if 0
        // Setting the event here, instead of at the bottom of the loop, could
        // cause us to skip draining the Q, if the request is made as soon as
        // the app starts running.
        SignalFinalizationDone(TRUE);
#endif //0

        WaitForFinalizerEvent (hEventFinalizer);

#if defined(__linux__) && defined(FEATURE_EVENT_TRACE)
        if (g_TriggerHeapDump && (CLRGetTickCount64() > (LastHeapDumpTime + LINUX_HEAP_DUMP_TIME_OUT)))
        {
            s_forcedGCInProgress = true;
            GetFinalizerThread()->DisablePreemptiveGC();
            GCHeapUtilities::GetGCHeap()->GarbageCollect(2, FALSE, collection_blocking);
            GetFinalizerThread()->EnablePreemptiveGC();
            s_forcedGCInProgress = false;
            
            LastHeapDumpTime = CLRGetTickCount64();
            g_TriggerHeapDump = FALSE;
        }
#endif

        if (!bPriorityBoosted)
        {
            if (GetFinalizerThread()->SetThreadPriority(THREAD_PRIORITY_HIGHEST))
                bPriorityBoosted = TRUE;
        }

        GetFinalizerThread()->DisablePreemptiveGC();

        // TODO: The following call causes 12 more classes to be loaded.
        //if (!fNameSet) {
        //    fNameSet = TRUE;
        //    GetFinalizerThread()->SetName(L"FinalizerThread");
        //}

#ifdef _DEBUG
        // <TODO> workaround.  make finalization very lazy for gcstress 3 or 4.  
        // only do finalization if the system is quiescent</TODO>
        if (g_pConfig->GetGCStressLevel() > 1)
        {
            size_t last_gc_count;
            DWORD dwSwitchCount = 0;

            do
            {
                last_gc_count = GCHeapUtilities::GetGCHeap()->CollectionCount(0);
                GetFinalizerThread()->m_GCOnTransitionsOK = FALSE; 
                GetFinalizerThread()->EnablePreemptiveGC();
                __SwitchToThread (0, ++dwSwitchCount);
                GetFinalizerThread()->DisablePreemptiveGC();             
                // If no GCs happened, then we assume we are quiescent
                GetFinalizerThread()->m_GCOnTransitionsOK = TRUE; 
            } while (GCHeapUtilities::GetGCHeap()->CollectionCount(0) - last_gc_count > 0);
        }
#endif //_DEBUG

        // We might want to do some extra work on the finalizer thread;
        // check for it and do it if needed.
        if (GetFinalizerThread()->HaveExtraWorkForFinalizer())
        {
            GetFinalizerThread()->DoExtraWorkForFinalizer();
        }
        LOG((LF_GC, LL_INFO100, "***** Calling Finalizers\n"));
        // The finalizer thread may be marked for abort.  If so, the abort request is for the previous finalizer method, not for the next one.
        if (GetFinalizerThread()->IsAbortRequested())
        {
            GetFinalizerThread()->EEResetAbort(Thread::TAR_ALL);
        }
        FastInterlockExchange ((LONG*)&g_FinalizerIsRunning, TRUE);
        AppDomain::EnableADUnloadWorkerForFinalizer();

        do
        {
            FinalizeAllObjects(NULL, 0);
            _ASSERTE(GetFinalizerThread()->GetDomain()->IsDefaultDomain());

            if (AppDomain::HasWorkForFinalizerThread())
            {
                AppDomain::ProcessUnloadDomainEventOnFinalizeThread();                
            }
            else if (UnloadingAppDomain == NULL)
                break;
            else if (!GCHeapUtilities::GetGCHeap()->FinalizeAppDomain(UnloadingAppDomain, fRunFinalizersOnUnload))
            {
                break;
            }
            // Now schedule any objects from an unloading app domain for finalization 
            // on the next pass (even if they are reachable.)
            // Note that it may take several passes to complete the unload, if new objects are created during
            // finalization.
        }
        while(TRUE);

        if (UnloadingAppDomain != NULL)
        {
            SyncBlockCache::GetSyncBlockCache()->CleanupSyncBlocksInAppDomain(UnloadingAppDomain);
            {
                // Before we continue with AD unloading, mark the stage as
                // FINALIZED under the SystemDomain lock so that this portion
                // of unloading may be serialized with other parts of the CLR
                // that require the AD stage to be < FINALIZED, in particular
                // ETW's AD enumeration code used during its rundown events.
                SystemDomain::LockHolder lh;
                UnloadingAppDomain->SetFinalized(); // All finalizers have run except for FinalizableAndAgile objects
            }
            UnloadingAppDomain = NULL;
        }

        FastInterlockExchange ((LONG*)&g_FinalizerIsRunning, FALSE);
        // The finalizer thread may still be marked for abort.  If so, the abort request is for the previous finalizer method, not for the next one.
        if (GetFinalizerThread()->IsAbortRequested())
        {
            GetFinalizerThread()->EEResetAbort(Thread::TAR_ALL);
        }

        // Increment the loop count. This is currently used by the AddMemoryPressure heuristic to see
        // if finalizers have run since the last time it triggered GC.
        FastInterlockIncrement((LONG *)&g_FinalizerLoopCount);

        // Anyone waiting to drain the Q can now wake up.  Note that there is a
        // race in that another thread starting a drain, as we leave a drain, may
        // consider itself satisfied by the drain that just completed.  This is
        // acceptable.
        SignalFinalizationDone(TRUE);
    }
}
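
Stripped of the GC, AppDomain-unload, and stress-mode details, the worker above is a wait / drain / signal loop with a pass counter that lets other threads tell whether finalization has made progress since they last looked. Below is a minimal, self-contained sketch of that skeleton using standard C++ primitives; all names here (FinalizerLoop, RequestDrain, WaitForDrain, ...) are illustrative and are not CLR APIs.

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

class FinalizerLoop
{
    std::mutex              m_lock;
    std::condition_variable m_wake;              // plays the role of hEventFinalizer
    std::condition_variable m_done;              // plays the role of SignalFinalizationDone
    bool                    m_workRequested = false;
    bool                    m_quit = false;
    std::atomic<long>       m_loopCount{0};      // counterpart of g_FinalizerLoopCount

public:
    // Ask the worker to drain the queue (like setting the finalizer event).
    void RequestDrain()
    {
        {
            std::lock_guard<std::mutex> g(m_lock);
            m_workRequested = true;
        }
        m_wake.notify_one();
    }

    // Request a drain and block until at least one full pass has completed.
    void WaitForDrain()
    {
        long before = m_loopCount.load();
        RequestDrain();
        std::unique_lock<std::mutex> g(m_lock);
        m_done.wait(g, [&] { return m_loopCount.load() > before || m_quit; });
    }

    // Worker loop: wait for work, drain, bump the pass counter, signal waiters.
    void Run()
    {
        std::unique_lock<std::mutex> g(m_lock);
        while (!m_quit)
        {
            m_wake.wait(g, [&] { return m_workRequested || m_quit; });
            m_workRequested = false;

            g.unlock();
            // ... drain the finalization queue here (the FinalizeAllObjects step) ...
            g.lock();

            ++m_loopCount;           // progress counter, as in the loop above
            m_done.notify_all();     // anyone waiting to drain the queue can wake up
        }
    }

    void Stop()
    {
        {
            std::lock_guard<std::mutex> g(m_lock);
            m_quit = true;
        }
        m_wake.notify_one();
        m_done.notify_all();
    }
};

int main()
{
    FinalizerLoop loop;
    std::thread worker([&] { loop.Run(); });
    loop.WaitForDrain();   // request a drain and wait for the pass to finish
    loop.Stop();
    worker.join();
    return 0;
}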
Example #4
ULONG CLRTestHookManager::AddRef()
{
    return FastInterlockIncrement(&m_cRef);
}
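
AddRef is one half of the usual COM-style reference-counting pair: the matching Release typically decrements the count with FastInterlockDecrement and destroys the object when it reaches zero (what CLRTestHookManager's own Release actually does is not shown here). Below is a minimal sketch of that general pattern with std::atomic; the RefCounted class is illustrative only.

#include <atomic>

// A generic COM-style reference-counted object; the name is illustrative.
class RefCounted
{
    std::atomic<long> m_cRef{1};   // the creator starts out owning one reference

public:
    unsigned long AddRef()
    {
        return static_cast<unsigned long>(++m_cRef);   // like FastInterlockIncrement
    }

    unsigned long Release()
    {
        long cRef = --m_cRef;                          // like FastInterlockDecrement
        if (cRef == 0)
            delete this;                               // last reference dropped
        return static_cast<unsigned long>(cRef);
    }

protected:
    virtual ~RefCounted() = default;                   // destroy only through Release()
};

int main()
{
    RefCounted* p = new RefCounted();
    p->AddRef();      // count: 2
    p->Release();     // count: 1
    p->Release();     // count: 0, object deleted
    return 0;
}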