// ----- Example 1 -----
// static 
// Attaches the calling OS thread to the runtime: finishes initializing its Thread
// object and inserts it into the ThreadStore's thread list.
//
// fAcquireThreadStoreLock - when true, the ThreadStore lock is acquired around the
//     list insertion; a caller that already holds the lock passes false.
void ThreadStore::AttachCurrentThread(bool fAcquireThreadStoreLock)
{
    //
    // step 1: ThreadStore::InitCurrentThread
    // step 2: add this thread to the ThreadStore
    //

    // The thread has been constructed, during which some data is initialized (like which RuntimeInstance the
    // thread belongs to), but it hasn't been added to the thread store because doing so takes a lock, which 
    // we want to avoid at construction time because the loader lock is held then.
    Thread * pAttachingThread = RawGetCurrentThread();

    // On CHK build, validate that our GetThread assembly implementation matches the C++ implementation using
    // TLS.
    CreateCurrentThreadBuffer();

    // Look up the Thread* recorded in fiber-local storage for the current fiber (if any).
    // This is used below to detect illegal fiber/thread migration.
    ASSERT(_fls_index != FLS_OUT_OF_INDEXES);
    Thread* pThreadFromCurrentFiber = (Thread*)PalFlsGetValue(_fls_index);

    if (pAttachingThread->IsInitialized())
    {
        // The thread is already attached; the only legal way to get here is re-entering
        // on the same fiber that originally attached (the thread's "home" fiber).
        if (pThreadFromCurrentFiber != pAttachingThread)
        {
            ASSERT_UNCONDITIONALLY("Multiple fibers encountered on a single thread");
            RhFailFast();
        }

        return;
    }

    // The thread is not initialized, so no fiber should have recorded it yet. A non-NULL
    // value means a fiber that attached on some other thread is now running here.
    if (pThreadFromCurrentFiber != NULL)
    {
        ASSERT_UNCONDITIONALLY("Multiple threads encountered from a single fiber");
        RhFailFast();
    }

    //
    // Init the thread buffer
    //
    pAttachingThread->Construct();
    ASSERT(pAttachingThread->m_ThreadStateFlags == Thread::TSF_Unknown);

    // Insert into the thread list under the store's writer lock (unless the caller
    // already holds it, per fAcquireThreadStoreLock).
    ThreadStore* pTS = GetThreadStore();
    ReaderWriterLock::WriteHolder write(&pTS->m_Lock, fAcquireThreadStoreLock);

    //
    // Set thread state to be attached
    //
    ASSERT(pAttachingThread->m_ThreadStateFlags == Thread::TSF_Unknown);
    pAttachingThread->m_ThreadStateFlags = Thread::TSF_Attached;

    pTS->m_ThreadList.PushHead(pAttachingThread);

    //
    // Associate the current fiber with the current thread.  This makes the current fiber the thread's "home"
    // fiber.  This fiber is the only fiber allowed to execute managed code on this thread.  When this fiber
    // is destroyed, we consider the thread to be destroyed.
    //
    PalFlsSetValue(_fls_index, pAttachingThread);
}
// ----- Example 2 -----
// Debug-only sanity check driven by the RH_DisallowRuntimeServicesFallback config
// setting: optionally fail fast depending on whether the fallback PAL provider is
// in use.  No-op in release builds or when the setting is 0/unset.
void CheckForPalFallback()
{
#ifdef _DEBUG
    UInt32 setting = g_pRhConfig->GetDisallowRuntimeServicesFallback();

    // The fallback provider doesn't implement write watch, so the write watch
    // capability serves as a proxy for "not running on the fallback provider"
    // since we have no direct way to query that from here.
    switch (setting)
    {
    case 1:
        // Setting == 1: fail fast if we ARE running against the fallback provider.
        if (!PalHasCapability(WriteWatchCapability))
            RhFailFast();
        break;

    case 2:
        // Setting == 2: fail fast if we are NOT running against the fallback provider.
        if (PalHasCapability(WriteWatchCapability))
            RhFailFast();
        break;

    default:
        // 0 (or any other value): no restriction requested.
        break;
    }
#endif // _DEBUG
}
// ----- Example 3 -----
// Detaches the calling thread from the runtime, but only if the fiber currently
// running is the thread's "home" fiber (the one recorded at attach time).  Called
// on every fiber-destroy notification; non-home fibers are ignored.
void ThreadStore::DetachCurrentThreadIfHomeFiber()
{
    //
    // Note: we call this when each *fiber* is destroyed, because we receive that notification outside
    // of the Loader Lock.  This allows us to safely acquire the ThreadStore lock.  However, we have to be
    // extra careful to avoid cleaning up a thread unless the fiber being destroyed is the thread's "home"
    // fiber, as recorded in AttachCurrentThread.
    //

    // The thread may not have been initialized because it may never have run managed code before.
    Thread * pDetachingThread = RawGetCurrentThread();

    // The Thread* (if any) that AttachCurrentThread stored in fiber-local storage
    // for the fiber that is now being destroyed.
    ASSERT(_fls_index != FLS_OUT_OF_INDEXES);
    Thread* pThreadFromCurrentFiber = (Thread*)PalFlsGetValue(_fls_index);

    if (!pDetachingThread->IsInitialized())
    {
        // The thread never attached, so no fiber should claim it as home.
        if (pThreadFromCurrentFiber != NULL)
        {
            ASSERT_UNCONDITIONALLY("Detaching a fiber from an unknown thread");
            RhFailFast();
        }
        return;
    }

    if (pThreadFromCurrentFiber == NULL)
    {
        // we've seen this thread, but not this fiber.  It must be a "foreign" fiber that was 
        // borrowing this thread.
        return;
    }

    // The fiber recorded a thread, but not this one: a home fiber migrated across
    // threads, which the runtime does not support.
    if (pThreadFromCurrentFiber != pDetachingThread)
    {
        ASSERT_UNCONDITIONALLY("Detaching a thread from the wrong fiber");
        RhFailFast();
    }

#ifdef STRESS_LOG
    // Hand the thread's stress log back to the logger before the thread goes away.
    ThreadStressLog * ptsl = reinterpret_cast<ThreadStressLog *>(
        pDetachingThread->GetThreadStressLog());
    StressLog::ThreadDetach(ptsl);
#endif // STRESS_LOG

    // Remove the thread from the store under the writer lock, then tear it down.
    // The count() assert verifies the thread appears exactly once in the list.
    ThreadStore* pTS = GetThreadStore();
    ReaderWriterLock::WriteHolder write(&pTS->m_Lock);
    ASSERT(rh::std::count(pTS->m_ThreadList.Begin(), pTS->m_ThreadList.End(), pDetachingThread) == 1);
    pTS->m_ThreadList.RemoveFirst(pDetachingThread);
    pDetachingThread->Destroy();
}
// Unregister a previously registered callout. Removes the first registration that matches on both callout
// address and filter type. Causes a fail fast if the registration doesn't exist.
void RestrictedCallouts::UnregisterRefCountedHandleCallback(void * pCalloutMethod, EEType * pTypeFilter)
{
    CrstHolder lh(&s_sLock);

    HandleTableRestrictedCallout * pCurrCallout = s_pHandleTableRestrictedCallouts;
    HandleTableRestrictedCallout * pPrevCallout = NULL;

    while (pCurrCallout)
    {
        if ((pCurrCallout->m_pCalloutMethod == pCalloutMethod) &&
            (pCurrCallout->m_pTypeFilter == pTypeFilter))
        {
            // Found a matching entry, remove it from the chain.
            if (pPrevCallout)
                pPrevCallout->m_pNext = pCurrCallout->m_pNext;
            else
                s_pHandleTableRestrictedCallouts = pCurrCallout->m_pNext;

            delete pCurrCallout;

            return;
        }

        pPrevCallout = pCurrCallout;
        pCurrCallout = pCurrCallout->m_pNext;
    }

    // If we get here we didn't find a matching registration, indicating a bug on the part of the caller.
    ASSERT_UNCONDITIONALLY("Attempted to unregister restricted callout that wasn't registered.");
    RhFailFast();
}
// ----- Example 5 -----
void DllThreadDetach()
{
    // BEWARE: loader lock is held here!

    // Should have already received a call to FiberDetach for this thread's "home" fiber.
    Thread* pCurrentThread = ThreadStore::GetCurrentThreadIfAvailable();
    if (pCurrentThread != NULL && !pCurrentThread->IsDetached())
    {
        ASSERT_UNCONDITIONALLY("Detaching thread whose home fiber has not been detached");
        RhFailFast();
    }
}
// Unregister a previously registered callout. Removes the first registration that matches on both callout
// kind and address. Causes a fail fast if the registration doesn't exist.
void RestrictedCallouts::UnregisterGcCallout(GcRestrictedCalloutKind eKind, void * pCalloutMethod)
{
    // Reject out-of-range callout kinds before indexing the table.
    if (eKind >= GCRC_Count)
    {
        ASSERT_UNCONDITIONALLY("Invalid GC restricted callout kind.");
        RhFailFast();
    }

    CrstHolder lh(&s_sLock);

    // Walk the per-kind chain through a pointer-to-link so head and interior
    // entries are unlinked with the same code.
    GcRestrictedCallout ** ppLink = &s_rgGcRestrictedCallouts[eKind];

    for (GcRestrictedCallout * pCallout = *ppLink;
         pCallout != NULL;
         pCallout = *ppLink)
    {
        if (pCallout->m_pCalloutMethod == pCalloutMethod)
        {
            // Found the matching entry: unlink and free it.
            *ppLink = pCallout->m_pNext;
            delete pCallout;
            return;
        }

        ppLink = &pCallout->m_pNext;
    }

    // If we get here we didn't find a matching registration, indicating a bug on the part of the caller.
    ASSERT_UNCONDITIONALLY("Attempted to unregister restricted callout that wasn't registered.");
    RhFailFast();
}
// ----- Example 7 -----
void DllThreadDetach()
{
    // BEWARE: loader lock is held here!

    // Should have already received a call to FiberDetach for this thread's "home" fiber.
    Thread* pCurrentThread = ThreadStore::GetCurrentThreadIfAvailable();
    if (pCurrentThread != NULL && !pCurrentThread->IsDetached())
    {
        // Once shutdown starts, RuntimeThreadShutdown callbacks are ignored, implying that
        // it is no longer guaranteed that exiting threads will be detached.
        if (!g_processShutdownHasStarted)
        {
            ASSERT_UNCONDITIONALLY("Detaching thread whose home fiber has not been detached");
            RhFailFast();
        }
    }
}
// Register callback of the given type to the method with the given address. The most recently registered
// callbacks are called first. Returns true on success, false if insufficient memory was available for the
// registration.
bool RestrictedCallouts::RegisterGcCallout(GcRestrictedCalloutKind eKind, void * pCalloutMethod)
{
    // Reject out-of-range callout kinds before indexing the table.
    if (eKind >= GCRC_Count)
    {
        ASSERT_UNCONDITIONALLY("Invalid GC restricted callout kind.");
        RhFailFast();
    }

    // Allocate outside the lock; report failure to the caller rather than throwing.
    GcRestrictedCallout * pNewCallout = new (nothrow) GcRestrictedCallout();
    if (pNewCallout == NULL)
        return false;

    pNewCallout->m_pCalloutMethod = pCalloutMethod;

    CrstHolder lh(&s_sLock);

    // Push onto the head of the per-kind chain so the newest registration runs first.
    GcRestrictedCallout *& pHead = s_rgGcRestrictedCallouts[eKind];
    pNewCallout->m_pNext = pHead;
    pHead = pNewCallout;

    return true;
}
// ----- Example 9 -----
// Probe CPUID for the features this runtime depends on and record the results
// (fail fast on x86 if the required FXSR support is absent).
void DetectCPUFeatures()
{
#if !defined(CORERT) // @TODO: CORERT: DetectCPUFeatures

#ifdef _X86_
    // fxsave / fxrstor are required.  They were added with the Pentium II, so they
    // are effectively always available, but double-check anyway and fail fast if
    // the feature bit is absent.
    CPU_INFO info;
    PalCpuIdEx(1, 0, &info);
    if ((info.Edx & X86_FXSR) == 0)
        RhFailFast();
#endif

#ifdef _AMD64_
    // AMD's "fast" fxsave/fxrstor mode omits saving the xmm registers; the OS
    // enables it when supported.  If we keep using fxsave/fxrstor we must then
    // save/restore the xmm registers manually, so remember whether it's on.
    CPU_INFO info;
    PalCpuIdEx(0x80000001, 0, &info);
    if ((info.Edx & AMD_FFXSR) != 0)
        g_fHasFastFxsave = true;
#endif

#endif // !CORERT
}
// ----- Example 10 -----
void ThreadStore::WaitForSuspendComplete()
{
    UInt32 waitResult = m_SuspendCompleteEvent.Wait(INFINITE, false);
    if (waitResult == WAIT_FAILED)
        RhFailFast();
}