Example #1
// static 
void ThreadStore::AttachCurrentThread(bool fAcquireThreadStoreLock)
{
    //
    // step 1: ThreadStore::InitCurrentThread
    // step 2: add this thread to the ThreadStore
    //

    // The thread has been constructed, during which some data is initialized (like which RuntimeInstance the
    // thread belongs to), but it hasn't been added to the thread store because doing so takes a lock, which 
    // we want to avoid at construction time because the loader lock is held then.
    Thread * pAttachingThread = RawGetCurrentThread();

    // On CHK build, validate that our GetThread assembly implementation matches the C++ implementation using
    // TLS.
    CreateCurrentThreadBuffer();

    ASSERT(_fls_index != FLS_OUT_OF_INDEXES);
    Thread* pThreadFromCurrentFiber = (Thread*)PalFlsGetValue(_fls_index);

    if (pAttachingThread->IsInitialized())
    {
        if (pThreadFromCurrentFiber != pAttachingThread)
        {
            ASSERT_UNCONDITIONALLY("Multiple fibers encountered on a single thread");
            RhFailFast();
        }

        return;
    }

    if (pThreadFromCurrentFiber != NULL)
    {
        ASSERT_UNCONDITIONALLY("Multiple threads encountered from a single fiber");
        RhFailFast();
    }

    //
    // Init the thread buffer
    //
    pAttachingThread->Construct();
    ASSERT(pAttachingThread->m_ThreadStateFlags == Thread::TSF_Unknown);

    ThreadStore* pTS = GetThreadStore();
    ReaderWriterLock::WriteHolder write(&pTS->m_Lock, fAcquireThreadStoreLock);

    //
    // Set thread state to be attached
    //
    ASSERT(pAttachingThread->m_ThreadStateFlags == Thread::TSF_Unknown);
    pAttachingThread->m_ThreadStateFlags = Thread::TSF_Attached;

    pTS->m_ThreadList.PushHead(pAttachingThread);

    //
    // Associate the current fiber with the current thread.  This makes the current fiber the thread's "home"
    // fiber.  This fiber is the only fiber allowed to execute managed code on this thread.  When this fiber
    // is destroyed, we consider the thread to be destroyed.
    //
    PalFlsSetValue(_fls_index, pAttachingThread);
}
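
The fiber-local-storage association at the end of AttachCurrentThread is what defines a thread's "home" fiber. As a stand-alone illustration of the same pattern, here is a minimal sketch using the raw Win32 FLS API (FlsAlloc/FlsSetValue/FlsGetValue), which the PalFls* wrappers above presumably map onto; FakeThread, g_homeFiberSlot, and OnHomeFiberDestroyed are illustrative names, not part of the runtime.

// Illustrative sketch only: models the "home fiber" association with raw Win32 FLS.
// FlsAlloc's callback fires when the owning fiber is deleted or its thread exits,
// which is the kind of notification that drives DetachCurrentThreadIfHomeFiber below.
#include <windows.h>
#include <cassert>

struct FakeThread { bool attached = false; };             // stands in for the runtime's Thread

static DWORD g_homeFiberSlot = FLS_OUT_OF_INDEXES;

static VOID WINAPI OnHomeFiberDestroyed(PVOID pData)
{
    // The OS passes back the value stored with FlsSetValue; tear-down would happen here.
    FakeThread * pThread = static_cast<FakeThread *>(pData);
    if (pThread != NULL)
        pThread->attached = false;
}

static bool AttachSketch(FakeThread * pThread)
{
    if (g_homeFiberSlot == FLS_OUT_OF_INDEXES)
        g_homeFiberSlot = FlsAlloc(OnHomeFiberDestroyed);
    if (g_homeFiberSlot == FLS_OUT_OF_INDEXES)
        return false;                                      // no FLS index available

    // If this fiber already carries a thread, a second attach would be a bug, mirroring
    // the "Multiple threads encountered from a single fiber" fail-fast above.
    assert(FlsGetValue(g_homeFiberSlot) == NULL);

    pThread->attached = true;
    return FlsSetValue(g_homeFiberSlot, pThread) != FALSE; // this fiber is now the "home" fiber
}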
Example #2
void ThreadStore::DetachCurrentThreadIfHomeFiber()
{
    //
    // Note: we call this when each *fiber* is destroyed, because we receive that notification outside
    // of the Loader Lock.  This allows us to safely acquire the ThreadStore lock.  However, we have to be
    // extra careful to avoid cleaning up a thread unless the fiber being destroyed is the thread's "home"
    // fiber, as recorded in AttachCurrentThread.
    //

    // The thread may not have been initialized because it may never have run managed code before.
    Thread * pDetachingThread = RawGetCurrentThread();

    ASSERT(_fls_index != FLS_OUT_OF_INDEXES);
    Thread* pThreadFromCurrentFiber = (Thread*)PalFlsGetValue(_fls_index);

    if (!pDetachingThread->IsInitialized())
    {
        if (pThreadFromCurrentFiber != NULL)
        {
            ASSERT_UNCONDITIONALLY("Detaching a fiber from an unknown thread");
            RhFailFast();
        }
        return;
    }

    if (pThreadFromCurrentFiber == NULL)
    {
        // we've seen this thread, but not this fiber.  It must be a "foreign" fiber that was 
        // borrowing this thread.
        return;
    }

    if (pThreadFromCurrentFiber != pDetachingThread)
    {
        ASSERT_UNCONDITIONALLY("Detaching a thread from the wrong fiber");
        RhFailFast();
    }

#ifdef STRESS_LOG
    ThreadStressLog * ptsl = reinterpret_cast<ThreadStressLog *>(
        pDetachingThread->GetThreadStressLog());
    StressLog::ThreadDetach(ptsl);
#endif // STRESS_LOG

    ThreadStore* pTS = GetThreadStore();
    ReaderWriterLock::WriteHolder write(&pTS->m_Lock);
    ASSERT(rh::std::count(pTS->m_ThreadList.Begin(), pTS->m_ThreadList.End(), pDetachingThread) == 1);
    pTS->m_ThreadList.RemoveFirst(pDetachingThread);
    pDetachingThread->Destroy();
}
Example #3
// Unregister a previously registered callout. Removes the first registration that matches on both callout
// address and filter type. Causes a fail fast if the registration doesn't exist.
void RestrictedCallouts::UnregisterRefCountedHandleCallback(void * pCalloutMethod, EEType * pTypeFilter)
{
    CrstHolder lh(&s_sLock);

    HandleTableRestrictedCallout * pCurrCallout = s_pHandleTableRestrictedCallouts;
    HandleTableRestrictedCallout * pPrevCallout = NULL;

    while (pCurrCallout)
    {
        if ((pCurrCallout->m_pCalloutMethod == pCalloutMethod) &&
            (pCurrCallout->m_pTypeFilter == pTypeFilter))
        {
            // Found a matching entry, remove it from the chain.
            if (pPrevCallout)
                pPrevCallout->m_pNext = pCurrCallout->m_pNext;
            else
                s_pHandleTableRestrictedCallouts = pCurrCallout->m_pNext;

            delete pCurrCallout;

            return;
        }

        pPrevCallout = pCurrCallout;
        pCurrCallout = pCurrCallout->m_pNext;
    }

    // If we get here we didn't find a matching registration, indicating a bug on the part of the caller.
    ASSERT_UNCONDITIONALLY("Attempted to unregister restricted callout that wasn't registered.");
    RhFailFast();
}
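
The registration side that pairs with this routine is not shown on this page, but it follows the same head-insertion pattern as RegisterGcCallout in Example #7 below. A plausible sketch, with the signature and field assignments inferred from the unregister code above rather than taken from the actual header, would be:

// Sketch only: a plausible registration counterpart, inferred from the unregister code
// above and from RegisterGcCallout in Example #7. Names mirror the snippets on this page
// and are assumptions, not verified against the real header.
bool RestrictedCallouts::RegisterRefCountedHandleCallback(void * pCalloutMethod, EEType * pTypeFilter)
{
    HandleTableRestrictedCallout * pCallout = new (nothrow) HandleTableRestrictedCallout();
    if (pCallout == NULL)
        return false;                       // report OOM to the caller rather than failing fast

    pCallout->m_pCalloutMethod = pCalloutMethod;
    pCallout->m_pTypeFilter = pTypeFilter;

    CrstHolder lh(&s_sLock);

    // Push onto the head of the singly linked chain; the most recent registration is found first.
    pCallout->m_pNext = s_pHandleTableRestrictedCallouts;
    s_pHandleTableRestrictedCallouts = pCallout;

    return true;
}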
Example #4
void DllThreadDetach()
{
    // BEWARE: loader lock is held here!

    // Should have already received a call to FiberDetach for this thread's "home" fiber.
    Thread* pCurrentThread = ThreadStore::GetCurrentThreadIfAvailable();
    if (pCurrentThread != NULL && !pCurrentThread->IsDetached())
    {
        ASSERT_UNCONDITIONALLY("Detaching thread whose home fiber has not been detached");
        RhFailFast();
    }
}
Example #5
// Unregister a previously registered callout. Removes the first registration that matches on both callout
// kind and address. Causes a fail fast if the registration doesn't exist.
void RestrictedCallouts::UnregisterGcCallout(GcRestrictedCalloutKind eKind, void * pCalloutMethod)
{
    // Validate callout kind.
    if (eKind >= GCRC_Count)
    {
        ASSERT_UNCONDITIONALLY("Invalid GC restricted callout kind.");
        RhFailFast();
    }

    CrstHolder lh(&s_sLock);

    GcRestrictedCallout * pCurrCallout = s_rgGcRestrictedCallouts[eKind];
    GcRestrictedCallout * pPrevCallout = NULL;

    while (pCurrCallout)
    {
        if (pCurrCallout->m_pCalloutMethod == pCalloutMethod)
        {
            // Found a matching entry, remove it from the chain.
            if (pPrevCallout)
                pPrevCallout->m_pNext = pCurrCallout->m_pNext;
            else
                s_rgGcRestrictedCallouts[eKind] = pCurrCallout->m_pNext;

            delete pCurrCallout;

            return;
        }

        pPrevCallout = pCurrCallout;
        pCurrCallout = pCurrCallout->m_pNext;
    }

    // If we get here we didn't find a matching registration, indicating a bug on the part of the caller.
    ASSERT_UNCONDITIONALLY("Attempted to unregister restricted callout that wasn't registered.");
    RhFailFast();
}
Example #6
void DllThreadDetach()
{
    // BEWARE: loader lock is held here!

    // Should have already received a call to FiberDetach for this thread's "home" fiber.
    Thread* pCurrentThread = ThreadStore::GetCurrentThreadIfAvailable();
    if (pCurrentThread != NULL && !pCurrentThread->IsDetached())
    {
        // Once shutdown starts, RuntimeThreadShutdown callbacks are ignored, implying that
        // it is no longer guaranteed that exiting threads will be detached.
        if (!g_processShutdownHasStarted)
        {
            ASSERT_UNCONDITIONALLY("Detaching thread whose home fiber has not been detached");
            RhFailFast();
        }
    }
}
Example #7
// Register callback of the given type to the method with the given address. The most recently registered
// callbacks are called first. Returns true on success, false if insufficient memory was available for the
// registration.
bool RestrictedCallouts::RegisterGcCallout(GcRestrictedCalloutKind eKind, void * pCalloutMethod)
{
    // Validate callout kind.
    if (eKind >= GCRC_Count)
    {
        ASSERT_UNCONDITIONALLY("Invalid GC restricted callout kind.");
        RhFailFast();
    }

    GcRestrictedCallout * pCallout = new (nothrow) GcRestrictedCallout();
    if (pCallout == NULL)
        return false;

    pCallout->m_pCalloutMethod = pCalloutMethod;

    CrstHolder lh(&s_sLock);

    // Link new callout to head of the chain according to its type.
    pCallout->m_pNext = s_rgGcRestrictedCallouts[eKind];
    s_rgGcRestrictedCallouts[eKind] = pCallout;

    return true;
}
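
A minimal usage sketch follows, assuming a caller-supplied callout kind below GCRC_Count. MyGcCallback and InstallAndRemoveMyCallout are hypothetical names, and the no-argument callback signature is purely illustrative: the registration API only sees a void * address.

// Illustrative only: pairing RegisterGcCallout with UnregisterGcCallout.
static void MyGcCallback()
{
    // The "restricted" in the name suggests these run at sensitive points in the GC,
    // so a real callout would keep its body trivial.
}

static bool InstallAndRemoveMyCallout(GcRestrictedCalloutKind eKind)
{
    // Registration reports allocation failure through its return value.
    if (!RestrictedCallouts::RegisterGcCallout(eKind, (void *)&MyGcCallback))
        return false;

    // ... use the callout ...

    // Unregistering an address/kind pair that was never registered fails fast, so this
    // call must be balanced one-to-one with a successful registration.
    RestrictedCallouts::UnregisterGcCallout(eKind, (void *)&MyGcCallback);
    return true;
}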
Example #8
// The invoke of a funclet is a bit special and requires an assembly thunk, but we don't want to break the
// stackwalk due to this.  So this routine will unwind through the assembly thunks used to invoke funclets.
// It's also used to disambiguate exceptionally- and non-exceptionally-invoked funclets.
bool StackFrameIterator::HandleFuncletInvokeThunk()
{
#if defined(USE_PORTABLE_HELPERS) // @TODO: Currently no funclet invoke defined in a portable way
    return false;
#else // defined(USE_PORTABLE_HELPERS)

    ASSERT((m_dwFlags & MethodStateCalculated) == 0);

    if (
#ifdef TARGET_X86
        !EQUALS_CODE_ADDRESS(m_ControlPC, RhpCallFunclet2)
#else
        !EQUALS_CODE_ADDRESS(m_ControlPC, RhpCallCatchFunclet2) &&
        !EQUALS_CODE_ADDRESS(m_ControlPC, RhpCallFinallyFunclet2) &&
        !EQUALS_CODE_ADDRESS(m_ControlPC, RhpCallFilterFunclet2)
#endif
        )
    {
        return false;
    }

    PTR_UIntNative SP;

#ifdef TARGET_X86
    // First, unwind RhpCallFunclet
    SP = (PTR_UIntNative)(m_RegDisplay.SP + 0x4);   // skip the saved assembly-routine-EBP
    m_RegDisplay.SetAddrOfIP(SP);
    m_RegDisplay.SetIP(*SP++);
    m_RegDisplay.SetSP((UIntNative)dac_cast<TADDR>(SP));
    m_ControlPC = dac_cast<PTR_VOID>(*(m_RegDisplay.pIP));

    ASSERT(
        EQUALS_CODE_ADDRESS(m_ControlPC, RhpCallCatchFunclet2) ||
        EQUALS_CODE_ADDRESS(m_ControlPC, RhpCallFinallyFunclet2) ||
        EQUALS_CODE_ADDRESS(m_ControlPC, RhpCallFilterFunclet2)
        );
#endif

#ifdef TARGET_AMD64
    // Save the preserved regs portion of the REGDISPLAY across the unwind through the C# EH dispatch code.
    m_funcletPtrs.pRbp = m_RegDisplay.pRbp;
    m_funcletPtrs.pRdi = m_RegDisplay.pRdi;
    m_funcletPtrs.pRsi = m_RegDisplay.pRsi;
    m_funcletPtrs.pRbx = m_RegDisplay.pRbx;
    m_funcletPtrs.pR12 = m_RegDisplay.pR12;
    m_funcletPtrs.pR13 = m_RegDisplay.pR13;
    m_funcletPtrs.pR14 = m_RegDisplay.pR14;
    m_funcletPtrs.pR15 = m_RegDisplay.pR15;

    SP = (PTR_UIntNative)(m_RegDisplay.SP + 0x28);

    m_RegDisplay.pRbp = SP++;
    m_RegDisplay.pRdi = SP++;
    m_RegDisplay.pRsi = SP++;
    m_RegDisplay.pRbx = SP++;
    m_RegDisplay.pR12 = SP++;
    m_RegDisplay.pR13 = SP++;
    m_RegDisplay.pR14 = SP++;
    m_RegDisplay.pR15 = SP++;

    // RhpCallCatchFunclet puts a couple of extra things on the stack that aren't put there by the other two  
    // thunks, but we don't need to know what they are here, so we just skip them.
    if (EQUALS_CODE_ADDRESS(m_ControlPC, RhpCallCatchFunclet2))
        SP += 2;
#elif defined(TARGET_X86)
    // Save the preserved regs portion of the REGDISPLAY across the unwind through the C# EH dispatch code.
    m_funcletPtrs.pRbp = m_RegDisplay.pRbp;
    m_funcletPtrs.pRdi = m_RegDisplay.pRdi;
    m_funcletPtrs.pRsi = m_RegDisplay.pRsi;
    m_funcletPtrs.pRbx = m_RegDisplay.pRbx;

    SP = (PTR_UIntNative)(m_RegDisplay.SP + 0x4);

    m_RegDisplay.pRdi = SP++;
    m_RegDisplay.pRsi = SP++;
    m_RegDisplay.pRbx = SP++;
    m_RegDisplay.pRbp = SP++;

#elif defined(TARGET_ARM)
    // RhpCallCatchFunclet puts a couple of extra things on the stack that aren't put there by the other two
    // thunks, but we don't need to know what they are here, so we just skip them.
    UIntNative uOffsetToR4 = EQUALS_CODE_ADDRESS(m_ControlPC, RhpCallCatchFunclet2) ? 0xC : 0x4;

    // Save the preserved regs portion of the REGDISPLAY across the unwind through the C# EH dispatch code.
    m_funcletPtrs.pR4  = m_RegDisplay.pR4;
    m_funcletPtrs.pR5  = m_RegDisplay.pR5;
    m_funcletPtrs.pR6  = m_RegDisplay.pR6;
    m_funcletPtrs.pR7  = m_RegDisplay.pR7;
    m_funcletPtrs.pR8  = m_RegDisplay.pR8;
    m_funcletPtrs.pR9  = m_RegDisplay.pR9;
    m_funcletPtrs.pR10 = m_RegDisplay.pR10;
    m_funcletPtrs.pR11 = m_RegDisplay.pR11;

    SP = (PTR_UIntNative)(m_RegDisplay.SP + uOffsetToR4);

    m_RegDisplay.pR4  = SP++;
    m_RegDisplay.pR5  = SP++;
    m_RegDisplay.pR6  = SP++;
    m_RegDisplay.pR7  = SP++;
    m_RegDisplay.pR8  = SP++;
    m_RegDisplay.pR9  = SP++;
    m_RegDisplay.pR10 = SP++;
    m_RegDisplay.pR11 = SP++;
#else
    SP = (PTR_UIntNative)(m_RegDisplay.SP);
    ASSERT_UNCONDITIONALLY("NYI for this arch");
#endif
    m_RegDisplay.SetAddrOfIP((PTR_PCODE)SP);
    m_RegDisplay.SetIP(*SP++);
    m_RegDisplay.SetSP((UIntNative)dac_cast<TADDR>(SP));
    m_ControlPC = dac_cast<PTR_VOID>(*(m_RegDisplay.pIP));

    // We expect to be called by the runtime's C# EH implementation, and since this function's notion of how 
    // to unwind through the stub is brittle relative to the stub itself, we want to check as soon as we can.
    ASSERT(m_pInstance->FindCodeManagerByAddress(m_ControlPC) && "unwind from funclet invoke stub failed");

    return true;
#endif // defined(USE_PORTABLE_HELPERS)
}
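
For readability, the AMD64 branch above implies a specific layout for the funclet-invoke thunk's frame. The struct below is a reconstruction from the offsets in the code (the rebase to SP + 0x28, eight pointer-sized register saves, the optional two extra slots for the catch thunk, then the return address); it is not a type that exists in the runtime.

// Reconstructed for illustration from the AMD64 unwind code above; hypothetical type.
struct Amd64FuncletInvokeFrame
{
    UIntNative skipped[5];         // the 0x28 bytes the unwinder steps over without reading
    UIntNative savedRbp;           // popped into m_RegDisplay.pRbp
    UIntNative savedRdi;
    UIntNative savedRsi;
    UIntNative savedRbx;
    UIntNative savedR12;
    UIntNative savedR13;
    UIntNative savedR14;
    UIntNative savedR15;
    // UIntNative catchExtras[2];  // present only for RhpCallCatchFunclet2 (the "SP += 2" case)
    UIntNative returnAddress;      // read via SetAddrOfIP/SetIP and becomes m_ControlPC
};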