// Drains the finalization queue: repeatedly fetches the next finalizable object and runs
// its finalizer, until the queue is empty, the finalizer thread is asked to terminate, or
// per-AppDomain finalizer work appears (in which case we return NULL early so the caller
// can service that work first — note the GCFinalizersEnd ETW event is not fired on those
// early-return paths).
//
// fobj       - object to start with, or NULL to pull the first one from the GC's queue.
// bitToCheck - object-header bit tested below; objects with the bit set are skipped
//              (fetched and discarded) rather than finalized.  Presumably the
//              "finalizer already run/suppressed" bit — confirm against callers.
//
// Returns the object that was fetched but not processed when interrupted, or NULL.
Object * FinalizerThread::FinalizeAllObjects(Object* fobj, int bitToCheck)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    FireEtwGCFinalizersBegin_V1(GetClrInstanceId());

    unsigned int fcount = 0;     // number of finalizers actually run (reported to ETW)
    bool fTerminate = false;     // set by DoOneFinalization when the thread must shut down

    if (fobj == NULL)
    {
        // AppDomain finalizer work takes priority over draining the queue.
        if (AppDomain::HasWorkForFinalizerThread())
        {
            return NULL;
        }
        fobj = GCHeapUtilities::GetGCHeap()->GetNextFinalizable();
    }

    Thread *pThread = GetThread();

#ifdef FEATURE_PROFAPI_ATTACH_DETACH
    ULONGLONG ui64TimestampLastCheckedProfAttachEventMs = 0;
#endif //FEATURE_PROFAPI_ATTACH_DETACH

    // Finalize everyone
    while (fobj)
    {
#ifdef FEATURE_PROFAPI_ATTACH_DETACH
        // Don't let an overloaded finalizer queue starve out
        // an attaching profiler.  In between running finalizers,
        // check the profiler attach event without blocking.
        ProcessProfilerAttachIfNecessary(&ui64TimestampLastCheckedProfAttachEventMs);
#endif // FEATURE_PROFAPI_ATTACH_DETACH

        if (fobj->GetHeader()->GetBits() & bitToCheck)
        {
            // Skip this object (bit set) and move on to the next one, unless
            // AppDomain work has shown up in the meantime.
            if (AppDomain::HasWorkForFinalizerThread())
            {
                return NULL;
            }
            fobj = GCHeapUtilities::GetGCHeap()->GetNextFinalizable();
        }
        else
        {
            fcount++;
            // DoOneFinalization runs the finalizer and returns the next object to
            // process (or NULL); it may also request thread termination.
            fobj = DoOneFinalization(fobj, pThread, bitToCheck,&fTerminate);
            if (fTerminate)
            {
                break;
            }
            if (fobj == NULL)
            {
                if (AppDomain::HasWorkForFinalizerThread())
                {
                    return NULL;
                }
                fobj = GCHeapUtilities::GetGCHeap()->GetNextFinalizable();
            }
        }
    }

    FireEtwGCFinalizersEnd_V1(fcount, GetClrInstanceId());

    return fobj;
}
//
//The function is called from managed code to quickly check if a packet is available.
//This is a perf-critical function. Even helper method frames are not created. We fall
//back to the VM to do heavy weight operations like creating a new CP thread.
//
// Out-parameter protocol: on return, *lpOverlapped is the dequeued packet when it can be
// dispatched directly to the managed callback; it is NULL when there is no packet or when
// the packet was stashed on the thread (via StoreOverlappedInfoInThread) for the VM path
// to process instead.
FCIMPL3(void, CheckVMForIOPacket, LPOVERLAPPED* lpOverlapped, DWORD* errorCode, DWORD* numBytes)
{
    FCALL_CONTRACT;

#ifndef FEATURE_PAL
    Thread *pThread = GetThread();
    size_t key=0;

    _ASSERTE(pThread);

    //Poll and wait if GC is in progress, to avoid blocking GC for too long.
    FC_GC_POLL();

    // Try to dequeue a completion packet without leaving managed-call context.
    *lpOverlapped = ThreadpoolMgr::CompletionPortDispatchWorkWithinAppDomain(pThread, errorCode, numBytes, &key, DefaultADID);
    if(*lpOverlapped == NULL)
    {
        // Nothing available right now.
        return;
    }

    OVERLAPPEDDATAREF overlapped = ObjectToOVERLAPPEDDATAREF(OverlappedDataObject::GetOverlapped(*lpOverlapped));

    if (overlapped->m_callback == NULL)
    {
        //We're not initialized yet, go back to the Vm, and process the packet there.
        ThreadpoolMgr::StoreOverlappedInfoInThread(pThread, *errorCode, *numBytes, key, *lpOverlapped);

        *lpOverlapped = NULL;
        return;
    }
    else
    {
        if(!pThread->IsRealThreadPoolResetNeeded())
        {
            // A lightweight reset suffices: restore thread state here in cooperative
            // mode instead of taking the expensive VM path.
            pThread->ResetManagedThreadObjectInCoopMode(ThreadNative::PRIORITY_NORMAL);
            pThread->InternalReset(FALSE, TRUE, FALSE, FALSE);
            if(ThreadpoolMgr::ShouldGrowCompletionPortThreadpool(ThreadpoolMgr::CPThreadCounter.DangerousGetDirtyCounts()))
            {
                //We may have to create a CP thread, go back to the Vm, and process the packet there.
                ThreadpoolMgr::StoreOverlappedInfoInThread(pThread, *errorCode, *numBytes, key, *lpOverlapped);
                *lpOverlapped = NULL;
            }
        }
        else
        {
            //A more complete reset is needed (due to change in priority etc), go back to the VM,
            //and process the packet there.
            ThreadpoolMgr::StoreOverlappedInfoInThread(pThread, *errorCode, *numBytes, key, *lpOverlapped);
            *lpOverlapped = NULL;
        }
    }

    // if this will be "dispatched" to the managed callback fire the IODequeue event:
    if (*lpOverlapped != NULL && ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ThreadPoolIODequeue))
        FireEtwThreadPoolIODequeue(*lpOverlapped, OverlappedDataObject::GetOverlapped(*lpOverlapped), GetClrInstanceId());

#else // !FEATURE_PAL
    // I/O completion ports are not available on the PAL (non-Windows) build.
    *lpOverlapped = NULL;
#endif // !FEATURE_PAL

    return;
}
FCIMPLEND

// Allocates the native OVERLAPPED (inside a NATIVEOVERLAPPED_AND_HANDLE) for a managed
// OverlappedData object and links the two together: an async-pinned handle to the managed
// object is stored alongside the native structure, and the native pointer is written back
// into the managed object's m_pNativeOverlapped field.  The user object (or every element,
// when it is an object[]) is validated as pinnable first.
//
// Returns the LPOVERLAPPED suitable for passing to native overlapped I/O APIs.
FCIMPL1(LPOVERLAPPED, AllocateNativeOverlapped, OverlappedDataObject* overlappedUNSAFE)
{
    FCALL_CONTRACT;

    LPOVERLAPPED lpOverlapped;

    OVERLAPPEDDATAREF overlapped = ObjectToOVERLAPPEDDATAREF(overlappedUNSAFE);
    OBJECTREF userObject = overlapped->m_userObject;

    // Set up a helper method frame protecting both managed references across
    // anything below that may trigger a GC.
    HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_NONE, overlapped, userObject);

    if (g_pOverlappedDataClass == NULL)
    {
        // Lazily cache the OverlappedData MethodTable on first use.
        g_pOverlappedDataClass = MscorlibBinder::GetClass(CLASS__OVERLAPPEDDATA);
        // We have optimization to avoid creating event if IO is in default domain. This depends on default domain
        // can not be unloaded.
        _ASSERTE(SystemDomain::System()->DefaultDomain()->GetId().m_dwId == DefaultADID);
    }

    CONSISTENCY_CHECK(overlapped->GetMethodTable() == g_pOverlappedDataClass);

    if (userObject != NULL)
    {
        if (userObject->GetMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable())
        {
            // An object[] user object: validate each element individually.
            BASEARRAYREF asArray = (BASEARRAYREF) userObject;
            OBJECTREF *pObj = (OBJECTREF*)(asArray->GetDataPtr());
            SIZE_T num = asArray->GetNumComponents();
            SIZE_T i;
            for (i = 0; i < num; i ++)
            {
                ValidatePinnedObject(pObj[i]);
            }
        }
        else
        {
            ValidatePinnedObject(userObject);
        }
    }

    // The holder frees the allocation if anything below throws; Extract() then hands
    // ownership over once the async-pinned handle has been created successfully.
    NewHolder<NATIVEOVERLAPPED_AND_HANDLE> overlappedHolder(new NATIVEOVERLAPPED_AND_HANDLE());
    overlappedHolder->m_handle = GetAppDomain()->CreateTypedHandle(overlapped, HNDTYPE_ASYNCPINNED);
    lpOverlapped = &(overlappedHolder.Extract()->m_overlapped);

    // Initialize the native OVERLAPPED fields from the managed object.
    lpOverlapped->Internal = 0;
    lpOverlapped->InternalHigh = 0;
    lpOverlapped->Offset = overlapped->m_offsetLow;
    lpOverlapped->OffsetHigh = overlapped->m_offsetHigh;
    lpOverlapped->hEvent = (HANDLE)overlapped->m_eventHandle;

    // Back-link so the managed side can find its native OVERLAPPED.
    overlapped->m_pNativeOverlapped = lpOverlapped;

    HELPER_METHOD_FRAME_END();

    LOG((LF_INTEROP, LL_INFO10000, "In AllocNativeOperlapped thread 0x%x\n", GetThread()));

    if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ThreadPoolIODequeue))
        FireEtwThreadPoolIOPack(lpOverlapped, overlappedUNSAFE, GetClrInstanceId());

    return lpOverlapped;
}
void MulticoreJitFireEtw(const wchar_t * pAction, const wchar_t * pTarget, int p1, int p2, int p3) { LIMITED_METHOD_CONTRACT FireEtwMulticoreJit(GetClrInstanceId(), pAction, pTarget, p1, p2, p3); }