//---------------------------------------------------------------------------------------
//
// One-time initialization of the IL stub cache: records the loader heap on which
// subsequently created stubs will be allocated.
//
// pHeap - loader heap to use for stub allocation; ownership stays with the caller.
//
void ILStubCache::Init(LoaderHeap* pHeap)
{
    LIMITED_METHOD_CONTRACT;

    // Init must be called exactly once - the heap must not already be set.
    CONSISTENCY_CHECK(NULL == m_heap);
    m_heap = pHeap;
}
FCIMPLEND

//
// FCall: maps a native OVERLAPPED pointer back to the managed OverlappedData object
// it was allocated for (the inverse of AllocateNativeOverlapped).
//
// lpOverlapped - native OVERLAPPED previously handed out by AllocateNativeOverlapped.
// Returns the OverlappedDataObject* recovered from the pointer.
//
FCIMPL1(OverlappedDataObject*, GetOverlappedFromNative, LPOVERLAPPED lpOverlapped)
{
    FCALL_CONTRACT;

    // The OverlappedData class must already be cached (AllocateNativeOverlapped ran first)
    // and the recovered object must really be an OverlappedData instance.
    CONSISTENCY_CHECK(g_pOverlappedDataClass && (OverlappedDataObject::GetOverlapped(lpOverlapped)->GetMethodTable() == g_pOverlappedDataClass));

    return OverlappedDataObject::GetOverlapped(lpOverlapped);
}
FCIMPLEND

//
// FCall: frees a native OVERLAPPED previously produced by AllocateNativeOverlapped.
// Destroys the async-pinning handle stored alongside the OVERLAPPED, then deletes
// the allocation itself.
//
// lpOverlapped - pointer previously returned by AllocateNativeOverlapped; it points
//                at the m_overlapped field inside a NATIVEOVERLAPPED_AND_HANDLE, so
//                the containing struct (and its handle) can be recovered from it.
//
FCIMPL1(void, FreeNativeOverlapped, LPOVERLAPPED lpOverlapped)
{
    FCALL_CONTRACT;

    HELPER_METHOD_FRAME_BEGIN_0();

    // Same sanity check as GetOverlappedFromNative: the pointer must map back to a
    // genuine OverlappedData object.
    CONSISTENCY_CHECK(g_pOverlappedDataClass && (OverlappedDataObject::GetOverlapped(lpOverlapped)->GetMethodTable() == g_pOverlappedDataClass));

    // Release the async-pinning GC handle that kept the managed OverlappedData alive.
    DestroyAsyncPinningHandle(((NATIVEOVERLAPPED_AND_HANDLE*)lpOverlapped)->m_handle);

    // NOTE(review): deletes through the LPOVERLAPPED member pointer rather than the
    // NATIVEOVERLAPPED_AND_HANDLE* - presumably m_overlapped is the first member and
    // the type is trivially destructible, so the addresses coincide; confirm against
    // the struct definition.
    delete lpOverlapped;

    HELPER_METHOD_FRAME_END();
}
// The runtime needs to know whether we're using workstation or server GC // long before the GCHeap is created. void InitializeHeapType(bool bServerHeap) { LIMITED_METHOD_CONTRACT; #ifdef FEATURE_SVR_GC IGCHeap::gcHeapType = bServerHeap ? IGCHeap::GC_HEAP_SVR : IGCHeap::GC_HEAP_WKS; #ifdef WRITE_BARRIER_CHECK if (IGCHeap::gcHeapType == IGCHeap::GC_HEAP_SVR) { g_GCShadow = 0; g_GCShadowEnd = 0; } #endif // WRITE_BARRIER_CHECK #else // FEATURE_SVR_GC UNREFERENCED_PARAMETER(bServerHeap); CONSISTENCY_CHECK(bServerHeap == false); #endif // FEATURE_SVR_GC }
// we have this so that the check of the global can be inlined // and we don't make the call to CheckMarker unless we need to. void BaseStackMarker::CheckForBackoutViolation() { WRAPPER_CONTRACT; STATIC_CONTRACT_DEBUG_ONLY; // The marker should always be re-enabled at this point. CONSISTENCY_CHECK_MSG(!m_fTemporarilyDisabled, "The stack guard was disabled but not properly re-enabled. This is a bug somewhere in the code called after this marker has been set up."); if (!m_pDebugState || m_fTemporarilyDisabled) { return; } // Reset the SO tolerance of the thread. m_pDebugState->SetSOTolerance(m_prevWasSOTolerant); if (m_fAddedToStack) { // Pop ourselves off of the stack of stack markers on the CLR debug state. CONSISTENCY_CHECK(m_pDebugState != NULL); BaseStackMarker *pPopResult = m_pDebugState->m_StackMarkerStack.PopStackMarker(); CONSISTENCY_CHECK_MSG(pPopResult == this, "The marker we pop off the stack should always be the current marker."); CONSISTENCY_CHECK_MSG(m_pPrevious == NULL, "PopStackMarker should reset the current marker's m_pPrevious field to NULL."); } // Not cancellable markers should only be checked when no cancellable markers are present. if (!m_fAllowDisabling && !(m_pDebugState->m_StackMarkerStack.IsEmpty())) { return; } if (m_fProtectedStackPage) { UndoPageProtectionInDebugger(); } if (m_fMarkerSet) { // Check to see if we overwrote the stack guard marker. CheckMarker(); } }
FCIMPLEND

//
// FCall: allocates the native OVERLAPPED structure paired with a managed
// OverlappedData object. The OVERLAPPED is embedded in a NATIVEOVERLAPPED_AND_HANDLE
// together with an async-pinning GC handle that keeps the managed object (and any
// user buffers it references) pinned for the duration of the I/O.
//
// overlappedUNSAFE - the managed OverlappedData object to pair with the native memory.
// Returns a pointer to the embedded OVERLAPPED, initialized from the managed object's
// offset/event fields. Freed later via FreeNativeOverlapped.
//
FCIMPL1(LPOVERLAPPED, AllocateNativeOverlapped, OverlappedDataObject* overlappedUNSAFE)
{
    FCALL_CONTRACT;

    LPOVERLAPPED lpOverlapped;

    OVERLAPPEDDATAREF overlapped = ObjectToOVERLAPPEDDATAREF(overlappedUNSAFE);
    OBJECTREF userObject = overlapped->m_userObject;

    // Protect both references across any GC triggered below.
    HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_NONE, overlapped, userObject);

    // Lazily cache the OverlappedData method table on first use.
    if (g_pOverlappedDataClass == NULL)
    {
        g_pOverlappedDataClass = MscorlibBinder::GetClass(CLASS__OVERLAPPEDDATA);
        // We have optimization to avoid creating event if IO is in default domain. This depends on default domain
        // can not be unloaded.
        _ASSERTE(SystemDomain::System()->DefaultDomain()->GetId().m_dwId == DefaultADID);
    }

    CONSISTENCY_CHECK(overlapped->GetMethodTable() == g_pOverlappedDataClass);

    // Validate that everything about to be pinned is legal to pin - either each
    // element of an object[] user object, or the single user object itself.
    if (userObject != NULL)
    {
        if (userObject->GetMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable())
        {
            BASEARRAYREF asArray = (BASEARRAYREF) userObject;
            OBJECTREF *pObj = (OBJECTREF*)(asArray->GetDataPtr());
            SIZE_T num = asArray->GetNumComponents();
            SIZE_T i;
            for (i = 0; i < num; i ++)
            {
                ValidatePinnedObject(pObj[i]);
            }
        }
        else
        {
            ValidatePinnedObject(userObject);
        }
    }

    // Allocate the native block; the holder deletes it if handle creation throws.
    NewHolder<NATIVEOVERLAPPED_AND_HANDLE> overlappedHolder(new NATIVEOVERLAPPED_AND_HANDLE());
    overlappedHolder->m_handle = GetAppDomain()->CreateTypedHandle(overlapped, HNDTYPE_ASYNCPINNED);
    // Ownership transfers out of the holder; callers free via FreeNativeOverlapped.
    lpOverlapped = &(overlappedHolder.Extract()->m_overlapped);

    lpOverlapped->Internal = 0;
    lpOverlapped->InternalHigh = 0;
    lpOverlapped->Offset = overlapped->m_offsetLow;
    lpOverlapped->OffsetHigh = overlapped->m_offsetHigh;
    lpOverlapped->hEvent = (HANDLE)overlapped->m_eventHandle;

    // Link the managed object back to its native OVERLAPPED.
    overlapped->m_pNativeOverlapped = lpOverlapped;

    HELPER_METHOD_FRAME_END();

    // Fixed typo in the log message ("AllocNativeOperlapped").
    LOG((LF_INTEROP, LL_INFO10000, "In AllocateNativeOverlapped thread 0x%x\n", GetThread()));

    if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ThreadPoolIODequeue))
        FireEtwThreadPoolIOPack(lpOverlapped, overlappedUNSAFE, GetClrInstanceId());

    return lpOverlapped;
}
// -------------------------------------------------------------------------------------- // // Stores hot data reported by IBC in profile data (code:CorProfileData) to a stream. // Aligns output stream to 4-bytes. // __checkReturn HRESULT HotHeapWriter::SaveToStream( IStream *pStream, CorProfileData *pProfileData, UINT32 *pnSavedSize) const { _ASSERTE(pStream != NULL); _ASSERTE(pProfileData != NULL); _ASSERTE(pnSavedSize != NULL); #ifdef FEATURE_PREJIT HRESULT hr = S_OK; UINT32 nOffset = 0; UINT32 nValueHeapStart_PositiveOffset; UINT32 nValueOffsetTableStart_PositiveOffset; UINT32 nIndexTableStart_PositiveOffset; // data // // number of hot tokens UINT32 nHotItemsCount = pProfileData->GetHotTokens( GetTableIndex(), 1 << ProfilingFlags_MetaData, 1 << ProfilingFlags_MetaData, NULL, 0); CONSISTENCY_CHECK(nHotItemsCount != 0); NewArrayHolder<UINT32> hotItemArr = new (nothrow) UINT32[nHotItemsCount]; IfNullRet(hotItemArr); // get hot tokens static_assert_no_msg(sizeof(UINT32) == sizeof(mdToken)); pProfileData->GetHotTokens( GetTableIndex(), 1 << ProfilingFlags_MetaData, 1 << ProfilingFlags_MetaData, reinterpret_cast<mdToken *>(&hotItemArr[0]), nHotItemsCount); // convert tokens to rids for (UINT32 i = 0; i < nHotItemsCount; i++) { hotItemArr[i] = RidFromToken(hotItemArr[i]); } NewArrayHolder<RidOffsetPair> offsetMapping = new (nothrow) RidOffsetPair[nHotItemsCount]; IfNullRet(offsetMapping); // write data nValueHeapStart_PositiveOffset = nOffset; // note that we write hot items in the order they appear in pProfileData->GetHotTokens // this is so that we preserve the ordering optimizations done by IbcMerge for (UINT32 i = 0; i < nHotItemsCount; i++) { DataBlob data; IfFailRet(GetData( hotItemArr[i], &data)); // keep track of the offset at which each hot item is written offsetMapping[i].rid = hotItemArr[i]; offsetMapping[i].offset = nOffset; IfFailRet(StreamUtil::WriteToStream( pStream, data.GetDataPointer(), data.GetSize(), &nOffset)); } 
IfFailRet(StreamUtil::AlignDWORD(pStream, &nOffset)); // sort by rid so that a hot rid can be looked up by binary search qsort(offsetMapping, nHotItemsCount, sizeof(RidOffsetPair), RidOffsetPair::Compare); // initialize table of offsets to data NewArrayHolder<UINT32> dataIndices = new (nothrow) UINT32[nHotItemsCount]; IfNullRet(dataIndices); // fill in the hotItemArr (now sorted by rid) and dataIndices array with each offset for (UINT32 i = 0; i < nHotItemsCount; i++) { hotItemArr[i] = offsetMapping[i].rid; dataIndices[i] = offsetMapping[i].offset; } // table of offsets to data // nValueOffsetTableStart_PositiveOffset = nOffset; IfFailRet(StreamUtil::WriteToStream(pStream, &dataIndices[0], sizeof(UINT32) * nHotItemsCount, &nOffset)); // rid table (sorted) // nIndexTableStart_PositiveOffset = nOffset; IfFailRet(StreamUtil::WriteToStream(pStream, &hotItemArr[0], nHotItemsCount * sizeof(UINT32), &nOffset)); IfFailRet(StreamUtil::AlignDWORD(pStream, &nOffset)); { // hot pool header struct HotHeapHeader header; // fix offsets header.m_nIndexTableStart_NegativeOffset = nOffset - nIndexTableStart_PositiveOffset; header.m_nValueOffsetTableStart_NegativeOffset = nOffset - nValueOffsetTableStart_PositiveOffset; header.m_nValueHeapStart_NegativeOffset = nOffset - nValueHeapStart_PositiveOffset; // write header IfFailRet(StreamUtil::WriteToStream(pStream, &header, sizeof(header), &nOffset)); } *pnSavedSize = nOffset; #endif //FEATURE_PREJIT return S_OK; } // HotHeapWriter::PersistHotToStream