// Returns the per-thread stress log for pThread, creating (or recycling) one
// if needed. Returns NULL when stress logging is disabled or no chunk may be
// allocated. pThread == NULL means "the current thread".
ThreadStressLog* StressLog::CreateThreadStressLog(Thread * pThread)
{
    // Stress logging disabled entirely — nothing to hand out.
    if (theLog.facilitiesToLog == 0)
        return NULL;

    if (pThread == NULL)
        pThread = ThreadStore::GetCurrentThread();

    // Fast path: the thread already owns a log.
    ThreadStressLog* existing =
        reinterpret_cast<ThreadStressLog*>(pThread->GetThreadStressLog());
    if (existing != NULL)
        return existing;

    // If there is no dead log to recycle and a fresh chunk would be refused
    // anyway, bail out before taking the lock.
    if (VolatileLoad(&theLog.deadCount) == 0 && !AllowNewChunk(0))
        return NULL;

    // Serialize creation/recycling against other threads.
    CrstHolder holder(theLog.pLock);
    return CreateThreadStressLogHelper(pThread);
}
bool StressLog::AllowNewChunk (long numChunksInCurThread) { _ASSERTE (numChunksInCurThread <= VolatileLoad(&theLog.totalChunk)); UInt32 perThreadLimit = theLog.MaxSizePerThread; if (numChunksInCurThread == 0 /*&& IsSuspendEEThread()*/) return TRUE; if (ThreadStore::GetCurrentThread()->IsGCSpecial()) { perThreadLimit *= GC_STRESSLOG_MULTIPLY; } if ((UInt32)numChunksInCurThread * STRESSLOG_CHUNK_SIZE >= perThreadLimit) { return FALSE; } return (UInt32)VolatileLoad(&theLog.totalChunk) * STRESSLOG_CHUNK_SIZE < theLog.MaxSizeTotal; }
// This function is used to process a _TPM_Init indication. LIB_EXPORT void _TPM_Init( void ) { BOOL restored = FALSE; /* libtpms added */ g_powerWasLost = g_powerWasLost | _plat__WasPowerLost(); #if SIMULATION && !defined NDEBUG /* libtpms changed */ // If power was lost and this was a simulation, put canary in RAM used by NV // so that uninitialized memory can be detected more easily if(g_powerWasLost) { memset(&gc, 0xbb, sizeof(gc)); memset(&gr, 0xbb, sizeof(gr)); memset(&gp, 0xbb, sizeof(gp)); memset(&go, 0xbb, sizeof(go)); } #endif #if SIMULATION // Clear the flag that forces failure on self-test g_forceFailureMode = FALSE; #endif // Set initialization state TPMInit(); // Set g_DRTMHandle as unassigned g_DRTMHandle = TPM_RH_UNASSIGNED; // No H-CRTM, yet. g_DrtmPreStartup = FALSE; // Initialize the NvEnvironment. g_nvOk = NvPowerOn(); // Initialize cryptographic functions g_inFailureMode |= (CryptInit() == FALSE); /* libtpms changed */ if(!g_inFailureMode) { // Load the persistent data NvReadPersistent(); // Load the orderly data (clock and DRBG state). // If this is not done here, things break NvRead(&go, NV_ORDERLY_DATA, sizeof(go)); // Start clock. Need to do this after NV has been restored. TimePowerOn(); /* libtpms added begin */ VolatileLoad(&restored); if (restored) NVShadowRestore(); /* libtpms added end */ } return; }
//--------------------------------------------------------------------------------------- // // Initialize the static instance and lock. // HRESULT LOADEDMODULES::InitializeStatics() { HRESULT hr = S_OK; if (VolatileLoad(&s_pLoadedModules) == NULL) { // Initialize global read-write lock { NewHolder<UTSemReadWrite> pSemReadWrite = new (nothrow) UTSemReadWrite(); IfNullGo(pSemReadWrite); IfFailGo(pSemReadWrite->Init()); if (InterlockedCompareExchangeT<UTSemReadWrite *>(&m_pSemReadWrite, pSemReadWrite, NULL) == NULL) { // We won the initialization race pSemReadWrite.SuppressRelease(); } } // Initialize the global instance { NewHolder<LOADEDMODULES> pLoadedModules = new (nothrow) LOADEDMODULES(); IfNullGo(pLoadedModules); { LOCKWRITE(); if (VolatileLoad(&s_pLoadedModules) == NULL) { VolatileStore(&s_pLoadedModules, pLoadedModules.Extract()); } } } } ErrExit: return hr; } // LOADEDMODULES::InitializeStatics
// Worker for CreateThreadStressLog; the caller holds theLog.pLock.
// Tries to recycle a dead thread's log (preferring one older than
// RECYCLE_AGE, else the oldest dead one when no new chunk is allowed),
// otherwise allocates a fresh ThreadStressLog, activates it for pThread,
// and — unless a recycled log was reused in place — pushes it onto the
// global theLog.logs list. Returns NULL on allocation failure.
ThreadStressLog* StressLog::CreateThreadStressLogHelper(Thread * pThread)
{
    bool skipInsert = FALSE;          // TRUE when reusing a log already on the list
    ThreadStressLog* msgs = NULL;

    // See if we can recycle a dead thread
    if (VolatileLoad(&theLog.deadCount) > 0)
    {
        unsigned __int64 recycleStamp = getTimeStamp() - RECYCLE_AGE;
        msgs = VolatileLoad(&theLog.logs);
        //find out oldest dead ThreadStressLog in case we can't find one within
        //recycle age but can't create a new chunk
        ThreadStressLog * oldestDeadMsg = NULL;
        while(msgs != 0)
        {
            if (msgs->isDead)
            {
                // curPtr at the chunk tail's end means no message (and thus
                // no timestamp) was ever written to this log.
                bool hasTimeStamp = msgs->curPtr != (StressMsg *)msgs->chunkListTail->EndPtr();
                if (hasTimeStamp && msgs->curPtr->timeStamp < recycleStamp)
                {
                    // Old enough to recycle; it is already linked into the
                    // list, so skip re-insertion below.
                    skipInsert = TRUE;
                    PalInterlockedDecrement(&theLog.deadCount);
                    break;
                }
                if (!oldestDeadMsg)
                {
                    oldestDeadMsg = msgs;
                }
                // NOTE(review): 'hasTimeStamp' describes msgs, not
                // oldestDeadMsg — if oldestDeadMsg has no timestamp this
                // reads its end-of-chunk curPtr. Confirm that is benign.
                else if (hasTimeStamp && oldestDeadMsg->curPtr->timeStamp > msgs->curPtr->timeStamp)
                {
                    oldestDeadMsg = msgs;
                }
            }
            msgs = msgs->next;
        }
        // Here msgs is non-NULL only if the loop broke on a recyclable log;
        // a full walk leaves msgs == NULL.
        //if the total stress log size limit is already passed and we can't add new chunk,
        //always reuse the oldest dead msg
        if (!AllowNewChunk (0) && !msgs)
        {
            msgs = oldestDeadMsg;
            skipInsert = TRUE;
            PalInterlockedDecrement(&theLog.deadCount);
        }
    }

    // No recyclable log found: allocate a new one.
    if (msgs == 0)
    {
        msgs = new (nothrow) ThreadStressLog();
        if (msgs == 0 ||!msgs->IsValid ())
        {
            // delete on NULL is a no-op; report failure to the caller.
            delete msgs;
            msgs = 0;
            goto LEAVE;
        }
    }

    // Bind the (new or recycled) log to this thread.
    msgs->Activate (pThread);

    if (!skipInsert)
    {
#ifdef _DEBUG
        // Debug-only: verify the log is not already on the global list.
        ThreadStressLog* walk = VolatileLoad(&theLog.logs);
        while (walk)
        {
            _ASSERTE (walk != msgs);
            walk = walk->next;
        }
#endif
        // Put it into the stress log
        msgs->next = VolatileLoad(&theLog.logs);
        VolatileStore(&theLog.logs, msgs);
    }

LEAVE:
    ;
    return msgs;
}