// Hook for IBaseFileSystem::Open.
//
// On the main thread (and for any path ID other than "SKIN"), resolves the
// requested file to a full path, makes it relative to BASE_PATH, and lets
// OpenResult veto the open — returning a null handle (0) when resolution
// fails or the open is rejected. When called from a non-main thread the
// check is skipped and the access is logged instead. On success, chains to
// the original Open through the vtable replacer.
//
// pFileName - file being opened.
// pOptions  - fopen-style mode string; may be NULL (logged as "no options").
// pathID    - search-path identifier; may be NULL (no check performed).
FileHandle_t VirtualFunctionHooks::IBaseFileSystem__Open(const char *pFileName, const char *pOptions, const char *pathID)
{
	std::string relative_path, full_path;
	// strcmp != 0 means "any path ID except SKIN" — SKIN lookups are exempt.
	if (pathID && strcmp("SKIN", pathID))
	{
		if (FunctionHooks->mainthread == ThreadGetCurrentId())
		{
			try
			{
				ToFull(pFileName, pathID, full_path);
				RelativeFrom(full_path, "BASE_PATH", relative_path);
			}
			catch (const std::exception &)  // catch by const ref — the original caught by value, which slices and copies
			{
				// Path resolution failed — refuse the open outright.
				return 0;
			}
			OpenResult opener(relative_path, full_path);
			if (!opener.GetResult())
				return 0;
		}
		else
		{
			FSLog(std::string("Path ID ") + (pathID ? pathID : "NULL") + " accessed from non-main thread for file " + pFileName + " (" + (pOptions ? pOptions : "no options") + ")");
		}
	}
	// Forward to the original IBaseFileSystem::Open implementation.
	return FunctionHooks->BaseFileSystemReplacer->Call<FileHandle_t, const char *, const char *, const char *>(FunctionHooks->IBaseFileSystem__Open__index, pFileName, pOptions, pathID);
}
uint InitMainThread() { ThreadSetDebugName( "MainThrd" ); #ifdef _WIN32 return ThreadGetCurrentId(); #elif _LINUX return pthread_self(); #endif }
// Releases the write side of the spin RW lock.
// Precondition (asserted): the caller is the thread that holds the write
// lock and no readers are active.
void CThreadSpinRWLock::UnlockWrite()
{
	Assert( m_lockInfo.m_writerId == ThreadGetCurrentId() && m_lockInfo.m_nReaders == 0 );
	// The "released" state: no writer id, zero readers.
	static const LockInfo_t newValue = { 0, 0 };
#if defined(_X360)
	// X360TBD: Serious Perf implications, not yet.
	__sync();
#endif
	// Atomically publish the released state by swapping the whole LockInfo_t
	// as a single 64-bit value.
	ThreadInterlockedExchange64( (int64 *)&m_lockInfo, *((int64 *)&newValue) );
	// NOTE(review): the pending-writer count is decremented AFTER the lock is
	// published as free — presumably intentional (waiters spin on m_lockInfo,
	// not m_nWriters); confirm against the acquire path before changing.
	m_nWriters--;
}
// Attempts to acquire the mutex without blocking.
// Returns true if the lock was taken (or, on Win32 without
// TryEnterCriticalSection available, after blocking in Lock()); false if
// another thread holds it.
bool CThreadMutex::TryLock()
{
#if defined( _WIN32 )
#ifdef THREAD_MUTEX_TRACING_ENABLED
	uint thisThreadID = ThreadGetCurrentId();
	// Trace contended try-acquires: another thread currently owns the lock.
	if ( m_bTrace && m_currentOwnerID && ( m_currentOwnerID != thisThreadID ) )
		Msg( "Thread %u about to try-wait for lock %x owned by %u\n", ThreadGetCurrentId(), (CRITICAL_SECTION *)&m_CriticalSection, m_currentOwnerID );
#endif
	// DynTryEnterCriticalSection is resolved at runtime — it is NULL on
	// platforms whose kernel32 lacks TryEnterCriticalSection.
	if ( DynTryEnterCriticalSection != NULL )
	{
		if ( (*DynTryEnterCriticalSection )( (CRITICAL_SECTION *)&m_CriticalSection ) != FALSE )
		{
#ifdef THREAD_MUTEX_TRACING_ENABLED
			if (m_lockCount == 0)
			{
				// we now own it for the first time.  Set owner information
				m_currentOwnerID = thisThreadID;
				if ( m_bTrace )
					Msg( "Thread %u now owns lock 0x%x\n", m_currentOwnerID, (CRITICAL_SECTION *)&m_CriticalSection );
			}
			// Critical sections are recursive; track the nesting depth.
			m_lockCount++;
#endif
			return true;
		}
		return false;
	}
	// No non-blocking primitive available: fall back to a blocking acquire,
	// which always "succeeds" from the caller's point of view.
	Lock();
	return true;
#elif defined( _LINUX )
	return pthread_mutex_trylock( &m_Mutex ) == 0;
#else
#error "Implement me!"
	return true;
#endif
}
// Spew output hook: appends each message to the log file `f`, prefixed with
// a high-resolution timestamp and the emitting thread's id. Writes are
// serialized by a function-local mutex and flushed immediately so the log
// survives a crash. Always returns SPEW_CONTINUE (never intercepts).
SpewRetval_t MyOutputFunc(SpewType_t spewType, const tchar *pMsg)
{
	static std::mutex s_logMutex;
	const std::lock_guard<std::mutex> guard(s_logMutex);

	fprintf(f, "MyOutputFunc [%15.10f] [T %4x] \"%s\"\n", Plat_FloatTime(), ThreadGetCurrentId(), pMsg);
	fflush(f);

	// Disabled diagnostics, kept for future debugging sessions:
	// if (Coroutine_IsActive()) {
	//     size_t depth = Coroutine_GetStackDepth();
	//     fprintf(f, ">>> In coroutine! Stack depth: %lu\n", depth);
	//     fflush(f);
	// }
	// if (ThreadGetCurrentId() != 0x5914) {
	//     __asm { int 3 };
	// }
	// return RealOutputFunc(spewType, pMsg);

	return SPEW_CONTINUE;
}
//----------------------------------------------------------------------------- void DeclareCurrentThreadIsMainThread() { g_ThreadMainThreadID = ThreadGetCurrentId(); }
// Returns true when the calling thread is the one previously registered as
// the main thread (see DeclareCurrentThreadIsMainThread).
bool ThreadInMainThread()
{
	return ThreadGetCurrentId() == g_ThreadMainThreadID;
}
//-----------------------------------------------------------------------------
// Makes sure all entries in the KD tree are in the correct position
//-----------------------------------------------------------------------------
void CDirtySpatialPartitionEntityList::OnPreQuery( SpatialPartitionListMask_t listMask )
{
#ifdef CLIENT_DLL
	const int validMask = PARTITION_CLIENT_GAME_EDICTS;
#else
	const int validMask = PARTITION_SERVER_GAME_EDICTS;
#endif
	// Only queries against the game-edict partition need dirty-entity flushing.
	if ( !( listMask & validMask ) )
		return;
	// Re-entrant query from the thread that already holds the write lock
	// (e.g. UpdatePartition below triggering a nested query) — don't deadlock.
	if ( m_partitionWriteId != 0 && m_partitionWriteId == ThreadGetCurrentId() )
		return;
#ifdef CLIENT_DLL
	// FIXME: This should really be an assertion... feh!
	if ( !C_BaseEntity::IsAbsRecomputationsEnabled() )
	{
		LockPartitionForRead();
		return;
	}
#endif
	// if you're holding a read lock, then these are entities that were still dirty after your trace started
	// or became dirty due to some other thread or callback. Updating them may cause corruption further up the
	// stack (e.g. partition iterator). Ignoring the state change should be safe since it happened after the
	// trace was requested or was unable to be resolved in a previous attempt (still dirty).
	if ( m_DirtyEntities.Count() && !m_readLockCount )
	{
		// Handles that cannot be updated this pass and must be re-queued.
		CUtlVector< CBaseHandle > vecStillDirty;
		m_partitionMutex.LockForWrite();
		// Publish our thread id so nested queries from this thread early-out above.
		m_partitionWriteId = ThreadGetCurrentId();
		CTSListWithFreeList<CBaseHandle>::Node_t *pCurrent, *pNext;
		// Detach() atomically takes the whole dirty list; loop again in case
		// other threads push more entries while we drain.
		while ( ( pCurrent = m_DirtyEntities.Detach() ) != NULL )
		{
			while ( pCurrent )
			{
				CBaseHandle handle = pCurrent->elem;
				// Grab the next link and free the node BEFORE touching the
				// entity — UpdatePartition may re-dirty and re-use nodes.
				pNext = (CTSListWithFreeList<CBaseHandle>::Node_t *)pCurrent->Next;
				m_DirtyEntities.FreeNode( pCurrent );
				pCurrent = pNext;
#ifndef CLIENT_DLL
				CBaseEntity *pEntity = gEntList.GetBaseEntity( handle );
#else
				CBaseEntity *pEntity = cl_entitylist->GetBaseEntityFromHandle( handle );
#endif
				if ( pEntity )
				{
					// If an entity is in the middle of bone setup, don't call UpdatePartition
					// which can cause it to redo bone setup on the same frame causing a recursive
					// call to bone setup.
					if ( !pEntity->IsEFlagSet( EFL_SETTING_UP_BONES ) )
					{
						pEntity->CollisionProp()->UpdatePartition();
					}
					else
					{
						vecStillDirty.AddToTail( handle );
					}
				}
			}
		}
		// Re-queue anything we deliberately skipped so a later pass retries it.
		if ( vecStillDirty.Count() > 0 )
		{
			for ( int i = 0; i < vecStillDirty.Count(); i++ )
			{
				m_DirtyEntities.PushItem( vecStillDirty[i] );
			}
		}
		// Clear the owner id before releasing so the early-out above stays correct.
		m_partitionWriteId = 0;
		m_partitionMutex.UnlockWrite();
	}
	// Hold a read lock for the duration of the caller's query.
	LockPartitionForRead();
}