// Acquire the critical section, giving up after `time` (same units as
// GetTickCount deltas, i.e. milliseconds — TODO confirm against bigtime_t).
// Returns st::OK on success, Locker::status::TIME_OUT_ERROR on timeout.
// On non-NT configurations the operation is unsupported and returns st::ERR.
status_t lock_with_timeout(bigtime_t time)
{
#ifdef NTK_WINNT
    bigtime_t start_time = GetTickCount();
    // BUG FIX: the loop condition was inverted. The old code spun while
    // TryEnterCriticalSection SUCCEEDED (re-entering the section each pass)
    // and fell through to claim ownership when acquisition FAILED.
    // Correct behavior: spin until the section is acquired or we time out.
    while (!TryEnterCriticalSection(&m_critical_section))
    {
        if (GetTickCount() - start_time > time)
            return Locker::status::TIME_OUT_ERROR;
        snooze(1); // brief back-off; interval chosen arbitrarily
    }
    ++m_lock_count;
    return st::OK;
#else
    (void)time; // unused when TryEnterCriticalSection is unavailable
    return status_t(st::ERR, "TryEnterCriticalSection: Windows9x ではサポートされてないようです。");
#endif
}
bool MutexImpl::tryLockImpl(long milliseconds) { const int sleepMillis = 5; Timestamp now; Timestamp::TimeDiff diff(Timestamp::TimeDiff(milliseconds)*1000); do { try { if (TryEnterCriticalSection(&_cs) == TRUE) return true; } catch (...) { throw SystemException("cannot lock mutex"); } Sleep(sleepMillis); } while (!now.isElapsed(diff)); return false; }
int loom_mutex_trylock_real(const char *file, int line, MutexHandle m) { #ifndef NTELEMETRY TmU64 matchId; #endif lmAssert(m != 0, "loom_mutex_lock_real - tried to lock NULL"); tmTryLockEx(gTelemetryContext, &matchId, 1000, file, line, m, "mutex_trylock"); if (TryEnterCriticalSection((CRITICAL_SECTION *)m)) { tmEndTryLockEx(gTelemetryContext, matchId, file, line, m, TMLR_SUCCESS); tmSetLockState(gTelemetryContext, m, TMLS_LOCKED, "mutex_trylock"); return 1; } else { tmEndTryLockEx(gTelemetryContext, matchId, file, line, m, TMLR_FAILED); return 0; } }
// Attempt to acquire the mutex without blocking.
// Returns true when the mutex was taken, false when it is already held.
// Any other pthread failure raises ePdfError_MutexError. Builds without
// multithreading support always report success.
bool PdfMutex::TryLock()
{
#ifdef PODOFO_MULTI_THREAD
#ifdef _WIN32
    return TryEnterCriticalSection( &m_cs ) != 0;
#else
    const int rc = pthread_mutex_trylock( &m_mutex );

    if( rc == 0 )
        return true;

    if( rc == EBUSY )
        return false;

    PODOFO_RAISE_ERROR( ePdfError_MutexError );
#endif // _WIN32
#endif // PODOFO_MULTI_THREAD

    // No multithreading support: simulate a successful lock.
    return true;
}
// Flush the pending output buffer through DoFlushBuffer.
// When Force is FALSE the flush is best-effort: if another thread holds the
// critical section we skip this flush instead of blocking. When Force is
// TRUE we block until the section is acquired.
VOID FlushBuffer(BOOL Force)
{
    if (!Force)
    {
        BOOL Entered = TryEnterCriticalSection(&CriticalSect);
        if(!Entered)
            return; // buffer busy elsewhere; a later call will flush it
    }
    else
        EnterCriticalSection(&CriticalSect);
    if (BufLen != 0)
    {
        // Double zero terminator — presumably the buffer may be consumed as
        // a wide (UTF-16) string downstream; TODO confirm in DoFlushBuffer.
        Buf[BufLen] = 0;
        Buf[BufLen+1] = 0;
        DoFlushBuffer(Buf, BufLen);
        // Update 'BackSpaces' for next invocation of DoFlushBuffer
        // NOTE(review): min(0, ...) clamps BackSpaces to be non-positive —
        // looks intentional (it counts pending erasures), but verify.
        BackSpaces = min(0, BackSpaces + OutputPos + BufLen);
        OutputPos = BufPos - BufLen;
        BufPos = 0;
        BufLen = 0;
    }
    LeaveCriticalSection(&CriticalSect);
}
/* Annotated variant of pthread_mutex_trylock: logs lock contention,
 * recording the source location (file/line) of both the current holder and
 * the caller. Returns 0 when the lock is taken, EBUSY when already held. */
int pthread_mutex_trylock_annotate_np(pthread_mutex_t *mutex, const char* file, int line)
{
  int contention = 0;
  pthread_np_assert_live_mutex(mutex,"trylock");
  /* Lazily materialize a statically-initialized mutex; the second check
   * under mutex_init_lock avoids double initialization. */
  if (*mutex == PTHREAD_MUTEX_INITIALIZER)
    {
      pthread_mutex_lock(&mutex_init_lock);
      if (*mutex == PTHREAD_MUTEX_INITIALIZER)
        {
          pthread_mutex_init(mutex, NULL);
        }
      pthread_mutex_unlock(&mutex_init_lock);
    }
  /* Unsynchronized peek at the owner — only used to decide whether to log
   * contention, so a stale read is harmless. */
  if ((*mutex)->owner)
    {
      pthshow("Mutex #x%p -> #x%p: tried contention; owned by #x%p, wanted by #x%p", mutex, *mutex, (*mutex)->owner, pthread_self());
      pthshow("Mutex #x%p -> #x%p: contention notes: old %s +%d, new %s +%d", mutex, *mutex, (*mutex)->file,(*mutex)->line, file, line);
      contention = 1;
    }
  if (TryEnterCriticalSection(&(*mutex)->cs))
    {
      if (contention)
        {
          pthshow("Mutex #x%p -> #x%p: contention end; left by #x%p, taken by #x%p", mutex, *mutex, (*mutex)->owner, pthread_self());
          pthshow("Mutex #x%p -> #x%p: contention notes: old %s +%d, new %s +%d", mutex, *mutex, (*mutex)->file,(*mutex)->line, file, line);
        }
      /* Record the new owner and its call site for later contention reports. */
      (*mutex)->owner = pthread_self();
      (*mutex)->file = file;
      (*mutex)->line = line;
      return 0;
    }
  else
    return EBUSY;
}
// ---------------------------------------------------------------------------- // Trys to lock mutex // ---------------------------------------------------------------------------- bool prMutex::TryLock() { bool locked = false; #if defined(PLATFORM_ANDROID) // Lock? int result = pthread_mutex_trylock(&m_mutex); if (result == 0) { locked = true; } else { prTrace(LogError, "Failed to locked mutex\n"); } #elif defined(PLATFORM_PC) // Lock? if (!TryEnterCriticalSection(&m_cs)) { EnterCriticalSection(&m_cs); locked = true; } else { prTrace(LogError, "Failed to locked mutex\n"); } #elif (defined(PLATFORM_IOS) || defined(PLATFORM_LINUX) || defined(PLATFORM_MAC)) // Allows class to compile for the other platforms. #else #error unsupported platform #endif return locked; }
int CPLAcquireMutex( void *hMutexIn, double dfWaitInSeconds ) { #ifdef USE_WIN32_MUTEX HANDLE hMutex = (HANDLE) hMutexIn; DWORD hr; hr = WaitForSingleObject( hMutex, (int) (dfWaitInSeconds * 1000) ); return hr != WAIT_TIMEOUT; #else CRITICAL_SECTION *pcs = (CRITICAL_SECTION *)hMutexIn; BOOL ret; while( (ret = TryEnterCriticalSection(pcs)) == 0 && dfWaitInSeconds > 0.0 ) { CPLSleep( MIN(dfWaitInSeconds,0.125) ); dfWaitInSeconds -= 0.125; } return ret; #endif }
/*
** Attempt to enter mutex p without blocking. Returns SQLITE_OK on success,
** SQLITE_BUSY otherwise. As written, the real attempt is compiled out
** (#if 0), so this always reports SQLITE_BUSY — see the comment below.
*/
static int winMutexTry(sqlite3_mutex *p){
  int rc = SQLITE_BUSY;
  assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld(p) );
  /*
  ** The sqlite3_mutex_try() routine is very rarely used, and when it
  ** is used it is merely an optimization.  So it is OK for it to always
  ** fail.
  **
  ** The TryEnterCriticalSection() interface is only available on WinNT.
  ** And some windows compilers complain if you try to use it without
  ** first doing some #defines that prevent SQLite from building on Win98.
  ** For that reason, we will omit this optimization for now.  See
  ** ticket #2685.
  */
#if 0
  if( mutexIsNT() && TryEnterCriticalSection(&p->mutex) ){
    p->owner = GetCurrentThreadId();
    p->nRef++;
    rc = SQLITE_OK;
  }
#endif
  return rc;
}
//! Try to acquire the given recursive mutex; on success record it as held
//! by this scoped_lock and return true, otherwise return false.
bool recursive_mutex::scoped_lock::internal_try_acquire( recursive_mutex& m ) {
#if _WIN32||_WIN64
    // Debug-build sanity check on the mutex lifecycle state.
    switch( m.state ) {
    case INITIALIZED:
        break;
    case DESTROYED:
        __TBB_ASSERT(false,"recursive_mutex::scoped_lock: mutex already destroyed");
        break;
    default:
        __TBB_ASSERT(false,"recursive_mutex::scoped_lock: illegal mutex state");
        break;
    }
#endif /* _WIN32||_WIN64 */

    bool acquired;
#if _WIN32||_WIN64
    acquired = TryEnterCriticalSection(&m.impl)!=0;
#else
    acquired = pthread_mutex_trylock(&m.impl)==0;
#endif /* _WIN32||_WIN64 */
    if( acquired )
        my_mutex = &m;
    return acquired;
}
/*
 * epicsMutexOsdTryLock ()
 *
 * Non-blocking acquire. On NT the critical section is tried directly;
 * on older Windows the kernel mutex is polled with a zero timeout.
 */
epicsMutexLockStatus epicsMutexOsdTryLock ( epicsMutexOSD * pSem )
{
    if ( ! thisIsNT ) {
        DWORD status = WaitForSingleObject ( pSem->os.mutex, 0 );
        if ( status == WAIT_OBJECT_0 ) {
            return epicsMutexLockOK;
        }
        return ( status == WAIT_TIMEOUT ) ?
            epicsMutexLockTimeout : epicsMutexLockError;
    }
    return TryEnterCriticalSection ( &pSem->os.criticalSection ) ?
        epicsMutexLockOK : epicsMutexLockTimeout;
}
/* Attempt to reply a timer request to its message port without blocking.
** Both the port lock and (when not using atomics) the target thread's
** signal lock are taken with TryEnter only, so this may fail spuriously.
** Returns TTRUE only when the message was queued and the owner signalled. */
static TBOOL hal_replytimereq(struct TTimeRequest *tr)
{
	TBOOL success = TFALSE;
	struct TMessage *msg = TGETMSGPTR(tr);
	struct TMsgPort *mp = msg->tmsg_RPort;
	CRITICAL_SECTION *mplock = THALGetObject((TAPTR) &mp->tmp_Lock, CRITICAL_SECTION);
	if (TryEnterCriticalSection(mplock))
	{
		struct TTask *sigtask = mp->tmp_SigTask;
		struct HALThread *t = THALGetObject((TAPTR) &sigtask->tsk_Thread, struct HALThread);
#ifndef HAL_USE_ATOMICS
		if (TryEnterCriticalSection(&t->hth_SigLock))
#endif
		{
			/* Mark the message replied and queue it on the port. */
			tr->ttr_Req.io_Error = 0;
			msg->tmsg_Flags = TMSG_STATUS_REPLIED | TMSGF_QUEUED;
			TAddTail(&mp->tmp_MsgList, &msg->tmsg_Node);
#ifndef HAL_USE_ATOMICS
			/* Set and signal only the bits the task is not already carrying. */
			if (mp->tmp_Signal & ~t->hth_SigState)
			{
				t->hth_SigState |= mp->tmp_Signal;
				SetEvent(t->hth_SigEvent);
			}
			LeaveCriticalSection(&t->hth_SigLock);
#else
			/* Atomically OR in the port signal; wake only on a 0->1 transition. */
			if (mp->tmp_Signal & ~(TUINT) InterlockedOr(&t->hth_SigState, mp->tmp_Signal))
				SetEvent(t->hth_SigEvent);
#endif
			success = TTRUE;
		}
		LeaveCriticalSection(mplock);
	}
	return success;
}
/* Non-blocking lock attempt. *pb reports whether the mutex was acquired
** (SG_TRUE/SG_FALSE). The return value is 0 for a clean attempt (acquired
** or busy), or the pthread error code on an unexpected failure. */
int SG_mutex__trylock(SG_mutex* pm, SG_bool* pb)
{
#if defined(MAC) || defined(LINUX)
	int rc = pthread_mutex_trylock(&pm->mtx);
	if (0 == rc)
	{
		*pb = SG_TRUE;
		return 0;
	}
	if (EBUSY == rc)
	{
		/* Held by someone else: not an error, just not acquired. */
		*pb = SG_FALSE;
		return 0;
	}
	return rc;
#endif

#if defined(WINDOWS)
	*pb = TryEnterCriticalSection(&pm->cs) ? SG_TRUE : SG_FALSE;
	return 0;
#endif
}
// Rainmeter measure update: periodically checks the recycle bin and, when a
// change is detected, spawns a worker thread to re-query its contents.
// Returns the cached bin item count or size for this measure.
PLUGIN_EXPORT double Update(void* data)
{
	MeasureData* measure = (MeasureData*)data;

	// Best-effort: if another instance holds the section, skip this poll.
	if (TryEnterCriticalSection(&g_CriticalSection))
	{
		if (!g_Thread)
		{
			++g_UpdateCount;
			if (g_UpdateCount > g_InstanceCount)
			{
				if (!HasRecycleBinChanged())
				{
					g_UpdateCount = 0;
				}
				else
				{
					// Delay next check.
					g_UpdateCount = g_InstanceCount * -2;

					DWORD id;
					HANDLE thread = CreateThread(NULL, 0, QueryRecycleBinThreadProc, NULL, 0, &id);
					if (thread)
					{
						CloseHandle(thread);
						g_Thread = true;
					}
				}
			}
		}
		LeaveCriticalSection(&g_CriticalSection);
	}

	return measure->count ? g_BinCount : g_BinSize;
}
// Non-blocking attempt to enter the critical section; true on success.
bool TryLock()
{
    if (TryEnterCriticalSection(&critical_section))
        return true;
    return false;
};
//============================================================================== Bool Mutex::tryLock() { CRITICAL_SECTION* mtx = reinterpret_cast<CRITICAL_SECTION*>(m_impl); BOOL enter = TryEnterCriticalSection(mtx); return enter; }
// Non-blocking acquisition attempt; true when the lock was obtained.
bit OwnedLock::TryLock() const
{
    return TryEnterCriticalSection((CRITICAL_SECTION*)hand) != 0;
}
/* Main body of the critical-section test suite. Runs on its own thread;
 * `arg` points to a BOOL that is set TRUE on exit (see winpr note at the
 * end). Returns (PVOID)0 on success, (PVOID)1 on any failure. */
static PVOID TestSynchCritical_Main(PVOID arg)
{
	int i, j;
	SYSTEM_INFO sysinfo;
	DWORD dwPreviousSpinCount;
	DWORD dwSpinCount;
	DWORD dwSpinCountExpected;
	HANDLE hMainThread;
	HANDLE* hThreads;
	HANDLE hThread;
	DWORD dwThreadCount;
	DWORD dwThreadExitCode;
	BOOL bTest1Running;
	PBOOL pbThreadTerminated = (PBOOL)arg;

	GetNativeSystemInfo(&sysinfo);

	hMainThread = (HANDLE) (ULONG_PTR) GetCurrentThreadId();

	/**
	 * Test SpinCount in SetCriticalSectionSpinCount, InitializeCriticalSectionEx and InitializeCriticalSectionAndSpinCount
	 * SpinCount must be forced to be zero on on uniprocessor systems and on systems
	 * where WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT is defined
	 */

	dwSpinCount = 100;
	InitializeCriticalSectionEx(&critical, dwSpinCount, 0);
	while(--dwSpinCount)
	{
		dwPreviousSpinCount = SetCriticalSectionSpinCount(&critical, dwSpinCount);
		dwSpinCountExpected = 0;
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
		if (sysinfo.dwNumberOfProcessors > 1)
			dwSpinCountExpected = dwSpinCount+1;
#endif
		if (dwPreviousSpinCount != dwSpinCountExpected)
		{
			printf("CriticalSection failure: SetCriticalSectionSpinCount returned %u (expected: %u)\n", dwPreviousSpinCount, dwSpinCountExpected);
			goto fail;
		}
		DeleteCriticalSection(&critical);
		/* Alternate between the two initialization APIs. */
		if (dwSpinCount%2==0)
			InitializeCriticalSectionAndSpinCount(&critical, dwSpinCount);
		else
			InitializeCriticalSectionEx(&critical, dwSpinCount, 0);
	}
	DeleteCriticalSection(&critical);

	/**
	 * Test single-threaded recursive TryEnterCriticalSection/EnterCriticalSection/LeaveCriticalSection
	 */

	InitializeCriticalSection(&critical);
	for (i = 0; i < 1000; i++)
	{
		if (critical.RecursionCount != i)
		{
			printf("CriticalSection failure: RecursionCount field is %d instead of %d.\n", critical.RecursionCount, i);
			goto fail;
		}
		/* Alternate blocking and non-blocking entry on each recursion level. */
		if (i%2==0)
		{
			EnterCriticalSection(&critical);
		}
		else
		{
			if (TryEnterCriticalSection(&critical) == FALSE)
			{
				printf("CriticalSection failure: TryEnterCriticalSection failed where it should not.\n");
				goto fail;
			}
		}
		if (critical.OwningThread != hMainThread)
		{
			printf("CriticalSection failure: Could not verify section ownership (loop index=%d).\n", i);
			goto fail;
		}
	}
	/* Unwind the recursion, verifying the bookkeeping at each level. */
	while (--i >= 0)
	{
		LeaveCriticalSection(&critical);
		if (critical.RecursionCount != i)
		{
			printf("CriticalSection failure: RecursionCount field is %d instead of %d.\n", critical.RecursionCount, i);
			goto fail;
		}
		/* OwningThread must be cleared once fully released. */
		if (critical.OwningThread != (HANDLE)(i ? hMainThread : NULL))
		{
			printf("CriticalSection failure: Could not verify section ownership (loop index=%d).\n", i);
			goto fail;
		}
	}
	DeleteCriticalSection(&critical);

	/**
	 * Test using multiple threads modifying the same value
	 */

	dwThreadCount = sysinfo.dwNumberOfProcessors > 1 ? sysinfo.dwNumberOfProcessors : 2;
	hThreads = (HANDLE*) calloc(dwThreadCount, sizeof(HANDLE));
	if (!hThreads)
	{
		printf("Problem allocating memory\n");
		goto fail;
	}
	for (j = 0; j < TEST_SYNC_CRITICAL_TEST1_RUNS; j++)
	{
		dwSpinCount = j * 1000;
		InitializeCriticalSectionAndSpinCount(&critical, dwSpinCount);
		gTestValueVulnerable = 0;
		gTestValueSerialized = 0;
		/* the TestSynchCritical_Test1 threads shall run until bTest1Running is FALSE */
		bTest1Running = TRUE;
		for (i = 0; i < (int) dwThreadCount; i++)
		{
			hThreads[i] = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) TestSynchCritical_Test1, &bTest1Running, 0, NULL);
		}
		/* let it run for TEST_SYNC_CRITICAL_TEST1_RUNTIME_MS ... */
		Sleep(TEST_SYNC_CRITICAL_TEST1_RUNTIME_MS);
		bTest1Running = FALSE;
		for (i = 0; i < (int) dwThreadCount; i++)
		{
			if (WaitForSingleObject(hThreads[i], INFINITE) != WAIT_OBJECT_0)
			{
				printf("CriticalSection failure: Failed to wait for thread #%d\n", i);
				goto fail;
			}
			GetExitCodeThread(hThreads[i], &dwThreadExitCode);
			if(dwThreadExitCode != 0)
			{
				printf("CriticalSection failure: Thread #%d returned error code %u\n", i, dwThreadExitCode);
				goto fail;
			}
			CloseHandle(hThreads[i]);
		}
		/* If the critical section serialized correctly both counters match. */
		if (gTestValueVulnerable != gTestValueSerialized)
		{
			printf("CriticalSection failure: unexpected test value %d (expected %d)\n", gTestValueVulnerable, gTestValueSerialized);
			goto fail;
		}
		DeleteCriticalSection(&critical);
	}
	free(hThreads);

	/**
	 * TryEnterCriticalSection in thread must fail if we hold the lock in the main thread
	 */

	InitializeCriticalSection(&critical);
	if (TryEnterCriticalSection(&critical) == FALSE)
	{
		printf("CriticalSection failure: TryEnterCriticalSection unexpectedly failed.\n");
		goto fail;
	}
	/* This thread tries to call TryEnterCriticalSection which must fail */
	hThread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) TestSynchCritical_Test2, NULL, 0, NULL);
	if (WaitForSingleObject(hThread, INFINITE) != WAIT_OBJECT_0)
	{
		printf("CriticalSection failure: Failed to wait for thread\n");
		goto fail;
	}
	GetExitCodeThread(hThread, &dwThreadExitCode);
	if(dwThreadExitCode != 0)
	{
		printf("CriticalSection failure: Thread returned error code %u\n", dwThreadExitCode);
		goto fail;
	}
	CloseHandle(hThread);

	*pbThreadTerminated = TRUE; /* requ. for winpr issue, see below */
	return (PVOID)0;
fail:
	*pbThreadTerminated = TRUE; /* requ. for winpr issue, see below */
	return (PVOID)1;
}
bool FastMutex::trylock(){ // Attempt to acquire a lock, return true if succ. return(TryEnterCriticalSection((LPCRITICAL_SECTION)cs)==TRUE); }
// Non-blocking lock attempt; returns true when the section was entered.
bool CriticalSection::Trylock()
{
	// FIX: the API contract is "nonzero on success", not "TRUE on success";
	// compare against 0 instead of TRUE to avoid misreading a nonzero result.
	return TryEnterCriticalSection(&m_Data) != 0;
}
// Attempt to enter the critical section without blocking; true on success.
bool trylock()
{
    if (TryEnterCriticalSection(&cs))
        return true;
    return false;
}
// Non-blocking attempt to enter the critical section; true when acquired.
bool CriticalSection::tryEnter() const noexcept
{
    return TryEnterCriticalSection ((CRITICAL_SECTION*) internal) ? true : false;
}
/* Non-blocking mutex acquisition.
 * Returns EMBB_SUCCESS when the mutex was locked, EMBB_ERROR otherwise. */
int embb_mutex_try_lock(embb_mutex_t* mutex) {
  return TryEnterCriticalSection(mutex) ? EMBB_SUCCESS : EMBB_ERROR;
}
// Non-blocking acquire; true when the critical section was entered.
forceinline bool Mutex::tryacquire(void)
{
	return TryEnterCriticalSection(&w_cs) ? true : false;
}
/* Non-blocking write-lock attempt: 0 on success, ENODEV when busy.
 * NOTE(review): ENODEV is an unusual code for "lock held" — EBUSY would be
 * conventional; verify callers before changing it. */
int lock_write_try(struct lock *l)
{
	if (TryEnterCriticalSection(&l->c))
		return 0;
	return ENODEV;
}
/* Try to lock a Mutex, returning immediately, with a return code that
   tells if mutex could be locked or not: 0 = locked, 1 = already held. */
int PsychTryLockMutex(psych_mutex* mutex)
{
	// Must #define _WIN32_WINNT as at least 0x0400 in master include file PsychIncludes.h for this to compile!
	if (TryEnterCriticalSection(mutex))
		return(0);
	return(1);
}
/* Non-blocking mutex acquisition; true only when the lock was taken. */
PLATAPI boolean_t plat_mutex_try_lock(plat_thread_mutex_t* mutex) {
	BOOL entered = TryEnterCriticalSection(&mutex->tm_crit_section);
	return entered != 0;
}
/* Attempt to take the spinlock without waiting: 0 on success, -1 if busy. */
int devil_spinlock_trylock(devil_spinlock_t* spinlock)
{
    if (TryEnterCriticalSection(spinlock))
        return 0;
    return -1;
}
// Non-blocking lock attempt; true when the mutex was acquired.
bool WinMutex::tryLock()
{
	return TryEnterCriticalSection(&m_mutex) != 0;
}
// Non-blocking mutex entry; returns nonzero when the section was acquired.
// hMutex must be a valid handle previously returned by the OS mutex API.
BOOL STDCALL OSTryEnterMutex(HANDLE hMutex)
{
    assert(hMutex);
    CRITICAL_SECTION* section = (CRITICAL_SECTION*)hMutex;
    return TryEnterCriticalSection(section);
}