// Acquires the writer (exclusive) side of the slim reader/writer lock.
//
// dwTimeout: timeout in milliseconds, or INFINITE to block until acquired.
// Returns TRUE when the exclusive lock was acquired, FALSE on timeout.
BOOL CRWLockSlim::AcquireWriterLock( DWORD dwTimeout )
{
    BOOL bRet = FALSE;
#if ( _WIN32_WINNT_WIN7 <= _WIN32_WINNT )
    if ( INFINITE == dwTimeout || FALSE == m_bWin7AndLater )
    {
        // No timeout requested, or TryAcquireSRWLockExclusive is not
        // available at runtime (pre-Win7): block unconditionally.
        AcquireSRWLockExclusive( &m_srwLock );
        bRet = TRUE;
    }
    else
    {
        LARGE_INTEGER lnCurr, lnEnd;
        QueryPerformanceCounter( &lnCurr );
        // Convert the millisecond timeout into performance-counter ticks.
        lnEnd.QuadPart = lnCurr.QuadPart + dwTimeout * ( m_lnFreq.QuadPart / 1000 );
        do
        {
            bRet = (BOOL)TryAcquireSRWLockExclusive( &m_srwLock );
            if ( bRet )
            {
                // BUGFIX: stop polling as soon as the lock is acquired.
                // The previous loop kept calling TryAcquireSRWLockExclusive
                // after success until the deadline passed, overwriting bRet
                // (and attempting to re-acquire a lock already held, which
                // is undefined for SRW locks).
                break;
            }
            YieldProcessor(); // be polite to the current owner while spinning
            QueryPerformanceCounter( &lnCurr );
        } while ( lnCurr.QuadPart < lnEnd.QuadPart );
    }
#else
    AcquireSRWLockExclusive( &m_srwLock );
    bRet = TRUE;
#endif
    return bRet;
}
// Locks the mutex; any wait failure is fatal and aborts the process.
void TRI_LockMutex (TRI_mutex_t* mutex) {
  // as of VS2013, exclusive SRWLocks tend to be faster than native mutexes
#if TRI_WINDOWS_VISTA_LOCKS
  DWORD status = WaitForSingleObject(mutex->_mutex, INFINITE);

  switch (status) {
    case WAIT_ABANDONED:
      LOG_FATAL_AND_EXIT("locks-win32.c:TRI_LockMutex:could not lock the condition --> WAIT_ABANDONED");
    case WAIT_OBJECT_0:
      // acquired the mutex successfully
      break;
    case WAIT_TIMEOUT:
      LOG_FATAL_AND_EXIT("locks-win32.c:TRI_LockMutex:could not lock the condition --> WAIT_TIMEOUT");
    case WAIT_FAILED:
      status = GetLastError();
      LOG_FATAL_AND_EXIT("locks-win32.c:TRI_LockMutex:could not lock the condition --> WAIT_FAILED - reason -->%d",status);
  }
#else
  AcquireSRWLockExclusive(&mutex->_mutex);
#endif
}
static int cache_insert( struct idmap_cache *cache, const struct idmap_lookup *lookup, const struct list_entry *src) { struct list_entry *entry; int status = NO_ERROR; AcquireSRWLockExclusive(&cache->lock); /* search for an existing match */ entry = list_search(&cache->head, lookup->value, lookup->compare); if (entry) { /* overwrite the existing entry with the new results */ cache->ops->entry_copy(entry, src); goto out; } /* initialize a new entry and add it to the list */ entry = cache->ops->entry_alloc(); if (entry == NULL) { status = GetLastError(); goto out; } cache->ops->entry_copy(entry, src); list_add_head(&cache->head, entry); out: ReleaseSRWLockExclusive(&cache->lock); return status; }
/* Acquires the writer side of the rwmutex.  Always returns 0. */
int rwmutex_wrlock(XQ_rwmutex_t *m)
{
    /* BUGFIX: the guard previously tested _WIN32_WINNT >= 0x0700, but
     * Windows 7 is 0x0601 (_WIN32_WINNT_WIN7); 0x0700 is not a real
     * version value, so builds targeting Win7/8/8.1 silently compiled
     * the acquire away and the "lock" protected nothing. */
#if _WIN32_WINNT >= 0x0601
    AcquireSRWLockExclusive(m);
#endif
    return 0;
}
// Per-thread TLS teardown: walks this thread's object list from the tail,
// detaches each object from its key, invokes the key's destructor callback
// on the stored value, frees the objects and the thread map, then runs the
// emutls destructors.
void __MCF_CRT_TlsThreadCleanup(){
	ThreadMap *const pMap = TlsGetValue(g_dwTlsIndex);
	if(pMap){
		TlsObject *pObject = pMap->pLastByThread;
		while(pObject){
			TlsKey *const pKey = pObject->pKey;
			AcquireSRWLockExclusive(&(pKey->srwLock));
			{
				// Fix up the per-key tail pointer if it references this object.
				// NOTE(review): the object's by-key neighbours are not relinked
				// here — presumably the by-key chain is only walked through
				// pLastByKey at key destruction; confirm against
				// MCF_CRT_TlsFreeKey's traversal.
				if(pKey->pLastByKey == pObject){
					pKey->pLastByKey = pObject->pPrevByKey;
				}
			}
			ReleaseSRWLockExclusive(&(pKey->srwLock));
			// Destructor callback runs after the key lock has been released.
			if(pKey->pfnCallback){
				(*pKey->pfnCallback)(pObject->nValue);
			}
			TlsObject *const pTemp = pObject->pPrevByThread;
			free(pObject);
			pObject = pTemp;
		}
		free(pMap);
		TlsSetValue(g_dwTlsIndex, nullptr);
	}
	__MCF_CRT_RunEmutlsDtors();
}
// Signals the event: marks it set under the lock and wakes every waiter.
_EXP_IMPL void __cdecl Event::set()
{
    PSRWLOCK const lock = reinterpret_cast<PSRWLOCK>(&m_lock);
    PCONDITION_VARIABLE const cond = reinterpret_cast<PCONDITION_VARIABLE>(&m_cond);

    AcquireSRWLockExclusive(lock);
    m_isSet = true;
    WakeAllConditionVariable(cond);
    ReleaseSRWLockExclusive(lock);
}
/* Seek within a virtualfs text file.
 * Computes the target offset according to 'whence', validates that it lies
 * inside the text, and updates the position under the exclusive lock.
 * Returns 0 on success (new offset stored in *newoffset), or -L_EINVAL for
 * an unknown 'whence' or an out-of-range target. */
static int virtualfs_text_llseek(struct file *f, loff_t offset, loff_t *newoffset, int whence)
{
	AcquireSRWLockExclusive(&f->rw_lock);
	struct virtualfs_text *file = (struct virtualfs_text *)f;
	loff_t target;
	int r;
	switch (whence)
	{
	case SEEK_SET: target = offset; break;
	case SEEK_CUR: target = file->position + offset; break;
	/* NOTE(review): POSIX lseek(SEEK_END) computes size + offset; this
	 * subtracts instead — confirm callers deliberately pass a positive
	 * distance-from-end here. */
	case SEEK_END: target = file->textlen - offset; break;
	default:
		r = -L_EINVAL;
		goto out;
	}
	/* NOTE(review): target == textlen (seek exactly to EOF) is rejected
	 * by the strict '<' — verify that is intended. */
	if (target >= 0 && target < file->textlen)
	{
		file->position = (int)target;
		*newoffset = target;
		r = 0;
	}
	else
		r = -L_EINVAL;
out:
	ReleaseSRWLockExclusive(&f->rw_lock);
	return r;
}
// Enters a once-block.  Returns false when the calling thread must run the
// initializer (the flag moves to being_initialized and the global mutex is
// released before returning); returns true once the flag is initialized.
// Threads that observe being_initialized sleep on the global condition
// variable until commit()/rollback() changes the status and wakes them.
BOOST_LOG_API bool once_block_sentry::enter_once_block() const
{
    AcquireSRWLockExclusive(&g_OnceBlockMutex);

    once_block_flag volatile& flag = m_Flag;
    while (flag.status != once_block_flag::initialized)
    {
        if (flag.status == once_block_flag::uninitialized)
        {
            // Claim the initializer role; the caller is expected to call
            // commit() on success or rollback() on failure.
            flag.status = once_block_flag::being_initialized;
            ReleaseSRWLockExclusive(&g_OnceBlockMutex);

            // Invoke the initializer block
            return false;
        }
        else
        {
            // Another thread is initializing: wait until it finishes,
            // re-checking the status after every wakeup.
            while (flag.status == once_block_flag::being_initialized)
            {
                BOOST_VERIFY(SleepConditionVariableSRW(
                    &g_OnceBlockCond, &g_OnceBlockMutex, INFINITE, 0));
            }
        }
    }

    ReleaseSRWLockExclusive(&g_OnceBlockMutex);
    return true;
}
// Shuts down and dereferences the owned APPLICATION, if any.
// Uses a double-checked pattern: a cheap unlocked test first (application
// may already be NULL due to app_offline), then a re-check under the lock
// because another thread may clear m_pApplication while we wait.
VOID APPLICATION_INFO::ShutDownApplication()
{
    if (m_pApplication == NULL)
    {
        return;
    }

    AcquireSRWLockExclusive(&m_srwLock);

    if (m_pApplication != NULL)
    {
        APPLICATION* pApplication = m_pApplication;
        // Set m_pApplication to NULL first to prevent anyone from using it
        m_pApplication = NULL;
        pApplication->ShutDown();
        pApplication->DereferenceApplication();
    }

    // The previous version tracked a fLockAcquired flag that was always
    // TRUE at the release site — the release is unconditional.
    ReleaseSRWLockExclusive(&m_srwLock);
}
/* Records 'addr' in the server's list of known addresses.
 * We keep a list of addrs used to connect to each server; once it gets
 * bigger than NFS41_ADDRS_PER_SERVER, overwrite the oldest addrs, using
 * server_addrs.next_index to implement a circular array. */
static void server_addrs_add(
    IN OUT struct server_addrs *addrs,
    IN const netaddr4 *addr)
{
    AcquireSRWLockExclusive(&addrs->lock);
    if (multi_addr_find(&addrs->addrs, addr, NULL)) {
        dprintf(SRVLVL, "server_addrs_add() found existing addr '%s'.\n",
            addr->uaddr);
    } else {
        /* overwrite the address at 'next_index' */
        StringCchCopyA(addrs->addrs.arr[addrs->next_index].netid,
            NFS41_NETWORK_ID_LEN+1, addr->netid);
        StringCchCopyA(addrs->addrs.arr[addrs->next_index].uaddr,
            NFS41_UNIVERSAL_ADDR_LEN+1, addr->uaddr);
        /* increment/wrap next_index */
        addrs->next_index = (addrs->next_index + 1) % NFS41_ADDRS_PER_SERVER;
        /* BUGFIX: grow count until the array is full.  The old code set
         * count = max(count, next_index), which broke once next_index
         * wrapped to 0: the final slot was never counted, capping count
         * at NFS41_ADDRS_PER_SERVER-1 forever. */
        if (addrs->addrs.count < NFS41_ADDRS_PER_SERVER)
            addrs->addrs.count++;
        dprintf(SRVLVL, "server_addrs_add() added new addr '%s'.\n",
            addr->uaddr);
    }
    ReleaseSRWLockExclusive(&addrs->lock);
}
/* Releases a delegated lock covering the range described by 'input'.
 * Returns NO_ERROR when a delegated lock was found and freed locally,
 * ERROR_LOCKED when the matching lock is not delegated (server must be
 * involved), or ERROR_NOT_LOCKED when no lock matches the range. */
static int open_unlock_delegate(
    IN nfs41_open_state *open,
    IN const nfs41_lock_state *input)
{
    int status = ERROR_NOT_LOCKED;
    struct list_entry *match;

    AcquireSRWLockExclusive(&open->lock);

    /* look for an existing lock state covering this range */
    match = list_search(&open->locks.list, input, lock_range_cmp);
    if (match) {
        nfs41_lock_state *lock = lock_entry(match);
        if (!lock->delegated) {
            status = ERROR_LOCKED;
        } else {
            /* delegated: drop it locally and report success */
            list_remove(match);
            free(lock);
            status = NO_ERROR;
        }
    }

    ReleaseSRWLockExclusive(&open->lock);
    return status;
}
/* Locks the belle_sip mutex; always returns 0.
 * Desktop Windows builds use a kernel mutex handle; other builds use a
 * slim reader/writer lock taken exclusively. */
int belle_sip_mutex_lock(belle_sip_mutex_t * hMutex)
{
#ifdef BELLE_SIP_WINDOWS_DESKTOP
	WaitForSingleObject(*hMutex, INFINITE);
#else
	AcquireSRWLockExclusive(hMutex);
#endif
	return 0;
}
/// <summary> /// Locks for writing. /// </summary> void RWLock::LockWrite() { #ifdef PLATFORM_WIN AcquireSRWLockExclusive( &mRwlock ); #else pthread_rwlock_wrlock( &mRwlock ); #endif }
/* Locks the mutex (blocking).  Always returns 0; the SRW exclusive
 * acquire has no failure path. */
int mutex_lock(struct mutex_handle *mutex)
{
	struct mutex_priv *priv = (struct mutex_priv *)mutex->priv;

	AcquireSRWLockExclusive(&priv->lock);
	return 0;
}
// Destroys a TLS key: detaches it from the global key tree, tears down
// every per-thread object created under the key (invoking the key's
// destructor callback on each stored value), then frees the key itself.
// Returns false with ERROR_INVALID_PARAMETER when pTlsKey is null.
bool MCF_CRT_TlsFreeKey(void *pTlsKey){
	TlsKey *const pKey = pTlsKey;
	if(!pKey){
		SetLastError(ERROR_INVALID_PARAMETER);
		return false;
	}
	// Remove the key from the global AVL tree under the global key mutex.
	AcquireSRWLockExclusive(&g_csKeyMutex);
	{
		MCF_AvlDetach((MCF_AvlNodeHeader *)pKey);
	}
	ReleaseSRWLockExclusive(&g_csKeyMutex);

	// Walk the by-key object chain from the tail; unlink each object from
	// its owning thread's by-thread list under that thread map's lock.
	TlsObject *pObject = pKey->pLastByKey;
	while(pObject){
		ThreadMap *const pMap = pObject->pMap;
		AcquireSRWLockExclusive(&(pMap->srwLock));
		{
			TlsObject *const pPrev = pObject->pPrevByThread;
			TlsObject *const pNext = pObject->pNextByThread;
			if(pPrev){
				pPrev->pNextByThread = pNext;
			}
			if(pNext){
				pNext->pPrevByThread = pPrev;
			}
			if(pMap->pLastByThread == pObject){
				pMap->pLastByThread = pObject->pPrevByThread;
			}
		}
		ReleaseSRWLockExclusive(&(pMap->srwLock));
		// Destructor callback runs after the map lock has been released.
		if(pKey->pfnCallback){
			(*pKey->pfnCallback)(pObject->nValue);
		}
		TlsObject *const pTemp = pObject->pPrevByKey;
		free(pObject);
		pObject = pTemp;
	}
	free(pKey);
	return true;
}
// the code below in _DEBUG build will check that we don't try to recursively lock, // which is not supported by this class. also checks that you don't unlock without // having locked void SimpleRWLock::lock() { unsigned me = GetCurrentThreadId(); int& state = s.getRef(); dassert( state == 0 ); state--; AcquireSRWLockExclusive(&_lock); tid = me; // this is for use in the debugger to see who does have the lock }
/* Locks a QemuMutex, emitting trace events before and after the acquire so
 * contention can be observed from the trace log.  The mutex must have been
 * initialized (the assert guards use-before-init / use-after-destroy). */
void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line)
{
    assert(mutex->initialized);
    trace_qemu_mutex_lock(mutex, file, line);

    AcquireSRWLockExclusive(&mutex->lock);

    trace_qemu_mutex_locked(mutex, file, line);
}
// Removes and destroys the UI element registered under 'type'.
// No-op when the type was never registered.  The previous version used
// elements[type], which default-inserted a null entry for missing keys
// just to delete and erase it again; find() avoids mutating the map.
void ui::remove(char type)
{
	AcquireSRWLockExclusive(&uilock);
	auto it = elements.find(type);
	if (it != elements.end())
	{
		delete it->second;
		elements.erase(it);
	}
	ReleaseSRWLockExclusive(&uilock);
}
/* Write-locks the rwlock and records the owning thread id.
 * NOTE(review): exclusive_locked presumably lets the unlock path tell a
 * writer release from a reader release — confirm against the matching
 * pthread_rwlock_unlock implementation. */
int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	AcquireSRWLockExclusive(&rwlock->rwlock);
	rwlock->exclusive_locked = GetCurrentThreadId();

	return (0);
}
//worker thread DWORD WINAPI WorkerThread(LPVOID lpParam) { CSocketSever* pSocketServer = (CSocketSever*)lpParam; int i = 0; fd_set fdread; struct timeval tv = {0, 50}; char szMessage[nMsgSize]; memset(szMessage, 0, nMsgSize); while(TRUE) { // set fdread初始化为空 FD_ZERO(&fdread); for(i = 0; i < pSocketServer->m_iTotalConn; i++) //将client socket加入fdread set FD_SET(pSocketServer->m_ClientSocketArr[i], &fdread); int ret = select(0, &fdread, NULL, NULL, &tv); if (ret == 0) { // Time expired continue; } for(i = 0; i < pSocketServer->m_iTotalConn; i++) { if( FD_ISSET( pSocketServer->m_ClientSocketArr[i], &fdread ) ) { memset(szMessage, 0, nMsgSize); ret = recv(pSocketServer->m_ClientSocketArr[i], szMessage, nMsgSize,0); if ( ret == 0 || ( ret == SOCKET_ERROR && WSAGetLastError() ) ) { //Client socket closed cout << "Client socket " << pSocketServer->m_ClientSocketArr[i] << "closed." << endl; closesocket( pSocketServer->m_ClientSocketArr[i]); pSocketServer->m_ClientSocketArr[i--] = pSocketServer->m_ClientSocketArr[--pSocketServer->m_iTotalConn]; } else { string heart = szMessage; if (heart.find("心跳包") != heart.npos) { //cout << szMessage << endl; continue; } //接入业务处理部分 g_pos = strlen(LogBuffer); AcquireSRWLockExclusive(&srwTest); sprintf(LogBuffer + g_pos, "%s\n", szMessage); ReleaseSRWLockExclusive(&srwTest); pSocketServer->m_pRTHandleData->GetClientSocket(pSocketServer->m_ClientSocketArr[i]); pSocketServer->m_pRTHandleData->HandleData(szMessage, ret); } } } } return 0; }
/* Thread-safe wrapper around tree_add(): inserts 'node' into the tree
 * under 'key' while holding the tree's lock exclusively.  Returns
 * tree_add()'s result. */
int ts_tree_add(ts_tree_t* ts_tree, ts_tree_node_t* node, uintptr_t key)
{
  int r;

  AcquireSRWLockExclusive(&ts_tree->lock);
  r = tree_add(&ts_tree->tree, &node->tree_node, key);
  ReleaseSRWLockExclusive(&ts_tree->lock);

  return r;
}
/* Locks the bctoolbox mutex; always returns 0.
 * Desktop Windows builds wait on a kernel mutex handle; other builds
 * take a slim reader/writer lock exclusively. */
int __bctbx_WIN_mutex_lock(bctbx_mutex_t * hMutex)
{
#ifdef BCTBX_WINDOWS_DESKTOP
	WaitForSingleObject(*hMutex, INFINITE); /* == WAIT_TIMEOUT; */
#else
	AcquireSRWLockExclusive(hMutex);
#endif
	return 0;
}
/* Acquires the directory buffer for filling.
 * On first use (*PDirBuffer == 0) lazily allocates a buffer; concurrent
 * creators race under a function-local CreateLock, the winner installs its
 * buffer and returns TRUE with the buffer's Lock already held exclusively,
 * and losers free their speculative allocation.  When Reset is requested
 * on an existing buffer, its marks are reset and TRUE is returned with the
 * lock held.  Otherwise returns FALSE without holding any lock.
 * NOTE(review): RETURN(status, value) presumably stores status via PResult
 * and returns value — confirm against the macro definition. */
FSP_API BOOLEAN FspFileSystemAcquireDirectoryBuffer(PVOID *PDirBuffer,
    BOOLEAN Reset, PNTSTATUS PResult)
{
    FSP_FILE_SYSTEM_DIRECTORY_BUFFER *DirBuffer = *PDirBuffer;
    MemoryBarrier();

    if (0 == DirBuffer)
    {
        static SRWLOCK CreateLock = SRWLOCK_INIT;
        FSP_FILE_SYSTEM_DIRECTORY_BUFFER *NewDirBuffer;

        NewDirBuffer = MemAlloc(sizeof *NewDirBuffer);
        if (0 == NewDirBuffer)
            RETURN(STATUS_INSUFFICIENT_RESOURCES, FALSE);
        memset(NewDirBuffer, 0, sizeof *NewDirBuffer);
        InitializeSRWLock(&NewDirBuffer->Lock);
        /* pre-lock the candidate so a successful install hands it to the
         * caller already acquired */
        AcquireSRWLockExclusive(&NewDirBuffer->Lock);

        AcquireSRWLockExclusive(&CreateLock);
        /* re-check under the creation lock: another thread may have
         * installed a buffer while we were allocating */
        DirBuffer = *PDirBuffer;
        MemoryBarrier();
        if (0 == DirBuffer)
            *PDirBuffer = DirBuffer = NewDirBuffer;
        ReleaseSRWLockExclusive(&CreateLock);

        if (DirBuffer == NewDirBuffer)
            RETURN(STATUS_SUCCESS, TRUE);

        /* lost the race: discard our speculative buffer */
        ReleaseSRWLockExclusive(&NewDirBuffer->Lock);
        MemFree(NewDirBuffer);
    }

    if (Reset)
    {
        AcquireSRWLockExclusive(&DirBuffer->Lock);
        DirBuffer->LoMark = 0;
        DirBuffer->HiMark = DirBuffer->Capacity;
        RETURN(STATUS_SUCCESS, TRUE);
    }

    RETURN(STATUS_SUCCESS, FALSE);
}
/* Reads up to 'count' bytes from the current position of a virtualfs
 * text file, clamped to the remaining text.  Advances the position and
 * returns the number of bytes copied. */
static size_t virtualfs_text_read(struct file *f, void *buf, size_t count)
{
	AcquireSRWLockExclusive(&f->rw_lock);
	struct virtualfs_text *file = (struct virtualfs_text *)f;
	int remaining = file->textlen - file->position;
	int copied = (int)min(count, (size_t)remaining);
	memcpy(buf, file->text + file->position, copied);
	file->position += copied;
	ReleaseSRWLockExclusive(&f->rw_lock);
	return copied;
}
// Rolls back a failed once-block: the flag returns to 'uninitialized' so
// another thread can retry the initialization, and all waiters are woken.
BOOST_LOG_API void once_block_sentry::rollback()
{
    AcquireSRWLockExclusive(&g_OnceBlockMutex);

    // The initializer failed, marking the flag as if it hasn't run at all
    m_Flag.status = once_block_flag::uninitialized;

    ReleaseSRWLockExclusive(&g_OnceBlockMutex);

    WakeAllConditionVariable(&g_OnceBlockCond);
}
// Unregisters the calling thread from the lock's owner set; a coding
// error is raised when the thread was not recorded as an owner.
void RWLockBase::RemoveOwner()
{
    AcquireSRWLockExclusive(&ownerSetLock_);
    if (owners_.erase(GetCurrentThreadId()) != 1)
    {
        Common::Assert::CodingError("Trying to release a RWLock not acquired on this thread");
    }
    ReleaseSRWLockExclusive(&ownerSetLock_);
}
// Acquires the write (exclusive) side of the reader/writer lock.
// SRW lock on Windows; elsewhere a pthread rwlock whose failure is fatal.
void RWLock::WriteLockInternal() {
#ifdef XP_WIN
  AcquireSRWLockExclusive(NativeHandle(mRWLock));
#else
  MOZ_RELEASE_ASSERT(pthread_rwlock_wrlock(NativeHandle(mRWLock)) == 0,
                     "pthread_rwlock_wrlock failed");
#endif
}
// Acquires the writer (exclusive) lock, blocking until available.
// Windows uses an SRW lock; POSIX uses a pthread rwlock; any other
// platform is a compile-time error.
void RWLock::WriteLock()
{
#if defined(OVR_CAPTURE_WINDOWS)
    AcquireSRWLockExclusive(&m_lock);
#elif defined(OVR_CAPTURE_POSIX)
    pthread_rwlock_wrlock(&m_lock);
#else
    #error Unknown Platform!
#endif
}
/* Releases the message storage allocated by QueueInitialize; the array is
 * freed under the queue guard and nulled so stale use is detectable.
 * Always returns 0.
 * NOTE(review): a thread that blocks on qGuard and wakes after this call
 * would observe a freed msgArray — confirm callers guarantee exclusive
 * access at destroy time. */
DWORD QueueDestroy (QUEUE_OBJECT *q)
{
	/* Free all the resources created by QueueInitialize */
	AcquireSRWLockExclusive (&q->qGuard);
	free (q->msgArray);
	q->msgArray = NULL;
	ReleaseSRWLockExclusive (&(q->qGuard));
	return 0;
}
// Commits a successful once-block: the flag becomes 'initialized'
// (permanently — no further initializations will run) and all threads
// waiting in enter_once_block() are woken.
BOOST_LOG_API void once_block_sentry::commit()
{
    AcquireSRWLockExclusive(&g_OnceBlockMutex);

    // The initializer executed successfully
    m_Flag.status = once_block_flag::initialized;

    ReleaseSRWLockExclusive(&g_OnceBlockMutex);

    WakeAllConditionVariable(&g_OnceBlockCond);
}