BOOST_LOG_API bool once_block_sentry::enter_once_block() const
{
    AcquireSRWLockExclusive(&g_OnceBlockMutex);

    once_block_flag volatile& flag = m_Flag;
    while (flag.status != once_block_flag::initialized)
    {
        if (flag.status == once_block_flag::uninitialized)
        {
            flag.status = once_block_flag::being_initialized;
            ReleaseSRWLockExclusive(&g_OnceBlockMutex);

            // Invoke the initializer block
            return false;
        }
        else
        {
            while (flag.status == once_block_flag::being_initialized)
            {
                BOOST_VERIFY(SleepConditionVariableSRW(
                    &g_OnceBlockCond, &g_OnceBlockMutex, INFINITE, 0));
            }
        }
    }

    ReleaseSRWLockExclusive(&g_OnceBlockMutex);

    return true;
}
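/*
 * A minimal, self-contained sketch of the same once-initialization pattern
 * shown above, using only the Win32 SRW lock and condition variable APIs.
 * The names (g_once_lock, g_once_cond, g_status, run_once) are illustrative
 * and are not part of Boost.Log.
 */
#include <windows.h>

static SRWLOCK g_once_lock = SRWLOCK_INIT;
static CONDITION_VARIABLE g_once_cond = CONDITION_VARIABLE_INIT;
static enum { UNINIT, IN_PROGRESS, DONE } g_status = UNINIT;

static void run_once(void (*init)(void))
{
    AcquireSRWLockExclusive(&g_once_lock);
    while (g_status != DONE)
    {
        if (g_status == UNINIT)
        {
            /* This thread runs the initializer outside the lock. */
            g_status = IN_PROGRESS;
            ReleaseSRWLockExclusive(&g_once_lock);
            init();
            /* Publish completion and wake any waiters. */
            AcquireSRWLockExclusive(&g_once_lock);
            g_status = DONE;
            WakeAllConditionVariable(&g_once_cond);
            break;
        }
        /* Another thread is initializing: wait for it to finish. */
        SleepConditionVariableSRW(&g_once_cond, &g_once_lock, INFINITE, 0);
    }
    ReleaseSRWLockExclusive(&g_once_lock);
}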
void kfree(void *mem, int size)
{
	AcquireSRWLockExclusive(&heap->rw_lock);
	/* Find memory bucket */
	void *bucket_addr = (void *)((size_t)mem & (-BLOCK_SIZE));
	/* Find pool */
	int p = -1;
	for (int i = 0; i < POOL_COUNT; i++)
		if (size <= heap->pools[i].objsize)
		{
			p = i;
			break;
		}
	if (p == -1)
	{
		log_error("kfree(): Invalid size: %d\n", size);
		ReleaseSRWLockExclusive(&heap->rw_lock);
		return;
	}
	/* Loop over the chain to find the corresponding bucket */
	struct bucket *previous = NULL;
	struct bucket *current = heap->pools[p].first;
	while (current)
	{
		if (current != bucket_addr)
		{
			previous = current;
			current = current->next_bucket;
			continue;
		}
		/* Push the object onto the bucket's free list */
		*(void **)mem = current->first_free;
		current->first_free = mem;
		current->ref_cnt--;
		if (!current->ref_cnt)
		{
			/* Bucket empty, free it */
			if (!previous)
				heap->pools[p].first = current->next_bucket;
			else
				previous->next_bucket = current->next_bucket;
			mm_munmap(current, BLOCK_SIZE);
		}
		ReleaseSRWLockExclusive(&heap->rw_lock);
		return;
	}
	log_error("kfree(): Invalid memory pointer or size: (%x, %d)\n", mem, size);
	ReleaseSRWLockExclusive(&heap->rw_lock);
}
void *kmalloc(int size)
{
	AcquireSRWLockExclusive(&heap->rw_lock);
	/* Find pool */
	int p = -1;
	for (int i = 0; i < POOL_COUNT; i++)
		if (size <= heap->pools[i].objsize)
		{
			p = i;
			break;
		}
	if (p == -1)
	{
		log_error("kmalloc(%d): size too large.\n", size);
		ReleaseSRWLockExclusive(&heap->rw_lock);
		return NULL;
	}
	/* Find a bucket with a free object slot */
	if (!heap->pools[p].first)
		heap->pools[p].first = alloc_bucket(heap->pools[p].objsize);
	struct bucket *current = heap->pools[p].first;
	for (;;)
	{
		if (!current)
		{
			log_error("kmalloc(%d): out of memory\n", size);
			ReleaseSRWLockExclusive(&heap->rw_lock);
			return NULL;
		}
		/* Current bucket has a free object, return it */
		if (current->first_free)
		{
			void *c = current->first_free;
			current->first_free = *(void **)c;
			current->ref_cnt++;
			ReleaseSRWLockExclusive(&heap->rw_lock);
			return c;
		}
		/* Next bucket does not exist, allocate one */
		if (!current->next_bucket)
			current->next_bucket = alloc_bucket(heap->pools[p].objsize);
		/* Move to next bucket */
		current = current->next_bucket;
	}
}
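/*
 * Usage sketch for the pool allocator above: kfree() selects the pool by its
 * size argument, so the caller must pass the same size it gave to kmalloc().
 * The struct name below is illustrative only.
 */
struct example_obj
{
	int a;
	int b;
};

static void example_alloc_free(void)
{
	struct example_obj *obj = kmalloc(sizeof(struct example_obj));
	if (!obj)
		return;
	obj->a = 1;
	obj->b = 2;
	/* Must match the allocation size so the correct pool is searched */
	kfree(obj, sizeof(struct example_obj));
}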
_EXP_IMPL void __cdecl Event::set()
{
    AcquireSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&m_lock));
    m_isSet = true;
    WakeAllConditionVariable(reinterpret_cast<PCONDITION_VARIABLE>(&m_cond));
    ReleaseSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&m_lock));
}
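// A minimal sketch of the wait side that pairs with the set() pattern above,
// written as a free-standing struct so it is self-contained; the real Event
// class's members and wait API are assumptions, not shown in the snippet.
#include <windows.h>

struct EventSketch
{
    SRWLOCK m_lock = SRWLOCK_INIT;
    CONDITION_VARIABLE m_cond = CONDITION_VARIABLE_INIT;
    bool m_isSet = false;

    void set()
    {
        AcquireSRWLockExclusive(&m_lock);
        m_isSet = true;
        WakeAllConditionVariable(&m_cond);
        ReleaseSRWLockExclusive(&m_lock);
    }

    void wait()
    {
        AcquireSRWLockExclusive(&m_lock);
        // Re-check the predicate on every wakeup to tolerate spurious wakes.
        while (!m_isSet)
            SleepConditionVariableSRW(&m_cond, &m_lock, INFINITE, 0);
        ReleaseSRWLockExclusive(&m_lock);
    }
};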
static void server_addrs_add(
    IN OUT struct server_addrs *addrs,
    IN const netaddr4 *addr)
{
    /* we keep a list of addrs used to connect to each server. once it gets
     * bigger than NFS41_ADDRS_PER_SERVER, overwrite the oldest addrs. use
     * server_addrs.next_index to implement a circular array */
    AcquireSRWLockExclusive(&addrs->lock);

    if (multi_addr_find(&addrs->addrs, addr, NULL)) {
        dprintf(SRVLVL, "server_addrs_add() found existing addr '%s'.\n",
            addr->uaddr);
    } else {
        /* overwrite the address at 'next_index' */
        StringCchCopyA(addrs->addrs.arr[addrs->next_index].netid,
            NFS41_NETWORK_ID_LEN+1, addr->netid);
        StringCchCopyA(addrs->addrs.arr[addrs->next_index].uaddr,
            NFS41_UNIVERSAL_ADDR_LEN+1, addr->uaddr);

        /* increment/wrap next_index */
        addrs->next_index = (addrs->next_index + 1) % NFS41_ADDRS_PER_SERVER;

        /* update addrs.count if necessary */
        if (addrs->addrs.count < addrs->next_index)
            addrs->addrs.count = addrs->next_index;

        dprintf(SRVLVL, "server_addrs_add() added new addr '%s'.\n",
            addr->uaddr);
    }
    ReleaseSRWLockExclusive(&addrs->lock);
}
VOID APPLICATION_INFO::ShutDownApplication()
{
    APPLICATION* pApplication = NULL;
    BOOL fLockAcquired = FALSE;

    // pApplication can be NULL due to app_offline
    if (m_pApplication != NULL)
    {
        AcquireSRWLockExclusive(&m_srwLock);
        fLockAcquired = TRUE;

        if (m_pApplication != NULL)
        {
            pApplication = m_pApplication;
            // Set m_pApplication to NULL first to prevent anyone from using it
            m_pApplication = NULL;
            pApplication->ShutDown();
            pApplication->DereferenceApplication();
        }

        if (fLockAcquired)
        {
            ReleaseSRWLockExclusive(&m_srwLock);
        }
    }
}
static int open_unlock_delegate(
    IN nfs41_open_state *open,
    IN const nfs41_lock_state *input)
{
    struct list_entry *entry;
    int status = ERROR_NOT_LOCKED;

    AcquireSRWLockExclusive(&open->lock);

    /* find lock state that matches this range */
    entry = list_search(&open->locks.list, input, lock_range_cmp);
    if (entry) {
        nfs41_lock_state *lock = lock_entry(entry);
        if (lock->delegated) {
            /* if the lock was delegated, remove/free it and return success */
            list_remove(entry);
            free(lock);
            status = NO_ERROR;
        } else
            status = ERROR_LOCKED;
    }

    ReleaseSRWLockExclusive(&open->lock);
    return status;
}
int pthread_rwlock_unlock(pthread_rwlock_t *l)
{
#ifndef PTHREAD_WIN_XP_SYNC
	void *state = *(void **)l;

	if (state == (void *)1)
	{
		/* Known to be an exclusive lock */
		ReleaseSRWLockExclusive(l);
	}
	else
	{
		/* A shared unlock will work */
		ReleaseSRWLockShared(l);
	}
#else
	WaitForSingleObject(l->mutex, INFINITE);
	if (l->reader_count < 0) // Known to be an exclusive lock
	{
		l->reader_count = 0;
		if (l->nb_waiting_writer) // writers have priority
			ReleaseSemaphore(l->sema_write, 1, NULL); // Wake up one writer
		else if (l->nb_waiting_reader)
			ReleaseSemaphore(l->sema_read, l->nb_waiting_reader, NULL); // Wake up all readers
	}
	else if (!--(l->reader_count) && l->nb_waiting_writer) // maybe wake up one writer
		ReleaseSemaphore(l->sema_write, 1, NULL);
	ReleaseMutex(l->mutex);
#endif
	return 0;
}
int rwmutex_wrunlock(XQ_rwmutex_t *m)
{
/* SRW locks require Windows Vista or later (_WIN32_WINNT >= 0x0600) */
#if _WIN32_WINNT >= 0x0600
	ReleaseSRWLockExclusive(m);
#endif
	return 0;
}
static int cache_insert(
    struct idmap_cache *cache,
    const struct idmap_lookup *lookup,
    const struct list_entry *src)
{
    struct list_entry *entry;
    int status = NO_ERROR;

    AcquireSRWLockExclusive(&cache->lock);

    /* search for an existing match */
    entry = list_search(&cache->head, lookup->value, lookup->compare);
    if (entry) {
        /* overwrite the existing entry with the new results */
        cache->ops->entry_copy(entry, src);
        goto out;
    }

    /* initialize a new entry and add it to the list */
    entry = cache->ops->entry_alloc();
    if (entry == NULL) {
        status = GetLastError();
        goto out;
    }
    cache->ops->entry_copy(entry, src);
    list_add_head(&cache->head, entry);
out:
    ReleaseSRWLockExclusive(&cache->lock);
    return status;
}
static int virtualfs_text_llseek(struct file *f, loff_t offset, loff_t *newoffset, int whence)
{
	AcquireSRWLockExclusive(&f->rw_lock);
	struct virtualfs_text *file = (struct virtualfs_text *)f;
	loff_t target;
	int r;
	switch (whence)
	{
	case SEEK_SET: target = offset; break;
	case SEEK_CUR: target = file->position + offset; break;
	case SEEK_END: target = file->textlen - offset; break;
	default:
		r = -L_EINVAL;
		goto out;
	}
	if (target >= 0 && target < file->textlen)
	{
		file->position = (int)target;
		*newoffset = target;
		r = 0;
	}
	else
		r = -L_EINVAL;
out:
	ReleaseSRWLockExclusive(&f->rw_lock);
	return r;
}
void SimpleRWLock::unlock()
{
    int& state = s.getRef();
    dassert( state == -1 );
    state++;
    tid = 0xffffffff;
    ReleaseSRWLockExclusive(&_lock);
}
static int winfs_llseek(struct file *f, loff_t offset, loff_t *newoffset, int whence)
{
	struct winfs_file *winfile = (struct winfs_file *) f;
	DWORD dwMoveMethod;
	if (whence == SEEK_SET)
		dwMoveMethod = FILE_BEGIN;
	else if (whence == SEEK_CUR)
		dwMoveMethod = FILE_CURRENT;
	else if (whence == SEEK_END)
		dwMoveMethod = FILE_END;
	else
		return -L_EINVAL;
	/* Acquire exclusive to match the exclusive release below */
	AcquireSRWLockExclusive(&f->rw_lock);
	WaitForSingleObject(winfile->fp_mutex, INFINITE);
	LARGE_INTEGER liDistanceToMove, liNewFilePointer;
	liDistanceToMove.QuadPart = offset;
	SetFilePointerEx(winfile->handle, liDistanceToMove, &liNewFilePointer, dwMoveMethod);
	*newoffset = liNewFilePointer.QuadPart;
	if (whence == SEEK_SET && offset == 0)
	{
		/* TODO: Currently we don't know if it is a directory, it's no harm to do this anyway */
		winfile->restart_scan = 1;
	}
	ReleaseMutex(winfile->fp_mutex);
	ReleaseSRWLockExclusive(&f->rw_lock);
	return 0;
}
void __MCF_CRT_TlsThreadCleanup(){
	ThreadMap *const pMap = TlsGetValue(g_dwTlsIndex);
	if(pMap){
		TlsObject *pObject = pMap->pLastByThread;
		while(pObject){
			TlsKey *const pKey = pObject->pKey;

			AcquireSRWLockExclusive(&(pKey->srwLock));
			{
				if(pKey->pLastByKey == pObject){
					pKey->pLastByKey = pObject->pPrevByKey;
				}
			}
			ReleaseSRWLockExclusive(&(pKey->srwLock));

			if(pKey->pfnCallback){
				(*pKey->pfnCallback)(pObject->nValue);
			}

			TlsObject *const pTemp = pObject->pPrevByThread;
			free(pObject);
			pObject = pTemp;
		}
		free(pMap);
		TlsSetValue(g_dwTlsIndex, nullptr);
	}

	__MCF_CRT_RunEmutlsDtors();
}
bool MCF_CRT_TlsFreeKey(void *pTlsKey){
	TlsKey *const pKey = pTlsKey;
	if(!pKey){
		SetLastError(ERROR_INVALID_PARAMETER);
		return false;
	}

	AcquireSRWLockExclusive(&g_csKeyMutex);
	{
		MCF_AvlDetach((MCF_AvlNodeHeader *)pKey);
	}
	ReleaseSRWLockExclusive(&g_csKeyMutex);

	TlsObject *pObject = pKey->pLastByKey;
	while(pObject){
		ThreadMap *const pMap = pObject->pMap;

		AcquireSRWLockExclusive(&(pMap->srwLock));
		{
			TlsObject *const pPrev = pObject->pPrevByThread;
			TlsObject *const pNext = pObject->pNextByThread;
			if(pPrev){
				pPrev->pNextByThread = pNext;
			}
			if(pNext){
				pNext->pPrevByThread = pPrev;
			}
			if(pMap->pLastByThread == pObject){
				pMap->pLastByThread = pObject->pPrevByThread;
			}
		}
		ReleaseSRWLockExclusive(&(pMap->srwLock));

		if(pKey->pfnCallback){
			(*pKey->pfnCallback)(pObject->nValue);
		}

		TlsObject *const pTemp = pObject->pPrevByKey;
		free(pObject);
		pObject = pTemp;
	}
	free(pKey);
	return true;
}
/// <summary>
/// Unlocks for writing (Call if thread acquired a write lock).
/// </summary>
void RWLock::UnlockWrite()
{
#ifdef PLATFORM_WIN
    ReleaseSRWLockExclusive( &mRwlock );
#else
    pthread_rwlock_unlock( &mRwlock );
#endif
}
int mutex_unlock(struct mutex_handle *mutex)
{
	struct mutex_priv *priv = (struct mutex_priv *)mutex->priv;

	ReleaseSRWLockExclusive(&priv->lock);
	return 0;
}
int belle_sip_mutex_unlock(belle_sip_mutex_t * hMutex)
{
#ifdef BELLE_SIP_WINDOWS_DESKTOP
	ReleaseMutex(*hMutex);
#else
	ReleaseSRWLockExclusive(hMutex);
#endif
	return 0;
}
void shared_mutex::unlock()
{
    bool ex = d->ex;
    if(ex)
    {
        d->ex = false;
        ReleaseSRWLockExclusive(&d->m);
    }
    else
        ReleaseSRWLockShared(&d->m);
}
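// A minimal, self-contained sketch of the whole shared_mutex pattern behind
// the unlock() above: the writer records that it holds the lock exclusively so
// unlock() can pick the matching release call. Member names mirror the snippet
// (m for the SRWLOCK, ex for the exclusive flag); this is an illustration, not
// the original class.
#include <windows.h>

struct shared_mutex_sketch
{
    SRWLOCK m = SRWLOCK_INIT;
    bool ex = false;

    void lock()
    {
        AcquireSRWLockExclusive(&m);
        // Only the single exclusive holder reaches this point, so the write is safe.
        ex = true;
    }

    void lock_shared()
    {
        // Shared holders leave ex as false.
        AcquireSRWLockShared(&m);
    }

    void unlock()
    {
        if (ex)
        {
            ex = false;
            ReleaseSRWLockExclusive(&m);
        }
        else
            ReleaseSRWLockShared(&m);
    }
};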
int ts_tree_add(ts_tree_t* ts_tree, ts_tree_node_t* node, uintptr_t key) {
  int r;

  AcquireSRWLockExclusive(&ts_tree->lock);
  r = tree_add(&ts_tree->tree, &node->tree_node, key);
  ReleaseSRWLockExclusive(&ts_tree->lock);

  return r;
}
void ui::remove(char type)
{
    AcquireSRWLockExclusive(&uilock);
    delete elements[type];
    elements.erase(type);
    ReleaseSRWLockExclusive(&uilock);
}
// Worker thread
DWORD WINAPI WorkerThread(LPVOID lpParam)
{
    CSocketSever* pSocketServer = (CSocketSever*)lpParam;
    int i = 0;
    fd_set fdread;
    struct timeval tv = {0, 50};
    char szMessage[nMsgSize];
    memset(szMessage, 0, nMsgSize);

    while(TRUE)
    {
        // Clear the fdread set
        FD_ZERO(&fdread);

        // Add the client sockets to the fdread set
        for(i = 0; i < pSocketServer->m_iTotalConn; i++)
            FD_SET(pSocketServer->m_ClientSocketArr[i], &fdread);

        int ret = select(0, &fdread, NULL, NULL, &tv);
        if (ret == 0)
        {
            // Timeout expired
            continue;
        }

        for(i = 0; i < pSocketServer->m_iTotalConn; i++)
        {
            if( FD_ISSET( pSocketServer->m_ClientSocketArr[i], &fdread ) )
            {
                memset(szMessage, 0, nMsgSize);
                ret = recv(pSocketServer->m_ClientSocketArr[i], szMessage, nMsgSize, 0);
                if ( ret == 0 || ( ret == SOCKET_ERROR && WSAGetLastError() ) )
                {
                    // Client socket closed
                    cout << "Client socket " << pSocketServer->m_ClientSocketArr[i] << " closed." << endl;
                    closesocket( pSocketServer->m_ClientSocketArr[i] );
                    pSocketServer->m_ClientSocketArr[i--] = pSocketServer->m_ClientSocketArr[--pSocketServer->m_iTotalConn];
                }
                else
                {
                    string heart = szMessage;
                    if (heart.find("心跳包") != heart.npos)  // heartbeat packet, nothing to do
                    {
                        //cout << szMessage << endl;
                        continue;
                    }

                    // Business data handling: append the message to the shared log buffer
                    AcquireSRWLockExclusive(&srwTest);
                    // Compute the write position under the lock so concurrent
                    // writers do not race on LogBuffer
                    g_pos = strlen(LogBuffer);
                    sprintf(LogBuffer + g_pos, "%s\n", szMessage);
                    ReleaseSRWLockExclusive(&srwTest);

                    pSocketServer->m_pRTHandleData->GetClientSocket(pSocketServer->m_ClientSocketArr[i]);
                    pSocketServer->m_pRTHandleData->HandleData(szMessage, ret);
                }
            }
        }
    }
    return 0;
}
int __bctbx_WIN_mutex_unlock(bctbx_mutex_t * hMutex)
{
#ifdef BCTBX_WINDOWS_DESKTOP
	ReleaseMutex(*hMutex);
#else
	ReleaseSRWLockExclusive(hMutex);
#endif
	return 0;
}
FSP_API BOOLEAN FspFileSystemAcquireDirectoryBuffer(PVOID *PDirBuffer,
    BOOLEAN Reset, PNTSTATUS PResult)
{
    FSP_FILE_SYSTEM_DIRECTORY_BUFFER *DirBuffer = *PDirBuffer;
    MemoryBarrier();

    if (0 == DirBuffer)
    {
        static SRWLOCK CreateLock = SRWLOCK_INIT;
        FSP_FILE_SYSTEM_DIRECTORY_BUFFER *NewDirBuffer;

        NewDirBuffer = MemAlloc(sizeof *NewDirBuffer);
        if (0 == NewDirBuffer)
            RETURN(STATUS_INSUFFICIENT_RESOURCES, FALSE);
        memset(NewDirBuffer, 0, sizeof *NewDirBuffer);
        InitializeSRWLock(&NewDirBuffer->Lock);
        AcquireSRWLockExclusive(&NewDirBuffer->Lock);

        AcquireSRWLockExclusive(&CreateLock);
        DirBuffer = *PDirBuffer;
        MemoryBarrier();
        if (0 == DirBuffer)
            *PDirBuffer = DirBuffer = NewDirBuffer;
        ReleaseSRWLockExclusive(&CreateLock);

        if (DirBuffer == NewDirBuffer)
            RETURN(STATUS_SUCCESS, TRUE);

        ReleaseSRWLockExclusive(&NewDirBuffer->Lock);
        MemFree(NewDirBuffer);
    }

    if (Reset)
    {
        AcquireSRWLockExclusive(&DirBuffer->Lock);
        DirBuffer->LoMark = 0;
        DirBuffer->HiMark = DirBuffer->Capacity;
        RETURN(STATUS_SUCCESS, TRUE);
    }

    RETURN(STATUS_SUCCESS, FALSE);
}
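/*
 * A minimal, generic sketch of the lazy-creation pattern used by
 * FspFileSystemAcquireDirectoryBuffer above: a candidate object is built
 * outside any lock, then published under a static SRW "create" lock with a
 * re-check, and the loser frees its copy. The names (lazy_buffer,
 * get_lazy_buffer) are illustrative and not part of WinFsp.
 */
#include <windows.h>
#include <stdlib.h>

struct lazy_buffer
{
    SRWLOCK lock;
    /* payload fields would go here */
};

static struct lazy_buffer *get_lazy_buffer(struct lazy_buffer **slot)
{
    static SRWLOCK create_lock = SRWLOCK_INIT;
    struct lazy_buffer *buffer = *slot;

    if (buffer == NULL)
    {
        /* Allocate and initialize a candidate outside the create lock. */
        struct lazy_buffer *candidate = calloc(1, sizeof *candidate);
        if (candidate == NULL)
            return NULL;
        InitializeSRWLock(&candidate->lock);

        /* Publish under the create lock, re-checking for a racing winner. */
        AcquireSRWLockExclusive(&create_lock);
        buffer = *slot;
        if (buffer == NULL)
            *slot = buffer = candidate;
        ReleaseSRWLockExclusive(&create_lock);

        /* If another thread won the race, discard our candidate. */
        if (buffer != candidate)
            free(candidate);
    }
    return buffer;
}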
bool wait()
{
    BOOL result = SleepConditionVariableSRW( &condition, &lock, INFINITE, 0 );
    bool ret = false;
    if( result == TRUE )
    {
        down();
        ReleaseSRWLockExclusive( &lock );
        ret = true;
    }
    return ret;
}
DWORD QueueDestroy (QUEUE_OBJECT *q)
{
	/* Free all the resources created by QueueInitialize */
	AcquireSRWLockExclusive (&q->qGuard);
	free (q->msgArray);
	q->msgArray = NULL;
	ReleaseSRWLockExclusive (&(q->qGuard));
	return 0;
}
static size_t virtualfs_text_read(struct file *f, void *buf, size_t count)
{
	AcquireSRWLockExclusive(&f->rw_lock);
	struct virtualfs_text *file = (struct virtualfs_text *)f;
	int read_count = (int)min(count, (size_t)(file->textlen - file->position));
	memcpy(buf, file->text + file->position, read_count);
	file->position += read_count;
	ReleaseSRWLockExclusive(&f->rw_lock);
	return read_count;
}
void RWLock::WriteUnlockInternal()
{
#ifdef XP_WIN
  ReleaseSRWLockExclusive(NativeHandle(mRWLock));
#else
  MOZ_RELEASE_ASSERT(pthread_rwlock_unlock(NativeHandle(mRWLock)) == 0,
                     "pthread_rwlock_unlock failed");
#endif
}
void RWLockBase::RemoveOwner()
{
    AcquireSRWLockExclusive(&ownerSetLock_);
    auto count = owners_.erase(GetCurrentThreadId());
    if (count != 1)
    {
        Common::Assert::CodingError("Trying to release a RWLock not acquired on this thread");
    }
    ReleaseSRWLockExclusive(&ownerSetLock_);
}
FSP_API VOID FspFileSystemReleaseDirectoryBuffer(PVOID *PDirBuffer)
{
    /* assume that FspFileSystemAcquireDirectoryBuffer has been called */

    FSP_FILE_SYSTEM_DIRECTORY_BUFFER *DirBuffer = *PDirBuffer;

    FspFileSystemSortDirectoryBuffer(DirBuffer);

    ReleaseSRWLockExclusive(&DirBuffer->Lock);
}