void gcore::RWLock::writeUnlock() { details::RWL_WIN32 *rwl = (details::RWL_WIN32*)mData; LeaveCriticalSection(&(rwl->access)); }
BOOL
WINAPIV
WSAPostApiNotify(
    IN  INT    NotificationCode,
    OUT LPVOID ReturnCode,
    IN  LPSTR  LibraryName,
    ...)
/*++

  PostApiNotify()

  Function Description:

      Like PreApiNotify, builds a string and passes it, along with
      information about the call, to a handler function.

  Arguments:

      NotificationCode -- specifies which API function called us.

      ReturnCode -- a generic pointer to the return value of the API
      function.

      ... -- variable number argument list.  These are pointers to the
      actual parameters of the API functions.

  Return Value:

      Returns value is currently meaningless.

--*/
{
    va_list        vl;             // used for variable arg-list parsing
    Cstack_c       *ThreadCstack;  // the Cstack_c object for this thread
    int            Index = 0;      // index into string we are creating
    int            Counter;        // counter we pop off the cstack
    LPFNDTHANDLER  HdlFunc;        // pointer to handler function
    int            OriginalError;  // any pending error is saved

    if (OutputStyle == NO_OUTPUT) {
        return FALSE;
    }

    // Preserve the thread's last-error across everything we do below;
    // it is restored both before the handler runs and before we return.
    OriginalError = GetLastError();

    EnterCriticalSection(&CrSec);

    // Wait until it's ok to send output.
    WaitForSingleObject(TextOutEvent, INFINITE);

    va_start(vl, LibraryName);

    // Get the cstack object from TLS; lazily create one for threads we
    // have never seen (i.e. threads whose Pre-notify we never observed).
    ThreadCstack = (Cstack_c *) TlsGetValue(TlsIndex);

    if (!ThreadCstack) {
        ThreadCstack = new Cstack_c();
        TlsSetValue(TlsIndex, (LPVOID)ThreadCstack);
        // NOTE(review): "Foriegn" typo is preserved -- it is emitted log
        // text that downstream tooling may already match on.
        StringCchPrintf(Buffer, TEXT_LEN-1, "0x%X Foriegn thread\n",
                        GetCurrentThreadId());
        DTTextOut(LogFileHandle, Buffer, OutputStyle);
    }

    ThreadCstack->CPop(Counter);

    // NOTE(review): StringCchPrintf returns an HRESULT, not a character
    // count; using its result as a buffer offset looks like a port from
    // wsprintf -- confirm against the handler functions' expectations.
    Index = StringCchPrintf(Buffer, TEXT_LEN-1, "Function Call: %d ", Counter);

    // Set the error to what it originally was, so the handler observes
    // the API function's own last-error value.
    SetLastError(OriginalError);

    // Call the appropriate handling function, output the buffer.
    if ((NotificationCode < MAX_DTCODE) && HdlFuncTable[NotificationCode]) {
        HdlFunc = HdlFuncTable[NotificationCode];
        (*HdlFunc)(vl, ReturnCode, LibraryName, Buffer, Index,
                   TEXT_LEN, FALSE);
    } else {
        StringCchPrintf(Buffer + Index, TEXT_LEN - Index - 1,
                        "Unknown function returned!\r\n");
        DTTextOut(LogFileHandle, Buffer, OutputStyle);
    }

    // BUG FIX: every va_start must be paired with va_end before the
    // function returns (C standard 7.16.1); this was missing.
    va_end(vl);

    LeaveCriticalSection(&CrSec);

    // In case the error has changed since the handler returned, we
    // want to set it back.
    SetLastError(OriginalError);

    return(FALSE);
} // WSAPostApiNotify()
// Sends a command string to the KCXP gateway under m_caSendMsgLock and
// validates the result set it returns.
//
// On success, caches the data row count (result rows minus the header
// row) in m_nRowNum and returns TRUE with the row positioned on the
// first record.  Every failure path logs, releases the lock, and
// returns FALSE.  NOTE(review): the lock is released manually on each
// of the many exit paths -- keep that invariant when editing.
BOOL IPCKDGateWayVistor::SendKcxpMsg( char *pCmd )
{
    EnterCriticalSection(&m_caSendMsgLock);
    g_pLog->WriteRunLog(KCXP_MODE, LOG_DEBUG, pCmd);
    int iRetCode = KCBP_MSG_OK;
    char szTemp[512] = {0};
    if (NULL == m_pKcxpConn)
    {
        // No KCXP connection available.
        g_pLog->WriteRunLog(KCXP_MODE, LOG_DEBUG, "获取KCXP连接失败!");
        LeaveCriticalSection(&m_caSendMsgLock);
        return FALSE;
    }
    // Send the message.
    try
    {
        // Parse the command.
        g_pParseKcbpLog->ParseCmd(pCmd);
        // Send the command to KCXP.
        BOOL bRet = g_pParseKcbpLog->ExecSingleCmd();
        // Reset the log parser for the next operation.
        g_pParseKcbpLog->Clean();
        if (FALSE != bRet)
        {
            // Fetch the execution result.
            int nRow = 0;
            if ((iRetCode = m_pKcxpConn->RsOpen()) == KCBP_MSG_OK)
            {
                // Get the result-set row count; it includes the header
                // row, so subtract 1 for the data row count.
                m_pKcxpConn->RsGetRowNum(&nRow);
                if (nRow>1)
                {
                    m_nRowNum = nRow - 1;
                }
                else
                {
                    // Unexpected row count in the result set.
                    g_pLog->WriteRunLogEx(__FILE__,__LINE__,"结果集返回行数异常!");
                    m_nRowNum = 0;
                    LeaveCriticalSection(&m_caSendMsgLock);
                    return FALSE;
                }
                if ((iRetCode = m_pKcxpConn->RsFetchRow()) == KCBP_MSG_OK)
                {
                    if ((iRetCode = m_pKcxpConn->RsGetCol(1, szTemp)) == KCBP_MSG_OK)
                    {
                        if ((iRetCode = m_pKcxpConn->RsGetCol(2, szTemp)) == KCBP_MSG_OK)
                        {
                            // Column 2 holds a status code; anything
                            // other than "0" means the gateway reported
                            // an error whose code is in column 3.
                            if(strcmp(szTemp,"0") != 0)
                            {
                                iRetCode = m_pKcxpConn->RsGetCol(3, szTemp);
                                g_pLog->WriteRunLogEx(__FILE__,__LINE__, "获取结果集列信息失败,ERRCODE = %ld", iRetCode);
                                LeaveCriticalSection(&m_caSendMsgLock);
                                return FALSE;
                            }
                        }
                    }
                }
                else
                {
                    // Failed to fetch the first result row.
                    g_pLog->WriteRunLogEx(__FILE__,__LINE__, "获取结果集列信息失败,ERRCODE = %ld", iRetCode);
                    LeaveCriticalSection(&m_caSendMsgLock);
                    return FALSE;
                }
            }
            else
            {
                // Failed to open the result set.
                g_pLog->WriteRunLogEx(__FILE__,__LINE__,"打开结果集失败,ERRCODE = %ld", iRetCode);
                LeaveCriticalSection(&m_caSendMsgLock);
                return FALSE;
            }
        }
        else
        {
            // Command execution itself failed.
            LeaveCriticalSection(&m_caSendMsgLock);
            return FALSE;
        }
    }
    catch(...)
    {
        // Exception raised inside the LBM call.
        g_pLog->WriteRunLog(KCXP_MODE, LOG_DEBUG, "LBM调用异常!");
        LeaveCriticalSection(&m_caSendMsgLock);
        return FALSE;
    }
    LeaveCriticalSection(&m_caSendMsgLock);
    return TRUE;
}
// Release one level of this recursive mutex.  The recursion count is
// tracked manually alongside the Win32 critical section (which is
// itself recursive); unlocking when the count is zero is a caller bug.
void MutexBase::unlock()
{
    ASSERT(m_mutex.m_recursionCount);
    --m_mutex.m_recursionCount;
    LeaveCriticalSection(&m_mutex.m_internalMutex);
}
/* Wait on COND, releasing LOCK while blocked, until ABSTIME passes.
   Returns 0 when signalled, ETIMEDOUT on timeout, EAGAIN on allocation
   failure, or the error from re-taking LOCK.  Implements a POSIX-style
   condition variable on top of Win32 critical sections and per-waiter
   auto-reset events.  The exact order of lock/unlock operations below
   is what makes the signal/wait race handling correct -- do not
   reorder.  */
int glthread_cond_timedwait_func (gl_cond_t *cond, gl_lock_t *lock, struct timespec *abstime)
{
  struct timeval currtime;

  gettimeofday (&currtime, NULL);
  /* Fast path: deadline already passed before we even enqueue.  */
  if (currtime.tv_sec > abstime->tv_sec
      || (currtime.tv_sec == abstime->tv_sec
          && currtime.tv_usec * 1000 >= abstime->tv_nsec))
    return ETIMEDOUT;

  if (!cond->guard.done)
    {
      if (InterlockedIncrement (&cond->guard.started) == 0)
        /* This thread is the first one to need this condition variable.
           Initialize it.  */
        glthread_cond_init (cond);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this condition variable.  */
        while (!cond->guard.done)
          Sleep (0);
    }

  EnterCriticalSection (&cond->lock);
  {
    struct gl_waitqueue_element *elt = gl_waitqueue_add (&cond->waiters);
    LeaveCriticalSection (&cond->lock);
    if (elt == NULL)
      {
        /* Allocation failure.  Weird.  */
        return EAGAIN;
      }
    else
      {
        HANDLE event = elt->event;
        int err;
        DWORD timeout;
        DWORD result;
        /* Now release the lock and let any other thread take it.  */
        err = glthread_lock_unlock (lock);
        if (err != 0)
          {
            /* Undo the enqueue before bailing out.  */
            EnterCriticalSection (&cond->lock);
            gl_waitqueue_remove (&cond->waiters, elt);
            LeaveCriticalSection (&cond->lock);
            CloseHandle (event);
            free (elt);
            return err;
          }
        /* POSIX says:
            "If another thread is able to acquire the mutex after the
             about-to-block thread has released it, then a subsequent call to
             pthread_cond_broadcast() or pthread_cond_signal() in that thread
             shall behave as if it were issued after the about-to-block thread
             has blocked."
           This is fulfilled here, because the thread signalling is done
           through SetEvent, not PulseEvent.  */
        /* Wait until another thread signals this event or until the abstime
           passes.  Convert the absolute deadline into a relative millisecond
           timeout, saturating to INFINITE on arithmetic overflow.  */
        gettimeofday (&currtime, NULL);
        if (currtime.tv_sec > abstime->tv_sec)
          timeout = 0;
        else
          {
            unsigned long seconds = abstime->tv_sec - currtime.tv_sec;
            timeout = seconds * 1000;
            if (timeout / 1000 != seconds) /* overflow? */
              timeout = INFINITE;
            else
              {
                long milliseconds =
                  abstime->tv_nsec / 1000000 - currtime.tv_usec / 1000;
                if (milliseconds >= 0)
                  {
                    timeout += milliseconds;
                    if (timeout < milliseconds) /* overflow? */
                      timeout = INFINITE;
                  }
                else
                  {
                    if (timeout >= - milliseconds)
                      timeout -= (- milliseconds);
                    else
                      timeout = 0;
                  }
              }
          }
        result = WaitForSingleObject (event, timeout);
        if (result == WAIT_FAILED)
          abort ();
        if (result == WAIT_TIMEOUT)
          {
            /* Timed out -- but a signal may have raced with us, so
               re-check under the lock and arbitrate.  */
            EnterCriticalSection (&cond->lock);
            if (gl_waitqueue_remove (&cond->waiters, elt))
              {
                /* The event was not signaled between the WaitForSingleObject
                   call and the EnterCriticalSection call.  */
                if (!(WaitForSingleObject (event, 0) == WAIT_TIMEOUT))
                  abort ();
              }
            else
              {
                /* The event was signaled between the WaitForSingleObject
                   call and the EnterCriticalSection call.  */
                if (!(WaitForSingleObject (event, 0) == WAIT_OBJECT_0))
                  abort ();
                /* Produce the right return value.  */
                result = WAIT_OBJECT_0;
              }
            LeaveCriticalSection (&cond->lock);
          }
        else
          {
            /* The thread which signalled the event already did the
               bookkeeping: removed us from the waiters.  */
          }
        CloseHandle (event);
        free (elt);
        /* Take the lock again.  It does not matter whether this is done
           before or after the bookkeeping for WAIT_TIMEOUT.  */
        err = glthread_lock_lock (lock);
        return (err ? err :
                result == WAIT_OBJECT_0 ? 0 :
                result == WAIT_TIMEOUT ? ETIMEDOUT :
                /* WAIT_FAILED shouldn't happen */ EAGAIN);
      }
  }
}
/* Worker thread that delivers queued DirectMusic performance messages
 * (PMSGs).  Sleeps until either the next message is due or a thread
 * message arrives, processes the immediate queue then every timed
 * message whose delivery window has opened, and exits on WM_QUIT /
 * PROCESSMSG_EXIT.  All queue access happens under This->safe. */
static DWORD WINAPI ProcessMsgThread(LPVOID lpParam) {
  IDirectMusicPerformance8Impl* This = lpParam;
  DWORD timeOut = INFINITE;
  MSG msg;
  HRESULT hr;
  REFERENCE_TIME rtCurTime;
  DMUS_PMSGItem* it = NULL;
  DMUS_PMSGItem* cur = NULL;
  DMUS_PMSGItem* it_next = NULL;

  while (TRUE) {
    /* Messages are considered due this early: latency + bumper. */
    DWORD dwDec = This->rtLatencyTime + This->dwBumperLength;

    if (timeOut > 0) MsgWaitForMultipleObjects(0, NULL, FALSE, timeOut, QS_POSTMESSAGE|QS_SENDMESSAGE|QS_TIMER);
    timeOut = INFINITE;

    EnterCriticalSection(&This->safe);
    hr = IDirectMusicPerformance8_GetTime(&This->IDirectMusicPerformance8_iface, &rtCurTime, NULL);
    if (FAILED(hr)) {
      goto outrefresh;
    }

    /* Immediate queue: deliver everything unconditionally.
     * ProceedMsg returns the processed node; we free it and advance via
     * the next pointer saved *before* processing. */
    for (it = This->imm_head; NULL != it; ) {
      it_next = it->next;
      cur = ProceedMsg(This, it);
      HeapFree(GetProcessHeap(), 0, cur);
      it = it_next;
    }

    /* Timed queue: deliver messages whose time falls inside the
     * current window [now, now + dwDec). */
    for (it = This->head; NULL != it && it->rtItemTime < rtCurTime + dwDec; ) {
      it_next = it->next;
      cur = ProceedMsg(This, it);
      HeapFree(GetProcessHeap(), 0, cur);
      it = it_next;
    }
    if (NULL != it) {
      /* Sleep until the next pending message becomes due. */
      timeOut = ( it->rtItemTime - rtCurTime ) + This->rtLatencyTime;
    }

outrefresh:
    LeaveCriticalSection(&This->safe);

    while (PeekMessageA(&msg, NULL, 0, 0, PM_REMOVE)) {
      /** if hwnd we suppose that is a windows event ... */
      if (NULL != msg.hwnd) {
        TranslateMessage(&msg);
        DispatchMessageA(&msg);
      } else {
        switch (msg.message) {
        case WM_QUIT:
        case PROCESSMSG_EXIT:
          goto outofthread;
        case PROCESSMSG_START:
          break;
        case PROCESSMSG_ADD:
          break;
        case PROCESSMSG_REMOVE:
          break;
        default:
          ERR("Unhandled message %u. Critical Path\n", msg.message);
          break;
        }
      }
    }
    /** here we should run a little of current AudioPath */
  }

outofthread:
  TRACE("(%p): Exiting\n", This);
  return 0;
}
/* Detach a context from the session's context list.  The session's
 * critical section serializes all list mutation; the actual unlink is
 * delegated to the *Locked helper. */
void vboxDispCmSessionCtxRemove(PVBOXDISPCM_SESSION pSession, PVBOXWDDMDISP_CONTEXT pContext)
{
    CRITICAL_SECTION *pCS = &pSession->CritSect;

    EnterCriticalSection(pCS);
    vboxDispCmSessionCtxRemoveLocked(pSession, pContext);
    LeaveCriticalSection(pCS);
}
// Thread that gets and sends CPU usage static DWORD WINAPI mainThread(LPVOID args) { UNREFERENCED_PARAMETER(args); bool usbOk = true; byte checkCounter = 0; byte pokeCounter = 0; while(1) { // If exit event is set then exit thread, otherwise wait for timeout and do the usual CPU stuff if(WaitForSingleObject(hThreadExitEvent, SAMPLERATE) != WAIT_TIMEOUT) break; // Get usage byte cpuUsage = getCPUUsage(); EnterCriticalSection(&cs); // Store sample sample.samples[sample.idx++] = cpuUsage; if(sample.idx >= sample.count) sample.idx = 0; // Get average uint avg = 0; for(uint i=0;i<sample.count;i++) avg += sample.samples[i]; avg /= sample.count; LeaveCriticalSection(&cs); cpuUsage = (byte)avg; // Show CPU usage actions_showCPULoad(cpuUsage); // Constantly searching for USB devices takes up a lot of CPU time, // so here we only check for new device every so often. if(!usbOk) { if(++checkCounter < (USB_CHECK_INTERVAL / SAMPLERATE)) continue; checkCounter = 0; } // Send to USB if((usbOk = checkDevice(usbOk))) { if(device_get()->ledMode == MODE_CPU_USAGE) { // Workout colour s_rgbVal colour; getCPUColour(cpuUsage, &colour); // Send colour usbOk = device_setColour(&colour); } else { // Just send pokes if not in CPU usage mode, // this sees if the device is still connected if(++pokeCounter >= (USB_POKE_INTERVAL / SAMPLERATE)) { pokeCounter = 0; usbOk = device_poke(); } } } } return EXIT_SUCCESS; }
/* NB: _get_commstatus also produces most of the events consumed by
 * _wait_on_mask(). Exceptions:
 *  - SERIAL_EV_RXFLAG: FIXME: once EventChar supported
 *
 * Fills *pCommstatus from the serial driver's interrupt counters and
 * queue lengths, and updates pComm->PendingEvents / pComm->counters as
 * a side effect.  Returns FALSE (with last-error set to
 * ERROR_IO_DEVICE) if any ioctl fails.
 *
 * FIX: the two references to the counters struct had been corrupted by
 * a text-encoding round-trip ("&curren" + "t..." collapsed into the
 * '¤' character); restored to `&currentCounters`.
 */
static BOOL _get_commstatus(WINPR_COMM *pComm, SERIAL_STATUS *pCommstatus)
{
	/* http://msdn.microsoft.com/en-us/library/jj673022%28v=vs.85%29.aspx */
	struct serial_icounter_struct currentCounters;

	/* NB: ensure to leave the critical section before to return */
	EnterCriticalSection(&pComm->EventsLock);

	ZeroMemory(pCommstatus, sizeof(SERIAL_STATUS));
	ZeroMemory(&currentCounters, sizeof(struct serial_icounter_struct));

	if (ioctl(pComm->fd, TIOCGICOUNT, &currentCounters) < 0)
	{
		CommLog_Print(WLOG_WARN, "TIOCGICOUNT ioctl failed, errno=[%d] %s", errno, strerror(errno));
		SetLastError(ERROR_IO_DEVICE);
		LeaveCriticalSection(&pComm->EventsLock);
		return FALSE;
	}

	/* NB: preferred below (currentCounters.* != pComm->counters.*) over
	 * (currentCounters.* > pComm->counters.*) thinking the counters can
	 * loop */

	/* Errors */

	if (currentCounters.buf_overrun != pComm->counters.buf_overrun)
	{
		pCommstatus->Errors |= SERIAL_ERROR_QUEUEOVERRUN;
	}

	if (currentCounters.overrun != pComm->counters.overrun)
	{
		pCommstatus->Errors |= SERIAL_ERROR_OVERRUN;
		pComm->PendingEvents |= SERIAL_EV_ERR;
	}

	if (currentCounters.brk != pComm->counters.brk)
	{
		pCommstatus->Errors |= SERIAL_ERROR_BREAK;
		pComm->PendingEvents |= SERIAL_EV_BREAK;
	}

	if (currentCounters.parity != pComm->counters.parity)
	{
		pCommstatus->Errors |= SERIAL_ERROR_PARITY;
		pComm->PendingEvents |= SERIAL_EV_ERR;
	}

	if (currentCounters.frame != pComm->counters.frame)
	{
		pCommstatus->Errors |= SERIAL_ERROR_FRAMING;
		pComm->PendingEvents |= SERIAL_EV_ERR;
	}

	/* HoldReasons */

	/* TODO: SERIAL_TX_WAITING_FOR_CTS */
	/* TODO: SERIAL_TX_WAITING_FOR_DSR */
	/* TODO: SERIAL_TX_WAITING_FOR_DCD */
	/* TODO: SERIAL_TX_WAITING_FOR_XON */
	/* TODO: SERIAL_TX_WAITING_ON_BREAK, see LCR's bit 6 */
	/* TODO: SERIAL_TX_WAITING_XOFF_SENT */

	/* AmountInInQueue */

	if (ioctl(pComm->fd, TIOCINQ, &(pCommstatus->AmountInInQueue)) < 0)
	{
		CommLog_Print(WLOG_WARN, "TIOCINQ ioctl failed, errno=[%d] %s", errno, strerror(errno));
		SetLastError(ERROR_IO_DEVICE);
		LeaveCriticalSection(&pComm->EventsLock);
		return FALSE;
	}

	/* AmountInOutQueue */

	if (ioctl(pComm->fd, TIOCOUTQ, &(pCommstatus->AmountInOutQueue)) < 0)
	{
		CommLog_Print(WLOG_WARN, "TIOCOUTQ ioctl failed, errno=[%d] %s", errno, strerror(errno));
		SetLastError(ERROR_IO_DEVICE);
		LeaveCriticalSection(&pComm->EventsLock);
		return FALSE;
	}

	/* BOOLEAN EofReceived; FIXME: once EofChar supported */
	/* BOOLEAN WaitForImmediate; TODO: once IOCTL_SERIAL_IMMEDIATE_CHAR
	 * fully supported */

	/* other events based on counters */

	if (currentCounters.rx != pComm->counters.rx)
	{
		pComm->PendingEvents |= SERIAL_EV_RXCHAR;
	}

	if ((currentCounters.tx != pComm->counters.tx) && /* at least a transmission occurred AND ...*/
	    (pCommstatus->AmountInOutQueue == 0))         /* output bufer is now empty */
	{
		pComm->PendingEvents |= SERIAL_EV_TXEMPTY;
	}
	else
	{
		/* FIXME: "now empty" from the specs is ambiguous, need to track
		 * previous completed transmission? */
		pComm->PendingEvents &= ~SERIAL_EV_TXEMPTY;
	}

	if (currentCounters.cts != pComm->counters.cts)
	{
		pComm->PendingEvents |= SERIAL_EV_CTS;
	}

	if (currentCounters.dsr != pComm->counters.dsr)
	{
		pComm->PendingEvents |= SERIAL_EV_DSR;
	}

	if (currentCounters.dcd != pComm->counters.dcd)
	{
		pComm->PendingEvents |= SERIAL_EV_RLSD;
	}

	if (currentCounters.rng != pComm->counters.rng)
	{
		pComm->PendingEvents |= SERIAL_EV_RING;
	}

	if (pCommstatus->AmountInInQueue > (0.8 * N_TTY_BUF_SIZE))
	{
		pComm->PendingEvents |= SERIAL_EV_RX80FULL;
	}
	else
	{
		/* FIXME: "is 80 percent full" from the specs is ambiguous, need
		 * to track when it previously occured? */
		pComm->PendingEvents &= ~SERIAL_EV_RX80FULL;
	}

	pComm->counters = currentCounters;

	LeaveCriticalSection(&pComm->EventsLock);
	return TRUE;
}
// Hooked replacement for the game's memory-free function.  Forwards to
// the original free, then (for non-null pointers) removes the
// allocation's bookkeeping entries from g_allocData/g_allocStuff under
// g_allocCS, and records suspicious frees (blocks whose first word is
// the magic 0x141826A10 vtable/marker value -- presumably a specific
// object type under investigation; TODO confirm) into g_freeThings with
// a captured stack snippet.  The large commented-out regions are
// retained debugging experiments.
intptr_t CustomMemFree(void* allocator, void* pointer)
{
	intptr_t retval = g_origMemFree(allocator, pointer);

	/*if (pointer != nullptr && *g_unsafePointerLoc)
	{
		size_t allocSize = 0;

		if (g_unsafeStack.size() == 0)
		{
			{
				std::unique_lock<std::mutex> lock(g_allocMutex);
				auto it = g_allocData.find(pointer);

				if (it != g_allocData.end())
				{
					allocSize = it->second;
					g_allocData.erase(it);
				}
			}

			if (**(void***)g_unsafePointerLoc >= pointer && **(void***)g_unsafePointerLoc < ((char*)pointer + allocSize))
			{
				std::vector<uintptr_t> stackList(96);

				uintptr_t* stack = (uintptr_t*)_AddressOfReturnAddress();

				for (int i = 0; i < stackList.size(); i++)
				{
					stackList[i] = stack[i];
				}

				g_unsafeStack = stackList;
			}
		}
	}*/

	if (/*!g_didLevelFree && */pointer != nullptr)
	{
		//std::unique_lock<std::mutex> lock(g_allocMutex);
		EnterCriticalSection(&g_allocCS);

		uintptr_t ptr = (uintptr_t)pointer;

		auto it = g_allocData.find(ptr);

		if (it != g_allocData.end())
		{
			size_t allocSize = it->second;

			g_allocStuff.erase({ ptr, allocSize });

			// Only blocks large enough to hold the marker word are
			// inspected.
			if (allocSize >= 8)
			{
				if (*(uintptr_t*)ptr == 0x141826A10)
				{
					// Skip the 32-byte home space to reach the caller
					// frame, then snapshot 16 stack slots.
					uintptr_t* stack = (uintptr_t*)_AddressOfReturnAddress();
					stack += (32 / 8);

					std::array<uintptr_t, 16> stacky;
					memcpy(stacky.data(), stack, 16 * 8);

					{
						g_freeThings.insert({ ptr, { allocSize, stacky } });
					}
				}
			}

			/*static char* location = hook::pattern("4C 8D 0D ? ? ? ? 48 89 01 4C 89 81 80 00 00").count(1).get(0).get<char>(3);
			static char** g_collectionRoot = (char**)(location + *(int32_t*)location + 4);

			for (int i = 0; i < 0x950; i++)
			{
				if (g_collectionRoot[i])
				{
					void* baad = *(void**)(g_collectionRoot[i] + 32);

					if (baad >= pointer && baad < ((char*)pointer + allocSize))
					{
						atArray<char>* array = (atArray<char>*)(g_collectionRoot[i] + 128);

						trace("freed collection %s (%p-%p)\n", &array->Get(0), pointer, allocSize + (char*)pointer);

						uintptr_t* stack = (uintptr_t*)_AddressOfReturnAddress();
						stack += (32 / 8);

						for (int i = 0; i < 16; i++)
						{
							trace("stack: %p\n", stack[i]);
						}
					}
				}
			}*/

			/*if (g_inLevelFree)
			{
				if (allocSize != -1)
				{
					int stackIdx = g_stackIdx++;

					std::vector<uintptr_t> stackList(96);

					uintptr_t* stack = (uintptr_t*)_AddressOfReturnAddress();

					for (int i = 0; i < stackList.size(); i++)
					{
						stackList[i] = stack[i];
					}

					g_stacks[stackIdx] = stackList;

					trace("level free: %p-%p - stack idx: %d\n", pointer, (char*)pointer + allocSize, stackIdx);
				}
			}*/

			g_allocData.erase(it);
		}

		LeaveCriticalSection(&g_allocCS);
	}

	return retval;
}
// Hooked replacement for the game's memory-allocate function.
// Forwards to the original allocator, then (for subAlloc == 0 only,
// i.e. the main heap -- TODO confirm) records the allocation's address,
// size, and a 16-slot caller-stack snapshot in g_allocData/g_allocStuff
// under g_allocCS.  The commented-out regions are retained debugging
// experiments (unsafe-pointer tripwires and free/realloc correlation).
void* CustomMemAlloc(void* allocator, intptr_t size, intptr_t align, int subAlloc)
{
	void* ptr = g_origMemAlloc(allocator, size, align, subAlloc);

	/*if (*g_unsafePointerLoc >= ptr && *g_unsafePointerLoc < ((char*)ptr + size))
	{
#ifdef _DEBUG
		__debugbreak();
#endif

		assert(!"Tried to allocate over unsafe pointer!");
	}

	if (*g_unsafePointerLoc)
	{
		void*** unsafePtrLoc = (void***)g_unsafePointerLoc;

		if (**unsafePtrLoc >= ptr && **unsafePtrLoc < ((char*)ptr + size))
		{
#ifdef _DEBUG
			__debugbreak();
#endif

			assert(!"Tried to allocate over unsafe pointer!");
		}
	}*/

	//memset(ptr, 0, size);

	if (subAlloc == 0)
	{
		uintptr_t ptr_ = (uintptr_t)ptr;

		//std::unique_lock<std::mutex> lock(g_allocMutex);
		EnterCriticalSection(&g_allocCS);

		g_allocData[ptr_] = size;

		/*auto first = g_freeThings.lower_bound(ptr_);
		auto second = g_freeThings.upper_bound(ptr_ + size);

		for (auto it = first; first != second; first++)
		{
			if (ptr_ >= it->first && ptr_ < (it->first + it->second.first))
			{
				if (size == it->second.first)
				{
					trace("allocate over stacky!\n");

					auto stacky = it->second.second;

					for (auto& entry : stacky)
					{
						trace("%p\n", entry);
					}

					trace("noooooooooo!\n");
				}
			}
		}*/

		//g_allocData[ptr_] = size;

		// Skip the 32-byte home space to reach the caller frame, then
		// snapshot 16 stack slots keyed by (address, size).
		uintptr_t* stack = (uintptr_t*)_AddressOfReturnAddress();
		stack += (32 / 8);

		std::array<uintptr_t, 16> stacky;
		memcpy(stacky.data(), stack, 16 * 8);

		g_allocStuff[{ ptr_, size }] = stacky;

		LeaveCriticalSection(&g_allocCS);
	}

	return ptr;
}
/* Release a CyaSSL mutex (a Win32 critical section on this platform).
   Always returns 0; LeaveCriticalSection itself cannot fail. */
int UnLockMutex(CyaSSL_Mutex* m)
{
    LeaveCriticalSection(m);
    return 0;
}
/* D runtime: leave the monitor associated with object h.
 * The monitor must already exist (created on first _d_monitorenter);
 * MONPTR maps the object to its critical section. */
void _d_monitorexit(Object *h)
{
    //printf("_d_monitorexit(%p)\n", h);
    assert(h->monitor);
    LeaveCriticalSection(MONPTR(h));
}
void gcore::Mutex::unlock() { LeaveCriticalSection((CRITICAL_SECTION*)&mData); }
/* Unlock a stresscpu mutex wrapper.  Delegates to the embedded Win32
   critical section; always returns 0 (LeaveCriticalSection cannot
   fail). */
int stresscpu_thread_mutex_unlock(stresscpu_thread_mutex_t *mtx)
{
    LeaveCriticalSection(mtx->actual_mutex);
    return 0;
}
/*
 * NB: see also: _set_wait_mask()
 *
 * Blocks until at least one of the events selected by the wait mask
 * occurs (collected into *pOutputMask), or until a pending
 * SERIAL_EV_FREERDP_STOP aborts the wait (TRUE with *pOutputMask left
 * empty).  Returns FALSE only if refreshing the pending events fails.
 * SERIAL_EV_FREERDP_WAITING is advertised in PendingEvents for the
 * whole duration so other code can see a waiter is present; every exit
 * path clears it.  Polls with a 100 ms sleep rather than a semaphore
 * (see the note near the bottom).
 */
static BOOL _wait_on_mask(WINPR_COMM *pComm, ULONG *pOutputMask)
{
	assert(*pOutputMask == 0);

	EnterCriticalSection(&pComm->EventsLock);
	pComm->PendingEvents |= SERIAL_EV_FREERDP_WAITING;
	LeaveCriticalSection(&pComm->EventsLock);

	while (TRUE)
	{
		/* NB: EventsLock also used by _refresh_PendingEvents() */
		if (!_refresh_PendingEvents(pComm))
		{
			EnterCriticalSection(&pComm->EventsLock);
			pComm->PendingEvents &= ~SERIAL_EV_FREERDP_WAITING;
			LeaveCriticalSection(&pComm->EventsLock);
			return FALSE;
		}

		/* NB: ensure to leave the critical section before to return */
		EnterCriticalSection(&pComm->EventsLock);

		if (pComm->PendingEvents & SERIAL_EV_FREERDP_STOP)
		{
			pComm->PendingEvents &= ~SERIAL_EV_FREERDP_STOP;

			/* pOutputMask must remain empty but should
			 * not have been modified.
			 *
			 * http://msdn.microsoft.com/en-us/library/ff546805%28v=vs.85%29.aspx
			 */
			assert(*pOutputMask == 0);

			pComm->PendingEvents &= ~SERIAL_EV_FREERDP_WAITING;
			LeaveCriticalSection(&pComm->EventsLock);
			return TRUE;
		}

		/* Move each pending+wanted event bit from PendingEvents into
		 * *pOutputMask. */
		_consume_event(pComm, pOutputMask, SERIAL_EV_RXCHAR);
		_consume_event(pComm, pOutputMask, SERIAL_EV_RXFLAG);
		_consume_event(pComm, pOutputMask, SERIAL_EV_TXEMPTY);
		_consume_event(pComm, pOutputMask, SERIAL_EV_CTS);
		_consume_event(pComm, pOutputMask, SERIAL_EV_DSR);
		_consume_event(pComm, pOutputMask, SERIAL_EV_RLSD);
		_consume_event(pComm, pOutputMask, SERIAL_EV_BREAK);
		_consume_event(pComm, pOutputMask, SERIAL_EV_ERR);
		_consume_event(pComm, pOutputMask, SERIAL_EV_RING );
		_consume_event(pComm, pOutputMask, SERIAL_EV_RX80FULL);
		LeaveCriticalSection(&pComm->EventsLock);

		/* NOTE: PendingEvents can be modified from now on but
		 * not pOutputMask */

		if (*pOutputMask != 0)
		{
			/* at least an event occurred */
			EnterCriticalSection(&pComm->EventsLock);
			pComm->PendingEvents &= ~SERIAL_EV_FREERDP_WAITING;
			LeaveCriticalSection(&pComm->EventsLock);
			return TRUE;
		}

		/* waiting for a modification of PendingEvents.
		 *
		 * NOTE: previously used a semaphore but used
		 * sem_timedwait() anyway. Finally preferred a simpler
		 * solution with Sleep() whithout the burden of the
		 * semaphore initialization and destroying.
		 */
		Sleep(100); /* 100 ms */
	}

	/* NOTE(review): unreachable -- the loop above only exits via
	 * return.  Kept as defensive code. */
	CommLog_Print(WLOG_WARN, "_wait_on_mask, unexpected return, WaitEventMask=0X%lX", pComm->WaitEventMask);

	EnterCriticalSection(&pComm->EventsLock);
	pComm->PendingEvents &= ~SERIAL_EV_FREERDP_WAITING;
	LeaveCriticalSection(&pComm->EventsLock);

	assert(FALSE);
	return FALSE;
}
void CriticalSection::exit() const noexcept { LeaveCriticalSection ((CRITICAL_SECTION*) lock); }
BOOL VDE_IOControl( DWORD hOpenContext, DWORD dwCode, PBYTE pBufIn, DWORD dwLenIn, PBYTE pBufOut, DWORD dwLenOut, PDWORD pdwActualOut ) { SVEngineContext *pCtxt; BOOL bRet = TRUE; BYTE LocalBuffer[SVEARG_MAX_SIZE]; PBYTE pBufInLocal = (PBYTE)&LocalBuffer; pCtxt = SVE_get_context(); DEBUGMSG(VDE_ZONE_ENTER, (_T("[VDE] VDE_IOControl(0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x)\r\n"), hOpenContext, dwCode, pBufIn, dwLenIn, pBufOut, dwLenOut, pdwActualOut)); memset(pBufInLocal, 0, SVEARG_MAX_SIZE); if (dwLenIn > SVEARG_MAX_SIZE || !CeSafeCopyMemory(pBufInLocal, pBufIn, dwLenIn)) { RETAILMSG(ZONEMASK_ERROR, (_T("VDE_IOControl: Failed to create a local copy of parameters.\r\n")) ); return FALSE; } EnterCriticalSection(&pCtxt->csProc); switch(dwCode) { case IOCTL_POWER_CAPABILITIES: case IOCTL_POWER_GET: case IOCTL_POWER_QUERY: case IOCTL_POWER_SET: case IOCTL_REGISTER_POWER_RELATIONSHIP: break; case IOCTL_SVE_PM_SET_POWER_ON: SVE_video_engine_power_on(); break; case IOCTL_SVE_PM_SET_POWER_OFF: //if caller is not kernel mode, do not allow setting power state to off if (GetDirectCallerProcessId() != GetCurrentProcessId()){ return FALSE; } SVE_video_engine_power_off(); break; case IOCTL_SVE_RSC_REQUEST_FIMD_INTERFACE: case IOCTL_SVE_RSC_RELEASE_FIMD_INTERFACE: case IOCTL_SVE_RSC_REQUEST_FIMD_WIN0: case IOCTL_SVE_RSC_RELEASE_FIMD_WIN0: case IOCTL_SVE_RSC_REQUEST_FIMD_WIN1: case IOCTL_SVE_RSC_RELEASE_FIMD_WIN1: case IOCTL_SVE_RSC_REQUEST_FIMD_WIN2: case IOCTL_SVE_RSC_RELEASE_FIMD_WIN2: case IOCTL_SVE_RSC_REQUEST_FIMD_WIN3: case IOCTL_SVE_RSC_RELEASE_FIMD_WIN3: case IOCTL_SVE_RSC_REQUEST_FIMD_WIN4: case IOCTL_SVE_RSC_RELEASE_FIMD_WIN4: case IOCTL_SVE_RSC_REQUEST_POST: case IOCTL_SVE_RSC_RELEASE_POST: case IOCTL_SVE_RSC_REQUEST_ROTATOR: case IOCTL_SVE_RSC_RELEASE_ROTATOR: case IOCTL_SVE_RSC_REQUEST_TVSCALER_TVENCODER: case IOCTL_SVE_RSC_RELEASE_TVSCALER_TVENCODER: __try { bRet = SVE_Resource_API_Proc(hOpenContext, SVE_get_api_function_code(dwCode), pBufInLocal, 
dwLenIn, pBufOut, dwLenOut, pdwActualOut); } __except ( EXCEPTION_EXECUTE_HANDLER ) { RETAILMSG( 1, ( _T("VDE_IOControl: exception in IOCTL_SVE_RSC_REQUEST_FIMD_INTERFACE\n")) ); LeaveCriticalSection(&pCtxt->csProc); return FALSE; } break; case IOCTL_SVE_FIMD_SET_INTERFACE_PARAM: case IOCTL_SVE_FIMD_SET_OUTPUT_RGBIF: case IOCTL_SVE_FIMD_SET_OUTPUT_TV: case IOCTL_SVE_FIMD_SET_OUTPUT_ENABLE: case IOCTL_SVE_FIMD_SET_OUTPUT_DISABLE: case IOCTL_SVE_FIMD_SET_WINDOW_MODE: case IOCTL_SVE_FIMD_SET_WINDOW_POSITION: case IOCTL_SVE_FIMD_SET_WINDOW_FRAMEBUFFER: case IOCTL_SVE_FIMD_SET_WINDOW_COLORMAP: case IOCTL_SVE_FIMD_SET_WINDOW_ENABLE: case IOCTL_SVE_FIMD_SET_WINDOW_DISABLE: case IOCTL_SVE_FIMD_SET_WINDOW_BLEND_DISABLE: case IOCTL_SVE_FIMD_SET_WINDOW_BLEND_COLORKEY: case IOCTL_SVE_FIMD_SET_WINDOW_BLEND_ALPHA: case IOCTL_SVE_FIMD_WAIT_FRAME_INTERRUPT: case IOCTL_SVE_FIMD_GET_OUTPUT_STATUS: case IOCTL_SVE_FIMD_GET_WINDOW_STATUS: __try { bRet = SVE_DispCon_API_Proc(hOpenContext, SVE_get_api_function_code(dwCode), pBufInLocal, dwLenIn, pBufOut, dwLenOut, pdwActualOut); } __except ( EXCEPTION_EXECUTE_HANDLER ) { RETAILMSG( 1, ( _T("VDE_IOControl: exception in IOCTL_SVE_RSC_REQUEST_FIMD_INTERFACE\n")) ); LeaveCriticalSection(&pCtxt->csProc); return FALSE; } break; case IOCTL_SVE_POST_SET_PROCESSING_PARAM: case IOCTL_SVE_POST_SET_SOURCE_BUFFER: case IOCTL_SVE_POST_SET_NEXT_SOURCE_BUFFER: case IOCTL_SVE_POST_SET_DESTINATION_BUFFER: case IOCTL_SVE_POST_SET_NEXT_DESTINATION_BUFFER: case IOCTL_SVE_POST_SET_PROCESSING_START: case IOCTL_SVE_POST_SET_PROCESSING_STOP: case IOCTL_SVE_POST_WAIT_PROCESSING_DONE: case IOCTL_SVE_POST_GET_PROCESSING_STATUS: __try { bRet = SVE_Post_API_Proc(hOpenContext, SVE_get_api_function_code(dwCode), pBufInLocal, dwLenIn, pBufOut, dwLenOut, pdwActualOut); } __except ( EXCEPTION_EXECUTE_HANDLER ) { RETAILMSG( 1, ( _T("VDE_IOControl: exception in IOCTL_SVE_RSC_REQUEST_FIMD_INTERFACE\n")) ); LeaveCriticalSection(&pCtxt->csProc); return FALSE; } break; case 
IOCTL_SVE_LOCALPATH_SET_WIN0_START: case IOCTL_SVE_LOCALPATH_SET_WIN0_STOP: case IOCTL_SVE_LOCALPATH_SET_WIN1_START: case IOCTL_SVE_LOCALPATH_SET_WIN1_STOP: case IOCTL_SVE_LOCALPATH_SET_WIN2_START: case IOCTL_SVE_LOCALPATH_SET_WIN2_STOP: __try { bRet = SVE_LocalPath_API_Proc(hOpenContext, SVE_get_api_function_code(dwCode), pBufInLocal, dwLenIn, pBufOut, dwLenOut, pdwActualOut); } __except ( EXCEPTION_EXECUTE_HANDLER ) { RETAILMSG( 1, ( _T("VDE_IOControl: exception in IOCTL_SVE_RSC_REQUEST_FIMD_INTERFACE\n")) ); LeaveCriticalSection(&pCtxt->csProc); return FALSE; } break; case IOCTL_SVE_ROTATOR_SET_OPERATION_PARAM: case IOCTL_SVE_ROTATOR_SET_SOURCE_BUFFER: case IOCTL_SVE_ROTATOR_SET_DESTINATION_BUFFER: case IOCTL_SVE_ROTATOR_SET_OPERATION_START: case IOCTL_SVE_ROTATOR_SET_OPERATION_STOP: case IOCTL_SVE_ROTATOR_WAIT_OPERATION_DONE: case IOCTL_SVE_ROTATOR_GET_STATUS: __try { bRet = SVE_Rotator_API_Proc(hOpenContext, SVE_get_api_function_code(dwCode), pBufInLocal, dwLenIn, pBufOut, dwLenOut, pdwActualOut); } __except ( EXCEPTION_EXECUTE_HANDLER ) { RETAILMSG( 1, ( _T("VDE_IOControl: exception in IOCTL_SVE_RSC_REQUEST_FIMD_INTERFACE\n")) ); LeaveCriticalSection(&pCtxt->csProc); return FALSE; } break; case IOCTL_SVE_TVSC_SET_PROCESSING_PARAM: case IOCTL_SVE_TVSC_SET_SOURCE_BUFFER: case IOCTL_SVE_TVSC_SET_NEXT_SOURCE_BUFFER: case IOCTL_SVE_TVSC_SET_DESTINATION_BUFFER: case IOCTL_SVE_TVSC_SET_NEXT_DESTINATION_BUFFER: case IOCTL_SVE_TVSC_SET_PROCESSING_START: case IOCTL_SVE_TVSC_SET_PROCESSING_STOP: case IOCTL_SVE_TVSC_WAIT_PROCESSING_DONE: case IOCTL_SVE_TVSC_GET_PROCESSING_STATUS: __try { bRet = SVE_TVScaler_API_Proc(hOpenContext, SVE_get_api_function_code(dwCode), pBufInLocal, dwLenIn, pBufOut, dwLenOut, pdwActualOut); } __except ( EXCEPTION_EXECUTE_HANDLER ) { RETAILMSG( 1, ( _T("VDE_IOControl: exception in IOCTL_SVE_RSC_REQUEST_FIMD_INTERFACE\n")) ); LeaveCriticalSection(&pCtxt->csProc); return FALSE; } break; case IOCTL_SVE_TVENC_SET_INTERFACE_PARAM: case 
IOCTL_SVE_TVENC_SET_ENCODER_ON: case IOCTL_SVE_TVENC_SET_ENCODER_OFF: case IOCTL_SVE_TVENC_GET_INTERFACE_STATUS: __try { bRet = SVE_TVEncoder_API_Proc(hOpenContext, SVE_get_api_function_code(dwCode), pBufInLocal, dwLenIn, pBufOut, dwLenOut, pdwActualOut); } __except ( EXCEPTION_EXECUTE_HANDLER ) { RETAILMSG( 1, ( _T("VDE_IOControl: exception in IOCTL_SVE_RSC_REQUEST_FIMD_INTERFACE\n")) ); LeaveCriticalSection(&pCtxt->csProc); return FALSE; } break; case IOCTL_SVE_FIMD_VSYNC_ENABLE: __try { Disp_VSync_Enable(); bRet=TRUE; } __except ( EXCEPTION_EXECUTE_HANDLER ) { RETAILMSG( 1, ( _T("VDE_IOControl: exception in IOCTL_SVE_FIMD_VSYNC_ENABLE\n")) ); LeaveCriticalSection(&pCtxt->csProc); return FALSE; } break; case IOCTL_SVE_FIMD_GET_FLIPSTATUS: __try { if(Disp_GetFlipStatus()) { bRet=TRUE; } else { bRet=FALSE; } } __except ( EXCEPTION_EXECUTE_HANDLER ) { RETAILMSG( 1, ( _T("VDE_IOControl: exception in IOCTL_SVE_FIMD_GET_FLIPSTATUS\n")) ); LeaveCriticalSection(&pCtxt->csProc); return FALSE; } break; case IOCTL_SVE_PM_GET_POWER_STATUS: default: VDE_ERR((_T("[VDE:ERR] VDE_IOControl() : Unknown IOCTL [0x%08x]\r\n"), dwCode)); SetLastError (ERROR_INVALID_ACCESS); bRet = FALSE; break; } LeaveCriticalSection(&pCtxt->csProc); DEBUGMSG(VDE_ZONE_ENTER, (_T("[VDE] --VDE_IOControl()\r\n"))); return bRet; }
/* Append a context to the session's context list.  The session's
 * critical section serializes all list mutation. */
void vboxDispCmSessionCtxAdd(PVBOXDISPCM_SESSION pSession, PVBOXWDDMDISP_CONTEXT pContext)
{
    CRITICAL_SECTION *pCS = &pSession->CritSect;

    EnterCriticalSection(pCS);
    RTListAppend(&pSession->CtxList, &pContext->ListNode);
    LeaveCriticalSection(pCS);
}
// Core scheduler loop: picks the next runnable fiber context and
// switches to it.  Sources, in priority order: (1) the resuming-context
// list, (2) expired timers (pushed onto the resuming list), (3) I/O
// completions from the IOCP port, including the special remote-spawn
// wakeup.  Must be called on the dispatcher's own thread.
void Dispatcher::dispatch() {
  assert(GetCurrentThreadId() == threadId);
  NativeContext* context;
  for (;;) {
    // 1) A context already queued for resumption wins immediately.
    if (firstResumingContext != nullptr) {
      context = firstResumingContext;
      firstResumingContext = context->next;
      break;
    }

    // Current time in milliseconds from the performance counter.
    LARGE_INTEGER frequency;
    LARGE_INTEGER ticks;
    QueryPerformanceCounter(&ticks);
    QueryPerformanceFrequency(&frequency);
    uint64_t currentTime = ticks.QuadPart / (frequency.QuadPart / 1000);

    // 2) Move every expired timer's context onto the resuming list.
    auto timerContextPair = timers.begin();
    auto end = timers.end();
    while (timerContextPair != end && timerContextPair->first <= currentTime) {
      pushContext(timerContextPair->second);
      timerContextPair = timers.erase(timerContextPair);
    }

    if (firstResumingContext != nullptr) {
      context = firstResumingContext;
      firstResumingContext = context->next;
      break;
    }

    // 3) Block on the IOCP until the earliest timer is due (or forever
    // if none), capped below INFINITE so the cast is safe.
    DWORD timeout = timers.empty() ? INFINITE : static_cast<DWORD>(std::min(timers.begin()->first - currentTime, static_cast<uint64_t>(INFINITE - 1)));
    OVERLAPPED_ENTRY entry;
    ULONG actual = 0;
    if (GetQueuedCompletionStatusEx(completionPort, &entry, 1, &actual, timeout, TRUE) == TRUE) {
      if (entry.lpOverlapped == reinterpret_cast<LPOVERLAPPED>(remoteSpawnOverlapped)) {
        // Wakeup posted by another thread: drain the remote spawn
        // queue under the shared critical section.
        EnterCriticalSection(reinterpret_cast<LPCRITICAL_SECTION>(criticalSection));
        assert(remoteNotificationSent);
        assert(!remoteSpawningProcedures.empty());
        do {
          spawn(std::move(remoteSpawningProcedures.front()));
          remoteSpawningProcedures.pop();
        } while (!remoteSpawningProcedures.empty());

        remoteNotificationSent = false;
        LeaveCriticalSection(reinterpret_cast<LPCRITICAL_SECTION>(criticalSection));
        continue;
      }

      // Ordinary I/O completion: resume the context embedded in the
      // OVERLAPPED wrapper.
      context = reinterpret_cast<DispatcherContext*>(entry.lpOverlapped)->context;
      break;
    }

    DWORD lastError = GetLastError();
    if (lastError == WAIT_TIMEOUT) {
      continue;  // timer became due; loop to process it
    }

    if (lastError != WAIT_IO_COMPLETION) {
      throw std::runtime_error("Dispatcher::dispatch, GetQueuedCompletionStatusEx failed, " + errorMessage(lastError));
    }
  }

  // Switch fibers only when the chosen context differs from the one
  // already running.
  if (context != currentContext) {
    currentContext = context;
    SwitchToFiber(context->fiber);
  }
}
void IoBufferPool::unlock() { LeaveCriticalSection(&m_crit); }
/* Work-item callback that re-arms the overlapped read on a named-pipe
 * instance: tears down the previous wait registration, posts a new
 * ReadFile, and re-registers the read-completion callback.  On fatal
 * errors (broken pipe, registration failure) queues pipe_relisten_cb to
 * recycle the instance for a new client.  Runs under pp->rcs. */
void WINAPI pipe_reread_cb(void *ctx)
{
    pipe_instance_t *pp;
    DWORD result;
    int failed;

    pp = (pipe_instance_t *)ctx;
    EnterCriticalSection(&pp->rcs);
    TRACE1(ENTER, "Entering pipe_reread_cb %p", ctx);
    failed = 0;

    if (pp->state != PIPE_STATE_CONNECTED) {
        TRACE0(NETWORK, "WARNING: not PIPE_STATE_CONNECTED");
    }

    /*
     * Tear down and wire up read thread callback again.
     * This is probably inefficient.
     */
    UnregisterWaitEx(pp->rwait, pp->revent);
    ResetEvent(pp->revent); /* XXX ReadFile() should do this for us? */
    pp->rwait = NULL;

    /*
     * Post a new read request. Deal with fatal errors.
     */
    result = ReadFile(pp->pipe, pp->rbuf, pp->rsize, NULL, &pp->rov);
    if (result == 0) {
        result = GetLastError();
        if (result != ERROR_IO_PENDING) {
            TRACE1(ANY, "WARNING: pipe_reread_cb read returned %d", result);
        }
        if (result == ERROR_BROKEN_PIPE) {
            failed = 1;
            goto fail;
        }
    }

    /*
     * Now, and only now, do we kick off the read thread, in order
     * to avoid being preempted if the client disconnects.
     */
    result = RegisterWaitForSingleObject(&(pp->rwait), pp->revent,
        pipe_read_cb, pp, INFINITE,
        WT_EXECUTEINIOTHREAD | WT_EXECUTEONLYONCE);
    if (result == 0) {
        result = GetLastError();
        TRACE1(CONFIGURATION, "Error %u RegisterWaitForSingleObject()", result);
        failed = 1;
    }

 fail:
    /*
     * If a fatal error occurred, disconnect the pipe client, and
     * listen for a new connection on this instance.
     */
    if (failed) {
        ResetEvent(pp->revent);
        QueueUserWorkItem(
            (LPTHREAD_START_ROUTINE)pipe_relisten_cb,
            (PVOID)pp,
            WT_EXECUTEINIOTHREAD);
    }

 out:
    /* NOTE(review): label is unused (no goto out in this function);
     * kept for symmetry with sibling callbacks. */
    TRACE0(ENTER, "Leaving pipe_reread_cb");
    LeaveCriticalSection(&pp->rcs);
}
/*
 * Waits on COND, releasing LOCK while blocked and re-acquiring it before
 * returning. Returns 0 on success, EAGAIN on allocation failure, or the
 * error from unlocking LOCK. Signalling is event-per-waiter: each waiter
 * gets its own event handle from the wait queue.
 */
int glthread_cond_wait_func (gl_cond_t *cond, gl_lock_t *lock) {
  if (!cond->guard.done)
    {
      /* Lazy one-time initialization, raced via an interlocked counter.
         NOTE(review): the `== 0` test implies guard.started starts at -1
         so only the first incrementer sees 0 — confirm the initializer. */
      if (InterlockedIncrement (&cond->guard.started) == 0)
        /* This thread is the first one to need this condition variable.
           Initialize it.  */
        glthread_cond_init (cond);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this condition variable.  */
        while (!cond->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&cond->lock);
  {
    /* Enqueue ourselves while holding the condvar's internal lock. */
    struct gl_waitqueue_element *elt = gl_waitqueue_add (&cond->waiters);
    LeaveCriticalSection (&cond->lock);
    if (elt == NULL)
      {
        /* Allocation failure.  Weird.  */
        return EAGAIN;
      }
    else
      {
        HANDLE event = elt->event;
        int err;
        DWORD result;
        /* Now release the lock and let any other thread take it.  */
        err = glthread_lock_unlock (lock);
        if (err != 0)
          {
            /* Unlock failed: undo the enqueue and clean up our event. */
            EnterCriticalSection (&cond->lock);
            gl_waitqueue_remove (&cond->waiters, elt);
            LeaveCriticalSection (&cond->lock);
            CloseHandle (event);
            free (elt);
            return err;
          }
        /* POSIX says:
            "If another thread is able to acquire the mutex after the
             about-to-block thread has released it, then a subsequent call to
             pthread_cond_broadcast() or pthread_cond_signal() in that thread
             shall behave as if it were issued after the about-to-block thread
             has blocked."
           This is fulfilled here, because the thread signalling is done
           through SetEvent, not PulseEvent.  */
        /* Wait until another thread signals this event.  */
        result = WaitForSingleObject (event, INFINITE);
        if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
          abort ();
        CloseHandle (event);
        free (elt);
        /* The thread which signalled the event already did the bookkeeping:
           removed us from the waiters.  */
        return glthread_lock_lock (lock);
      }
  }
}
/*
 * Wait callback fired when the overlapped pipe read completes. Validates
 * the routing-socket message in the read buffer, dispatches RTM_ADD /
 * RTM_DELETE, echoes the message to all pipe clients, and re-arms the
 * read via pipe_reread_cb on a worker thread.
 */
void CALLBACK pipe_read_cb(PVOID lpParameter, BOOLEAN TimerOrWaitFired) {
    struct rt_msghdr *rtm;
    pipe_instance_t *pp;
    DWORD result;
    DWORD nbytes;

    pp = (pipe_instance_t *)lpParameter;
    EnterCriticalSection(&pp->rcs);
    TRACE1(ENTER, "Entering pipe_read_cb %p", lpParameter);
    if (pp->state != PIPE_STATE_CONNECTED) {
        TRACE0(NETWORK, "WARNING: not PIPE_STATE_CONNECTED, bailing.");
        /*
         * XXX: Is something racy, or is it just me?
         * Try to avoid deadlocking by returning if we
         * got called when we weren't connected.
         */
        goto out;
    }
    /* Collect the byte count of the completed overlapped read. */
    result = GetOverlappedResult(pp->pipe, &pp->rov, &nbytes, TRUE);
    if (result == 0) {
        result = GetLastError();
        TRACE1(NETWORK, "WARNING: pipe_read_cb read returned %d", result);
        if (result == ERROR_BROKEN_PIPE) {
            /*
             * We must queue the new listen on a separate thread to
             * avoid infinite recursion.
             */
            TRACE0(NETWORK, "Posting listen again.");
            ResetEvent(pp->revent);
            QueueUserWorkItem( (LPTHREAD_START_ROUTINE)pipe_relisten_cb, (PVOID)pp, WT_EXECUTEINIOTHREAD);
            goto out;
        }
        /* NOTE(review): on errors other than ERROR_BROKEN_PIPE we fall
         * through and parse the buffer even though nbytes may not be
         * meaningful — confirm this is intentional. */
    }
    TRACE1(NETWORK, "Read %d bytes from named pipe.", nbytes);
    /*
     * Perform sanity checks on input message.
     * XXX: We should use a more appropriate errno value.
     * We use -1 as ENOBUFS, etc are not part of the namespace.
     */
    rtm = (struct rt_msghdr *)&pp->rbuf[0];
    /* NOTE(review): rtm_version is read before the size check below
     * validates that at least sizeof(struct rt_msghdr) bytes arrived. */
    if (rtm->rtm_version != RTM_VERSION) {
        TRACE1(NETWORK, "Invalid rtm_version %d, dropping.", rtm->rtm_version);
        goto drop;
    }
    /*
     * Sanity check size: message must fit in what was read and the read
     * must cover at least a full header.
     */
    if (rtm->rtm_msglen > nbytes || nbytes < sizeof(struct rt_msghdr)) {
        TRACE1(NETWORK, "Invalid rtm_msglen %d, dropping.", rtm->rtm_msglen);
        rtm->rtm_errno = -1;
        goto drop;
    }
    if (rtm->rtm_pid == 0) {
        /* Messages must carry the originating process id. */
        TRACE1(NETWORK, "Invalid rtm_pid %d, dropping.", rtm->rtm_pid);
        rtm->rtm_errno = -1;
        goto bounce;
    }
    /* Dispatch on message type; rtm_errno carries the result back. */
    switch (rtm->rtm_type) {
    case RTM_ADD:
        result = rtm_add_route(rtm, nbytes);
        if (result == 0) {
            TRACE0(NETWORK, "route added successfully");
        } else {
            TRACE0(NETWORK, "failed to add route");
        }
        rtm->rtm_errno = result;
        break;
    case RTM_DELETE:
        result = rtm_delete_route(rtm, nbytes);
        if (result == 0) {
            TRACE0(NETWORK, "route deleted successfully");
        } else {
            TRACE0(NETWORK, "failed to delete route");
        }
        rtm->rtm_errno = result;
        break;
    default:
        TRACE1(NETWORK, "Invalid rtm_type %d, dropping.", rtm->rtm_type);
        rtm->rtm_errno = -1;
        break;
    }
 bounce:
    /*
     * There is currently no analogue of the BSD SO_LOOPBACK option.
     * XXX: Normally processes will hear their own messages echoed across
     * the routing socket emulation pipe. Because the broadcast technique
     * uses blocking NT I/O, processes must read back their own message
     * after issuing it.
     */
    broadcast_pipe_message(pp->rbuf, nbytes);
 drop:
    /* Re-arm the read on a worker thread to avoid recursion. */
    TRACE0(NETWORK, "Posting read again.");
    ResetEvent(pp->revent);
    QueueUserWorkItem( (LPTHREAD_START_ROUTINE)pipe_reread_cb, (PVOID)pp, WT_EXECUTEINIOTHREAD);
 out:
    TRACE0(ENTER, "Leaving pipe_read_cb");
    LeaveCriticalSection(&pp->rcs);
}
BOOL WINAPIV WSAPreApiNotify(
    IN INT NotificationCode,
    OUT LPVOID ReturnCode,
    IN LPSTR LibraryName,
    ...)
/*++

  Function Description:

      Builds a string for output and passes it, along with
      information about the call, to a handler function.

  Arguments:

      NotificationCode -- specifies which API function called us.

      ReturnCode -- a generic pointer to the return value of the API
      function.  Can be used to change the return value in the
      case of a short-circuit (see how the return value from
      PreApiNotify works for more information on short-circuiting
      the API function).

      LibraryName -- a string pointing to the name of the library that
      called us.

      ... -- variable number argument list.  These are pointers to
      the actual parameters of the API functions.

  Return Value:

      Returns TRUE if we want to short-circuit the API function;
      in other words, returning non-zero here forces the API function
      to return immediately before any other actions take place.

      Returns FALSE if we want to proceed with the API function.

--*/
{
    va_list        vl;            // used for variable arg-list parsing
    Cstack_c      *ThreadCstack;  // the Cstack_c object for this thread
    int            Index = 0;     // index into string we are creating
    BOOL           ReturnValue;   // value to return
    LPFNDTHANDLER  HdlFunc;       // pointer to handler function
    int            Counter;       // counter popped off the cstack
    int            OriginalError; // any pending error is saved

    if (OutputStyle==NO_OUTPUT)
        return FALSE;

    OriginalError = GetLastError();
    EnterCriticalSection(&CrSec);

    // Wait until the debug window is ready to receive text for output.
    WaitForSingleObject(TextOutEvent, INFINITE);

    va_start(vl, LibraryName);

    // Get the Cstack_c object for this thread; create and register one on
    // first sight of a foreign thread.
    ThreadCstack = (Cstack_c *)TlsGetValue(TlsIndex);
    if (!ThreadCstack){
        ThreadCstack = new Cstack_c();
        TlsSetValue(TlsIndex, (LPVOID)ThreadCstack);
        StringCchPrintf(Buffer, TEXT_LEN-1, "0x%X Foriegn thread\n", GetCurrentThreadId());
        DTTextOut(LogFileHandle, Buffer, OutputStyle);
    } //if

    // Start building an output string with some info that's
    // independent of which API function called us.
    //
    // BUGFIX: StringCchPrintf returns an HRESULT (S_OK == 0 on success),
    // NOT the number of characters written, so the old
    // "Index = StringCchPrintf(...)" left Index at 0 and the handler
    // overwrote this prefix. Measure the formatted prefix explicitly.
    StringCchPrintf(Buffer, TEXT_LEN-1, "Function call: %d ", ThreadCstack->CGetCounter());
    Index = lstrlen(Buffer);

    // Push the counter & increment.
    ThreadCstack->CPush();

    // Reset the error to what it was when the function started.
    SetLastError(OriginalError);

    // Call the appropriate handling function, output the buffer.
    // BUGFIX: also reject negative notification codes before indexing
    // the handler table.
    if ((NotificationCode >= 0) && (NotificationCode < MAX_DTCODE) && HdlFuncTable[NotificationCode]) {
        HdlFunc = HdlFuncTable[NotificationCode];
        ReturnValue = (*HdlFunc)(vl, ReturnCode, LibraryName, Buffer, Index, TEXT_LEN, TRUE);
    } else {
        StringCchPrintf(Buffer + Index, TEXT_LEN - Index - 1, "Unknown function called!\r\n");
        DTTextOut(LogFileHandle, Buffer, OutputStyle);
        ReturnValue = FALSE;
    }

    // BUGFIX: every va_start must be paired with va_end (C standard);
    // it was missing here.
    va_end(vl);

    // If we are returning TRUE, then the API/SPI function will be
    // short-circuited.  We must pop the thread stack, since no
    // corresponding WSAPostApiNotify will be called.
    if (ReturnValue) {
        ThreadCstack->CPop(Counter);
    }

    LeaveCriticalSection(&CrSec);

    // In case the error has changed since the handler returned, we
    // want to set it back.
    SetLastError(OriginalError);

    return(ReturnValue);
} // WSAPreApiNotify()
// Destructor: revokes OLE drag&drop registration, deals with still-running
// shell operation threads (asking the user whether to terminate or wait),
// then tears down drag state and the thread-list critical section.
CDragDrop::~CDragDrop()
{
    DestroyDragImageBits();
    DestroyDragImageWindow();

    // Undo RegisterDragDrop for the main window, if it was registered.
    if (mb_DragDropRegistered && ghWnd)
    {
        mb_DragDropRegistered = FALSE;
        RevokeDragDrop(ghWnd);
    }

    // Snapshot "any shell operation still running?" under the lock.
    EnterCriticalSection(&m_CrThreads);
    BOOL lbEmpty = m_OpThread.empty() && !InDragDrop();
    LeaveCriticalSection(&m_CrThreads);

    if (!lbEmpty)
    {
        if (MessageBox(ghWnd, L"Not all shell operations was finished!\r\nDo You want to terminate them (it's may be harmful)?",
                      gpConEmu->GetDefaultTitle(), MB_YESNO|MB_ICONEXCLAMATION) == IDYES)
        {
            // Terminate all shell (copying) threads
            // NOTE(review): TerminateThread can leak resources held by the
            // thread; accepted here as a last-resort, user-confirmed action.
            EnterCriticalSection(&m_CrThreads);
            std::vector<ThInfo>::iterator iter = m_OpThread.begin();
            while (iter != m_OpThread.end())
            {
                HANDLE hThread = iter->hThread;
                TerminateThread(hThread, 100);
                CloseHandle(hThread);
                iter = m_OpThread.erase(iter);
            }
            LeaveCriticalSection(&m_CrThreads);
        }
        else
        {
            // Wait until finished: poll the thread list every 100 ms.
            BOOL lbActive = TRUE;
            while (lbActive)
            {
                Sleep(100);
                EnterCriticalSection(&m_CrThreads);
                lbActive = (!m_OpThread.empty()) || InDragDrop();
                LeaveCriticalSection(&m_CrThreads);
            }
        }
    }
    else
    {
        // No unfinished threads remain.
        // -- LeaveCriticalSection(&m_CrThreads); -- 101229: the section was already left above
    }

    // Shut down all drag threads.
    TerminateDrag();

    //if (m_pfpi) free(m_pfpi); m_pfpi=NULL;
    //if (mp_DesktopID) { CoTaskMemFree(mp_DesktopID); mp_DesktopID = NULL; }

    DeleteCriticalSection(&m_CrThreads);
}
// Initializes the instrument "tail-packet time" frame: rebuilds the send
// command structure from the server setup data, sizes the send/receive
// buffers from the instrument count, and (re)allocates the command-word
// set plus the send/receive frame buffers. All work is done under the
// frame's own critical section.
void OnInitInstrTailTimeFrame(m_oTailTimeFrameStruct* pTailTimeFrame, m_oInstrumentCommInfoStruct* pCommInfo, m_oConstVarStruct* pConstVar)
{
    ASSERT(pTailTimeFrame != NULL);
    ASSERT(pCommInfo != NULL);
    ASSERT(pConstVar != NULL);

    EnterCriticalSection(&pTailTimeFrame->m_oSecTailTimeFrame);

    // Rebuild the outgoing command structure from scratch.
    if (pTailTimeFrame->m_pCommandStructSet != NULL)
    {
        delete pTailTimeFrame->m_pCommandStructSet;
        pTailTimeFrame->m_pCommandStructSet = NULL;
    }
    pTailTimeFrame->m_pCommandStructSet = new m_oInstrumentCommandStruct;

    // Source IP address.
    pTailTimeFrame->m_pCommandStructSet->m_uiSrcIP = pCommInfo->m_pServerSetupData->m_oXMLIPSetupData.m_uiSrcIP;
    // Destination IP address.
    pTailTimeFrame->m_pCommandStructSet->m_uiAimIP = pCommInfo->m_pServerSetupData->m_oXMLIPSetupData.m_uiAimIP;
    // Destination port = configured port plus the receive-port offset.
    pTailTimeFrame->m_pCommandStructSet->m_usAimPort = pCommInfo->m_pServerSetupData->m_oXMLPortSetupData.m_usAimPort + pCommInfo->m_pServerSetupData->m_oXMLParameterSetupData.m_usNetRcvPortMove;
    // Tail-time send buffer: one frame per instrument.
    pTailTimeFrame->m_uiSndBufferSize = pConstVar->m_iInstrumentNum * pConstVar->m_iSndFrameSize;
    // Tail-time reply receive buffer: one frame per instrument.
    pTailTimeFrame->m_uiRcvBufferSize = pConstVar->m_iInstrumentNum * pConstVar->m_iRcvFrameSize;
    // Port on which tail-time query replies are returned.
    pTailTimeFrame->m_pCommandStructSet->m_usReturnPort = pCommInfo->m_pServerSetupData->m_oXMLPortSetupData.m_usTailTimeReturnPort;
    // Receive-port offset used by the tail-time query.
    pTailTimeFrame->m_usPortMove = pCommInfo->m_pServerSetupData->m_oXMLParameterSetupData.m_usNetRcvPortMove;

    // Reset the intra-frame communication info.
    // Command code: 1 = set-command reply, 2 = query-command reply,
    // 3 = AD sample data resend.
    pTailTimeFrame->m_pCommandStructSet->m_usCommand = pConstVar->m_usSendQueryCmd;
    // Reset the frame-content parsing state.
    ResetInstrFramePacket(pTailTimeFrame->m_pCommandStructSet);

    // Reallocate and clear the send frame buffer.
    if (pTailTimeFrame->m_cpSndFrameData != NULL)
    {
        delete[] pTailTimeFrame->m_cpSndFrameData;
        pTailTimeFrame->m_cpSndFrameData = NULL;
    }
    pTailTimeFrame->m_cpSndFrameData = new char[pConstVar->m_iSndFrameSize];
    memset(pTailTimeFrame->m_cpSndFrameData, pConstVar->m_cSndFrameBufInit, pConstVar->m_iSndFrameSize);

    // Reallocate and clear the tail-time query command-word set.
    if (pTailTimeFrame->m_pbyCommandWord != NULL)
    {
        delete[] pTailTimeFrame->m_pbyCommandWord;
        pTailTimeFrame->m_pbyCommandWord = NULL;
    }
    pTailTimeFrame->m_pbyCommandWord = new BYTE[pConstVar->m_iCommandWordMaxNum];
    memset(pTailTimeFrame->m_pbyCommandWord, pConstVar->m_cSndFrameBufInit, pConstVar->m_iCommandWordMaxNum);
    // Number of tail-time query command words currently stored.
    pTailTimeFrame->m_usCommandWordNum = 0;

    // Rebuild the reply command structure and reset its parsing state.
    if (pTailTimeFrame->m_pCommandStructReturn != NULL)
    {
        delete pTailTimeFrame->m_pCommandStructReturn;
        pTailTimeFrame->m_pCommandStructReturn = NULL;
    }
    pTailTimeFrame->m_pCommandStructReturn = new m_oInstrumentCommandStruct;
    ResetInstrFramePacket(pTailTimeFrame->m_pCommandStructReturn);

    // Reallocate and clear the receive frame buffer.
    if (pTailTimeFrame->m_cpRcvFrameData != NULL)
    {
        delete[] pTailTimeFrame->m_cpRcvFrameData;
        pTailTimeFrame->m_cpRcvFrameData = NULL;
    }
    pTailTimeFrame->m_cpRcvFrameData = new char[pConstVar->m_iRcvFrameSize];
    memset(pTailTimeFrame->m_cpRcvFrameData, pConstVar->m_cSndFrameBufInit, pConstVar->m_iRcvFrameSize);

    LeaveCriticalSection(&pTailTimeFrame->m_oSecTailTimeFrame);
}
// Registers a child thread handle in the global tracking table.
// g_Lock serializes concurrent registrations from multiple threads.
// NOTE(review): there is no bounds check on g_ChildThreadIndex — confirm
// the table capacity covers the maximum number of child threads.
void AddChildThread(HANDLE hThread)
{
    EnterCriticalSection(&g_Lock);
    g_ChildThreadInfo[g_ChildThreadIndex] = hThread;
    ++g_ChildThreadIndex;
    LeaveCriticalSection(&g_Lock);
}
//-----------------------------------------------------------------------------
// Name: CDPlay8Client::JoinSession()
// Desc: Connects synchronously to the enumerated session at index `num`,
//       using host/device addresses captured during enumeration. Returns
//       S_OK on success or the DirectPlay error; "expected" server-exit
//       errors are returned without logging.
//-----------------------------------------------------------------------------
HRESULT CDPlay8Client::JoinSession( DWORD num )
{
    HRESULT hr;
    IDirectPlay8Address* pHostAddress = NULL;
    IDirectPlay8Address* pDeviceAddress = NULL;

    if( m_pDPlay == NULL )
        return E_FAIL;

    DXTRACE( TEXT("MazeClient: Trying to connect to server\n") );

    // Describe the application/session we want to join.
    DPN_APPLICATION_DESC dpnAppDesc;
    ZeroMemory( &dpnAppDesc, sizeof( DPN_APPLICATION_DESC ) );
    dpnAppDesc.dwSize = sizeof( DPN_APPLICATION_DESC );
    dpnAppDesc.guidApplication = StressMazeAppGUID;
    dpnAppDesc.guidInstance = m_Sessions[num].guidInstance;

    EnterCriticalSection( &m_csLock );

    // Copy the host and device address pointers, and addref them.
    // If this is not done, then there is a rare chance that
    // EnumSessionCallback() may be called during the Connect() call
    // and destroy the address before DirectPlay gets a chance to copy them.
    pHostAddress = m_pHostAddresses[num];
    pHostAddress->AddRef();

    pDeviceAddress = m_pDeviceAddresses[num];
    pDeviceAddress->AddRef();

    LeaveCriticalSection( &m_csLock );

    // Connect to the remote host
    // The enumeration is automatically canceled after Connect is called
    if( FAILED( hr = m_pDPlay->Connect( &dpnAppDesc,       // Application description
                                        pHostAddress,      // Session host address
                                        pDeviceAddress,    // Address of device used to connect to the host
                                        NULL, NULL,        // Security descriptions & credentials (MBZ in DPlay8)
                                        NULL, 0,           // User data & its size
                                        NULL,              // Asynchronous connection context (returned with DPNMSG_CONNECT_COMPLETE in async handshaking)
                                        NULL,              // Asynchronous connection handle (used to cancel connection process)
                                        DPNOP_SYNC ) ) )   // Connect synchronously
    {
        if( hr == DPNERR_NORESPONSE || hr == DPNERR_ABORTED )
            goto LCleanup; // These are possible if the server exits while joining
        if( hr == DPNERR_INVALIDINSTANCE )
            goto LCleanup; // This is possible if the original server exits and another server comes online while we are connecting
        DXTRACE_ERR_NOMSGBOX( TEXT("Connect"), hr );
        goto LCleanup;
    }

    m_bSessionLost = FALSE;
    DXTRACE( TEXT("MazeClient: Connected to server. Enum automatically canceled\n") );

    // Refresh connection statistics and remember when we did so.
    UpdateConnectionInfo();
    m_fLastUpdateConnectInfoTime = DXUtil_Timer( TIMER_GETAPPTIME );

LCleanup:
    // Balance the AddRef()s taken above regardless of outcome.
    SAFE_RELEASE( pHostAddress );
    SAFE_RELEASE( pDeviceAddress );
    return hr;
}
/*! \internal

    Waits up to \a timeout for a message on the calling thread's queue and
    dispatches it to the recipient. Returns KHM_ERROR_SUCCESS after a
    normal dispatch, KHM_ERROR_EXIT when the message is
    KMSG_SYSTEM/KMSG_SYSTEM_EXIT, and KHM_ERROR_TIMEOUT otherwise.

    \note Obtains ::cs_kmq_global, kmq_queue::cs, ::cs_kmq_msg_ref,
          ::cs_kmq_msg,
 */
KHMEXP khm_int32 KHMAPI kmq_dispatch(kmq_timer timeout) {
    kmq_queue * q;
    kmq_message_ref * r;
    kmq_message *m;
    DWORD hr;

    q = kmqint_get_thread_queue();
    assert(q->wait_o);
    hr = WaitForSingleObject(q->wait_o, timeout);
    if(hr == WAIT_OBJECT_0) {
        /* signalled */
        kmqint_get_queue_message_ref(q, &r);
        m = r->msg;
        if(m->type != KMSG_SYSTEM || m->subtype != KMSG_SYSTEM_EXIT) {
            khm_boolean rv;

            /* Run the handler inside the message's error context, if any. */
            if (m->err_ctx)
                kherr_push_context(m->err_ctx);

            /* TODO: before dispatching the message, the message being
               dispatched for this thread needs to be stored so that it
               can be looked up in kmq_is_call_aborted(). This needs to
               happen in kmq_wm_dispatch() and kmq_wm_begin() as well. */

            /* dispatch */
            rv = r->recipient(m->type, m->subtype, m->uparam, m->vparam);

            if (m->err_ctx)
                kherr_pop_context();

            /* Bookkeeping: drop our reference, tally the result, and free
               the message once every recipient has responded. Lock order:
               cs_kmq_msg before cs_kmq_msg_ref. */
            EnterCriticalSection(&cs_kmq_msg);
            EnterCriticalSection(&cs_kmq_msg_ref);
            kmqint_put_message_ref(r);
            LeaveCriticalSection(&cs_kmq_msg_ref);

            if(KHM_SUCCEEDED(rv))
                m->nCompleted++;
            else
                m->nFailed++;

            if(m->nCompleted + m->nFailed == m->nSent) {
                kmqint_put_message(m);
            }
            LeaveCriticalSection(&cs_kmq_msg);

            return KHM_ERROR_SUCCESS;
        } else {
            /* System-exit message: never dispatched to the recipient, but
               the reference and completion counts are still settled. */
            EnterCriticalSection(&cs_kmq_msg);
            EnterCriticalSection(&cs_kmq_msg_ref);
            kmqint_put_message_ref(r);
            LeaveCriticalSection(&cs_kmq_msg_ref);

            m->nCompleted++;
            if(m->nCompleted + m->nFailed == m->nSent) {
                kmqint_put_message(m);
            }
            LeaveCriticalSection(&cs_kmq_msg);

            return KHM_ERROR_EXIT;
        }
    } else {
        /* NOTE(review): WAIT_FAILED and WAIT_ABANDONED are also reported
           as KHM_ERROR_TIMEOUT here — confirm callers don't need to
           distinguish them. */
        return KHM_ERROR_TIMEOUT;
    }
}