/*
 * Acquire the Win32 event-based mutex `lock`, looping until the wait
 * succeeds.  Always returns 0; an interrupted wait is reported to the
 * caller through errno == EINTR (NOTE(review): callers must clear errno
 * beforehand if they need to distinguish the cases — confirm at call
 * sites outside this view).
 */
static int
w32_mutex_lock(HANDLE lock)
{
    DWORD result;

    while (1) {
        thread_debug("native_mutex_lock: %p\n", lock);
        result = w32_wait_events(&lock, 1, INFINITE, 0);
        switch (result) {
          case WAIT_OBJECT_0:
            /* get mutex object */
            thread_debug("acquire mutex: %p\n", lock);
            return 0;
          case WAIT_OBJECT_0 + 1:
            /* interrupt: report via errno, still return success */
            errno = EINTR;
            thread_debug("acquire mutex interrupted: %p\n", lock);
            return 0;
          case WAIT_TIMEOUT:
            /* should not happen with INFINITE; retry defensively */
            thread_debug("timeout mutex: %p\n", lock);
            break;
          case WAIT_ABANDONED:
            rb_bug("win32_mutex_lock: WAIT_ABANDONED");
            break;
          default:
            /* DWORD is unsigned long: %lu, not %ld */
            rb_bug("win32_mutex_lock: unknown result (%lu)", result);
            break;
        }
    }
    return 0;
}
/*
 * Lock the VM-level mutex `lock`.
 *
 * With USE_WIN32_MUTEX the lock is an event HANDLE waited on through
 * w32_wait_events (interruptible); otherwise it is a plain
 * CRITICAL_SECTION.  Returns 0 in every case; an interrupted wait sets
 * errno = EINTR but still reports success.
 */
static int
native_mutex_lock(rb_thread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    DWORD result;

    while (1) {
        thread_debug("native_mutex_lock: %p\n", *lock);
        /* `&*lock` in the original is just `lock` */
        result = w32_wait_events(lock, 1, INFINITE, 0);
        switch (result) {
          case WAIT_OBJECT_0:
            /* get mutex object */
            thread_debug("acquire mutex: %p\n", *lock);
            return 0;
          case WAIT_OBJECT_0 + 1:
            /* interrupt */
            errno = EINTR;
            thread_debug("acquire mutex interrupted: %p\n", *lock);
            return 0;
          case WAIT_TIMEOUT:
            thread_debug("timeout mutex: %p\n", *lock);
            break;
          case WAIT_ABANDONED:
            rb_bug("win32_mutex_lock: WAIT_ABANDONED");
            break;
          default:
            /* fix: DWORD is unsigned long — %lu, the original's %d was a
             * format/argument mismatch (undefined behavior) */
            rb_bug("win32_mutex_lock: unknown result (%lu)", result);
            break;
        }
    }
    return 0;
#else
    EnterCriticalSection(lock);
    return 0;
#endif
}
/*
 * Try to lock `lock` without blocking.
 *
 * Returns 0 on success, EBUSY when the lock is already held, EINVAL on
 * any unexpected wait result.  The USE_WIN32_MUTEX path polls the event
 * HANDLE with a 1ms timeout; the fallback uses TryEnterCriticalSection.
 */
static int
native_mutex_trylock(rb_thread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    int rc;

    thread_debug("native_mutex_trylock: %p\n", *lock);
    rc = w32_wait_events(&*lock, 1, 1, 0);
    thread_debug("native_mutex_trylock result: %d\n", rc);

    if (rc == WAIT_OBJECT_0) {
        return 0;       /* acquired */
    }
    if (rc == WAIT_TIMEOUT) {
        return EBUSY;   /* held by someone else */
    }
    return EINVAL;      /* anything else is an error */
#else
    return TryEnterCriticalSection(lock) == 0;
#endif
}
/*
 * Sleep the current thread for *tv (forever when tv == NULL) with the
 * GVL released.  ubf_handle is published as the unblock function so
 * another thread can wake this one; th->interrupt_lock serializes all
 * access to the unblock fields — the lock/publish/clear ordering here
 * is deliberate and must not be rearranged.
 */
static void
native_sleep(rb_thread_t *th, struct timeval *tv)
{
    DWORD msec;
    if (tv) {
        /* NOTE(review): tv_sec*1000 may overflow DWORD for very large
         * intervals — presumably callers bound tv; confirm. */
        msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
    }
    else {
        msec = INFINITE;
    }

    GVL_UNLOCK_BEGIN();
    {
        DWORD ret;

        /* publish the unblock function under interrupt_lock so other
         * threads see a consistent (func, arg) pair */
        native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = ubf_handle;
        th->unblock.arg = th;
        native_mutex_unlock(&th->interrupt_lock);

        if (RUBY_VM_INTERRUPTED(th)) {
            /* interrupted. return immediate */
        }
        else {
            thread_debug("native_sleep start (%lu)\n", msec);
            ret = w32_wait_events(0, 0, msec, th);
            thread_debug("native_sleep done (%lu)\n", ret);
        }

        /* clear the unblock function before reacquiring the GVL */
        native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = 0;
        th->unblock.arg = 0;
        native_mutex_unlock(&th->interrupt_lock);
    }
    GVL_UNLOCK_END();
}
/*
 * Older variant of native_sleep: saves/restores th->status around the
 * wait and sets the unblock function fields directly (no interrupt
 * lock in this version).  Sleeps for *tv, or forever when tv == NULL,
 * with the GVL released; pending interrupts are checked on return.
 */
static void
native_sleep(rb_thread_t *th, struct timeval *tv)
{
    DWORD msec;
    if (tv) {
        /* NOTE(review): tv_sec*1000 may overflow DWORD for very large
         * intervals — presumably callers bound tv; confirm. */
        msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
    }
    else {
        msec = INFINITE;
    }

    GVL_UNLOCK_BEGIN();
    {
        DWORD ret;
        int status = th->status;      /* restore after the wait */
        th->status = THREAD_STOPPED;
        th->unblock_function = ubf_handle;
        th->unblock_function_arg = th;
        if (RUBY_VM_INTERRUPTED(th)) {
            /* interrupted. return immediate */
        }
        else {
            thread_debug("native_sleep start (%d)\n", (int)msec);
            ret = w32_wait_events(0, 0, msec, th);
            /* fix: cast DWORD to int to match %d (the original passed
             * the raw DWORD, a format/argument mismatch) */
            thread_debug("native_sleep done (%d)\n", (int)ret);
        }
        th->unblock_function = 0;
        th->unblock_function_arg = 0;
        th->status = status;
    }
    GVL_UNLOCK_END();
    RUBY_VM_CHECK_INTS();
}
/*
 * Block until the native thread with handle `th` terminates.
 *
 * Bug fix: the previous code passed a timeout of 0, which makes
 * w32_wait_events poll and return immediately — the "join" never
 * actually waited.  Use INFINITE so we block until thread exit.
 */
static void
native_thread_join(HANDLE th)
{
    w32_wait_events(&th, 1, INFINITE, 0);
}
int rb_w32_sleep(unsigned long msec) { return w32_wait_events(0, 0, msec, GET_THREAD()); }
/*
 * Wait on `num` event handles for up to `timeout` on behalf of the
 * current Ruby thread; thin forwarder to w32_wait_events.
 */
int
rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
{
    rb_thread_t *th = GET_THREAD();
    return w32_wait_events(events, num, timeout, th);
}
/* @internal */
/*
 * Poll for a pending interrupt on `th` without blocking: a zero-timeout
 * wait with no event handles.  Returns w32_wait_events's result.
 */
int
rb_w32_check_interrupt(rb_thread_t *th)
{
    int ret = w32_wait_events(0, 0, 0, th);
    return ret;
}
int rb_w32_sleep(unsigned long msec) { return w32_wait_events(0, 0, msec, ruby_thread_from_native()); }
/*
 * Variant of rb_w32_wait_events_blocking that resolves the Ruby thread
 * from native TLS before delegating to w32_wait_events.
 */
int
rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
{
    rb_thread_t *th = ruby_thread_from_native();
    return w32_wait_events(events, num, timeout, th);
}