/* Tear down every GVL synchronization primitive owned by the VM:
 * the three condition variables first, then the mutex that guards them.
 * Called once during VM shutdown; the GVL must no longer be in use. */
static void
gvl_destroy(rb_vm_t *vm)
{
    native_cond_destroy(&vm->gvl.switch_wait_cond);
    native_cond_destroy(&vm->gvl.switch_cond);
    native_cond_destroy(&vm->gvl.cond);
    native_mutex_destroy(&vm->gvl.lock);
}
static rb_thread_t * register_cached_thread_and_wait(void) { rb_thread_cond_t cond = { PTHREAD_COND_INITIALIZER, }; volatile rb_thread_t *th_area = 0; struct timeval tv; struct timespec ts; struct cached_thread_entry *entry = (struct cached_thread_entry *)malloc(sizeof(struct cached_thread_entry)); if (entry == 0) { return 0; /* failed -> terminate thread immediately */ } gettimeofday(&tv, 0); ts.tv_sec = tv.tv_sec + 60; ts.tv_nsec = tv.tv_usec * 1000; pthread_mutex_lock(&thread_cache_lock); { entry->th_area = &th_area; entry->cond = &cond; entry->next = cached_thread_root; cached_thread_root = entry; native_cond_timedwait(&cond, &thread_cache_lock, &ts); { struct cached_thread_entry *e = cached_thread_root; struct cached_thread_entry *prev = cached_thread_root; while (e) { if (e == entry) { if (prev == cached_thread_root) { cached_thread_root = e->next; } else { prev->next = e->next; } break; } prev = e; e = e->next; } } free(entry); /* ok */ native_cond_destroy(&cond); } pthread_mutex_unlock(&thread_cache_lock); return (rb_thread_t *)th_area; }
/*
 * Destroy and free a condition variable created by the matching create call.
 *
 * Returns 0 on success, or 1 when p_cond is NULL.  The memory is poisoned
 * with 0xff before being freed so that stale references fail fast.
 */
int
NdbCondition_Destroy(struct NdbCondition* p_cond)
{
    if (p_cond == NULL)
        return 1;

    /* The original stored native_cond_destroy()'s result in a local that
     * was never read (dead store); the result is still deliberately
     * ignored so the function's return contract is unchanged. */
    (void)native_cond_destroy(&p_cond->cond);

    /* Poison before freeing to surface use-after-free bugs. */
    memset(p_cond, 0xff, sizeof(struct NdbCondition));
    free(p_cond);

    return 0;
}
/* GC free callback for Mutex objects.  If the mutex is still held by a
 * thread when it is collected, force-unlock it first, then destroy the
 * underlying native primitives and release the wrapper memory. */
static void
mutex_free(void *ptr)
{
    rb_mutex_t *mutex = ptr;

    if (mutex) {
        if (mutex->th) {
            /* rb_warn("free locked mutex"); */
            const char *err = rb_mutex_unlock_th(mutex, mutex->th);
            if (err) rb_bug("%s", err);
        }
        native_mutex_destroy(&mutex->lock);
        native_cond_destroy(&mutex->cond);
    }
    ruby_xfree(ptr);
}
/* Release the native (pthread-level) resources attached to a Ruby thread:
 * currently just the condition variable it sleeps on. */
static void
native_thread_destroy(rb_thread_t *th)
{
    native_cond_destroy(&th->native_thread_data.sleep_cond);
}
/* Destroy a prioritized read/write lock: the writer-wait condition first,
 * then the guarding mutex.  Always returns 0. */
int
rw_pr_destroy(rw_pr_lock_t *rwlock)
{
    native_cond_destroy(&rwlock->no_active_readers);
    native_mutex_destroy(&rwlock->lock);
    return 0;
}