Example #1
static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
{
#if GVL_SIMPLE_LOCK
    /* the GVL is simply this mutex: hold it until gvl_release() unlocks it */
    native_mutex_lock(&vm->gvl.lock);
#else
    native_mutex_lock(&vm->gvl.lock);
    if (vm->gvl.waiting > 0 || vm->gvl.acquired != 0) {
	if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): sleep\n", th);
	gvl_waiting_push(vm, th);
        if (GVL_DEBUG) gvl_show_waiting_threads(vm);

	while (vm->gvl.acquired != 0 || vm->gvl.waiting_threads != th) {
	    native_cond_wait(&th->native_thread_data.gvl_cond, &vm->gvl.lock);
	}
	gvl_waiting_shift(vm, th);
    }
    else {
	/* do nothing */
    }
    vm->gvl.acquired = 1;
    native_mutex_unlock(&vm->gvl.lock);
#endif
    if (GVL_DEBUG) gvl_show_waiting_threads(vm);
    if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
}
Example #2
static void
gvl_yield(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);

    gvl_release_common(vm);

    /* Another thread is already processing a GVL yield. */
    if (UNLIKELY(vm->gvl.wait_yield)) {
	while (vm->gvl.wait_yield)
	    native_cond_wait(&vm->gvl.switch_wait_cond, &vm->gvl.lock);
	goto acquire;
    }

    if (vm->gvl.waiting > 0) {
	/* Wait until another thread takes over the GVL. */
	vm->gvl.need_yield = 1;
	vm->gvl.wait_yield = 1;
	while (vm->gvl.need_yield)
	    native_cond_wait(&vm->gvl.switch_cond, &vm->gvl.lock);
	vm->gvl.wait_yield = 0;
    }
    else {
	native_mutex_unlock(&vm->gvl.lock);
	sched_yield();
	native_mutex_lock(&vm->gvl.lock);
    }

    native_cond_broadcast(&vm->gvl.switch_wait_cond);
  acquire:
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}
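Examples #2, #9, and #10 manipulate several vm->gvl fields (lock, waiting, acquired, need_yield, wait_yield, switch_cond, switch_wait_cond) whose declaration never appears in this listing. A minimal sketch consistent with that usage is shown below; the struct/typedef names and field types are assumptions, and Examples #1 and #23 come from an older variant that also keeps a waiting_threads queue plus a per-thread gvl_cond.

typedef struct rb_global_vm_lock_sketch {
    rb_nativethread_lock_t lock;              /* guards every field below */
    unsigned long waiting;                    /* threads currently queued for the GVL */
    rb_thread_t *waiting_threads;             /* waiter queue head (older variant, Examples #1/#23) */
    int acquired;                             /* non-zero while some thread owns the GVL */
    int need_yield;                           /* gvl_yield() is waiting for a hand-over */
    int wait_yield;                           /* a yield is already in progress */
    rb_nativethread_cond_t switch_cond;       /* signalled when the GVL changes hands */
    rb_nativethread_cond_t switch_wait_cond;  /* signalled once the pending yield has finished */
} rb_global_vm_lock_sketch_t;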
Example #3
static void
native_cond_wait(rb_thread_cond_t *cond, rb_thread_lock_t *mutex)
{
    DWORD r;
    struct cond_event_entry entry;

    entry.next = 0;
    entry.event = CreateEvent(0, FALSE, FALSE, 0);

    /* cond is guarded by mutex */
    if (cond->next) {
	cond->last->next = &entry;
	cond->last = &entry;
    }
    else {
	cond->next = &entry;
	cond->last = &entry;
    }

    native_mutex_unlock(mutex);
    {
	r = WaitForSingleObject(entry.event, INFINITE);
	if (r != WAIT_OBJECT_0) {
	    rb_bug("native_cond_wait: WaitForSingleObject returns %lu", r);
	}
    }
    native_mutex_lock(mutex);

    w32_close_handle(entry.event);
}
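native_cond_wait above appends a per-waiter entry to the condition variable, but neither type is shown. A declaration consistent with this usage (field names are taken from the code; the exact layout is an assumption) could be:

struct cond_event_entry {
    struct cond_event_entry *next;   /* next waiter, FIFO order */
    HANDLE event;                    /* auto-reset event this waiter blocks on */
};

typedef struct rb_thread_cond_struct {
    struct cond_event_entry *next;   /* first waiter, or NULL when nobody waits */
    struct cond_event_entry *last;   /* last waiter, for O(1) append */
} rb_thread_cond_t;

Example #8 below uses a later, circular doubly linked variant in which each entry also carries a prev pointer and the condition variable itself acts as the list head.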
Example #4
int rw_pr_wrlock(rw_pr_lock_t *rwlock)
{
  native_mutex_lock(&rwlock->lock);

  if (rwlock->active_readers != 0)
  {
    /* There are active readers. We have to wait until they are gone. */
    rwlock->writers_waiting_readers++;

    while (rwlock->active_readers != 0)
      native_cond_wait(&rwlock->no_active_readers, &rwlock->lock);

    rwlock->writers_waiting_readers--;
  }

  /*
    We own the 'lock' mutex, so there are no active writers.
    There are no active readers either.
    This means that we can grant the wr-lock.
    Not releasing the 'lock' mutex until unlock blocks both
    requests for rd-locks and for wr-locks.
    Set the 'active_writer' flag to simplify unlock.

    Because a wr-lock/unlock in the absence of contention from
    readers is essentially a mutex lock/unlock with a few simple
    checks, this rwlock implementation is wr-lock optimized.
  */
  rwlock->active_writer= TRUE;
#ifdef SAFE_MUTEX
  rwlock->writer_thread= pthread_self();
#endif
  return 0;
}
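rw_pr_wrlock() and its companions further down (Examples #17 and #20) only touch a handful of fields, so the underlying type is presumably close to the sketch below; the exact declaration, and the native_mutex_t/native_cond_t/my_bool wrapper names, are assumptions based on that usage.

typedef struct st_rw_pr_lock
{
  native_mutex_t lock;                  /* serializes writers and protects the counters */
  native_cond_t no_active_readers;      /* signalled when the last reader leaves */
  unsigned int active_readers;          /* readers currently holding the lock */
  unsigned int writers_waiting_readers; /* writers blocked waiting for readers to drain */
  my_bool active_writer;                /* TRUE while a writer holds the lock */
#ifdef SAFE_MUTEX
  pthread_t writer_thread;              /* owner, recorded in debug builds only */
#endif
} rw_pr_lock_t;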
Example #5
int
NdbInfo::openTable(const char* table_name,
                   const NdbInfo::Table** table_copy)
{
  native_mutex_lock(&m_mutex);

  if (!check_tables()){
    native_mutex_unlock(&m_mutex);
    return ERR_ClusterFailure;
  }

  Table* tab;
  if (!m_tables.search(table_name, &tab))
  {
    // No such table existed
    native_mutex_unlock(&m_mutex);
    return ERR_NoSuchTable;
  }

  // Return a _copy_ of the table
  *table_copy = new Table(*tab);

  native_mutex_unlock(&m_mutex);
  return 0;
}
Example #6
static int
use_cached_thread(rb_thread_t *th)
{
    int result = 0;
#if USE_THREAD_CACHE
    struct cached_thread_entry *entry;

    if (cached_thread_root) {
	native_mutex_lock(&thread_cache_lock);
	entry = cached_thread_root;
	{
	    if (cached_thread_root) {
		cached_thread_root = entry->next;
		*entry->th_area = th;
		result = 1;
	    }
	}
	if (result) {
	    native_cond_signal(entry->cond);
	}
	native_mutex_unlock(&thread_cache_lock);
    }
#endif
    return result;
}
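use_cached_thread() pops an entry that register_cached_thread_and_wait() (Example #19) pushes onto the cache; the entry type is never shown, but from the fields both sides touch it is presumably something like:

struct cached_thread_entry {
    volatile rb_thread_t **th_area;   /* where the parked thread finds the rb_thread_t it should run */
    rb_nativethread_cond_t *cond;     /* signalled to wake the parked native thread */
    struct cached_thread_entry *next; /* singly linked list rooted at cached_thread_root */
};

static struct cached_thread_entry *cached_thread_root; /* guarded by thread_cache_lock */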
Example #7
static int
native_thread_init_stack(rb_thread_t *th)
{
    rb_nativethread_id_t curr = pthread_self();

    if (pthread_equal(curr, native_main_thread.id)) {
	th->machine.stack_start = native_main_thread.stack_start;
	th->machine.stack_maxsize = native_main_thread.stack_maxsize;
    }
    else {
#ifdef STACKADDR_AVAILABLE
	void *start;
	size_t size;

	if (get_stack(&start, &size) == 0) {
	    th->machine.stack_start = start;
	    th->machine.stack_maxsize = size;
	}
#elif defined get_stack_of
	if (!th->machine.stack_maxsize) {
	    /* Wait for the creating thread, which holds interrupt_lock while
	     * it fills in stack_start/stack_maxsize via get_stack_of()
	     * (see native_thread_create in Example #13). */
	    native_mutex_lock(&th->interrupt_lock);
	    native_mutex_unlock(&th->interrupt_lock);
	}
#else
	rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
#endif
    }
#ifdef __ia64
    th->machine.register_stack_start = native_main_thread.register_stack_start;
    th->machine.stack_maxsize /= 2;
    th->machine.register_stack_maxsize = th->machine.stack_maxsize;
#endif
    return 0;
}
Example #8
static int
__cond_timedwait(rb_thread_cond_t *cond, rb_thread_lock_t *mutex, unsigned long msec)
{
    DWORD r;
    struct cond_event_entry entry;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    entry.event = CreateEvent(0, FALSE, FALSE, 0);

    /* cond is guarded by mutex */
    entry.next = head;
    entry.prev = head->prev;
    head->prev->next = &entry;
    head->prev = &entry;

    native_mutex_unlock(mutex);
    {
	r = WaitForSingleObject(entry.event, msec);
	if ((r != WAIT_OBJECT_0) && (r != WAIT_TIMEOUT)) {
	    rb_bug("native_cond_wait: WaitForSingleObject returns %lu", r);
	}
    }
    native_mutex_lock(mutex);

    entry.prev->next = entry.next;
    entry.next->prev = entry.prev;

    w32_close_handle(entry.event);
    return (r == WAIT_OBJECT_0) ? 0 : ETIMEDOUT;
}
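A matching signal function for the circular, doubly linked waiter list that __cond_timedwait manipulates is not part of this listing. A plausible sketch, assuming the same head-is-the-condvar convention, unlinks the oldest waiter and sets its event; leaving the entry self-linked keeps the waiter's own unlink after WaitForSingleObject harmless.

static void
native_cond_signal_sketch(rb_thread_cond_t *cond)
{
    /* cond is guarded by the caller's mutex, as in __cond_timedwait */
    struct cond_event_entry *head = (struct cond_event_entry *)cond;
    struct cond_event_entry *e = head->next;

    if (e != head) {
	/* unlink the oldest waiter and wake it */
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->next = e->prev = e;
	SetEvent(e->event);
    }
}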
Example #9
static void
gvl_release(rb_vm_t *vm)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_release_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}
Example #10
static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}
Example #11
static const char *
rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th)
{
    const char *err = NULL;

    native_mutex_lock(&mutex->lock);

    if (mutex->th == 0) {
	err = "Attempt to unlock a mutex which is not locked";
    }
    else if (mutex->th != th) {
	err = "Attempt to unlock a mutex which is locked by another thread";
    }
    else {
	mutex->th = 0;
	if (mutex->cond_waiting > 0)
	    native_cond_signal(&mutex->cond);
    }

    native_mutex_unlock(&mutex->lock);

    if (!err) {
	rb_mutex_t *volatile *th_mutex = &th->keeping_mutexes;
	while (*th_mutex != mutex) {
	    th_mutex = &(*th_mutex)->next_mutex;
	}
	*th_mutex = mutex->next_mutex;
	mutex->next_mutex = NULL;
    }

    return err;
}
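Examples #11, #14, #21, and #25 access mutex->lock, th, cond, cond_waiting, next_mutex, and allow_trap, but the structure itself never appears here. The following is only a sketch reconstructed from those accesses:

typedef struct rb_mutex_struct
{
    rb_nativethread_lock_t lock;        /* low-level lock protecting the fields below */
    rb_nativethread_cond_t cond;        /* waited on by blocked rb_mutex_lock() callers */
    rb_thread_t volatile *th;           /* owning thread, or 0 when unlocked */
    int cond_waiting;                   /* number of threads blocked on cond */
    struct rb_mutex_struct *next_mutex; /* links the owner's keeping_mutexes list */
    int allow_trap;                     /* may be locked from a trap handler */
} rb_mutex_t;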
Example #12
int
NdbInfo::openTable(Uint32 tableId,
                   const NdbInfo::Table** table_copy)
{
  native_mutex_lock(&m_mutex);

  if (!check_tables()){
    native_mutex_unlock(&m_mutex);
    return ERR_ClusterFailure;
  }

  // Find the table with the correct id
  const Table* table = NULL;
  for (size_t i = 0; i < m_tables.entries(); i++)
  {
    const Table* tmp = m_tables.value(i);
    if (tmp->m_table_id == tableId)
    {
      table = tmp;
      break;
    }
  }
  if (table == NULL)
  {
    // No such table existed
    native_mutex_unlock(&m_mutex);
    return ERR_NoSuchTable;
  }

  // Return a _copy_ of the table
  *table_copy = new Table(*table);

  native_mutex_unlock(&m_mutex);
  return 0;
}
Example #13
static int
native_thread_create(rb_thread_t *th)
{
    int err = 0;

    if (use_cached_thread(th)) {
	thread_debug("create (use cached thread): %p\n", (void *)th);
    }
    else {
#ifdef HAVE_PTHREAD_ATTR_INIT
	pthread_attr_t attr;
	pthread_attr_t *const attrp = &attr;
#else
	pthread_attr_t *const attrp = NULL;
#endif
	const size_t stack_size = th->vm->default_params.thread_machine_stack_size;
	const size_t space = space_size(stack_size);

        th->machine.stack_maxsize = stack_size - space;
#ifdef __ia64
        th->machine.stack_maxsize /= 2;
        th->machine.register_stack_maxsize = th->machine.stack_maxsize;
#endif

#ifdef HAVE_PTHREAD_ATTR_INIT
	CHECK_ERR(pthread_attr_init(&attr));

# ifdef PTHREAD_STACK_MIN
	thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
	CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
# endif

# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
	CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
# endif
	CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
#endif
#ifdef get_stack_of
	native_mutex_lock(&th->interrupt_lock);
#endif
	err = pthread_create(&th->thread_id, attrp, thread_start_func_1, th);
#ifdef get_stack_of
	if (!err) {
	    get_stack_of(th->thread_id,
			 &th->machine.stack_start,
			 &th->machine.stack_maxsize);
	}
	native_mutex_unlock(&th->interrupt_lock);
#endif
	thread_debug("create: %p (%d)\n", (void *)th, err);
	/* should be done in the created thread */
	fill_thread_id_str(th);
#ifdef HAVE_PTHREAD_ATTR_INIT
	CHECK_ERR(pthread_attr_destroy(&attr));
#endif
    }
    return err;
}
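CHECK_ERR is used above but never defined in this listing; since every wrapped call returns an errno-style value, a plausible definition (the rb_bug_errno reporter is an assumption) is:

#define CHECK_ERR(expr) \
    { int check_err = (expr); if (check_err) { rb_bug_errno(#expr, check_err); } }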
Example #14
static void
lock_interrupt(void *ptr)
{
    rb_mutex_t *mutex = (rb_mutex_t *)ptr;
    native_mutex_lock(&mutex->lock);
    if (mutex->cond_waiting > 0)
	native_cond_broadcast(&mutex->cond);
    native_mutex_unlock(&mutex->lock);
}
Example #15
void rho_ruby_stop_threadidle()
{
    if ( g_th_stored )
    {
        native_mutex_lock(&g_th_stored->vm->global_vm_lock);
        rb_thread_set_current(g_th_stored);
        g_th_stored = 0;
    }
}
Example #16
static void
native_sleep(rb_thread_t *th, struct timeval *timeout_tv)
{
    struct timespec timeout;
    rb_nativethread_lock_t *lock = &th->interrupt_lock;
    rb_nativethread_cond_t *cond = &th->native_thread_data.sleep_cond;

    if (timeout_tv) {
	struct timespec timeout_rel;

	timeout_rel.tv_sec = timeout_tv->tv_sec;
	timeout_rel.tv_nsec = timeout_tv->tv_usec * 1000;

	/* Solaris cond_timedwait() returns EINVAL if an argument is greater than
	 * current_time + 100,000,000, so cap the timeout at 100,000,000 seconds.
	 * Waking up early because of this cap is treated as a kind of spurious
	 * wakeup; callers of native_sleep must handle spurious wakeups anyway.
	 *
	 * See also [Bug #1341] [ruby-core:29702]
	 * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
	 */
	if (timeout_rel.tv_sec > 100000000) {
	    timeout_rel.tv_sec = 100000000;
	    timeout_rel.tv_nsec = 0;
	}

	timeout = native_cond_timeout(cond, timeout_rel);
    }

    GVL_UNLOCK_BEGIN();
    {
	native_mutex_lock(lock);
	th->unblock.func = ubf_pthread_cond_signal;
	th->unblock.arg = th;

	if (RUBY_VM_INTERRUPTED(th)) {
	    /* interrupted; return immediately */
	    thread_debug("native_sleep: interrupted before sleep\n");
	}
	else {
	    if (!timeout_tv)
		native_cond_wait(cond, lock);
	    else
		native_cond_timedwait(cond, lock, &timeout);
	}
	th->unblock.func = 0;
	th->unblock.arg = 0;

	native_mutex_unlock(lock);
    }
    GVL_UNLOCK_END();

    thread_debug("native_sleep done\n");
}
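native_cond_timeout() converts the (possibly clamped) relative timeout into the absolute timespec that native_cond_timedwait() expects. A simplified sketch of that conversion, ignoring any per-condvar monotonic-clock handling the real function may perform, is:

static struct timespec
native_cond_timeout_sketch(rb_nativethread_cond_t *cond, struct timespec rel)
{
    struct timeval tv;
    struct timespec abs;

    (void)cond; /* the real function may pick a clock per condition variable */
    gettimeofday(&tv, NULL);
    abs.tv_sec  = tv.tv_sec + rel.tv_sec;
    abs.tv_nsec = tv.tv_usec * 1000 + rel.tv_nsec;
    if (abs.tv_nsec >= 1000000000) { /* carry nanoseconds into seconds */
	abs.tv_sec += 1;
	abs.tv_nsec -= 1000000000;
    }
    return abs;
}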
Example #17
int rw_pr_rdlock(rw_pr_lock_t *rwlock)
{
  native_mutex_lock(&rwlock->lock);
  /*
    The fact that we were able to acquire the 'lock' mutex means
    that there are no active writers, so we can grant the rd-lock.
    Increment the active readers counter to prevent requests for
    the wr-lock from succeeding, then unlock the mutex.
  */
  rwlock->active_readers++;
  native_mutex_unlock(&rwlock->lock);
  return 0;
}
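None of these examples show how a rw_pr_lock_t is set up. An initializer consistent with the fields used above might look like the following; native_mutex_init and native_cond_init are assumed to exist as counterparts of the lock/wait calls used here, and the function name is hypothetical.

int rw_pr_init_sketch(rw_pr_lock_t *rwlock)
{
  native_mutex_init(&rwlock->lock);
  native_cond_init(&rwlock->no_active_readers);
  rwlock->active_readers= 0;
  rwlock->writers_waiting_readers= 0;
  rwlock->active_writer= FALSE;
#ifdef SAFE_MUTEX
  rwlock->writer_thread= 0;
#endif
  return 0;
}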
Example #18
static void
native_sleep(rb_thread_t *th, struct timeval *tv)
{
    DWORD msec;

    if (tv) {
	msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
    }
    else {
	msec = INFINITE;
    }

    GVL_UNLOCK_BEGIN();
    {
	DWORD ret;

	native_mutex_lock(&th->interrupt_lock);
	th->unblock.func = ubf_handle;
	th->unblock.arg = th;
	native_mutex_unlock(&th->interrupt_lock);

	if (RUBY_VM_INTERRUPTED(th)) {
	    /* interrupted; return immediately */
	}
	else {
	    thread_debug("native_sleep start (%lu)\n", msec);
	    ret = w32_wait_events(0, 0, msec, th);
	    thread_debug("native_sleep done (%lu)\n", ret);
	}

	native_mutex_lock(&th->interrupt_lock);
	th->unblock.func = 0;
	th->unblock.arg = 0;
	native_mutex_unlock(&th->interrupt_lock);
    }
    GVL_UNLOCK_END();
}
Example #19
static rb_thread_t *
register_cached_thread_and_wait(void)
{
    rb_nativethread_cond_t cond = RB_NATIVETHREAD_COND_INIT;
    volatile rb_thread_t *th_area = 0;
    struct timeval tv;
    struct timespec ts;
    struct cached_thread_entry *entry =
      (struct cached_thread_entry *)malloc(sizeof(struct cached_thread_entry));

    if (entry == 0) {
	return 0; /* failed -> terminate thread immediately */
    }

    gettimeofday(&tv, 0);
    /* park in the cache for at most 60 seconds before giving up */
    ts.tv_sec = tv.tv_sec + 60;
    ts.tv_nsec = tv.tv_usec * 1000;

    native_mutex_lock(&thread_cache_lock);
    {
	entry->th_area = &th_area;
	entry->cond = &cond;
	entry->next = cached_thread_root;
	cached_thread_root = entry;

	native_cond_timedwait(&cond, &thread_cache_lock, &ts);

	{
	    struct cached_thread_entry *e, **prev = &cached_thread_root;

	    while ((e = *prev) != 0) {
		if (e == entry) {
		    *prev = e->next;
		    break;
		}
		prev = &e->next;
	    }
	}

	free(entry); /* ok */
	native_cond_destroy(&cond);
    }
    native_mutex_unlock(&thread_cache_lock);

    return (rb_thread_t *)th_area;
}
Example #20
int rw_pr_unlock(rw_pr_lock_t *rwlock)
{
  if (rwlock->active_writer)
  {
    /* We are unlocking wr-lock. */
#ifdef SAFE_MUTEX
    rwlock->writer_thread= 0;
#endif
    rwlock->active_writer= FALSE;
    if (rwlock->writers_waiting_readers)
    {
      /*
        Avoid an expensive cond signal when there is no contention
        or the lock is used write-only.

        Note that, from a performance point of view, it would be better
        to signal the condition variable after unlocking the mutex (as
        that reduces the number of context switches).

        Unfortunately, that would mean such an rwlock could not be safely
        used by the MDL subsystem, which relies on the fact that it is OK
        to destroy an rwlock once it is in the unlocked state.
      */
      native_cond_signal(&rwlock->no_active_readers);
    }
    native_mutex_unlock(&rwlock->lock);
  }
  else
  {
    /* We are unlocking rd-lock. */
    native_mutex_lock(&rwlock->lock);
    rwlock->active_readers--;
    if (rwlock->active_readers == 0 &&
        rwlock->writers_waiting_readers)
    {
      /*
        If we are the last reader and there are waiting
        writers, wake them up.
      */
      native_cond_signal(&rwlock->no_active_readers);
    }
    native_mutex_unlock(&rwlock->lock);
  }
  return 0;
}
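Putting Examples #4, #17, and #20 together, a typical (hypothetical) caller of this prlock API would look like:

static rw_pr_lock_t config_lock;  /* assumed to be initialized elsewhere */
static int config_value;

static int read_config(void)
{
  int v;
  rw_pr_rdlock(&config_lock);     /* readers only bump active_readers */
  v= config_value;
  rw_pr_unlock(&config_lock);
  return v;
}

static void write_config(int v)
{
  rw_pr_wrlock(&config_lock);     /* blocks until active_readers drops to zero */
  config_value= v;
  rw_pr_unlock(&config_lock);     /* may signal no_active_readers so another queued writer can proceed */
}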
Example #21
/*
 * call-seq:
 *    mutex.try_lock  -> true or false
 *
 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
 * lock was granted.
 */
VALUE
rb_mutex_trylock(VALUE self)
{
    rb_mutex_t *mutex;
    VALUE locked = Qfalse;
    GetMutexPtr(self, mutex);

    native_mutex_lock(&mutex->lock);
    if (mutex->th == 0) {
	rb_thread_t *th = GET_THREAD();
	mutex->th = th;
	locked = Qtrue;

	mutex_locked(th, self);
    }
    native_mutex_unlock(&mutex->lock);

    return locked;
}
Example #22
static int
w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
{
    HANDLE *targets = events;
    HANDLE intr;
    DWORD ret;

    thread_debug("  w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
		 events, count, timeout, th);
    if (th && (intr = th->native_thread_data.interrupt_event)) {
	native_mutex_lock(&th->vm->global_vm_lock);
	if (intr == th->native_thread_data.interrupt_event) {
	    w32_reset_event(intr);
	    if (RUBY_VM_INTERRUPTED(th)) {
		w32_set_event(intr);
	    }

	    targets = ALLOCA_N(HANDLE, count + 1);
	    memcpy(targets, events, sizeof(HANDLE) * count);

	    targets[count++] = intr;
	    thread_debug("  * handle: %p (count: %d, intr)\n", intr, count);
	}
	native_mutex_unlock(&th->vm->global_vm_lock);
    }

    thread_debug("  WaitForMultipleObjects start (count: %d)\n", count);
    ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
    thread_debug("  WaitForMultipleObjects end (ret: %lu)\n", ret);

    if (ret == WAIT_OBJECT_0 + count - 1 && th) {
	errno = EINTR;
    }
    if (ret == WAIT_FAILED && THREAD_DEBUG) {
	int i;
	DWORD dmy;
	for (i = 0; i < count; i++) {
	    thread_debug("  * error handle %d - %s\n", i,
			 GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
	}
    }
    return ret;
}
Example #23
static void
gvl_release(rb_vm_t *vm)
{
#if GVL_SIMPLE_LOCK
    /* simply drop the mutex taken in gvl_acquire() */
    native_mutex_unlock(&vm->gvl.lock);
#else
    native_mutex_lock(&vm->gvl.lock);
    if (vm->gvl.waiting > 0) {
	rb_thread_t *th = vm->gvl.waiting_threads;
	if (GVL_DEBUG) fprintf(stderr, "gvl release (%p): wakeup: %p\n", GET_THREAD(), th);
	native_cond_signal(&th->native_thread_data.gvl_cond);
    }
    else {
	if (GVL_DEBUG) fprintf(stderr, "gvl release (%p): wakeup: %p\n", GET_THREAD(), NULL);
	/* do nothing */
    }
    vm->gvl.acquired = 0;
    native_mutex_unlock(&vm->gvl.lock);
#endif
}
Example #24
static void
native_cond_wait(rb_thread_cond_t *cond, rb_thread_lock_t *mutex)
{
    DWORD r;
    struct cond_event_entry entry;
//RHO
    // WinMo BUG: in release mode, CreateEventW without a name does not work
    // when called from a non-main thread, so generate a unique event name.
    static int nCounter = 0;
    wchar_t buf[20];
    wsprintfW(buf, L"REvent%d", nCounter);
    nCounter = nCounter + 1;

    entry.next = 0;
    entry.event = CreateEventW(0, FALSE, FALSE, buf);
//RHO

    /* cond is guarded by mutex */
    if (cond->next) {
	cond->last->next = &entry;
	cond->last = &entry;
    }
    else {
	cond->next = &entry;
	cond->last = &entry;
    }

    native_mutex_unlock(mutex);
    {
	r = WaitForSingleObject(entry.event, INFINITE);
	if (r != WAIT_OBJECT_0) {
	    rb_bug("native_cond_wait: WaitForSingleObject returns %lu", r);
	}
    }
    native_mutex_lock(mutex);

    w32_close_handle(entry.event);
}
Example #25
/*
 * call-seq:
 *    mutex.lock  -> self
 *
 * Attempts to grab the lock and waits if it isn't available.
 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
 */
VALUE
rb_mutex_lock(VALUE self)
{
    rb_thread_t *th = GET_THREAD();
    rb_mutex_t *mutex;
    GetMutexPtr(self, mutex);

    /* When running trap handler */
    if (!mutex->allow_trap && th->interrupt_mask & TRAP_INTERRUPT_MASK) {
	rb_raise(rb_eThreadError, "can't be called from trap context");
    }

    if (rb_mutex_trylock(self) == Qfalse) {
	if (mutex->th == th) {
	    rb_raise(rb_eThreadError, "deadlock; recursive locking");
	}

	while (mutex->th != th) {
	    int interrupted;
	    enum rb_thread_status prev_status = th->status;
	    volatile int timeout_ms = 0;
	    struct rb_unblock_callback oldubf;

	    set_unblock_function(th, lock_interrupt, mutex, &oldubf, FALSE);
	    th->status = THREAD_STOPPED_FOREVER;
	    th->locking_mutex = self;

	    native_mutex_lock(&mutex->lock);
	    th->vm->sleeper++;
	    /*
	     * Careful: while some contended threads are in lock_func(),
	     * vm->sleeper is an unstable value.  We have to avoid both
	     * deadlock and a busy loop.
	     */
	    if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
		!patrol_thread) {
		timeout_ms = 100;
		patrol_thread = th;
	    }

	    GVL_UNLOCK_BEGIN();
	    interrupted = lock_func(th, mutex, (int)timeout_ms);
	    native_mutex_unlock(&mutex->lock);
	    GVL_UNLOCK_END();

	    if (patrol_thread == th)
		patrol_thread = NULL;

	    reset_unblock_function(th, &oldubf);

	    th->locking_mutex = Qfalse;
	    if (mutex->th && interrupted == 2) {
		rb_check_deadlock(th->vm);
	    }
	    if (th->status == THREAD_STOPPED_FOREVER) {
		th->status = prev_status;
	    }
	    th->vm->sleeper--;

	    if (mutex->th == th) mutex_locked(th, self);

	    if (interrupted) {
		RUBY_VM_CHECK_INTS_BLOCKING(th);
	    }
	}
    }
    return self;
}
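rb_mutex_lock treats the value returned by lock_func() (not shown in this listing) as an "interrupted" flag, with 2 triggering the deadlock check after a patrol timeout. A simplified sketch of a loop with that contract, using the same cond/cond_waiting fields as the other mutex examples and a hypothetical cond_timedwait_ms() helper for the timed wait, is:

static int
lock_func_sketch(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
{
    int interrupted = 0; /* 0: acquired, 1: interrupted, 2: patrol timeout */

    /* the caller already holds mutex->lock (see rb_mutex_lock above) */
    mutex->cond_waiting++;
    for (;;) {
	if (!mutex->th) {
	    mutex->th = th;    /* grabbed the mutex */
	    break;
	}
	if (RUBY_VM_INTERRUPTED(th)) {
	    interrupted = 1;   /* woken by lock_interrupt() or another interrupt */
	    break;
	}
	if (timeout_ms) {
	    /* hypothetical helper: relative timed wait that reports expiry */
	    if (cond_timedwait_ms(&mutex->cond, &mutex->lock, timeout_ms) == ETIMEDOUT) {
		interrupted = 2; /* patrol timeout: let the caller run rb_check_deadlock() */
		break;
	    }
	}
	else {
	    native_cond_wait(&mutex->cond, &mutex->lock);
	}
    }
    mutex->cond_waiting--;
    return interrupted;
}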