bool SystemProperties::Wait(const prop_info* pi, uint32_t old_serial, uint32_t* new_serial_ptr,
                            const timespec* relative_timeout) {
  // Are we waiting on the global serial or a specific serial?
  atomic_uint_least32_t* serial_ptr;
  if (pi == nullptr) {
    if (!initialized_) {
      return false;
    }

    prop_area* serial_pa = contexts_->GetSerialPropArea();
    if (serial_pa == nullptr) {
      return false;
    }

    serial_ptr = serial_pa->serial();
  } else {
    serial_ptr = const_cast<atomic_uint_least32_t*>(&pi->serial);
  }

  uint32_t new_serial;
  do {
    int rc;
    if ((rc = __futex_wait(serial_ptr, old_serial, relative_timeout)) != 0 && rc == -ETIMEDOUT) {
      return false;
    }
    new_serial = load_const_atomic(serial_ptr, memory_order_acquire);
  } while (new_serial == old_serial);

  *new_serial_ptr = new_serial;
  return true;
}
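
A minimal caller sketch, assuming the modern four-argument __system_property_wait() wrapper that fronts SystemProperties::Wait above (older bionic exposed the single-argument form shown in the next example); the property name "persist.example.flag" is made up for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/system_properties.h>

/* Block until a (hypothetical) property changes, then print its new value. */
static void watch_property_once(void) {
  const prop_info* pi = __system_property_find("persist.example.flag");
  if (pi == NULL) return;                        /* property not defined yet */

  uint32_t old_serial = __system_property_serial(pi);
  uint32_t new_serial;
  /* A NULL timeout means: sleep until the serial moves past old_serial. */
  if (__system_property_wait(pi, old_serial, &new_serial, NULL)) {
    char value[PROP_VALUE_MAX];
    __system_property_read(pi, NULL, value);
    printf("new value '%s' (serial %u)\n", value, new_serial);
  }
}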
int __system_property_wait(const prop_info *pi)
{
    if (pi == 0) {
        prop_area *pa = __system_property_area__;
        const uint32_t n = pa->serial;
        do {
            __futex_wait(&pa->serial, n, NULL);
        } while (n == pa->serial);
    } else {
        const uint32_t n = pi->serial;
        do {
            __futex_wait((volatile void *)&pi->serial, n, NULL);
        } while(n == pi->serial);
    }
    return 0;
}
int __system_property_read(const prop_info *pi, char *name, char *value)
{
    unsigned serial, len;

    if (__predict_false(compat_mode)) {
        return __system_property_read_compat(pi, name, value);
    }

    for(;;) {
        serial = pi->serial;
        while(SERIAL_DIRTY(serial)) {
            __futex_wait((volatile void *)&pi->serial, serial, NULL);
            serial = pi->serial;
        }
        len = SERIAL_VALUE_LEN(serial);
        memcpy(value, pi->value, len + 1);
        ANDROID_MEMBAR_FULL();
        if(serial == pi->serial) {
            if(name != 0) {
                strcpy(name, pi->name);
            }
            return len;
        }
    }
}
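
For context, the SERIAL_DIRTY and SERIAL_VALUE_LEN helpers used above are not shown in these snippets; in the bionic source they unpack the serial word roughly as follows (low bit set while a writer is mid-update, top byte caching the value length):

/* The serial word doubles as a seqlock: the writer sets the low bit while it
 * rewrites the value, and the top byte caches the value length. */
#define SERIAL_DIRTY(serial)     ((serial) & 1)
#define SERIAL_VALUE_LEN(serial) ((serial) >> 24)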
bool mb__system_property_wait(const prop_info* pi,
                              uint32_t old_serial,
                              uint32_t* new_serial_ptr,
                              const timespec* relative_timeout) {
  // Are we waiting on the global serial or a specific serial?
  atomic_uint_least32_t* serial_ptr;
  if (pi == nullptr) {
    if (mb__system_property_area__ == nullptr) return false;
    serial_ptr = mb__system_property_area__->serial();
  } else {
    serial_ptr = const_cast<atomic_uint_least32_t*>(&pi->serial);
  }

  uint32_t new_serial;
  do {
    int rc;
    if ((rc = __futex_wait(serial_ptr, old_serial, relative_timeout)) != 0 && rc == -ETIMEDOUT) {
      return false;
    }
    new_serial = load_const_atomic(serial_ptr, memory_order_acquire);
  } while (new_serial == old_serial);

  *new_serial_ptr = new_serial;
  return true;
}
int __system_property_wait(const prop_info *pi)
{
    unsigned n;
    if(pi == 0) {
        prop_area *pa = __system_property_area__;
        n = pa->serial;
        do {
            __futex_wait(&pa->serial, n, 0);
        } while(n == pa->serial);
    } else {
        n = pi->serial;
        do {
            __futex_wait((volatile void *)&pi->serial, n, 0);
        } while(n == pi->serial);
    }
    return 0;
}
Example 6
int futex_wait(volatile void *ftx, int val, const struct timespec *timeout)
{
#ifdef ANDROID
    return __futex_wait(ftx, val, timeout);
#else
    nanosleep(timeout, NULL);
    return 0;
#endif
}
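
A minimal sketch of how a wrapper like this is typically used: sleep in the kernel while a shared word still holds the value we last observed. The flag and the 100 ms timeout are illustrative; note that the non-Android fallback above requires a non-NULL timeout, and a real producer would also issue a futex wake, which this loop does not rely on because of the finite timeout:

#include <time.h>

static volatile int g_flag = 0;   /* set to non-zero by another thread */

static void wait_for_flag(void) {
  struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };   /* re-check every 100 ms */
  while (g_flag == 0) {
    /* Sleeps only while g_flag still equals 0; returns on wake-up or timeout. */
    futex_wait(&g_flag, 0, &ts);
  }
}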
unsigned int __system_property_wait_any(unsigned int serial)
{
    prop_area *pa = __system_property_area__;

    do {
        __futex_wait(&pa->serial, serial, NULL);
    } while(pa->serial == serial);

    return pa->serial;
}
// Wait for non-locked serial, and retrieve it with acquire semantics.
unsigned int __system_property_serial(const prop_info *pi)
{
    uint32_t serial = load_const_atomic(&pi->serial, memory_order_acquire);
    while (SERIAL_DIRTY(serial)) {
        __futex_wait(const_cast<volatile void *>(
                        reinterpret_cast<const void *>(&pi->serial)),
                     serial, NULL);
        serial = load_const_atomic(&pi->serial, memory_order_acquire);
    }
    return serial;
}
unsigned int __system_property_wait_any(unsigned int serial)
{
    prop_area *pa = __system_property_area__;
    uint32_t my_serial;

    do {
        __futex_wait(&pa->serial, serial, NULL);
        my_serial = atomic_load_explicit(&pa->serial, memory_order_acquire);
    } while (my_serial == serial);

    return my_serial;
}
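
A hedged sketch of the classic watcher loop built on __system_property_wait_any() and __system_property_read(); the "wait until the value is \"1\"" condition is purely illustrative:

#include <string.h>
#include <sys/system_properties.h>

/* Block until the given property reads as "1", waking on any property change. */
static void wait_until_true(const prop_info* pi) {
  unsigned serial = 0;
  char value[PROP_VALUE_MAX];
  for (;;) {
    if (__system_property_read(pi, NULL, value) > 0 && strcmp(value, "1") == 0)
      return;
    /* Returns once the global serial differs from the one passed in, so a
       change that races with the read above is not missed. */
    serial = __system_property_wait_any(serial);
  }
}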
Example 10
/*
 * Poll an event to see if it has been signaled. Set timeout to -1 to block indefinitely.
 * If timeout is 0 this function does not block but returns immediately.
 */
BOOL event_poll( EVENT * event, DWORD timeout )
{
#ifdef _WIN32
	if( event == NULL )
		return FALSE;

	if( WaitForSingleObject( event->handle, timeout ) == WAIT_OBJECT_0 )
		return TRUE;

	return FALSE;
#else
	BOOL result = FALSE;

	// DWORD WINAPI WaitForSingleObject(
	// __in  HANDLE hHandle,
	// __in  DWORD dwMilliseconds
	// );
	// http://msdn.microsoft.com/en-us/library/ms687032(VS.85).aspx

	if( event == NULL )
		return FALSE;

	if(timeout) {
		struct timespec ts;

		// XXX, need to verify for -1. below modified from bionic/pthread.c
		// and maybe loop if needed

		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout%1000)*1000000;
		if (ts.tv_nsec >= 1000000000) {
			ts.tv_sec++;
			ts.tv_nsec -= 1000000000;
		}

		// atomically checks if event->handle is 0, if so,
		// it sleeps for timeout. if event->handle is 1, it
		// returns straight away.

		__futex_wait(&(event->handle), 0, &ts);
	}

	// We should behave like an auto-reset event
	result = event->handle ? TRUE : FALSE;
	if( result )
		event->handle = (HANDLE)0;

	return result;
#endif
}
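
A minimal consumer-side sketch using event_poll() from the snippet above; the five-second timeout and the consume_one() name are illustrative, and a producer thread is assumed to signal the event elsewhere:

/* Consumer side: wait up to five seconds for a producer to signal the event.
 * event_poll() behaves like an auto-reset event and clears it on success. */
BOOL consume_one( EVENT * event )
{
	if( event_poll( event, 5000 ) )
	{
		// ... the event was signaled: handle the pending work here ...
		return TRUE;
	}

	// Timed out: nothing was signaled within 5000 milliseconds.
	return FALSE;
}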
Example 11
int pthread_join(pthread_t t, void** return_value) {
  if (t == pthread_self()) {
    return EDEADLK;
  }

  pid_t tid;
  volatile int* tid_ptr;
  {
    pthread_accessor thread(t);
    if (thread.get() == NULL) {
      return ESRCH;
    }

    if ((thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) != 0) {
      return EINVAL;
    }

    if ((thread->attr.flags & PTHREAD_ATTR_FLAG_JOINED) != 0) {
      return EINVAL;
    }

    // Okay, looks like we can signal our intention to join.
    thread->attr.flags |= PTHREAD_ATTR_FLAG_JOINED;
    tid = thread->tid;
    tid_ptr = &thread->tid;
  }

  // We set the PTHREAD_ATTR_FLAG_JOINED flag with the lock held,
  // so no one is going to remove this thread except us.

  // Wait for the thread to actually exit, if it hasn't already.
  while (*tid_ptr != 0) {
    __futex_wait(tid_ptr, tid, NULL);
  }

  // Take the lock again so we can pull the thread's return value
  // and remove the thread from the list.
  pthread_accessor thread(t);

  if (return_value) {
    *return_value = thread->return_value;
  }

  _pthread_internal_remove_locked(thread.get());
  return 0;
}
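
A standard usage example for reference; on bionic the pthread_join() call above ends up blocking in __futex_wait on the thread's tid slot, which the kernel clears (and wakes) at thread exit via CLONE_CHILD_CLEARTID:

#include <pthread.h>
#include <stdio.h>

static void* worker(void* arg) {
  /* ... do the actual work ... */
  return arg;                     /* hand the argument back as the result */
}

int main(void) {
  pthread_t t;
  void* result = NULL;

  if (pthread_create(&t, NULL, worker, (void*)"done") != 0)
    return 1;

  /* Blocks until the kernel clears the thread's tid slot at exit and issues
     the matching futex wake, then fetches the return value. */
  if (pthread_join(t, &result) == 0)
    printf("worker returned \"%s\"\n", (const char*)result);

  return 0;
}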
int __system_property_read(const prop_info *pi, char *name, char *value)
{
    unsigned serial, len;
    
    for(;;) {
        serial = *(unsigned*)((char*)pi + prop_name_max);
        while(SERIAL_DIRTY(serial)) {
            __futex_wait((volatile void *)((char*)pi + prop_name_max), serial, 0);
            serial = *(unsigned*)((char*)pi + prop_name_max);
        }
        len = SERIAL_VALUE_LEN(serial);
        memcpy(value, (char*)pi + prop_name_max + sizeof(pi->serial), len + 1);
        if(serial == *(unsigned*)((char*)pi + prop_name_max)) {
            if(name != 0) {
                strcpy(name, pi->name);
            }
            return len;
        }
    }
}
extern "C" int __cxa_guard_acquire(_guard_t* gv)
{
    // 0 -> pending, return 1
    // pending -> waiting, wait and return 0
    // waiting: untouched, wait and return 0
    // ready: untouched, return 0

retry:
    if (__atomic_cmpxchg(0, pending, &gv->state) == 0) {
        ANDROID_MEMBAR_FULL();
        return 1;
    }
    __atomic_cmpxchg(pending, waiting, &gv->state); // Indicate there is a waiter
    __futex_wait(&gv->state, waiting, NULL);

    if (gv->state != ready) // __cxa_guard_abort was called, let every thread try since there is no return code for this condition
        goto retry;

    ANDROID_MEMBAR_FULL();
    return 0;
}
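
For context, __cxa_guard_acquire() is emitted by the compiler around thread-safe initialization of function-local statics (Itanium C++ ABI). Below is a hand-written sketch of roughly what the generated code does; Widget is a hypothetical type, the guard word width varies by ABI (64-bit Itanium, 32-bit ARM EABI), and the __cxa_guard_abort() path taken when the constructor throws is omitted:

#include <new>

struct Widget { Widget() {} };                   // hypothetical type to construct

extern "C" int  __cxa_guard_acquire(long long* guard);
extern "C" void __cxa_guard_release(long long* guard);

Widget& instance() {
  static long long guard;                        // zero-initialized guard word
  alignas(Widget) static char storage[sizeof(Widget)];
  if (__cxa_guard_acquire(&guard)) {             // 1 => this thread must initialize
    new (storage) Widget();                      // run the constructor exactly once
    __cxa_guard_release(&guard);                 // mark ready, wake futex waiters
  }
  return *reinterpret_cast<Widget*>(storage);
}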
Example 14
int pthread_join(pthread_t t, void** return_value) {
  if (t == pthread_self()) {
    return EDEADLK;
  }

  pid_t tid;
  volatile int* tid_ptr;
  {
    pthread_accessor thread(t);
    if (thread.get() == NULL) {
      return ESRCH;
    }

    if ((thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) != 0) {
      return EINVAL;
    }

    if ((thread->attr.flags & PTHREAD_ATTR_FLAG_JOINED) != 0) {
      return EINVAL;
    }

    // Okay, looks like we can signal our intention to join.
    thread->attr.flags |= PTHREAD_ATTR_FLAG_JOINED;
    tid = thread->tid;
    tid_ptr = &thread->tid;
  }

  // We set the PTHREAD_ATTR_FLAG_JOINED flag with the lock held,
  // so no one is going to remove this thread except us.

  // Wait for the thread to actually exit, if it hasn't already.
  while (*tid_ptr != 0) {
    // ARC MOD BEGIN
    // Use __nacl_irt_sched_yield instead of __futex_wait.
    // __nacl_irt_thread_exit does not give us a notice with
    // futex_wait, so we will yield and poll until thread completes.
    //
    // Note that nacl-glibc's has similar code in nptl/pthread_join.c
    // and sysdeps/nacl/lowlevellock.h.
#if defined(HAVE_ARC)
    __nacl_irt_sched_yield();
#else
    // ARC MOD END
    __futex_wait(tid_ptr, tid, NULL);
    // ARC MOD BEGIN
#endif
    // ARC MOD END
  }

  // Take the lock again so we can pull the thread's return value
  // and remove the thread from the list.
  pthread_accessor thread(t);

  if (return_value) {
    *return_value = thread->return_value;
  }
  // ARC MOD BEGIN
  // Unmap stack if PTHREAD_ATTR_FLAG_USER_STACK is not
  // set. Upstream bionic unmaps the stack in thread which are about
  // to exit, but we cannot do this on NaCl because the stack should
  // be available when we call __nacl_irt_thread_exit. Instead, we
  // unmap the stack from the thread which calls pthread_join.
#if defined(HAVE_ARC)
  if (!thread->user_allocated_stack() &&
      thread->attr.stack_base) {
    if (munmap(thread->attr.stack_base, thread->attr.stack_size) != 0) {
      static const int kStderrFd = 2;
      static const char kMsg[] = "failed to unmap the stack!\n";
      write(kStderrFd, kMsg, sizeof(kMsg) - 1);
      abort();
    }
    // Clear the pointer to unmapped stack so pthread_join from
    // other threads will not try to unmap this region again.
    thread->attr.stack_base = NULL;
    thread->attr.stack_size = 0;
    thread->tls = NULL;
  }
#endif  // HAVE_ARC
  // ARC MOD END

  _pthread_internal_remove_locked(thread.get());
  return 0;
}