Example 1
#include <stdatomic.h>
#include <stdlib.h>

atomic_int v;
int count;

void
test_or ()
{
  v = 0;
  count = 1;

  atomic_fetch_or (&v, count);
  if (v != 1)
    abort ();

  count *= 2;
  atomic_fetch_or_explicit (&v, count, memory_order_consume);
  if (v != 3)
    abort ();

  count *= 2;
  atomic_fetch_or (&v, 4);
  if (v != 7)
    abort ();

  count *= 2;
  atomic_fetch_or_explicit (&v, 8, memory_order_release);
  if (v != 15)
    abort ();

  count *= 2;
  atomic_fetch_or (&v, count);
  if (v != 31)
    abort ();

  count *= 2;
  atomic_fetch_or_explicit (&v, count, memory_order_seq_cst);
  if (v != 63)
    abort ();
}
Example 2
static int __pthread_rwlock_timedrdlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) == __get_thread()->tid) {
    return EDEADLK;
  }

  while (true) {
    int result = __pthread_rwlock_tryrdlock(rwlock);
    if (result == 0 || result == EAGAIN) {
      return result;
    }
    result = check_timespec(abs_timeout_or_null);
    if (result != 0) {
      return result;
    }

    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred)) {
      continue;
    }

    rwlock->pending_lock.lock();
    rwlock->pending_reader_count++;

    // We rely on the fact that all atomic read-modify-write operations on the same object (here,
    // rwlock->state) always appear to occur in a single total order. If the pending flag is added
    // before unlocking, the unlocking thread will wake up the waiter. Otherwise, we will observe
    // the unlocked state ourselves and will not need to wait.
    old_state = atomic_fetch_or_explicit(&rwlock->state, STATE_HAVE_PENDING_READERS_FLAG,
                                         memory_order_relaxed);

    int old_serial = rwlock->pending_reader_wakeup_serial;
    rwlock->pending_lock.unlock();

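    // Wait only if the lock was still unavailable when the pending flag was set; otherwise the
    // total order described above guarantees we already observed the unlocked state.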
    int futex_result = 0;
    if (!__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred)) {
      futex_result = __futex_wait_ex(&rwlock->pending_reader_wakeup_serial, rwlock->pshared,
                                     old_serial, true, abs_timeout_or_null);
    }

    rwlock->pending_lock.lock();
    rwlock->pending_reader_count--;
    if (rwlock->pending_reader_count == 0) {
      atomic_fetch_and_explicit(&rwlock->state, ~STATE_HAVE_PENDING_READERS_FLAG,
                                memory_order_relaxed);
    }
    rwlock->pending_lock.unlock();

    if (futex_result == -ETIMEDOUT) {
      return ETIMEDOUT;
    }
  }
}
Example 3
#include <stdatomic.h>
#include <stdlib.h>

atomic_int v;
int count;

void
test_fetch_or ()
{
  v = 0;
  count = 1;

  if (atomic_fetch_or_explicit (&v, count, memory_order_relaxed) != 0)
    abort ();

  count *= 2;
  if (atomic_fetch_or_explicit (&v, 2, memory_order_consume) != 1)
    abort ();

  count *= 2;
  if (atomic_fetch_or_explicit (&v, count, memory_order_acquire) != 3)
    abort ();

  count *= 2;
  if (atomic_fetch_or_explicit (&v, 8, memory_order_release) != 7)
    abort ();

  count *= 2;
  if (atomic_fetch_or_explicit (&v, count, memory_order_acq_rel) != 15)
    abort ();

  count *= 2;
  if (atomic_fetch_or_explicit (&v, count, memory_order_seq_cst) != 31)
    abort ();

  count *= 2;
  if (atomic_fetch_or (&v, count) != 63)
    abort ();
}
Example 4
static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) == __get_thread()->tid) {
    return EDEADLK;
  }
  while (true) {
    int result = __pthread_rwlock_trywrlock(rwlock);
    if (result == 0) {
      return result;
    }
    result = check_timespec(abs_timeout_or_null);
    if (result != 0) {
      return result;
    }

    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__can_acquire_write_lock(old_state)) {
      continue;
    }

    rwlock->pending_lock.lock();
    rwlock->pending_writer_count++;

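    // As in the reader path, the single total order of read-modify-write operations on
    // rwlock->state guarantees that either the unlocking thread sees the pending flag and wakes
    // us, or we observe the unlocked state in old_state and skip the wait below.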
    old_state = atomic_fetch_or_explicit(&rwlock->state, STATE_HAVE_PENDING_WRITERS_FLAG,
                                         memory_order_relaxed);

    int old_serial = rwlock->pending_writer_wakeup_serial;
    rwlock->pending_lock.unlock();

    int futex_result = 0;
    if (!__can_acquire_write_lock(old_state)) {
      futex_result = __futex_wait_ex(&rwlock->pending_writer_wakeup_serial, rwlock->pshared,
                                     old_serial, true, abs_timeout_or_null);
    }

    rwlock->pending_lock.lock();
    rwlock->pending_writer_count--;
    if (rwlock->pending_writer_count == 0) {
      atomic_fetch_and_explicit(&rwlock->state, ~STATE_HAVE_PENDING_WRITERS_FLAG,
                                memory_order_relaxed);
    }
    rwlock->pending_lock.unlock();

    if (futex_result == -ETIMEDOUT) {
      return ETIMEDOUT;
    }
  }
}
Example 5
void vlc_cancel (vlc_thread_t thread_id)
{
    atomic_int *addr;

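    /* Mark the target thread as cancelled. */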
    atomic_store(&thread_id->killed, true);

    vlc_mutex_lock(&thread_id->wait.lock);
    addr = thread_id->wait.addr;
    if (addr != NULL)
    {
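        /* The thread is blocked waiting on this address: set the low bit so
         * the waiter observes a state change, then wake all waiters. */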
        atomic_fetch_or_explicit(addr, 1, memory_order_relaxed);
        vlc_addr_broadcast(addr);
    }
    vlc_mutex_unlock(&thread_id->wait.lock);
}
Example 6
/* note that this function doesn't actually delete the dirent. It just sets a flag that
 * says "hey, this was deleted". See vfs_dirent_release for more details. An important
 * aspect is that on unlinking a directory, it does unlink the . and .. entries, even
 * though the directory won't actually be deleted until vfs_dirent_release gets called
 * and the last reference is released. */
static int do_fs_unlink(struct inode *node, const char *name, size_t namelen, int rec)
{
	if(!vfs_inode_check_permissions(node, MAY_WRITE, 0))
		return -EACCES;
	struct dirent *dir = fs_dirent_lookup(node, name, namelen);
	if(!dir)
		return -ENOENT;
	struct inode *target = fs_dirent_readinode(dir, true);
	if(!target || (rec && S_ISDIR(target->mode) && !fs_inode_dirempty(target))) {
		if(target)
			vfs_icache_put(target);
		vfs_dirent_release(dir);
		return -ENOTEMPTY;
	}
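	/* Publish the unlink: release ordering makes the preceding writes
	 * visible to any thread that observes DIRENT_UNLINK in dir->flags. */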
	atomic_fetch_or_explicit(&dir->flags, DIRENT_UNLINK, memory_order_release);
	if(S_ISDIR(target->mode) && rec) {
		do_fs_unlink(target, "..", 2, 0);
		do_fs_unlink(target, ".", 1, 0);
	}
	vfs_icache_put(target);
	vfs_dirent_release(dir);
	return 0;
}
Example 7
TEST(stdatomic, atomic_fetch_or) {
  atomic_int i = ATOMIC_VAR_INIT(0x100);
  ASSERT_EQ(0x100, atomic_fetch_or(&i, 0x020));
  ASSERT_EQ(0x120, atomic_fetch_or_explicit(&i, 0x003, memory_order_relaxed));
  ASSERT_EQ(0x123, atomic_load(&i));
}
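Every example above follows the same pattern: OR a flag into shared state with a single atomic read-modify-write, and use the returned old value to decide what happens next. A minimal, self-contained sketch of that pattern (the names FLAG_SHUTDOWN, request_shutdown, and state are illustrative, not taken from any of the sources above):

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_SHUTDOWN 0x1  /* illustrative flag value */

static atomic_int state;

/* Request shutdown; returns 1 only for the call that first set the flag.
 * The fetch_or is atomic, so exactly one caller ever sees the bit clear. */
static int request_shutdown(void)
{
  int old = atomic_fetch_or_explicit(&state, FLAG_SHUTDOWN, memory_order_relaxed);
  return (old & FLAG_SHUTDOWN) == 0;
}

int main(void)
{
  printf("first request: %d\n", request_shutdown());  /* prints 1 */
  printf("second request: %d\n", request_shutdown()); /* prints 0 */
  return 0;
}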