/* Check that we can dynamically delete code. */
void test_deleting_code(void) {
  uint8_t *load_area = (uint8_t *) allocate_code_space(1);
  uint8_t buf[BUF_SIZE];
  int rc;
  int (*func)(void);

  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);
  rc = nacl_dyncode_create(load_area, buf, sizeof(buf));
  assert(rc == 0);
  func = (int (*)(void)) (uintptr_t) load_area;
  rc = func();
  assert(rc == MARKER_OLD);

  rc = dyncode_delete_with_retry(load_area, sizeof(buf));
  assert(rc == 0);
  assert(load_area[0] != buf[0]);

  /* Attempting to unload the code again should fail. */
  rc = nacl_dyncode_delete(load_area, sizeof(buf));
  assert(rc == -1);
  assert(errno == EFAULT);

  /*
   * We should be able to load new code at the same address.  This
   * assumes that no other threads are running, otherwise this request
   * can be rejected.
   *
   * This fails under ARM QEMU.  QEMU will flush its instruction
   * translation cache based on writes to the same virtual address,
   * but it ignores our explicit cache flush system calls.  Valgrind
   * has a similar problem, except that there is no cache flush system
   * call on x86.
   */
  if (getenv("UNDER_QEMU_ARM") != NULL ||
      getenv("RUNNING_ON_VALGRIND") != NULL) {
    printf("Skipping loading new code under emulator\n");
  } else {
    printf("Testing loading new code...\n");
    copy_and_pad_fragment(buf, sizeof(buf), &template_func_replacement,
                          &template_func_replacement_end);
    rc = nacl_dyncode_create(load_area, buf, sizeof(buf));
    assert(rc == 0);
    func = (int (*)(void)) (uintptr_t) load_area;
    rc = func();
    assert(rc == MARKER_NEW);

    rc = nacl_dyncode_delete(load_area, sizeof(buf));
    assert(rc == 0);
    assert(load_area[0] != buf[0]);
  }
}
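/*
 * dyncode_delete_with_retry() is not part of this snippet.  A minimal
 * sketch of what it could look like, assuming nacl_dyncode_delete() can
 * transiently fail with EAGAIN until other threads have been accounted
 * for (the helper's exact behaviour is an assumption here):
 */
static int dyncode_delete_with_retry(void *dest, size_t size) {
  int rc;
  do {
    rc = nacl_dyncode_delete(dest, size);
  } while (rc != 0 && errno == EAGAIN);
  return rc;
}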
void test_external_jump_target_replacement(void) {
  uint8_t *load_area = allocate_code_space(1);
  /* BUF_SIZE * 2 because this function necessarily has an extra bundle. */
  uint8_t buf[BUF_SIZE * 2];
  int rc;
  int (*func)(void);
  const int kNaClBundleSize = NACL_BUNDLE_SIZE;

  copy_and_pad_fragment(buf, sizeof(buf),
                        &template_func_external_jump_target,
                        &template_func_external_jump_target_end);

  rc = nacl_dyncode_create(load_area, buf, sizeof(buf));
  assert(rc == 0);
  func = (int (*)(void)) (uintptr_t) load_area;
  rc = func();
  assert(rc == MARKER_OLD);

  copy_and_pad_fragment(buf, sizeof(buf),
                        &template_func_external_jump_target_replace,
                        &template_func_external_jump_target_replace_end);
  /* Only copy one bundle so we can test an unaligned external jump target */
  rc = nacl_dyncode_modify(load_area, buf, kNaClBundleSize);
  assert(rc == 0);
  func = (int (*)(void)) (uintptr_t) load_area;
  rc = func();
  assert(rc == MARKER_NEW);
}
/* Check code replacement constraints */
void test_illegal_code_replacment(void) {
  uint8_t *load_area = allocate_code_space(1);
  uint8_t buf[BUF_SIZE];
  int rc;
  int i;
  int (*func)(void);

  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);
  rc = nacl_dyncode_create(load_area, buf, sizeof(buf));
  assert(rc == 0);
  func = (int (*)(void)) (uintptr_t) load_area;
  rc = func();
  assert(rc == MARKER_OLD);

  for (i = 0;
       i < (sizeof(illegal_code_sections) / sizeof(struct code_section));
       i++) {
    printf("\t%s\n", illegal_code_sections[i].name);

    /* write illegal replacement to the same location */
    copy_and_pad_fragment(buf, sizeof(buf), illegal_code_sections[i].start,
                                            illegal_code_sections[i].end);
    rc = nacl_dyncode_modify(load_area, buf, sizeof(buf));
    assert(rc != 0);
    func = (int (*)(void)) (uintptr_t) load_area;
    rc = func();
    assert(rc == MARKER_OLD);
  }
}
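/*
 * The illegal_code_sections table iterated above is defined elsewhere.
 * A hypothetical sketch of its shape, matching the field accesses and the
 * sizeof(struct code_section) arithmetic in the loop; the fragment
 * symbols in the example entry are placeholders, not real labels:
 */
struct code_section {
  const char *name;
  const char *start;
  const char *end;
};

extern const char some_illegal_fragment[];      /* hypothetical label */
extern const char some_illegal_fragment_end[];  /* hypothetical label */

static const struct code_section illegal_code_sections[] = {
  { "some_illegal_fragment",
    some_illegal_fragment, some_illegal_fragment_end },
};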
/* Check that we can rewrite an instruction that crosses alignment boundaries. */
void test_replacing_code_slowpaths(void) {
  uint8_t *load_area = allocate_code_space(1);
  uint8_t buf[NACL_BUNDLE_SIZE];
  size_t size;
  int rc;
  /* Offsets to copy an instruction to. */
  int off1, off2, off3;

  fill_nops(buf, sizeof(buf));
  size = (size_t) (&template_instr_end - &template_instr);
  assert(size <= 5);
  off1 = 4 - size + 1; /* Cross 4 byte boundary */
  off2 = 24 - size + 1; /* Cross 8 byte boundary */
  off3 = 16 - size + 1; /* Cross 16 byte boundary */
  memcpy(buf + off1, &template_instr, size);
  memcpy(buf + off2, &template_instr, size);
  memcpy(buf + off3, &template_instr, size);
  rc = nacl_dyncode_create(load_area, buf, sizeof(buf));
  assert(rc == 0);

  memcpy(buf + off1, &template_instr_replace, size);
  rc = nacl_dyncode_modify(load_area + off1, buf + off1, size);
  assert(rc == 0);
  assert(memcmp(buf + off1, load_area + off1, size) == 0);

  memcpy(buf + off2, &template_instr_replace, size);
  rc = nacl_dyncode_modify(load_area + off2, buf + off2, size);
  assert(rc == 0);
  assert(memcmp(buf + off2, load_area + off2, size) == 0);

  memcpy(buf + off3, &template_instr_replace, size);
  rc = nacl_dyncode_modify(load_area + off3, buf + off3, size);
  assert(rc == 0);
  assert(memcmp(buf + off3, load_area + off3, size) == 0);
}
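/*
 * The padding helpers used above (fill_nops) and below (fill_hlts,
 * copy_and_pad_fragment) are not shown in this snippet.  Plausible
 * sketches follow; fill_hlts mirrors the halt-filling loop in the
 * write_to_dyncode example further down, and the single-byte 0x90 NOP in
 * fill_nops is an x86 assumption (the real helper is per-architecture):
 */
static void fill_nops(uint8_t *data, size_t size) {
  memset(data, 0x90, size);  /* assumption: x86 one-byte NOP */
}

static void fill_hlts(uint8_t *data, size_t size) {
  uint32_t halt_val = NACL_HALT_WORD;
  size_t i;
  assert(size % NACL_HALT_LEN == 0);
  for (i = 0; i < size; i += NACL_HALT_LEN)
    memcpy(data + i, &halt_val, NACL_HALT_LEN);
}

static void copy_and_pad_fragment(uint8_t *dest, size_t dest_size,
                                  const char *fragment_start,
                                  const char *fragment_end) {
  size_t fragment_size = (size_t) (fragment_end - fragment_start);
  assert(dest_size % NACL_BUNDLE_SIZE == 0);
  assert(fragment_size <= dest_size);
  fill_nops(dest, dest_size);                   /* pad the whole buffer */
  memcpy(dest, fragment_start, fragment_size);  /* then copy the fragment */
}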
/* Check that we can dynamically rewrite code starting at an unaligned offset. */
void test_replacing_code_unaligned(void) {
  uint8_t *load_area = allocate_code_space(1);
  uint8_t buf[BUF_SIZE];
  int first_diff = 0;
  int rc;
  int (*func)(void);

  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);
  rc = nacl_dyncode_create(load_area, buf, sizeof(buf));
  assert(rc == 0);
  func = (int (*)(void)) (uintptr_t) load_area;
  rc = func();
  assert(rc == MARKER_OLD);

  /* write replacement to the same location, unaligned */
  copy_and_pad_fragment(buf, sizeof(buf), &template_func_replacement,
                                          &template_func_replacement_end);
  /* Find the first byte where the old and new code differ. */
  while (first_diff < sizeof(buf) && buf[first_diff] == load_area[first_diff]) {
    first_diff++;
  }
  /* Check that some of the code is unchanged and some of it differs. */
  assert(first_diff > 0 && first_diff < sizeof(buf));
  rc = nacl_dyncode_modify(load_area+first_diff, buf+first_diff,
                           sizeof(buf)-first_diff);
  assert(rc == 0);
  func = (int (*)(void)) (uintptr_t) load_area;
  rc = func();
  assert(rc == MARKER_NEW);
}
int main(void) {
  void *dest = (void *) DYNAMIC_CODE_SEGMENT_START;
  char buf[1];
  int rc = nacl_dyncode_create(dest, buf, 0);
  assert(rc == -1);
  assert(errno == EINVAL);
  return 0;
}
int nacl_load_code(void *dest, void *src, int size) {
  int rc = nacl_dyncode_create(dest, src, size);
  /*
   * Undo the syscall wrapper's errno handling, because it's more
   * convenient to test a single return value.
   */
  return rc == 0 ? 0 : -errno;
}
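/*
 * Illustrative call, not from the original test: a zero-sized load is
 * rejected with EINVAL (as the main() above checks), so the wrapper
 * returns -EINVAL.  The function name here is made up for the example.
 */
void example_nacl_load_code(void) {
  char buf[1];
  int rc = nacl_load_code((void *) DYNAMIC_CODE_SEGMENT_START, buf, 0);
  assert(rc == -EINVAL);
}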
void test_jump_into_super_inst_create(void) {
  uint8_t *load_area = allocate_code_space(1);
  uint8_t buf[BUF_SIZE];
  int rc;

  /* A direct jump into a bundle is invalid. */
  copy_and_pad_fragment(buf, sizeof(buf), &jump_into_super_inst_modified,
                        &jump_into_super_inst_modified_end);
  rc = nacl_dyncode_create(load_area, buf, sizeof(buf));
  assert(rc != 0);
  assert(errno == EINVAL);
}
int test_simle_replacement(const char *fragment1, const char *fragment1_end,
                           const char *fragment2, const char *fragment2_end) {
  uint8_t *load_area = allocate_code_space(1);
  uint8_t buf[BUF_SIZE];
  int rc;

  /* The original version is fine. */
  copy_and_pad_fragment(buf, sizeof(buf), fragment1, fragment1_end);
  rc = nacl_dyncode_create(load_area, buf, sizeof(buf));
  assert(rc == 0);

  copy_and_pad_fragment(buf, sizeof(buf), fragment2, fragment2_end);
  rc = nacl_dyncode_modify(load_area, buf, sizeof(buf));
  return rc;
}
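/*
 * Illustrative use of the helper above, not from the original test: a
 * legal like-for-like replacement (the same fragments exercised by
 * test_replacing_code below) is expected to succeed, whereas a fragment
 * from illegal_code_sections would make nacl_dyncode_modify() fail.
 */
void example_legal_replacement(void) {  /* hypothetical wrapper name */
  int rc = test_simle_replacement(&template_func, &template_func_end,
                                  &template_func_replacement,
                                  &template_func_replacement_end);
  assert(rc == 0);
}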
/* Check that we can dynamically rewrite code. */
void test_replacing_code(void) {
  uint8_t *load_area = allocate_code_space(1);
  uint8_t buf[BUF_SIZE];
  int rc;
  int (*func)(void);

  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);
  rc = nacl_dyncode_create(load_area, buf, sizeof(buf));
  assert(rc == 0);
  func = (int (*)(void)) (uintptr_t) load_area;
  rc = func();
  assert(rc == MARKER_OLD);

  /* write replacement to the same location */
  copy_and_pad_fragment(buf, sizeof(buf), &template_func_replacement,
                                          &template_func_replacement_end);
  rc = nacl_dyncode_modify(load_area, buf, sizeof(buf));
  assert(rc == 0);
  func = (int (*)(void)) (uintptr_t) load_area;
  rc = func();
  assert(rc == MARKER_NEW);
}
void test_deleting_code_from_invalid_ranges(void) {
  uint8_t *load_addr = (uint8_t *) allocate_code_space(1) + 32;
  uint8_t buf[64];
  int rc;

  /* We specifically want to test using multiple instruction bundles. */
  assert(sizeof(buf) / NACL_BUNDLE_SIZE >= 2);
  assert(sizeof(buf) % NACL_BUNDLE_SIZE == 0);

  rc = dyncode_delete_with_retry(load_addr, sizeof(buf));
  assert(rc == -1);
  assert(errno == EFAULT);

  fill_hlts(buf, sizeof(buf));
  rc = nacl_dyncode_create(load_addr, buf, sizeof(buf));
  assert(rc == 0);

  /* Overlapping before. */
  rc = nacl_dyncode_delete(load_addr - NACL_BUNDLE_SIZE,
                           sizeof(buf) + NACL_BUNDLE_SIZE);
  assert(rc == -1);
  assert(errno == EFAULT);
  /* Overlapping after. */
  rc = nacl_dyncode_delete(load_addr, sizeof(buf) + NACL_BUNDLE_SIZE);
  assert(rc == -1);
  assert(errno == EFAULT);
  /* Missing the end of the loaded chunk. */
  rc = nacl_dyncode_delete(load_addr, sizeof(buf) - NACL_BUNDLE_SIZE);
  assert(rc == -1);
  assert(errno == EFAULT);
  /* Missing the start of the loaded chunk. */
  rc = nacl_dyncode_delete(load_addr + NACL_BUNDLE_SIZE,
                           sizeof(buf) - NACL_BUNDLE_SIZE);
  assert(rc == -1);
  assert(errno == EFAULT);
  /* The correct range should work, though. */
  rc = nacl_dyncode_delete(load_addr, sizeof(buf));
  assert(rc == 0);
}
/**
 * mono_code_manager_commit:
 * @cman: a code manager
 * @data: the pointer returned by mono_code_manager_reserve ()
 * @size: the size requested in the call to mono_code_manager_reserve ()
 * @newsize: the new size to reserve
 *
 * If we reserved too much room for a method and we didn't allocate
 * already from the code manager, we can get back the excess allocation
 * for later use in the code manager.
 */
void
mono_code_manager_commit (MonoCodeManager *cman, void *data, int size, int newsize)
{
#if !defined(__native_client__) || !defined(__native_client_codegen__)
	g_assert (newsize <= size);

	if (cman->current && (size != newsize) && (data == cman->current->data + cman->current->pos - size)) {
		cman->current->pos -= size - newsize;
	}
#else
	unsigned char *code;
	int status;
	g_assert (NACL_BUNDLE_ALIGN_UP(newsize) <= size);
	code = g_hash_table_lookup (cman->hash, data);
	g_assert (code != NULL);
	mono_nacl_fill_code_buffer ((uint8_t*)data + newsize, size - newsize);
	newsize = NACL_BUNDLE_ALIGN_UP(newsize);
	g_assert ((GPOINTER_TO_UINT (data) & kNaClBundleMask) == 0);
	g_assert ((newsize & kNaClBundleMask) == 0);
	status = nacl_dyncode_create (code, data, newsize);
	if (status != 0) {
		unsigned char *codep;
		fprintf(stderr, "Error creating Native Client dynamic code section attempted to be\n"
		                "emitted at %p (hex dissasembly of code follows):\n", code);
		for (codep = data; codep < data + newsize; codep++)
			fprintf(stderr, "%02x ", *codep);
		fprintf(stderr, "\n");
		g_assert_not_reached ();
	}
	g_hash_table_remove (cman->hash, data);
# ifndef USE_JUMP_TABLES
	g_assert (data == patch_source_base[patch_current_depth]);
	g_assert (code == patch_dest_base[patch_current_depth]);
	patch_current_depth--;
	g_assert (patch_current_depth >= -1);
# endif
	free (data);
#endif
}
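/*
 * Sketch of the reserve/commit pattern described in the comment above,
 * assuming the usual Mono code-manager entry points
 * (mono_code_manager_new, mono_code_manager_reserve); the sizes and the
 * emit loop are illustrative only:
 */
static void
emit_example (MonoCodeManager *cman)
{
	int reserved = 256;                              /* worst-case estimate */
	guint8 *code = mono_code_manager_reserve (cman, reserved);
	guint8 *p = code;
	/* ... emit instructions into the buffer, advancing p ... */
	int used = p - code;
	/* Return the unused tail of the reservation to the code manager. */
	mono_code_manager_commit (cman, code, reserved, used);
}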
int main(int argc, char **argv) {
  if (argc != 2) {
    fprintf(stderr, "Usage: write_to_dyncode <alloc_dest_first>\n");
    return 1;
  }
  int alloc_dest_first = atoi(argv[1]);

  void (*func)(void);
  uintptr_t code_ptr = (uintptr_t) DYNAMIC_CODE_SEGMENT_START;

  if (alloc_dest_first) {
    char code_buf[32];
    uint32_t halt_val = NACL_HALT_WORD;
    for (int i = 0; i < sizeof(code_buf); i += NACL_HALT_LEN) {
      memcpy(code_buf + i, &halt_val, NACL_HALT_LEN);
    }
    int rc = nacl_dyncode_create((void *) code_ptr, code_buf, sizeof(code_buf));
    assert(rc == 0);
  }

  fprintf(stdout, "This should fault...\n");
  fflush(stdout);
#if defined(__i386__) || defined(__x86_64__)
  *(uint8_t *) code_ptr = 0xc3; /* RET */
#elif defined(__arm__)
  *(uint32_t *) code_ptr = 0xe12fff1e; /* BX LR */
#else
# error Unknown architecture
#endif

  fprintf(stdout, "We're still running. This is wrong.\n");
  fprintf(stdout, "Now try executing the code we wrote...\n");

  /* Double cast required to stop gcc complaining. */
  func = (void (*)(void)) (uintptr_t) code_ptr;
  func();
  fprintf(stdout, "We managed to run the code. This is bad.\n");
  return 1;
}
void test_syscall_wrappers(void) {
    /*
     * This tests whether various IRT calls generate
     * blocking-notification callbacks.  The test expectations here are
     * subject to change.  We might need to update them when the IRT or
     * the NaCl trusted runtime are changed.
     *
     * For example, if the IRT's mutex_lock() is always reported as
     * blocking today, it might not be reported as blocking in the
     * uncontended case in the future.
     *
     * Conversely, while the IRT's mutex_unlock() might always be
     * reported as non-blocking today, in a future implementation it
     * might briefly hold a lock to inspect a futex wait queue, which
     * might be reported as blocking.
     *
     * The user-code libpthread implementation is similarly subject to
     * change, but it is one level removed from the IRT interfaces that
     * generate blocking-notification callbacks.  Therefore, we test the
     * IRT interfaces rather than testing pthread_mutex, pthread_cond,
     * etc.
     */

    unsigned int local_pre_call_count = nacl_pre_calls;
    unsigned int local_post_call_count = nacl_pre_calls;

    /* A set of nonsense arguments to keep from having a bunch
     * of literal values below.
     */
    const int fd = -1;
    void* ptr = NULL;
    const size_t size = 0;

    /* Test all syscalls to make sure we are wrapping all the
     * syscalls we are trying to wrap. We don't care about the
     * args or return values as long as the syscall is made.
     */
    CHECK_SYSCALL_PRE();
    read(fd, ptr, size);
    CHECK_SYSCALL_WRAPPED();

    CHECK_SYSCALL_PRE();
    write(fd, ptr, size);
    CHECK_SYSCALL_WRAPPED();

    CHECK_SYSCALL_PRE();
    nacl_dyncode_create(ptr, ptr, size);
    CHECK_SYSCALL_WRAPPED();

    CHECK_SYSCALL_PRE();
    nacl_dyncode_modify(ptr, ptr, size);
    CHECK_SYSCALL_WRAPPED();

    CHECK_SYSCALL_PRE();
    nacl_dyncode_delete(ptr, size);
    CHECK_SYSCALL_WRAPPED();

    CHECK_SYSCALL_PRE();
    nanosleep(ptr, ptr);
    CHECK_SYSCALL_WRAPPED();

    CHECK_SYSCALL_PRE();
    open(ptr, 0, O_RDWR);
    CHECK_SYSCALL_WRAPPED();

    CHECK_SYSCALL_PRE();
    sched_yield();
    CHECK_SYSCALL_WRAPPED();

    /*
     * This initializes __nc_irt_mutex, __nc_irt_cond and __nc_irt_sem
     * as a side effect.
     */
    struct nacl_irt_thread irt_thread;
    __nc_initialize_interfaces(&irt_thread);

    /* Check the IRT's mutex interface */

    int mutex_handle;
    CHECK_SYSCALL_PRE();
    CHECK(__nc_irt_mutex.mutex_create(&mutex_handle) == 0);
    CHECK_SYSCALL_NOT_WRAPPED();

    CHECK_SYSCALL_PRE();
    CHECK(__nc_irt_mutex.mutex_lock(mutex_handle) == 0);
    CHECK_SYSCALL_WRAPPED();

    CHECK_SYSCALL_PRE();
    CHECK(__nc_irt_mutex.mutex_trylock(mutex_handle) == EBUSY);
    CHECK_SYSCALL_NOT_WRAPPED();

    CHECK_SYSCALL_PRE();
    CHECK(__nc_irt_mutex.mutex_unlock(mutex_handle) == 0);
    CHECK_SYSCALL_NOT_WRAPPED();

    CHECK_SYSCALL_PRE();
    CHECK(__nc_irt_mutex.mutex_destroy(mutex_handle) == 0);
    CHECK_SYSCALL_NOT_WRAPPED();

    /* Check the IRT's condvar interface */

    int cond_handle;
    CHECK_SYSCALL_PRE();
    CHECK(__nc_irt_cond.cond_create(&cond_handle) == 0);
    CHECK_SYSCALL_NOT_WRAPPED();

    CHECK_SYSCALL_PRE();
    CHECK(__nc_irt_cond.cond_signal(cond_handle) == 0);
    CHECK_SYSCALL_NOT_WRAPPED();

    CHECK_SYSCALL_PRE();
    CHECK(__nc_irt_cond.cond_broadcast(cond_handle) == 0);
    CHECK_SYSCALL_NOT_WRAPPED();

    CHECK(__nc_irt_mutex.mutex_create(&mutex_handle) == 0);
    CHECK(__nc_irt_mutex.mutex_lock(mutex_handle) == 0);
    struct timespec abstime = { 0, 0 };
    CHECK_SYSCALL_PRE();
    CHECK(__nc_irt_cond.cond_timed_wait_abs(cond_handle, mutex_handle,
                                            &abstime) == ETIMEDOUT);
    CHECK_SYSCALL_WRAPPED();
    CHECK(__nc_irt_mutex.mutex_unlock(mutex_handle) == 0);
    CHECK(__nc_irt_mutex.mutex_destroy(mutex_handle) == 0);

    CHECK_SYSCALL_PRE();
    CHECK(__nc_irt_cond.cond_destroy(cond_handle) == 0);
    CHECK_SYSCALL_NOT_WRAPPED();

    /* Check the IRT's semaphore interface */

    /* Semaphore with value 1 (we're the only user of it) */
    int sem_handle;
    CHECK_SYSCALL_PRE();
    CHECK(__nc_irt_sem.sem_create(&sem_handle, 1) == 0);
    CHECK_SYSCALL_NOT_WRAPPED();

    CHECK_SYSCALL_PRE();
    CHECK(__nc_irt_sem.sem_wait(sem_handle) == 0);
    CHECK_SYSCALL_WRAPPED();

    CHECK_SYSCALL_PRE();
    CHECK(__nc_irt_sem.sem_post(sem_handle) == 0);
    CHECK_SYSCALL_NOT_WRAPPED();

    CHECK_SYSCALL_PRE();
    CHECK(__nc_irt_sem.sem_destroy(sem_handle) == 0);
    CHECK_SYSCALL_NOT_WRAPPED();
}
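/*
 * The CHECK_SYSCALL_* macros and CHECK() are defined outside this
 * snippet.  A minimal sketch, assuming nacl_pre_calls/nacl_post_calls are
 * counters incremented by the pre- and post-syscall ("blockhook")
 * callbacks and that CHECK() aborts when its condition is false:
 */
#define CHECK_SYSCALL_PRE() \
  do { \
    local_pre_call_count = nacl_pre_calls; \
    local_post_call_count = nacl_post_calls; \
  } while (0)

#define CHECK_SYSCALL_WRAPPED() \
  do { \
    CHECK(local_pre_call_count != nacl_pre_calls); \
    CHECK(local_post_call_count != nacl_post_calls); \
  } while (0)

#define CHECK_SYSCALL_NOT_WRAPPED() \
  do { \
    CHECK(local_pre_call_count == nacl_pre_calls); \
    CHECK(local_post_call_count == nacl_post_calls); \
  } while (0)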
void test_syscall_wrappers(void) {
  /*
   * This tests whether various IRT calls generate
   * blocking-notification callbacks.  The test expectations here are
   * subject to change.  We might need to update them when the IRT or
   * the NaCl trusted runtime are changed.
   *
   * For example, if the IRT's mutex_lock() is always reported as
   * blocking today, it might not be reported as blocking in the
   * uncontended case in the future.
   *
   * Conversely, while the IRT's mutex_unlock() might always be
   * reported as non-blocking today, in a future implementation it
   * might briefly hold a lock to inspect a futex wait queue, which
   * might be reported as blocking.
   *
   * The user-code libpthread implementation is similarly subject to
   * change, but it is one level removed from the IRT interfaces that
   * generate blocking-notification callbacks.  Therefore, we test the
   * IRT interfaces rather than testing pthread_mutex, pthread_cond,
   * etc.
   */

  unsigned int local_pre_call_count = nacl_pre_calls;
  unsigned int local_post_call_count = nacl_pre_calls;

  /* A set of nonsense arguments to keep from having a bunch
   * of literal values below.
   */
  const int fd = -1;
  void* ptr = NULL;
  const size_t size = 0;

  /* Test all syscalls to make sure we are wrapping all the
   * syscalls we are trying to wrap. We don't care about the
   * args or return values as long as the syscall is made.
   */
  CHECK_SYSCALL_PRE();
  read(fd, ptr, size);
  CHECK_SYSCALL_WRAPPED();

  CHECK_SYSCALL_PRE();
  write(fd, ptr, size);
  CHECK_SYSCALL_WRAPPED();

  CHECK_SYSCALL_PRE();
  nacl_dyncode_create(ptr, ptr, size);
  CHECK_SYSCALL_NOT_WRAPPED();

  CHECK_SYSCALL_PRE();
  nacl_dyncode_modify(ptr, ptr, size);
  CHECK_SYSCALL_NOT_WRAPPED();

  CHECK_SYSCALL_PRE();
  nacl_dyncode_delete(ptr, size);
  CHECK_SYSCALL_NOT_WRAPPED();

  CHECK_SYSCALL_PRE();
  nanosleep(ptr, ptr);
  CHECK_SYSCALL_WRAPPED();

  CHECK_SYSCALL_PRE();
  open(ptr, 0, O_RDWR);
  CHECK_SYSCALL_WRAPPED();

  CHECK_SYSCALL_PRE();
  sched_yield();
  CHECK_SYSCALL_WRAPPED();

  /*
   * We only test the following threading-related interfaces when
   * using the IRT, because it is awkward to test this when using
   * nacl_sys_private, and it doesn't really matter whether
   * nacl_sys_private supports the "blockhooks" (a.k.a. "gc_hooks")
   * interface because nacl_sys_private bypasses NaCl's stable ABI and
   * is not officially supported.
   */
#if TESTS_USE_IRT
  struct nacl_irt_futex irt_futex;
  struct nacl_irt_mutex irt_mutex;
  struct nacl_irt_cond irt_cond;
  struct nacl_irt_sem irt_sem;
  __libnacl_mandatory_irt_query(NACL_IRT_FUTEX_v0_1,
                                &irt_futex, sizeof(irt_futex));
  __libnacl_mandatory_irt_query(NACL_IRT_MUTEX_v0_1,
                                &irt_mutex, sizeof(irt_mutex));
  __libnacl_mandatory_irt_query(NACL_IRT_COND_v0_1,
                                &irt_cond, sizeof(irt_cond));
  __libnacl_mandatory_irt_query(NACL_IRT_SEM_v0_1,
                                &irt_sem, sizeof(irt_sem));

  /* Check the IRT's futex interface */

  int futex_value = 123;
  CHECK_SYSCALL_PRE();
  CHECK(irt_futex.futex_wait_abs(&futex_value, futex_value + 1, NULL)
        == EWOULDBLOCK);
  CHECK_SYSCALL_WRAPPED();

  int woken_count;
  CHECK_SYSCALL_PRE();
  CHECK(irt_futex.futex_wake(&futex_value, 1, &woken_count) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK(woken_count == 0);

  /* Check the IRT's mutex interface */

  int mutex_handle;
  CHECK_SYSCALL_PRE();
  CHECK(irt_mutex.mutex_create(&mutex_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();

  CHECK_SYSCALL_PRE();
  CHECK(irt_mutex.mutex_lock(mutex_handle) == 0);
  CHECK_SYSCALL_WRAPPED();

  CHECK_SYSCALL_PRE();
  CHECK(irt_mutex.mutex_trylock(mutex_handle) == EBUSY);
  CHECK_SYSCALL_NOT_WRAPPED();

  CHECK_SYSCALL_PRE();
  CHECK(irt_mutex.mutex_unlock(mutex_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();

  CHECK_SYSCALL_PRE();
  CHECK(irt_mutex.mutex_destroy(mutex_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();

  /* Check the IRT's condvar interface */

  int cond_handle;
  CHECK_SYSCALL_PRE();
  CHECK(irt_cond.cond_create(&cond_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();

  CHECK_SYSCALL_PRE();
  CHECK(irt_cond.cond_signal(cond_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();

  CHECK_SYSCALL_PRE();
  CHECK(irt_cond.cond_broadcast(cond_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();

  CHECK(irt_mutex.mutex_create(&mutex_handle) == 0);
  CHECK(irt_mutex.mutex_lock(mutex_handle) == 0);
  struct timespec abstime = { 0, 0 };
  CHECK_SYSCALL_PRE();
  CHECK(irt_cond.cond_timed_wait_abs(cond_handle, mutex_handle, &abstime)
        == ETIMEDOUT);
  CHECK_SYSCALL_WRAPPED();
  CHECK(irt_mutex.mutex_unlock(mutex_handle) == 0);
  CHECK(irt_mutex.mutex_destroy(mutex_handle) == 0);

  CHECK_SYSCALL_PRE();
  CHECK(irt_cond.cond_destroy(cond_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();

  /* Check the IRT's semaphore interface */

  /* Semaphore with value 1 (we're the only user of it) */
  int sem_handle;
  CHECK_SYSCALL_PRE();
  CHECK(irt_sem.sem_create(&sem_handle, 1) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();

  CHECK_SYSCALL_PRE();
  CHECK(irt_sem.sem_wait(sem_handle) == 0);
  CHECK_SYSCALL_WRAPPED();

  CHECK_SYSCALL_PRE();
  CHECK(irt_sem.sem_post(sem_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();

  CHECK_SYSCALL_PRE();
  CHECK(irt_sem.sem_destroy(sem_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
#endif
}