void test_end_of_code_region() {
  int rc;
  void *dest;
  uint8_t data[32];
  fill_nops(data, sizeof(data));

  /* This tries to load into the data segment, which is definitely not
     allowed. */
  dest = (uint8_t *) DYNAMIC_CODE_SEGMENT_END;
  rc = nacl_load_code(dest, data, sizeof(data));
  assert(rc == -EFAULT);

  /* This tries to load into the last bundle of the code region, which
     sel_ldr disallows just in case there is some CPU bug in which the
     CPU fails to check for running off the end of an x86 code
     segment.  This is applied to other architectures for
     consistency. */
  dest = (uint8_t *) DYNAMIC_CODE_SEGMENT_END - sizeof(data);
  rc = nacl_load_code(dest, data, sizeof(data));
  assert(rc == -EFAULT);

  dest = (uint8_t *) DYNAMIC_CODE_SEGMENT_END - sizeof(data) * 2;
  rc = nacl_load_code(dest, data, sizeof(data));
  assert(rc == 0);
}
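
/*
 * The tests above and below treat nacl_load_code() as returning 0 on
 * success and a negative errno value on failure.  A minimal sketch of what
 * such a wrapper might look like, assuming the nacl_dyncode_create()
 * wrapper from <nacl/nacl_dyncode.h>; the real helper is defined elsewhere
 * in this test and the example_* name here is hypothetical.
 */
static int example_nacl_load_code(void *dest, void *src, int size) {
  int rc = nacl_dyncode_create(dest, src, size);
  /* Fold errno into the return value so callers can assert on one code. */
  return rc == 0 ? 0 : -errno;
}
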
void test_branches_outside_chunk() {
  char *load_area = allocate_code_space(1);
  int rc;
  int size = &branch_forwards_end - &branch_forwards;
  assert(size == 16 || size == 32);
  assert(&branch_backwards_end - &branch_backwards == size);

  rc = nacl_load_code(load_area, &branch_forwards, size);
  assert(rc == 0);
  rc = nacl_load_code(load_area + size, &branch_backwards, size);
  assert(rc == 0);
}
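
/*
 * allocate_code_space() is assumed to hand out non-overlapping chunks of
 * the dynamic code region, one or more dynamic-code pages at a time.  A
 * minimal sketch of such an allocator, assuming a DYNAMIC_CODE_SEGMENT_START
 * counterpart to the DYNAMIC_CODE_SEGMENT_END macro used above; the
 * example_* name is hypothetical and the real helper is defined elsewhere.
 */
static char *example_allocate_code_space(int pages) {
  static char *next_addr;
  char *addr;
  if (next_addr == NULL)
    next_addr = (char *) DYNAMIC_CODE_SEGMENT_START;
  addr = next_addr;
  next_addr += DYNAMIC_CODE_PAGE_SIZE * pages;
  /* Do not hand out space beyond the end of the dynamic code region. */
  assert(next_addr <= (char *) DYNAMIC_CODE_SEGMENT_END);
  return addr;
}
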
void test_fail_on_overwrite() {
  void *load_area = allocate_code_space(1);
  uint8_t buf[32];
  int rc;

  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);

  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == 0);

  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);

  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == -EINVAL);
}
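
/*
 * copy_and_pad_fragment() is assumed to copy the machine code between two
 * start/end labels into a fixed-size buffer and pad the rest with no-ops,
 * so that the result is a whole number of bundles.  A minimal sketch under
 * that assumption; the example_* name is hypothetical and the real helper
 * is defined elsewhere in this test.
 */
static void example_copy_and_pad_fragment(
    uint8_t *dest, int dest_size,
    const char *fragment_start, const char *fragment_end) {
  int fragment_size = fragment_end - fragment_start;
  assert(dest_size % NACL_BUNDLE_SIZE == 0);
  assert(fragment_size <= dest_size);
  fill_nops(dest, dest_size);
  memcpy(dest, fragment_start, fragment_size);
}
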
/* The syscall may have to mmap() shared memory temporarily,
   so there is some interaction with page size.
   Check that we can load to non-page-aligned addresses. */
void test_loading_code_non_page_aligned() {
  char *load_area = allocate_code_space(1);
  uint8_t buf[32];
  int rc;

  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);

  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == 0);
  assert(memcmp(load_area, buf, sizeof(buf)) == 0);

  load_area += 32;
  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == 0);
  assert(memcmp(load_area, buf, sizeof(buf)) == 0);
}
void test_hlt_filled_bundle() {
  uint8_t bad_code[NUM_BUNDLES_FOR_HLT * NACL_BUNDLE_SIZE];
  void *load_area;
  int ix;

  for (ix = 0; ix < NUM_BUNDLES_FOR_HLT; ++ix) {
    fill_nops(bad_code, sizeof bad_code);
    fill_hlts(bad_code + ix * NACL_BUNDLE_SIZE, NACL_BUNDLE_SIZE);

    load_area = allocate_code_space(1);
    /* HLT-filled bundles are valid code, so this load succeeds. */
    assert(0 == nacl_load_code(load_area, bad_code, sizeof bad_code));
    /* But loading to the same address a second time is rejected. */
    assert(0 != nacl_load_code(load_area, bad_code, sizeof bad_code));
  }
}
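
/*
 * fill_nops() and fill_hlts() fill a buffer with architecture-specific
 * no-op and halt instructions.  On x86 both are single-byte opcodes
 * (0x90 = NOP, 0xf4 = HLT), so a sketch for that case is just a memset;
 * other architectures need word-sized writes of their own encodings.  The
 * example_* names are hypothetical; the real helpers are defined elsewhere.
 */
#if defined(__i386__) || defined(__x86_64__)
static void example_fill_nops(uint8_t *data, size_t size) {
  memset(data, 0x90, size);  /* x86 NOP */
}
static void example_fill_hlts(uint8_t *data, size_t size) {
  memset(data, 0xf4, size);  /* x86 HLT */
}
#endif
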
void test_validation_error_does_not_leak(void) {
  void *load_area = allocate_code_space(1);
  uint8_t buf[BUF_SIZE];
  int rc;

  copy_and_pad_fragment(buf, sizeof(buf), &invalid_code, &invalid_code_end);
  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == -EINVAL);

  /*
   * Make sure that the failed validation didn't claim the memory.
   * See: http://code.google.com/p/nativeclient/issues/detail?id=2566
   */
  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);
  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == 0);
}
void test_fail_on_validation_error() {
  void *load_area = allocate_code_space(1);
  uint8_t buf[32];
  int rc;

  copy_and_pad_fragment(buf, sizeof(buf), &invalid_code, &invalid_code_end);

  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == -EINVAL);
}
/* Since there is an interaction with page size, we also test loading
   a multi-page chunk of code. */
void test_loading_large_chunk() {
  char *load_area = allocate_code_space(2);
  int size = 0x20000;
  uint8_t *data = alloca(size);
  int rc;

  fill_nops(data, size);
  rc = nacl_load_code(load_area, data, size);
  assert(rc == 0);
  assert(memcmp(load_area, data, size) == 0);
}
/*
 * Check that dyncode_create() works on a set of pages when a strict
 * subset of those pages were allocated by a previous dyncode_create()
 * call.  This provides some coverage of the coalescing of mprotect()
 * calls that dyncode_create() does.
 */
void test_demand_alloc_of_fragmented_pages(void) {
  int smaller_size = 2 * DYNAMIC_CODE_PAGE_SIZE;
  int smaller_size_load_offset = 2 * DYNAMIC_CODE_PAGE_SIZE;
  int larger_size = 6 * DYNAMIC_CODE_PAGE_SIZE;
  char *load_area = allocate_code_space(6);
  uint8_t *data = alloca(larger_size);
  int rc;

  fill_nops(data, larger_size);

  /* Cause pages 2 and 3 to be allocated. */
  rc = nacl_load_code(load_area + smaller_size_load_offset,
                      data, smaller_size);
  assert(rc == 0);

  rc = dyncode_delete_with_retry(load_area + smaller_size_load_offset,
                                 smaller_size);
  assert(rc == 0);

  /* Cause pages 0, 1, 4 and 5 to be allocated as well. */
  rc = nacl_load_code(load_area, data, larger_size);
  assert(rc == 0);
}
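
/*
 * dyncode_delete_with_retry() is assumed to wrap nacl_dyncode_delete(),
 * which can fail with EAGAIN until other threads have passed a safe
 * unwind point.  A minimal sketch of such a retry loop; the example_* name
 * is hypothetical and the real helper is defined elsewhere in this test.
 */
static int example_dyncode_delete_with_retry(void *dest, size_t size) {
  int rc;
  do {
    rc = nacl_dyncode_delete(dest, size);
  } while (rc != 0 && errno == EAGAIN);
  return rc;
}
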
void test_fail_on_non_bundle_aligned_dest_addresses() {
  char *load_area = allocate_code_space(1);
  int rc;
  uint8_t nops[32];

  fill_nops(nops, sizeof(nops));

  /* Test unaligned destination. */
  rc = nacl_load_code(load_area + 1, nops, 32);
  assert(rc == -EINVAL);
  rc = nacl_load_code(load_area + 4, nops, 32);
  assert(rc == -EINVAL);

  /* Test unaligned size. */
  rc = nacl_load_code(load_area, nops + 1, 31);
  assert(rc == -EINVAL);
  rc = nacl_load_code(load_area, nops + 4, 28);
  assert(rc == -EINVAL);

  /* Check that the code we're trying works otherwise. */
  rc = nacl_load_code(load_area, nops, 32);
  assert(rc == 0);
}
/*
 * Check that regions surrounding the region we load code into are
 * correctly filled with halt instructions.  Loading code causes the
 * pages to become allocated, and unused parts of these pages should
 * be filled with halts.
 */
void test_demand_alloc_surrounding_hlt_filling(void) {
  int pad_size = 0x4000; /* This must be less than one 64k page. */
  int code_size = 0x28000;
  int total_size = pad_size * 2 + code_size;
  char *load_area = allocate_code_space(total_size / DYNAMIC_CODE_PAGE_SIZE);
  uint8_t *data = alloca(code_size);
  int rc;

  assert(total_size % DYNAMIC_CODE_PAGE_SIZE == 0);
  fill_nops(data, code_size);
  rc = nacl_load_code(load_area + pad_size, data, code_size);
  assert(rc == 0);
  check_region_is_filled_with_hlts(load_area, pad_size);
  assert(memcmp(load_area + pad_size, data, code_size) == 0);
  check_region_is_filled_with_hlts(load_area + pad_size + code_size, pad_size);
}
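
/*
 * check_region_is_filled_with_hlts() is assumed to assert that every byte
 * (or instruction word, on non-x86) of a region holds the halt encoding
 * used for padding.  A sketch for the x86 case, where the padding byte is
 * HLT (0xf4); the example_* name is hypothetical and the real helper is
 * defined elsewhere in this test.
 */
#if defined(__i386__) || defined(__x86_64__)
static void example_check_region_is_filled_with_hlts(
    const char *data, size_t size) {
  size_t i;
  for (i = 0; i < size; i++) {
    assert((uint8_t) data[i] == 0xf4);
  }
}
#endif
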
void test_fail_on_load_to_data_area(void) {
  uint8_t *data;
  int rc;

  fill_hlts(block_in_data_segment, sizeof(block_in_data_segment));

  /*
   * Align to bundle size so that we don't fail for a reason we're not
   * testing for.
   */
  data = block_in_data_segment;
  while (((uintptr_t) data) % NACL_BUNDLE_SIZE != 0)
    data++;
  rc = nacl_load_code(data, data, NACL_BUNDLE_SIZE);
  assert(rc == -EFAULT);
}
/* Check that we can load and run code. */
void test_loading_code() {
  void *load_area = allocate_code_space(1);
  uint8_t buf[32];
  int rc;
  int (*func)();

  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);

  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == 0);
  assert(memcmp(load_area, buf, sizeof(buf)) == 0);
  /* Need double cast otherwise gcc complains with "ISO C forbids
     conversion of object pointer to function pointer type
     [-pedantic]". */
  func = (int (*)()) (uintptr_t) load_area;
  rc = func();
  assert(rc == 1234);
}
/* Check that we can load code at the very beginning of the dynamic section. */
void test_loading_code_on_first_dynamic_page() {
  const unsigned int kPageMask = 0xFFFF;
  void *load_area = (void *) ((uintptr_t) (etext + kPageMask) & ~kPageMask);
  uint8_t buf[32];
  int rc;
  int (*func)();

  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);

  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == 0);
  assert(memcmp(load_area, buf, sizeof(buf)) == 0);
  /* Need double cast otherwise gcc complains with "ISO C forbids
     conversion of object pointer to function pointer type
     [-pedantic]". */
  func = (int (*)()) (uintptr_t) load_area;
  rc = func();
  assert(rc == 1234);
}
/*
 * This is mostly the same as test_loading_code() except that we
 * repeat the test many times within the same page.  Unlike the other
 * tests, this will consistently fail on ARM if we do not flush the
 * instruction cache, so it reproduces the bug
 * http://code.google.com/p/nativeclient/issues/detail?id=699
 */
void test_stress(void) {
  void *load_area = allocate_code_space(1);
  uint8_t *dest;
  uint8_t *dest_max;
  uint8_t buf[BUF_SIZE];

  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);

  dest_max = (uint8_t *) load_area + DYNAMIC_CODE_PAGE_SIZE;
  for (dest = load_area; dest < dest_max; dest += sizeof(buf)) {
    int (*func)(void);
    int rc;

    rc = nacl_load_code(dest, buf, sizeof(buf));
    assert(rc == 0);
    func = (int (*)(void)) (uintptr_t) dest;
    rc = func();
    assert(rc == MARKER_OLD);
  }
}
void test_loading_zero_size() {
  char *load_area = allocate_code_space(1);
  int rc = nacl_load_code(load_area, &template_func, 0);
  assert(rc == 0);
}
/* In principle we could load into the initially-loaded executable's
   code area, but at the moment we don't allow it. */
void test_fail_on_load_to_static_code_area() {
  int size = &hlts_end - &hlts;
  int rc = nacl_load_code(&hlts, &hlts, size);
  assert(rc == -EFAULT);
}