/* Check that we can rewrite instruction that crosses align boundaries. */ void test_replacing_code_slowpaths(void) { uint8_t *load_area = allocate_code_space(1); uint8_t buf[NACL_BUNDLE_SIZE]; size_t size; int rc; /* Offsets to copy an instruction to. */ int off1, off2, off3; fill_nops(buf, sizeof(buf)); size = (size_t) (&template_instr_end - &template_instr); assert(size <= 5); off1 = 4 - size + 1; /* Cross 4 byte boundary */ off2 = 24 - size + 1; /* Cross 8 byte boundary */ off3 = 16 - size + 1; /* Cross 16 byte boundary */ memcpy(buf + off1, &template_instr, size); memcpy(buf + off2, &template_instr, size); memcpy(buf + off3, &template_instr, size); rc = nacl_dyncode_create(load_area, buf, sizeof(buf)); assert(rc == 0); memcpy(buf + off1, &template_instr_replace, size); rc = nacl_dyncode_modify(load_area + off1, buf + off1, size); assert(rc == 0); assert(memcmp(buf + off1, load_area + off1, size) == 0); memcpy(buf + off2, &template_instr_replace, size); rc = nacl_dyncode_modify(load_area + off2, buf + off2, size); assert(rc == 0); assert(memcmp(buf + off2, load_area + off2, size) == 0); memcpy(buf + off3, &template_instr_replace, size); rc = nacl_dyncode_modify(load_area + off3, buf + off3, size); assert(rc == 0); assert(memcmp(buf + off3, load_area + off3, size) == 0); }
void test_external_jump_target_replacement(void) { uint8_t *load_area = allocate_code_space(1); /* BUF_SIZE * 2 because this function necessarily has an extra bundle. */ uint8_t buf[BUF_SIZE * 2]; int rc; int (*func)(void); const int kNaClBundleSize = NACL_BUNDLE_SIZE; copy_and_pad_fragment(buf, sizeof(buf), &template_func_external_jump_target, &template_func_external_jump_target_end); rc = nacl_dyncode_create(load_area, buf, sizeof(buf)); assert(rc == 0); func = (int (*)(void)) (uintptr_t) load_area; rc = func(); assert(rc == MARKER_OLD); copy_and_pad_fragment(buf, sizeof(buf), &template_func_external_jump_target_replace, &template_func_external_jump_target_replace_end); /* Only copy one bundle so we can test an unaligned external jump target */ rc = nacl_dyncode_modify(load_area, buf, kNaClBundleSize); assert(rc == 0); func = (int (*)(void)) (uintptr_t) load_area; rc = func(); assert(rc == MARKER_NEW); }
/* Check code replacement constraints */ void test_illegal_code_replacment(void) { uint8_t *load_area = allocate_code_space(1); uint8_t buf[BUF_SIZE]; int rc; int i; int (*func)(void); copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end); rc = nacl_dyncode_create(load_area, buf, sizeof(buf)); assert(rc == 0); func = (int (*)(void)) (uintptr_t) load_area; rc = func(); assert(rc == MARKER_OLD); for (i = 0; i < (sizeof(illegal_code_sections) / sizeof(struct code_section)); i++) { printf("\t%s\n", illegal_code_sections[i].name); /* write illegal replacement to the same location */ copy_and_pad_fragment(buf, sizeof(buf), illegal_code_sections[i].start, illegal_code_sections[i].end); rc = nacl_dyncode_modify(load_area, buf, sizeof(buf)); assert(rc != 0); func = (int (*)(void)) (uintptr_t) load_area; rc = func(); assert(rc == MARKER_OLD); } }
/* Check that we can dynamically rewrite code. */ void test_replacing_code_unaligned(void) { uint8_t *load_area = allocate_code_space(1); uint8_t buf[BUF_SIZE]; int first_diff = 0; int rc; int (*func)(void); copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end); rc = nacl_dyncode_create(load_area, buf, sizeof(buf)); assert(rc == 0); func = (int (*)(void)) (uintptr_t) load_area; rc = func(); assert(rc == MARKER_OLD); /* write replacement to the same location, unaligned */ copy_and_pad_fragment(buf, sizeof(buf), &template_func_replacement, &template_func_replacement_end); /* we find first byte where old and new code differs */ while (buf[first_diff] == load_area[first_diff] && first_diff < sizeof buf) { first_diff++; } /* and check, that there is some data in common, and some different */ assert(first_diff > 0 && first_diff < sizeof(buf)); rc = nacl_dyncode_modify(load_area+first_diff, buf+first_diff, sizeof(buf)-first_diff); assert(rc == 0); func = (int (*)(void)) (uintptr_t) load_area; rc = func(); assert(rc == MARKER_NEW); }
/* Load fragment1 into fresh code space, then attempt to replace it in
 * place with fragment2.  Returns the nacl_dyncode_modify() result
 * (0 on success, non-zero when the replacement is rejected). */
int test_simle_replacement(const char *fragment1, const char *fragment1_end,
                           const char *fragment2, const char *fragment2_end) {
  uint8_t *target = allocate_code_space(1);
  uint8_t staging[BUF_SIZE];
  int status;

  /* The original version is fine. */
  copy_and_pad_fragment(staging, sizeof(staging), fragment1, fragment1_end);
  status = nacl_dyncode_create(target, staging, sizeof(staging));
  assert(status == 0);

  copy_and_pad_fragment(staging, sizeof(staging), fragment2, fragment2_end);
  status = nacl_dyncode_modify(target, staging, sizeof(staging));
  return status;
}
/* Check that we can dynamically rewrite code. */ void test_replacing_code(void) { uint8_t *load_area = allocate_code_space(1); uint8_t buf[BUF_SIZE]; int rc; int (*func)(void); copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end); rc = nacl_dyncode_create(load_area, buf, sizeof(buf)); assert(rc == 0); func = (int (*)(void)) (uintptr_t) load_area; rc = func(); assert(rc == MARKER_OLD); /* write replacement to the same location */ copy_and_pad_fragment(buf, sizeof(buf), &template_func_replacement, &template_func_replacement_end); rc = nacl_dyncode_modify(load_area, buf, sizeof(buf)); assert(rc == 0); func = (int (*)(void)) (uintptr_t) load_area; rc = func(); assert(rc == MARKER_NEW); }
/* Probe whether dynamic code replacement is available: a zero-length
 * modify call succeeds only when the feature is enabled. */
int is_replacement_enabled(void) {
  char scratch;
  int rc = nacl_dyncode_modify(allocate_code_space(1), &scratch, 0);
  return rc == 0;
}
void test_syscall_wrappers(void) {
  /*
   * This tests whether various IRT calls generate
   * blocking-notification callbacks. The test expectations here are
   * subject to change. We might need to update them when the IRT or
   * the NaCl trusted runtime are changed.
   *
   * For example, if the IRT's mutex_lock() is always reported as
   * blocking today, it might not be reported as blocking in the
   * uncontended case in the future.
   *
   * Conversely, while the IRT's mutex_unlock() might always be
   * reported as non-blocking today, in a future implementation it
   * might briefly hold a lock to inspect a futex wait queue, which
   * might be reported as blocking.
   *
   * The user-code libpthread implementation is similarly subject to
   * change, but it is one level removed from the IRT interfaces that
   * generate blocking-notification callbacks. Therefore, we test the
   * IRT interfaces rather than testing pthread_mutex, pthread_cond,
   * etc.
   */
  /* Snapshots of the global callback counters; the CHECK_SYSCALL_*
   * macros (defined outside this chunk) presumably read/update these
   * locals -- TODO confirm against the macro definitions. */
  unsigned int local_pre_call_count = nacl_pre_calls;
  /* NOTE(review): initialized from nacl_pre_calls, not a post-call
   * counter -- looks suspicious; confirm whether this is intentional. */
  unsigned int local_post_call_count = nacl_pre_calls;
  /* A set of nonsense arguments to keep from having a bunch
   * of literal values below. */
  const int fd = -1;
  void* ptr = NULL;
  const size_t size = 0;
  /* Test all syscalls to make sure we are wrapping all the
   * syscalls we are trying to wrap. We don't care about the
   * args or return values as long as the syscall is made.
   */
  CHECK_SYSCALL_PRE();
  read(fd, ptr, size);
  CHECK_SYSCALL_WRAPPED();
  CHECK_SYSCALL_PRE();
  write(fd, ptr, size);
  CHECK_SYSCALL_WRAPPED();
  CHECK_SYSCALL_PRE();
  nacl_dyncode_create(ptr, ptr, size);
  CHECK_SYSCALL_WRAPPED();
  CHECK_SYSCALL_PRE();
  nacl_dyncode_modify(ptr, ptr, size);
  CHECK_SYSCALL_WRAPPED();
  CHECK_SYSCALL_PRE();
  nacl_dyncode_delete(ptr, size);
  CHECK_SYSCALL_WRAPPED();
  CHECK_SYSCALL_PRE();
  nanosleep(ptr, ptr);
  CHECK_SYSCALL_WRAPPED();
  CHECK_SYSCALL_PRE();
  open(ptr, 0, O_RDWR);
  CHECK_SYSCALL_WRAPPED();
  CHECK_SYSCALL_PRE();
  sched_yield();
  CHECK_SYSCALL_WRAPPED();
  /*
   * This initializes __nc_irt_mutex, __nc_irt_cond and __nc_irt_sem
   * as a side effect.
   */
  struct nacl_irt_thread irt_thread;
  __nc_initialize_interfaces(&irt_thread);
  /* Check the IRT's mutex interface */
  int mutex_handle;
  CHECK_SYSCALL_PRE();
  CHECK(__nc_irt_mutex.mutex_create(&mutex_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(__nc_irt_mutex.mutex_lock(mutex_handle) == 0);
  CHECK_SYSCALL_WRAPPED();
  /* trylock on the mutex we already hold: expected to fail with EBUSY
   * without being reported as a blocking call. */
  CHECK_SYSCALL_PRE();
  CHECK(__nc_irt_mutex.mutex_trylock(mutex_handle) == EBUSY);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(__nc_irt_mutex.mutex_unlock(mutex_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(__nc_irt_mutex.mutex_destroy(mutex_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  /* Check the IRT's condvar interface */
  int cond_handle;
  CHECK_SYSCALL_PRE();
  CHECK(__nc_irt_cond.cond_create(&cond_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(__nc_irt_cond.cond_signal(cond_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(__nc_irt_cond.cond_broadcast(cond_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  /* A timed wait with an absolute time in the past times out immediately,
   * but is still expected to be reported as a blocking call. */
  CHECK(__nc_irt_mutex.mutex_create(&mutex_handle) == 0);
  CHECK(__nc_irt_mutex.mutex_lock(mutex_handle) == 0);
  struct timespec abstime = { 0, 0 };
  CHECK_SYSCALL_PRE();
  CHECK(__nc_irt_cond.cond_timed_wait_abs(cond_handle, mutex_handle,
                                          &abstime) == ETIMEDOUT);
  CHECK_SYSCALL_WRAPPED();
  CHECK(__nc_irt_mutex.mutex_unlock(mutex_handle) == 0);
  CHECK(__nc_irt_mutex.mutex_destroy(mutex_handle) == 0);
  CHECK_SYSCALL_PRE();
  CHECK(__nc_irt_cond.cond_destroy(cond_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  /* Check the IRT's semaphore interface */
  /* Semaphore with value 1 (we're the only user of it) */
  int sem_handle;
  CHECK_SYSCALL_PRE();
  CHECK(__nc_irt_sem.sem_create(&sem_handle, 1) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(__nc_irt_sem.sem_wait(sem_handle) == 0);
  CHECK_SYSCALL_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(__nc_irt_sem.sem_post(sem_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(__nc_irt_sem.sem_destroy(sem_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
}
void test_syscall_wrappers(void) {
  /*
   * This tests whether various IRT calls generate
   * blocking-notification callbacks. The test expectations here are
   * subject to change. We might need to update them when the IRT or
   * the NaCl trusted runtime are changed.
   *
   * For example, if the IRT's mutex_lock() is always reported as
   * blocking today, it might not be reported as blocking in the
   * uncontended case in the future.
   *
   * Conversely, while the IRT's mutex_unlock() might always be
   * reported as non-blocking today, in a future implementation it
   * might briefly hold a lock to inspect a futex wait queue, which
   * might be reported as blocking.
   *
   * The user-code libpthread implementation is similarly subject to
   * change, but it is one level removed from the IRT interfaces that
   * generate blocking-notification callbacks. Therefore, we test the
   * IRT interfaces rather than testing pthread_mutex, pthread_cond,
   * etc.
   */
  /* Snapshots of the global callback counters; the CHECK_SYSCALL_*
   * macros (defined outside this chunk) presumably read/update these
   * locals -- TODO confirm against the macro definitions. */
  unsigned int local_pre_call_count = nacl_pre_calls;
  /* NOTE(review): initialized from nacl_pre_calls, not a post-call
   * counter -- looks suspicious; confirm whether this is intentional. */
  unsigned int local_post_call_count = nacl_pre_calls;
  /* A set of nonsense arguments to keep from having a bunch
   * of literal values below. */
  const int fd = -1;
  void* ptr = NULL;
  const size_t size = 0;
  /* Test all syscalls to make sure we are wrapping all the
   * syscalls we are trying to wrap. We don't care about the
   * args or return values as long as the syscall is made.
   */
  CHECK_SYSCALL_PRE();
  read(fd, ptr, size);
  CHECK_SYSCALL_WRAPPED();
  CHECK_SYSCALL_PRE();
  write(fd, ptr, size);
  CHECK_SYSCALL_WRAPPED();
  /* Unlike read()/write(), the dyncode calls are expected NOT to
   * generate blocking-notification callbacks in this configuration. */
  CHECK_SYSCALL_PRE();
  nacl_dyncode_create(ptr, ptr, size);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  nacl_dyncode_modify(ptr, ptr, size);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  nacl_dyncode_delete(ptr, size);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  nanosleep(ptr, ptr);
  CHECK_SYSCALL_WRAPPED();
  CHECK_SYSCALL_PRE();
  open(ptr, 0, O_RDWR);
  CHECK_SYSCALL_WRAPPED();
  CHECK_SYSCALL_PRE();
  sched_yield();
  CHECK_SYSCALL_WRAPPED();
  /*
   * We only test the following threading-related interfaces when
   * using the IRT, because it is awkward to test this when using
   * nacl_sys_private, and it doesn't really matter whether
   * nacl_sys_private supports the "blockhooks" (a.k.a. "gc_hooks")
   * interface because nacl_sys_private bypasses NaCl's stable ABI and
   * is not officially supported.
   */
#if TESTS_USE_IRT
  struct nacl_irt_futex irt_futex;
  struct nacl_irt_mutex irt_mutex;
  struct nacl_irt_cond irt_cond;
  struct nacl_irt_sem irt_sem;
  __libnacl_mandatory_irt_query(NACL_IRT_FUTEX_v0_1,
                                &irt_futex, sizeof(irt_futex));
  __libnacl_mandatory_irt_query(NACL_IRT_MUTEX_v0_1,
                                &irt_mutex, sizeof(irt_mutex));
  __libnacl_mandatory_irt_query(NACL_IRT_COND_v0_1,
                                &irt_cond, sizeof(irt_cond));
  __libnacl_mandatory_irt_query(NACL_IRT_SEM_v0_1,
                                &irt_sem, sizeof(irt_sem));
  /* Check the IRT's futex interface */
  int futex_value = 123;
  /* Waiting on a value that doesn't match the stored one fails fast
   * with EWOULDBLOCK, but still counts as a blocking call. */
  CHECK_SYSCALL_PRE();
  CHECK(irt_futex.futex_wait_abs(&futex_value, futex_value + 1,
                                 NULL) == EWOULDBLOCK);
  CHECK_SYSCALL_WRAPPED();
  int woken_count;
  CHECK_SYSCALL_PRE();
  CHECK(irt_futex.futex_wake(&futex_value, 1, &woken_count) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK(woken_count == 0);
  /* Check the IRT's mutex interface */
  int mutex_handle;
  CHECK_SYSCALL_PRE();
  CHECK(irt_mutex.mutex_create(&mutex_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(irt_mutex.mutex_lock(mutex_handle) == 0);
  CHECK_SYSCALL_WRAPPED();
  /* trylock on the mutex we already hold: expected to fail with EBUSY
   * without being reported as a blocking call. */
  CHECK_SYSCALL_PRE();
  CHECK(irt_mutex.mutex_trylock(mutex_handle) == EBUSY);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(irt_mutex.mutex_unlock(mutex_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(irt_mutex.mutex_destroy(mutex_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  /* Check the IRT's condvar interface */
  int cond_handle;
  CHECK_SYSCALL_PRE();
  CHECK(irt_cond.cond_create(&cond_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(irt_cond.cond_signal(cond_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(irt_cond.cond_broadcast(cond_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  /* A timed wait with an absolute time in the past times out immediately,
   * but is still expected to be reported as a blocking call. */
  CHECK(irt_mutex.mutex_create(&mutex_handle) == 0);
  CHECK(irt_mutex.mutex_lock(mutex_handle) == 0);
  struct timespec abstime = { 0, 0 };
  CHECK_SYSCALL_PRE();
  CHECK(irt_cond.cond_timed_wait_abs(cond_handle, mutex_handle,
                                     &abstime) == ETIMEDOUT);
  CHECK_SYSCALL_WRAPPED();
  CHECK(irt_mutex.mutex_unlock(mutex_handle) == 0);
  CHECK(irt_mutex.mutex_destroy(mutex_handle) == 0);
  CHECK_SYSCALL_PRE();
  CHECK(irt_cond.cond_destroy(cond_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  /* Check the IRT's semaphore interface */
  /* Semaphore with value 1 (we're the only user of it) */
  int sem_handle;
  CHECK_SYSCALL_PRE();
  CHECK(irt_sem.sem_create(&sem_handle, 1) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(irt_sem.sem_wait(sem_handle) == 0);
  CHECK_SYSCALL_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(irt_sem.sem_post(sem_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
  CHECK_SYSCALL_PRE();
  CHECK(irt_sem.sem_destroy(sem_handle) == 0);
  CHECK_SYSCALL_NOT_WRAPPED();
#endif
}