/*
 * Entry point: verify that the IRT's thread_create() produces a child
 * thread whose stack pointer satisfies the expected alignment contract,
 * no matter how misaligned the stack top we hand it is.
 *
 * For each misalignment 0..32 bytes we pass a deliberately offset stack
 * top, spin until the spawned thread has recorded its actual stack
 * pointer, and then check the recorded pointer against the supplied top
 * and the alignment rule.
 */
int main(void) {
  __nc_initialize_interfaces(&irt_thread);

  for (int misalign = 0; misalign <= 32; misalign++) {
    char *top = stack + sizeof(stack) - misalign;
    printf("Checking offset %i: stack_top=%p...\n", misalign, top);

    /* Reset the handshake state the child thread will fill in. */
    g_stack_ptr = NULL;
    g_stack_in_use = 1;
    void *dummy_tls = &dummy_tls;
    int err = irt_thread.thread_create(ThreadStartWrapper, top, dummy_tls);
    assert(err == 0);

    /* Spin until the thread exits. */
    while (g_stack_in_use) {
      sched_yield();
    }

    printf("got g_stack_ptr=%p\n", g_stack_ptr);
    /* The child's stack pointer must lie at or below the top we supplied
     * and be aligned once the below-alignment padding is accounted for. */
    assert(g_stack_ptr <= top);
    assert(((uintptr_t) g_stack_ptr + kStackPadBelowAlign)
               % kStackAlignment == 0);
  }
  return 0;
}
/*
 * Initializes all globals except for the initial thread structure.
 * Must run before any other threading operation; sets
 * __nc_thread_initialized as the final step so readers of that flag see
 * fully constructed state.  Aborts the process if any primitive cannot
 * be created.
 */
void __nc_initialize_globals(void) {
  /*
   * Fetch the ABI tables from the IRT. If we don't have these, all is lost.
   */
  __nc_initialize_interfaces(&irt_thread);
  /* Lock guarding the thread-management bookkeeping below. */
  if (pthread_mutex_init(&__nc_thread_management_lock, NULL) != 0)
    nc_abort();
  /*
   * Tell ThreadSanitizer to not generate happens-before arcs between uses of
   * this mutex. Otherwise we miss too many real races.
   * When not running under ThreadSanitizer, this is just a call to an empty
   * function.
   */
  ANNOTATE_NOT_HAPPENS_BEFORE_MUTEX(&__nc_thread_management_lock);
  if (pthread_cond_init(&__nc_last_thread_cond, NULL) != 0)
    nc_abort();
  /*
   * NOTE(review): presumably these are free lists of reusable per-thread
   * memory blocks (one list per block kind) — confirm against the
   * definitions of __nc_thread_memory_blocks.
   */
  STAILQ_INIT(&__nc_thread_memory_blocks[0]);
  STAILQ_INIT(&__nc_thread_memory_blocks[1]);
  /* Publish that the threading library is ready for use. */
  __nc_thread_initialized = 1;
}
void test_syscall_wrappers(void) { /* * This tests whether various IRT calls generate * blocking-notification callbacks. The test expectations here are * subject to change. We might need to update them when the IRT or * the NaCl trusted runtime are changed. * * For example, if the IRT's mutex_lock() is always reported as * blocking today, it might not be reported as blocking in the * uncontended case in the future. * * Conversely, while the IRT's mutex_unlock() might always be * reported as non-blocking today, in a future implementation it * might briefly hold a lock to inspect a futex wait queue, which * might be reported as blocking. * * The user-code libpthread implementation is similarly subject to * change, but it is one level removed from the IRT interfaces that * generate blocking-notification callbacks. Therefore, we test the * IRT interfaces rather than testing pthread_mutex, pthread_cond, * etc. */ unsigned int local_pre_call_count = nacl_pre_calls; unsigned int local_post_call_count = nacl_pre_calls; /* A set of nonsense arguments to keep from having a bunch * of literal values below. */ const int fd = -1; void* ptr = NULL; const size_t size = 0; /* Test all syscalls to make sure we are wrapping all the * syscalls we are trying to wrap. We don't care about the * args or return values as long as the syscall is made. 
*/ CHECK_SYSCALL_PRE(); read(fd, ptr, size); CHECK_SYSCALL_WRAPPED(); CHECK_SYSCALL_PRE(); write(fd, ptr, size); CHECK_SYSCALL_WRAPPED(); CHECK_SYSCALL_PRE(); nacl_dyncode_create(ptr, ptr, size); CHECK_SYSCALL_WRAPPED(); CHECK_SYSCALL_PRE(); nacl_dyncode_modify(ptr, ptr, size); CHECK_SYSCALL_WRAPPED(); CHECK_SYSCALL_PRE(); nacl_dyncode_delete(ptr, size); CHECK_SYSCALL_WRAPPED(); CHECK_SYSCALL_PRE(); nanosleep(ptr, ptr); CHECK_SYSCALL_WRAPPED(); CHECK_SYSCALL_PRE(); open(ptr, 0, O_RDWR); CHECK_SYSCALL_WRAPPED(); CHECK_SYSCALL_PRE(); sched_yield(); CHECK_SYSCALL_WRAPPED(); /* * This initializes __nc_irt_mutex, __nc_irt_cond and __nc_irt_sem * as a side effect. */ struct nacl_irt_thread irt_thread; __nc_initialize_interfaces(&irt_thread); /* Check the IRT's mutex interface */ int mutex_handle; CHECK_SYSCALL_PRE(); CHECK(__nc_irt_mutex.mutex_create(&mutex_handle) == 0); CHECK_SYSCALL_NOT_WRAPPED(); CHECK_SYSCALL_PRE(); CHECK(__nc_irt_mutex.mutex_lock(mutex_handle) == 0); CHECK_SYSCALL_WRAPPED(); CHECK_SYSCALL_PRE(); CHECK(__nc_irt_mutex.mutex_trylock(mutex_handle) == EBUSY); CHECK_SYSCALL_NOT_WRAPPED(); CHECK_SYSCALL_PRE(); CHECK(__nc_irt_mutex.mutex_unlock(mutex_handle) == 0); CHECK_SYSCALL_NOT_WRAPPED(); CHECK_SYSCALL_PRE(); CHECK(__nc_irt_mutex.mutex_destroy(mutex_handle) == 0); CHECK_SYSCALL_NOT_WRAPPED(); /* Check the IRT's condvar interface */ int cond_handle; CHECK_SYSCALL_PRE(); CHECK(__nc_irt_cond.cond_create(&cond_handle) == 0); CHECK_SYSCALL_NOT_WRAPPED(); CHECK_SYSCALL_PRE(); CHECK(__nc_irt_cond.cond_signal(cond_handle) == 0); CHECK_SYSCALL_NOT_WRAPPED(); CHECK_SYSCALL_PRE(); CHECK(__nc_irt_cond.cond_broadcast(cond_handle) == 0); CHECK_SYSCALL_NOT_WRAPPED(); CHECK(__nc_irt_mutex.mutex_create(&mutex_handle) == 0); CHECK(__nc_irt_mutex.mutex_lock(mutex_handle) == 0); struct timespec abstime = { 0, 0 }; CHECK_SYSCALL_PRE(); CHECK(__nc_irt_cond.cond_timed_wait_abs(cond_handle, mutex_handle, &abstime) == ETIMEDOUT); CHECK_SYSCALL_WRAPPED(); 
CHECK(__nc_irt_mutex.mutex_unlock(mutex_handle) == 0); CHECK(__nc_irt_mutex.mutex_destroy(mutex_handle) == 0); CHECK_SYSCALL_PRE(); CHECK(__nc_irt_cond.cond_destroy(cond_handle) == 0); CHECK_SYSCALL_NOT_WRAPPED(); /* Check the IRT's semaphore interface */ /* Semaphore with value 1 (we're the only user of it) */ int sem_handle; CHECK_SYSCALL_PRE(); CHECK(__nc_irt_sem.sem_create(&sem_handle, 1) == 0); CHECK_SYSCALL_NOT_WRAPPED(); CHECK_SYSCALL_PRE(); CHECK(__nc_irt_sem.sem_wait(sem_handle) == 0); CHECK_SYSCALL_WRAPPED(); CHECK_SYSCALL_PRE(); CHECK(__nc_irt_sem.sem_post(sem_handle) == 0); CHECK_SYSCALL_NOT_WRAPPED(); CHECK_SYSCALL_PRE(); CHECK(__nc_irt_sem.sem_destroy(sem_handle) == 0); CHECK_SYSCALL_NOT_WRAPPED(); }