static int timer_stress_worker(void* void_arg) {
    timer_stress_args* args = reinterpret_cast<timer_stress_args*>(void_arg);
    while (!atomic_load(&args->timer_stress_done)) {
        timer_t t = TIMER_INITIAL_VALUE(t);
        zx_duration_t timer_duration = rand_duration(ZX_MSEC(5));

        // Set a timer, then switch to a different CPU to ensure we race with it.
        arch_disable_ints();
        uint timer_cpu = arch_curr_cpu_num();
        const Deadline deadline = Deadline::no_slack(current_time() + timer_duration);
        timer_set(&t, deadline, timer_stress_cb, void_arg);
        thread_set_cpu_affinity(get_current_thread(), ~cpu_num_to_mask(timer_cpu));
        DEBUG_ASSERT(arch_curr_cpu_num() != timer_cpu);
        arch_enable_ints();

        // We're now running on something other than timer_cpu.
        atomic_add_u64(&args->num_set, 1);

        // Sleep for the timer duration so that this thread's timer_cancel races with the timer
        // callback. We want to race to ensure there are no synchronization or memory visibility
        // issues.
        thread_sleep_relative(timer_duration);
        timer_cancel(&t);
    }
    return 0;
}
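// For context: timer_stress_worker relies on a shared argument struct and a timer callback that
// are defined elsewhere in the test file (and would have to precede the worker in a real source
// file). The sketch below is a plausible minimal shape; the field names and the callback body
// are assumptions, not the actual definitions.
struct timer_stress_args {
    volatile int timer_stress_done;  // set by the test harness to stop the workers
    volatile uint64_t num_set;       // incremented by the worker after each timer_set
    volatile uint64_t num_fired;     // incremented by the callback each time it fires
};

// Hypothetical callback: it only needs to touch the shared struct, so the cancel/fire race
// above exercises cross-CPU synchronization and memory visibility.
static void timer_stress_cb(timer_t* t, zx_time_t now, void* void_arg) {
    timer_stress_args* args = reinterpret_cast<timer_stress_args*>(void_arg);
    atomic_add_u64(&args->num_fired, 1);
}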
void kmain(void) {
    task_t *task_shell;
    int ret;

    /*************** Init Arch ****************/
    arch_early_init();
    show_logo();

    /*************** Init Platform ****************/
    platform_init();
    timer_init();
    buses_init();

    /*************** Init Task ****************/
    task_init();
    task_create_init();

    /*************** Init Workqueues ****************/
    init_workqueues();

    /*************** Init File System ****************/
    register_filesystem(&fat_fs);

    /*************** Create Shell Task ****************/
    task_shell = task_alloc("shell", 0x2000, 5);
    if (NULL == task_shell) {
        return;
    }

    ret = task_create(task_shell, init_shell, 0);
    if (ret) {
        printk("Create init shell task failed\n");
    }

    sema_init(&sem, 1);
    arch_enable_ints();

    /* Idle loop: never exits, so the task_free below is unreachable. */
    while (1) {
        enter_critical_section();
        arch_idle();
        task_schedule();
        exit_critical_section();
    }

    task_free(task_shell);
}
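/*
 * For context: kmain hands init_shell to task_create as the shell task's entry point. The real
 * function lives elsewhere; the sketch below is a hypothetical minimal entry matching the
 * task_create(task, entry, arg) call used above, not the actual implementation.
 */
static int init_shell(void *arg) {
    printk("shell task started\n");
    for (;;) {
        /* read a command line and dispatch it; the real shell loop lives elsewhere */
    }
    return 0;
}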
// See that timer_trylock_or_cancel acquires the lock when the holder releases it.
static bool trylock_or_cancel_get_lock() {
    BEGIN_TEST;

    // We need 2 or more CPUs for this test.
    if (get_num_cpus_online() < 2) {
        printf("skipping test trylock_or_cancel_get_lock, not enough online cpus\n");
        return true;
    }

    timer_args arg{};
    timer_t t = TIMER_INITIAL_VALUE(t);

    SpinLock lock;
    arg.lock = lock.GetInternal();
    arg.wait = 1;

    arch_disable_ints();

    uint timer_cpu = arch_curr_cpu_num();
    const Deadline deadline = Deadline::no_slack(current_time() + ZX_USEC(100));
    timer_set(&t, deadline, timer_trylock_cb, &arg);

    // The timer is set to run on timer_cpu, switch to a different CPU, acquire the spinlock then
    // signal the callback to proceed.
    thread_set_cpu_affinity(get_current_thread(), ~cpu_num_to_mask(timer_cpu));
    DEBUG_ASSERT(arch_curr_cpu_num() != timer_cpu);

    arch_enable_ints();

    {
        AutoSpinLock guard(&lock);

        while (!atomic_load(&arg.timer_fired)) {
        }

        // Callback should now be running. Tell it to stop waiting and start trylocking.
        atomic_store(&arg.wait, 0);
    }

    // See that timer_cancel returns false indicating that the timer ran.
    ASSERT_FALSE(timer_cancel(&t), "");

    // Note, we cannot assert the value of arg.result. We have both released the lock and canceled
    // the timer, but we don't know which of these events the timer observed first.

    END_TEST;
}
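// For context: the test depends on a timer_args struct and a timer_trylock_cb callback defined
// elsewhere in the file (and preceding the test in a real source file). A plausible sketch
// follows; the field names, the ZX_ERR_CANCELED/ZX_OK result convention, and the callback body
// are assumptions. Only the wait/timer_fired handshake is implied by the test itself.
struct timer_args {
    volatile int timer_fired;  // callback sets this once it is running
    volatile int wait;         // test clears this to let the callback proceed
    volatile int result;       // outcome of the trylock; see the test's closing note
    spin_lock_t* lock;         // the spinlock the test thread holds while the callback spins
};

static void timer_trylock_cb(timer_t* t, zx_time_t now, void* void_arg) {
    timer_args* arg = reinterpret_cast<timer_args*>(void_arg);
    atomic_store(&arg->timer_fired, 1);

    // Spin until the test thread, which holds the lock, tells us to start trylocking.
    while (atomic_load(&arg->wait)) {
    }

    // Race with the test thread: either we acquire the lock once it is released, or we observe
    // the cancel first. Assumed convention: timer_trylock_or_cancel returns true on cancel.
    if (timer_trylock_or_cancel(t, arg->lock)) {
        atomic_store(&arg->result, ZX_ERR_CANCELED);
    } else {
        spin_unlock(arg->lock);
        atomic_store(&arg->result, ZX_OK);
    }
}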
static void initial_thread_func(void) {
    int ret;
    thread_t *current_thread = get_current_thread();

    LTRACEF("initial_thread_func: thread %p calling %p with arg %p\n",
            current_thread, current_thread->entry, current_thread->arg);

    /* release the thread lock that was implicitly held across the reschedule */
    spin_unlock(&thread_lock);
    arch_enable_ints();

    ret = current_thread->entry(current_thread->arg);

    LTRACEF("initial_thread_func: thread %p exiting with %d\n", current_thread, ret);

    thread_exit(ret);
}
static void initial_thread_func(void) {
    thread_t *ct = get_current_thread();

#if LOCAL_TRACE
    LTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
    dump_thread(ct);
#endif

    /* release the thread lock that was implicitly held across the reschedule */
    spin_unlock(&thread_lock);
    arch_enable_ints();

    int ret = ct->entry(ct->arg);

    LTRACEF("thread %p exiting with %d\n", ct, ret);

    thread_exit(ret);
}
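/*
 * For context: both variants of initial_thread_func above are the first code a new thread runs.
 * They drop the thread lock held across the reschedule, enable interrupts, call the entry
 * routine stored in the thread structure, and pass its return value to thread_exit. A
 * hypothetical entry routine matching the int (*)(void *) shape implied by ct->entry:
 */
static int worker_entry(void *arg) {
    LTRACEF("worker running with arg %p\n", arg);
    /* do the thread's work, then return a status for thread_exit to consume */
    return 0;
}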