// Stress-test worker: repeatedly arms a timer on one CPU, migrates this thread
// to a different CPU, then cancels the timer so that timer_cancel deliberately
// races with the timer callback. Returns 0 when args->timer_stress_done is set.
static int timer_stress_worker(void* void_arg) {
    timer_stress_args* args = reinterpret_cast<timer_stress_args*>(void_arg);
    while (!atomic_load(&args->timer_stress_done)) {
        timer_t t = TIMER_INITIAL_VALUE(t);
        zx_duration_t timer_duration = rand_duration(ZX_MSEC(5));

        // Set a timer, then switch to a different CPU to ensure we race with it.
        //
        // Interrupts stay disabled from reading our CPU number through arming
        // the timer, so timer_cpu is exactly the CPU the timer will fire on.
        arch_disable_ints();
        uint timer_cpu = arch_curr_cpu_num();
        const Deadline deadline = Deadline::no_slack(current_time() + timer_duration);
        timer_set(&t, deadline, timer_stress_cb, void_arg);
        // Exclude timer_cpu from our affinity so we resume on some other CPU.
        thread_set_cpu_affinity(get_current_thread(), ~cpu_num_to_mask(timer_cpu));
        DEBUG_ASSERT(arch_curr_cpu_num() != timer_cpu);
        arch_enable_ints();

        // We're now running on something other than timer_cpu.
        atomic_add_u64(&args->num_set, 1);

        // Sleep for the timer duration so that this thread's timer_cancel races with the timer
        // callback. We want to race to ensure there are no synchronization or memory visibility
        // issues.
        thread_sleep_relative(timer_duration);
        timer_cancel(&t);
    }
    return 0;
}
// Initializes the ARM generic timer: derives the tick<->time conversion
// factors from the timer frequency and wires the timer interrupt to
// platform_tick.
//
// irq:           interrupt vector to register the tick handler on.
// freq_override: timer frequency in Hz; 0 means "read CNTFRQ from hardware".
//
// Bails out (with a trace message) if the hardware reports a zero frequency.
void arm_generic_timer_init(int irq, uint32_t freq_override) {
    uint32_t cntfrq;
    if (freq_override == 0) {
        cntfrq = read_cntfrq();
        if (!cntfrq) {
            TRACEF("Failed to initialize timer, frequency is 0\n");
            return;
        }
    } else {
        cntfrq = freq_override;
    }

#if LOCAL_TRACE
    // In trace builds, exercise the conversion-factor math at both extremes
    // before installing the real frequency so scaling bugs are visible.
    LTRACEF("Test min cntfrq\n");
    arm_generic_timer_init_conversion_factors(1);
    test_time_conversions(1);
    LTRACEF("Test max cntfrq\n");
    arm_generic_timer_init_conversion_factors(~0);
    test_time_conversions(~0);
    LTRACEF("Set actual cntfrq\n");
#endif
    arm_generic_timer_init_conversion_factors(cntfrq);
    test_time_conversions(cntfrq);

    // arch_curr_cpu_num() yields an unsigned value; %u matches it (was %d).
    LTRACEF("register irq %d on cpu %u\n", irq, arch_curr_cpu_num());
    register_int_handler(irq, &platform_tick, NULL);
    unmask_interrupt(irq);

    // Remember the vector so secondary CPUs can register the same handler.
    timer_irq = irq;
}
// See that timer_trylock_or_cancel acquires the lock when the holder releases it. static bool trylock_or_cancel_get_lock() { BEGIN_TEST; // We need 2 or more CPUs for this test. if (get_num_cpus_online() < 2) { printf("skipping test trylock_or_cancel_get_lock, not enough online cpus\n"); return true; } timer_args arg{}; timer_t t = TIMER_INITIAL_VALUE(t); SpinLock lock; arg.lock = lock.GetInternal(); arg.wait = 1; arch_disable_ints(); uint timer_cpu = arch_curr_cpu_num(); const Deadline deadline = Deadline::no_slack(current_time() + ZX_USEC(100)); timer_set(&t, deadline, timer_trylock_cb, &arg); // The timer is set to run on timer_cpu, switch to a different CPU, acquire the spinlock then // signal the callback to proceed. thread_set_cpu_affinity(get_current_thread(), ~cpu_num_to_mask(timer_cpu)); DEBUG_ASSERT(arch_curr_cpu_num() != timer_cpu); arch_enable_ints(); { AutoSpinLock guard(&lock); while (!atomic_load(&arg.timer_fired)) { } // Callback should now be running. Tell it to stop waiting and start trylocking. atomic_store(&arg.wait, 0); } // See that timer_cancel returns false indicating that the timer ran. ASSERT_FALSE(timer_cancel(&t), ""); // Note, we cannot assert the value of arg.result. We have both released the lock and canceled // the timer, but we don't know which of these events the timer observed first. END_TEST; }
// Appends one 4-word event record (hires timestamp, cpu<<16|id, arg0, arg1)
// to the kernel event log. No-op when event logging is disabled.
void kernel_evlog_add(uintptr_t id, uintptr_t arg0, uintptr_t arg1) {
    if (!kernel_evlog_enable)
        return;

    uint slot = evlog_bump_head(&kernel_evlog);
    kernel_evlog.items[slot + 0] = (uintptr_t)current_time_hires();
    kernel_evlog.items[slot + 1] = (arch_curr_cpu_num() << 16) | id;
    kernel_evlog.items[slot + 2] = arg0;
    kernel_evlog.items[slot + 3] = arg1;
}
// Entry point for a secondary CPU once early arch init is done: performs
// early per-cpu thread setup, resumes this CPU's bootstrap thread, and enters
// the scheduler via thread_secondary_cpu_entry().
void lk_secondary_cpu_entry(void) {
    uint cpu = arch_curr_cpu_num();

    // Secondary CPUs are numbered starting at 1; anything beyond the number of
    // bootstrap threads has no thread allocated for it.
    // NOTE(review): the message reports SMP_MAX_CPUS but the bound checked is
    // secondary_bootstrap_thread_count -- confirm this is intentional.
    if (cpu > secondary_bootstrap_thread_count) {
        // cpu is unsigned, so print it with %u (was %d: format/arg mismatch).
        dprintf(CRITICAL,
                "Invalid secondary cpu num %u, SMP_MAX_CPUS %d, secondary_bootstrap_thread_count %d\n",
                cpu, SMP_MAX_CPUS, secondary_bootstrap_thread_count);
        return;
    }

    thread_secondary_cpu_init_early();
    // Resume this CPU's bootstrap thread (cpu numbers start at 1, array at 0).
    thread_resume(secondary_bootstrap_threads[cpu - 1]);

    dprintf(SPEW, "entering scheduler on cpu %u\n", cpu);
    thread_secondary_cpu_entry();
}
static int timer_do_one_thread(void* arg) { event_t event; timer_t timer; event_init(&event, false, 0); timer_init(&timer); const Deadline deadline = Deadline::no_slack(current_time() + ZX_MSEC(10)); timer_set(&timer, deadline, timer_diag_cb, &event); event_wait(&event); printf("got timer on cpu %u\n", arch_curr_cpu_num()); event_destroy(&event); return 0; }
// C entry point for a secondary CPU coming out of the assembly boot path.
//
// asm_cpu_num: the cpu number the boot assembly believes it started; we bail
// out if it disagrees with what the hardware reports.
void arm64_secondary_entry(ulong asm_cpu_num) {
    uint cpu = arch_curr_cpu_num();
    if (cpu != asm_cpu_num) {
        return;
    }

    arm64_cpu_early_init();

    // Acquire-then-release of arm_boot_cpu_lock just spins until the boot CPU
    // has dropped it, gating the rest of secondary init on the boot CPU.
    spin_lock(&arm_boot_cpu_lock);
    spin_unlock(&arm_boot_cpu_lock);

    /* run early secondary cpu init routines up to the threading level */
    lk_init_level(LK_INIT_FLAG_SECONDARY_CPUS, LK_INIT_LEVEL_EARLIEST,
                  LK_INIT_LEVEL_THREADING - 1);

    arch_mp_init_percpu();

    // cpu is unsigned, so print it with %u (was %d: format/arg mismatch).
    LTRACEF("cpu num %u\n", cpu);

    /* we're done, tell the main cpu we're up */
    atomic_add(&secondaries_to_init, -1);
    // SEV wakes any CPU parked in WFE waiting on the counter above.
    __asm__ volatile("sev");

    lk_secondary_cpu_entry();
}
// Per-cpu init hook: registers and unmasks the generic timer interrupt on a
// secondary CPU. timer_irq was recorded by arm_generic_timer_init() on the
// boot CPU. The init level argument is unused.
static void arm_generic_timer_init_secondary_cpu(uint level) {
    // arch_curr_cpu_num() yields an unsigned value; %u matches it (was %d).
    LTRACEF("register irq %d on cpu %u\n", timer_irq, arch_curr_cpu_num());
    register_int_handler(timer_irq, &platform_tick, NULL);
    unmask_interrupt(timer_irq);
}
// Top-level IRQ dispatch: decodes which interrupt bank (per-cpu local, VC
// bank 1, VC bank 2) has a pending interrupt, computes its vector, handles
// SMP mailbox IPIs inline, and otherwise calls the registered handler.
// Returns whether the scheduler should run on the way out of the interrupt.
enum handler_return platform_irq(struct arm_iframe *frame) {
    uint vector;
    uint cpu = arch_curr_cpu_num();

    THREAD_STATS_INC(interrupts);

    // see what kind of irq it is
    uint32_t pend = *REG32(INTC_LOCAL_IRQ_PEND0 + cpu * 4);

    pend &= ~(1 << (INTERRUPT_ARM_LOCAL_GPU_FAST % 32)); // mask out gpu interrupts

    if (pend != 0) {
        // it's a local interrupt
        LTRACEF("local pend 0x%x\n", pend);
        // ctz finds the lowest set bit, so lower-numbered irqs win.
        vector = ARM_IRQ_LOCAL_BASE + ctz(pend);
        goto decoded;
    }

    // XXX disable for now, since all of the interesting irqs are mirrored into the other banks
#if 0
    // look in bank 0 (ARM interrupts)
    pend = *REG32(INTC_PEND0);
    LTRACEF("pend0 0x%x\n", pend);
    pend &= ~((1<<8)|(1<<9)); // mask out bit 8 and 9
    if (pend != 0) {
        // it's a bank 0 interrupt
        vector = ARM_IRQ0_BASE + ctz(pend);
        goto decoded;
    }
#endif

    // look for VC interrupt bank 1
    pend = *REG32(INTC_PEND1);
    LTRACEF("pend1 0x%x\n", pend);
    if (pend != 0) {
        // it's a bank 1 interrupt
        vector = ARM_IRQ1_BASE + ctz(pend);
        goto decoded;
    }

    // look for VC interrupt bank 2
    pend = *REG32(INTC_PEND2);
    LTRACEF("pend2 0x%x\n", pend);
    if (pend != 0) {
        // it's a bank 2 interrupt
        vector = ARM_IRQ2_BASE + ctz(pend);
        goto decoded;
    }

    // nothing pending in any bank: treat as spurious
    vector = 0xffffffff;

decoded:
    LTRACEF("cpu %u vector %u\n", cpu, vector);

    // dispatch the irq
    enum handler_return ret = INT_NO_RESCHEDULE;

#if WITH_SMP
    if (vector == INTERRUPT_ARM_LOCAL_MAILBOX0) {
        // inter-processor interrupt: read this cpu's mailbox bits
        pend = *REG32(INTC_LOCAL_MAILBOX0_CLR0 + 0x10 * cpu);
        LTRACEF("mailbox0 clr 0x%x\n", pend);

        // ack it
        *REG32(INTC_LOCAL_MAILBOX0_CLR0 + 0x10 * cpu) = pend;

        if (pend & (1 << MP_IPI_GENERIC)) {
            PANIC_UNIMPLEMENTED;
        }

        if (pend & (1 << MP_IPI_RESCHEDULE)) {
            ret = mp_mbx_reschedule_irq();
        }
    } else
#endif // WITH_SMP
    if (vector == 0xffffffff) {
        // spurious: nothing to do
        ret = INT_NO_RESCHEDULE;
    } else if (int_handler_table[vector].handler) {
        ret = int_handler_table[vector].handler(int_handler_table[vector].arg);
    } else {
        panic("irq %u fired on cpu %u but no handler set!\n", vector, cpu);
    }

    return ret;
}