void platform_halt(platform_halt_action suggested_action, platform_halt_reason reason)
{
#if ENABLE_PANIC_SHELL
    if (reason == HALT_REASON_SW_PANIC) {
        dprintf(ALWAYS, "CRASH: starting debug shell... (reason = %d)\n", reason);
        arch_disable_ints();
        panic_shell_start();
    }
#endif // ENABLE_PANIC_SHELL

    switch (suggested_action) {
        default:
        case HALT_ACTION_SHUTDOWN:
        case HALT_ACTION_HALT:
            dprintf(ALWAYS, "HALT: spinning forever... (reason = %d)\n", reason);
            arch_disable_ints();
            for (;;)
                arch_idle();
            break;
        case HALT_ACTION_REBOOT:
            dprintf(INFO, "REBOOT\n");
            arch_disable_ints();
            /* the reset request may take a few cycles to land, so keep issuing it */
            for (;;) {
                NVIC_SystemReset();
            }
            break;
    }

    /* not reached: every case above spins forever, but keep a fallback spin
     * in case an action is ever added without one */
    dprintf(ALWAYS, "HALT: spinning forever... (reason = %d)\n", reason);
    arch_disable_ints();
    for (;;);
}
static int timer_stress_worker(void* void_arg) {
    timer_stress_args* args = reinterpret_cast<timer_stress_args*>(void_arg);
    while (!atomic_load(&args->timer_stress_done)) {
        timer_t t = TIMER_INITIAL_VALUE(t);
        zx_duration_t timer_duration = rand_duration(ZX_MSEC(5));

        // Set a timer, then switch to a different CPU to ensure we race with it.
        arch_disable_ints();
        uint timer_cpu = arch_curr_cpu_num();
        const Deadline deadline = Deadline::no_slack(current_time() + timer_duration);
        timer_set(&t, deadline, timer_stress_cb, void_arg);
        thread_set_cpu_affinity(get_current_thread(), ~cpu_num_to_mask(timer_cpu));
        DEBUG_ASSERT(arch_curr_cpu_num() != timer_cpu);
        arch_enable_ints();

        // We're now running on something other than timer_cpu.
        atomic_add_u64(&args->num_set, 1);

        // Sleep for the timer duration so that this thread's timer_cancel races with the
        // timer callback. We want to race to ensure there are no synchronization or memory
        // visibility issues.
        thread_sleep_relative(timer_duration);
        timer_cancel(&t);
    }
    return 0;
}
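// The argument struct and callback used by the worker above are not shown in
// this excerpt. The following is a minimal sketch of what the worker assumes
// (the field names and the callback body are assumptions for illustration,
// not the actual harness): the callback only bumps a counter, so all of the
// interesting contention is between it and the worker's timer_cancel.
struct timer_stress_args {
    volatile int timer_stress_done;
    volatile uint64_t num_set;
    volatile uint64_t num_fired;
};

static void timer_stress_cb(timer_t* t, zx_time_t now, void* void_arg) {
    timer_stress_args* args = reinterpret_cast<timer_stress_args*>(void_arg);
    // Record that the callback ran; the racing timer_cancel must cope with
    // this having already happened, being in progress, or never happening.
    atomic_add_u64(&args->num_fired, 1);
}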
/*
 * default implementations of these routines, if the platform code
 * chooses not to implement.
 */
__WEAK void platform_halt(platform_halt_action suggested_action, platform_halt_reason reason)
{
#if ENABLE_PANIC_SHELL
    if (reason == HALT_REASON_SW_PANIC) {
        dprintf(ALWAYS, "CRASH: starting debug shell... (reason = %d)\n", reason);
        arch_disable_ints();
        panic_shell_start();
    }
#endif // ENABLE_PANIC_SHELL

    dprintf(ALWAYS, "HALT: spinning forever... (reason = %d)\n", reason);
    arch_disable_ints();
    for (;;);
}
void arch_early_init(void)
{
    arch_disable_ints();

#if (__CORTEX_M >= 0x03) || (CORTEX_SC >= 300)
    uint i;

    /* set the vector table base */
    SCB->VTOR = (uint32_t)&vectab;

#if ARM_CM_DYNAMIC_PRIORITY_SIZE
    /* probe the number of implemented priority bits */
    for (i = 0; i < 7; i++) {
        __set_BASEPRI(1 << i);
        if (__get_BASEPRI() != 0)
            break;
    }
    arm_cm_num_irq_pri_bits = 8 - i;
    arm_cm_irq_pri_mask = ~((1 << i) - 1) & 0xff;
#endif

    /* clear any pending interrupts and set all the vectors to medium priority */
    uint groups = (SCnSCB->ICTR & 0xf) + 1;
    for (i = 0; i < groups; i++) {
        NVIC->ICER[i] = 0xffffffff;
        NVIC->ICPR[i] = 0xffffffff;
        for (uint j = 0; j < 32; j++) {
            NVIC_SetPriority(i * 32 + j, arm_cm_medium_priority());
        }
    }

    /* leave BASEPRI at 0 */
    __set_BASEPRI(0);

    /* set priority grouping to 0 */
    NVIC_SetPriorityGrouping(0);

    /* enable certain faults */
    SCB->SHCSR |= (SCB_SHCSR_USGFAULTENA_Msk | SCB_SHCSR_BUSFAULTENA_Msk | SCB_SHCSR_MEMFAULTENA_Msk);
#endif

    /* set the svc and pendsv priority level to pretty low */
    NVIC_SetPriority(SVCall_IRQn, arm_cm_lowest_priority());
    NVIC_SetPriority(PendSV_IRQn, arm_cm_lowest_priority());

    /* set systick and debugmonitor to medium priority */
    NVIC_SetPriority(SysTick_IRQn, arm_cm_medium_priority());

#if (__CORTEX_M >= 0x03)
    NVIC_SetPriority(DebugMonitor_IRQn, arm_cm_medium_priority());
#endif

#if ARM_WITH_CACHE
    arch_enable_cache(UCACHE);
#endif
}
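/*
 * Worked example of the ARM_CM_DYNAMIC_PRIORITY_SIZE probe above, assuming a
 * core that implements 4 priority bits (priorities live in the top nibble of
 * each 8-bit field): writes of 1 << 0 through 1 << 3 land in unimplemented
 * bits and read back as 0, while the write of 1 << 4 sticks, so the loop
 * breaks with i == 4, giving arm_cm_num_irq_pri_bits = 8 - 4 = 4 and
 * arm_cm_irq_pri_mask = ~((1 << 4) - 1) & 0xff = 0xf0.
 */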
/* zynq specific halt */
void platform_halt(platform_halt_action suggested_action, platform_halt_reason reason)
{
    switch (suggested_action) {
        default:
        case HALT_ACTION_SHUTDOWN:
        case HALT_ACTION_HALT:
            printf("HALT: spinning forever... (reason = %d)\n", reason);
            arch_disable_ints();
            for (;;)
                arch_idle();
            break;
        case HALT_ACTION_REBOOT:
            printf("REBOOT\n");
            arch_disable_ints();
            for (;;) {
                zynq_slcr_unlock();
                SLCR->PSS_RST_CTRL = 1;
            }
            break;
    }
}
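/*
 * Note on the reboot path above: the Zynq-7000 SLCR registers are
 * write-protected, so zynq_slcr_unlock() must drop the protection before the
 * store to PSS_RST_CTRL; writing 1 sets bit 0 (SOFT_RST per the Zynq-7000
 * TRM), which requests a PS-wide software reset. The store sits in a loop
 * because the reset takes effect asynchronously and the CPU must not run
 * past it in the meantime.
 */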
// See that timer_trylock_or_cancel acquires the lock when the holder releases it.
static bool trylock_or_cancel_get_lock() {
    BEGIN_TEST;

    // We need 2 or more CPUs for this test.
    if (get_num_cpus_online() < 2) {
        printf("skipping test trylock_or_cancel_get_lock, not enough online cpus\n");
        return true;
    }

    timer_args arg{};
    timer_t t = TIMER_INITIAL_VALUE(t);
    SpinLock lock;
    arg.lock = lock.GetInternal();
    arg.wait = 1;

    arch_disable_ints();

    uint timer_cpu = arch_curr_cpu_num();
    const Deadline deadline = Deadline::no_slack(current_time() + ZX_USEC(100));
    timer_set(&t, deadline, timer_trylock_cb, &arg);

    // The timer is set to run on timer_cpu, switch to a different CPU, acquire the spinlock
    // then signal the callback to proceed.
    thread_set_cpu_affinity(get_current_thread(), ~cpu_num_to_mask(timer_cpu));
    DEBUG_ASSERT(arch_curr_cpu_num() != timer_cpu);

    arch_enable_ints();

    {
        AutoSpinLock guard(&lock);

        while (!atomic_load(&arg.timer_fired)) {
        }

        // Callback should now be running. Tell it to stop waiting and start trylocking.
        atomic_store(&arg.wait, 0);
    }

    // See that timer_cancel returns false indicating that the timer ran.
    ASSERT_FALSE(timer_cancel(&t), "");

    // Note, we cannot assert the value of arg.result. We have both released the lock and
    // canceled the timer, but we don't know which of these events the timer observed first.

    END_TEST;
}
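// timer_args and timer_trylock_cb are defined elsewhere in the test file; the
// following is a minimal sketch of what this test assumes (field names, the
// callback body, and the convention that timer_trylock_or_cancel returns true
// when the timer was canceled are assumptions for illustration). The callback
// latches timer_fired, spins until the test clears wait, then races
// timer_trylock_or_cancel against the test's lock release and timer_cancel.
struct timer_args {
    volatile int timer_fired;
    volatile int wait;
    volatile int result;
    spin_lock_t* lock;
};

static void timer_trylock_cb(timer_t* t, zx_time_t now, void* void_arg) {
    timer_args* arg = reinterpret_cast<timer_args*>(void_arg);
    atomic_store(&arg->timer_fired, 1);

    // Wait until the test thread holds the lock and tells us to proceed.
    while (atomic_load(&arg->wait)) {
    }

    // Spin for the lock, bailing out if a concurrent timer_cancel lands first.
    if (timer_trylock_or_cancel(t, arg->lock)) {
        // Canceled before the lock could be acquired.
        atomic_store(&arg->result, 1);
        return;
    }

    // Lock acquired; record success and release it.
    atomic_store(&arg->result, 0);
    spin_unlock(arg->lock);
}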
void platform_halt(platform_halt_action suggested_action, platform_halt_reason reason)
{
    arch_disable_ints();
    for (;;);
}