/*
 * CPU hotplug "dying" callback: quiesce this CPU's architected timer
 * before the CPU goes offline.
 */
static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt);

	arch_timer_stop(evt);
	return 0;
}
/*
 * clockevents set_mode hook for the architected timer.
 *
 * On SHUTDOWN/UNUSED the timer for this clock-event device is stopped;
 * all other modes (oneshot programming happens via set_next_event) need
 * no action here.
 */
static void arch_timer_set_mode(enum clock_event_mode mode,
				struct clock_event_device *clk)
{
	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/*
		 * Pass the device being shut down, matching the
		 * arch_timer_stop(clk) calls in the hotplug paths.
		 */
		arch_timer_stop(clk);
		break;
	default:
		break;
	}
}
/*
 * Legacy CPU hotplug notifier: bring the per-cpu timer up on
 * CPU_STARTING and tear it down on CPU_DYING.
 */
static int arch_timer_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	unsigned long event = action & ~CPU_TASKS_FROZEN;

	/*
	 * Grab cpu pointer in each case to avoid spurious
	 * preemptible warnings
	 */
	if (event == CPU_STARTING)
		arch_timer_setup(this_cpu_ptr(arch_timer_evt));
	else if (event == CPU_DYING)
		arch_timer_stop(this_cpu_ptr(arch_timer_evt));

	return NOTIFY_OK;
}
/*
 * Per-cpu bring-up of the architected system timer.
 *
 * Stops the timer, fills in the clock_event_device (oneshot capable,
 * stops in deep C-states), registers it with the clockevents core,
 * enables its per-cpu PPI, and exposes the counter to userspace for
 * the vDSO. Runs on the CPU being brought online (cpumask is the
 * current CPU).
 */
static void __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
	/*
	 * Let's make sure the timer is off before doing anything else.
	 * Pass the device, consistent with the other arch_timer_stop(clk)
	 * call sites in this file.
	 */
	arch_timer_stop(clk);

	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
	clk->name = "arch_sys_timer";
	clk->rating = 400;
	clk->set_mode = arch_timer_set_mode;
	clk->set_next_event = arch_timer_set_next_event;
	clk->irq = arch_timer_ppi;
	clk->cpumask = cpumask_of(smp_processor_id());

	/* min_delta 0xf ticks, max_delta 0x7fffffff ticks */
	clockevents_config_and_register(clk, arch_timer_rate,
					0xf, 0x7fffffff);

	enable_percpu_irq(clk->irq, 0);

	/* Ensure the physical counter is visible to userspace for the vDSO. */
	arch_counter_enable_user_access();
}
/*
 * local_timer_stop - shut down this CPU's local tick device.
 * @evt: the clock-event device to stop
 *
 * Thin wrapper forwarding to arch_timer_stop() for the local-timer API.
 */
void local_timer_stop(struct clock_event_device *evt)
{
	arch_timer_stop(evt);
}