/*
 * sched_clock_cpu - return a monotonic, per-cpu view of the scheduler clock.
 * @cpu: the cpu whose clock to read (may differ from the current cpu)
 *
 * Fast paths: if the raw TSC/sched_clock is stable, use it directly; in NMI
 * context return the last cached value locklessly (taking a spinlock in NMI
 * would deadlock); before the clock infrastructure is up, return 0.
 *
 * Otherwise the cached per-cpu clock is advanced under scd->lock.  When
 * reading a remote cpu's clock, both cpus' data are locked (presumably in a
 * fixed order by lock_double_clock() to avoid ABBA deadlock — confirm in its
 * definition) and the two clocks are coupled to the larger value so that
 * observed time moves monotonically across cpus.
 */
u64 sched_clock_cpu(int cpu)
{
	u64 now, clock, this_clock, remote_clock;
	struct sched_clock_data *scd;

	/* Stable hardware clock: no per-cpu correction needed. */
	if (sched_clock_stable)
		return sched_clock();

	scd = cpu_sdc(cpu);

	/*
	 * Normally this is not called in NMI context - but if it is,
	 * trying to do any locking here is totally lethal.
	 */
	if (unlikely(in_nmi()))
		return scd->clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		struct sched_clock_data *my_scd = this_scd();

		/* Takes both scd->lock and my_scd->lock. */
		lock_double_clock(scd, my_scd);

		this_clock = __update_sched_clock(my_scd, now);
		remote_clock = scd->clock;

		/*
		 * Use the opportunity that we have both locks
		 * taken to couple the two clocks: we take the
		 * larger time as the latest time for both
		 * runqueues. (this creates monotonic movement)
		 */
		if (likely((s64)(remote_clock - this_clock) < 0)) {
			clock = this_clock;
			scd->clock = clock;
		} else {
			/*
			 * Should be rare, but possible:
			 */
			clock = remote_clock;
			my_scd->clock = remote_clock;
		}

		__raw_spin_unlock(&my_scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
		clock = __update_sched_clock(scd, now);
	}

	/* Both branches above leave scd->lock held; release it here. */
	__raw_spin_unlock(&scd->lock);

	return clock;
}
/*
 * sched_clock_cpu - return the scheduler clock for @cpu.
 * @cpu: cpu whose clock to read (may differ from the current cpu)
 *
 * Returns 0 until the clock infrastructure is running.  Must be called
 * with interrupts disabled (WARNs otherwise).
 *
 * For a remote cpu, the locally-sampled raw time is rebased onto the
 * remote cpu's time base using each cpu's last tick snapshots before the
 * remote clock is advanced.
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);
	u64 now, clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		/*
		 * In order to update a remote cpu's clock based on our
		 * unstable raw time, rebase it against:
		 *   tick_raw  (offset between the raw counters)
		 *   tick_gtod (tick offset between the cpus)
		 */
		struct sched_clock_data *my_scd = this_scd();

		/* Takes both scd->lock and my_scd->lock. */
		lock_double_clock(scd, my_scd);

		/* Replace our raw epoch with the remote cpu's ... */
		now -= my_scd->tick_raw;
		now += scd->tick_raw;

		/* ... and compensate for the gtod skew between the cpus. */
		now += my_scd->tick_gtod;
		now -= scd->tick_gtod;

		/* Our own snapshot is no longer needed; drop our lock first. */
		__raw_spin_unlock(&my_scd->lock);

		/* Advance the remote clock under scd->lock and read it out. */
		__update_sched_clock(scd, now, &clock);

		__raw_spin_unlock(&scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
		__update_sched_clock(scd, now, NULL);
		clock = scd->clock;
		__raw_spin_unlock(&scd->lock);
	}

	return clock;
}
/*
 * Periodic tick hook: snapshot the raw sched_clock and the gtod clock
 * into this cpu's sched_clock_data and advance the cached clock.
 *
 * No-op until the clock infrastructure is running; must be called with
 * interrupts disabled (WARNs otherwise).
 */
void sched_clock_tick(void)
{
	struct sched_clock_data *data = this_scd();
	u64 raw_ns, gtod_ns;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	/* Sample both time sources before taking the lock. */
	gtod_ns = ktime_to_ns(ktime_get());
	raw_ns = sched_clock();

	__raw_spin_lock(&data->lock);
	data->tick_raw = raw_ns;
	data->tick_gtod = gtod_ns;
	__update_sched_clock(data, raw_ns);
	__raw_spin_unlock(&data->lock);
}
/*
 * Periodic tick hook: advance this cpu's cached clock, then recompute the
 * fixed-point multiplier (gtod delta / raw delta, scaled by MULTI_SHIFT)
 * that corrects the unstable raw clock towards gtod, and finally snapshot
 * the raw/gtod/jiffies values for the next tick.
 *
 * No-op until the clock infrastructure is running; must be called with
 * interrupts disabled (WARNs otherwise).
 */
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	unsigned long now_jiffies = jiffies;
	s64 mult, delta_gtod, delta_raw;
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now, NULL);
	/*
	 * update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod to that would
	 * increase the clock 2 jiffies.
	 */
	delta_gtod = now_gtod - scd->tick_gtod;
	delta_raw = now - scd->tick_raw;

	/*
	 * Only recompute the rate when the raw clock actually advanced;
	 * otherwise fall back to a 1.0 multiplier.
	 * NOTE(review): do_div() operates on a u64 dividend; passing the
	 * s64 'mult' assumes delta_gtod is non-negative here — confirm.
	 */
	if ((long)delta_raw > 0) {
		/* Fixed-point ratio: (delta_gtod / delta_raw) << MULTI_SHIFT */
		mult = delta_gtod << MULTI_SHIFT;
		do_div(mult, delta_raw);
		scd->multi = mult;
		/* Clamp the rate so one bad sample cannot run the clock away. */
		if (scd->multi > MAX_MULTI)
			scd->multi = MAX_MULTI;
		else if (scd->multi < MIN_MULTI)
			scd->multi = MIN_MULTI;
	} else
		scd->multi = 1 << MULTI_SHIFT;

	/* Snapshot this tick as the baseline for the next interval. */
	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	scd->tick_jiffies = now_jiffies;
	__raw_spin_unlock(&scd->lock);
}
void sched_clock_tick(void) { struct sched_clock_data *scd = this_scd(); u64 now, now_gtod; if (unlikely(!sched_clock_running)) return; WARN_ON_ONCE(!irqs_disabled()); now = sched_clock(); now_gtod = ktime_to_ns(ktime_get()); __raw_spin_lock(&scd->lock); __update_sched_clock(scd, now); /* * update tick_gtod after __update_sched_clock() because that will * already observe 1 new jiffy; adding a new tick_gtod to that would * increase the clock 2 jiffies. */ scd->tick_raw = now; scd->tick_gtod = now_gtod; __raw_spin_unlock(&scd->lock); }
int sc8825_enter_lowpower(void) { int status, ret = 0; unsigned long flags, time; unsigned int cpu = smp_processor_id(); #ifdef CONFIG_SPRD_PM_DEBUG __raw_writel(0xfdffbfff, SPRD_INTC0_BASE + 0xc);//intc0 __raw_writel(0x02004000, SPRD_INTC0_BASE + 0x8);//intc0 __raw_writel(0xffffffff, SPRD_INTC0_BASE + 0x100c);//intc1 #endif time = get_sys_cnt(); if (!hw_irqs_disabled()) { flags = read_cpsr(); printk("##: Error(%s): IRQ is enabled(%08lx)!\n", "wakelock_suspend", flags); } /*TODO: * we need to known clock status in modem side */ #ifdef FORCE_DISABLE_DSP status = 0; #else #ifdef CONFIG_NKERNEL status = sc8825_get_clock_status(); #else /* * TODO: get clock status in native version, force deep sleep now */ status = 0; #endif #endif if (status & DEVICE_AHB) { /*printk("###### %s, DEVICE_AHB ###\n", __func__ );*/ set_sleep_mode(SLP_MODE_ARM); arm_sleep(); } else if (status & DEVICE_APB) { /*printk("###### %s, DEVICE_APB ###\n", __func__ );*/ set_sleep_mode(SLP_MODE_MCU); mcu_sleep(); } else { /*printk("###### %s, DEEP ###\n", __func__ );*/ set_sleep_mode(SLP_MODE_DEP); gic_save_context( ); scu_save_context(); ret = deep_sleep( ); scu_restore_context(); flush_cache_all(); gic_restore_context( ); gic_cpu_enable(cpu); gic_dist_enable( ); #if 1 void notrace __update_sched_clock(void); __update_sched_clock(); #endif } time_add(get_sys_cnt() - time, ret); return ret; }