/*
 * Updates the kernel user helper area with the current timespec
 * data, as well as additional fields needed to calculate
 * gettimeofday, clock_gettime, etc.
 */
void update_vsyscall(struct timespec *ts, struct timespec *wtm,
                     struct clocksource *c, u32 mult)
{
        unsigned long vectors = (unsigned long)vectors_page;
        unsigned long flags;
        unsigned *seqnum = (unsigned *)(vectors + ARM_VSYSCALL_TIMER_SEQ);
        struct kernel_gtod_t *dgtod = (struct kernel_gtod_t *)(vectors +
                ARM_VSYSCALL_TIMER_CYCLE_LAST);
        struct kernel_wtm_t *dgwtm = (struct kernel_wtm_t *)(vectors +
                ARM_VSYSCALL_TIMER_WTM_TV_SEC);

        write_seqlock_irqsave(&kuh_time_lock, flags);
        *seqnum = kuh_time_lock.sequence;
        dgtod->cycle_last = c->cycle_last;
        dgtod->mask = c->mask;
        dgtod->mult = c->mult;
        dgtod->shift = c->shift;
        dgtod->tv_sec = ts->tv_sec;
        dgtod->tv_nsec = ts->tv_nsec;
        dgwtm->tv_sec = wtm->tv_sec;
        dgwtm->tv_nsec = wtm->tv_nsec;
        *seqnum = kuh_time_lock.sequence + 1;
        write_sequnlock_irqrestore(&kuh_time_lock, flags);
}
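The writer above publishes an odd sequence number into the helper page while an update is in flight (write_seqlock() leaves kuh_time_lock.sequence odd) and an even one once the data is consistent. A userspace consumer therefore spins until it sees the same even value before and after its copy. The following is a minimal sketch of that reader; the struct layout, field names, and function name are assumptions for illustration (and a real implementation would also need read memory barriers) — only the seqcount protocol comes from the writer above.

/*
 * Hypothetical userspace reader for the kernel user helper page.
 * Layout and names are illustrative, not the real ABI.
 */
struct kuh_gtod {
        unsigned seq;           /* mirrors *seqnum written by the kernel */
        long tv_sec;
        long tv_nsec;
};

static void kuh_read_time(const volatile struct kuh_gtod *p,
                          long *sec, long *nsec)
{
        unsigned start;

        do {
                /* odd sequence: writer mid-update, wait it out */
                while ((start = p->seq) & 1)
                        ;
                *sec = p->tv_sec;
                *nsec = p->tv_nsec;
        } while (p->seq != start);      /* retry if a writer raced us */
}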
static int timer_resume(struct sys_device *dev)
{
        unsigned long flags;
        unsigned long sec;
        unsigned long sleep_length;

#ifdef CONFIG_HPET_TIMER
        if (is_hpet_enabled())
                hpet_reenable();
#endif
        setup_pit_timer();
        sec = get_cmos_time() + clock_cmos_diff;
        sleep_length = (get_cmos_time() - sleep_start) * HZ;
        write_seqlock_irqsave(&xtime_lock, flags);
        xtime.tv_sec = sec;
        xtime.tv_nsec = 0;
        jiffies_64 += sleep_length;
        wall_jiffies += sleep_length;
        write_sequnlock_irqrestore(&xtime_lock, flags);
        if (last_timer->resume)
                last_timer->resume();
        cur_timer = last_timer;
        last_timer = NULL;
        touch_softlockup_watchdog();
        return 0;
}
/*
 * Bring up the timer at 100 Hz.
 */
void __init swarm_time_init(void)
{
        unsigned long flags;    /* write_seqlock_irqsave() expects unsigned long */
        int status;

        /* Set up the scd general purpose timer 0 to cpu 0 */
        sb1250_time_init();

        /* Establish communication with the Xicor 1241 RTC */
        /* XXXKW how do I share the SMBus with the I2C subsystem? */
        __raw_writeq(K_SMB_FREQ_400KHZ, SMB_CSR(R_SMB_FREQ));
        __raw_writeq(0, SMB_CSR(R_SMB_CONTROL));

        /* Read the status register, then test the RTC-failure bit in it */
        if ((status = xicor_read(X1241REG_SR)) < 0) {
                printk("x1241: couldn't detect on SWARM SMBus 1\n");
        } else {
                if (status & X1241REG_SR_RTCF)
                        printk("x1241: battery failed -- time is probably wrong\n");
                write_seqlock_irqsave(&xtime_lock, flags);
                xtime.tv_sec = get_swarm_time();
                xtime.tv_nsec = 0;
                write_sequnlock_irqrestore(&xtime_lock, flags);
        }
}
void update_vsyscall(struct timespec *wall, struct timespec *wtm,
                     struct clocksource *c, u32 mult)
{
        unsigned long flags;

        write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);

        /* copy fsyscall clock data */
        fsyscall_gtod_data.clk_mask = c->mask;
        fsyscall_gtod_data.clk_mult = mult;
        fsyscall_gtod_data.clk_shift = c->shift;
        fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
        fsyscall_gtod_data.clk_cycle_last = c->cycle_last;

        /* copy kernel time structures */
        fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
        fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
        fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec + wall->tv_sec;
        fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec + wall->tv_nsec;

        /* normalize */
        while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
                fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
                fsyscall_gtod_data.monotonic_time.tv_sec++;
        }

        write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{
        unsigned long flags;

        write_seqlock_irqsave(&vsyscall_gtod_lock, flags);

        /* XXX - hackitty hack hack. this is terrible! */
        if (curr_clock != clock)
                curr_clock = clock;

        /* save off wall time as timeval */
        vsyscall_gtod_data.wall_time_tv.tv_sec = wall_time->tv_sec;
        vsyscall_gtod_data.wall_time_tv.tv_usec = wall_time->tv_nsec / 1000;

        /* copy current clocksource */
        vsyscall_gtod_data.clock = *clock;

        /* save off current timezone */
        vsyscall_gtod_data.sys_tz = sys_tz;

        write_sequnlock_irqrestore(&vsyscall_gtod_lock, flags);
}
void vtime_init_idle(struct task_struct *t)
{
        unsigned long flags;

        write_seqlock_irqsave(&t->vtime_seqlock, flags);
        t->vtime_snap_whence = VTIME_SYS;
        t->vtime_snap = sched_clock();
        write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
}
void update_vsyscall_tz(void)
{
        unsigned long flags;

        write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
        /* sys_tz has changed */
        vsyscall_gtod_data.sys_tz = sys_tz;
        write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
int do_settimeofday(struct timespec *tv)
{
        time_t wtm_sec, new_sec = tv->tv_sec;
        long wtm_nsec, new_nsec = tv->tv_nsec;
        unsigned long flags;
        int tb_delta;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&xtime_lock, flags);
        /* Updating the RTC is not the job of this code. If the time is
         * stepped under NTP, the RTC will be updated after STA_UNSYNC
         * is cleared. Tools like clock/hwclock either copy the RTC
         * to the system time, in which case there is no point in writing
         * to the RTC again, or write to the RTC but then they don't call
         * settimeofday to perform this operation. Note also that
         * we don't touch the decrementer since:
         * a) it would lose timer interrupt synchronization on SMP
         * (if it is working one day)
         * b) it could make one jiffy spuriously shorter or longer
         * which would introduce another source of uncertainty potentially
         * harmful to relatively short timers.
         */

        /* This works perfectly on SMP only if the tb are in sync but
         * guarantees an error < 1 jiffy even if they are off by eons,
         * still reasonable when gettimeofday resolution is 1 jiffy.
         */
        tb_delta = tb_ticks_since(last_jiffy_stamp(smp_processor_id()));
        tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;

        new_nsec -= 1000 * mulhwu(tb_to_us, tb_delta);

        wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

        set_normalized_timespec(&xtime, new_sec, new_nsec);
        set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

        /* In case of a large backwards jump in time with NTP, we want the
         * clock to be updated as soon as the PLL is again in lock.
         */
        last_rtc_update = new_sec - 658;

        time_adjust = 0;                /* stop active adjtime() */
        time_status |= STA_UNSYNC;
        time_maxerror = NTP_PHASE_LIMIT;
        time_esterror = NTP_PHASE_LIMIT;
        write_sequnlock_irqrestore(&xtime_lock, flags);
        clock_was_set();
        return 0;
}
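Both do_settimeofday() variants in this section lean on set_normalized_timespec() to fold an out-of-range nanosecond count back into [0, NSEC_PER_SEC) while keeping the total time value unchanged (note new_nsec can go negative after the tb_delta correction, and wtm_nsec after the subtraction). A minimal sketch of the invariant these callers depend on; the name is prefixed to make clear it is an illustration, not the kernel's helper:

/*
 * Sketch of the normalization the callers above assume: adjust
 * tv_sec so that tv_nsec lands in [0, NSEC_PER_SEC) without
 * changing the represented instant.
 */
static void sketch_set_normalized_timespec(struct timespec *ts,
                                           time_t sec, long nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}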
static void flush_iowait(struct hfi1_qp *qp)
{
        struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
        unsigned long flags;

        write_seqlock_irqsave(&dev->iowait_lock, flags);
        if (!list_empty(&qp->s_iowait.list)) {
                list_del_init(&qp->s_iowait.list);
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
        write_sequnlock_irqrestore(&dev->iowait_lock, flags);
}
/*
 * Initialize the TOD clock and the CPU timer of
 * the boot cpu.
 */
void __init time_init(void)
{
        struct timespec ts;
        unsigned long flags;
        cycle_t now;

        /* Reset time synchronization interfaces. */
        etr_reset();
        stp_reset();

        /* request the clock comparator external interrupt */
        if (register_early_external_interrupt(0x1004,
                                              clock_comparator_interrupt,
                                              &ext_int_info_cc) != 0)
                panic("Couldn't request external interrupt 0x1004");

        /* request the timing alert external interrupt */
        if (register_early_external_interrupt(0x1406,
                                              timing_alert_interrupt,
                                              &ext_int_etr_cc) != 0)
                panic("Couldn't request external interrupt 0x1406");

        if (clocksource_register(&clocksource_tod) != 0)
                panic("Could not register TOD clock source");

        /*
         * The TOD clock is an accurate clock. The xtime should be
         * initialized in a way that the difference between TOD and
         * xtime is reasonably small. Too bad that timekeeping_init
         * sets xtime.tv_nsec to zero. In addition the clock source
         * change from the jiffies clock source to the TOD clock
         * source adds another error of up to 1/HZ second. The same
         * function sets wall_to_monotonic to a value that is too
         * small for /proc/uptime to be accurate.
         * Reset xtime and wall_to_monotonic to sane values.
         */
        write_seqlock_irqsave(&xtime_lock, flags);
        now = get_clock();
        tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime);
        clocksource_tod.cycle_last = now;
        clocksource_tod.raw_time = xtime;
        tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts);
        set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec);
        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* Enable TOD clock interrupts on the boot cpu. */
        init_cpu_timer();

        /* Enable cpu timer interrupts on the boot cpu. */
        vtime_init();
}
irqreturn_t um_timer(int irq, void *dev, struct pt_regs *regs)
{
        unsigned long long nsecs;
        unsigned long flags;

        do_timer(regs);

        write_seqlock_irqsave(&xtime_lock, flags);
        nsecs = get_time() + local_offset;
        xtime.tv_sec = nsecs / NSEC_PER_SEC;
        xtime.tv_nsec = nsecs - xtime.tv_sec * NSEC_PER_SEC;
        write_sequnlock_irqrestore(&xtime_lock, flags);

        return IRQ_HANDLED;
}
void update_vsyscall_tz(void)
{
        unsigned long vectors = (unsigned long)vectors_page;
        unsigned long flags;
        unsigned *seqnum = (unsigned *)(vectors + ARM_VSYSCALL_TIMER_SEQ);
        struct kernel_tz_t *dgtod = (struct kernel_tz_t *)(vectors +
                ARM_VSYSCALL_TIMER_TZ);

        write_seqlock_irqsave(&kuh_time_lock, flags);
        *seqnum = kuh_time_lock.sequence;
        dgtod->tz_minuteswest = sys_tz.tz_minuteswest;
        dgtod->tz_dsttime = sys_tz.tz_dsttime;
        *seqnum = kuh_time_lock.sequence + 1;
        write_sequnlock_irqrestore(&kuh_time_lock, flags);
}
irqreturn_t um_timer(int irq, void *dev)
{
        unsigned long long nsecs;
        unsigned long flags;

        write_seqlock_irqsave(&xtime_lock, flags);

        do_timer(1);

        nsecs = get_time();
        xtime.tv_sec = nsecs / NSEC_PER_SEC;
        xtime.tv_nsec = nsecs - xtime.tv_sec * NSEC_PER_SEC;

        write_sequnlock_irqrestore(&xtime_lock, flags);

        return IRQ_HANDLED;
}
static int clocksource_keeper_timer_function(struct timer_t *timer, void *data)
{
        struct clocksource_t *cs = (struct clocksource_t *)(data);
        u64_t now, delta, offset;
        irq_flags_t flags;

        write_seqlock_irqsave(&cs->keeper.lock, flags);
        now = clocksource_cycle(cs);
        delta = clocksource_delta(cs, cs->keeper.last, now);
        offset = clocksource_delta2ns(cs, delta);
        cs->keeper.nsec += offset;
        cs->keeper.last = now;
        write_sequnlock_irqrestore(&cs->keeper.lock, flags);

        timer_forward_now(timer, ns_to_ktime(cs->keeper.interval));
        return 1;
}
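The keeper above only advances keeper.nsec from its periodic timer; the point of the seqlock is that readers can compute the current time between timer firings without blocking the writer. A hypothetical reader-side sketch, assuming read_seqbegin()/read_seqretry() counterparts exist for this seqlock type and reusing the accessors from the writer; the function name is illustrative:

/*
 * Hypothetical reader for the keeper: snapshot (nsec, last) under
 * the seqlock read side, retrying if the timer function updated
 * them mid-read, then extend the snapshot with cycles elapsed
 * since the last keeper update.
 */
static u64_t clocksource_keeper_read_ns(struct clocksource_t *cs)
{
        u64_t nsec, last, now;
        unsigned int seq;

        do {
                seq = read_seqbegin(&cs->keeper.lock);
                nsec = cs->keeper.nsec;
                last = cs->keeper.last;
        } while (read_seqretry(&cs->keeper.lock, seq));

        now = clocksource_cycle(cs);
        return nsec + clocksource_delta2ns(cs,
                        clocksource_delta(cs, last, now));
}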
static int timer_resume(struct sys_device *dev)
{
        unsigned long flags;
        unsigned long sec;
        unsigned long sleep_length;

#ifdef CONFIG_HPET_TIMER
        if (is_hpet_enabled())
                hpet_reenable();
#endif
        sec = get_cmos_time() + clock_cmos_diff;
        sleep_length = get_cmos_time() - sleep_start;
        write_seqlock_irqsave(&xtime_lock, flags);
        xtime.tv_sec = sec;
        xtime.tv_nsec = 0;
        write_sequnlock_irqrestore(&xtime_lock, flags);
        jiffies += sleep_length * HZ;
        return 0;
}
/*
 * linux/arch/arm/kernel/vst.c
 *
 * VST code for ARM.
 *
 * 2004 VST and IDLE code, by George Anzinger
 *
 * 2004 (c) MontaVista Software, Inc.
 * Copyright 2004 Sony Corporation.
 * Copyright 2004 Matsushita Electric Industrial Co., Ltd.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/vst.h>
#include <linux/hrtime.h>       /* get_arch_cycles, arch_cycles_per_jiffy */
#include <linux/time.h>         /* xtime_lock */
#include <asm/irq.h>            /* to get the disable/enable irq code */
#include <asm/mach/irq.h>

#define stop_timer()    /* just let it expire.... */

void do_vst_wakeup(struct pt_regs *regs, int irq_flag)
{
        unsigned long jiffies_delta, jiffies_f = jiffies;
        unsigned long flags;

        if (!in_vst_sleep())
                return;

        vst_waking();

        write_seqlock_irqsave(&xtime_lock, flags);
        if (irq_flag)
                vst_successful_exit++;
        else
                vst_external_intr_exit++;

        stop_timer();
        /*
         * OK, now we need to get jiffies up to the right value. Here
         * we lean on the HRT patch to give us some notion of where we
         * are.
         */
        jiffies_delta = get_arch_cycles(jiffies_f) / arch_cycles_per_jiffy;

        if (jiffies_delta) {
                /*
                 * One or more jiffies have elapsed. Do all but the last one
                 * here and then call do_timer() to get the last and update
                 * the wall clock.
                 */
                jiffies_delta--;
                vst_bump_jiffies_by(jiffies_delta);
                vst_skipped_interrupts += jiffies_delta;
                run_local_timers();
        } else {
                conditional_run_timers();
        }
        write_sequnlock_irqrestore(&xtime_lock, flags);
        return;
}
void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
                     struct clocksource *clock, u32 mult)
{
        unsigned long flags;

        write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);

        /* copy vsyscall data */
        vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
        vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
        vsyscall_gtod_data.clock.mask = clock->mask;
        vsyscall_gtod_data.clock.mult = mult;
        vsyscall_gtod_data.clock.shift = clock->shift;
        vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
        vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
        vsyscall_gtod_data.wall_to_monotonic = *wtm;
        vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();

        write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
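All of the update_vsyscall() writers in this section pair with readers on the lockless side of the seqlock. A minimal sketch of that consumer pattern against vsyscall_gtod_data, using the standard read_seqbegin()/read_seqretry() read-side API; the helper name and the choice of fields read are illustrative:

/*
 * Illustrative reader for the writer above: snapshot the published
 * wall time and retry if update_vsyscall() ran concurrently.
 */
static void read_wall_time_sketch(struct timespec *ts)
{
        unsigned int seq;

        do {
                seq = read_seqbegin(&vsyscall_gtod_data.lock);
                ts->tv_sec = vsyscall_gtod_data.wall_time_sec;
                ts->tv_nsec = vsyscall_gtod_data.wall_time_nsec;
        } while (read_seqretry(&vsyscall_gtod_data.lock, seq));
}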
void __init time_init(void)
{
        /* This function is only called on the boot processor */
        unsigned long flags;
        struct rtc_time tm;

        ppc_md.calibrate_decr();

#ifdef CONFIG_PPC_ISERIES
        if (!piranha_simulator)
#endif
                ppc_md.get_boot_time(&tm);

        write_seqlock_irqsave(&xtime_lock, flags);
        xtime.tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                              tm.tm_hour, tm.tm_min, tm.tm_sec);
        tb_last_stamp = get_tb();
        do_gtod.tb_orig_stamp = tb_last_stamp;
        do_gtod.varp = &do_gtod.vars[0];
        do_gtod.var_idx = 0;
        do_gtod.varp->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
        do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
        do_gtod.varp->tb_to_xs = tb_to_xs;
        do_gtod.tb_to_us = tb_to_us;
        xtime_sync_interval = tb_ticks_per_sec - (tb_ticks_per_sec / 8);
        next_xtime_sync_tb = tb_last_stamp + xtime_sync_interval;

        time_freq = 0;

        xtime.tv_nsec = 0;
        last_rtc_update = xtime.tv_sec;
        set_normalized_timespec(&wall_to_monotonic,
                                -xtime.tv_sec, -xtime.tv_nsec);
        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* Not exact, but the timer interrupt takes care of this */
        set_dec(tb_ticks_per_jiffy);
}
void hfi1_put_txreq(struct verbs_txreq *tx)
{
        struct hfi1_ibdev *dev;
        struct rvt_qp *qp;
        unsigned long flags;
        unsigned int seq;
        struct hfi1_qp_priv *priv;

        qp = tx->qp;
        dev = to_idev(qp->ibqp.device);

        if (tx->mr)
                rvt_put_mr(tx->mr);

        sdma_txclean(dd_from_dev(dev), &tx->txreq);

        /* Free verbs_txreq and return to slab cache */
        kmem_cache_free(dev->verbs_txreq_cache, tx);

        do {
                seq = read_seqbegin(&dev->iowait_lock);
                if (!list_empty(&dev->txwait)) {
                        struct iowait *wait;

                        write_seqlock_irqsave(&dev->iowait_lock, flags);
                        wait = list_first_entry(&dev->txwait, struct iowait,
                                                list);
                        qp = iowait_to_qp(wait);
                        priv = qp->priv;
                        list_del_init(&priv->s_iowait.list);
                        /* refcount held until actual wake up */
                        write_sequnlock_irqrestore(&dev->iowait_lock, flags);
                        hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
                        break;
                }
        } while (read_seqretry(&dev->iowait_lock, seq));
}
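A design note on the locking here, worth contrasting with the simpler writers in this section: the txwait list is first peeked at under the seqlock's cheap read side, and the exclusive write_seqlock_irqsave() is taken only when that peek actually finds a waiter to dequeue. The read_seqretry() loop covers the race where a writer queues an entry concurrently with the list_empty() check, so the common no-waiters path completes without ever taking the lock exclusively.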
/*
 * Timer interrupt for 32KHz timer. When dynamic tick is enabled, this
 * function is also called from other interrupts to remove latency
 * issues with dynamic tick. In the dynamic tick case, we need to lock
 * with irqsave.
 */
static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id,
                                            struct pt_regs *regs)
{
        unsigned long flags;
        unsigned long now;

        write_seqlock_irqsave(&xtime_lock, flags);
        now = omap_32k_sync_timer_read();

        while (now - omap_32k_last_tick >= OMAP_32K_TICKS_PER_HZ) {
#ifdef OMAP_32K_TICK_MODULO
                /* Modulo addition may put omap_32k_last_tick ahead of now
                 * and cause unwanted repetition of the while loop.
                 */
                if (unlikely(now - omap_32k_last_tick == ~0))
                        break;

                modulo_count += OMAP_32K_TICK_MODULO;
                if (modulo_count > HZ) {
                        ++omap_32k_last_tick;
                        modulo_count -= HZ;
                }
#endif
                omap_32k_last_tick += OMAP_32K_TICKS_PER_HZ;
                timer_tick(regs);
        }

        /* Restart timer so we don't drift off due to modulo or dynamic tick.
         * By default we program the next timer to be continuous to avoid
         * latencies during high system load. During dynamic tick operation the
         * continuous timer can be overridden from pm_idle to be longer.
         */
        omap_32k_timer_start(omap_32k_last_tick + OMAP_32K_TICKS_PER_HZ - now);
        write_sequnlock_irqrestore(&xtime_lock, flags);

        return IRQ_HANDLED;
}
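To make the modulo arithmetic concrete, assuming the usual definitions OMAP_32K_TICKS_PER_HZ = 32768 / HZ and OMAP_32K_TICK_MODULO = 32768 % HZ (neither shown in this snippet): with HZ = 100 that is 327 hardware ticks per jiffy with a remainder of 68. modulo_count then accumulates 68 per jiffy, and each time it exceeds HZ the code advances omap_32k_last_tick by one extra tick, so 100 jiffies consume 327 * 100 + 68 = 32768 ticks and the software clock stays locked to the 32768 Hz counter.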
int do_settimeofday(struct timespec *tv)
{
        time_t wtm_sec, new_sec = tv->tv_sec;
        long wtm_nsec, new_nsec = tv->tv_nsec;
        unsigned long flags;
        unsigned long delta_xsec;
        long int tb_delta;
        unsigned long new_xsec;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&xtime_lock, flags);
        /* Updating the RTC is not the job of this code. If the time is
         * stepped under NTP, the RTC will be updated after STA_UNSYNC
         * is cleared. Tools like clock/hwclock either copy the RTC
         * to the system time, in which case there is no point in writing
         * to the RTC again, or write to the RTC but then they don't call
         * settimeofday to perform this operation.
         */
#ifdef CONFIG_PPC_ISERIES
        if (first_settimeofday) {
                iSeries_tb_recal();
                first_settimeofday = 0;
        }
#endif
        tb_delta = tb_ticks_since(tb_last_stamp);
        tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;

        new_nsec -= tb_delta / tb_ticks_per_usec / 1000;

        wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

        set_normalized_timespec(&xtime, new_sec, new_nsec);
        set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

        /* In case of a large backwards jump in time with NTP, we want the
         * clock to be updated as soon as the PLL is again in lock.
         */
        last_rtc_update = new_sec - 658;

        time_adjust = 0;                /* stop active adjtime() */
        time_status |= STA_UNSYNC;
        time_maxerror = NTP_PHASE_LIMIT;
        time_esterror = NTP_PHASE_LIMIT;

        delta_xsec = mulhdu((tb_last_stamp - do_gtod.tb_orig_stamp),
                            do_gtod.varp->tb_to_xs);
        new_xsec = (new_nsec * XSEC_PER_SEC) / NSEC_PER_SEC;
        new_xsec += new_sec * XSEC_PER_SEC;
        if (new_xsec > delta_xsec) {
                do_gtod.varp->stamp_xsec = new_xsec - delta_xsec;
        } else {
                /* This is only for the case where the user is setting the
                 * time way back to a time such that the boot time would have
                 * been before 1970 ... eg. we booted ten days ago, and we
                 * are setting the time to Jan 5, 1970.
                 */
                do_gtod.varp->stamp_xsec = new_xsec;
                do_gtod.tb_orig_stamp = tb_last_stamp;
        }

        write_sequnlock_irqrestore(&xtime_lock, flags);
        return 0;
}