/*
 * This version of gettimeofday has microsecond resolution.
 *
 * Reads xtime (seconds + nanoseconds) under the xtime_lock seqlock and
 * interpolates sub-jiffy time from the timebase ticks elapsed since the
 * last timer tick, plus any ticks the timer code has not yet folded into
 * xtime (jiffies - wall_jiffies).  The seqlock loop retries the whole
 * snapshot if a writer updated xtime concurrently.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long seq;
	unsigned delta, lost_ticks, usec, sec;

	do {
		/* Begin a consistent read of the time variables. */
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		sec = xtime.tv_sec;
		usec = (xtime.tv_nsec / 1000);	/* ns -> us */
		/* Timebase ticks since the last recorded timer tick. */
		delta = tb_ticks_since(tb_last_stamp);
#ifdef CONFIG_SMP
		/* As long as timebases are not in sync, gettimeofday can only
		 * have jiffy resolution on SMP.
		 */
		if (!smp_tb_synchronized)
			delta = 0;
#endif /* CONFIG_SMP */
		/* Ticks not yet accounted into xtime by the timer code. */
		lost_ticks = jiffies - wall_jiffies;
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	/* Convert (lost jiffies + partial jiffy) of timebase ticks to us. */
	usec += mulhwu(tb_to_us, tb_ticks_per_jiffy * lost_ticks + delta);
	/* Normalize: carry whole seconds out of the microsecond field. */
	while (usec >= 1000000) {
		sec++;
		usec -= 1000000;
	}
	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
/*
 * Set the system clock to the given wall-clock time (ppc32 variant).
 *
 * @tv: new wall-clock time; tv_nsec must be in [0, NSEC_PER_SEC).
 * Returns 0 on success, -EINVAL if tv_nsec is out of range.
 *
 * The new time is backdated by the interval since the last timer tick
 * (tb_delta) so that the next tick's advance of xtime lands the clock on
 * exactly the requested value.  wall_to_monotonic is adjusted by the
 * opposite of the wall-clock step so the monotonic clock is unaffected.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, new_sec = tv->tv_sec;
	long wtm_nsec, new_nsec = tv->tv_nsec;
	unsigned long flags;
	int tb_delta;

	/* Unsigned compare also rejects negative tv_nsec values. */
	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);
	/* Updating the RTC is not the job of this code. If the time is
	 * stepped under NTP, the RTC will be update after STA_UNSYNC
	 * is cleared. Tool like clock/hwclock either copy the RTC
	 * to the system time, in which case there is no point in writing
	 * to the RTC again, or write to the RTC but then they don't call
	 * settimeofday to perform this operation. Note also that
	 * we don't touch the decrementer since:
	 * a) it would lose timer interrupt synchronization on SMP
	 * (if it is working one day)
	 * b) it could make one jiffy spuriously shorter or longer
	 * which would introduce another source of uncertainty potentially
	 * harmful to relatively short timers.
	 */

	/* This works perfectly on SMP only if the tb are in sync but
	 * guarantees an error < 1 jiffy even if they are off by eons,
	 * still reasonable when gettimeofday resolution is 1 jiffy.
	 */
	/* Timebase ticks since this CPU's last tick, plus unaccounted
	 * jiffies, all expressed in timebase ticks. */
	tb_delta = tb_ticks_since(last_jiffy_stamp(smp_processor_id()));
	tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;

	/* Backdate the requested time by that interval (ticks -> us -> ns);
	 * set_normalized_timespec() below fixes up a negative tv_nsec. */
	new_nsec -= 1000 * mulhwu(tb_to_us, tb_delta);

	/* Shift wall_to_monotonic in the opposite direction of the step
	 * so that wall + wall_to_monotonic stays constant. */
	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

	set_normalized_timespec(&xtime, new_sec, new_nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* In case of a large backwards jump in time with NTP, we want the
	 * clock to be updated as soon as the PLL is again in lock.
	 */
	last_rtc_update = new_sec - 658;

	time_adjust = 0;		/* stop active adjtime() */
	/* Mark the clock unsynchronized with worst-case error estimates;
	 * NTP will clear these once it re-locks. */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	write_sequnlock_irqrestore(&xtime_lock, flags);
	/* Notify timers (e.g. CLOCK_REALTIME) that the clock stepped. */
	clock_was_set();
	return 0;
}
/*
 * Wait for the FPGA to raise the CIAR-ready bit (0x1000 in BAR0_STAT_LINES).
 *
 * @fpga: mapped FPGA register base.
 * Returns 0 once the bit is set, 1 on timeout (after logging an error).
 */
static int fpga_wait_for_ciar(void __iomem *fpga)
{
	unsigned long start, loops;

	/* This is the normal case: the bit is already set, no waiting. */
	if (fpga_read(fpga, BAR0_STAT_LINES) & 0x1000)
		return 0;

#ifdef CONFIG_X86_TSC
	/* Poll in 100us steps; give up after 7 iterations (~700us).
	 * NOTE(review): the ppc path below times out at 512us — presumably
	 * these were meant to match; confirm before tightening. */
	loops = 0;
	while ((fpga_read(fpga, BAR0_STAT_LINES) & 0x1000) == 0) {
		if (loops > 6) {
			PK_PRINT("ERROR: wait_for_ciar timeout\n");
			return 1;
		}
		cpu_relax();
		udelay(100);
		loops++;
	}
#else
	/* Busy-wait on the timebase with a 512us timeout. */
	start = get_tbl();
	loops = tb_ticks_per_usec << 9;	/* 512us */
	while ((fpga_read(fpga, BAR0_STAT_LINES) & 0x1000) == 0) {
		if (tb_ticks_since(start) > loops) {
			PK_PRINT("ERROR: wait_for_ciar timeout\n");
			return 1;
		}
		cpu_relax();
	}
#endif
	return 0;
}
/*
 * 64-bit time(2) syscall: return the current time in whole seconds,
 * optionally storing it at the user pointer @tloc.
 *
 * Returns the current seconds value, or -EFAULT if @tloc is given but
 * the store to user space fails.
 */
asmlinkage time_t sys64_time(time_t* tloc)
{
	time_t secs;
	time_t usecs;

	/* Timebase ticks since the last tick, plus any jiffies not yet
	 * folded into xtime, so the second boundary is accurate. */
	long tb_delta = tb_ticks_since(tb_last_stamp);
	tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;

	secs  = xtime.tv_sec;
	/* xtime is a struct timespec elsewhere in this file (tv_nsec);
	 * convert nanoseconds to microseconds rather than referencing a
	 * nonexistent tv_usec member. */
	usecs = xtime.tv_nsec / 1000 + tb_delta / tb_ticks_per_usec;
	/* Carry accumulated microseconds into whole seconds. */
	while (usecs >= USEC_PER_SEC) {
		++secs;
		usecs -= USEC_PER_SEC;
	}

	if (tloc) {
		if (put_user(secs, tloc))
			secs = -EFAULT;
	}

	return secs;
}
/*
 * Set the system clock to the given wall-clock time (ppc64 variant).
 *
 * @tv: new wall-clock time; tv_nsec must be in [0, NSEC_PER_SEC).
 * Returns 0 on success, -EINVAL if tv_nsec is out of range.
 *
 * Besides updating xtime/wall_to_monotonic as on ppc32, this also
 * recomputes do_gtod.varp->stamp_xsec so the lock-free gettimeofday
 * path (based on tb_orig_stamp + tb_to_xs) agrees with the new time.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, new_sec = tv->tv_sec;
	long wtm_nsec, new_nsec = tv->tv_nsec;
	unsigned long flags;
	unsigned long delta_xsec;
	long int tb_delta;
	unsigned long new_xsec;

	/* Unsigned compare also rejects negative tv_nsec values. */
	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);
	/* Updating the RTC is not the job of this code. If the time is
	 * stepped under NTP, the RTC will be update after STA_UNSYNC
	 * is cleared. Tool like clock/hwclock either copy the RTC
	 * to the system time, in which case there is no point in writing
	 * to the RTC again, or write to the RTC but then they don't call
	 * settimeofday to perform this operation.
	 */
#ifdef CONFIG_PPC_ISERIES
	/* On the first settimeofday, recalibrate the timebase against the
	 * hypervisor before trusting tick arithmetic. */
	if ( first_settimeofday ) {
		iSeries_tb_recal();
		first_settimeofday = 0;
	}
#endif
	/* Timebase ticks since the last tick plus unaccounted jiffies. */
	tb_delta = tb_ticks_since(tb_last_stamp);
	tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;

	/* Backdate the requested time by that interval (ticks -> us -> ns);
	 * set_normalized_timespec() below fixes up a negative tv_nsec. */
	new_nsec -= tb_delta / tb_ticks_per_usec / 1000;

	/* Shift wall_to_monotonic in the opposite direction of the step
	 * so that wall + wall_to_monotonic stays constant. */
	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

	set_normalized_timespec(&xtime, new_sec, new_nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* In case of a large backwards jump in time with NTP, we want the
	 * clock to be updated as soon as the PLL is again in lock.
	 */
	last_rtc_update = new_sec - 658;

	time_adjust = 0;                /* stop active adjtime() */
	/* Mark the clock unsynchronized with worst-case error estimates;
	 * NTP will clear these once it re-locks. */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	/* xsec (2^-20 s units) elapsed from boot stamp to the last tick;
	 * stamp_xsec must satisfy: new time = stamp_xsec + delta_xsec. */
	delta_xsec = mulhdu( (tb_last_stamp-do_gtod.tb_orig_stamp),
			     do_gtod.varp->tb_to_xs );

	new_xsec = (new_nsec * XSEC_PER_SEC) / NSEC_PER_SEC;
	new_xsec += new_sec * XSEC_PER_SEC;
	if ( new_xsec > delta_xsec ) {
		do_gtod.varp->stamp_xsec = new_xsec - delta_xsec;
	} else {
		/* This is only for the case where the user is setting the time
		 * way back to a time such that the boot time would have been
		 * before 1970 ... eg. we booted ten days ago, and we are setting
		 * the time to Jan 5, 1970 */
		do_gtod.varp->stamp_xsec = new_xsec;
		do_gtod.tb_orig_stamp = tb_last_stamp;
	}

	write_sequnlock_irqrestore(&xtime_lock, flags);
	return 0;
}