Example No. 1
int do_settimeofday (struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	{
		wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
		wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

		set_normalized_timespec(&xtime, sec, nsec);
		set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

		time_adjust = 0;		/* stop active adjtime() */
		time_status |= STA_UNSYNC;
		time_maxerror = NTP_PHASE_LIMIT;
		time_esterror = NTP_PHASE_LIMIT;
		time_interpolator_reset();
	}
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}
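All of these writers take xtime_lock as a seqlock, so readers retry instead of blocking. For reference, a reader of xtime would typically pair with the write_seqlock_irq()/write_sequnlock_irq() calls above roughly as follows (an illustrative sketch, not one of the listed snippets; identifiers follow the surrounding code):

static void read_xtime_snapshot(struct timespec *ts)
{
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);	/* snapshot the sequence counter */
		*ts = xtime;				/* copy the protected data */
	} while (read_seqretry(&xtime_lock, seq));	/* retry if a writer raced us */
}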
Example No. 2
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/*
	 * This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time.  Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	nsec -= get_timer_offset() * NSEC_PER_USEC;

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	ntp_clear();
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();

	return 0;
}
Example No. 3
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/*
	 * This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time.  Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	nsec -= cur_timer->get_offset() * NSEC_PER_USEC;
	nsec -= (jiffies - wall_jiffies) * TICK_NSEC;

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}
Example No. 4
int do_settimeofday(struct timespec *tv)
{
	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq (&xtime_lock);

	/* This is revolting. We need to set the xtime.tv_nsec
	 * correctly. However, the value in this location is
	 * the value at the last tick.
	 * Discover what correction gettimeofday
	 * would have done, and then undo it!
	 */
#if 0
	tv->tv_nsec -= mach_gettimeoffset() * 1000;
#endif

	while (tv->tv_nsec < 0) {
		tv->tv_nsec += NSEC_PER_SEC;
		tv->tv_sec--;
	}

	xtime.tv_sec = tv->tv_sec;
	xtime.tv_nsec = tv->tv_nsec;

	time_adjust = 0;		/* stop active adjtime () */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	write_sequnlock_irq (&xtime_lock);
	clock_was_set();
	return 0;
}
Example No. 5
File: time.c  Project: ivucica/linux
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/* This is revolting. We need to set the xtime.tv_nsec
	 * correctly. However, the value in this location is
	 * the value at the last tick.
	 * Discover what correction gettimeofday
	 * would have done, and then undo it!
	 */
	nsec -= 1000 * mach_gettimeoffset();

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	ntp_clear();
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}
Example No. 6
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/*
	 * This is revolting. We need to set xtime.tv_nsec
	 * correctly. However, the value in this location is
	 * the value at the last tick.
	 * Discover what correction gettimeofday
	 * would have done, and then undo it!
	 */
	if (mach_gettimeoffset)
		nsec -= (mach_gettimeoffset() * 1000);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}
Example No. 7
/*
 * Adjust the time obtained from the CMOS to be UTC time instead of
 * local time.
 * 
 * This is ugly, but preferable to the alternatives.  Otherwise we
 * would either need to write a program to do it in /etc/rc (and risk
 * confusion if the program gets run more than once; it would also be 
 * hard to make the program warp the clock precisely n hours)  or
 * compile in the timezone information into the kernel.  Bad, bad....
 *
 *              				- TYT, 1992-01-01
 *
 * The best thing to do is to keep the CMOS clock in universal time (UTC)
 * as real UNIX machines always do it. This avoids all headaches about
 * daylight saving times and warping kernel clocks.
 */
static inline void warp_clock(void)
{
	write_seqlock_irq(&xtime_lock);
	wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
	xtime.tv_sec += sys_tz.tz_minuteswest * 60;
	time_interpolator_reset();
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
}
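warp_clock() only makes sense when applied exactly once, the first time user space installs a timezone. A simplified sketch of how a caller such as do_sys_settimeofday() might gate it (control flow assumed, validation and capability checks omitted):

/* Assumed, simplified caller: record the timezone and warp the clock only
 * the first time a timezone is set without an accompanying time value. */
int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
{
	static int firsttime = 1;

	if (tz) {
		sys_tz = *tz;			/* remember the offset for userspace */
		if (firsttime) {
			firsttime = 0;
			if (!tv)
				warp_clock();	/* shift xtime from local time to UTC */
		}
	}
	if (tv)
		return do_settimeofday(tv);
	return 0;
}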
Example No. 8
int do_settimeofday(struct timespec *tv)
{
	int ret;

	write_seqlock_irq(&xtime_lock);
	ret = bus_do_settimeofday(tv);
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return ret;
}
Example No. 9
static void run_hrtimer_softirq(struct softirq_action *h)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);

	if (cpu_base->clock_was_set) {
		cpu_base->clock_was_set = 0;
		clock_was_set();
	}

	hrtimer_peek_ahead_timers();
}
Example No. 10
static inline void set_time(unsigned long long nsecs)
{
	unsigned long long now;
	unsigned long flags;

	spin_lock_irqsave(&timer_spinlock, flags);
	now = os_nsecs();
	local_offset = nsecs - now;
	spin_unlock_irqrestore(&timer_spinlock, flags);

	clock_was_set();
}
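The stored local_offset is presumably consumed by the matching read path; a sketch of what that counterpart might look like (assumed, mirroring the locking above):

/* Assumed counterpart to set_time(): read wall time back by adding the
 * recorded offset to the host clock under the same spinlock. */
static unsigned long long get_time(void)
{
	unsigned long long nsecs;
	unsigned long flags;

	spin_lock_irqsave(&timer_spinlock, flags);
	nsecs = os_nsecs() + local_offset;
	spin_unlock_irqrestore(&timer_spinlock, flags);

	return nsecs;
}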
Example No. 11
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, new_sec = tv->tv_sec;
	long wtm_nsec, new_nsec = tv->tv_nsec;
	unsigned long flags;
	int tb_delta;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);
	/* Updating the RTC is not the job of this code. If the time is
	 * stepped under NTP, the RTC will be updated after STA_UNSYNC
	 * is cleared. Tools like clock/hwclock either copy the RTC
	 * to the system time, in which case there is no point in writing
	 * to the RTC again, or write to the RTC but then they don't call
	 * settimeofday to perform this operation. Note also that
	 * we don't touch the decrementer since:
	 * a) it would lose timer interrupt synchronization on SMP
	 * (if it is working one day)
	 * b) it could make one jiffy spuriously shorter or longer
	 * which would introduce another source of uncertainty potentially
	 * harmful to relatively short timers.
	 */

	/* This works perfectly on SMP only if the tb are in sync but
	 * guarantees an error < 1 jiffy even if they are off by eons,
	 * still reasonable when gettimeofday resolution is 1 jiffy.
	 */
	tb_delta = tb_ticks_since(last_jiffy_stamp(smp_processor_id()));
	tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;

	new_nsec -= 1000 * mulhwu(tb_to_us, tb_delta);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

	set_normalized_timespec(&xtime, new_sec, new_nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* In case of a large backwards jump in time with NTP, we want the
	 * clock to be updated as soon as the PLL is again in lock.
	 */
	last_rtc_update = new_sec - 658;

	time_adjust = 0;                /* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	write_sequnlock_irqrestore(&xtime_lock, flags);
	clock_was_set();
	return 0;
}
Example No. 12
static void do_suspend(void)
{
	int err;
	int cancelled = 1;

	shutting_down = SHUTDOWN_SUSPEND;

#ifdef CONFIG_PREEMPT
	/* If the kernel is preemptible, we need to freeze all the processes
	   to prevent them from being in the middle of a pagetable update
	   during suspend. */
	err = freeze_processes();
	if (err) {
		printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
		return;
	}
#endif

	err = device_suspend(PMSG_SUSPEND);
	if (err) {
		printk(KERN_ERR "xen suspend: device_suspend %d\n", err);
		goto out;
	}

	printk("suspending xenbus...\n");
	/* XXX use normal device tree? */
	xenbus_suspend();

	err = stop_machine(xen_suspend, &cancelled, &cpumask_of_cpu(0));
	if (err) {
		printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
		goto out;
	}

	if (!cancelled) {
		xen_arch_resume();
		xenbus_resume();
	} else
		xenbus_suspend_cancel();

	device_resume(PMSG_RESUME);

	/* Make sure timer events get retriggered on all CPUs */
	clock_was_set();
out:
#ifdef CONFIG_PREEMPT
	thaw_processes();
#endif
	shutting_down = SHUTDOWN_INVALID;
}
Example No. 13
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/*
	 * This is revolting. We need to set xtime.tv_nsec
	 * correctly. However, the value in this location is
	 * the value at the last tick.
	 * Discover what correction gettimeofday
	 * would have done, and then undo it!
	 */
	nsec -= (gettimeoffset() * 1000);

	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	time_adjust = 0;	/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	write_sequnlock_irq(&xtime_lock);
	clock_was_set();

	/*
	 *  rtc_set() busy-waits up to a second (the next tick of the RTC)
	 *  for completion of the write.
	 *  We release xtime_lock before updating the RTC so as not to
	 *  lock out the timer_interrupt() routine which also acquires
	 *  xtime_lock.  Locking out timer_interrupt() loses ticks!
	 */
#ifdef CONFIG_BFIN_HAVE_RTC
	rtc_set(sec);
#endif

	return 0;
}
Example No. 14
int do_settimeofday(struct timespec *tv)
{
	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/* This is revolting. We need to set xtime.tv_nsec
	 * correctly. However, the value in this location is
	 * the value at the last tick.
	 * Discover what correction gettimeofday
	 * would have done, and then undo it!
	 */
	while (tv->tv_nsec < 0) {
		tv->tv_nsec += NSEC_PER_SEC;
		tv->tv_sec--;
	}

	xtime.tv_sec = tv->tv_sec;
	xtime.tv_nsec = tv->tv_nsec;
	ntp_clear();
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}
Example No. 15
static void clock_was_set_work(struct work_struct *work)
{
	clock_was_set();
}
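clock_was_set() cannot always be called directly from the context that notices the change, so a wrapper like this is normally attached to a work item and scheduled from atomic context. A minimal sketch of that wiring (names assumed):

/* Assumed wiring: defer clock_was_set() to process context via a work item. */
static DECLARE_WORK(hrtimer_work, clock_was_set_work);

/* Safe to call from hard interrupt context; the workqueue runs the
 * notification later in process context. */
void clock_was_set_delayed(void)
{
	schedule_work(&hrtimer_work);
}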
Example No. 16
static void do_suspend(void)
{
	int err;
	struct suspend_info si;

	shutting_down = SHUTDOWN_SUSPEND;

#ifdef CONFIG_PREEMPT
	/* If the kernel is preemptible, we need to freeze all the processes
	   to prevent them from being in the middle of a pagetable update
	   during suspend. */
	err = freeze_processes();
	if (err) {
		pr_err("%s: freeze failed %d\n", __func__, err);
		goto out;
	}
#endif

	err = dpm_suspend_start(PMSG_FREEZE);
	if (err) {
		pr_err("%s: dpm_suspend_start %d\n", __func__, err);
		goto out_thaw;
	}

	printk(KERN_DEBUG "suspending xenstore...\n");
	xs_suspend();

	err = dpm_suspend_end(PMSG_FREEZE);
	if (err) {
		pr_err("dpm_suspend_end failed: %d\n", err);
		si.cancelled = 0;
		goto out_resume;
	}

	si.cancelled = 1;

	err = stop_machine(xen_suspend, &si, cpumask_of(0));

	/* Resume console as early as possible. */
	if (!si.cancelled)
		xen_console_resume();

	raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);

	dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	if (err) {
		pr_err("failed to start xen_suspend: %d\n", err);
		si.cancelled = 1;
	}

out_resume:
	if (!si.cancelled) {
		xen_arch_resume();
		xs_resume();
	} else
		xs_suspend_cancel();

	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	/* Make sure timer events get retriggered on all CPUs */
	clock_was_set();

out_thaw:
#ifdef CONFIG_PREEMPT
	thaw_processes();
out:
#endif
	shutting_down = SHUTDOWN_INVALID;
}
Example No. 17
static void do_suspend(void)
{
	int err;
	struct suspend_info si;

	shutting_down = SHUTDOWN_SUSPEND;

	err = freeze_processes();
	if (err) {
		printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
		goto out;
	}

	err = dpm_suspend_start(PMSG_FREEZE);
	if (err) {
		printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err);
		goto out_thaw;
	}

	printk(KERN_DEBUG "suspending xenstore...\n");
	xs_suspend();

	err = dpm_suspend_end(PMSG_FREEZE);
	if (err) {
		printk(KERN_ERR "dpm_suspend_end failed: %d\n", err);
		si.cancelled = 0;
		goto out_resume;
	}

	si.cancelled = 1;

	if (xen_hvm_domain()) {
		si.arg = 0UL;
		si.pre = NULL;
		si.post = &xen_hvm_post_suspend;
	} else {
		si.arg = virt_to_mfn(xen_start_info);
		si.pre = &xen_pre_suspend;
		si.post = &xen_post_suspend;
	}

	err = stop_machine(xen_suspend, &si, cpumask_of(0));

	dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	if (err) {
		printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
		si.cancelled = 1;
	}

out_resume:
	if (!si.cancelled) {
		xen_arch_resume();
		xs_resume();
	} else
		xs_suspend_cancel();

	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	/* Make sure timer events get retriggered on all CPUs */
	clock_was_set();

out_thaw:
	thaw_processes();
out:
	shutting_down = SHUTDOWN_INVALID;
}
Example No. 18
static void do_suspend(void)
{
    int err;
    int cancelled = 1;

    shutting_down = SHUTDOWN_SUSPEND;

    err = stop_machine_create();
    if (err) {
        printk(KERN_ERR "xen suspend: failed to setup stop_machine %d\n", err);
        goto out;
    }

#ifdef CONFIG_PREEMPT
    /* If the kernel is preemptible, we need to freeze all the processes
       to prevent them from being in the middle of a pagetable update
       during suspend. */
    err = freeze_processes();
    if (err) {
        printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
        goto out_destroy_sm;
    }
#endif

    err = dpm_suspend_start(PMSG_SUSPEND);
    if (err) {
        printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err);
        goto out_thaw;
    }

    printk(KERN_DEBUG "suspending xenstore...\n");
    xs_suspend();

    err = dpm_suspend_noirq(PMSG_SUSPEND);
    if (err) {
        printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
        goto out_resume;
    }

    err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));

    dpm_resume_noirq(PMSG_RESUME);

    if (err) {
        printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
        cancelled = 1;
    }

out_resume:
    if (!cancelled) {
        xen_arch_resume();
        xs_resume();
    } else
        xs_suspend_cancel();

    dpm_resume_end(PMSG_RESUME);

    /* Make sure timer events get retriggered on all CPUs */
    clock_was_set();

out_thaw:
#ifdef CONFIG_PREEMPT
    thaw_processes();

out_destroy_sm:
#endif
    stop_machine_destroy();

out:
    shutting_down = SHUTDOWN_INVALID;
}
Example No. 19
static void do_suspend(void)
{
	int err;
	struct suspend_info si;

	shutting_down = SHUTDOWN_SUSPEND;

#ifdef CONFIG_PREEMPT
	/* If the kernel is preemptible, we need to freeze all the processes
	   to prevent them from being in the middle of a pagetable update
	   during suspend. */
	err = freeze_processes();
	if (err) {
		printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
		goto out;
	}
#endif

	err = dpm_suspend_start(PMSG_FREEZE);
	if (err) {
		printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err);
		goto out_thaw;
	}

	printk(KERN_DEBUG "suspending xenstore...\n");
	xs_suspend();

	err = dpm_suspend_noirq(PMSG_FREEZE);
	if (err) {
		printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
		goto out_resume;
	}

	si.cancelled = 1;

	if (xen_hvm_domain()) {
		si.arg = 0UL;
		si.pre = NULL;
		si.post = &xen_hvm_post_suspend;
	} else {
		si.arg = virt_to_mfn(xen_start_info);
		si.pre = &xen_pre_suspend;
		si.post = &xen_post_suspend;
	}

	err = stop_machine(xen_suspend, &si, cpumask_of(0));

	dpm_resume_noirq(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	if (err) {
		printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
		si.cancelled = 1;
	}

out_resume:
	if (!si.cancelled) {
		xen_arch_resume();
		xs_resume();
	} else
		xs_suspend_cancel();

	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	/* Make sure timer events get retriggered on all CPUs */
	clock_was_set();

out_thaw:
#ifdef CONFIG_PREEMPT
	thaw_processes();
out:
#endif
	shutting_down = SHUTDOWN_INVALID;
}