Example #1
void clk_put(struct clk *clk)
{
	clock_lock();
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
	clock_unlock();
}
Example #2
File: clock.c Project: Prajna/xnu
/*
 *	clock_set_calendar_microtime:
 *
 *	Sets the current calendar value by
 *	recalculating the epoch and offset
 *	from the system clock.
 *
 *	Also adjusts the boottime to keep the
 *	value consistent, writes the new
 *	calendar value to the platform clock,
 *	and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t			secs,
	clock_usec_t		microsecs)
{
	clock_sec_t			sys;
	clock_usec_t		microsys;
	clock_sec_t			newsecs;
	spl_t				s;

	newsecs = (microsecs < 500*USEC_PER_SEC)? secs: secs + 1;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 *	Calculate the new calendar epoch based on
	 *	the new value and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 *	Adjust the boottime based on the delta.
	 */
	clock_boottime += secs - clock_calend.epoch;

	/*
	 *	Set the new calendar epoch.
	 */
	clock_calend.epoch = secs;

	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	/*
	 *	Cancel any adjustment in progress.
	 */
	calend_adjtotal = clock_calend.adjdelta = 0;

	clock_unlock();

	/*
	 *	Set the new value for the platform clock.
	 */
	PESetGMTTimeOfDay(newsecs);

	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();
	
#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
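clock_set_calendar_microtime keeps three quantities consistent: the calendar epoch (calendar time at system-clock zero), the sub-second offset, and clock_boottime, which is shifted by exactly the amount the epoch moves so that calendar time minus boot time still measures the same span. The standalone sketch below walks through that arithmetic with invented names (set_calendar, calendar_epoch, boot_time) and rounds the platform-clock value at half a second; it is an illustration of the bookkeeping only, not the xnu code.

/* Hedged sketch: hypothetical names, not the xnu implementation. */
#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC	1000000u

static uint64_t calendar_epoch;   /* calendar seconds at system-clock zero */
static uint32_t calendar_offset;  /* sub-second part of the epoch, in usec */
static uint64_t boot_time;        /* calendar seconds at boot              */

/*
 * Set the calendar to (secs, microsecs), given the current system uptime
 * (sys, microsys).  Mirrors the structure above: compute the new epoch by
 * subtracting uptime from the new calendar value, and shift boot_time by
 * the same amount the epoch moved.  Returns the whole-second value a
 * platform clock would be handed.
 */
static uint64_t set_calendar(uint64_t secs, uint32_t microsecs,
			     uint64_t sys, uint32_t microsys)
{
	/* Whole seconds for the platform clock, rounded at half a second. */
	uint64_t platform_secs = (microsecs < USEC_PER_SEC / 2) ? secs : secs + 1;

	/* secs.microsecs -= sys.microsys, borrowing across the fraction. */
	if (microsecs < microsys) {
		microsecs += USEC_PER_SEC;
		secs -= 1;
	}
	microsecs -= microsys;
	secs -= sys;

	boot_time += secs - calendar_epoch;	/* keep boottime consistent */
	calendar_epoch = secs;
	calendar_offset = microsecs;

	return platform_secs;
}

int main(void)
{
	/* Example: set the calendar to 1000.25 s with 40.75 s of uptime. */
	uint64_t rtc = set_calendar(1000, 250000, 40, 750000);
	printf("epoch=%llu offset=%uus boottime=%llu rtc=%llu\n",
	       (unsigned long long)calendar_epoch, calendar_offset,
	       (unsigned long long)boot_time, (unsigned long long)rtc);
	return 0;
}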
Example #3
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long ret;
	clock_lock();
	ret = clk->rate;
	clock_unlock();
	return ret;
}
Example #4
int clk_enable(struct clk *clk)
{
	int ret;

	clock_lock();
	ret = local_clk_enable(clk);
	clock_unlock();
	return ret;
}
Example #5
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	long ret;
	clock_lock();
	if (clk->round_rate)
		ret = clk->round_rate(clk, rate);
	else
		ret = clk->rate;
	clock_unlock();
	return ret;
}
Example #6
File: clock.c Project: Prajna/xnu
/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot or
 *	wake event.
 *
 *	Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t			sys, secs = PEGetGMTTimeOfDay();
	clock_usec_t 		microsys, microsecs = 0;
	spl_t				s;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	if ((long)secs >= (long)clock_boottime) {
		/*
		 *	Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 *	Calculate the new calendar epoch based on
		 *	the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 *	Set the new calendar epoch.
		 */
		clock_calend.epoch = secs;

		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		/*
		 *	 Cancel any adjustment in progress.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	clock_unlock();
	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();
	
#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
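Both xnu routines above use TIME_SUB to subtract one (seconds, microseconds) pair from another in place, borrowing a second when the fractional part underflows. A hedged equivalent written as a plain helper is sketched below; time_pair and time_sub are invented names, and the helper updates its first argument in place, which is how TIME_SUB is used above.

/* Hedged sketch: time_pair and time_sub are invented names, not xnu's macro. */
#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC	1000000u

struct time_pair {
	uint64_t secs;
	uint32_t frac;		/* microseconds, always < USEC_PER_SEC */
};

/* a -= b, borrowing one second when the fractional part underflows. */
static void time_sub(struct time_pair *a, const struct time_pair *b)
{
	if (a->frac < b->frac) {
		a->frac += USEC_PER_SEC;
		a->secs -= 1;
	}
	a->frac -= b->frac;
	a->secs -= b->secs;
}

int main(void)
{
	/* 100.2s - 40.7s = 59.5s */
	struct time_pair cal = { 100, 200000 }, uptime = { 40, 700000 };

	time_sub(&cal, &uptime);
	printf("%llu.%06u\n", (unsigned long long)cal.secs, cal.frac);
	return 0;
}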
Example #7
/*
 * Fetch current time value from reliable hardware.
 *
 * The cputimer interface requires a 32 bit return value.  If the ACPI timer
 * is only 24 bits then we have to keep track of the upper 8 bits on our
 * own.
 *
 * XXX we could probably get away with using a per-cpu field for this and
 * just use interrupt disablement instead of clock_lock.
 */
static sysclock_t
acpi_timer_get_timecount24(void)
{
    sysclock_t counter;

    clock_lock();
    counter = acpi_timer_read();
    if (counter < acpi_last_counter)
	acpi_cputimer.base += 0x01000000;
    acpi_last_counter = counter;
    counter += acpi_cputimer.base;
    clock_unlock();
    return (counter);
}
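acpi_timer_get_timecount24 widens a 24-bit hardware counter to the 32 bits the cputimer interface wants: whenever a raw reading is smaller than the previous one the counter must have wrapped, so 2^24 is folded into a software base. The sketch below isolates that rollover bookkeeping; read_hw24 is a simulated counter, the names are invented, and a real version needs the same serialization that clock_lock provides above.

/* Hedged sketch: the simulated counter and names are invented for illustration. */
#include <stdint.h>
#include <stdio.h>

/* Simulated free-running 24-bit counter (stands in for the hardware read). */
static uint32_t fake_ticks;
static uint32_t read_hw24(void)
{
	fake_ticks += 0x00C00000;		/* advance ~3/4 of the 24-bit range */
	return fake_ticks & 0x00FFFFFF;		/* hardware only exposes 24 bits    */
}

static uint32_t last_raw;	/* last raw 24-bit reading              */
static uint32_t base;		/* accumulated wraps, multiples of 2^24 */

/*
 * Extend the 24-bit up-counter to 32 bits: a raw reading smaller than the
 * previous one means the counter wrapped, so add 2^24 to the base.  Real
 * code must serialize callers, which is what clock_lock does above.
 */
static uint32_t get_timecount32(void)
{
	uint32_t raw = read_hw24();

	if (raw < last_raw)
		base += 0x01000000;
	last_raw = raw;

	return base + raw;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("%u\n", get_timecount32());	/* strictly increasing */
	return 0;
}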
Example #8
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = -ENODEV;
	if (!clk->set_parent)
		goto out;

	clock_lock();
	ret = clk->set_parent(clk, parent);
	if (!ret)
		clk->parent = parent;
	clock_unlock();

out:
	return ret;
}
Example #9
File: clock.c Project: Prajna/xnu
/*
 *	clock_get_boottime_nanotime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t			*secs,
	clock_nsec_t		*nanosecs)
{
	spl_t	s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = 0;

	clock_unlock();
	splx(s);
}
Example #10
struct clk *clk_get(struct device *dev, const char *id)
{
	struct clk *clk = ERR_PTR(-ENOENT);
	struct clk **clkp;

	clock_lock();
	for (clkp = onchip_clks; clkp < onchip_clks + ARRAY_SIZE(onchip_clks);
	     clkp++) {
		if (strcmp(id, (*clkp)->name) == 0
		    && try_module_get((*clkp)->owner)) {
			clk = (*clkp);
			break;
		}
	}
	clock_unlock();

	return clk;
}
Example #11
File: clock.c Project: Prajna/xnu
/*
 *	clock_get_calendar_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t			*secs,
	clock_usec_t		*microsecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		/* 
		 * Since offset is decremented during a negative adjustment,
		 * ensure that time increases monotonically without going
		 * temporarily backwards.
		 * If the delta has not yet passed, now is set to the start
		 * of the current adjustment period; otherwise, we're between
		 * the expiry of the delta and the next call to calend_adjust(),
		 * and we offset accordingly.
		 */
		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}
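The interesting branch in clock_get_calendar_microtime is the negative-adjustment case: while calend_adjust() is gradually removing time, a raw mach_absolute_time() reading taken inside the current adjustment window is clamped to the window's start, and one taken after the window is reduced by the full adjoffset, so callers never see the calendar run backwards. Below is a hedged sketch of that clamp in isolation; the names are invented and the real code does this under splclock()/clock_lock.

/* Hedged sketch of the monotonic clamp; names are invented for illustration. */
#include <stdint.h>
#include <stdio.h>

/*
 * While a negative adjustment is in progress, readings taken inside the
 * current adjustment window are pinned to its start (adj_start); readings
 * past the window are shifted back by the amount being removed (adj_offset).
 * Either way the value handed to callers never decreases.
 */
static uint64_t clamp_during_negative_adjust(uint64_t now,
					     uint64_t adj_start,
					     uint32_t adj_offset)
{
	if (now > adj_start) {
		uint32_t elapsed = (uint32_t)(now - adj_start);

		if (elapsed > adj_offset)
			now -= adj_offset;	/* window expired: apply full offset */
		else
			now = adj_start;	/* still inside the window: clamp    */
	}
	return now;
}

int main(void)
{
	const uint64_t start = 1000;
	const uint32_t offset = 50;

	/* Readings before, inside, and after the adjustment window. */
	uint64_t samples[] = { 990, 1010, 1040, 1051, 1200 };
	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%llu -> %llu\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)clamp_during_negative_adjust(samples[i], start, offset));
	return 0;
}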
Example #12
/*
 * Return the current cpu timer count as a 32 bit integer.
 */
static
sysclock_t
i8254_cputimer_count(void)
{
	static __uint16_t cputimer_last;
	__uint16_t count;
	sysclock_t ret;

	clock_lock();
	outb(TIMER_MODE, i8254_walltimer_sel | TIMER_LATCH);
	count = (__uint8_t)inb(i8254_walltimer_cntr);		/* get countdown */
	count |= ((__uint8_t)inb(i8254_walltimer_cntr) << 8);
	count = -count;					/* -> countup */
	if (count < cputimer_last)			/* rollover */
		i8254_cputimer.base += 0x00010000;
	ret = i8254_cputimer.base | count;
	cputimer_last = count;
	clock_unlock();
	return(ret);
}
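i8254_cputimer_count combines two tricks: the latched 16-bit value counts down, so it is negated to get a count-up value, and wraps of that 16-bit value are folded into a 32-bit software base, the same rollover bookkeeping as the 24-bit ACPI case above. The sketch below isolates the negate-and-extend step; latch_read16 is a simulated countdown counter, the names are invented, and the real code keeps clock_lock held across the whole read.

/* Hedged sketch: latch_read16() simulates a latched 16-bit countdown value. */
#include <stdint.h>
#include <stdio.h>

/* Simulated 16-bit countdown counter (stands in for the latched i8254 read). */
static uint32_t fake_ticks;
static uint16_t latch_read16(void)
{
	fake_ticks += 40000;			/* elapsed ticks since last read */
	return (uint16_t)(0u - fake_ticks);	/* hardware counts down          */
}

static uint16_t last_count;	/* last count-up value                          */
static uint32_t base;		/* accumulated 16-bit wraps, multiples of 2^16  */

/* Negate the countdown into a countup, then extend across 16-bit rollovers. */
static uint32_t i8254_style_count(void)
{
	uint16_t count = (uint16_t)-latch_read16();	/* countdown -> countup */

	if (count < last_count)				/* 16-bit rollover      */
		base += 0x00010000;
	last_count = count;

	return base | count;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("%u\n", i8254_style_count());	/* strictly increasing */
	return 0;
}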
Example #13
File: clock.c Project: Prajna/xnu
static void
calend_adjust_call(void)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_CRITICAL))
				calend_adjactive++;
		}
	}

	clock_unlock();
	splx(s);
}
Example #14
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EINVAL;

	if (clk->flags & FIXED_RATE)
		goto out;

	clock_lock();
	if ((clk->flags & PARENT_SET_RATE) && clk->parent) {
		clk->user_rate = clk->round_rate(clk, rate);
		/* parent clock needs to be refreshed
		   for the setting to take effect */
		ret = 0;
	} else {
		ret = local_set_rate(clk, rate);
	}
	clock_unlock();

out:
	return ret;
}
Example #15
File: clock.c Project: Prajna/xnu
/*
 *	clock_gettimeofday:
 *
 *	Kernel interface for commpage implementation of
 *	gettimeofday() syscall.
 *
 *	Returns the current calendar value, and updates the
 *	commpage info as appropriate.  Because most calls to
 *	gettimeofday() are handled in user mode by the commpage,
 *	this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t		*secs,
	clock_usec_t	*microsecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta >= 0) {
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += (clock_sec_t)clock_calend.epoch;
	}

	clock_unlock();
	splx(s);
}
Example #16
File: clock.c Project: Prajna/xnu
/*
 *	clock_get_calendar_nanotime:
 *
 *	Returns the current calendar value,
 *	nanoseconds as the fraction.
 *
 *	Since we do not have an interface to
 *	set the calendar with resolution greater
 *	than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t			*secs,
	clock_nsec_t		*nanosecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);

	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}
Example #17
File: clock.c Project: Prajna/xnu
/*
 *	clock_adjtime:
 *
 *	Interface to adjtime() syscall.
 *
 *	Calculates adjustment variables and
 *	initiates adjustment.
 */
void
clock_adjtime(
	long		*secs,
	int			*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_CRITICAL))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;

	clock_unlock();
	splx(s);
}
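Examples #13 and #17 together show the re-arming pattern behind the calendar adjustment: clock_adjtime computes the adjustment, arms a one-shot timer call, and bumps calend_adjactive; each time it fires, calend_adjust_call drops the count, applies one step via calend_adjust(), and re-arms itself while work remains. The sketch below is a userspace simulation of that shape only; every name in it is invented, and a plain counter stands in for timer_call_enter/timer_call_cancel.

/* Hedged sketch: a simulation of the arm/step/re-arm shape, names invented. */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

static int64_t adj_total;	/* amount still to be applied                 */
static int32_t adj_delta;	/* signed amount applied per timer firing     */
static int     adj_active;	/* number of armed (pending) timer callbacks  */

/* Apply one step; return the next firing interval, or 0 when finished. */
static uint32_t adjust_step(void)
{
	if (llabs(adj_total) <= llabs((int64_t)adj_delta)) {
		adj_total = 0;			/* final (possibly partial) step */
		adj_delta = 0;
		return 0;
	}
	adj_total -= adj_delta;
	return 1000;				/* re-fire in 1000 time units */
}

/* Shaped like clock_adjtime: record the request and arm the timer once. */
static void start_adjust(int64_t total, int32_t step)
{
	adj_total = total;
	adj_delta = step;
	if (adj_total != 0)
		adj_active++;			/* pretend timer_call_enter succeeded */
}

/* Shaped like calend_adjust_call: one timer firing. */
static void timer_fired(void)
{
	if (--adj_active == 0 && adjust_step() != 0)
		adj_active++;			/* more to do: re-arm */
}

int main(void)
{
	start_adjust(4500, 1000);
	while (adj_active > 0) {
		timer_fired();
		printf("remaining=%lld active=%d\n",
		       (long long)adj_total, adj_active);
	}
	return 0;
}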
Example #18
/*
 * Reload for the next timeout.  It is possible for the reload value
 * to be 0 or negative, indicating that an immediate timer interrupt
 * is desired.  For now make the minimum 2 ticks.
 *
 * We may have to convert from the system timebase to the 8254 timebase.
 */
static void
i8254_intr_reload(struct cputimer_intr *cti, sysclock_t reload)
{
    __uint16_t count;

    if (i8254_cputimer_div)
	reload /= i8254_cputimer_div;
    else
	reload = (int64_t)reload * cti->freq / sys_cputimer->freq;

    if ((int)reload < 2)
	reload = 2;

    clock_lock();
    if (timer0_running) {
	outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);	/* count-down timer */
	count = (__uint8_t)inb(TIMER_CNTR0);		/* lsb */
	count |= ((__uint8_t)inb(TIMER_CNTR0) << 8);	/* msb */
	if (reload < count) {
	    outb(TIMER_MODE, TIMER_SEL0 | TIMER_SWSTROBE | TIMER_16BIT);
	    outb(TIMER_CNTR0, (__uint8_t)reload); 	/* lsb */
	    outb(TIMER_CNTR0, (__uint8_t)(reload >> 8)); /* msb */
	}
    } else {
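The part of i8254_intr_reload worth calling out is the rescaling at the top: a reload expressed in system-timer ticks is converted into 8254 ticks either by a precomputed divisor or by the ratio of the two frequencies, then clamped to at least 2 so that a zero or negative request produces a near-immediate interrupt instead of a full counter wrap. A hedged sketch of just that conversion follows; all names in it are invented.

/* Hedged sketch of the timebase rescale + clamp; all names are invented. */
#include <stdint.h>
#include <stdio.h>

/*
 * Convert a reload value given in source-timer ticks into target-timer
 * ticks: divide by a precomputed divisor if one is available, otherwise
 * scale by the ratio of the two frequencies in 64-bit arithmetic.  Clamp
 * to a minimum of 2 ticks so a zero or negative request fires almost
 * immediately instead of wrapping the counter.
 */
static uint32_t rescale_reload(int64_t reload, uint32_t divisor,
			       int64_t target_freq, int64_t source_freq)
{
	if (divisor)
		reload /= divisor;
	else
		reload = reload * target_freq / source_freq;

	if (reload < 2)
		reload = 2;
	return (uint32_t)reload;
}

int main(void)
{
	/* 100 ticks of a 1MHz source rescaled to a 1.193182MHz 8254-like timer */
	printf("%u\n", rescale_reload(100, 0, 1193182, 1000000));
	printf("%u\n", rescale_reload(-5, 0, 1193182, 1000000));  /* clamped to 2 */
	return 0;
}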
Example #19
/*
 * Fetch current time value from hardware that may not correctly
 * latch the counter.  We need to read until we have three monotonic
 * samples and then use the middle one, otherwise we are not protected
 * against the fact that the bits can be wrong in two directions.  If
 * we only cared about monotonicity, two reads would be enough.
 */
static sysclock_t
acpi_timer_get_timecount_safe(void)
{
    u_int u1, u2, u3;

    if (acpi_counter_mask != 0xffffffff)
	clock_lock();

    u2 = acpi_timer_read();
    u3 = acpi_timer_read();
    do {
	u1 = u2;
	u2 = u3;
	u3 = acpi_timer_read();
    } while (u1 > u2 || u2 > u3);

    if (acpi_counter_mask != 0xffffffff) {
	if (u2 < acpi_last_counter)
	    acpi_cputimer.base += 0x01000000;
	acpi_last_counter = u2;
	clock_unlock();
    }
    return (u2 + acpi_cputimer.base);
}
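The triple-read loop above is a standard defence against a counter that may be caught mid-ripple: keep sampling until three consecutive readings are non-decreasing and return the middle one, so a single reading that is off in either direction by more than the inter-read step can never be the value returned. Example #20 is the same technique against a different read routine. The sketch below reproduces the sampling loop against a simulated flaky counter; flaky_read and its glitch model are invented.

/* Hedged sketch: flaky_read() simulates a counter that occasionally glitches. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t true_ticks;

/* Advancing counter whose reads are sometimes corrupted in one bit. */
static uint32_t flaky_read(void)
{
	true_ticks += 7;
	uint32_t v = true_ticks;
	if (rand() % 4 == 0)
		v ^= 0x10;			/* simulate a mis-latched bit */
	return v;
}

/*
 * Read until three successive samples are non-decreasing and return the
 * middle one.  A single sample thrown off by more than the step between
 * reads cannot sit in the middle of a monotonic triple, so it is re-read
 * rather than returned.
 */
static uint32_t read_safe(void)
{
	uint32_t u1, u2, u3;

	u2 = flaky_read();
	u3 = flaky_read();
	do {
		u1 = u2;
		u2 = u3;
		u3 = flaky_read();
	} while (u1 > u2 || u2 > u3);

	return u2;
}

int main(void)
{
	uint32_t prev = 0;
	for (int i = 0; i < 10; i++) {
		uint32_t now = read_safe();
		printf("%u%s\n", now, now < prev ? "  (went backwards!)" : "");
		prev = now;
	}
	return 0;
}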
Example #20
/*
 * Fetch current time value from hardware that may not correctly
 * latch the counter.  We need to read until we have three monotonic
 * samples and then use the middle one, otherwise we are not protected
 * against the fact that the bits can be wrong in two directions.  If
 * we only cared about monotonicity, two reads would be enough.
 */
static sysclock_t
acpi_timer_get_timecount_safe(void)
{
    u_int u1, u2, u3;

    if (acpi_timer_resolution != 32)
	clock_lock();

    AcpiGetTimer(&u2);
    AcpiGetTimer(&u3);
    do {
	u1 = u2;
	u2 = u3;
	AcpiGetTimer(&u3);
    } while (u1 > u2 || u2 > u3);

    if (acpi_timer_resolution != 32) {
	if (u2 < acpi_last_counter)
	    acpi_cputimer.base += 0x01000000;
	acpi_last_counter = u2;
	clock_unlock();
    }
    return (u2 + acpi_cputimer.base);
}
Example #21
void clk_disable(struct clk *clk)
{
	clock_lock();
	local_clk_disable(clk);
	clock_unlock();
}
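Taken together, the clk_* examples (#1, #3, #4, #5, #8, #10, #14 and #21) form the consumer side of a small clock framework: clk_get looks a clock up by name and takes a reference on its owning module, clk_enable/clk_round_rate/clk_set_rate operate on it under clock_lock, and clk_disable/clk_put release it. The sketch below shows how a driver would typically call that API; the "uart" clock name, the probe function, and its error paths are illustrative only and do not come from any of the projects above.

/* Hedged usage sketch: "uart", example_probe and the error paths are illustrative. */
#include <linux/err.h>

struct device;
struct clk;

/* API implemented by the examples above. */
struct clk *clk_get(struct device *dev, const char *id);
void clk_put(struct clk *clk);
int clk_enable(struct clk *clk);
void clk_disable(struct clk *clk);
long clk_round_rate(struct clk *clk, unsigned long rate);
int clk_set_rate(struct clk *clk, unsigned long rate);

static int example_probe(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, "uart");		/* lookup + module reference */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);
	if (ret)
		goto put;

	/* Ask what the hardware can actually do before committing to it. */
	ret = clk_set_rate(clk, clk_round_rate(clk, 48000000));
	if (ret)
		goto disable;

	return 0;

disable:
	clk_disable(clk);
put:
	clk_put(clk);
	return ret;
}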