return count;
}

static struct clock_event_device clockevent_gptimer = {
	.name = "gpt_event_1",
	.features = CLOCK_EVT_FEAT_ONESHOT,
	.shift = 32,
	.set_next_event = gptimer_set_next_event,
	.set_mode = gptimer_set_mode
};
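
A clock_event_device's mult/shift pair turns a requested delay in nanoseconds into timer ticks, ticks = (ns * mult) >> shift, where mult = (rate << shift) / NSEC_PER_SEC is what gptimer_clockevents_init() below computes with div_sc(). A minimal standalone sketch of that arithmetic, with a local div_sc() re-implementation and a hypothetical 32768 Hz tick rate standing in for the board's real CLOCK_TICK_RATE:

/* Standalone sketch of the clockevent fixed-point math used below.
 * div_sc() is re-implemented locally; 32768 Hz is a stand-in rate. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint32_t div_sc(uint64_t ticks, uint64_t nsec, int shift)
{
	/* scaled division: (ticks << shift) / nsec */
	return (uint32_t)((ticks << shift) / nsec);
}

int main(void)
{
	const uint64_t rate = 32768;	/* hypothetical tick rate */
	const int shift = 32;
	uint32_t mult = div_sc(rate, NSEC_PER_SEC, shift);

	/* ns -> ticks, the conversion the clockevent core relies on */
	uint64_t delay_ns = 10 * 1000 * 1000;	/* 10 ms */
	uint64_t ticks = (delay_ns * mult) >> shift;

	printf("mult=%u, 10 ms -> %llu ticks\n", mult,
	       (unsigned long long)ticks);
	return 0;
}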

static struct clocksource clksrc_gptimer = {
	.name = "gpt_source_2",
	.rating = 200,
	.read = gptimer_clksrc_read,
	.mask = CLOCKSOURCE_MASK(32),	/* Although Kona timers have 64-bit counters,
					   the match register is compared against a
					   32-bit value so that all four HUB_TIMER
					   channels can be used; to keep everything
					   consistent, the Linux framework is told
					   the clocksource is 32 bits wide.
					 */
	.shift = 16,		/* shift is fixed at 16; mult is calculated from it during init */
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
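
A clocksource runs the fixed point the other way: elapsed counter cycles become nanoseconds as ns = (cycles * mult) >> shift, and mult is normally derived from the counter rate with clocksource_hz2mult(). A standalone sketch of that math, re-implementing the helper locally and using 32768 Hz with shift 10 (the same values the 32k sync-timer example further down this page passes):

/* Standalone sketch of clocksource_hz2mult() / cyc2ns arithmetic.
 * The helper is re-implemented locally for illustration. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint32_t clocksource_hz2mult(uint32_t hz, uint32_t shift)
{
	uint64_t tmp = NSEC_PER_SEC << shift;

	tmp += hz / 2;			/* round to nearest */
	return (uint32_t)(tmp / hz);
}

int main(void)
{
	const uint32_t hz = 32768, shift = 10;
	uint32_t mult = clocksource_hz2mult(hz, shift);

	/* one second's worth of cycles should map back to 1e9 ns */
	uint64_t ns = ((uint64_t)hz * mult) >> shift;

	printf("mult=%u, %u cycles -> %llu ns\n", mult, hz,
	       (unsigned long long)ns);
	return 0;
}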

static void __init gptimer_clockevents_init(void)
{
	clockevent_gptimer.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC,
					 clockevent_gptimer.shift);

	clockevent_gptimer.max_delta_ns =
Example No. 2
	*CSR_TIMER2_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_DIV16;
	return 0;
}

static void cksrc_dc21285_disable(struct clocksource *cs)
{
	*CSR_TIMER2_CNTL = 0;
}

static struct clocksource cksrc_dc21285 = {
	.name		= "dc21285_timer2",
	.rating		= 200,
	.read		= cksrc_dc21285_read,
	.enable		= cksrc_dc21285_enable,
	.disable	= cksrc_dc21285_disable,
	.mask		= CLOCKSOURCE_MASK(24),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static void ckevt_dc21285_set_mode(enum clock_event_mode mode,
	struct clock_event_device *c)
{
	switch (mode) {
	case CLOCK_EVT_MODE_RESUME:
	case CLOCK_EVT_MODE_PERIODIC:
		*CSR_TIMER1_CLR = 0;
		*CSR_TIMER1_LOAD = (mem_fclk_21285 + 8 * HZ) / (16 * HZ);
		*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD |
				   TIMER_CNTL_DIV16;
		break;
Example No. 3
static struct msm_clock msm_clocks[] = {
	{
		.clockevent = {
			.name           = "gp_timer",
			.features       = CLOCK_EVT_FEAT_ONESHOT,
			.shift          = 32,
			.rating         = 200,
			.set_next_event = msm_timer_set_next_event,
			.set_mode       = msm_timer_set_mode,
		},
		.clocksource = {
			.name           = "gp_timer",
			.rating         = 200,
			.read           = msm_gpt_read,
			.mask           = CLOCKSOURCE_MASK(32),
			.shift          = 24,
			.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
		},
		.irq = {
			.name    = "gp_timer",
			.flags   = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_RISING,
			.handler = msm_timer_interrupt,
			.dev_id  = &msm_clocks[0].clockevent,
			.irq     = INT_GP_TIMER_EXP
		},
		.regbase = MSM_GPT_BASE,
		.freq = GPT_HZ
	},
	{
		.clockevent = {
Example No. 4
	WARN_ON(!p->cs_enabled);

	iowrite8(0, p->mapbase1 + TCR);
	iowrite8(0, p->mapbase2 + TCR);
	p->cs_enabled = false;
}

static struct tpu_priv tpu_priv = {
	.cs = {
		.name = "H8S_TPU",
		.rating = 200,
		.read = tpu_clocksource_read,
		.enable = tpu_clocksource_enable,
		.disable = tpu_clocksource_disable,
		.mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8),
		.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	},
};

#define CH_L 0
#define CH_H 1

static int __init h8300_tpu_init(struct device_node *node)
{
	void __iomem *base[2];
	struct clk *clk;
	int ret = -ENXIO;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
Example No. 5
	else
		return arch_counter_get_cntpct_mem();
}
EXPORT_SYMBOL(arch_counter_get_cntpct);

u64 arch_counter_get_cntvct(void)
{
	return arch_timer_read_counter();
}
EXPORT_SYMBOL(arch_counter_get_cntvct);

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};

static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

static struct timecounter timecounter;

struct timecounter *arch_timer_get_timecounter(void)
{
	return &timecounter;
}
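
A timecounter layers monotonically increasing nanosecond time on top of a raw cyclecounter: it is seeded once with a start timestamp and every later read folds in the cycle delta through the cyclecounter's mult/shift. A hedged kernel-context sketch of that pattern (the _example names are placeholders; the igb code later on this page seeds its timecounter the same way):

/* Sketch only: wrap an existing cyclecounter in a timecounter and read
 * elapsed nanoseconds from it later.  The _example names are placeholders.
 * On kernels of this vintage the timecounter helpers live in
 * <linux/clocksource.h>; newer trees moved them to <linux/timecounter.h>. */
#include <linux/clocksource.h>

static struct timecounter tc_example;

static void example_timecounter_setup(const struct cyclecounter *cc,
				      u64 start_ns)
{
	/* latch the current counter value and start the ns accounting */
	timecounter_init(&tc_example, cc, start_ns);
}

static u64 example_time_ns(void)
{
	/* folds in cycles elapsed since the previous read, returns total ns */
	return timecounter_read(&tc_example);
}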
Example No. 6
					&nuc900_clockevent_device);
	nuc900_clockevent_device.cpumask = cpumask_of(0);

	clockevents_register_device(&nuc900_clockevent_device);
}

static cycle_t nuc900_get_cycles(struct clocksource *cs)
{
	return (~__raw_readl(REG_TDR1)) & TDR_MASK;
}

static struct clocksource clocksource_nuc900 = {
	.name	= "nuc900-timer1",
	.rating	= 200,
	.read	= nuc900_get_cycles,
	.mask	= CLOCKSOURCE_MASK(TDR_SHIFT),
	.shift	= 10,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static void __init nuc900_clocksource_init(void)
{
	unsigned int val;
	unsigned int rate;
	struct clk *clk = clk_get(NULL, "timer1");

	BUG_ON(IS_ERR(clk));

	__raw_writel(0x00, REG_TCSR1);

	clk_enable(clk);
Example No. 7
	fout = ((2 * n * fin) / (m * (0x01 << p)));

	pr_info("MIPS Clock Freq=%d kHz\n", fout);

	return fout;
}

static cycle_t c0_hpt_read(struct clocksource *cs)
{
	return read_c0_count();
}

static struct clocksource clocksource_mips = {
	.name		= "powertv-counter",
	.read		= c0_hpt_read,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static void __init powertv_c0_hpt_clocksource_init(void)
{
	unsigned int pll_freq = mips_get_pll_freq();

	pr_info("CPU frequency %d.%02d MHz\n", pll_freq / 1000,
		(pll_freq % 1000) * 100 / 1000);

	mips_hpt_frequency = pll_freq / 2 * 1000;

	clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;

	clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
Example No. 8
		nsec = lguest_data.time.tv_nsec;
		/* Make sure we've done that. */
		rmb();
		/* Now if the seconds part has changed, try again. */
	} while (unlikely(lguest_data.time.tv_sec != sec));

	/* Our lguest clock is in real nanoseconds. */
	return sec*1000000000ULL + nsec;
}

/* This is the fallback clocksource: lower priority than the TSC clocksource. */
static struct clocksource lguest_clock = {
	.name		= "lguest",
	.rating		= 200,
	.read		= lguest_clock_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 1 << 22,
	.shift		= 22,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
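
Because lguest_clock_read() above already returns nanoseconds, the mult/shift pair here is deliberately an identity transform; it only satisfies the clocksource core's fixed-point conversion:

	ns = (cycles * mult) >> shift = (cycles << 22) >> 22 = cycles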

/* We also need a "struct clock_event_device": Linux asks us to set it to go
 * off some time in the future.  Actually, James Morris figured all this out, I
 * just applied the patch. */
static int lguest_clockevent_set_next_event(unsigned long delta,
                                           struct clock_event_device *evt)
{
	/* FIXME: I don't think this can ever happen, but James tells me he had
	 * to put this code in.  Maybe we should remove it now.  Anyone? */
	if (delta < LG_CLOCK_MIN_DELTA) {
		if (printk_ratelimit())
Example No. 9
/**
 * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
 * @adapter: pointer to the adapter structure
 *
 * This function should be called to set the proper values for the TIMINCA
 * register and tell the cyclecounter structure what the tick rate of SYSTIME
 * is. It does not directly modify SYSTIME registers or the timecounter
 * structure. It should be called whenever a new TIMINCA value is necessary,
 * such as during initialization or when the link speed changes.
 */
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 incval = 0;
	u32 shift = 0;
	unsigned long flags;

	/**
	 * Scale the NIC cycle counter by a large factor so that
	 * relatively small corrections to the frequency can be added
	 * or subtracted. The drawbacks of a large factor include
	 * (a) the clock register overflows more quickly, (b) the cycle
	 * counter structure must be able to convert the systime value
	 * to nanoseconds using only a multiplier and a right-shift,
	 * and (c) the value must fit within the timinca register space
	 * => math based on internal DMA clock rate and available bits
	 *
	 * Note that when there is no link, internal DMA clock is same as when
	 * link speed is 10Gb. Set the registers correctly even when link is
	 * down to preserve the clock setting
	 */
	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_100_FULL:
		incval = IXGBE_INCVAL_100;
		shift = IXGBE_INCVAL_SHIFT_100;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		incval = IXGBE_INCVAL_1GB;
		shift = IXGBE_INCVAL_SHIFT_1GB;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
	default:
		incval = IXGBE_INCVAL_10GB;
		shift = IXGBE_INCVAL_SHIFT_10GB;
		break;
	}

	/**
	 * Modify the calculated values to fit within the correct
	 * number of bits specified by the hardware. The 82599 doesn't
	 * have the same space as the X540, so bitshift the calculated
	 * values to fit.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
		break;
	case ixgbe_mac_82599EB:
		incval >>= IXGBE_INCVAL_SHIFT_82599;
		shift -= IXGBE_INCVAL_SHIFT_82599;
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
				(1 << IXGBE_INCPER_SHIFT_82599) |
				incval);
		break;
	default:
		/* other devices aren't supported */
		return;
	}

	/* update the base incval used to calculate frequency adjustment */
	ACCESS_ONCE(adapter->base_incval) = incval;
	smp_mb();

	/* need lock to prevent incorrect read while modifying cyclecounter */
	spin_lock_irqsave(&adapter->tmreg_lock, flags);

	memset(&adapter->hw_cc, 0, sizeof(adapter->hw_cc));
	adapter->hw_cc.read = ixgbe_ptp_read_82599;
	adapter->hw_cc.mask = CLOCKSOURCE_MASK(64);
	adapter->hw_cc.shift = shift;
	adapter->hw_cc.mult = 1;

	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
}
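
The oversized increment described in the comment above is what lets a PTP adjfreq callback apply corrections measured in parts per billion: the base increment is scaled by |ppb| / 1e9 and the result added to or subtracted from it before being written back. A standalone sketch of that generic arithmetic (adjust_incval() and the base value are illustrative; the per-MAC register packing seen above is omitted):

/* Standalone sketch of the parts-per-billion tweak a PTP adjfreq
 * callback applies to a base increment value.  Illustrative only. */
#include <stdint.h>
#include <stdio.h>

static uint32_t adjust_incval(uint32_t base_incval, int32_t ppb)
{
	int neg = ppb < 0;
	uint64_t diff;

	if (neg)
		ppb = -ppb;
	/* scale the base increment by |ppb| / 1e9 */
	diff = (uint64_t)base_incval * (uint32_t)ppb;
	diff /= 1000000000ULL;

	return neg ? base_incval - (uint32_t)diff
		   : base_incval + (uint32_t)diff;
}

int main(void)
{
	uint32_t incval = 0x80000000u;	/* hypothetical base increment */

	printf("+100 ppb: 0x%08x\n", (unsigned)adjust_incval(incval, 100));
	printf("-100 ppb: 0x%08x\n", (unsigned)adjust_incval(incval, -100));
	return 0;
}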
Example No. 10
	.name           = "U300 Timer Tick",
	.flags          = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.handler        = u300_timer_interrupt,
};

/* Use general purpose timer 2 as clock source */
static cycle_t u300_get_cycles(struct clocksource *cs)
{
	return (cycles_t) readl(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC);
}

static struct clocksource clocksource_u300_1mhz = {
	.name           = "GPT2",
	.rating         = 300, /* Reasonably fast and accurate clock source */
	.read           = u300_get_cycles,
	.mask           = CLOCKSOURCE_MASK(32), /* 32 bits */
	/* 22 calculated using the algorithm in arch/mips/kernel/time.c */
	.shift          = 22,
	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};

unsigned long long notrace sched_clock(void)
{
	return clocksource_cyc2ns(clocksource_u300_1mhz.read(
				  &clocksource_u300_1mhz),
				  clocksource_u300_1mhz.mult,
				  clocksource_u300_1mhz.shift);
}


static void __init u300_timer_init(void)
Example No. 11
static int __init omap_32k_sync_probe(struct platform_device *pdev)
{
	struct omap_32k_sync_device             *omap;
	struct resource                 *res;
	struct clk                      *ick;

	int                             ret;

	void __iomem                    *base;

	omap = kzalloc(sizeof(*omap), GFP_KERNEL);
	if (!omap) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		ret = -ENOMEM;
		goto err0;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(&pdev->dev, "couldn't get resource\n");
		ret = -ENODEV;
		goto err1;
	}

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		dev_dbg(&pdev->dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto err2;
	}

	ick = clk_get(&pdev->dev, "ick");
	if (IS_ERR(ick)) {
		dev_dbg(&pdev->dev, "couldn't get clock\n");
		ret = PTR_ERR(ick);
		goto err3;
	}

	ret = clk_enable(ick);
	if (ret) {
		dev_dbg(&pdev->dev, "couldn't enable clock\n");
		goto err4;
	}

	omap->base      = base;
	omap->dev       = &pdev->dev;
	omap->ick       = ick;

	omap->cs.name   = "timer-32k";
	omap->cs.rating = 250;
	omap->cs.read   = omap_32k_sync_32k_read;
	omap->cs.mask   = CLOCKSOURCE_MASK(32);
	omap->cs.shift  = 10;
	omap->cs.flags  = CLOCK_SOURCE_IS_CONTINUOUS;
	omap->cs.mult   = clocksource_hz2mult(32768, omap->cs.shift);

	platform_set_drvdata(pdev, omap);

	ret = clocksource_register(&omap->cs);
	if (ret) {
		dev_dbg(&pdev->dev, "failed to register clocksource\n");
		goto err5;
	}

	/* initialize our offset */
	omap->offset_32k        =  omap_32k_sync_32k_read(&omap->cs);

	/*
	 * REVISIT for now we need to keep a global static pointer
	 * to this clocksource instance. Would it make any sense
	 * to provide a get_clocksource() to fetch the clocksource
	 * we just registered ?
	 */
	thecs = omap;

	omap_32k_sync_register_chrdev();

	return 0;

err5:
	clk_disable(ick);

err4:
	clk_put(ick);

err3:
	iounmap(base);

err2:
err1:
	kfree(omap);

err0:
	return ret;
}
Example No. 12
	if (timer_cs_used == -1)
		return 0;

	ret = ext_timer_read_count(timer_cs_used, &count);
	if (ret == 0)
		return (cycle_t)count;
	else
		return 0;
}

static struct clocksource bcm63xx_clocksource = {
	.name = "timer_cs",
	.rating = 350,
	.read = bcm63xx_read_timer_count,
	.mask = CLOCKSOURCE_MASK(30), 
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,

};

static void __init periph_timer_clocksource_init(void)
{

	if (timer_cs_used != -1)
		return;

	timer_cs_used = ext_timer_alloc(-1, PERIPH_TIMER_PERIOD_MAX, NULL, 0);

	/* cannot allocate timer, just quit.  Shouldn't happen! */
	if (timer_cs_used == -1)
		return;
Example No. 13
static struct txx9_tmr_reg __iomem *txx9_cs_tmrptr;

static cycle_t txx9_cs_read(void)
{
	return __raw_readl(&txx9_cs_tmrptr->trr);
}

/* Use 1 bit smaller width to use full bits in that width */
#define TXX9_CLOCKSOURCE_BITS (TXX9_TIMER_BITS - 1)

static struct clocksource txx9_clocksource = {
	.name		= "TXx9",
	.rating		= 200,
	.read		= txx9_cs_read,
	.mask		= CLOCKSOURCE_MASK(TXX9_CLOCKSOURCE_BITS),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
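
CLOCKSOURCE_MASK(bits) simply builds an all-ones value covering the counter's usable width; the core masks every read with it and sizes the wrap-around window from it, and the txx9 code above deliberately registers a width one bit smaller than the hardware counter, as its comment notes. A standalone sketch using a stand-in macro (EXAMPLE_CS_MASK mirrors the kernel definition) over widths that appear on this page:

/* Standalone sketch of what CLOCKSOURCE_MASK(bits) expands to:
 * an all-ones mask covering the counter's usable width. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_CS_MASK(bits) \
	((uint64_t)((bits) < 64 ? (1ULL << (bits)) - 1 : (uint64_t)-1))

int main(void)
{
	int widths[] = { 16, 24, 32, 56, 64 };	/* widths used on this page */
	unsigned i;

	for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
		printf("CLOCKSOURCE_MASK(%2d) = 0x%016llx\n", widths[i],
		       (unsigned long long)EXAMPLE_CS_MASK(widths[i]));
	return 0;
}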

void __init txx9_clocksource_init(unsigned long baseaddr,
				  unsigned int imbusclk)
{
	struct txx9_tmr_reg __iomem *tmrptr;

	clocksource_set_clock(&txx9_clocksource, TIMER_CLK(imbusclk));
	clocksource_register(&txx9_clocksource);

	tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
	__raw_writel(TCR_BASE, &tmrptr->tcr);
	__raw_writel(0, &tmrptr->tisr);
	__raw_writel(TIMER_CCD, &tmrptr->ccdr);
Example No. 14
{
	/*
	 * The value should be inverted, because the SysTick timer counts down,
	 * and we need a value that counts up.
	 */
	return (cycle_t)(CM3_SYSTICK->val ^ CM3_SYSTICK_LOAD_RELOAD_MSK);
}

/*
 * SysTick clock source device
 */
static struct clocksource clocksource_systick = {
	.name		= "cm3-systick",
	.rating		= 200,
	.read		= clocksource_systick_value_get,
	.mask		= CLOCKSOURCE_MASK(CM3_SYSTICK_LOAD_RELOAD_BITWIDTH),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
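
Several counters on this page run downward (SysTick here, TDR1 in the nuc900 example, TIMROT in the MXS one, the SCD timer in the SB1250 one), so the read hooks flip the raw value into an up-counting sequence, either by XOR/complement against the full-scale mask or by subtracting from the reload value. A standalone sketch showing that, within the counter width, both forms give the same increasing cycle stream (the 24-bit mask mirrors SysTick's reload width):

/* Standalone sketch: turning a down-counting register value into an
 * up-counting cycle value, as the read hooks above do. */
#include <stdint.h>
#include <stdio.h>

#define RELOAD_MASK 0x00FFFFFFu		/* 24-bit full scale, as SysTick */

static uint32_t up_from_down_xor(uint32_t hw) { return hw ^ RELOAD_MASK; }
static uint32_t up_from_down_sub(uint32_t hw) { return RELOAD_MASK - hw; }

int main(void)
{
	/* a down-counter ticking from full scale toward zero */
	uint32_t samples[] = { 0x00FFFFFF, 0x00FFFFF0, 0x00FFFF00, 0x00F00000 };
	unsigned i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("hw=0x%06x  xor=%8u  sub=%8u\n", samples[i],
		       up_from_down_xor(samples[i]),
		       up_from_down_sub(samples[i]));
	return 0;
}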

/*
 * Register the SysTick timer as a clocksource
 */
void cortex_m3_register_systick_clocksource(u32 systick_clk)
{
	/*
	 * Configure and enable the SysTick timer if it was not enabled
	 * in the bootloader.
	 */
	CM3_SYSTICK->load = CM3_SYSTICK_LOAD_RELOAD_MSK;
	CM3_SYSTICK->val = 0;
	CM3_SYSTICK->ctrl |= CM3_SYSTICK_CTRL_EN;
Example No. 15
    }

    /* this irq is shared ... */
    return IRQ_NONE;
}

static cycle_t read_clk32k(struct clocksource *cs)
{
    return read_CRTR();
}

static struct clocksource clk32k = {
    .name		= "32k_counter",
    .rating		= 150,
    .read		= read_clk32k,
    .mask		= CLOCKSOURCE_MASK(20),
    .flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static void clkdev32k_disable_and_flush_irq(void)
{
    unsigned int val;

    /* Disable and flush pending timer interrupts */
    regmap_write(regmap_st, AT91_ST_IDR, AT91_ST_PITS | AT91_ST_ALMS);
    regmap_read(regmap_st, AT91_ST_SR, &val);
    last_crtr = read_CRTR();
}

static int clkevt32k_shutdown(struct clock_event_device *evt)
{
Example No. 16
	} while (xchg(&rt_timer_irq, irq));

	set_irq_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq);
	setup_irq(irq, &hub_rt_irqaction);
}

static cycle_t hub_rt_read(struct clocksource *cs)
{
	return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
}

struct clocksource hub_rt_clocksource = {
	.name	= "HUB-RT",
	.rating	= 200,
	.read	= hub_rt_read,
	.mask	= CLOCKSOURCE_MASK(52),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static void __init hub_rt_clocksource_init(void)
{
	struct clocksource *cs = &hub_rt_clocksource;

	clocksource_set_clock(cs, CYCLES_PER_SEC);
	clocksource_register(cs);
}

void __init plat_time_init(void)
{
	hub_rt_clocksource_init();
	hub_rt_clock_event_global_init();
Example No. 17
void igb_ptp_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	switch (hw->mac.type) {
	case e1000_82576:
		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 999999881;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.pps = 0;
		adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
		adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
		adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
		adapter->ptp_caps.settime = igb_ptp_settime_82576;
		adapter->ptp_caps.enable = igb_ptp_enable;
		adapter->cc.read = igb_ptp_read_82576;
		adapter->cc.mask = CLOCKSOURCE_MASK(64);
		adapter->cc.mult = 1;
		adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
		/* Dial the nominal frequency. */
		E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 |
						   INCVALUE_82576);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 62499999;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.pps = 0;
		adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
		adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
		adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
		adapter->ptp_caps.settime = igb_ptp_settime_82576;
		adapter->ptp_caps.enable = igb_ptp_enable;
		adapter->cc.read = igb_ptp_read_82580;
		adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
		adapter->cc.mult = 1;
		adapter->cc.shift = 0;
		/* Enable the timer functions by clearing bit 31. */
		E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
		break;
	case e1000_i210:
	case e1000_i211:
		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 62499999;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.pps = 0;
		adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
		adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
		adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
		adapter->ptp_caps.settime = igb_ptp_settime_i210;
		adapter->ptp_caps.enable = igb_ptp_enable;
		/* Enable the timer functions by clearing bit 31. */
		E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
		break;
	default:
		adapter->ptp_clock = NULL;
		return;
	}

	E1000_WRITE_FLUSH(hw);

	spin_lock_init(&adapter->tmreg_lock);
	INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);

	/* Initialize the clock and overflow work for devices that need it. */
	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
		struct timespec ts = ktime_to_timespec(ktime_get_real());

		igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
	} else {
		timecounter_init(&adapter->tc, &adapter->cc,
				 ktime_to_ns(ktime_get_real()));

		INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
				  igb_ptp_overflow_check);

		schedule_delayed_work(&adapter->ptp_overflow_work,
				      IGB_SYSTIM_OVERFLOW_PERIOD);
	}

	/* Initialize the time sync interrupts for devices that support it. */
	if (hw->mac.type >= e1000_82580) {
		E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS);
		E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS);
	}

	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
						&adapter->pdev->dev);
	if (IS_ERR(adapter->ptp_clock)) {
		adapter->ptp_clock = NULL;
		dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
	} else {
		dev_info(&adapter->pdev->dev, "added PHC on %s\n",
			 adapter->netdev->name);
		adapter->flags |= IGB_FLAG_PTP;
	}
}
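
The delayed overflow work above exists because a timecounter only stays correct if timecounter_read() is called at least once per wrap of the underlying cyclecounter mask; the periodic check simply forces such a read. A rough standalone calculation of how long a masked counter takes to wrap at a given rate (the 40-bit width and 62.5 MHz figure are illustrative placeholders, not the hardware's real numbers):

/* Standalone sketch: wrap time of a masked free-running counter.
 * Width and rate are illustrative placeholders only. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned bits = 40;		/* hypothetical counter width */
	const uint64_t rate_hz = 62500000ULL;	/* hypothetical tick rate */
	uint64_t mask = (1ULL << bits) - 1;

	/* the overflow worker must run well inside this interval */
	printf("counter wraps after ~%llu seconds\n",
	       (unsigned long long)(mask / rate_hz));
	return 0;
}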
Example No. 18
	.flags		= IRQF_DISABLED,
	.handler	= s5p_sched_timer_interrupt,
};


static cycle_t s5p_sched_timer_read(struct clocksource *cs)
{

	return (cycle_t)~__raw_readl(S5P_SYSTIMER_TICNTO);
}

struct clocksource clocksource_s5p = {
	.name		= "clock_source_systimer",
	.rating		= 300,
	.read		= s5p_sched_timer_read,
	.mask		= CLOCKSOURCE_MASK(32),
	.shift		= 20,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static void s5p_init_clocksource(unsigned long rate)
{
	static char err[] __initdata = KERN_ERR
			"%s: can't register clocksource!\n";

	clocksource_s5p.mult
		= clocksource_khz2mult(rate/1000, clocksource_s5p.shift);

	s5p_sched_timer_start(~0, 1);

	if (clocksource_register(&clocksource_s5p))
Example No. 19
 * again.
 */
static cycle_t sb1250_hpt_read(struct clocksource *cs)
{
	unsigned int count;

	count = G_SCD_TIMER_CNT(__raw_readq(IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT))));

	return SB1250_HPT_VALUE - count;
}

struct clocksource bcm1250_clocksource = {
	.name	= "bcm1250-counter-3",
	.rating = 200,
	.read	= sb1250_hpt_read,
	.mask	= CLOCKSOURCE_MASK(23),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

void __init sb1250_clocksource_init(void)
{
	struct clocksource *cs = &bcm1250_clocksource;

	/* Setup hpt using timer #3 but do not enable irq for it */
	__raw_writeq(0,
		     IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
						 R_SCD_TIMER_CFG)));
	__raw_writeq(SB1250_HPT_VALUE,
		     IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
						 R_SCD_TIMER_INIT)));
	__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
Example No. 20
	if (timrot_is_v1())
		mxs_clockevent_device.set_next_event = timrotv1_set_next_event;
	mxs_clockevent_device.cpumask = cpumask_of(0);
	clockevents_config_and_register(&mxs_clockevent_device,
					clk_get_rate(timer_clk),
					timrot_is_v1() ? 0xf : 0x2,
					timrot_is_v1() ? 0xfffe : 0xfffffffe);

	return 0;
}

static struct clocksource clocksource_mxs = {
	.name		= "mxs_timer",
	.rating		= 200,
	.read		= timrotv1_get_cycles,
	.mask		= CLOCKSOURCE_MASK(16),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static u64 notrace mxs_read_sched_clock_v2(void)
{
	return ~readl_relaxed(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1));
}

static int __init mxs_clocksource_init(struct clk *timer_clk)
{
	unsigned int c = clk_get_rate(timer_clk);

	if (timrot_is_v1())
		clocksource_register_hz(&clocksource_mxs, c);
	else {
Example No. 21
     * 1/HZ period (instead of a compile-time constant LATCH).
     */
    pit_rate = clk_get_rate(clk_get(NULL, "mck")) / 16;
    pit_cycle = (pit_rate + HZ/2) / HZ;
    WARN_ON(((pit_cycle - 1) & ~AT91_PIT_PIV) != 0);

    /* Initialize and enable the timer */
    at91sam926x_pit_reset();

    /*
     * Register clocksource.  The high order bits of PIV are unused,
     * so this isn't a 32-bit counter unless we get clockevent irqs.
     */
    bits = 12 /* PICNT */ + ilog2(pit_cycle) /* PIV */;
    pit_clk.mask = CLOCKSOURCE_MASK(bits);
    clocksource_register_hz(&pit_clk, pit_rate);

    /* Set up irq handler */
    setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq);

    /* Set up and register clockevents */
    pit_clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, pit_clkevt.shift);
    pit_clkevt.cpumask = cpumask_of(0);
    clockevents_register_device(&pit_clkevt);
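
Two registration idioms appear across these examples: the legacy path fills in mult by hand with clocksource_hz2mult() and calls clocksource_register() (as the 32k sync timer does), while clocksource_register_hz() lets the core derive a suitable mult/shift pair from the rate (as the PIT and MXS code do). A hedged kernel-context sketch of both, with a placeholder clocksource and rate:

/* Sketch only: the two clocksource registration idioms seen above.
 * example_cs and rate_hz are placeholders, not a real driver's data. */
#include <linux/clocksource.h>

static int example_register_legacy(struct clocksource *example_cs, u32 rate_hz)
{
	/* caller picks .shift up front and computes .mult itself */
	example_cs->mult = clocksource_hz2mult(rate_hz, example_cs->shift);
	return clocksource_register(example_cs);
}

static int example_register_rate(struct clocksource *example_cs, u32 rate_hz)
{
	/* the core derives a suitable mult/shift pair from the rate */
	return clocksource_register_hz(example_cs, rate_hz);
}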