/*
 * Resume callback for the clocksource: power the device's PM domain
 * back on, then restart the channel in clocksource mode.
 */
static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
	struct sh_cmt_priv *priv = cs_to_sh_cmt(cs);

	/* Bring the genpd domain up before restarting the counter. */
	pm_genpd_syscore_poweron(&priv->pdev->dev);
	sh_cmt_start(priv, FLAG_CLOCKSOURCE);
}
/*
 * Enable callback for the clocksource: reset the accumulated cycle
 * counter and start the channel in clocksource mode.
 *
 * Returns the result of sh_cmt_start() (0 on success).
 */
static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
	struct sh_cmt_priv *priv = cs_to_sh_cmt(cs);

	priv->total_cycles = 0;

	return sh_cmt_start(priv, FLAG_CLOCKSOURCE);
}
/*
 * Device resume callback: restart the channel with whatever flags were
 * recorded at suspend time (priv->flags_suspend).
 *
 * Always returns 0.
 */
static int sh_cmt_resume(struct device *dev)
{
	struct sh_cmt_priv *priv = platform_get_drvdata(to_platform_device(dev));

	sh_cmt_start(priv, priv->flags_suspend);

	return 0;
}
/*
 * Enable callback for the clocksource: reset the cycle accumulator,
 * start the channel, and on success propagate the measured rate to the
 * clocksource core.
 *
 * Returns 0 on success or the error from sh_cmt_start().
 */
static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
	struct sh_cmt_priv *priv = cs_to_sh_cmt(cs);
	int err;

	priv->total_cycles = 0;

	err = sh_cmt_start(priv, FLAG_CLOCKSOURCE);
	if (err)
		return err;

	/* Rate is only known once the channel is running. */
	__clocksource_updatefreq_hz(cs, priv->rate);

	return 0;
}
/*
 * Enable callback for the clocksource: reset the cycle accumulator,
 * start the channel, then derive mult from the measured rate.
 *
 * Returns 0 on success or the error from sh_cmt_start().
 */
static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
	struct sh_cmt_priv *priv = cs_to_sh_cmt(cs);
	int err;

	priv->total_cycles = 0;

	err = sh_cmt_start(priv, FLAG_CLOCKSOURCE);
	if (err)
		return err;

	/* TODO: calculate good shift from rate and counter bit width */
	cs->shift = 0;
	cs->mult = clocksource_hz2mult(priv->rate, cs->shift);

	return 0;
}
/*
 * Enable callback for the clocksource.
 *
 * Clears the accumulated cycle count, starts the channel in
 * clocksource mode and computes cs->mult from the channel rate
 * (shift is fixed at 0 here).
 *
 * Returns 0 on success or the error from sh_cmt_start().
 */
static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
	int err;
	struct sh_cmt_priv *priv = cs_to_sh_cmt(cs);

	priv->total_cycles = 0;

	err = sh_cmt_start(priv, FLAG_CLOCKSOURCE);
	if (err)
		return err;

	cs->shift = 0;
	cs->mult = clocksource_hz2mult(priv->rate, cs->shift);

	return 0;
}
static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic) { struct clock_event_device *ced = &p->ced; sh_cmt_start(p, FLAG_CLOCKEVENT); /* TODO: calculate good shift from rate and counter bit width */ ced->shift = 32; ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced); ced->min_delta_ns = clockevent_delta2ns(0x1f, ced); if (periodic) sh_cmt_set_next(p, (p->rate + HZ/2) / HZ); else sh_cmt_set_next(p, p->max_match_value); }
/*
 * Resume callback for the clocksource: restart the channel in
 * clocksource mode.
 */
static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
	struct sh_cmt_priv *priv = cs_to_sh_cmt(cs);

	sh_cmt_start(priv, FLAG_CLOCKSOURCE);
}