static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	int ret;

	/* enable clock */
	ret = clk_enable(p->clk);
	if (ret) {
		pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk);
		return ret;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(p, 0);

	/* configure channel, periodic mode and maximum timeout */
	if (p->width == 16) {
		*rate = clk_get_rate(p->clk) / 512;
		sh_cmt_write(p, CMCSR, 0x43);
	} else {
		*rate = clk_get_rate(p->clk) / 8;
		sh_cmt_write(p, CMCSR, 0x01a4);
	}

	sh_cmt_write(p, CMCOR, 0xffffffff);
	sh_cmt_write(p, CMCNT, 0);

	/* enable channel */
	sh_cmt_start_stop_ch(p, 1);
	return 0;
}

static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
	int k, ret;

	/* enable clock */
	ret = clk_enable(p->clk);
	if (ret) {
		dev_err(&p->pdev->dev, "cannot enable clock\n");
		goto err0;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(p, 0);

	/* configure channel, periodic mode and maximum timeout */
	if (p->width == 16) {
		*rate = clk_get_rate(p->clk) / 512;
		sh_cmt_write(p, CMCSR, 0x43);
	} else {
		*rate = clk_get_rate(p->clk) / 8;
		sh_cmt_write(p, CMCSR, 0x01a4);
	}

	sh_cmt_write(p, CMCOR, 0xffffffff);
	sh_cmt_write(p, CMCNT, 0);

	/*
	 * According to the sh73a0 user's manual, as CMCNT can be operated
	 * only by the RCLK (Pseudo 32 KHz), there's one restriction on
	 * modifying CMCNT register; two RCLK cycles are necessary before
	 * this register is either read or any modification of the value
	 * it holds is reflected in the LSI's actual operation.
	 *
	 * While at it, we're supposed to clear out the CMCNT as of this
	 * moment, so make sure it's processed properly here.  This will
	 * take RCLKx2 at maximum.
	 */
	for (k = 0; k < 100; k++) {
		if (!sh_cmt_read(p, CMCNT))
			break;
		udelay(1);
	}

	if (sh_cmt_read(p, CMCNT)) {
		dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
		ret = -ETIMEDOUT;
		goto err1;
	}

	/* enable channel */
	sh_cmt_start_stop_ch(p, 1);
	return 0;

 err1:
	/* stop clock */
	clk_disable(p->clk);

 err0:
	return ret;
}

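/*
 * Illustration only, not part of the driver: the CMCNT-clearing loop above
 * is a bounded-poll pattern -- re-read a register until the expected value
 * shows up or a retry budget runs out, then report a timeout.  The sketch
 * below models that shape in plain userspace C; fake_read_cmcnt(), the
 * retry budget and the fake register are all assumptions standing in for
 * sh_cmt_read(p, CMCNT) and the real hardware.
 */
#include <stdio.h>

static unsigned int fake_cmcnt = 3;	/* pretend hardware counter register */

static unsigned int fake_read_cmcnt(void)
{
	/* the real counter clears within two RCLK cycles; fake that decay */
	if (fake_cmcnt)
		fake_cmcnt--;
	return fake_cmcnt;
}

static int wait_for_cmcnt_clear(int max_tries)
{
	int k;

	for (k = 0; k < max_tries; k++) {
		if (!fake_read_cmcnt())
			return 0;	/* cleared within the budget */
		/* a real driver would udelay(1) between polls */
	}
	return -1;			/* still set: treat as -ETIMEDOUT */
}

int main(void)
{
	printf("clear %s\n", wait_for_cmcnt_clear(100) ? "timed out" : "ok");
	return 0;
}
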
static void sh_cmt_disable(struct sh_cmt_priv *p)
{
	/* disable channel */
	sh_cmt_start_stop_ch(p, 0);

	/* disable interrupts in CMT block */
	sh_cmt_write(p, CMCSR, 0);

	/* stop clock */
	clk_disable(p->clk);
}

static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_priv *p = dev_id;

	/* clear flags */
	sh_cmt_write(p, CMCSR, sh_cmt_read(p, CMCSR) & p->clear_bits);

	/* update clock source counter to begin with if enabled
	 * the wrap flag should be cleared by the timer specific
	 * isr before we end up here.
	 */
	if (p->flags & FLAG_CLOCKSOURCE)
		p->total_cycles += p->match_value;

	if (!(p->flags & FLAG_REPROGRAM))
		p->next_match_value = p->max_match_value;

	p->flags |= FLAG_IRQCONTEXT;

	if (p->flags & FLAG_CLOCKEVENT) {
		if (!(p->flags & FLAG_SKIPEVENT)) {
			if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
				p->next_match_value = p->max_match_value;
				p->flags |= FLAG_REPROGRAM;
			}

			p->ced.event_handler(&p->ced);
		}
	}

	p->flags &= ~FLAG_SKIPEVENT;

	if (p->flags & FLAG_REPROGRAM) {
		p->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(p, 1);

		if (p->flags & FLAG_CLOCKEVENT)
			if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
			    || (p->match_value == p->next_match_value))
				p->flags &= ~FLAG_REPROGRAM;
	}

	p->flags &= ~FLAG_IRQCONTEXT;

	return IRQ_HANDLED;
}

static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	spin_lock_irqsave(&sh_cmt_lock, flags);
	value = sh_cmt_read(p, CMSTR);

	if (start)
		value |= 1 << cfg->timer_bit;
	else
		value &= ~(1 << cfg->timer_bit);

	sh_cmt_write(p, CMSTR, value);
	spin_unlock_irqrestore(&sh_cmt_lock, flags);
}

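/*
 * Illustration only: CMSTR is a single register shared by every CMT channel,
 * so a channel's start/stop bit has to be flipped with a read-modify-write
 * under one common lock, as sh_cmt_start_stop_ch() does above with
 * sh_cmt_lock.  The sketch below reproduces that pattern in userspace with a
 * pthread mutex and a plain integer standing in for the register; every name
 * here is hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fake_cmstr_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long fake_cmstr;	/* shared "start/stop register" */

static void fake_start_stop_ch(int timer_bit, int start)
{
	unsigned long value;

	pthread_mutex_lock(&fake_cmstr_lock);
	value = fake_cmstr;			/* read the shared word */
	if (start)
		value |= 1UL << timer_bit;	/* start this channel only */
	else
		value &= ~(1UL << timer_bit);	/* stop this channel only */
	fake_cmstr = value;			/* write the result back */
	pthread_mutex_unlock(&fake_cmstr_lock);
}

int main(void)
{
	fake_start_stop_ch(0, 1);
	fake_start_stop_ch(5, 1);
	fake_start_stop_ch(0, 0);
	printf("CMSTR = 0x%lx\n", fake_cmstr);	/* expect 0x20 */
	return 0;
}
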
static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
					      int absolute)
{
	unsigned long new_match;
	unsigned long value = p->next_match_value;
	unsigned long delay = 0;
	unsigned long now = 0;
	int has_wrapped;

	now = sh_cmt_get_counter(p, &has_wrapped);
	p->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/* we're competing with the interrupt handler.
		 *  -> let the interrupt handler reprogram the timer.
		 *  -> interrupt number two handles the event.
		 */
		p->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/* reprogram the timer hardware,
		 * but don't save the new match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > p->max_match_value)
			new_match = p->max_match_value;

		sh_cmt_write(p, CMCOR, new_match);

		now = sh_cmt_get_counter(p, &has_wrapped);
		if (has_wrapped && (new_match > p->match_value)) {
			/* we are changing to a greater match value,
			 * so this wrap must be caused by the counter
			 * matching the old value.
			 * -> first interrupt reprograms the timer.
			 * -> interrupt number two handles the event.
			 */
			p->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/* we are changing to a smaller match value,
			 * so the wrap must be caused by the counter
			 * matching the new value.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			p->match_value = new_match;
			break;
		}

		/* be safe: verify hardware settings */
		if (now < new_match) {
			/* timer value is below match value, all good.
			 * this makes sure we won't miss any match events.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			p->match_value = new_match;
			break;
		}

		/* the counter has reached a value greater
		 * than our new match value. and since the
		 * has_wrapped flag isn't set we must have
		 * programmed a too close event.
		 * -> increase delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		if (!delay)
			pr_warning("sh_cmt: too long delay\n");

	} while (delay);
}

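/*
 * Illustration only: sh_cmt_clock_event_program_verify() programs a match
 * value, re-reads the free-running counter, and if the counter has already
 * passed the match (with no wrap) it retries with an exponentially growing
 * safety margin.  The sketch below shows just that retry shape against a
 * fake, fast-moving counter; it deliberately omits the wrap handling, and
 * every name and constant is an assumption made for the example.
 */
#include <stdio.h>

static unsigned long fake_counter;

static unsigned long fake_read_counter(void)
{
	fake_counter += 7;	/* the counter keeps running while we program */
	return fake_counter;
}

static unsigned long program_verify(unsigned long ticks_from_now)
{
	unsigned long now = fake_read_counter();
	unsigned long delay = 0;
	unsigned long new_match;

	do {
		new_match = now + ticks_from_now + delay;
		/* a real driver writes new_match to the match register here */

		now = fake_read_counter();
		if (now < new_match)
			return new_match;	/* match still ahead: good */

		delay = delay ? delay << 1 : 1;	/* too close: widen and retry */
	} while (delay);

	return 0;				/* delay overflowed: give up */
}

int main(void)
{
	printf("programmed match at %lu\n", program_verify(2));
	return 0;
}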