/*
 * mt_gpt_set_next_event - arm the clock-event GPT to expire after
 * @cycles timer ticks.
 *
 * Stops the timer, loads the new compare value, then restarts the
 * counter from zero so the full @cycles interval elapses before the
 * compare-match fires.  @evt is unused.  Always returns 0.
 */
static int mt_gpt_set_next_event(unsigned long cycles,
				 struct clock_event_device *evt)
{
	struct gpt_device *gpt = id_to_dev(GPT_CLKEVT_ID);

	__gpt_stop(gpt);
	__gpt_set_cmp(gpt, cycles, 0);
	__gpt_start_from_zero(gpt);

	return 0;
}
/*
 * gpt_set_cmp - update the compare value of GPT @id.
 *
 * Fails with -EINVAL when @id maps to no device or when the timer is
 * in free-run mode (a free-running counter has no compare match).
 * The register write is performed under the GPT update lock.
 * Returns 0 on success.
 */
int gpt_set_cmp(unsigned int id, unsigned int val)
{
	struct gpt_device *dev = id_to_dev(id);
	unsigned long flags;

	if (!dev || dev->mode == GPT_FREE_RUN)
		return -EINVAL;

	gpt_update_lock(flags);
	__gpt_set_cmp(dev, val, 0);
	gpt_update_unlock(flags);

	return 0;
}
/*
 * setup_gpt_dev_locked - configure and optionally start a GPT device.
 *
 * Caller must already hold the GPT lock (per the _locked suffix
 * convention — TODO confirm which lock against the callers).
 *
 * Programs, in order: flags (with GPT_IN_USE forced on), operating
 * mode, clock source/divider, the optional expiry handler, and — for
 * non-free-run modes — the compare value plus its interrupt.  Finally
 * starts the counter unless GPT_NOAUTOEN was requested.
 */
static void setup_gpt_dev_locked(struct gpt_device *dev, unsigned int mode,
	unsigned int clksrc, unsigned int clkdiv, unsigned int cmp,
	void (*func)(unsigned long), unsigned int flags)
{
	/* Mark the timer as claimed in addition to caller-supplied flags. */
	__gpt_set_flags(dev, flags | GPT_IN_USE);

	__gpt_set_mode(dev, mode & GPT_OPMODE_MASK);
	__gpt_set_clk(dev, clksrc & GPT_CLKSRC_MASK, clkdiv & GPT_CLKDIV_MASK);

	if (func)
		__gpt_set_handler(dev, func);

	/* A free-running counter never matches, so no compare/IRQ setup. */
	if (dev->mode != GPT_FREE_RUN) {
		__gpt_set_cmp(dev, cmp, 0);
		/* NOTE(review): dev->flags is re-read here after
		 * __gpt_set_flags() above — presumably that helper stores
		 * the merged flags into dev->flags; verify. */
		if (!(dev->flags & GPT_NOIRQEN))
			__gpt_enable_irq(dev);
	}

	if (!(dev->flags & GPT_NOAUTOEN))
		__gpt_start(dev);
}