static void syscnt_assist_resume(void)
{
	unsigned int old_loop;
	unsigned int assist_cnt1, assist_cnt2;
	unsigned int syscnt_cnt[2] = {0};
	struct gpt_device *assist_dev = id_to_dev(GPT_SYSCNT_ASSIST_ID);
	struct gpt_device *syscnt_dev = id_to_dev(GPT_SYSCNT_ID);

	/*
	 * Retry if the assist counter went backwards between the two reads
	 * (i.e. it wrapped mid-sample), so the syscnt snapshot is consistent.
	 */
	do {
		__gpt_get_cnt(assist_dev, &assist_cnt1);
		__gpt_get_cnt(syscnt_dev, syscnt_cnt);
		__gpt_ack_irq(assist_dev);
		__gpt_get_cnt(assist_dev, &assist_cnt2);
	} while (assist_cnt1 > assist_cnt2);

	old_loop = loop;
	loop = syscnt_cnt[1];

	printk("[%s]assist(0x%08x, 0x%08x),syscnt(0x%08x,0x%08x),loop(%u->%u)\n",
	       __func__, assist_cnt1, assist_cnt2,
	       syscnt_cnt[0], syscnt_cnt[1], old_loop, loop);
}
int gpt_get_cmp(unsigned int id, unsigned int *ptr)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev || !ptr)
		return -EINVAL;

	gpt_update_lock(save_flags);
	__gpt_get_cmp(dev, ptr);
	gpt_update_unlock(save_flags);

	return 0;
}
static inline void setup_clksrc(void)
{
	struct clocksource *cs = &mt6582_gpt.clocksource;
	struct gpt_device *dev = id_to_dev(GPT_CLKSRC_ID);

	cs->mult = clocksource_hz2mult(SYS_CLK_RATE, cs->shift);

	setup_gpt_dev_locked(dev, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
			     0, NULL, 0);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
	setup_sched_clock_needs_suspend((void *)mt_read_sched_clock, 32, SYS_CLK_RATE);
#else
	setup_sched_clock((void *)mt_read_sched_clock, 32, SYS_CLK_RATE);
#endif
}
static cycle_t mt_gpt_read(struct clocksource *cs)
{
	cycle_t cycles;
	unsigned int cnt[2] = {0, 0};
	struct gpt_device *dev = id_to_dev(GPT_CLKSRC_ID);

	__gpt_get_cnt(dev, cnt);

	if (GPT_CLKSRC_ID != GPT6) {
		/* not GPT6: a single 32-bit counter, keep only the low word */
		cycles = (GPT_BIT_MASK & (cycle_t)(cnt[0]));
	} else {
		/* GPT6 is 64-bit: combine the high and low words */
		cycles = ((cycle_t)(cnt[1])) << 32 | (cycle_t)(cnt[0]);
	}

	return cycles;
}
static void syscnt_assist_handler(unsigned long data)
{
	unsigned int assist_cnt;
	unsigned int syscnt_cnt[2] = {0};
	unsigned int cnth;
	unsigned int pct_lo, pct_hi;
	int cnt = 0;
	struct gpt_device *assist_dev = id_to_dev(GPT_SYSCNT_ASSIST_ID);
	struct gpt_device *syscnt_dev = id_to_dev(GPT_SYSCNT_ID);

	__gpt_get_cnt(assist_dev, &assist_cnt);
	__gpt_get_cnt(syscnt_dev, syscnt_cnt);

	loop++;

	/* poll GPT_CNTH until the hardware high word catches up with 'loop' */
	do {
		cnt++;
		cnth = DRV_Reg32(syscnt_dev->base_addr + GPT_CNTH);
		if ((cnt / CHECK_WARNING_TIMERS) && !(cnt % CHECK_WARNING_TIMERS)) {
			printk("[%s]WARNING: fail to sync GPT_CNTH!! assist(0x%08x),"
			       "syscnt(0x%08x,0x%08x),cnth(0x%08x),loop(0x%08x),cnt(%d)\n",
			       __func__, assist_cnt, syscnt_cnt[0], syscnt_cnt[1],
			       cnth, loop, cnt);
		}
	} while (cnth != loop);

	read_cntpct(pct_lo, pct_hi);
	WARN_ON(pct_hi != loop);

	printk("[%s]syscnt assist IRQ!! assist(0x%08x),syscnt(0x%08x,0x%08x),"
	       "cnth:pct_hi:loop(0x%08x,0x%08x,0x%08x),cnt(%d)\n",
	       __func__, assist_cnt, syscnt_cnt[0], syscnt_cnt[1],
	       cnth, pct_hi, loop, cnt);
}
static irqreturn_t gpt_handler(int irq, void *dev_id)
{
	unsigned int id = gpt_get_and_ack_irq();
	struct gpt_device *dev = id_to_dev(id);

	if (likely(dev)) {
		if (!(dev->flags & GPT_ISR))
			handlers[id](id);
		else
			handlers[id]((unsigned long)dev_id);
	} else {
		pr_err("GPT id is %d\n", id);
	}

	return IRQ_HANDLED;
}
int free_gpt(unsigned int id)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;

	if (!(dev->flags & GPT_IN_USE))
		return 0;

	gpt_update_lock(save_flags);
	release_gpt_dev_locked(dev);
	gpt_update_unlock(save_flags);

	return 0;
}
static inline void setup_syscnt(void)
{
	struct gpt_device *dev = id_to_dev(GPT_SYSCNT_ID);

#ifndef CONFIG_MT6582_FPGA
	setup_gpt_dev_locked(dev, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
			     0, NULL, 0);
#else
	/* use div2 for 6Mhz */
	setup_gpt_dev_locked(dev, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
			     0, NULL, 0);
#endif
	printk("fwq sysc count \n");
}
int gpt_get_cnt(unsigned int id, unsigned int *ptr)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev || !ptr)
		return -EINVAL;

	if (!(dev->features & GPT_FEAT_64_BIT)) {
		__gpt_get_cnt(dev, ptr);
	} else {
		/* 64-bit counters need the update lock for a consistent two-word read */
		gpt_update_lock(save_flags);
		__gpt_get_cnt(dev, ptr);
		gpt_update_unlock(save_flags);
	}

	return 0;
}
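/*
 * Usage sketch, not part of the original driver: reading a counter through
 * gpt_get_cnt(). The caller passes a two-word buffer; for a 64-bit-capable
 * timer both words are presumably filled, otherwise only cnt[0] is
 * meaningful. GPT_SYSCNT_ID is reused here purely as an example of a timer
 * with a high word, and the 64-bit combination mirrors mt_gpt_read() below.
 */
static u64 example_read_syscnt(void)
{
	unsigned int cnt[2] = {0, 0};

	if (gpt_get_cnt(GPT_SYSCNT_ID, cnt))
		return 0;

	return ((u64)cnt[1] << 32) | cnt[0];
}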
static inline void setup_clkevt(void)
{
	unsigned int cmp;
	struct clock_event_device *evt = &mt6589_gpt.clockevent;
	struct gpt_device *dev = id_to_dev(GPT_CLKEVT_ID);

	evt->mult = div_sc(SYS_CLK_RATE, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt);
	evt->min_delta_ns = clockevent_delta2ns(3, evt);
	evt->cpumask = cpumask_of(0);

	setup_gpt_dev_locked(dev, GPT_REPEAT, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
			     SYS_CLK_RATE / HZ, clkevt_handler, GPT_ISR);

	__gpt_get_cmp(dev, &cmp);
	printk("GPT1_CMP = %d, HZ = %d\n", cmp, HZ);
}
int gpt_set_cmp(unsigned int id, unsigned int val)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;

	/* a free-running timer has no compare value to program */
	if (dev->mode == GPT_FREE_RUN)
		return -EINVAL;

	gpt_update_lock(save_flags);
	__gpt_set_cmp(dev, val, 0);
	gpt_update_unlock(save_flags);

	return 0;
}
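/*
 * Usage sketch, illustration only: reading back a compare value with
 * gpt_get_cmp() and reprogramming it with gpt_set_cmp(). The id
 * GPT_CLKEVT_ID and the "double the period" arithmetic are assumptions
 * made just for this example; gpt_set_cmp() rejects timers configured as
 * GPT_FREE_RUN, so it only makes sense for compare-mode timers.
 */
static int example_stretch_period(void)
{
	unsigned int cmp;
	int ret;

	ret = gpt_get_cmp(GPT_CLKEVT_ID, &cmp);
	if (ret)
		return ret;

	return gpt_set_cmp(GPT_CLKEVT_ID, cmp * 2);
}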
int stop_gpt(unsigned int id)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;

	if (!(dev->flags & GPT_IN_USE)) {
		printk(KERN_ERR "%s: GPT%d is not in use!\n", __func__, id);
		return -EBUSY;
	}

	gpt_update_lock(save_flags);
	__gpt_stop(dev);
	gpt_update_unlock(save_flags);

	return 0;
}
static inline void setup_clksrc(void)
{
	struct clocksource *cs = &mt6582_gpt.clocksource;
	struct gpt_device *dev = id_to_dev(GPT_CLKSRC_ID);

	cs->mult = clocksource_hz2mult(SYS_CLK_RATE, cs->shift);

#ifndef CONFIG_MT6582_FPGA
	setup_gpt_dev_locked(dev, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
			     0, NULL, 0);
#else
	setup_gpt_dev_locked(dev, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
			     0, NULL, 0);
#endif
	g_clksrc_init = 1;
}
static cycle_t mt_gpt_read(struct clocksource *cs)
{
	cycle_t cycles;
	unsigned int cnt[2] = {0, 0};
	struct gpt_device *dev = id_to_dev(GPT_CLKSRC_ID);

	__gpt_get_cnt(dev, cnt);

	if (GPT_CLKSRC_ID != GPT6) {
		/*
		 * force the mask on the high 32 bits so unpredictable
		 * upper-bit values cannot leak into the returned cycles
		 */
		cycles = (GPT_BIT_MASK_L & (cycle_t)(cnt[0]));
	} else {
		cycles = (GPT_BIT_MASK_H & (((cycle_t)(cnt[1])) << 32)) |
			 (GPT_BIT_MASK_L & ((cycle_t)(cnt[0])));
	}

	return cycles;
}
static inline void setup_clkevt(u32 freq)
{
	unsigned int cmp;
	struct clock_event_device *evt = &gpt_clockevent;
	struct gpt_device *dev = id_to_dev(GPT_CLKEVT_ID);

	evt->mult = div_sc(freq, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt);
	evt->min_delta_ns = clockevent_delta2ns(3, evt);
	evt->cpumask = cpumask_of(0);

	setup_gpt_dev_locked(dev, GPT_REPEAT, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
			     freq / HZ, clkevt_handler, GPT_ISR);

	__gpt_get_cmp(dev, &cmp);
	pr_alert("GPT1_CMP = %d, HZ = %d\n", cmp, HZ);

	clockevents_register_device(evt);
}
int request_gpt(unsigned int id, unsigned int mode, unsigned int clksrc,
		unsigned int clkdiv, unsigned int cmp,
		void (*func)(unsigned long), unsigned int flags)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;

	if (dev->flags & GPT_IN_USE) {
		printk(KERN_ERR "%s: GPT%d is in use!\n", __func__, (id + 1));
		return -EBUSY;
	}

	gpt_update_lock(save_flags);
	setup_gpt_dev_locked(dev, mode, clksrc, clkdiv, cmp, func, flags);
	gpt_update_unlock(save_flags);

	return 0;
}
int gpt_is_counting(unsigned int id)
{
	unsigned long save_flags;
	int is_counting;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;

	if (!(dev->flags & GPT_IN_USE)) {
		printk(KERN_ERR "%s: GPT%d is not in use!\n", __func__, id);
		return -EBUSY;
	}

	gpt_update_lock(save_flags);
	is_counting = __gpt_get_status(dev);
	gpt_update_unlock(save_flags);

	return is_counting;
}
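/*
 * Lifecycle sketch, illustration only and not part of the original driver:
 * claiming a timer with request_gpt(), querying it with gpt_is_counting(),
 * then stopping and releasing it. The timer id GPT4, the compare value and
 * the callback example_timeout() are assumptions for this example; the mode,
 * clock source, divider and GPT_ISR flag are the values used elsewhere in
 * this file. Whether request_gpt() also starts the counter is not visible
 * in this section, so gpt_is_counting() is used purely as a status query.
 */
static void example_timeout(unsigned long data)
{
	pr_info("example GPT expired\n");
}

static int example_gpt_lifecycle(void)
{
	int ret;

	ret = request_gpt(GPT4, GPT_REPEAT, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
			  SYS_CLK_RATE / HZ, example_timeout, GPT_ISR);
	if (ret)
		return ret;

	pr_info("GPT4 counting: %d\n", gpt_is_counting(GPT4));

	stop_gpt(GPT4);
	return free_gpt(GPT4);
}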
static inline void start_syscnt_assist(void)
{
	struct gpt_device *dev = id_to_dev(GPT_SYSCNT_ASSIST_ID);

	__gpt_start(dev);
}