/*
 * Report whether a peripheral clock needs a trigger: true when it
 * has a usable selector, or a divider (main or pre) that is not
 * fixed.  Non-peripheral clocks never require one.
 */
static bool clk_requires_trigger(struct kona_clk *bcm_clk)
{
	struct peri_clk_data *peri = bcm_clk->u.peri;
	struct bcm_clk_div *divider;
	struct bcm_clk_sel *selector;

	if (bcm_clk->type != bcm_clk_peri)
		return false;

	/* A selector with at least one parent implies triggering. */
	selector = &peri->sel;
	if (selector->parent_count && selector_exists(selector))
		return true;

	/* With no main divider there is nothing left to trigger. */
	divider = &peri->div;
	if (!divider_exists(divider))
		return false;

	/* Fixed dividers don't need triggers */
	if (!divider_is_fixed(divider))
		return true;

	/* Fall back to the pre-divider: it must exist and not be fixed. */
	divider = &peri->pre_div;

	return divider_exists(divider) && !divider_is_fixed(divider);
}
/* * If a clock has two dividers, the combined number of fractional * bits must be representable in a 32-bit unsigned value. This * is because we scale up a dividend using both dividers before * dividing to improve accuracy, and we need to avoid overflow. */ static bool kona_dividers_valid(struct kona_clk *bcm_clk) { struct peri_clk_data *peri = bcm_clk->u.peri; struct bcm_clk_div *div; struct bcm_clk_div *pre_div; u32 limit; BUG_ON(bcm_clk->type != bcm_clk_peri); if (!divider_exists(&peri->div) || !divider_exists(&peri->pre_div)) return true; div = &peri->div; pre_div = &peri->pre_div; if (divider_is_fixed(div) || divider_is_fixed(pre_div)) return true; limit = BITS_PER_BYTE * sizeof(u32); return div->u.s.frac_width + pre_div->u.s.frac_width <= limit; }
/* Get the rate of a peripheral clock */ static unsigned long peri_clk_get_rate(struct clk *c) { struct peri_clock *peri_clk = to_peri_clk(c); struct peri_clk_data *cd = peri_clk->data; void *base = (void *)c->ccu_clk_mgr_base; int div = 1; const char **clock; struct refclk *ref; u32 reg; debug("%s: %s\n", __func__, c->name); if (selector_exists(&cd->sel)) { reg = readl(base + cd->sel.offset); c->sel = bitfield_extract(reg, cd->sel.shift, cd->sel.width); } else { /* * For peri clocks that don't have a selector, the single * reference clock will always exist at index 0. */ c->sel = 0; } if (divider_exists(&cd->div)) { reg = readl(base + cd->div.offset); div = bitfield_extract(reg, cd->div.shift, cd->div.width); div += 1; } clock = cd->clocks; ref = refclk_str_to_clk(clock[c->sel]); if (!ref) { printf("%s: Can't lookup %s\n", __func__, clock[c->sel]); return 0; } c->parent = &ref->clk; c->div = div; c->rate = c->parent->rate / c->div; debug("%s parent rate %lu div %d sel %d rate %lu\n", __func__, c->parent->rate, div, c->sel, c->rate); return c->rate; }
static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk) { struct peri_clk_data *peri; struct bcm_clk_policy *policy; struct bcm_clk_gate *gate; struct bcm_clk_hyst *hyst; struct bcm_clk_div *div; struct bcm_clk_sel *sel; struct bcm_clk_trig *trig; const char *name; u32 range; u32 limit; BUG_ON(bcm_clk->type != bcm_clk_peri); peri = bcm_clk->u.peri; name = bcm_clk->init_data.name; range = bcm_clk->ccu->range; limit = range - sizeof(u32); limit = round_down(limit, sizeof(u32)); policy = &peri->policy; if (policy_exists(policy)) { if (policy->offset > limit) { pr_err("%s: bad policy offset for %s (%u > %u)\n", __func__, name, policy->offset, limit); return false; } } gate = &peri->gate; hyst = &peri->hyst; if (gate_exists(gate)) { if (gate->offset > limit) { pr_err("%s: bad gate offset for %s (%u > %u)\n", __func__, name, gate->offset, limit); return false; } if (hyst_exists(hyst)) { if (hyst->offset > limit) { pr_err("%s: bad hysteresis offset for %s " "(%u > %u)\n", __func__, name, hyst->offset, limit); return false; } } } else if (hyst_exists(hyst)) { pr_err("%s: hysteresis but no gate for %s\n", __func__, name); return false; } div = &peri->div; if (divider_exists(div)) { if (div->u.s.offset > limit) { pr_err("%s: bad divider offset for %s (%u > %u)\n", __func__, name, div->u.s.offset, limit); return false; } } div = &peri->pre_div; if (divider_exists(div)) { if (div->u.s.offset > limit) { pr_err("%s: bad pre-divider offset for %s " "(%u > %u)\n", __func__, name, div->u.s.offset, limit); return false; } } sel = &peri->sel; if (selector_exists(sel)) { if (sel->offset > limit) { pr_err("%s: bad selector offset for %s (%u > %u)\n", __func__, name, sel->offset, limit); return false; } } trig = &peri->trig; if (trigger_exists(trig)) { if (trig->offset > limit) { pr_err("%s: bad trigger offset for %s (%u > %u)\n", __func__, name, trig->offset, limit); return false; } } trig = &peri->pre_trig; if (trigger_exists(trig)) { if (trig->offset > limit) { pr_err("%s: 
bad pre-trigger offset for %s (%u > %u)\n", __func__, name, trig->offset, limit); return false; } } return true; }
/* Determine whether the set of peripheral clock registers are valid. */ static bool peri_clk_data_valid(struct kona_clk *bcm_clk) { struct peri_clk_data *peri; struct bcm_clk_policy *policy; struct bcm_clk_gate *gate; struct bcm_clk_hyst *hyst; struct bcm_clk_sel *sel; struct bcm_clk_div *div; struct bcm_clk_div *pre_div; struct bcm_clk_trig *trig; const char *name; BUG_ON(bcm_clk->type != bcm_clk_peri); /* * First validate register offsets. This is the only place * where we need something from the ccu, so we do these * together. */ if (!peri_clk_data_offsets_valid(bcm_clk)) return false; peri = bcm_clk->u.peri; name = bcm_clk->init_data.name; policy = &peri->policy; if (policy_exists(policy) && !policy_valid(policy, name)) return false; gate = &peri->gate; if (gate_exists(gate) && !gate_valid(gate, "gate", name)) return false; hyst = &peri->hyst; if (hyst_exists(hyst) && !hyst_valid(hyst, name)) return false; sel = &peri->sel; if (selector_exists(sel)) { if (!sel_valid(sel, "selector", name)) return false; } else if (sel->parent_count > 1) { pr_err("%s: multiple parents but no selector for %s\n", __func__, name); return false; } div = &peri->div; pre_div = &peri->pre_div; if (divider_exists(div)) { if (!div_valid(div, "divider", name)) return false; if (divider_exists(pre_div)) if (!div_valid(pre_div, "pre-divider", name)) return false; } else if (divider_exists(pre_div)) { pr_err("%s: pre-divider but no divider for %s\n", __func__, name); return false; } trig = &peri->trig; if (trigger_exists(trig)) { if (!trig_valid(trig, "trigger", name)) return false; if (trigger_exists(&peri->pre_trig)) { if (!trig_valid(trig, "pre-trigger", name)) { return false; } } if (!clk_requires_trigger(bcm_clk)) { pr_warn("%s: ignoring trigger for %s (not needed)\n", __func__, name); trigger_clear_exists(trig); } } else if (trigger_exists(&peri->pre_trig)) { pr_err("%s: pre-trigger but no trigger for %s\n", __func__, name); return false; } else if (clk_requires_trigger(bcm_clk)) { 
pr_err("%s: required trigger missing for %s\n", __func__, name); return false; } return kona_dividers_valid(bcm_clk); }
/* Enable a peripheral clock */ static int peri_clk_enable(struct clk *c, int enable) { int ret = 0; u32 reg; struct peri_clock *peri_clk = to_peri_clk(c); struct peri_clk_data *cd = peri_clk->data; struct bcm_clk_gate *gate = &cd->gate; void *base = (void *)c->ccu_clk_mgr_base; debug("%s: %s\n", __func__, c->name); clk_get_rate(c); /* Make sure rate and sel are filled in */ /* enable access */ writel(CLK_WR_ACCESS_PASSWORD, base + WR_ACCESS_OFFSET); if (enable) { debug("%s %s set rate %lu div %lu sel %d parent %lu\n", __func__, c->name, c->rate, c->div, c->sel, c->parent->rate); /* * clkgate - only software controllable gates are * supported by u-boot which includes all clocks * that matter. This avoids bringing in a lot of extra * complexity as done in the kernel framework. */ if (gate_exists(gate)) { reg = readl(base + cd->gate.offset); reg |= (1 << cd->gate.en_bit); writel(reg, base + cd->gate.offset); } /* div and pll select */ if (divider_exists(&cd->div)) { reg = readl(base + cd->div.offset); bitfield_replace(reg, cd->div.shift, cd->div.width, c->div - 1); writel(reg, base + cd->div.offset); } /* frequency selector */ if (selector_exists(&cd->sel)) { reg = readl(base + cd->sel.offset); bitfield_replace(reg, cd->sel.shift, cd->sel.width, c->sel); writel(reg, base + cd->sel.offset); } /* trigger */ if (trigger_exists(&cd->trig)) { writel((1 << cd->trig.bit), base + cd->trig.offset); /* wait for trigger status bit to go to 0 */ ret = wait_bit(base, cd->trig.offset, cd->trig.bit, 0); if (ret) return ret; } /* wait for running (status_bit = 1) */ ret = wait_bit(base, cd->gate.offset, cd->gate.status_bit, 1); if (ret) return ret; } else { debug("%s disable clock %s\n", __func__, c->name); /* clkgate */ reg = readl(base + cd->gate.offset); reg &= ~(1 << cd->gate.en_bit); writel(reg, base + cd->gate.offset); /* wait for stop (status_bit = 0) */ ret = wait_bit(base, cd->gate.offset, cd->gate.status_bit, 0); } /* disable access */ writel(0, base + WR_ACCESS_OFFSET); 
return ret; }
static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk) { struct peri_clk_data *peri; struct bcm_clk_gate *gate; struct bcm_clk_div *div; struct bcm_clk_sel *sel; struct bcm_clk_trig *trig; const char *name; u32 range; u32 limit; BUG_ON(bcm_clk->type != bcm_clk_peri); peri = bcm_clk->peri; name = bcm_clk->name; range = bcm_clk->ccu->range; limit = range - sizeof(u32); limit = round_down(limit, sizeof(u32)); gate = &peri->gate; if (gate_exists(gate)) { if (gate->offset > limit) { pr_err("%s: bad gate offset for %s (%u > %u)\n", __func__, name, gate->offset, limit); return false; } } div = &peri->div; if (divider_exists(div)) { if (div->offset > limit) { pr_err("%s: bad divider offset for %s (%u > %u)\n", __func__, name, div->offset, limit); return false; } } div = &peri->pre_div; if (divider_exists(div)) { if (div->offset > limit) { pr_err("%s: bad pre-divider offset for %s " "(%u > %u)\n", __func__, name, div->offset, limit); return false; } } sel = &peri->sel; if (selector_exists(sel)) { if (sel->offset > limit) { pr_err("%s: bad selector offset for %s (%u > %u)\n", __func__, name, sel->offset, limit); return false; } } trig = &peri->trig; if (trigger_exists(trig)) { if (trig->offset > limit) { pr_err("%s: bad trigger offset for %s (%u > %u)\n", __func__, name, trig->offset, limit); return false; } } trig = &peri->pre_trig; if (trigger_exists(trig)) { if (trig->offset > limit) { pr_err("%s: bad pre-trigger offset for %s (%u > %u)\n", __func__, name, trig->offset, limit); return false; } } return true; }