/*
 * clk_get() - look up a clock for a device by name.
 *
 * Serializes the lookup against the global clock list with @clocks_sem
 * and delegates the actual search to __clk_get().  Returns whatever
 * __clk_get() produced (NULL when no match is found).
 */
struct clk *clk_get(struct device *dev, const char *name)
{
	struct clk *clk = NULL;

	down(&clocks_sem);
	clk = __clk_get(dev, name);
	up(&clocks_sem);

	return clk;
}
/*
 * FIXME: Not every user of the slow clock properly claims it
 * (get + prepare + enable) before use.  If all well-behaved users
 * decide they no longer need it (or are removed), the clock gets
 * disabled while the faulty users still depend on it and the system
 * hangs.  Pin the clock here so it can never be disabled.
 * Remove this function and the slow_clk variable once all users are
 * fixed to claim the clock correctly.
 */
static int __init of_at91_clk_slow_retain(void)
{
	if (slow_clk) {
		/* Take a permanent reference and keep the clock running. */
		__clk_get(slow_clk);
		clk_prepare_enable(slow_clk);
	}

	return 0;
}
/*
 * __clk_get() - take a use-count reference on @clk and its ancestors.
 *
 * Walks up the parent chain, bumping each clock's use count.  The walk
 * stops at the first clock that was already held (its count was > 0
 * before the increment), since its ancestors must already be referenced.
 * Always reports success by returning 1.
 */
int __clk_get(struct clk *clk)
{
	while (clk) {
		if (atomic_inc_return(&clk->use_cnt) > 1)
			break;
		clk = clk->parent;
	}

	return 1;
}
/*
 * zclk_set_rate() - program the Z-clock divider relative to its parent.
 *
 * Pins the parent clock for the duration of the update, takes the FRQCR
 * pseudo-lock, validates @rate against the parent rate in 1/32 steps,
 * writes the divider field into FRQCRC and kicks the frequency change.
 *
 * Returns 0 on success, -ENODEV if there is no usable parent, -EBUSY if
 * the FRQCR lock is contended or the kick check fails, -EINVAL for an
 * out-of-range rate.
 */
static int zclk_set_rate(struct clk *clk, unsigned long rate)
{
	void __iomem *frqcrc;
	int ret;
	unsigned long step, p_rate;
	u32 val;

	/* Hold a reference on the parent so it stays valid while we work. */
	if (!clk->parent || !__clk_get(clk->parent))
		return -ENODEV;

	/*
	 * NOTE(review): frqcr_lock is used as a pseudo-lock — presumably
	 * initialized to -1 so atomic_inc_and_test() succeeds only for the
	 * first taker; confirm against its definition.  The matching
	 * atomic_dec() is unconditionally done at the "done" label, which
	 * is correct because the increment happened even on the failed
	 * (contended) attempt.
	 */
	if (!atomic_inc_and_test(&frqcr_lock) || !frqcr_kick_check(clk)) {
		ret = -EBUSY;
		goto done;
	}

	/*
	 * Users are supposed to first call clk_set_rate() only with
	 * clk_round_rate() results. So, we don't fix wrong rates here, but
	 * guard against them anyway
	 */
	p_rate = clk_get_rate(clk->parent);
	if (rate == p_rate) {
		/* val == 0 means "run at the full parent rate". */
		val = 0;
	} else {
		/* Divider granularity is 1/32 of the parent rate. */
		step = DIV_ROUND_CLOSEST(p_rate, 32);
		if (rate > p_rate || rate < step) {
			ret = -EINVAL;
			goto done;
		}
		val = 32 - rate / step;
	}

	/*
	 * FRQCRC lives at a fixed offset from the enable register within
	 * the same mapping; update only this clock's divider field.
	 */
	frqcrc = clk->mapped_reg + (FRQCRC - (u32)clk->enable_reg);
	iowrite32((ioread32(frqcrc) & ~(clk->div_mask << clk->enable_bit)) |
		  (val << clk->enable_bit), frqcrc);
	ret = frqcr_kick_do(clk);

done:
	/* Release the pseudo-lock and drop the parent reference. */
	atomic_dec(&frqcr_lock);
	__clk_put(clk->parent);
	return ret;
}
/*
 * sun9i_a80_gt_setup() - register the A80 GT bus clock from DT and keep
 * it permanently enabled.
 *
 * Maps the clock's registers, registers the factors clock, and — because
 * the GT bus clock must never be gated — takes a reference and enables it
 * for good.
 */
static void __init sun9i_a80_gt_setup(struct device_node *node)
{
	void __iomem *reg;
	struct clk *gt;

	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(reg)) {
		pr_err("Could not get registers for a80-gt-clk: %s\n",
		       node->name);
		return;
	}

	gt = sunxi_factors_register(node, &sun9i_a80_gt_data,
				    &sun9i_a80_gt_lock, reg);
	/*
	 * Registration can fail; the original code dereferenced the result
	 * unconditionally in __clk_get()/clk_prepare_enable(), which would
	 * oops on a NULL return.  Bail out instead.
	 */
	if (!gt) {
		pr_err("Could not register a80-gt-clk: %s\n", node->name);
		return;
	}

	/* The GT bus clock needs to be always enabled */
	__clk_get(gt);
	clk_prepare_enable(gt);
}
/*
 * sun8i_a23_mbus_setup() - register the A23 MBUS clock from DT.
 *
 * Builds a composite clock (mux + divider + gate, all in one register)
 * from the device-tree node, registers it as a clock provider, and then
 * takes a permanent reference so the MBUS clock is never disabled.
 *
 * On any failure, all resources acquired so far are released via the
 * goto-cleanup chain below; the function returns silently (void).
 */
static void __init sun8i_a23_mbus_setup(struct device_node *node)
{
	int num_parents = of_clk_get_parent_count(node);
	const char **parents;
	const char *clk_name = node->name;	/* default; may be overridden by DT */
	struct resource res;
	struct clk_divider *div;
	struct clk_gate *gate;
	struct clk_mux *mux;
	struct clk *clk;
	void __iomem *reg;
	int err;

	parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
	if (!parents)
		return;

	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(reg)) {
		pr_err("Could not get registers for sun8i-mbus-clk\n");
		goto err_free_parents;
	}

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		goto err_unmap;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		goto err_free_div;

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		goto err_free_mux;

	/* "clock-output-names" is optional; node name is the fallback. */
	of_property_read_string(node, "clock-output-names", &clk_name);
	of_clk_parent_fill(node, parents, num_parents);

	/* Gate, divider and mux all share the single MBUS register. */
	gate->reg = reg;
	gate->bit_idx = SUN8I_MBUS_ENABLE;
	gate->lock = &sun8i_a23_mbus_lock;

	div->reg = reg;
	div->shift = SUN8I_MBUS_DIV_SHIFT;
	div->width = SUN8I_MBUS_DIV_WIDTH;
	div->lock = &sun8i_a23_mbus_lock;

	mux->reg = reg;
	mux->shift = SUN8I_MBUS_MUX_SHIFT;
	mux->mask = SUN8I_MBUS_MUX_MASK;
	mux->lock = &sun8i_a23_mbus_lock;

	clk = clk_register_composite(NULL, clk_name, parents, num_parents,
				     &mux->hw, &clk_mux_ops,
				     &div->hw, &clk_divider_ops,
				     &gate->hw, &clk_gate_ops,
				     0);
	if (IS_ERR(clk))
		goto err_free_gate;

	err = of_clk_add_provider(node, of_clk_src_simple_get, clk);
	if (err)
		goto err_unregister_clk;

	kfree(parents); /* parents is deep copied */
	/* The MBUS clocks needs to be always enabled */
	__clk_get(clk);
	clk_prepare_enable(clk);

	return;

err_unregister_clk:
	/* TODO: The composite clock stuff will leak a bit here. */
	clk_unregister(clk);
err_free_gate:
	kfree(gate);
err_free_mux:
	kfree(mux);
err_free_div:
	kfree(div);
err_unmap:
	/* Undo of_io_request_and_map(): unmap, then release the region. */
	iounmap(reg);
	of_address_to_resource(node, 0, &res);
	release_mem_region(res.start, resource_size(&res));
err_free_parents:
	kfree(parents);
}