/** * cdns_i2c_probe - Platform registration call * @pdev: Handle to the platform device structure * * This function does all the memory allocation and registration for the i2c * device. User can modify the address mode to 10 bit address mode using the * ioctl call with option I2C_TENBIT. * * Return: 0 on success, negative error otherwise */ static int cdns_i2c_probe(struct platform_device *pdev) { struct resource *r_mem; struct cdns_i2c *id; int ret; const struct of_device_id *match; id = devm_kzalloc(&pdev->dev, sizeof(*id), GFP_KERNEL); if (!id) return -ENOMEM; platform_set_drvdata(pdev, id); match = of_match_node(cdns_i2c_of_match, pdev->dev.of_node); if (match && match->data) { const struct cdns_platform_data *data = match->data; id->quirks = data->quirks; } r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); id->membase = devm_ioremap_resource(&pdev->dev, r_mem); if (IS_ERR(id->membase)) return PTR_ERR(id->membase); id->irq = platform_get_irq(pdev, 0); id->adap.dev.of_node = pdev->dev.of_node; id->adap.algo = &cdns_i2c_algo; id->adap.timeout = CDNS_I2C_TIMEOUT; id->adap.retries = 3; /* Default retry value. 
*/ id->adap.algo_data = id; id->adap.dev.parent = &pdev->dev; init_completion(&id->xfer_done); snprintf(id->adap.name, sizeof(id->adap.name), "Cadence I2C at %08lx", (unsigned long)r_mem->start); id->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(id->clk)) { dev_err(&pdev->dev, "input clock not found.\n"); return PTR_ERR(id->clk); } ret = clk_prepare_enable(id->clk); if (ret) { dev_err(&pdev->dev, "Unable to enable clock.\n"); return ret; } id->clk_rate_change_nb.notifier_call = cdns_i2c_clk_notifier_cb; if (clk_notifier_register(id->clk, &id->clk_rate_change_nb)) dev_warn(&pdev->dev, "Unable to register clock notifier.\n"); id->input_clk = clk_get_rate(id->clk); ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &id->i2c_clk); if (ret || (id->i2c_clk > CDNS_I2C_SPEED_MAX)) id->i2c_clk = CDNS_I2C_SPEED_DEFAULT; cdns_i2c_writereg(CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS, CDNS_I2C_CR_OFFSET); ret = cdns_i2c_setclk(id->input_clk, id); if (ret) { dev_err(&pdev->dev, "invalid SCL clock: %u Hz\n", id->i2c_clk); ret = -EINVAL; goto err_clk_dis; } ret = devm_request_irq(&pdev->dev, id->irq, cdns_i2c_isr, 0, DRIVER_NAME, id); if (ret) { dev_err(&pdev->dev, "cannot get irq %d\n", id->irq); goto err_clk_dis; } ret = i2c_add_adapter(&id->adap); if (ret < 0) { dev_err(&pdev->dev, "reg adap failed: %d\n", ret); goto err_clk_dis; } /* * Cadence I2C controller has a bug wherein it generates * invalid read transaction after HW timeout in master receiver mode. * HW timeout is not used by this driver and the interrupt is disabled. * But the feature itself cannot be disabled. Hence maximum value * is written to this register to reduce the chances of error. */ cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET); dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n", id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq); return 0; err_clk_dis: clk_disable_unprepare(id->clk); return ret; }
/*
 * Hook the TWD local timer clock into the common clock framework so rate
 * changes reach the reprogramming notifier. Succeeds silently when there
 * is nothing to register (no per-cpu event devices, or clock lookup
 * previously failed).
 */
static int twd_clk_init(void)
{
	if (!twd_evt)
		return 0;
	if (!*__this_cpu_ptr(twd_evt))
		return 0;
	if (IS_ERR(twd_clk))
		return 0;

	return clk_notifier_register(twd_clk, &twd_clk_nb);
}
/*
 * meson_clk_register_cpu - register the Meson CPU scaling clock
 * @clk_conf: static configuration (name, parent, register offset, div table)
 * @reg_base: ioremapped controller base the divider register lives in
 * @lock:     spinlock shared with the other clocks of this controller
 *            (currently unused here; kept for registration symmetry —
 *            NOTE(review): confirm against sibling registration helpers)
 *
 * Registers a rate-change notifier on the parent clock *before* registering
 * the CPU clock itself, so parent rate changes are never missed.
 *
 * Return: the registered struct clk on success, ERR_PTR() otherwise.
 */
struct clk *meson_clk_register_cpu(const struct clk_conf *clk_conf,
				   void __iomem *reg_base,
				   spinlock_t *lock)
{
	struct clk *clk;
	struct clk *pclk;
	struct meson_clk_cpu *clk_cpu;
	struct clk_init_data init;
	int ret;

	clk_cpu = kzalloc(sizeof(*clk_cpu), GFP_KERNEL);
	if (!clk_cpu)
		return ERR_PTR(-ENOMEM);

	clk_cpu->base = reg_base;
	clk_cpu->reg_off = clk_conf->reg_off;
	clk_cpu->div_table = clk_conf->conf.div_table;
	clk_cpu->clk_nb.notifier_call = meson_clk_cpu_notifier_cb;

	/*
	 * GET_RATE_NOCACHE: the divider can be reprogrammed behind the
	 * framework's back (from the notifier), so never cache the rate.
	 */
	init.name = clk_conf->clk_name;
	init.ops = &meson_clk_cpu_ops;
	init.flags = clk_conf->flags | CLK_GET_RATE_NOCACHE;
	init.flags |= CLK_SET_RATE_PARENT;
	init.parent_names = clk_conf->clks_parent;
	init.num_parents = 1;

	/* init is stack-allocated; clk_register() copies what it needs. */
	clk_cpu->hw.init = &init;

	pclk = __clk_lookup(clk_conf->clks_parent[0]);
	if (!pclk) {
		pr_err("%s: could not lookup parent clock %s\n",
		       __func__, clk_conf->clks_parent[0]);
		ret = -EINVAL;
		goto free_clk;
	}

	/* Notifier first: must be in place before the clock goes live. */
	ret = clk_notifier_register(pclk, &clk_cpu->clk_nb);
	if (ret) {
		pr_err("%s: failed to register clock notifier for %s\n",
		       __func__, clk_conf->clk_name);
		goto free_clk;
	}

	clk = clk_register(NULL, &clk_cpu->hw);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto unregister_clk_nb;
	}

	return clk;

unregister_clk_nb:
	clk_notifier_unregister(pclk, &clk_cpu->clk_nb);
free_clk:
	kfree(clk_cpu);
	return ERR_PTR(ret);
}
/*
 * owl_gpu_clk_notifier_register - attach @notifier to every GPU3D clock
 * @notifier: rate-change notifier block to register
 *
 * Bug fix: clk_get() reports failure via ERR_PTR(), never NULL, and the
 * original code passed the unchecked (possibly error) pointer straight to
 * clk_notifier_register() and discarded its return value, four times over.
 * Each clock is now validated and failures are logged; one bad clock does
 * not prevent the others from being registered.  The clk references are
 * intentionally kept (not clk_put) for the lifetime of the notifier, as
 * in the original code.
 */
void owl_gpu_clk_notifier_register(struct notifier_block *notifier)
{
	static const char * const gpu_clk_names[] = {
		"GPU3D_CORECLK",
		"GPU3D_SYSCLK",
		"GPU3D_HYDCLK",
		"GPU3D_NIC_MEMCLK",
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(gpu_clk_names); i++) {
		struct clk *clk;
		int ret;

		clk = clk_get(NULL, gpu_clk_names[i]);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to get clock %s\n",
			       __func__, gpu_clk_names[i]);
			continue;
		}

		ret = clk_notifier_register(clk, notifier);
		if (ret)
			pr_err("%s: failed to register notifier on %s: %d\n",
			       __func__, gpu_clk_names[i], ret);
	}
}
/*
 * sun5i_setup_clocksource - register timer 1 as a clocksource
 * @node: device-tree node of the timer (used only for the clocksource name)
 * @base: ioremapped register base of the timer block
 * @clk:  parent clock feeding the counter
 * @irq:  interrupt number (unused here; presumably consumed by the
 *        companion clockevent setup — TODO confirm)
 *
 * Enables the clock, registers a rate-change notifier, starts timer 1 as a
 * free-running 32-bit down-counter and registers it with the clocksource
 * core.  All acquired resources are released in reverse order on failure.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int __init sun5i_setup_clocksource(struct device_node *node,
					  void __iomem *base,
					  struct clk *clk, int irq)
{
	struct sun5i_timer_clksrc *cs;
	unsigned long rate;
	int ret;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return -ENOMEM;

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("Couldn't enable parent clock\n");
		goto err_free;
	}

	rate = clk_get_rate(clk);

	cs->timer.base = base;
	cs->timer.clk = clk;
	/* Re-register the clocksource at the new rate when the clock changes. */
	cs->timer.clk_rate_cb.notifier_call = sun5i_rate_cb_clksrc;
	cs->timer.clk_rate_cb.next = NULL;

	ret = clk_notifier_register(clk, &cs->timer.clk_rate_cb);
	if (ret) {
		pr_err("Unable to register clock notifier.\n");
		goto err_disable_clk;
	}

	/* Max reload value + auto-reload: free-running down-counter. */
	writel(~0, base + TIMER_INTVAL_LO_REG(1));
	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
	       base + TIMER_CTL_REG(1));

	cs->clksrc.name = node->name;
	cs->clksrc.rating = 340;
	cs->clksrc.read = sun5i_clksrc_read;
	cs->clksrc.mask = CLOCKSOURCE_MASK(32);
	cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;

	ret = clocksource_register_hz(&cs->clksrc, rate);
	if (ret) {
		pr_err("Couldn't register clock source.\n");
		goto err_remove_notifier;
	}

	return 0;

err_remove_notifier:
	clk_notifier_unregister(clk, &cs->timer.clk_rate_cb);
err_disable_clk:
	clk_disable_unprepare(clk);
err_free:
	kfree(cs);
	return ret;
}
/*
 * ttc_setup_clockevent - set up one TTC channel as a clock event device
 * @clk:  input clock for the counter
 * @base: ioremapped base of this TTC channel
 * @irq:  interval interrupt for this channel
 *
 * Programs the counter as a /32-prescaled interval timer and registers it
 * with the clockevent core.  Errors are reported with WARN_ON and the
 * function returns without registering (boot-time best effort).
 */
static void __init ttc_setup_clockevent(struct clk *clk,
					void __iomem *base, u32 irq)
{
	struct ttc_timer_clockevent *ttcce;
	int err;

	ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL);
	if (WARN_ON(!ttcce))
		return;

	ttcce->ttc.clk = clk;

	err = clk_prepare_enable(ttcce->ttc.clk);
	if (WARN_ON(err)) {
		kfree(ttcce);
		return;
	}

	ttcce->ttc.clk_rate_change_nb.notifier_call =
		ttc_rate_change_clockevent_cb;
	ttcce->ttc.clk_rate_change_nb.next = NULL;
	if (clk_notifier_register(ttcce->ttc.clk,
				  &ttcce->ttc.clk_rate_change_nb))
		pr_warn("Unable to register clock notifier.\n");
	ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);

	ttcce->ttc.base_addr = base;
	ttcce->ce.name = "ttc_clockevent";
	ttcce->ce.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	ttcce->ce.set_next_event = ttc_set_next_event;
	ttcce->ce.set_mode = ttc_set_mode;
	ttcce->ce.rating = 200;
	ttcce->ce.irq = irq;
	ttcce->ce.cpumask = cpu_possible_mask;

	/*
	 * Setup the clock event timer to be an interval timer which
	 * is prescaled by 32 using the interval interrupt. Leave it
	 * disabled for now.
	 */
	__raw_writel(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
	__raw_writel(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
		     ttcce->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
	__raw_writel(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET);

	err = request_irq(irq, ttc_clock_event_interrupt,
			  IRQF_DISABLED | IRQF_TIMER, ttcce->ce.name, ttcce);
	if (WARN_ON(err)) {
		/*
		 * Bug fix: the original freed ttcce while the clk framework
		 * still held a notifier pointing into it (use-after-free on
		 * the next rate change) and left the clock enabled.
		 */
		clk_notifier_unregister(ttcce->ttc.clk,
					&ttcce->ttc.clk_rate_change_nb);
		clk_disable_unprepare(ttcce->ttc.clk);
		kfree(ttcce);
		return;
	}

	clockevents_config_and_register(&ttcce->ce,
					ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
}
static int hb_cpufreq_driver_init(void) { struct platform_device_info devinfo = { .name = "cpufreq-cpu0", }; struct device *cpu_dev; struct clk *cpu_clk; struct device_node *np; int ret; if ((!of_machine_is_compatible("calxeda,highbank")) && (!of_machine_is_compatible("calxeda,ecx-2000"))) return -ENODEV; for_each_child_of_node(of_find_node_by_path("/cpus"), np) if (of_get_property(np, "operating-points", NULL)) break; if (!np) { pr_err("failed to find highbank cpufreq node\n"); return -ENOENT; } cpu_dev = get_cpu_device(0); if (!cpu_dev) { pr_err("failed to get highbank cpufreq device\n"); ret = -ENODEV; goto out_put_node; } cpu_dev->of_node = np; cpu_clk = clk_get(cpu_dev, NULL); if (IS_ERR(cpu_clk)) { ret = PTR_ERR(cpu_clk); pr_err("failed to get cpu0 clock: %d\n", ret); goto out_put_node; } ret = clk_notifier_register(cpu_clk, &hb_cpufreq_clk_nb); if (ret) { pr_err("failed to register clk notifier: %d\n", ret); goto out_put_node; } /* Instantiate cpufreq-cpu0 */ platform_device_register_full(&devinfo); out_put_node: of_node_put(np); return ret; } module_init(hb_cpufreq_driver_init); MODULE_AUTHOR("Mark Langsdorf <*****@*****.**>"); MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver"); MODULE_LICENSE("GPL");
/*
 * Attach the Krait mux reprogramming callback to @clk's rate-change
 * notifier chain.  Logs a dev_err on failure and passes the clk core's
 * return code straight back to the caller.
 */
static int krait_notifier_register(struct device *dev, struct clk *clk,
				   struct krait_mux_clk *mux)
{
	int err;

	mux->clk_nb.notifier_call = krait_notifier_cb;

	err = clk_notifier_register(clk, &mux->clk_nb);
	if (err)
		dev_err(dev, "failed to register clock notifier: %d\n", err);

	return err;
}
/*
 * ttc_setup_clocksource - set up one TTC channel as the system clocksource
 * @clk:  input clock for the counter
 * @base: ioremapped base of this TTC channel
 *
 * Starts the counter as a free-running, /32-prescaled 16-bit up-counter,
 * registers it as a clocksource and wires it up as the sched_clock source.
 * Errors are reported with WARN_ON (boot-time best effort).
 */
static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
{
	struct ttc_timer_clocksource *ttccs;
	int err;

	ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL);
	if (WARN_ON(!ttccs))
		return;

	ttccs->ttc.clk = clk;

	err = clk_prepare_enable(ttccs->ttc.clk);
	if (WARN_ON(err)) {
		kfree(ttccs);
		return;
	}

	ttccs->ttc.clk_rate_change_nb.notifier_call =
		ttc_rate_change_clocksource_cb;
	ttccs->ttc.clk_rate_change_nb.next = NULL;
	if (clk_notifier_register(ttccs->ttc.clk,
				  &ttccs->ttc.clk_rate_change_nb))
		pr_warn("Unable to register clock notifier.\n");

	ttccs->ttc.base_addr = base;
	ttccs->cs.name = "ttc_clocksource";
	ttccs->cs.rating = 200;
	ttccs->cs.read = __ttc_clocksource_read;
	ttccs->cs.mask = CLOCKSOURCE_MASK(16);
	ttccs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;

	/*
	 * Setup the clock source counter to be an incrementing counter
	 * with no interrupt and it rolls over at 0xFFFF. Pre-scale
	 * it by 32 also. Let it start running now.
	 */
	__raw_writel(0x0, ttccs->ttc.base_addr + TTC_IER_OFFSET);
	__raw_writel(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
		     ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
	__raw_writel(CNT_CNTRL_RESET,
		     ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);

	err = clocksource_register_hz(&ttccs->cs,
				      clk_get_rate(ttccs->ttc.clk) / PRESCALE);
	if (WARN_ON(err)) {
		/*
		 * Bug fix: the original freed ttccs while the clk framework
		 * still held a notifier pointing into it (use-after-free on
		 * the next rate change) and left the clock enabled.
		 */
		clk_notifier_unregister(ttccs->ttc.clk,
					&ttccs->ttc.clk_rate_change_nb);
		clk_disable_unprepare(ttccs->ttc.clk);
		kfree(ttccs);
		return;
	}

	ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
	setup_sched_clock(ttc_sched_clock_read, 16,
			  clk_get_rate(ttccs->ttc.clk) / PRESCALE);
}
/*
 * gic_clocksource_of_init - DT init for the MIPS GIC counter
 * @node: "mti,gic-timer" device-tree node
 *
 * Determines the counter frequency either from an optional input clock or
 * from the "clock-frequency" property, maps the timer interrupt, then
 * registers the clocksource and clockevent.  When a real clock is present
 * a rate-change notifier keeps gic_frequency up to date.
 *
 * Return: 0 on success, negative errno otherwise.
 * NOTE(review): on failures after clk_prepare_enable() the clock is left
 * enabled/referenced — presumably acceptable for a boot-time timer, but
 * worth confirming.
 */
static int __init gic_clocksource_of_init(struct device_node *node)
{
	struct clk *clk;
	int ret;

	if (!mips_gic_present() || !node->parent ||
	    !of_device_is_compatible(node->parent, "mti,gic")) {
		pr_warn("No DT definition\n");
		return -ENXIO;
	}

	clk = of_clk_get(node, 0);
	if (!IS_ERR(clk)) {
		ret = clk_prepare_enable(clk);
		if (ret < 0) {
			pr_err("Failed to enable clock\n");
			clk_put(clk);
			return ret;
		}

		gic_frequency = clk_get_rate(clk);
	} else if (of_property_read_u32(node, "clock-frequency",
					&gic_frequency)) {
		/* Neither a clock nor a fixed frequency: cannot proceed. */
		pr_err("Frequency not specified\n");
		return -EINVAL;
	}

	gic_timer_irq = irq_of_parse_and_map(node, 0);
	if (!gic_timer_irq) {
		pr_err("IRQ not specified\n");
		return -EINVAL;
	}

	ret = __gic_clocksource_init();
	if (ret)
		return ret;

	ret = gic_clockevent_init();
	if (!ret && !IS_ERR(clk)) {
		/* Keep gic_frequency in sync with future rate changes. */
		if (clk_notifier_register(clk, &gic_clk_nb) < 0)
			pr_warn("Unable to register clock notifier\n");
	}

	/* And finally start the counter */
	clear_gic_config(GIC_CONFIG_COUNTSTOP);

	return 0;
}
/*
 * hsi_clk_notifier_register - forward a driver notifier to the HSI
 * functional clock
 * @clk: HSI internal clock (embedded in struct hsi_internal_clk)
 * @nb:  driver's notifier block; its priority is mirrored onto the
 *       internal notifier
 *
 * Only compiled in with OMAP_HSI_EXAMPLE_PWR_CODE; otherwise a no-op
 * returning 0.
 */
static int hsi_clk_notifier_register(struct clk *clk,
				     struct notifier_block *nb)
{
#ifdef OMAP_HSI_EXAMPLE_PWR_CODE
	struct hsi_internal_clk *hsi_clk;

	if (!clk || !nb)
		return -EINVAL;

	hsi_clk = container_of(clk, struct hsi_internal_clk, clk);
	hsi_clk->drv_nb = nb;
	hsi_clk->nb.priority = nb->priority;

	/* Bug fix: this trace was placed after the return statement and
	 * therefore unreachable; emit it before delegating. */
	pr_debug("%s called\n", __func__);

	/* NOTE: We only want notifications from the functional clock */
	return clk_notifier_register(hsi_clk->childs[1], &hsi_clk->nb);
#endif
	return 0;
}
/** * clock_cooling_register - function to create clock cooling device. * @dev: struct device pointer to the device used as clock cooling device. * @clock_name: string containing the clock used as cooling mechanism. * * This interface function registers the clock cooling device with the name * "thermal-clock-%x". The cooling device is based on clock frequencies. * The struct device is assumed to be capable of DVFS transitions. * The OPP layer is used to fetch and fill the available frequencies for * the referred device. The ordered frequency table is used to control * the clock cooling device cooling states and to limit clock transitions * based on the cooling state requested by the thermal framework. * * Return: a valid struct thermal_cooling_device pointer on success, * on failure, it returns a corresponding ERR_PTR(). */ struct thermal_cooling_device * clock_cooling_register(struct device *dev, const char *clock_name) { struct thermal_cooling_device *cdev; struct clock_cooling_device *ccdev = NULL; char dev_name[THERMAL_NAME_LENGTH]; int ret = 0; ccdev = devm_kzalloc(dev, sizeof(*ccdev), GFP_KERNEL); if (!ccdev) return ERR_PTR(-ENOMEM); ccdev->dev = dev; ccdev->clk = devm_clk_get(dev, clock_name); if (IS_ERR(ccdev->clk)) return ERR_CAST(ccdev->clk); ret = clock_cooling_get_idr(&ccdev->id); if (ret) return ERR_PTR(-EINVAL); snprintf(dev_name, sizeof(dev_name), "thermal-clock-%d", ccdev->id); cdev = thermal_cooling_device_register(dev_name, ccdev, &clock_cooling_ops); if (IS_ERR(cdev)) { release_idr(ccdev->id); return ERR_PTR(-EINVAL); } ccdev->cdev = cdev; ccdev->clk_rate_change_nb.notifier_call = clock_cooling_clock_notifier; /* Assuming someone has already filled the opp table for this device */ ret = dev_pm_opp_init_cpufreq_table(dev, &ccdev->freq_table); if (ret) { release_idr(ccdev->id); return ERR_PTR(ret); } ccdev->clock_state = 0; ccdev->clock_val = clock_cooling_get_frequency(ccdev, 0); clk_notifier_register(ccdev->clk, &ccdev->clk_rate_change_nb); 
return cdev; }
/**
 * cdns_uart_probe - Platform driver probe
 * @pdev: Pointer to the platform device structure
 *
 * Return: 0 on success, negative errno otherwise
 */
static int cdns_uart_probe(struct platform_device *pdev)
{
	int rc, id;
	struct uart_port *port;
	struct resource *res, *res2;
	struct cdns_uart *cdns_uart_data;

	cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
				      GFP_KERNEL);
	if (!cdns_uart_data)
		return -ENOMEM;

	/* "pclk" is the current binding; fall back to the old "aper_clk"
	 * name but nag about it. */
	cdns_uart_data->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(cdns_uart_data->pclk)) {
		cdns_uart_data->pclk = devm_clk_get(&pdev->dev, "aper_clk");
		if (!IS_ERR(cdns_uart_data->pclk))
			dev_err(&pdev->dev,
				"clock name 'aper_clk' is deprecated.\n");
	}
	if (IS_ERR(cdns_uart_data->pclk)) {
		dev_err(&pdev->dev, "pclk clock not found.\n");
		return PTR_ERR(cdns_uart_data->pclk);
	}

	/* Same deprecation dance for the baud reference clock. */
	cdns_uart_data->uartclk = devm_clk_get(&pdev->dev, "uart_clk");
	if (IS_ERR(cdns_uart_data->uartclk)) {
		cdns_uart_data->uartclk = devm_clk_get(&pdev->dev, "ref_clk");
		if (!IS_ERR(cdns_uart_data->uartclk))
			dev_err(&pdev->dev,
				"clock name 'ref_clk' is deprecated.\n");
	}
	if (IS_ERR(cdns_uart_data->uartclk)) {
		dev_err(&pdev->dev, "uart_clk clock not found.\n");
		return PTR_ERR(cdns_uart_data->uartclk);
	}

	rc = clk_prepare_enable(cdns_uart_data->pclk);
	if (rc) {
		dev_err(&pdev->dev, "Unable to enable pclk clock.\n");
		return rc;
	}
	rc = clk_prepare_enable(cdns_uart_data->uartclk);
	if (rc) {
		dev_err(&pdev->dev, "Unable to enable device clock.\n");
		goto err_out_clk_dis_pclk;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		rc = -ENODEV;
		goto err_out_clk_disable;
	}

	res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res2) {
		rc = -ENODEV;
		goto err_out_clk_disable;
	}

#ifdef CONFIG_COMMON_CLK
	/* Baud dividers must be recomputed if the reference clock changes. */
	cdns_uart_data->clk_rate_change_nb.notifier_call =
			cdns_uart_clk_notifier_cb;
	if (clk_notifier_register(cdns_uart_data->uartclk,
				&cdns_uart_data->clk_rate_change_nb))
		dev_warn(&pdev->dev, "Unable to register clock notifier.\n");
#endif

	/* Look for a serialN alias */
	id = of_alias_get_id(pdev->dev.of_node, "serial");
	if (id < 0)
		id = 0;

	/* Initialize the port structure */
	port = cdns_uart_get_port(id);

	if (!port) {
		dev_err(&pdev->dev, "Cannot get uart_port structure\n");
		rc = -ENODEV;
		goto err_out_notif_unreg;
	} else {
		/* Register the port.
		 * This function also registers this device with the tty layer
		 * and triggers invocation of the config_port() entry point.
		 */
		port->mapbase = res->start;
		port->irq = res2->start;
		port->dev = &pdev->dev;
		port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
		port->private_data = cdns_uart_data;
		cdns_uart_data->port = port;
		platform_set_drvdata(pdev, port);
		rc = uart_add_one_port(&cdns_uart_uart_driver, port);
		if (rc) {
			dev_err(&pdev->dev,
				"uart_add_one_port() failed; err=%i\n", rc);
			goto err_out_notif_unreg;
		}
		return 0;
	}

	/* Unwind in reverse acquisition order. */
err_out_notif_unreg:
#ifdef CONFIG_COMMON_CLK
	clk_notifier_unregister(cdns_uart_data->uartclk,
		&cdns_uart_data->clk_rate_change_nb);
#endif
err_out_clk_disable:
	clk_disable_unprepare(cdns_uart_data->uartclk);
err_out_clk_dis_pclk:
	clk_disable_unprepare(cdns_uart_data->pclk);

	return rc;
}
int quadd_power_clk_start(void) { struct power_clk_source *s; int status; struct timer_list *timer = &power_ctx.timer; struct quadd_parameters *param = &power_ctx.quadd_ctx->param; if (param->power_rate_freq == 0) { pr_info("power_clk is not started\n"); return 0; } #ifdef CONFIG_COMMON_CLK power_ctx.period = 0; #else power_ctx.period = MSEC_PER_SEC / param->power_rate_freq; #endif pr_info("power_clk: start, freq: %d\n", param->power_rate_freq); /* setup gpu frequency */ s = &power_ctx.gpu; s->clkp = clk_get_sys("3d", NULL); if (s->clkp) { #ifdef CONFIG_COMMON_CLK status = clk_notifier_register(s->clkp, s->nb); if (status < 0) { pr_err("error: could not setup gpu freq\n"); return status; } clk_put(s->clkp); #endif reset_data(s); atomic_set(&s->active, 1); } else { pr_err("error: could not setup gpu freq\n"); atomic_set(&s->active, 0); } /* setup emc frequency */ s = &power_ctx.emc; s->clkp = clk_get_sys("cpu", "emc"); if (s->clkp) { #ifdef CONFIG_COMMON_CLK status = clk_notifier_register(s->clkp, s->nb); if (status < 0) { pr_err("error: could not setup emc freq\n"); return status; } clk_put(s->clkp); #endif reset_data(s); atomic_set(&s->active, 1); } else { pr_err("error: could not setup emc freq\n"); atomic_set(&s->active, 0); } /* setup cpu frequency notifier */ s = &power_ctx.cpu; status = register_cpu_notifier(&s->nb); if (status < 0) { pr_err("error: could not setup cpu freq\n"); return status; } reset_data(s); if (power_ctx.period > 0) { init_timer(timer); timer->function = power_clk_timer; timer->expires = jiffies + msecs_to_jiffies(power_ctx.period); timer->data = 0; add_timer(timer); } atomic_set(&s->active, 1); return 0; }
/*
 * meson8b_clkc_probe - register the Meson8b clock controller
 * @pdev: clock-controller platform device
 *
 * Maps the controller registers, patches the static clock descriptor
 * tables (PLLs, MPLLs, gates, muxes, dividers) with the runtime base
 * address, registers every clock and finally installs the CPU clock
 * notifier and the OF clock provider.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int meson8b_clkc_probe(struct platform_device *pdev)
{
	void __iomem *clk_base;
	int ret, clkid, i;
	struct clk_hw *parent_hw;
	struct clk *parent_clk;
	struct device *dev = &pdev->dev;

	/* Generic clocks and PLLs */
	clk_base = of_iomap(dev->of_node, 1);
	if (!clk_base) {
		pr_err("%s: Unable to map clk base\n", __func__);
		return -ENXIO;
	}

	/* Populate base address for PLLs */
	for (i = 0; i < ARRAY_SIZE(meson8b_clk_plls); i++)
		meson8b_clk_plls[i]->base = clk_base;

	/* Populate base address for MPLLs */
	for (i = 0; i < ARRAY_SIZE(meson8b_clk_mplls); i++)
		meson8b_clk_mplls[i]->base = clk_base;

	/* Populate the base address for CPU clk */
	meson8b_cpu_clk.base = clk_base;

	/*
	 * The static tables store register OFFSETS in their reg fields;
	 * turn them into real addresses by adding the mapped base.
	 */
	/* Populate base address for gates */
	for (i = 0; i < ARRAY_SIZE(meson8b_clk_gates); i++)
		meson8b_clk_gates[i]->reg = clk_base +
			(u32)meson8b_clk_gates[i]->reg;

	/* Populate base address for muxes */
	for (i = 0; i < ARRAY_SIZE(meson8b_clk_muxes); i++)
		meson8b_clk_muxes[i]->reg = clk_base +
			(u32)meson8b_clk_muxes[i]->reg;

	/* Populate base address for dividers */
	for (i = 0; i < ARRAY_SIZE(meson8b_clk_dividers); i++)
		meson8b_clk_dividers[i]->reg = clk_base +
			(u32)meson8b_clk_dividers[i]->reg;

	/*
	 * register all clks
	 * CLKID_UNUSED = 0, so skip it and start with CLKID_XTAL = 1
	 */
	for (clkid = CLKID_XTAL; clkid < CLK_NR_CLKS; clkid++) {
		/* array might be sparse */
		if (!meson8b_hw_onecell_data.hws[clkid])
			continue;

		/* FIXME convert to devm_clk_register */
		ret = devm_clk_hw_register(dev,
					   meson8b_hw_onecell_data.hws[clkid]);
		if (ret)
			goto iounmap;
	}

	/*
	 * Register CPU clk notifier
	 *
	 * FIXME this is wrong for a lot of reasons. First, the muxes should be
	 * struct clk_hw objects. Second, we shouldn't program the muxes in
	 * notifier handlers. The tricky programming sequence will be handled
	 * by the forthcoming coordinated clock rates mechanism once that
	 * feature is released.
	 *
	 * Furthermore, looking up the parent this way is terrible. At some
	 * point we will stop allocating a default struct clk when registering
	 * a new clk_hw, and this hack will no longer work. Releasing the ccr
	 * feature before that time solves the problem :-)
	 */
	parent_hw = clk_hw_get_parent(&meson8b_cpu_clk.hw);
	parent_clk = parent_hw->clk;
	ret = clk_notifier_register(parent_clk, &meson8b_cpu_clk.clk_nb);
	if (ret) {
		pr_err("%s: failed to register clock notifier for cpu_clk\n",
		       __func__);
		goto iounmap;
	}

	return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
				      &meson8b_hw_onecell_data);

iounmap:
	iounmap(clk_base);
	return ret;
}
/*
 * global_timer_of_register - DT init for the ARM global timer
 * @np: "arm,cortex-a9-global-timer" device-tree node
 *
 * Maps the timer, enables its clock, registers per-cpu clockevents plus a
 * clocksource, and hooks a clk rate-change notifier so the clockevents can
 * be reconfigured if the input clock changes.
 */
static void __init global_timer_of_register(struct device_node *np)
{
	struct clk *gt_clk;
	int err = 0;

	/*
	 * In r2p0 the comparators for each processor with the global timer
	 * fire when the timer value is greater than or equal to. In previous
	 * revisions the comparators fired when the timer value was equal to.
	 */
	if ((read_cpuid_id() & 0xf0000f) < 0x200000) {
		pr_warn("global-timer: non support for this cpu version.\n");
		return;
	}

	gt_ppi = irq_of_parse_and_map(np, 0);
	if (!gt_ppi) {
		pr_warn("global-timer: unable to parse irq\n");
		return;
	}

	gt_base = of_iomap(np, 0);
	if (!gt_base) {
		pr_warn("global-timer: invalid base address\n");
		return;
	}

	gt_clk = of_clk_get(np, 0);
	if (!IS_ERR(gt_clk)) {
		err = clk_prepare_enable(gt_clk);
		if (err)
			goto out_unmap;
	} else {
		pr_warn("global-timer: clk not found\n");
		err = -EINVAL;
		goto out_unmap;
	}

	gt_clk_rate = clk_get_rate(gt_clk);
	gt_evt = alloc_percpu(struct clock_event_device);
	if (!gt_evt) {
		pr_warn("global-timer: can't allocate memory\n");
		err = -ENOMEM;
		goto out_clk;
	}

	clk_rate_change_nb.notifier_call = arm_global_timer_clockevent_cb;
	clk_rate_change_nb.next = NULL;
	if (clk_notifier_register(gt_clk, &clk_rate_change_nb)) {
		pr_warn("Unable to register clock notifier.\n");
	}

	err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt,
				 "gt", gt_evt);
	if (err) {
		pr_warn("global-timer: can't register interrupt %d (%d)\n",
			gt_ppi, err);
		goto out_free;
	}

	/*
	 * Bug fix: the source text had been mangled to
	 * "register_cpu_notifier(>_cpu_nb)" — an HTML-entity corruption of
	 * the address-of expression restored below.
	 */
	err = register_cpu_notifier(&gt_cpu_nb);
	if (err) {
		pr_warn("global-timer: unable to register cpu notifier.\n");
		goto out_irq;
	}

	/* Immediately configure the timer on the boot CPU */
	gt_clocksource_init();
	gt_clockevents_init(this_cpu_ptr(gt_evt));

	return;

out_irq:
	free_percpu_irq(gt_ppi, gt_evt);
out_free:
	/*
	 * Also drop the clk notifier registered above so it does not fire
	 * after the timer setup has been torn down.
	 */
	clk_notifier_unregister(gt_clk, &clk_rate_change_nb);
	free_percpu(gt_evt);
out_clk:
	clk_disable_unprepare(gt_clk);
out_unmap:
	iounmap(gt_base);
	WARN(err, "ARM Global timer register failed (%d)\n", err);
}
/*
 * omap34xx_bridge_probe - probe for the OMAP3 DSP bridge
 * @pdev: bridge platform device (platform_data carries mempool config)
 *
 * Creates the character device and udev class, initializes the trace and
 * services layers, seeds the registry (autostart image, SHM size, word
 * swap), optionally registers an iva2 clock notifier for DVFS, and finally
 * boots the DSP side via DSP_Init().
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int __devinit omap34xx_bridge_probe(struct platform_device *pdev)
{
	int status;
	u32 initStatus;
	u32 temp;
	dev_t dev = 0;
	int result;
	struct dspbridge_platform_data *pdata = pdev->dev.platform_data;

	omap_dspbridge_dev = pdev;

	/* use 2.6 device model */
	result = alloc_chrdev_region(&dev, 0, 1, driver_name);
	if (result < 0) {
		pr_err("%s: Can't get major %d\n", __func__, driver_major);
		goto err1;
	}

	driver_major = MAJOR(dev);

	cdev_init(&bridge_cdev, &bridge_fops);
	bridge_cdev.owner = THIS_MODULE;

	status = cdev_add(&bridge_cdev, dev, 1);
	if (status) {
		pr_err("%s: Failed to add bridge device\n", __func__);
		/*
		 * Bug fix: the err2 path returned 'result', which at this
		 * point still holds the *successful* alloc_chrdev_region()
		 * value (0); propagate the cdev_add() error instead.
		 */
		result = status;
		goto err2;
	}

	/* udev support */
	bridge_class = class_create(THIS_MODULE, "ti_bridge");

	if (IS_ERR(bridge_class))
		pr_err("%s: Error creating bridge class\n", __func__);

	device_create(bridge_class, NULL, MKDEV(driver_major, 0),
		      NULL, "DspBridge");

	bridge_create_sysfs();

	GT_init();
	GT_create(&driverTrace, "LD");

#ifdef CONFIG_BRIDGE_DEBUG
	if (GT_str)
		GT_set(GT_str);
#elif defined(DDSP_DEBUG_PRODUCT) && GT_TRACE
	GT_set("**=67");
#endif

#ifdef CONFIG_PM
	/* Initialize the wait queue */
	bridge_suspend_data.suspended = 0;
	init_waitqueue_head(&bridge_suspend_data.suspend_wq);
#endif

	SERVICES_Init();

	/* Autostart flag.  This should be set to true if the DSP image should
	 * be loaded and run during bridge module initialization */
	if (base_img) {
		temp = true;
		REG_SetValue(AUTOSTART, (u8 *)&temp, sizeof(temp));
		REG_SetValue(DEFEXEC, (u8 *)base_img, strlen(base_img) + 1);
	} else {
		temp = false;
		REG_SetValue(AUTOSTART, (u8 *)&temp, sizeof(temp));
		REG_SetValue(DEFEXEC, (u8 *) "\0", (u32)2);
	}

	if (shm_size >= 0x10000) {	/* 64 KB */
		initStatus = REG_SetValue(SHMSIZE, (u8 *)&shm_size,
					  sizeof(shm_size));
	} else {
		initStatus = DSP_EINVALIDARG;
		status = -1;
		pr_err("%s: SHM size must be at least 64 KB\n", __func__);
	}
	GT_1trace(driverTrace, GT_7CLASS,
		  "requested shm_size = 0x%x\n", shm_size);

	if (pdata->phys_mempool_base && pdata->phys_mempool_size) {
		phys_mempool_base = pdata->phys_mempool_base;
		phys_mempool_size = pdata->phys_mempool_size;
	}

	GT_1trace(driverTrace, GT_7CLASS, "phys_mempool_base = 0x%x \n",
		  phys_mempool_base);
	/*
	 * Bug fix: this trace printed phys_mempool_base again under the
	 * "phys_mempool_size" label (copy-paste); print the size.
	 */
	GT_1trace(driverTrace, GT_7CLASS, "phys_mempool_size = 0x%x\n",
		  phys_mempool_size);

	if ((phys_mempool_base > 0x0) && (phys_mempool_size > 0x0))
		MEM_ExtPhysPoolInit(phys_mempool_base, phys_mempool_size);

	if (tc_wordswapon) {
		GT_0trace(driverTrace, GT_7CLASS, "TC Word Swap is enabled\n");
		REG_SetValue(TCWORDSWAP, (u8 *)&tc_wordswapon,
			     sizeof(tc_wordswapon));
	} else {
		GT_0trace(driverTrace, GT_7CLASS, "TC Word Swap is disabled\n");
		REG_SetValue(TCWORDSWAP, (u8 *)&tc_wordswapon,
			     sizeof(tc_wordswapon));
	}

	if (DSP_SUCCEEDED(initStatus)) {
#ifdef CONFIG_BRIDGE_DVFS
		clk_handle = clk_get(NULL, "iva2_ck");
		if (!clk_handle)
			pr_err("%s: clk_get failed to get iva2_ck\n",
			       __func__);

		if (clk_notifier_register(clk_handle, &iva_clk_notifier))
			pr_err("%s: clk_notifier_register failed for iva2_ck\n",
			       __func__);

		if (!min_dsp_freq)
			min_dsp_freq = pdata->mpu_min_speed;
#endif
		driverContext = DSP_Init(&initStatus);
		if (DSP_FAILED(initStatus)) {
			status = -1;
			pr_err("DSP Bridge driver initialization failed\n");
		} else {
			pr_info("DSP Bridge driver loaded\n");
		}
	}

#ifdef CONFIG_BRIDGE_RECOVERY
	bridge_rec_queue = create_workqueue("bridge_rec_queue");
	INIT_WORK(&bridge_recovery_work, bridge_recover);
	INIT_COMPLETION(bridge_comp);
#endif

	DBC_Assert(status == 0);
	DBC_Assert(DSP_SUCCEEDED(initStatus));

	return 0;

err2:
	unregister_chrdev_region(dev, 1);
err1:
	return result;
}
/** * xi2cps_probe - Platform registration call * @pdev: Handle to the platform device structure * * Returns zero on success, negative error otherwise * * This function does all the memory allocation and registration for the i2c * device. User can modify the address mode to 10 bit address mode using the * ioctl call with option I2C_TENBIT. */ static int xi2cps_probe(struct platform_device *pdev) { struct resource *r_mem = NULL; struct xi2cps *id; int ret = 0; const unsigned int *prop; /* * Allocate memory for xi2cps structure. * Initialize the structure to zero and set the platform data. * Obtain the resource base address from platform data and remap it. * Get the irq resource from platform data.Initialize the adapter * structure members and also xi2cps structure. */ id = devm_kzalloc(&pdev->dev, sizeof(*id), GFP_KERNEL); if (!id) return -ENOMEM; platform_set_drvdata(pdev, id); r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); id->membase = devm_ioremap_resource(&pdev->dev, r_mem); if (IS_ERR(id->membase)) { dev_err(&pdev->dev, "ioremap failed\n"); return PTR_ERR(id->membase); } id->irq = platform_get_irq(pdev, 0); prop = of_get_property(pdev->dev.of_node, "bus-id", NULL); if (prop) { id->adap.nr = be32_to_cpup(prop); } else { dev_err(&pdev->dev, "couldn't determine bus-id\n"); return -ENXIO; } id->adap.dev.of_node = pdev->dev.of_node; id->adap.algo = (struct i2c_algorithm *) &xi2cps_algo; id->adap.timeout = 0x1F; /* Default timeout value */ id->adap.retries = 3; /* Default retry value. 
*/ id->adap.algo_data = id; id->adap.dev.parent = &pdev->dev; snprintf(id->adap.name, sizeof(id->adap.name), "XILINX I2C at %08lx", (unsigned long)r_mem->start); id->cur_timeout = id->adap.timeout; id->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(id->clk)) { dev_err(&pdev->dev, "input clock not found.\n"); return PTR_ERR(id->clk); } ret = clk_prepare_enable(id->clk); if (ret) { dev_err(&pdev->dev, "Unable to enable clock.\n"); return ret; } id->clk_rate_change_nb.notifier_call = xi2cps_clk_notifier_cb; id->clk_rate_change_nb.next = NULL; if (clk_notifier_register(id->clk, &id->clk_rate_change_nb)) dev_warn(&pdev->dev, "Unable to register clock notifier.\n"); id->input_clk = (unsigned int)clk_get_rate(id->clk); prop = of_get_property(pdev->dev.of_node, "i2c-clk", NULL); if (prop) { id->i2c_clk = be32_to_cpup(prop); } else { ret = -ENXIO; dev_err(&pdev->dev, "couldn't determine i2c-clk\n"); goto err_clk_dis; } /* * Set Master Mode,Normal addressing mode (7 bit address), * Enable Transmission of Ack in Control Register. * Set the timeout and I2C clock and request the IRQ(ISR mapped). * Call to the i2c_add_numbered_adapter registers the adapter. */ xi2cps_writereg(0x0000000E, XI2CPS_CR_OFFSET); xi2cps_writereg(id->adap.timeout, XI2CPS_TIME_OUT_OFFSET); ret = xi2cps_setclk(id->i2c_clk, id); if (ret < 0) { dev_err(&pdev->dev, "invalid SCL clock: %dkHz\n", id->i2c_clk); ret = -EINVAL; goto err_clk_dis; } ret = devm_request_irq(&pdev->dev, id->irq, xi2cps_isr, 0, DRIVER_NAME, id); if (ret) { dev_err(&pdev->dev, "cannot get irq %d\n", id->irq); goto err_clk_dis; } ret = i2c_add_numbered_adapter(&id->adap); if (ret < 0) { dev_err(&pdev->dev, "reg adap failed: %d\n", ret); goto err_clk_dis; } dev_info(&pdev->dev, "%d kHz mmio %08lx irq %d\n", id->i2c_clk/1000, (unsigned long)r_mem->start, id->irq); return 0; err_clk_dis: clk_disable_unprepare(id->clk); return ret; }
/**
 * xuartps_probe - Platform driver probe
 * @pdev: Pointer to the platform device structure
 *
 * Returns 0 on success, negative error otherwise
 */
static int __devinit xuartps_probe(struct platform_device *pdev)
{
	int rc;
	struct uart_port *port;
	struct resource *res, *res2;
	unsigned int clk = 0;
	int ret = 0;
	int id = 0;
	struct xuartps *xuartps;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res2)
		return -ENODEV;

	/* Look for a serialN alias */
	id = of_alias_get_id(pdev->dev.of_node, "serial");
	if (id < 0) {
		dev_warn(&pdev->dev, "failed to get alias id, errno %d\n", id);
		id = 0;
	}
	port = xuartps_get_port(id);

	xuartps = kzalloc(sizeof(*xuartps), GFP_KERNEL);
	/*
	 * Bug fix: the allocation result was dereferenced immediately
	 * below without a NULL check (kernel OOM would oops).  kzalloc
	 * also guarantees the struct starts out zeroed.
	 */
	if (!xuartps)
		return -ENOMEM;

	/* IRQ 59 identifies controller 0 on this SoC — TODO confirm against
	 * the interrupt map; derived clock names depend on it. */
	if (res2->start == 59)
		xuartps->uartnum = 0;
	else
		xuartps->uartnum = 1;

	if (xuartps->uartnum)
		xuartps->aperclk = clk_get_sys("UART1_APER", NULL);
	else
		xuartps->aperclk = clk_get_sys("UART0_APER", NULL);
	if (IS_ERR(xuartps->aperclk)) {
		dev_err(&pdev->dev, "APER clock not found.\n");
		ret = PTR_ERR(xuartps->aperclk);
		goto err_out_free;
	}
	if (xuartps->uartnum)
		xuartps->devclk = clk_get_sys("UART1", NULL);
	else
		xuartps->devclk = clk_get_sys("UART0", NULL);
	if (IS_ERR(xuartps->devclk)) {
		dev_err(&pdev->dev, "Device clock not found.\n");
		ret = PTR_ERR(xuartps->devclk);
		goto err_out_clk_put_aper;
	}

	ret = clk_prepare_enable(xuartps->aperclk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable APER clock.\n");
		goto err_out_clk_put;
	}
	ret = clk_prepare_enable(xuartps->devclk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable device clock.\n");
		goto err_out_clk_dis_aper;
	}

	clk = (unsigned int)clk_get_rate(xuartps->devclk);

	/* Baud dividers must be recomputed if the device clock changes. */
	xuartps->clk_rate_change_nb.notifier_call = xuartps_clk_notifier_cb;
	xuartps->clk_rate_change_nb.next = NULL;
	if (clk_notifier_register(xuartps->devclk,
				  &xuartps->clk_rate_change_nb))
		dev_warn(&pdev->dev, "Unable to register clock notifier.\n");

	/* Initialize the port structure */
	if (!port) {
		dev_err(&pdev->dev, "Cannot get uart_port structure\n");
		ret = -ENODEV;
		goto err_out_clk_dis;
	} else {
		/* Register the port.
		 * This function also registers this device with the tty layer
		 * and triggers invocation of the config_port() entry point.
		 */
		port->mapbase = res->start;
		port->irq = res2->start;
		port->dev = &pdev->dev;
		port->uartclk = clk;
		port->private_data = xuartps;
		xuartps->port = port;
		dev_set_drvdata(&pdev->dev, port);
		rc = uart_add_one_port(&xuartps_uart_driver, port);
		if (rc) {
			dev_err(&pdev->dev,
				"uart_add_one_port() failed; err=%i\n", rc);
			dev_set_drvdata(&pdev->dev, NULL);
			port->private_data = NULL;
			xuartps->port = NULL;
			ret = rc;
			goto err_out_clk_dis;
		}
		return 0;
	}

	/* Unwind in reverse acquisition order. */
err_out_clk_dis:
	clk_notifier_unregister(xuartps->devclk, &xuartps->clk_rate_change_nb);
	clk_disable_unprepare(xuartps->devclk);
err_out_clk_dis_aper:
	clk_disable_unprepare(xuartps->aperclk);
err_out_clk_put:
	clk_put(xuartps->devclk);
err_out_clk_put_aper:
	clk_put(xuartps->aperclk);
err_out_free:
	kfree(xuartps);

	return ret;
}
/*
 * Register a fractional-divider branch (optionally gated) as a composite
 * clock, and optionally an associated downstream mux described by @child.
 *
 * The gate is only instantiated when @gate_offset >= 0; the fractional
 * divider always uses a 16/16 bit numerator/denominator layout at
 * @muxdiv_offset.  When a @child mux is given, a rate-change notifier is
 * installed on the fraction divider if (and only if) this clock appears
 * among the mux's parents.
 *
 * Returns the registered composite clock, or an ERR_PTR on failure.
 */
static struct clk *rockchip_clk_register_frac_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, struct rockchip_clk_branch *child,
		spinlock_t *lock)
{
	struct rockchip_clk_frac *frac;
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	/* The divider register offset is mandatory. */
	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	/* Only a mux child makes sense downstream of a fractional divider. */
	if (child && child->branch_type != branch_mux) {
		pr_err("%s: fractional child clock for %s can only be a mux\n",
		       __func__, name);
		return ERR_PTR(-EINVAL);
	}

	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
	if (!frac)
		return ERR_PTR(-ENOMEM);

	/* Optional gate component of the composite. */
	if (gate_offset >= 0) {
		gate = &frac->gate;
		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	/* Fractional divider: numerator in bits [31:16], denominator [15:0]. */
	div = &frac->div;
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div_ops = &clk_fractional_divider_ops;

	/* No mux component here; divider (+ optional gate) only. */
	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags | CLK_SET_RATE_UNGATE);
	if (IS_ERR(clk)) {
		kfree(frac);
		return clk;
	}

	if (child) {
		struct clk_mux *frac_mux = &frac->mux;
		struct clk_init_data init;
		struct clk *mux_clk;
		int i, ret;

		/* Find our own position among the child mux's parents. */
		frac->mux_frac_idx = -1;
		for (i = 0; i < child->num_parents; i++) {
			if (!strcmp(name, child->parent_names[i])) {
				pr_debug("%s: found fractional parent in mux at pos %d\n",
					 __func__, i);
				frac->mux_frac_idx = i;
				break;
			}
		}

		frac->mux_ops = &clk_mux_ops;
		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;

		frac_mux->reg = base + child->muxdiv_offset;
		frac_mux->shift = child->mux_shift;
		frac_mux->mask = BIT(child->mux_width) - 1;
		frac_mux->flags = child->mux_flags;
		frac_mux->lock = lock;
		frac_mux->hw.init = &init;

		/* init only needs to live until clk_register() returns. */
		init.name = child->name;
		init.flags = child->flags | CLK_SET_RATE_PARENT;
		init.ops = frac->mux_ops;
		init.parent_names = child->parent_names;
		init.num_parents = child->num_parents;

		mux_clk = clk_register(NULL, &frac_mux->hw);
		/*
		 * NOTE(review): on mux registration failure the composite
		 * clock is still returned as a success; the child is simply
		 * dropped - confirm this best-effort behavior is intended.
		 */
		if (IS_ERR(mux_clk))
			return clk;

		rockchip_clk_add_lookup(mux_clk, child->id);

		/* notifier on the fraction divider to catch rate changes */
		if (frac->mux_frac_idx >= 0) {
			ret = clk_notifier_register(clk, &frac->clk_nb);
			if (ret)
				pr_err("%s: failed to register clock notifier for %s\n",
				       __func__, name);
		} else {
			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
				__func__, name, child->name);
		}
	}

	return clk;
}
/**
 * xuartps_probe - Platform driver probe
 * @pdev: Pointer to the platform device structure
 *
 * Acquires the per-instance clocks (devm-managed), maps the MMIO/IRQ
 * resources, optionally registers a rate-change notifier on the
 * reference clock, and hands the port to the serial core via
 * uart_add_one_port().
 *
 * Returns 0 on success, negative error otherwise
 */
static int xuartps_probe(struct platform_device *pdev)
{
	int rc;
	struct uart_port *port;
	struct resource *res, *res2;
	struct xuartps *xuartps_data;

	/* devm allocation: freed automatically on probe failure/removal. */
	xuartps_data = devm_kzalloc(&pdev->dev, sizeof(*xuartps_data),
			GFP_KERNEL);
	if (!xuartps_data)
		return -ENOMEM;

	/* Clocks are looked up by connection id from the device tree. */
	xuartps_data->aperclk = devm_clk_get(&pdev->dev, "aper_clk");
	if (IS_ERR(xuartps_data->aperclk)) {
		dev_err(&pdev->dev, "aper_clk clock not found.\n");
		return PTR_ERR(xuartps_data->aperclk);
	}
	xuartps_data->refclk = devm_clk_get(&pdev->dev, "ref_clk");
	if (IS_ERR(xuartps_data->refclk)) {
		dev_err(&pdev->dev, "ref_clk clock not found.\n");
		return PTR_ERR(xuartps_data->refclk);
	}

	rc = clk_prepare_enable(xuartps_data->aperclk);
	if (rc) {
		dev_err(&pdev->dev, "Unable to enable APER clock.\n");
		return rc;
	}
	rc = clk_prepare_enable(xuartps_data->refclk);
	if (rc) {
		dev_err(&pdev->dev, "Unable to enable device clock.\n");
		goto err_out_clk_dis_aper;
	}

	/* Register window and interrupt are both required. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		rc = -ENODEV;
		goto err_out_clk_disable;
	}

	res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res2) {
		rc = -ENODEV;
		goto err_out_clk_disable;
	}

#ifdef CONFIG_COMMON_CLK
	/* Follow refclk rate changes; failure here is non-fatal. */
	xuartps_data->clk_rate_change_nb.notifier_call = xuartps_clk_notifier_cb;
	if (clk_notifier_register(xuartps_data->refclk,
				&xuartps_data->clk_rate_change_nb))
		dev_warn(&pdev->dev, "Unable to register clock notifier.\n");
#endif

	/* Initialize the port structure */
	port = xuartps_get_port();

	if (!port) {
		dev_err(&pdev->dev, "Cannot get uart_port structure\n");
		rc = -ENODEV;
		goto err_out_notif_unreg;
	} else {
		/* Register the port.
		 * This function also registers this device with the tty layer
		 * and triggers invocation of the config_port() entry point.
		 */
		port->mapbase = res->start;
		port->irq = res2->start;
		port->dev = &pdev->dev;
		port->uartclk = clk_get_rate(xuartps_data->refclk);
		port->private_data = xuartps_data;
		xuartps_data->port = port;
		platform_set_drvdata(pdev, port);
		rc = uart_add_one_port(&xuartps_uart_driver, port);
		if (rc) {
			dev_err(&pdev->dev,
				"uart_add_one_port() failed; err=%i\n", rc);
			goto err_out_notif_unreg;
		}
		return 0;
	}

	/* Unwind in reverse order of acquisition. */
err_out_notif_unreg:
#ifdef CONFIG_COMMON_CLK
	clk_notifier_unregister(xuartps_data->refclk,
			&xuartps_data->clk_rate_change_nb);
#endif
err_out_clk_disable:
	clk_disable_unprepare(xuartps_data->refclk);
err_out_clk_dis_aper:
	clk_disable_unprepare(xuartps_data->aperclk);
	return rc;
}
/*
 * Register a Rockchip CPU clock.
 *
 * The cpuclk presents itself with a single parent (the "main" mux input)
 * and keeps the alternate parent permanently enabled so the notifier
 * callback can switch the CPU to it during rate changes of the main
 * parent.  Rate setting is only permitted when a rate table (@rates,
 * @nrates) is supplied.
 *
 * Returns the registered clock, or an ERR_PTR on failure.
 */
struct clk *rockchip_clk_register_cpuclk(const char *name,
			const char *const *parent_names, u8 num_parents,
			const struct rockchip_cpuclk_reg_data *reg_data,
			const struct rockchip_cpuclk_rate_table *rates,
			int nrates, void __iomem *reg_base, spinlock_t *lock)
{
	struct rockchip_cpuclk *cpuclk;
	struct clk_init_data init;
	struct clk *clk, *cclk;
	int ret;

	/* Need both a main and an alternate parent. */
	if (num_parents < 2) {
		pr_err("%s: needs at least two parent clocks\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
	if (!cpuclk)
		return ERR_PTR(-ENOMEM);

	/* Expose only the main parent to the clock framework. */
	init.name = name;
	init.parent_names = &parent_names[reg_data->mux_core_main];
	init.num_parents = 1;
	init.ops = &rockchip_cpuclk_ops;

	/* only allow rate changes when we have a rate table */
	init.flags = (nrates > 0) ? CLK_SET_RATE_PARENT : 0;

	/* disallow automatic parent changes by ccf */
	init.flags |= CLK_SET_RATE_NO_REPARENT;

	init.flags |= CLK_GET_RATE_NOCACHE;

	cpuclk->reg_base = reg_base;
	cpuclk->lock = lock;
	cpuclk->reg_data = reg_data;
	cpuclk->clk_nb.notifier_call = rockchip_cpuclk_notifier_cb;
	cpuclk->hw.init = &init;

	cpuclk->alt_parent = __clk_lookup(parent_names[reg_data->mux_core_alt]);
	if (!cpuclk->alt_parent) {
		pr_err("%s: could not lookup alternate parent: (%d)\n",
		       __func__, reg_data->mux_core_alt);
		ret = -EINVAL;
		goto free_cpuclk;
	}

	/* Keep the alternate parent running for rate-change switchover. */
	ret = clk_prepare_enable(cpuclk->alt_parent);
	if (ret) {
		pr_err("%s: could not enable alternate parent\n",
		       __func__);
		goto free_cpuclk;
	}

	clk = __clk_lookup(parent_names[reg_data->mux_core_main]);
	if (!clk) {
		pr_err("%s: could not lookup parent clock: (%d) %s\n",
		       __func__, reg_data->mux_core_main,
		       parent_names[reg_data->mux_core_main]);
		ret = -EINVAL;
		goto free_alt_parent;
	}

	/* Watch the main parent so rate changes can be intercepted. */
	ret = clk_notifier_register(clk, &cpuclk->clk_nb);
	if (ret) {
		pr_err("%s: failed to register clock notifier for %s\n",
				__func__, name);
		goto free_alt_parent;
	}

	/* Private copy of the rate table; caller's array may be transient. */
	if (nrates > 0) {
		cpuclk->rate_count = nrates;
		cpuclk->rate_table = kmemdup(rates,
					     sizeof(*rates) * nrates,
					     GFP_KERNEL);
		if (!cpuclk->rate_table) {
			pr_err("%s: could not allocate memory for cpuclk rates\n",
			       __func__);
			ret = -ENOMEM;
			goto unregister_notifier;
		}
	}

	cclk = clk_register(NULL, &cpuclk->hw);
	if (IS_ERR(cclk)) {
		pr_err("%s: could not register cpuclk %s\n", __func__,	name);
		ret = PTR_ERR(cclk);
		goto free_rate_table;
	}

	return cclk;

	/* Error unwinding, reverse order of acquisition. */
free_rate_table:
	kfree(cpuclk->rate_table);
unregister_notifier:
	clk_notifier_unregister(clk, &cpuclk->clk_nb);
free_alt_parent:
	clk_disable_unprepare(cpuclk->alt_parent);
free_cpuclk:
	kfree(cpuclk);
	return ERR_PTR(ret);
}
/*
 * Set up timer 0 as a clock-event device.
 *
 * Enables the parent clock, installs a rate-change notifier so the
 * clockevent can follow clock rate changes, unmasks the timer 0
 * interrupt in hardware, registers the clockevent and finally requests
 * the IRQ.  On failure everything acquired so far is released.
 *
 * Returns 0 on success, negative error otherwise.
 */
static int __init sun5i_setup_clockevent(struct device_node *node, void __iomem *base,
					 struct clk *clk, int irq)
{
	struct sun5i_timer_clkevt *ce;
	unsigned long rate;
	int ret;
	u32 val;

	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
	if (!ce)
		return -ENOMEM;

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("Couldn't enable parent clock\n");
		goto err_free;
	}

	rate = clk_get_rate(clk);

	ce->timer.base = base;
	ce->timer.ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
	ce->timer.clk = clk;
	/* Re-program the timer when the parent clock rate changes. */
	ce->timer.clk_rate_cb.notifier_call = sun5i_rate_cb_clkevt;
	ce->timer.clk_rate_cb.next = NULL;

	ret = clk_notifier_register(clk, &ce->timer.clk_rate_cb);
	if (ret) {
		pr_err("Unable to register clock notifier.\n");
		goto err_disable_clk;
	}

	ce->clkevt.name = node->name;
	ce->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	ce->clkevt.set_next_event = sun5i_clkevt_next_event;
	ce->clkevt.set_mode = sun5i_clkevt_mode;
	ce->clkevt.rating = 340;
	ce->clkevt.irq = irq;
	ce->clkevt.cpumask = cpu_possible_mask;

	/* Enable timer0 interrupt */
	val = readl(base + TIMER_IRQ_EN_REG);
	writel(val | TIMER_IRQ_EN(0), base + TIMER_IRQ_EN_REG);

	clockevents_config_and_register(&ce->clkevt, rate,
					TIMER_SYNC_TICKS, 0xffffffff);

	/* IRQF_TIMER | IRQF_IRQPOLL: this IRQ drives the system tick. */
	ret = request_irq(irq, sun5i_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
			  "sun5i_timer0", ce);
	if (ret) {
		pr_err("Unable to register interrupt\n");
		goto err_remove_notifier;
	}

	return 0;

err_remove_notifier:
	clk_notifier_unregister(clk, &ce->timer.clk_rate_cb);
err_disable_clk:
	clk_disable_unprepare(clk);
err_free:
	kfree(ce);
	return ret;
}
/** * cdns_i2c_probe - Platform registration call * @pdev: Handle to the platform device structure * * This function does all the memory allocation and registration for the i2c * device. User can modify the address mode to 10 bit address mode using the * ioctl call with option I2C_TENBIT. * * Return: 0 on success, negative error otherwise */ static int cdns_i2c_probe(struct platform_device *pdev) { struct resource *r_mem; struct cdns_i2c *id; int ret; const struct of_device_id *match; id = devm_kzalloc(&pdev->dev, sizeof(*id), GFP_KERNEL); if (!id) return -ENOMEM; id->dev = &pdev->dev; platform_set_drvdata(pdev, id); match = of_match_node(cdns_i2c_of_match, pdev->dev.of_node); if (match && match->data) { const struct cdns_platform_data *data = match->data; id->quirks = data->quirks; } r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); id->membase = devm_ioremap_resource(&pdev->dev, r_mem); if (IS_ERR(id->membase)) return PTR_ERR(id->membase); id->irq = platform_get_irq(pdev, 0); id->adap.owner = THIS_MODULE; id->adap.dev.of_node = pdev->dev.of_node; id->adap.algo = &cdns_i2c_algo; id->adap.timeout = CDNS_I2C_TIMEOUT; id->adap.retries = 3; /* Default retry value. 
*/ id->adap.algo_data = id; id->adap.dev.parent = &pdev->dev; init_completion(&id->xfer_done); snprintf(id->adap.name, sizeof(id->adap.name), "Cadence I2C at %08lx", (unsigned long)r_mem->start); id->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(id->clk)) { dev_err(&pdev->dev, "input clock not found.\n"); return PTR_ERR(id->clk); } ret = clk_prepare_enable(id->clk); if (ret) dev_err(&pdev->dev, "Unable to enable clock.\n"); pm_runtime_set_autosuspend_delay(id->dev, CNDS_I2C_PM_TIMEOUT); pm_runtime_use_autosuspend(id->dev); pm_runtime_set_active(id->dev); pm_runtime_enable(id->dev); id->clk_rate_change_nb.notifier_call = cdns_i2c_clk_notifier_cb; if (clk_notifier_register(id->clk, &id->clk_rate_change_nb)) dev_warn(&pdev->dev, "Unable to register clock notifier.\n"); id->input_clk = clk_get_rate(id->clk); ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &id->i2c_clk); if (ret || (id->i2c_clk > CDNS_I2C_SPEED_MAX)) id->i2c_clk = CDNS_I2C_SPEED_DEFAULT; id->ctrl_reg = CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS; ret = cdns_i2c_setclk(id->input_clk, id); if (ret) { dev_err(&pdev->dev, "invalid SCL clock: %u Hz\n", id->i2c_clk); ret = -EINVAL; goto err_clk_dis; } ret = devm_request_irq(&pdev->dev, id->irq, cdns_i2c_isr, 0, DRIVER_NAME, id); if (ret) { dev_err(&pdev->dev, "cannot get irq %d\n", id->irq); goto err_clk_dis; } ret = i2c_add_adapter(&id->adap); if (ret < 0) { dev_err(&pdev->dev, "reg adap failed: %d\n", ret); goto err_clk_dis; } cdns_i2c_init(id); dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n", id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq); return 0; err_clk_dis: clk_disable_unprepare(id->clk); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); return ret; }
/*
 * Probe the Xilinx Clocking Wizard.
 *
 * Reads the multiplier, common divider and per-output dividers from the
 * configuration registers and models them as fixed-factor clocks:
 *   clk_in1 -> <dev>_mul -> <dev>_mul_div -> clkout[0..N-1]
 * Optionally registers rate-change notifiers when a valid "speed-grade"
 * property is present.
 *
 * Returns 0 on success, negative error otherwise.
 */
static int clk_wzrd_probe(struct platform_device *pdev)
{
	int i, ret;
	u32 reg;
	unsigned long rate;
	const char *clk_name;
	struct clk_wzrd *clk_wzrd;
	struct resource *mem;
	struct device_node *np = pdev->dev.of_node;

	clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
	if (!clk_wzrd)
		return -ENOMEM;
	platform_set_drvdata(pdev, clk_wzrd);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	clk_wzrd->base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(clk_wzrd->base))
		return PTR_ERR(clk_wzrd->base);

	/* Optional property; 0 (unset) disables the notifier logic below. */
	ret = of_property_read_u32(np, "speed-grade", &clk_wzrd->speed_grade);
	if (!ret) {
		if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
			dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
				 clk_wzrd->speed_grade);
			clk_wzrd->speed_grade = 0;
		}
	}

	/* Stay quiet on probe deferral; the core will retry us. */
	clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
	if (IS_ERR(clk_wzrd->clk_in1)) {
		if (clk_wzrd->clk_in1 != ERR_PTR(-EPROBE_DEFER))
			dev_err(&pdev->dev, "clk_in1 not found\n");
		return PTR_ERR(clk_wzrd->clk_in1);
	}

	clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(clk_wzrd->axi_clk)) {
		if (clk_wzrd->axi_clk != ERR_PTR(-EPROBE_DEFER))
			dev_err(&pdev->dev, "s_axi_aclk not found\n");
		return PTR_ERR(clk_wzrd->axi_clk);
	}
	ret = clk_prepare_enable(clk_wzrd->axi_clk);
	if (ret) {
		dev_err(&pdev->dev, "enabling s_axi_aclk failed\n");
		return ret;
	}
	rate = clk_get_rate(clk_wzrd->axi_clk);
	if (rate > WZRD_ACLK_MAX_FREQ) {
		dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n",
			rate);
		ret = -EINVAL;
		goto err_disable_clk;
	}

	/* we don't support fractional div/mul yet */
	reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
		    WZRD_CLKFBOUT_FRAC_EN;
	reg |= readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2)) &
		     WZRD_CLKOUT0_FRAC_EN;
	if (reg)
		dev_warn(&pdev->dev, "fractional div/mul not supported\n");

	/* register multiplier */
	reg = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
		     WZRD_CLKFBOUT_MULT_MASK) >> WZRD_CLKFBOUT_MULT_SHIFT;
	clk_name = kasprintf(GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
	if (!clk_name) {
		ret = -ENOMEM;
		goto err_disable_clk;
	}
	clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor(
			&pdev->dev, clk_name,
			__clk_get_name(clk_wzrd->clk_in1),
			0, reg, 1);
	/* Safe: clk_register_fixed_factor() duplicates the name string. */
	kfree(clk_name);
	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
		dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
		ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
		goto err_disable_clk;
	}

	/* register div */
	reg = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
			WZRD_DIVCLK_DIVIDE_MASK) >> WZRD_DIVCLK_DIVIDE_SHIFT;
	clk_name = kasprintf(GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
	if (!clk_name) {
		ret = -ENOMEM;
		goto err_rm_int_clk;
	}

	clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_fixed_factor(
			&pdev->dev, clk_name,
			__clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
			0, 1, reg);
	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
		dev_err(&pdev->dev, "unable to register divider clock\n");
		ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
		goto err_rm_int_clk;
	}

	/* register div per output */
	for (i = WZRD_NUM_OUTPUTS - 1; i >= 0 ; i--) {
		const char *clkout_name;

		if (of_property_read_string_index(np, "clock-output-names", i,
						  &clkout_name)) {
			dev_err(&pdev->dev,
				"clock output name not specified\n");
			ret = -EINVAL;
			goto err_rm_int_clks;
		}
		/* Each output's config block is 12 bytes apart. */
		reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2) + i * 12);
		reg &= WZRD_CLKOUT_DIVIDE_MASK;
		reg >>= WZRD_CLKOUT_DIVIDE_SHIFT;
		/* clk_name here is still the "<dev>_mul_div" parent name. */
		clk_wzrd->clkout[i] = clk_register_fixed_factor(&pdev->dev,
				clkout_name, clk_name, 0, 1, reg);
		if (IS_ERR(clk_wzrd->clkout[i])) {
			int j;

			/* Loop counts down, so [i+1..N-1] are registered. */
			for (j = i + 1; j < WZRD_NUM_OUTPUTS; j++)
				clk_unregister(clk_wzrd->clkout[j]);
			dev_err(&pdev->dev,
				"unable to register divider clock\n");
			ret = PTR_ERR(clk_wzrd->clkout[i]);
			goto err_rm_int_clks;
		}
	}

	kfree(clk_name);

	clk_wzrd->clk_data.clks = clk_wzrd->clkout;
	clk_wzrd->clk_data.clk_num = ARRAY_SIZE(clk_wzrd->clkout);
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_wzrd->clk_data);

	/* With a valid speed grade, watch both input clocks for changes. */
	if (clk_wzrd->speed_grade) {
		clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;

		ret = clk_notifier_register(clk_wzrd->clk_in1, &clk_wzrd->nb);
		if (ret)
			dev_warn(&pdev->dev,
				 "unable to register clock notifier\n");

		ret = clk_notifier_register(clk_wzrd->axi_clk, &clk_wzrd->nb);
		if (ret)
			dev_warn(&pdev->dev,
				 "unable to register clock notifier\n");
	}

	return 0;

err_rm_int_clks:
	clk_unregister(clk_wzrd->clks_internal[1]);
err_rm_int_clk:
	kfree(clk_name);
	clk_unregister(clk_wzrd->clks_internal[0]);
err_disable_clk:
	clk_disable_unprepare(clk_wzrd->axi_clk);

	return ret;
}
static int sdhci_zynq_probe(struct platform_device *pdev) { int ret; int irq = platform_get_irq(pdev, 0); const void *prop; struct device_node *np = pdev->dev.of_node; struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; struct xsdhcips *xsdhcips; xsdhcips = kmalloc(sizeof(*xsdhcips), GFP_KERNEL); if (!xsdhcips) { dev_err(&pdev->dev, "unable to allocate memory\n"); return -ENOMEM; } if (irq == 56) xsdhcips->aperclk = clk_get_sys("SDIO0_APER", NULL); else xsdhcips->aperclk = clk_get_sys("SDIO1_APER", NULL); if (IS_ERR(xsdhcips->aperclk)) { dev_err(&pdev->dev, "APER clock not found.\n"); ret = PTR_ERR(xsdhcips->aperclk); goto err_free; } if (irq == 56) xsdhcips->devclk = clk_get_sys("SDIO0", NULL); else xsdhcips->devclk = clk_get_sys("SDIO1", NULL); if (IS_ERR(xsdhcips->devclk)) { dev_err(&pdev->dev, "Device clock not found.\n"); ret = PTR_ERR(xsdhcips->devclk); goto clk_put_aper; } ret = clk_prepare_enable(xsdhcips->aperclk); if (ret) { dev_err(&pdev->dev, "Unable to enable APER clock.\n"); goto clk_put; } ret = clk_prepare_enable(xsdhcips->devclk); if (ret) { dev_err(&pdev->dev, "Unable to enable device clock.\n"); goto clk_dis_aper; } xsdhcips->clk_rate_change_nb.notifier_call = xsdhcips_clk_notifier_cb; xsdhcips->clk_rate_change_nb.next = NULL; if (clk_notifier_register(xsdhcips->devclk, &xsdhcips->clk_rate_change_nb)) dev_warn(&pdev->dev, "Unable to register clock notifier.\n"); ret = sdhci_pltfm_register(pdev, &sdhci_zynq_pdata); if (ret) { dev_err(&pdev->dev, "Platform registration failed\n"); goto clk_notif_unreg; } host = platform_get_drvdata(pdev); pltfm_host = sdhci_priv(host); pltfm_host->priv = xsdhcips; prop = of_get_property(np, "xlnx,has-cd", NULL); if (prop == NULL || (!(u32) be32_to_cpup(prop))) host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; return 0; clk_notif_unreg: clk_notifier_unregister(xsdhcips->devclk, &xsdhcips->clk_rate_change_nb); clk_disable_unprepare(xsdhcips->devclk); clk_dis_aper: clk_disable_unprepare(xsdhcips->aperclk); 
clk_put: clk_put(xsdhcips->devclk); clk_put_aper: clk_put(xsdhcips->aperclk); err_free: kfree(xsdhcips); return ret; }
static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device *parent = dev->parent; struct clk_regmap_mux_div *a53cc; struct regmap *regmap; struct clk_init_data init = { }; int ret = -ENODEV; regmap = dev_get_regmap(parent, NULL); if (!regmap) { dev_err(dev, "failed to get regmap: %d\n", ret); return ret; } a53cc = devm_kzalloc(dev, sizeof(*a53cc), GFP_KERNEL); if (!a53cc) return -ENOMEM; init.name = "a53mux"; init.parent_names = gpll0_a53cc; init.num_parents = ARRAY_SIZE(gpll0_a53cc); init.ops = &clk_regmap_mux_div_ops; init.flags = CLK_SET_RATE_PARENT; a53cc->clkr.hw.init = &init; a53cc->clkr.regmap = regmap; a53cc->reg_offset = 0x50; a53cc->hid_width = 5; a53cc->hid_shift = 0; a53cc->src_width = 3; a53cc->src_shift = 8; a53cc->parent_map = gpll0_a53cc_map; a53cc->pclk = devm_clk_get(parent, NULL); if (IS_ERR(a53cc->pclk)) { ret = PTR_ERR(a53cc->pclk); dev_err(dev, "failed to get clk: %d\n", ret); return ret; } a53cc->clk_nb.notifier_call = a53cc_notifier_cb; ret = clk_notifier_register(a53cc->pclk, &a53cc->clk_nb); if (ret) { dev_err(dev, "failed to register clock notifier: %d\n", ret); return ret; } ret = devm_clk_register_regmap(dev, &a53cc->clkr); if (ret) { dev_err(dev, "failed to register regmap clock: %d\n", ret); goto err; } ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &a53cc->clkr.hw); if (ret) { dev_err(dev, "failed to add clock provider: %d\n", ret); goto err; } platform_set_drvdata(pdev, a53cc); return 0; err: clk_notifier_unregister(a53cc->pclk, &a53cc->clk_nb); return ret; }